Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/Kconfig10
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/acp/include/acp_gfx_if.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h347
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c246
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c194
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c269
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c1577
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c266
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c547
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c803
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2504
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c136
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c613
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c225
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c585
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c290
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c164
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c576
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c173
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c149
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c130
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c1387
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_i2c.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c331
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c244
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/emu_soc.c (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h)15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c279
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c681
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c168
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c232
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c301
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c521
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c152
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c200
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c136
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c168
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c405
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c203
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c330
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c212
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c10
-rwxr-xr-x[-rw-r--r--]drivers/gpu/drm/amd/amdgpu/vce_v4_0.c110
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c195
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c215
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm1384
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c581
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c1262
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h42
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c75
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c377
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c467
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c56
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c93
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c48
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c66
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c357
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.h78
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c30
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c92
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c192
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c99
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h184
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c849
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c81
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c1055
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h34
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/TODO3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c761
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h48
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c274
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c126
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c16
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c83
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c33
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/log_helpers.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile14
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c199
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c226
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c790
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c145
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c334
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c192
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c184
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c94
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h638
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h211
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h319
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c231
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c410
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c804
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c834
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c450
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h561
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c271
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c499
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h215
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c155
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h590
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2518
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c529
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h90
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c)439
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h)158
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c157
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c6124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h598
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c1763
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_logger.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_instance.h)26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h157
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h116
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h8
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h33
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h10
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h16
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h34
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c1396
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h53
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c191
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h9
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_stats.h (renamed from drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c)72
-rw-r--r--drivers/gpu/drm/amd/display/modules/stats/stats.c334
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h172
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_offset.h453
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_sh_mask.h2045
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h)14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_default.h)7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_offset.h)14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_sh_mask.h)45
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_offset.h)14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_sh_mask.h)41
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_offset.h7497
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h31160
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_offset.h209
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_sh_mask.h601
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_offset.h1991
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_sh_mask.h10265
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_offset.h375
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_sh_mask.h1463
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_offset.h337
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_sh_mask.h1249
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_default.h7988
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_default.h4005
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_default.h1028
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_default.h202
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_default.h286
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_offset.h547
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_sh_mask.h1852
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_default.h282
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_offset.h539
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_sh_mask.h1810
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_offset.h)3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_sh_mask.h)3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_default.h31
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_offset.h52
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_sh_mask.h36
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_default.h241
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_offset.h453
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_sh_mask.h2045
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_default.h9868
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_default.h117
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_offset.h209
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_sh_mask.h601
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_default.h342
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_offset.h375
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_sh_mask.h1463
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_default.h1271
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_default.h176
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_default.h286
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_offset.h547
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_sh_mask.h1852
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_default.h282
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_offset.h539
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_sh_mask.h1810
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_default.h100
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_default.h127
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h82
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h202
-rw-r--r--drivers/gpu/drm/amd/include/cgs_linux.h119
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h146
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h155
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h286
-rw-r--r--drivers/gpu/drm/amd/include/soc15_hw_ip.h98
-rw-r--r--drivers/gpu/drm/amd/include/soc15_ih_clientid.h70
-rw-r--r--drivers/gpu/drm/amd/include/vega10_enum.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/vega10_enum.h)0
-rw-r--r--drivers/gpu/drm/amd/include/vega10_ip_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/soc15ip.h)370
-rw-r--r--drivers/gpu/drm/amd/include/vi_structs.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c975
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c209
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h36
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c59
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c667
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c114
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c55
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c89
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h65
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c14
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c956
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c1042
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h)121
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h)22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c991
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h35
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c)1100
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h)82
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c610
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h189
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c821
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c73
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c42
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c2090
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h438
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h39
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c1364
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h53
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_pptable.h109
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c430
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h58
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c324
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h66
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h277
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h35
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h279
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h412
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/power_state.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_feature.h67
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h)31
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7.h19
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu9.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h52
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h758
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega12_ppsmc.h123
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c254
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c858
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h98
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c263
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c63
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c279
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c398
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c344
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h)26
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c78
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c891
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h99
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c79
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c214
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c432
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h31
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c561
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h62
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h83
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h185
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c2
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c3
-rw-r--r--drivers/gpu/drm/arc/arcpgu_sim.c16
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c12
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c20
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c108
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h4
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c26
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h15
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c147
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h11
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c396
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h24
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c8
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c11
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c8
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c283
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h24
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c1
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h4
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c31
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c26
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c24
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c21
-rw-r--r--drivers/gpu/drm/bridge/Kconfig3
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c252
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h11
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c38
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c59
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c20
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c150
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c101
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c210
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c9
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c40
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c28
-rw-r--r--drivers/gpu/drm/drm_atomic.c148
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c131
-rw-r--r--drivers/gpu/drm/drm_auth.c6
-rw-r--r--drivers/gpu/drm/drm_blend.c26
-rw-r--r--drivers/gpu/drm/drm_bufs.c16
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c133
-rw-r--r--drivers/gpu/drm/drm_connector.c193
-rw-r--r--drivers/gpu/drm/drm_crtc.c16
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h6
-rw-r--r--drivers/gpu/drm/drm_debugfs.c8
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c19
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c13
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c5
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c11
-rw-r--r--drivers/gpu/drm/drm_drv.c49
-rw-r--r--drivers/gpu/drm/drm_edid.c140
-rw-r--r--drivers/gpu/drm/drm_encoder.c4
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c156
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c354
-rw-r--r--drivers/gpu/drm/drm_file.c2
-rw-r--r--drivers/gpu/drm/drm_fourcc.c50
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c105
-rw-r--r--drivers/gpu/drm/drm_gem.c25
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c53
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c6
-rw-r--r--drivers/gpu/drm/drm_internal.h7
-rw-r--r--drivers/gpu/drm/drm_ioctl.c6
-rw-r--r--drivers/gpu/drm/drm_lease.c16
-rw-r--r--drivers/gpu/drm/drm_memory.c13
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c9
-rw-r--r--drivers/gpu/drm/drm_mm.c32
-rw-r--r--drivers/gpu/drm/drm_mode_config.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c108
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c76
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c1
-rw-r--r--drivers/gpu/drm/drm_of.c8
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c178
-rw-r--r--drivers/gpu/drm/drm_plane.c41
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c120
-rw-r--r--drivers/gpu/drm/drm_prime.c195
-rw-r--r--drivers/gpu/drm/drm_print.c87
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c25
-rw-r--r--drivers/gpu/drm/drm_property.c102
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c55
-rw-r--r--drivers/gpu/drm/drm_syncobj.c58
-rw-r--r--drivers/gpu/drm/drm_vblank.c112
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c15
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig10
-rw-r--r--drivers/gpu/drm/etnaviv/Makefile4
-rw-r--r--drivers/gpu/drm/etnaviv/common.xml.h281
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c58
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c29
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h18
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c74
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h22
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c40
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c197
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h27
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c241
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c559
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h61
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c65
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c80
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c18
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c170
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.h35
-rw-r--r--drivers/gpu/drm/etnaviv/state.xml.h256
-rw-r--r--drivers/gpu/drm/etnaviv/state_3d.xml.h5
-rw-r--r--drivers/gpu/drm/etnaviv/state_blt.xml.h52
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h150
-rw-r--r--drivers/gpu/drm/exynos/Kconfig11
-rw-r--r--drivers/gpu/drm/exynos/Makefile1
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c30
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c1806
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h252
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.h19
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c14
-rw-r--r--drivers/gpu/drm/exynos/regs-decon5433.h209
-rw-r--r--drivers/gpu/drm/exynos/regs-decon7.h353
-rw-r--r--drivers/gpu/drm/exynos/regs-fimc.h2
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c25
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c16
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c9
-rw-r--r--drivers/gpu/drm/gma500/gma_device.c4
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c12
-rw-r--r--drivers/gpu/drm/gma500/mmu.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c27
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h19
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c28
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c3
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug29
-rw-r--r--drivers/gpu/drm/i915/Makefile37
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile3
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c18
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c272
-rw-r--r--drivers/gpu/drm/i915/gvt/debug.h24
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c212
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c80
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c536
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.h67
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c502
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c514
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h169
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c26
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c1523
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h203
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c153
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h146
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c881
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h13
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c736
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c136
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c587
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h (renamed from drivers/gpu/drm/i915/gvt/render.h)14
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h131
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c418
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.c184
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.h56
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c405
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c31
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c787
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h43
-rw-r--r--drivers/gpu/drm/i915/gvt/trace.h25
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c78
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c30
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c745
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c514
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h785
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c917
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c433
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c169
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c401
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h24
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h30
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c139
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c142
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c327
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c428
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c27
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c439
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c7
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bdw.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bxt.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt3.c109
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt3.h34
-rw-r--r--drivers/gpu/drm/i915/i915_oa_chv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cnl.c121
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cnl.h34
-rw-r--r--drivers/gpu/drm/i915/i915_oa_glk.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_hsw.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt2.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt3.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt2.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt3.c4
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt4.c4
-rw-r--r--drivers/gpu/drm/i915/i915_params.c64
-rw-r--r--drivers/gpu/drm/i915/i915_params.h16
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c222
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c229
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c1072
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h125
-rw-r--r--drivers/gpu/drm/i915/i915_query.c125
-rw-r--r--drivers/gpu/drm/i915/i915_query.h15
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h475
-rw-r--r--drivers/gpu/drm/i915/i915_request.c (renamed from drivers/gpu/drm/i915/i915_gem_request.c)504
-rw-r--r--drivers/gpu/drm/i915/i915_request.h (renamed from drivers/gpu/drm/i915/i915_gem_request.h)243
-rw-r--r--drivers/gpu/drm/i915/i915_selftest.h2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c59
-rw-r--r--drivers/gpu/drm/i915/i915_syncmap.c16
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c55
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h160
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h22
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c43
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h37
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c7
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c10
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c210
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c181
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c402
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c716
-rw-r--r--drivers/gpu/drm/i915/intel_color.c135
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c66
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c14
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c660
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c485
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h253
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1462
-rw-r--r--drivers/gpu/drm/i915/intel_display.h337
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1655
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c34
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c79
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c124
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c107
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h238
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c52
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c33
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c690
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c190
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c16
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c227
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h25
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ads.c151
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ads.h33
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.c1
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c258
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.h2
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h40
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c322
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.h14
-rw-r--r--drivers/gpu/drm/i915/intel_guc_reg.h (renamed from drivers/gpu/drm/i915/i915_guc_reg.h)14
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c (renamed from drivers/gpu/drm/i915/i915_guc_submission.c)882
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.h (renamed from drivers/gpu/drm/i915/i915_guc_submission.h)17
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c7
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c72
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.c807
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c425
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c44
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c189
-rw-r--r--drivers/gpu/drm/i915/intel_huc.h6
-rw-r--r--drivers/gpu/drm/i915/intel_huc_fw.c166
-rw-r--r--drivers/gpu/drm/i915/intel_huc_fw.h15
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c159
-rw-r--r--drivers/gpu/drm/i915/intel_lpe_audio.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c958
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h9
-rw-r--r--drivers/gpu/drm/i915/intel_lrc_reg.h68
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c3
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c46
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c30
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.h2
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c18
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c6
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.h106
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c89
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c108
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c6
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c637
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c233
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c961
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h370
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c220
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c103
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c272
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c34
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c372
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h25
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.c11
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.h7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c378
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h42
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h23
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c195
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c24
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c45
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c166
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c26
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_request.c)147
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c134
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_syncmap.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c355
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c521
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c42
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.h3
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c51
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c22
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.h8
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c13
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c34
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h1
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c5
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c125
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c4
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c6
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c46
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h3
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c57
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c19
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h4
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c219
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c347
-rw-r--r--drivers/gpu/drm/meson/meson_venc.h1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c31
-rw-r--r--drivers/gpu/drm/msm/Kconfig20
-rw-r--r--drivers/gpu/drm/msm/Makefile50
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c187
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c137
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c34
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c192
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c155
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c)6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c)2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h)9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c)4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c)13
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c)60
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h)2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c)5
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c)2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h)10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c)31
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_common.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp_common.xml.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.c (renamed from drivers/gpu/drm/msm/mdp/mdp_format.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp_kms.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp_kms.h)0
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c25
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h187
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c19
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c47
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c251
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h9
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c822
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c2
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c5
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c26
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c6
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c27
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c124
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h16
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c54
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c63
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c222
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c207
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c155
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h746
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h802
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h1006
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c144
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h)18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h21
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c52
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c148
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c42
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c42
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c44
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c71
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c73
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c327
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c40
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c44
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c40
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c62
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c32
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c42
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2369
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc_coefs.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c21
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c99
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c1567
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c840
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h281
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h11
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c419
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c432
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c27
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_phy.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_pll.c17
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h221
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c164
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c471
-rw-r--r--drivers/gpu/drm/omapdrm/dss/video-pll.c39
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h37
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c115
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h43
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h24
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c71
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c165
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h148
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.h33
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c22
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h46
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c45
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.h38
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c52
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h99
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c42
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.h39
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c20
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.h37
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c16
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.h4
-rw-r--r--drivers/gpu/drm/omapdrm/tcm.h6
-rw-r--r--drivers/gpu/drm/panel/Kconfig27
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c377
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c962
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c30
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c6
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c32
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c21
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c16
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c2
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c448
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c38
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c37
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c144
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c115
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h14
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c157
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c118
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c67
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c8
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c14
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c8
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c47
-rw-r--r--drivers/gpu/drm/radeon/cik.c145
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c1
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c433
-rw-r--r--drivers/gpu/drm/radeon/ni.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c106
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c68
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c5
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig9
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c89
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c102
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c175
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c14
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c97
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h24
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c272
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h64
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_of.c322
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_of.h20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts76
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c69
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c64
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c14
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c540
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds_regs.h6
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c131
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c11
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c11
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c30
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c26
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c35
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c125
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.c92
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.h4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c113
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c30
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h6
-rw-r--r--drivers/gpu/drm/scheduler/Makefile26
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler.c (renamed from drivers/gpu/drm/amd/scheduler/gpu_scheduler.c)396
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c (renamed from drivers/gpu/drm/amd/scheduler/sched_fence.c)122
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c5
-rw-r--r--drivers/gpu/drm/sti/Kconfig1
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.c2
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.h2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h2
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.h2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c29
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h3
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c2
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c12
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp_lut.h2
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c2
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c2
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c2
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c40
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.h2
-rw-r--r--drivers/gpu/drm/stm/drv.c49
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c67
-rw-r--r--drivers/gpu/drm/stm/ltdc.c193
-rw-r--r--drivers/gpu/drm/stm/ltdc.h5
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig11
-rw-r--r--drivers/gpu/drm/sun4i/Makefile13
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c442
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.h36
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c25
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_dotclock.c15
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c64
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c34
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c389
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.h99
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c162
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.h12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c232
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.h12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c369
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h36
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c93
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.h36
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c196
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h193
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c543
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c132
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_layer.c134
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_layer.h36
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c540
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h126
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c343
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.h63
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c172
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.h49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c384
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.h51
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c971
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.h58
-rw-r--r--drivers/gpu/drm/sun4i/sunxi_engine.h90
-rw-r--r--drivers/gpu/drm/tegra/Makefile2
-rw-r--r--drivers/gpu/drm/tegra/dc.c1956
-rw-r--r--drivers/gpu/drm/tegra/dc.h290
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c120
-rw-r--r--drivers/gpu/drm/tegra/drm.c158
-rw-r--r--drivers/gpu/drm/tegra/drm.h16
-rw-r--r--drivers/gpu/drm/tegra/dsi.c229
-rw-r--r--drivers/gpu/drm/tegra/fb.c75
-rw-r--r--drivers/gpu/drm/tegra/gem.c84
-rw-r--r--drivers/gpu/drm/tegra/gem.h5
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c504
-rw-r--r--drivers/gpu/drm/tegra/hub.c913
-rw-r--r--drivers/gpu/drm/tegra/hub.h98
-rw-r--r--drivers/gpu/drm/tegra/output.c25
-rw-r--r--drivers/gpu/drm/tegra/plane.c396
-rw-r--r--drivers/gpu/drm/tegra/plane.h70
-rw-r--r--drivers/gpu/drm/tegra/sor.c1117
-rw-r--r--drivers/gpu/drm/tegra/sor.h16
-rw-r--r--drivers/gpu/drm/tegra/vic.c20
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c55
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c32
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c29
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c23
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c8
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig24
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile2
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c104
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c99
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c34
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c465
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c111
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c137
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c18
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c211
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c14
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c452
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c145
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c63
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c28
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c130
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c80
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c137
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c183
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c10
-rw-r--r--drivers/gpu/drm/tve200/tve200_drm.h3
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c19
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c9
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c25
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c31
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h130
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c48
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c65
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c40
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c188
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c159
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h36
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c64
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c2
-rw-r--r--drivers/gpu/drm/via/via_drv.h4
-rw-r--r--drivers/gpu/drm/via/via_irq.c27
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c19
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c55
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c38
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c506
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c54
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c59
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c72
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c93
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h65
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c106
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c249
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h34
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c168
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c285
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c20
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c48
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c1
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c26
1403 files changed, 168447 insertions(+), 100964 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 4d9f218..deeefa7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -7,6 +7,7 @@
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
+ select DRM_PANEL_ORIENTATION_QUIRKS
select HDMI
select FB_CMDLINE
select I2C
@@ -149,6 +150,10 @@ config DRM_VM
bool
depends on DRM && MMU
+config DRM_SCHED
+ tristate
+ depends on DRM
+
source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
@@ -178,6 +183,7 @@ config DRM_AMDGPU
depends on DRM && PCI && MMU
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_SCHED
select DRM_TTM
select POWER_SUPPLY
select HWMON
@@ -362,6 +368,10 @@ config DRM_SAVAGE
endif # DRM_LEGACY
+# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
+config DRM_PANEL_ORIENTATION_QUIRKS
+ tristate
+
config DRM_LIB_RANDOM
bool
default n
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e950084..50093ff 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -47,8 +47,10 @@ obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
+obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-$(CONFIG_DRM_ARM) += arm/
obj-$(CONFIG_DRM_TTM) += ttm/
+obj-$(CONFIG_DRM_SCHED) += scheduler/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-y += amd/lib/
diff --git a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
index a72ddb2f..feab8eb 100644
--- a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
+++ b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
@@ -25,7 +25,6 @@
#define _ACP_GFX_IF_H
#include <linux/types.h>
-#include "cgs_linux.h"
#include "cgs_common.h"
int amd_acp_hw_init(struct cgs_device *cgs_device,
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 90202cf..2ca2b51 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -30,7 +30,6 @@ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
- -I$(FULL_AMD_PATH)/scheduler \
-I$(FULL_AMD_PATH)/powerplay/inc \
-I$(FULL_AMD_PATH)/acp/include \
-I$(FULL_AMD_DISPLAY_PATH) \
@@ -52,7 +51,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o \
+ amdgpu_ids.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -62,7 +62,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
amdgpu-y += \
- vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o
+ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
# add GMC block
amdgpu-y += \
@@ -87,8 +87,7 @@ amdgpu-y += \
# add SMC block
amdgpu-y += \
- amdgpu_dpm.o \
- amdgpu_powerplay.o
+ amdgpu_dpm.o
# add DCE block
amdgpu-y += \
@@ -129,16 +128,15 @@ amdgpu-y += \
# add amdkfd interfaces
amdgpu-y += \
amdgpu_amdkfd.o \
+ amdgpu_amdkfd_fence.o \
+ amdgpu_amdkfd_gpuvm.o \
amdgpu_amdkfd_gfx_v8.o
# add cgs
amdgpu-y += amdgpu_cgs.o
# GPU scheduler
-amdgpu-y += \
- ../scheduler/gpu_scheduler.o \
- ../scheduler/sched_fence.o \
- amdgpu_job.o
+amdgpu-y += amdgpu_job.o
# ACP componet
ifneq ($(CONFIG_DRM_AMD_ACP),)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0b14b53..f44a83a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -45,8 +45,11 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
+#include <drm/gpu_scheduler.h>
#include <kgd_kfd_interface.h>
+#include "dm_pp_interface.h"
+#include "kgd_pp_interface.h"
#include "amd_shared.h"
#include "amdgpu_mode.h"
@@ -59,18 +62,17 @@
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
-#include "amd_powerplay.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
+#include "amdgpu_gmc.h"
#include "amdgpu_dm.h"
-
-#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
+#include "amdgpu_debugfs.h"
/*
* Modules parameters.
@@ -125,6 +127,8 @@ extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
+extern int amdgpu_emu_mode;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -218,17 +222,18 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_LAST
};
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state);
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state);
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
+int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state);
+int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state);
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags);
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
#define AMDGPU_MAX_IP_NUM 16
@@ -253,15 +258,16 @@ struct amdgpu_ip_block {
const struct amdgpu_ip_block_version *version;
};
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor);
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor);
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type);
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version);
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
@@ -310,13 +316,6 @@ struct amdgpu_vm_pte_funcs {
void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr);
-
- /* maximum nums of PTEs/PDEs in a single operation */
- uint32_t set_max_nums_pte_pde;
-
- /* number of dw to reserve per operation */
- unsigned set_pte_pde_num_dw;
-
/* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
@@ -324,27 +323,6 @@ struct amdgpu_vm_pte_funcs {
uint32_t incr, uint64_t flags);
};
-/* provided by the gmc block */
-struct amdgpu_gart_funcs {
- /* flush the vm tlb via mmio */
- void (*flush_gpu_tlb)(struct amdgpu_device *adev,
- uint32_t vmid);
- /* write pte/pde updates using the cpu */
- int (*set_pte_pde)(struct amdgpu_device *adev,
- void *cpu_pt_addr, /* cpu addr of page table */
- uint32_t gpu_page_idx, /* pte/pde to update */
- uint64_t addr, /* addr to write into pte/pde */
- uint64_t flags); /* access flags */
- /* enable/disable PRT support */
- void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* set pte flags based per asic */
- uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
- uint32_t flags);
- /* get the pde for a given mc addr */
- u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
- uint32_t (*get_invalidate_req)(unsigned int vm_id);
-};
-
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
@@ -362,17 +340,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
/*
- * Dummy page
- */
-struct amdgpu_dummy_page {
- struct page *page;
- dma_addr_t addr;
-};
-int amdgpu_dummy_page_init(struct amdgpu_device *adev);
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev);
-
-
-/*
* Clocks
*/
@@ -412,13 +379,12 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags);
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
* By conception this is an helper for other part of the driver
@@ -475,7 +441,7 @@ struct amdgpu_sa_bo {
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
- u64 flags, bool kernel,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct drm_gem_object **obj);
@@ -489,55 +455,6 @@ int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
/*
- * VMHUB structures, functions & helpers
- */
-struct amdgpu_vmhub {
- uint32_t ctx0_ptb_addr_lo32;
- uint32_t ctx0_ptb_addr_hi32;
- uint32_t vm_inv_eng0_req;
- uint32_t vm_inv_eng0_ack;
- uint32_t vm_context0_cntl;
- uint32_t vm_l2_pro_fault_status;
- uint32_t vm_l2_pro_fault_cntl;
-};
-
-/*
- * GPU MC structures, functions & helpers
- */
-struct amdgpu_mc {
- resource_size_t aper_size;
- resource_size_t aper_base;
- resource_size_t agp_base;
- /* for some chips with <= 32MB we need to lie
- * about vram size near mc fb location */
- u64 mc_vram_size;
- u64 visible_vram_size;
- u64 gart_size;
- u64 gart_start;
- u64 gart_end;
- u64 vram_start;
- u64 vram_end;
- unsigned vram_width;
- u64 real_vram_size;
- int vram_mtrr;
- u64 mc_mask;
- const struct firmware *fw; /* MC firmware */
- uint32_t fw_version;
- struct amdgpu_irq_src vm_fault;
- uint32_t vram_type;
- uint32_t srbm_soft_reset;
- bool prt_warning;
- uint64_t stolen_size;
- /* apertures */
- u64 shared_aperture_start;
- u64 shared_aperture_end;
- u64 private_aperture_start;
- u64 private_aperture_end;
- /* protects concurrent invalidation */
- spinlock_t invalidate_lock;
-};
-
-/*
* GPU doorbell structures, functions & helpers
*/
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
@@ -645,12 +562,6 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
AMDGPU_DOORBELL64_INVALID = 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset);
-
/*
* IRQS.
*/
@@ -684,7 +595,7 @@ struct amdgpu_ib {
uint32_t flags;
};
-extern const struct amd_sched_backend_ops amdgpu_sched_ops;
+extern const struct drm_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
@@ -694,7 +605,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
+ struct drm_sched_entity *entity, void *owner,
struct dma_fence **f);
/*
@@ -727,7 +638,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
struct dma_fence **fences;
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
};
struct amdgpu_ctx {
@@ -735,14 +646,16 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
struct amdgpu_queue_mgr queue_mgr;
unsigned reset_counter;
+ unsigned reset_counter_query;
uint32_t vram_lost_counter;
spinlock_t ring_lock;
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
- enum amd_sched_priority init_priority;
- enum amd_sched_priority override_priority;
+ enum drm_sched_priority init_priority;
+ enum drm_sched_priority override_priority;
struct mutex lock;
+ atomic_t guilty;
};
struct amdgpu_ctx_mgr {
@@ -760,7 +673,7 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -957,6 +870,7 @@ struct amdgpu_gfx_config {
};
struct amdgpu_cu_info {
+ uint32_t simd_per_cu;
uint32_t max_waves_per_simd;
uint32_t wave_front_size;
uint32_t max_scratch_slots_per_cu;
@@ -1109,12 +1023,11 @@ struct amdgpu_cs_parser {
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
struct amdgpu_job {
- struct amd_sched_job base;
+ struct drm_sched_job base;
struct amdgpu_device *adev;
struct amdgpu_vm *vm;
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
- struct amdgpu_sync dep_sync;
struct amdgpu_sync sched_sync;
struct amdgpu_ib *ibs;
struct dma_fence *fence; /* the hw fence */
@@ -1123,8 +1036,9 @@ struct amdgpu_job {
void *owner;
uint64_t fence_ctx; /* the fence_context this job uses */
bool vm_needs_flush;
- unsigned vm_id;
uint64_t vm_pd_addr;
+ unsigned vmid;
+ unsigned pasid;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
@@ -1154,7 +1068,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
-#define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@@ -1164,10 +1078,8 @@ struct amdgpu_wb {
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-
-void amdgpu_get_pcie_info(struct amdgpu_device *adev);
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
/*
* SDMA
@@ -1232,24 +1144,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
*/
void amdgpu_test_moves(struct amdgpu_device *adev);
-/*
- * Debugfs
- */
-struct amdgpu_debugfs {
- const struct drm_info_list *files;
- unsigned num_files;
-};
-
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles);
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
-
-#if defined(CONFIG_DEBUG_FS)
-int amdgpu_debugfs_init(struct drm_minor *minor);
-#endif
-
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
/*
* amdgpu smumgr functions
@@ -1304,6 +1198,11 @@ struct amdgpu_asic_funcs {
void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
/* get config memsize register */
u32 (*get_config_memsize)(struct amdgpu_device *adev);
+ /* flush hdp write queue */
+ void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ /* invalidate hdp read cache */
+ void (*invalidate_hdp)(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
};
/*
@@ -1404,8 +1303,6 @@ struct amdgpu_fw_vram_usage {
void *va;
};
-int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
-
/*
* CGS
*/
@@ -1421,6 +1318,85 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+
+/*
+ * amdgpu nbio functions
+ *
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*detect_hw_virt)(struct amdgpu_device *adev);
+};
+
+
+/* Define the HW IP blocks will be used in driver , add more if necessary */
+enum amd_hw_ip_block_type {
+ GC_HWIP = 1,
+ HDP_HWIP,
+ SDMA0_HWIP,
+ SDMA1_HWIP,
+ MMHUB_HWIP,
+ ATHUB_HWIP,
+ NBIO_HWIP,
+ MP0_HWIP,
+ UVD_HWIP,
+ VCN_HWIP = UVD_HWIP,
+ VCE_HWIP,
+ DF_HWIP,
+ DCE_HWIP,
+ OSSSYS_HWIP,
+ SMUIO_HWIP,
+ PWR_HWIP,
+ NBIF_HWIP,
+ MAX_HWIP
+};
+
+#define HWIP_MAX_INSTANCE 6
+
+struct amd_powerplay {
+ void *pp_handle;
+ const struct amd_pm_funcs *pp_funcs;
+};
+
#define AMDGPU_RESET_MAGIC_NUM 64
struct amdgpu_device {
struct device *dev;
@@ -1441,6 +1417,7 @@ struct amdgpu_device {
const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown;
bool need_dma32;
+ bool need_swiotlb;
bool accel_working;
struct work_struct reset_work;
struct notifier_block acpi_nb;
@@ -1510,9 +1487,9 @@ struct amdgpu_device {
struct amdgpu_clock clock;
/* MC */
- struct amdgpu_mc mc;
+ struct amdgpu_gmc gmc;
struct amdgpu_gart gart;
- struct amdgpu_dummy_page dummy_page;
+ dma_addr_t dummy_page_addr;
struct amdgpu_vm_manager vm_manager;
struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
@@ -1606,6 +1583,11 @@ struct amdgpu_device {
/* amdkfd interface */
struct kfd_dev *kfd;
+ /* soc15 register offset based on ip, instance and segment */
+ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+
+ const struct amdgpu_nbio_funcs *nbio_funcs;
+
/* delayed work_func for deferring clockgating during resume */
struct delayed_work late_init_work;
@@ -1616,9 +1598,6 @@ struct amdgpu_device {
/* link all shadow bo */
struct list_head shadow_list;
struct mutex shadow_list_lock;
- /* link all gtt */
- spinlock_t gtt_list_lock;
- struct list_head gtt_list;
/* keep an lru list of rings by HW IP */
struct list_head ring_lru_list;
spinlock_t ring_lru_list_lock;
@@ -1629,7 +1608,8 @@ struct amdgpu_device {
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
- bool in_sriov_reset;
+ bool in_gpu_reset;
+ struct mutex lock_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1648,6 +1628,9 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
+
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
@@ -1659,6 +1642,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+int emu_soc_asic_init(struct amdgpu_device *adev);
+
/*
* Registers read & write functions.
*/
@@ -1669,6 +1654,9 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
+#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
+
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
@@ -1771,30 +1759,34 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
+#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
+#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
+#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
+#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
-#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
@@ -1804,7 +1796,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
-#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
@@ -1823,22 +1814,22 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
/* Common functions */
-int amdgpu_gpu_reset(struct amdgpu_device *adev);
-bool amdgpu_need_backup(struct amdgpu_device *adev);
-void amdgpu_pci_config_reset(struct amdgpu_device *adev);
-bool amdgpu_need_post(struct amdgpu_device *adev);
-void amdgpu_update_display_priority(struct amdgpu_device *adev);
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job* job, bool force);
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+bool amdgpu_device_need_post(struct amdgpu_device *adev);
+void amdgpu_display_update_priority(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
+void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc, u64 base);
+void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc);
+int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
@@ -1872,7 +1863,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int amdgpu_suspend(struct amdgpu_device *adev);
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index c04f44a..a29362f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -277,7 +277,7 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
const struct amdgpu_ip_block *ip_block =
- amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
if (!ip_block)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 57afad7..8fa850a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
size_t size;
u32 retry = 3;
+ if (amdgpu_acpi_pcie_notify_device_ready(adev))
+ return -EINVAL;
+
/* Get the device handle */
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5432af3..4d36203 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -30,6 +30,8 @@
const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
+static const unsigned int compute_vmid_bitmap = 0xFF00;
+
int amdgpu_amdkfd_init(void)
{
int ret;
@@ -56,6 +58,7 @@ int amdgpu_amdkfd_init(void)
#else
ret = -ENOENT;
#endif
+ amdgpu_amdkfd_gpuvm_init_mem_limits();
return ret;
}
@@ -78,14 +81,19 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
+ case CHIP_HAWAII:
kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
break;
#endif
case CHIP_CARRIZO:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
default:
- dev_info(adev->dev, "kfd not supported on this ASIC\n");
+ dev_dbg(adev->dev, "kfd not supported on this ASIC\n");
return;
}
@@ -93,15 +101,52 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
adev->pdev, kfd2kgd);
}
+/**
+ * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
+ * set up amdkfd
+ *
+ * @adev: amdgpu_device pointer
+ * @aperture_base: output returning doorbell aperture base physical address
+ * @aperture_size: output returning doorbell aperture size in bytes
+ * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
+ *
+ * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
+ * takes doorbells required for its own rings and reports the setup to amdkfd.
+ * amdgpu reserved doorbells are at the start of the doorbell aperture.
+ */
+static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
+ phys_addr_t *aperture_base,
+ size_t *aperture_size,
+ size_t *start_offset)
+{
+ /*
+ * The first num_doorbells are used by amdgpu.
+ * amdkfd takes whatever's left in the aperture.
+ */
+ if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
+ *aperture_base = adev->doorbell.base;
+ *aperture_size = adev->doorbell.size;
+ *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
+ } else {
+ *aperture_base = 0;
+ *aperture_size = 0;
+ *start_offset = 0;
+ }
+}
+
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
int last_valid_bit;
if (adev->kfd) {
struct kgd2kfd_shared_resources gpu_resources = {
- .compute_vmid_bitmap = 0xFF00,
+ .compute_vmid_bitmap = compute_vmid_bitmap,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
- .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
+ .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
+ .gpuvm_size = min(adev->vm_manager.max_pfn
+ << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_VA_HOLE_START),
+ .drm_render_minor = adev->ddev->render->index
};
/* this is going to have a few of the MSBs set that we need to
@@ -171,20 +216,14 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **cpu_ptr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+ struct amdgpu_bo *bo = NULL;
int r;
+ uint64_t gpu_addr_tmp = 0;
+ void *cpu_ptr_tmp = NULL;
- BUG_ON(kgd == NULL);
- BUG_ON(gpu_addr == NULL);
- BUG_ON(cpu_ptr == NULL);
-
- *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if ((*mem) == NULL)
- return -ENOMEM;
-
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0,
- &(*mem)->bo);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
+ NULL, &bo);
if (r) {
dev_err(adev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
@@ -192,64 +231,88 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
}
/* map the buffer */
- r = amdgpu_bo_reserve((*mem)->bo, true);
+ r = amdgpu_bo_reserve(bo, true);
if (r) {
dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
goto allocate_mem_reserve_bo_failed;
}
- r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
- &(*mem)->gpu_addr);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
+ &gpu_addr_tmp);
if (r) {
dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
goto allocate_mem_pin_bo_failed;
}
- *gpu_addr = (*mem)->gpu_addr;
- r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+ r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
if (r) {
dev_err(adev->dev,
"(%d) failed to map bo to kernel for amdkfd\n", r);
goto allocate_mem_kmap_bo_failed;
}
- *cpu_ptr = (*mem)->cpu_ptr;
- amdgpu_bo_unreserve((*mem)->bo);
+ *mem_obj = bo;
+ *gpu_addr = gpu_addr_tmp;
+ *cpu_ptr = cpu_ptr_tmp;
+
+ amdgpu_bo_unreserve(bo);
return 0;
allocate_mem_kmap_bo_failed:
- amdgpu_bo_unpin((*mem)->bo);
+ amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
- amdgpu_bo_unreserve((*mem)->bo);
+ amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
- amdgpu_bo_unref(&(*mem)->bo);
+ amdgpu_bo_unref(&bo);
return r;
}
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
- struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
-
- BUG_ON(mem == NULL);
+ struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
- amdgpu_bo_reserve(mem->bo, true);
- amdgpu_bo_kunmap(mem->bo);
- amdgpu_bo_unpin(mem->bo);
- amdgpu_bo_unreserve(mem->bo);
- amdgpu_bo_unref(&(mem->bo));
- kfree(mem);
+ amdgpu_bo_reserve(bo, true);
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&(bo));
}
-uint64_t get_vmem_size(struct kgd_dev *kgd)
+void get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info)
{
- struct amdgpu_device *adev =
- (struct amdgpu_device *)kgd;
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
+ ~((1ULL << 32) - 1);
+ resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;
+
+ memset(mem_info, 0, sizeof(*mem_info));
+ if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
+ mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
+ mem_info->local_mem_size_private = adev->gmc.real_vram_size -
+ adev->gmc.visible_vram_size;
+ } else {
+ mem_info->local_mem_size_public = 0;
+ mem_info->local_mem_size_private = adev->gmc.real_vram_size;
+ }
+ mem_info->vram_width = adev->gmc.vram_width;
- BUG_ON(kgd == NULL);
+ pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
+ &adev->gmc.aper_base, &aper_limit,
+ mem_info->local_mem_size_public,
+ mem_info->local_mem_size_private);
- return adev->mc.real_vram_size;
+ if (amdgpu_emu_mode == 1) {
+ mem_info->mem_clk_max = 100;
+ return;
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ mem_info->mem_clk_max = adev->clock.default_mclk / 100;
+ else
+ mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
}
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
@@ -265,6 +328,107 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- /* The sclk is in quantas of 10kHz */
- return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+ /* the sclk is in quantas of 10kHz */
+ if (amdgpu_emu_mode == 1)
+ return 100;
+
+ if (amdgpu_sriov_vf(adev))
+ return adev->clock.default_sclk / 100;
+
+ return amdgpu_dpm_get_sclk(adev, false) / 100;
+}
+
+void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
+
+ memset(cu_info, 0, sizeof(*cu_info));
+ if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
+ return;
+
+ cu_info->cu_active_number = acu_info.number;
+ cu_info->cu_ao_mask = acu_info.ao_cu_mask;
+ memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
+ sizeof(acu_info.bitmap));
+ cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
+ cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
+ cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
+ cu_info->simd_per_cu = acu_info.simd_per_cu;
+ cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
+ cu_info->wave_front_size = acu_info.wave_front_size;
+ cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
+ cu_info->lds_size = acu_info.lds_size;
+}
+
+uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+}
+
+int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+ uint32_t vmid, uint64_t gpu_addr,
+ uint32_t *ib_cmd, uint32_t ib_len)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct amdgpu_ring *ring;
+ struct dma_fence *f = NULL;
+ int ret;
+
+ switch (engine) {
+ case KGD_ENGINE_MEC1:
+ ring = &adev->gfx.compute_ring[0];
+ break;
+ case KGD_ENGINE_SDMA1:
+ ring = &adev->sdma.instance[0].ring;
+ break;
+ case KGD_ENGINE_SDMA2:
+ ring = &adev->sdma.instance[1].ring;
+ break;
+ default:
+ pr_err("Invalid engine in IB submission: %d\n", engine);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = amdgpu_job_alloc(adev, 1, &job, NULL);
+ if (ret)
+ goto err;
+
+ ib = &job->ibs[0];
+ memset(ib, 0, sizeof(struct amdgpu_ib));
+
+ ib->gpu_addr = gpu_addr;
+ ib->ptr = ib_cmd;
+ ib->length_dw = ib_len;
+ /* This works for NO_HWS. TODO: need to handle without knowing VMID */
+ job->vmid = vmid;
+
+ ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
+ if (ret) {
+ DRM_ERROR("amdgpu: failed to schedule IB.\n");
+ goto err_ib_sched;
+ }
+
+ ret = dma_fence_wait(f, false);
+
+err_ib_sched:
+ dma_fence_put(f);
+ amdgpu_job_free(job);
+err:
+ return ret;
+}
+
+bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
+{
+ if (adev->kfd) {
+ if ((1 << vmid) & compute_vmid_bitmap)
+ return true;
+ }
+
+ return false;
}
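
For illustration only — a minimal sketch (not part of this patch) of how a KFD-side caller might use the shared GTT helpers above; the wrapper name allocate_and_clear_gtt_bo() is hypothetical, only alloc_gtt_mem()/free_gtt_mem() come from this file:

	/* Hypothetical helper built on alloc_gtt_mem()/free_gtt_mem() above */
	static int allocate_and_clear_gtt_bo(struct kgd_dev *kgd, size_t size,
					     void **mem_obj, uint64_t *gpu_addr,
					     void **cpu_ptr)
	{
		int r;

		/* Creates a GTT BO, pins it and maps it into kernel space */
		r = alloc_gtt_mem(kgd, size, mem_obj, gpu_addr, cpu_ptr);
		if (r)
			return r;

		memset(*cpu_ptr, 0, size);	/* start from known contents */
		return 0;
	}

	/* Teardown is symmetric: free_gtt_mem(kgd, *mem_obj); */
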
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8d689ab..c2c2bea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -26,15 +26,71 @@
#define AMDGPU_AMDKFD_H_INCLUDED
#include <linux/types.h>
+#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <kgd_kfd_interface.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+#include "amdgpu_sync.h"
+#include "amdgpu_vm.h"
+
+extern const struct kgd2kfd_calls *kgd2kfd;
struct amdgpu_device;
+struct kfd_bo_va_list {
+ struct list_head bo_list;
+ struct amdgpu_bo_va *bo_va;
+ void *kgd_dev;
+ bool is_mapped;
+ uint64_t va;
+ uint64_t pte_flags;
+};
+
struct kgd_mem {
+ struct mutex lock;
struct amdgpu_bo *bo;
- uint64_t gpu_addr;
- void *cpu_ptr;
+ struct list_head bo_va_list;
+ /* protected by amdkfd_process_info.lock */
+ struct ttm_validate_buffer validate_list;
+ struct ttm_validate_buffer resv_list;
+ uint32_t domain;
+ unsigned int mapped_to_gpu_memory;
+ uint64_t va;
+
+ uint32_t mapping_flags;
+
+ struct amdkfd_process_info *process_info;
+
+ struct amdgpu_sync sync;
+
+ bool aql_queue;
+};
+
+/* KFD Memory Eviction */
+struct amdgpu_amdkfd_fence {
+ struct dma_fence base;
+ struct mm_struct *mm;
+ spinlock_t lock;
+ char timeline_name[TASK_COMM_LEN];
+};
+
+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+ struct mm_struct *mm);
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+
+struct amdkfd_process_info {
+ /* List head of all VMs that belong to a KFD process */
+ struct list_head vm_list_head;
+ /* List head for all KFD BOs that belong to a KFD process. */
+ struct list_head kfd_bo_list;
+ /* Lock to protect kfd_bo_list */
+ struct mutex lock;
+
+ /* Number of VMs */
+ unsigned int n_vms;
+ /* Eviction Fence */
+ struct amdgpu_amdkfd_fence *eviction_fence;
};
int amdgpu_amdkfd_init(void);
@@ -48,18 +104,27 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
+int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+ uint32_t vmid, uint64_t gpu_addr,
+ uint32_t *ib_cmd, uint32_t ib_len);
+
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
+
/* Shared API */
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr);
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-uint64_t get_vmem_size(struct kgd_dev *kgd);
+void get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info);
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
#define read_user_wptr(mmptr, wptr, dst) \
({ \
@@ -76,4 +141,36 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
valid; \
})
+/* GPUVM API */
+int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
+ void **process_info,
+ struct dma_fence **ef);
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+ struct file *filp,
+ void **vm, void **process_info,
+ struct dma_fence **ef);
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
+uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
+int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ struct kgd_dev *kgd, uint64_t va, uint64_t size,
+ void *vm, struct kgd_mem **mem,
+ uint64_t *offset, uint32_t flags);
+int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ struct kgd_dev *kgd, struct kgd_mem *mem);
+int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
+int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
+int amdgpu_amdkfd_gpuvm_sync_memory(
+ struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
+int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
+ struct kgd_mem *mem, void **kptr, uint64_t *size);
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
+ struct dma_fence **ef);
+
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
+void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
+
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
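
For illustration only — a minimal sketch (not from this patch) of the call order the GPUVM API above implies. The helper name, the NULL mmap offset and the error handling are assumptions; only the amdgpu_amdkfd_gpuvm_* entry points are taken from this header:

	/* Hypothetical caller: create a process VM, allocate, map and sync a BO */
	static int example_alloc_and_map(struct kgd_dev *kgd, uint64_t va,
					 uint64_t size, uint32_t flags)
	{
		void *vm, *process_info;
		struct dma_fence *ef;	/* process eviction fence, kept by caller */
		struct kgd_mem *mem;
		int r;

		r = amdgpu_amdkfd_gpuvm_create_process_vm(kgd, &vm, &process_info, &ef);
		if (r)
			return r;

		/* Passing NULL for the mmap offset is an assumption here */
		r = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, vm, &mem,
							    NULL, flags);
		if (r)
			goto err_destroy_vm;

		r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
		if (r)
			goto err_free;

		/* Wait for the mapping to land before user queues touch it */
		return amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);

	err_free:
		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem);
	err_destroy_vm:
		amdgpu_amdkfd_gpuvm_destroy_process_vm(kgd, vm);
		return r;
	}
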
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
new file mode 100644
index 0000000..2c14025
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/stacktrace.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sched/mm.h>
+#include "amdgpu_amdkfd.h"
+
+static const struct dma_fence_ops amdkfd_fence_ops;
+static atomic_t fence_seq = ATOMIC_INIT(0);
+
+/* Eviction Fence
+ * Fence helper functions to deal with KFD memory eviction.
+ * Big Idea - Since KFD submissions are done by user queues, a BO cannot be
+ * evicted unless all the user queues for that process are evicted.
+ *
+ * All the BOs in a process share an eviction fence. When process X wants
+ * to map VRAM memory but TTM can't find enough space, TTM will attempt to
+ * evict BOs from its LRU list. TTM checks if the BO is valuable to evict
+ * by calling ttm_bo_driver->eviction_valuable().
+ *
+ * ttm_bo_driver->eviction_valuable() - will return false if the BO belongs
+ * to process X. Otherwise, it will return true to indicate the BO can be
+ * evicted by TTM.
+ *
+ * If ttm_bo_driver->eviction_valuable returns true, then TTM will continue
+ * the eviction process for that BO by calling ttm_bo_evict --> amdgpu_bo_move
+ * --> amdgpu_copy_buffer(). This sets up a job in the GPU scheduler.
+ *
+ * GPU Scheduler (amd_sched_main) - sets up a cb (fence_add_callback) to
+ * notify when the BO is free to move. fence_add_callback --> enable_signaling
+ * --> amdgpu_amdkfd_fence.enable_signaling
+ *
+ * amdgpu_amdkfd_fence.enable_signaling - Start a work item that will quiesce
+ * user queues and signal the fence. The work item will also start another delayed
+ * work item to restore BOs.
+ */
+
+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+ struct mm_struct *mm)
+{
+ struct amdgpu_amdkfd_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (fence == NULL)
+ return NULL;
+
+ /* This reference gets released in amdkfd_fence_release */
+ mmgrab(mm);
+ fence->mm = mm;
+ get_task_comm(fence->timeline_name, current);
+ spin_lock_init(&fence->lock);
+
+ dma_fence_init(&fence->base, &amdkfd_fence_ops, &fence->lock,
+ context, atomic_inc_return(&fence_seq));
+
+ return fence;
+}
+
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+ struct amdgpu_amdkfd_fence *fence;
+
+ if (!f)
+ return NULL;
+
+ fence = container_of(f, struct amdgpu_amdkfd_fence, base);
+ if (fence && f->ops == &amdkfd_fence_ops)
+ return fence;
+
+ return NULL;
+}
+
+static const char *amdkfd_fence_get_driver_name(struct dma_fence *f)
+{
+ return "amdgpu_amdkfd_fence";
+}
+
+static const char *amdkfd_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
+
+ return fence->timeline_name;
+}
+
+/**
+ * amdkfd_fence_enable_signaling - This gets called when TTM wants to evict
+ * a KFD BO and schedules a job to move the BO.
+ * If the fence is already signaled, return true.
+ * If the fence is not signaled, schedule an evict-KFD-process work item.
+ */
+static bool amdkfd_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
+
+ if (!fence)
+ return false;
+
+ if (dma_fence_is_signaled(f))
+ return true;
+
+ if (!kgd2kfd->schedule_evict_and_restore_process(fence->mm, f))
+ return true;
+
+ return false;
+}
+
+/**
+ * amdkfd_fence_release - callback that fence can be freed
+ *
+ * @fence: fence
+ *
+ * This function is called when the reference count becomes zero.
+ * Drops the mm_struct reference and RCU schedules freeing up the fence.
+ */
+static void amdkfd_fence_release(struct dma_fence *f)
+{
+ struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
+
+ /* Unconditionally signal the fence. The process is getting
+ * terminated.
+ */
+ if (WARN_ON(!fence))
+ return; /* Not an amdgpu_amdkfd_fence */
+
+ mmdrop(fence->mm);
+ kfree_rcu(f, rcu);
+}
+
+/**
+ * amdkfd_fence_check_mm - Check if @mm is the same as that of the fence @f;
+ * return true if they match, false otherwise.
+ *
+ * @f: [IN] fence
+ * @mm: [IN] mm that needs to be verified
+ */
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+ struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
+
+ if (!fence)
+ return false;
+ else if (fence->mm == mm)
+ return true;
+
+ return false;
+}
+
+static const struct dma_fence_ops amdkfd_fence_ops = {
+ .get_driver_name = amdkfd_fence_get_driver_name,
+ .get_timeline_name = amdkfd_fence_get_timeline_name,
+ .enable_signaling = amdkfd_fence_enable_signaling,
+ .signaled = NULL,
+ .wait = dma_fence_default_wait,
+ .release = amdkfd_fence_release,
+};
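
For illustration only — a hypothetical sketch (not part of this patch) of how a per-process eviction fence might be created and attached to a reserved BO so that TTM eviction flows through enable_signaling above; amdgpu_amdkfd_fence_create(), amdgpu_bo_fence() and the amdkfd_process_info fields come from this series, the helper itself is invented:

	/* Hypothetical: one eviction fence per KFD process, shared by its BOs */
	static int example_attach_eviction_fence(struct amdkfd_process_info *info,
						 struct amdgpu_bo *bo)
	{
		if (!info->eviction_fence) {
			info->eviction_fence =
				amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
							   current->mm);
			if (!info->eviction_fence)
				return -ENOMEM;
		}

		/* Caller must hold the BO reservation; fence goes on the shared list */
		amdgpu_bo_fence(bo, &info->eviction_fence->base, true);
		return 0;
	}
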
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 1e3e9be..ea54e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -105,7 +105,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
@@ -132,11 +139,14 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
+static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t page_table_base);
+static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
+static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
@@ -166,17 +176,19 @@ static int get_tile_config(struct kgd_dev *kgd,
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
.free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
+ .get_local_mem_info = get_local_mem_info,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_vm_alloc_pasid,
- .free_pasid = amdgpu_vm_free_pasid,
+ .alloc_pasid = amdgpu_pasid_alloc,
+ .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
.init_interrupts = kgd_init_interrupts,
.hqd_load = kgd_hqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
.hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
@@ -187,10 +199,26 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .write_vmid_invalidate_request = write_vmid_invalidate_request,
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
+ .get_cu_info = get_cu_info,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
+ .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
+ .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
+ .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
+ .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
+ .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
+ .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .submit_ib = amdgpu_amdkfd_submit_ib,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -375,7 +403,44 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t i = 0, reg;
+#define HQD_N_REGS (35+4)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
+ DUMP_REG(reg);
+
+ release_queue(kgd);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
@@ -410,10 +475,17 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);
+
+ if (read_user_wptr(mm, wptr, data))
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ else
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ m->sdma_rlc_rb_rptr);
+
WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
@@ -423,8 +495,37 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
m->sdma_rlc_rb_rptr_addr_lo);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- m->sdma_rlc_rb_cntl);
+
+ data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
+ queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+4)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
return 0;
}
@@ -575,7 +676,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
struct cik_sdma_rlc_registers *m;
uint32_t sdma_base_addr;
uint32_t temp;
- int timeout = utimeout;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -588,10 +689,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
break;
- if (timeout <= 0)
+ if (time_after(jiffies, end_jiffies))
return -ETIME;
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
@@ -599,6 +699,8 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+ m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+
return 0;
}
@@ -702,14 +804,7 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
@@ -727,8 +822,6 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
const union amdgpu_firmware_header *hdr;
- BUG_ON(kgd == NULL);
-
switch (type) {
case KGD_ENGINE_PFP:
hdr = (const union amdgpu_firmware_header *)
@@ -781,3 +874,50 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
return hdr->common.ucode_version;
}
+static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t page_table_base)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID\n");
+ return;
+ }
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
+}
+
+static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ int vmid;
+ unsigned int tmp;
+
+ for (vmid = 0; vmid < 16; vmid++) {
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
+ continue;
+
+ tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+ (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("non kfd vmid\n");
+ return 0;
+ }
+
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 056929b..89264c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -45,7 +45,7 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-struct cik_sdma_rlc_registers;
+struct vi_sdma_mqd;
/*
* Register access functions
@@ -64,7 +64,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
@@ -74,7 +81,6 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
unsigned int utimeout);
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
unsigned int watch_point_id,
@@ -92,10 +98,13 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
+static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t page_table_base);
+static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
+static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
@@ -125,17 +134,19 @@ static int get_tile_config(struct kgd_dev *kgd,
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
.free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
+ .get_local_mem_info = get_local_mem_info,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_vm_alloc_pasid,
- .free_pasid = amdgpu_vm_free_pasid,
+ .alloc_pasid = amdgpu_pasid_alloc,
+ .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
.init_interrupts = kgd_init_interrupts,
.hqd_load = kgd_hqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
.hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
@@ -148,10 +159,26 @@ static const struct kfd2kgd_calls kfd2kgd = {
get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
- .write_vmid_invalidate_request = write_vmid_invalidate_request,
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
+ .get_cu_info = get_cu_info,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
+ .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
+ .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
+ .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
+ .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
+ .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
+ .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
+ .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
+ .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
+ .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
+ .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .submit_ib = amdgpu_amdkfd_submit_ib,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -268,9 +295,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
{
- return 0;
+ uint32_t retval;
+
+ retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+ m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
+ pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+ return retval;
}
static inline struct vi_mqd *get_mqd(void *mqd)
@@ -278,9 +311,9 @@ static inline struct vi_mqd *get_mqd(void *mqd)
return (struct vi_mqd *)mqd;
}
-static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
- return (struct cik_sdma_rlc_registers *)mqd;
+ return (struct vi_sdma_mqd *)mqd;
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
@@ -358,8 +391,138 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t i = 0, reg;
+#define HQD_N_REGS (54+4)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
+ DUMP_REG(reg);
+
+ release_queue(kgd);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct vi_sdma_mqd *m;
+ unsigned long end_jiffies;
+ uint32_t sdma_base_addr;
+ uint32_t data;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+ usleep_range(500, 1000);
+ }
+ if (m->sdma_engine_id) {
+ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+ } else {
+ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+ }
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+
+ if (read_user_wptr(mm, wptr, data))
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ else
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdmax_rlcx_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
+ queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+4+2+3+7)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
return 0;
}
@@ -388,7 +551,7 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct cik_sdma_rlc_registers *m;
+ struct vi_sdma_mqd *m;
uint32_t sdma_base_addr;
uint32_t sdma_rlc_rb_cntl;
@@ -509,10 +672,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct cik_sdma_rlc_registers *m;
+ struct vi_sdma_mqd *m;
uint32_t sdma_base_addr;
uint32_t temp;
- int timeout = utimeout;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -523,18 +686,19 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (timeout <= 0)
+ if (time_after(jiffies, end_jiffies))
return -ETIME;
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
@@ -556,14 +720,7 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
@@ -627,8 +784,6 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
const union amdgpu_firmware_header *hdr;
- BUG_ON(kgd == NULL);
-
switch (type) {
case KGD_ENGINE_PFP:
hdr = (const union amdgpu_firmware_header *)
@@ -680,3 +835,51 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
/* Only 12 bit in use*/
return hdr->common.ucode_version;
}
+
+static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+ uint32_t page_table_base)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID\n");
+ return;
+ }
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
+}
+
+static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ int vmid;
+ unsigned int tmp;
+
+ for (vmid = 0; vmid < 16; vmid++) {
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
+ continue;
+
+ tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
+ (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("non kfd vmid %d\n", vmid);
+ return -EINVAL;
+ }
+
+ WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ RREG32(mmVM_INVALIDATE_RESPONSE);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
new file mode 100644
index 0000000..1d6e147
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -0,0 +1,1577 @@
+/*
+ * Copyright 2014-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "kfd2kgd: " fmt
+
+#include <linux/list.h>
+#include <drm/drmP.h>
+#include "amdgpu_object.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_amdkfd.h"
+
+/* Special VM and GART address alignment needed for VI pre-Fiji due to
+ * a HW bug.
+ */
+#define VI_BO_SIZE_ALIGN (0x8000)
+
+/* Impose limit on how much memory KFD can use */
+static struct {
+ uint64_t max_system_mem_limit;
+ int64_t system_mem_used;
+ spinlock_t mem_limit_lock;
+} kfd_mem_limit;
+
+/* Struct used for amdgpu_amdkfd_bo_validate */
+struct amdgpu_vm_parser {
+ uint32_t domain;
+ bool wait;
+};
+
+static const char * const domain_bit_to_string[] = {
+ "CPU",
+ "GTT",
+ "VRAM",
+ "GDS",
+ "GWS",
+ "OA"
+};
+
+#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
+
+
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+ return (struct amdgpu_device *)kgd;
+}
+
+static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
+ struct kgd_mem *mem)
+{
+ struct kfd_bo_va_list *entry;
+
+ list_for_each_entry(entry, &mem->bo_va_list, bo_list)
+ if (entry->bo_va->base.vm == avm)
+ return false;
+
+ return true;
+}
+
+/* Set memory usage limits. Currently, the limits are:
+ * System (kernel) memory - 3/8th of system RAM
+ */
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+{
+ struct sysinfo si;
+ uint64_t mem;
+
+ si_meminfo(&si);
+ mem = si.totalram - si.totalhigh;
+ mem *= si.mem_unit;
+
+ spin_lock_init(&kfd_mem_limit.mem_limit_lock);
+ kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
+ pr_debug("Kernel memory limit %lluM\n",
+ (kfd_mem_limit.max_system_mem_limit >> 20));
+}
+
+static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain)
+{
+ size_t acc_size;
+ int ret = 0;
+
+ acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
+ sizeof(struct amdgpu_bo));
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+ if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+ if (kfd_mem_limit.system_mem_used + (acc_size + size) >
+ kfd_mem_limit.max_system_mem_limit) {
+ ret = -ENOMEM;
+ goto err_no_mem;
+ }
+ kfd_mem_limit.system_mem_used += (acc_size + size);
+ }
+err_no_mem:
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ return ret;
+}
+
+static void unreserve_system_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain)
+{
+ size_t acc_size;
+
+ acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
+ sizeof(struct amdgpu_bo));
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+ if (domain == AMDGPU_GEM_DOMAIN_GTT)
+ kfd_mem_limit.system_mem_used -= (acc_size + size);
+ WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
+ "kfd system memory accounting unbalanced");
+
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+}
+
+void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+{
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+ if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
+ kfd_mem_limit.system_mem_used -=
+ (bo->tbo.acc_size + amdgpu_bo_size(bo));
+ }
+ WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
+ "kfd system memory accounting unbalanced");
+
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+}
+
+
+/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
+ * reservation object.
+ *
+ * @bo: [IN] Remove eviction fence(s) from this BO
+ * @ef: [IN] If ef is specified, then this eviction fence is removed if it
+ * is present in the shared list.
+ * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
+ * from BO's reservation object shared list.
+ * @ef_count: [OUT] Number of fences in ef_list.
+ *
+ * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
+ * called to restore the eviction fences and to avoid memory leak. This is
+ * useful for shared BOs.
+ * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
+ */
+static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
+ struct amdgpu_amdkfd_fence *ef,
+ struct amdgpu_amdkfd_fence ***ef_list,
+ unsigned int *ef_count)
+{
+ struct reservation_object_list *fobj;
+ struct reservation_object *resv;
+ unsigned int i = 0, j = 0, k = 0, shared_count;
+ unsigned int count = 0;
+ struct amdgpu_amdkfd_fence **fence_list;
+
+ if (!ef && !ef_list)
+ return -EINVAL;
+
+ if (ef_list) {
+ *ef_list = NULL;
+ *ef_count = 0;
+ }
+
+ resv = bo->tbo.resv;
+ fobj = reservation_object_get_list(resv);
+
+ if (!fobj)
+ return 0;
+
+ preempt_disable();
+ write_seqcount_begin(&resv->seq);
+
+ * Go through all the shared fences in the reservation object. If
+ * ef is specified and it exists in the list, remove it and reduce the
+ * count. If ef is not specified, then get the count of eviction fences
+ * present.
+ */
+ shared_count = fobj->shared_count;
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *f;
+
+ f = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(resv));
+
+ if (ef) {
+ if (f->context == ef->base.context) {
+ dma_fence_put(f);
+ fobj->shared_count--;
+ } else {
+ RCU_INIT_POINTER(fobj->shared[j++], f);
+ }
+ } else if (to_amdgpu_amdkfd_fence(f))
+ count++;
+ }
+ write_seqcount_end(&resv->seq);
+ preempt_enable();
+
+ if (ef || !count)
+ return 0;
+
+ /* Alloc memory for count number of eviction fence pointers. Fill the
+ * ef_list array and ef_count
+ */
+ fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
+ GFP_KERNEL);
+ if (!fence_list)
+ return -ENOMEM;
+
+ preempt_disable();
+ write_seqcount_begin(&resv->seq);
+
+ j = 0;
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *f;
+ struct amdgpu_amdkfd_fence *efence;
+
+ f = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(resv));
+
+ efence = to_amdgpu_amdkfd_fence(f);
+ if (efence) {
+ fence_list[k++] = efence;
+ fobj->shared_count--;
+ } else {
+ RCU_INIT_POINTER(fobj->shared[j++], f);
+ }
+ }
+
+ write_seqcount_end(&resv->seq);
+ preempt_enable();
+
+ *ef_list = fence_list;
+ *ef_count = k;
+
+ return 0;
+}
+
+/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
+ * reservation object.
+ *
+ * @bo: [IN] Add eviction fences to this BO
+ * @ef_list: [IN] List of eviction fences to be added
+ * @ef_count: [IN] Number of fences in ef_list.
+ *
+ * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
+ * function.
+ */
+static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
+ struct amdgpu_amdkfd_fence **ef_list,
+ unsigned int ef_count)
+{
+ int i;
+
+ if (!ef_list || !ef_count)
+ return;
+
+ for (i = 0; i < ef_count; i++) {
+ amdgpu_bo_fence(bo, &ef_list[i]->base, true);
+ /* Re-adding the fence takes an additional reference. Drop that
+ * reference.
+ */
+ dma_fence_put(&ef_list[i]->base);
+ }
+
+ kfree(ef_list);
+}
+
+static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
+ bool wait)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int ret;
+
+ if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
+ "Called with userptr BO"))
+ return -EINVAL;
+
+ amdgpu_ttm_placement_from_domain(bo, domain);
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto validate_fail;
+ if (wait) {
+ struct amdgpu_amdkfd_fence **ef_list;
+ unsigned int ef_count;
+
+ ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
+ &ef_count);
+ if (ret)
+ goto validate_fail;
+
+ ttm_bo_wait(&bo->tbo, false, false);
+ amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
+ }
+
+validate_fail:
+ return ret;
+}
+
+static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
+{
+ struct amdgpu_vm_parser *p = param;
+
+ return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
+}
+
+/* vm_validate_pt_pd_bos - Validate page table and directory BOs
+ *
+ * Page directories are not updated here because huge page handling
+ * during page table updates can invalidate page directory entries
+ * again. Page directories are only updated after updating page
+ * tables.
+ */
+static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
+{
+ struct amdgpu_bo *pd = vm->root.base.bo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
+ struct amdgpu_vm_parser param;
+ uint64_t addr, flags = AMDGPU_PTE_VALID;
+ int ret;
+
+ param.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ param.wait = false;
+
+ ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
+ &param);
+ if (ret) {
+ pr_err("amdgpu: failed to validate PT BOs\n");
+ return ret;
+ }
+
+ ret = amdgpu_amdkfd_validate(&param, pd);
+ if (ret) {
+ pr_err("amdgpu: failed to validate PD\n");
+ return ret;
+ }
+
+ addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
+ amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
+ vm->pd_phys_addr = addr;
+
+ if (vm->use_cpu_for_update) {
+ ret = amdgpu_bo_kmap(pd, NULL);
+ if (ret) {
+ pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_fence *f)
+{
+ int ret = amdgpu_sync_fence(adev, sync, f, false);
+
+ /* Sync objects can't handle multiple GPUs (contexts) updating
+ * sync->last_vm_update. Fortunately we don't need it for
+ * KFD's purposes, so we can just drop that fence.
+ */
+ if (sync->last_vm_update) {
+ dma_fence_put(sync->last_vm_update);
+ sync->last_vm_update = NULL;
+ }
+
+ return ret;
+}
+
+static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+{
+ struct amdgpu_bo *pd = vm->root.base.bo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
+ int ret;
+
+ ret = amdgpu_vm_update_directories(adev, vm);
+ if (ret)
+ return ret;
+
+ return sync_vm_fence(adev, sync, vm->last_update);
+}
+
+/* add_bo_to_vm - Add a BO to a VM
+ *
+ * Everything that needs to be done only once when a BO is first added
+ * to a VM. It can later be mapped and unmapped many times without
+ * repeating these steps.
+ *
+ * 1. Allocate and initialize BO VA entry data structure
+ * 2. Add BO to the VM
+ * 3. Determine ASIC-specific PTE flags
+ * 4. Alloc page tables and directories if needed
+ * 4a. Validate new page tables and directories
+ */
+static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
+ struct amdgpu_vm *vm, bool is_aql,
+ struct kfd_bo_va_list **p_bo_va_entry)
+{
+ int ret;
+ struct kfd_bo_va_list *bo_va_entry;
+ struct amdgpu_bo *pd = vm->root.base.bo;
+ struct amdgpu_bo *bo = mem->bo;
+ uint64_t va = mem->va;
+ struct list_head *list_bo_va = &mem->bo_va_list;
+ unsigned long bo_size = bo->tbo.mem.size;
+
+ if (!va) {
+ pr_err("Invalid VA when adding BO to VM\n");
+ return -EINVAL;
+ }
+
+ if (is_aql)
+ va += bo_size;
+
+ bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
+ if (!bo_va_entry)
+ return -ENOMEM;
+
+ pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
+ va + bo_size, vm);
+
+ /* Add BO to VM internal data structures */
+ bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+ if (!bo_va_entry->bo_va) {
+ ret = -EINVAL;
+ pr_err("Failed to add BO object to VM. ret == %d\n",
+ ret);
+ goto err_vmadd;
+ }
+
+ bo_va_entry->va = va;
+ bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
+ mem->mapping_flags);
+ bo_va_entry->kgd_dev = (void *)adev;
+ list_add(&bo_va_entry->bo_list, list_bo_va);
+
+ if (p_bo_va_entry)
+ *p_bo_va_entry = bo_va_entry;
+
+ /* Allocate new page tables if needed and validate
+ * them. Clearing new page tables and validating them need to wait
+ * on move fences. We don't want that to trigger the eviction
+ * fence, so remove it temporarily.
+ */
+ amdgpu_amdkfd_remove_eviction_fence(pd,
+ vm->process_info->eviction_fence,
+ NULL, NULL);
+
+ ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
+ if (ret) {
+ pr_err("Failed to allocate pts, err=%d\n", ret);
+ goto err_alloc_pts;
+ }
+
+ ret = vm_validate_pt_pd_bos(vm);
+ if (ret) {
+ pr_err("validate_pt_pd_bos() failed\n");
+ goto err_alloc_pts;
+ }
+
+ /* Add the eviction fence back */
+ amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
+
+ return 0;
+
+err_alloc_pts:
+ amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
+ amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
+ list_del(&bo_va_entry->bo_list);
+err_vmadd:
+ kfree(bo_va_entry);
+ return ret;
+}
+
+static void remove_bo_from_vm(struct amdgpu_device *adev,
+ struct kfd_bo_va_list *entry, unsigned long size)
+{
+ pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
+ entry->va,
+ entry->va + size, entry);
+ amdgpu_vm_bo_rmv(adev, entry->bo_va);
+ list_del(&entry->bo_list);
+ kfree(entry);
+}
+
+static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
+ struct amdkfd_process_info *process_info)
+{
+ struct ttm_validate_buffer *entry = &mem->validate_list;
+ struct amdgpu_bo *bo = mem->bo;
+
+ INIT_LIST_HEAD(&entry->head);
+ entry->shared = true;
+ entry->bo = &bo->tbo;
+ mutex_lock(&process_info->lock);
+ list_add_tail(&entry->head, &process_info->kfd_bo_list);
+ mutex_unlock(&process_info->lock);
+}
+
+/* Reserving a BO and its page table BOs must happen atomically to
+ * avoid deadlocks. Some operations update multiple VMs at once. Track
+ * all the reservation info in a context structure. Optionally a sync
+ * object can track VM updates.
+ */
+struct bo_vm_reservation_context {
+ struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
+ unsigned int n_vms; /* Number of VMs reserved */
+ struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
+ struct ww_acquire_ctx ticket; /* Reservation ticket */
+ struct list_head list, duplicates; /* BO lists */
+ struct amdgpu_sync *sync; /* Pointer to sync object */
+ bool reserved; /* Whether BOs are reserved */
+};
+
+enum bo_vm_match {
+ BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
+ BO_VM_MAPPED, /* Match VMs where a BO is mapped */
+ BO_VM_ALL, /* Match all VMs a BO was added to */
+};
+
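+/* Illustrative usage of the reservation context (a sketch only, mirroring
+ * the callers further below):
+ *
+ *	struct bo_vm_reservation_context ctx;
+ *
+ *	ret = reserve_bo_and_vm(mem, vm, &ctx);
+ *	if (!ret) {
+ *		...update mappings, collecting fences in ctx.sync...
+ *		ret = unreserve_bo_and_vms(&ctx, false, false);
+ *	}
+ */
+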
+/**
+ * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
+ * @mem: KFD BO structure.
+ * @vm: the VM to reserve.
+ * @ctx: the struct that will be used in unreserve_bo_and_vms().
+ */
+static int reserve_bo_and_vm(struct kgd_mem *mem,
+ struct amdgpu_vm *vm,
+ struct bo_vm_reservation_context *ctx)
+{
+ struct amdgpu_bo *bo = mem->bo;
+ int ret;
+
+ WARN_ON(!vm);
+
+ ctx->reserved = false;
+ ctx->n_vms = 1;
+ ctx->sync = &mem->sync;
+
+ INIT_LIST_HEAD(&ctx->list);
+ INIT_LIST_HEAD(&ctx->duplicates);
+
+ ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
+ if (!ctx->vm_pd)
+ return -ENOMEM;
+
+ ctx->kfd_bo.robj = bo;
+ ctx->kfd_bo.priority = 0;
+ ctx->kfd_bo.tv.bo = &bo->tbo;
+ ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.user_pages = NULL;
+ list_add(&ctx->kfd_bo.tv.head, &ctx->list);
+
+ amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
+
+ ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
+ false, &ctx->duplicates);
+ if (!ret)
+ ctx->reserved = true;
+ else {
+ pr_err("Failed to reserve buffers in ttm\n");
+ kfree(ctx->vm_pd);
+ ctx->vm_pd = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
+ * @mem: KFD BO structure.
+ * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
+ * reserved. Otherwise, only the given VM is reserved.
+ * @map_type: the mapping status that will be used to filter the VMs.
+ * @ctx: the struct that will be used in unreserve_bo_and_vms().
+ *
+ * Returns 0 for success, negative for failure.
+ */
+static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
+ struct amdgpu_vm *vm, enum bo_vm_match map_type,
+ struct bo_vm_reservation_context *ctx)
+{
+ struct amdgpu_bo *bo = mem->bo;
+ struct kfd_bo_va_list *entry;
+ unsigned int i;
+ int ret;
+
+ ctx->reserved = false;
+ ctx->n_vms = 0;
+ ctx->vm_pd = NULL;
+ ctx->sync = &mem->sync;
+
+ INIT_LIST_HEAD(&ctx->list);
+ INIT_LIST_HEAD(&ctx->duplicates);
+
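+ /* First pass: count the VMs that match the requested mapping state */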
+ list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+ if ((vm && vm != entry->bo_va->base.vm) ||
+ (entry->is_mapped != map_type
+ && map_type != BO_VM_ALL))
+ continue;
+
+ ctx->n_vms++;
+ }
+
+ if (ctx->n_vms != 0) {
+ ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
+ GFP_KERNEL);
+ if (!ctx->vm_pd)
+ return -ENOMEM;
+ }
+
+ ctx->kfd_bo.robj = bo;
+ ctx->kfd_bo.priority = 0;
+ ctx->kfd_bo.tv.bo = &bo->tbo;
+ ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.user_pages = NULL;
+ list_add(&ctx->kfd_bo.tv.head, &ctx->list);
+
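+ /* Second pass: add the page directory of each matching VM to the
+ * reservation list
+ */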
+ i = 0;
+ list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+ if ((vm && vm != entry->bo_va->base.vm) ||
+ (entry->is_mapped != map_type
+ && map_type != BO_VM_ALL))
+ continue;
+
+ amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
+ &ctx->vm_pd[i]);
+ i++;
+ }
+
+ ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
+ false, &ctx->duplicates);
+ if (!ret)
+ ctx->reserved = true;
+ else
+ pr_err("Failed to reserve buffers in ttm.\n");
+
+ if (ret) {
+ kfree(ctx->vm_pd);
+ ctx->vm_pd = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
+ * @ctx: Reservation context to unreserve
+ * @wait: Optionally wait for a sync object representing pending VM updates
+ * @intr: Whether the wait is interruptible
+ *
+ * Also frees any resources allocated in
+ * reserve_bo_and_(cond_)vm(s). Returns the status from
+ * amdgpu_sync_wait.
+ */
+static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
+ bool wait, bool intr)
+{
+ int ret = 0;
+
+ if (wait)
+ ret = amdgpu_sync_wait(ctx->sync, intr);
+
+ if (ctx->reserved)
+ ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
+ kfree(ctx->vm_pd);
+
+ ctx->sync = NULL;
+
+ ctx->reserved = false;
+ ctx->vm_pd = NULL;
+
+ return ret;
+}
+
+static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
+ struct kfd_bo_va_list *entry,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_bo_va *bo_va = entry->bo_va;
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ struct amdgpu_bo *pd = vm->root.base.bo;
+
+ /* Remove eviction fence from PD (and thereby from PTs too as
+ * they share the resv. object). Otherwise during PT update
+ * job (see amdgpu_vm_bo_update_mapping), eviction fence would
+ * get added to job->sync object and job execution would
+ * trigger the eviction fence.
+ */
+ amdgpu_amdkfd_remove_eviction_fence(pd,
+ vm->process_info->eviction_fence,
+ NULL, NULL);
+ amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+
+ amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
+
+ /* Add the eviction fence back */
+ amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
+
+ sync_vm_fence(adev, sync, bo_va->last_pt_update);
+
+ return 0;
+}
+
+static int update_gpuvm_pte(struct amdgpu_device *adev,
+ struct kfd_bo_va_list *entry,
+ struct amdgpu_sync *sync)
+{
+ int ret;
+ struct amdgpu_vm *vm;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+
+ bo_va = entry->bo_va;
+ vm = bo_va->base.vm;
+ bo = bo_va->base.bo;
+
+ /* Update the page tables */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret) {
+ pr_err("amdgpu_vm_bo_update failed\n");
+ return ret;
+ }
+
+ return sync_vm_fence(adev, sync, bo_va->last_pt_update);
+}
+
+static int map_bo_to_gpuvm(struct amdgpu_device *adev,
+ struct kfd_bo_va_list *entry, struct amdgpu_sync *sync)
+{
+ int ret;
+
+ /* Set virtual address for the allocation */
+ ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
+ amdgpu_bo_size(entry->bo_va->base.bo),
+ entry->pte_flags);
+ if (ret) {
+ pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
+ entry->va, ret);
+ return ret;
+ }
+
+ ret = update_gpuvm_pte(adev, entry, sync);
+ if (ret) {
+ pr_err("update_gpuvm_pte() failed\n");
+ goto update_gpuvm_pte_failed;
+ }
+
+ return 0;
+
+update_gpuvm_pte_failed:
+ unmap_bo_from_gpuvm(adev, entry, sync);
+ return ret;
+}
+
+static int process_validate_vms(struct amdkfd_process_info *process_info)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ ret = vm_validate_pt_pd_bos(peer_vm);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int process_update_pds(struct amdkfd_process_info *process_info,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ ret = vm_update_pds(peer_vm, sync);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
+ struct dma_fence **ef)
+{
+ struct amdkfd_process_info *info = NULL;
+ int ret;
+
+ if (!*process_info) {
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ INIT_LIST_HEAD(&info->vm_list_head);
+ INIT_LIST_HEAD(&info->kfd_bo_list);
+
+ info->eviction_fence =
+ amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
+ current->mm);
+ if (!info->eviction_fence) {
+ pr_err("Failed to create eviction fence\n");
+ ret = -ENOMEM;
+ goto create_evict_fence_fail;
+ }
+
+ *process_info = info;
+ *ef = dma_fence_get(&info->eviction_fence->base);
+ }
+
+ vm->process_info = *process_info;
+
+ /* Validate page directory and attach eviction fence */
+ ret = amdgpu_bo_reserve(vm->root.base.bo, true);
+ if (ret)
+ goto reserve_pd_fail;
+ ret = vm_validate_pt_pd_bos(vm);
+ if (ret) {
+ pr_err("validate_pt_pd_bos() failed\n");
+ goto validate_pd_fail;
+ }
+ ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
+ if (ret)
+ goto wait_pd_fail;
+ amdgpu_bo_fence(vm->root.base.bo,
+ &vm->process_info->eviction_fence->base, true);
+ amdgpu_bo_unreserve(vm->root.base.bo);
+
+ /* Update process info */
+ mutex_lock(&vm->process_info->lock);
+ list_add_tail(&vm->vm_list_node,
+ &(vm->process_info->vm_list_head));
+ vm->process_info->n_vms++;
+ mutex_unlock(&vm->process_info->lock);
+
+ return 0;
+
+wait_pd_fail:
+validate_pd_fail:
+ amdgpu_bo_unreserve(vm->root.base.bo);
+reserve_pd_fail:
+ vm->process_info = NULL;
+ if (info) {
+ /* Two fence references: one in info and one in *ef */
+ dma_fence_put(&info->eviction_fence->base);
+ dma_fence_put(*ef);
+ *ef = NULL;
+ *process_info = NULL;
+create_evict_fence_fail:
+ mutex_destroy(&info->lock);
+ kfree(info);
+ }
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
+ void **process_info,
+ struct dma_fence **ef)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_vm *new_vm;
+ int ret;
+
+ new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
+ if (!new_vm)
+ return -ENOMEM;
+
+ /* Initialize AMDGPU part of the VM */
+ ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
+ if (ret) {
+ pr_err("Failed init vm ret %d\n", ret);
+ goto amdgpu_vm_init_fail;
+ }
+
+ /* Initialize KFD part of the VM and process info */
+ ret = init_kfd_vm(new_vm, process_info, ef);
+ if (ret)
+ goto init_kfd_vm_fail;
+
+ *vm = (void *) new_vm;
+
+ return 0;
+
+init_kfd_vm_fail:
+ amdgpu_vm_fini(adev, new_vm);
+amdgpu_vm_init_fail:
+ kfree(new_vm);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+ struct file *filp,
+ void **vm, void **process_info,
+ struct dma_fence **ef)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct drm_file *drm_priv = filp->private_data;
+ struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
+ struct amdgpu_vm *avm = &drv_priv->vm;
+ int ret;
+
+ /* Already a compute VM? */
+ if (avm->process_info)
+ return -EINVAL;
+
+ /* Convert VM into a compute VM */
+ ret = amdgpu_vm_make_compute(adev, avm);
+ if (ret)
+ return ret;
+
+ /* Initialize KFD part of the VM and process info */
+ ret = init_kfd_vm(avm, process_info, ef);
+ if (ret)
+ return ret;
+
+ *vm = (void *)avm;
+
+ return 0;
+}
+
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ struct amdkfd_process_info *process_info = vm->process_info;
+ struct amdgpu_bo *pd = vm->root.base.bo;
+
+ if (!process_info)
+ return;
+
+ /* Release eviction fence from PD */
+ amdgpu_bo_reserve(pd, false);
+ amdgpu_bo_fence(pd, NULL, false);
+ amdgpu_bo_unreserve(pd);
+
+ /* Update process info */
+ mutex_lock(&process_info->lock);
+ process_info->n_vms--;
+ list_del(&vm->vm_list_node);
+ mutex_unlock(&process_info->lock);
+
+ /* Release per-process resources when last compute VM is destroyed */
+ if (!process_info->n_vms) {
+ WARN_ON(!list_empty(&process_info->kfd_bo_list));
+
+ dma_fence_put(&process_info->eviction_fence->base);
+ mutex_destroy(&process_info->lock);
+ kfree(process_info);
+ }
+}
+
+void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+ if (WARN_ON(!kgd || !vm))
+ return;
+
+ pr_debug("Destroying process vm %p\n", vm);
+
+ /* Release the VM context */
+ amdgpu_vm_fini(adev, avm);
+ kfree(vm);
+}
+
+uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+{
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
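+ /* The PD base is reported as a GPU page frame number
+ * (byte address >> GPU page shift)
+ */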
+ return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+}
+
+int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ struct kgd_dev *kgd, uint64_t va, uint64_t size,
+ void *vm, struct kgd_mem **mem,
+ uint64_t *offset, uint32_t flags)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ struct amdgpu_bo *bo;
+ int byte_align;
+ u32 alloc_domain;
+ u64 alloc_flags;
+ uint32_t mapping_flags;
+ int ret;
+
+ /*
+ * Check on which domain to allocate BO
+ */
+ if (flags & ALLOC_MEM_FLAGS_VRAM) {
+ alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
+ alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ } else if (flags & ALLOC_MEM_FLAGS_GTT) {
+ alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_flags = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ mutex_init(&(*mem)->lock);
+ (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
+
+ /* Workaround for AQL queue wraparound bug. Map the same
+ * memory twice. That means we only actually allocate half
+ * the memory.
+ */
+ if ((*mem)->aql_queue)
+ size = size >> 1;
+
+ /* Workaround for TLB bug on older VI chips */
+ byte_align = (adev->family == AMDGPU_FAMILY_VI &&
+ adev->asic_type != CHIP_FIJI &&
+ adev->asic_type != CHIP_POLARIS10 &&
+ adev->asic_type != CHIP_POLARIS11) ?
+ VI_BO_SIZE_ALIGN : 1;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+ if (flags & ALLOC_MEM_FLAGS_WRITABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+ if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+ if (flags & ALLOC_MEM_FLAGS_COHERENT)
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ (*mem)->mapping_flags = mapping_flags;
+
+ amdgpu_sync_create(&(*mem)->sync);
+
+ ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
+ if (ret) {
+ pr_debug("Insufficient system memory\n");
+ goto err_reserve_system_mem;
+ }
+
+ pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
+ va, size, domain_string(alloc_domain));
+
+ ret = amdgpu_bo_create(adev, size, byte_align,
+ alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
+ if (ret) {
+ pr_debug("Failed to create BO on domain %s. ret %d\n",
+ domain_string(alloc_domain), ret);
+ goto err_bo_create;
+ }
+ bo->kfd_bo = *mem;
+ (*mem)->bo = bo;
+
+ (*mem)->va = va;
+ (*mem)->domain = alloc_domain;
+ (*mem)->mapped_to_gpu_memory = 0;
+ (*mem)->process_info = avm->process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info);
+
+ if (offset)
+ *offset = amdgpu_bo_mmap_offset(bo);
+
+ return 0;
+
+err_bo_create:
+ unreserve_system_mem_limit(adev, size, alloc_domain);
+err_reserve_system_mem:
+ mutex_destroy(&(*mem)->lock);
+ kfree(*mem);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ struct kgd_dev *kgd, struct kgd_mem *mem)
+{
+ struct amdkfd_process_info *process_info = mem->process_info;
+ unsigned long bo_size = mem->bo->tbo.mem.size;
+ struct kfd_bo_va_list *entry, *tmp;
+ struct bo_vm_reservation_context ctx;
+ struct ttm_validate_buffer *bo_list_entry;
+ int ret;
+
+ mutex_lock(&mem->lock);
+
+ if (mem->mapped_to_gpu_memory > 0) {
+ pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
+ mem->va, bo_size);
+ mutex_unlock(&mem->lock);
+ return -EBUSY;
+ }
+
+ mutex_unlock(&mem->lock);
+ /* lock is not needed after this, since mem is unused and will
+ * be freed anyway
+ */
+
+ /* Make sure restore workers don't access the BO any more */
+ bo_list_entry = &mem->validate_list;
+ mutex_lock(&process_info->lock);
+ list_del(&bo_list_entry->head);
+ mutex_unlock(&process_info->lock);
+
+ ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
+ if (unlikely(ret))
+ return ret;
+
+ /* The eviction fence should be removed by the last unmap.
+ * TODO: Log an error condition if the bo still has the eviction fence
+ * attached
+ */
+ amdgpu_amdkfd_remove_eviction_fence(mem->bo,
+ process_info->eviction_fence,
+ NULL, NULL);
+ pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
+ mem->va + bo_size * (1 + mem->aql_queue));
+
+ /* Remove from VM internal data structures */
+ list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
+ remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
+ entry, bo_size);
+
+ ret = unreserve_bo_and_vms(&ctx, false, false);
+
+ /* Free the sync object */
+ amdgpu_sync_free(&mem->sync);
+
+ /* Free the BO */
+ amdgpu_bo_unref(&mem->bo);
+ mutex_destroy(&mem->lock);
+ kfree(mem);
+
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ int ret;
+ struct amdgpu_bo *bo;
+ uint32_t domain;
+ struct kfd_bo_va_list *entry;
+ struct bo_vm_reservation_context ctx;
+ struct kfd_bo_va_list *bo_va_entry = NULL;
+ struct kfd_bo_va_list *bo_va_entry_aql = NULL;
+ unsigned long bo_size;
+
+ /* Make sure restore is not running concurrently.
+ */
+ mutex_lock(&mem->process_info->lock);
+
+ mutex_lock(&mem->lock);
+
+ bo = mem->bo;
+
+ if (!bo) {
+ pr_err("Invalid BO when mapping memory to GPU\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ domain = mem->domain;
+ bo_size = bo->tbo.mem.size;
+
+ pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
+ mem->va,
+ mem->va + bo_size * (1 + mem->aql_queue),
+ vm, domain_string(domain));
+
+ ret = reserve_bo_and_vm(mem, vm, &ctx);
+ if (unlikely(ret))
+ goto out;
+
+ if (check_if_add_bo_to_vm(avm, mem)) {
+ ret = add_bo_to_vm(adev, mem, avm, false,
+ &bo_va_entry);
+ if (ret)
+ goto add_bo_to_vm_failed;
+ if (mem->aql_queue) {
+ ret = add_bo_to_vm(adev, mem, avm,
+ true, &bo_va_entry_aql);
+ if (ret)
+ goto add_bo_to_vm_failed_aql;
+ }
+ } else {
+ ret = vm_validate_pt_pd_bos(avm);
+ if (unlikely(ret))
+ goto add_bo_to_vm_failed;
+ }
+
+ if (mem->mapped_to_gpu_memory == 0) {
+ /* Validate BO only once. The eviction fence gets added to BO
+ * the first time it is mapped. Validate will wait for all
+ * background evictions to complete.
+ */
+ ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
+ if (ret) {
+ pr_debug("Validate failed\n");
+ goto map_bo_to_gpuvm_failed;
+ }
+ }
+
+ list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+ if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
+ pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
+ entry->va, entry->va + bo_size,
+ entry);
+
+ ret = map_bo_to_gpuvm(adev, entry, ctx.sync);
+ if (ret) {
+ pr_err("Failed to map radeon bo to gpuvm\n");
+ goto map_bo_to_gpuvm_failed;
+ }
+
+ ret = vm_update_pds(vm, ctx.sync);
+ if (ret) {
+ pr_err("Failed to update page directories\n");
+ goto map_bo_to_gpuvm_failed;
+ }
+
+ entry->is_mapped = true;
+ mem->mapped_to_gpu_memory++;
+ pr_debug("\t INC mapping count %d\n",
+ mem->mapped_to_gpu_memory);
+ }
+ }
+
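+ /* Userptr BOs and pinned BOs are not covered by the KFD eviction fence */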
+ if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
+ amdgpu_bo_fence(bo,
+ &avm->process_info->eviction_fence->base,
+ true);
+ ret = unreserve_bo_and_vms(&ctx, false, false);
+
+ goto out;
+
+map_bo_to_gpuvm_failed:
+ if (bo_va_entry_aql)
+ remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
+add_bo_to_vm_failed_aql:
+ if (bo_va_entry)
+ remove_bo_from_vm(adev, bo_va_entry, bo_size);
+add_bo_to_vm_failed:
+ unreserve_bo_and_vms(&ctx, false, false);
+out:
+ mutex_unlock(&mem->process_info->lock);
+ mutex_unlock(&mem->lock);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct amdkfd_process_info *process_info =
+ ((struct amdgpu_vm *)vm)->process_info;
+ unsigned long bo_size = mem->bo->tbo.mem.size;
+ struct kfd_bo_va_list *entry;
+ struct bo_vm_reservation_context ctx;
+ int ret;
+
+ mutex_lock(&mem->lock);
+
+ ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
+ if (unlikely(ret))
+ goto out;
+ /* If no VMs were reserved, it means the BO wasn't actually mapped */
+ if (ctx.n_vms == 0) {
+ ret = -EINVAL;
+ goto unreserve_out;
+ }
+
+ ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
+ if (unlikely(ret))
+ goto unreserve_out;
+
+ pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
+ mem->va,
+ mem->va + bo_size * (1 + mem->aql_queue),
+ vm);
+
+ list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+ if (entry->bo_va->base.vm == vm && entry->is_mapped) {
+ pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
+ entry->va,
+ entry->va + bo_size,
+ entry);
+
+ ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
+ if (ret == 0) {
+ entry->is_mapped = false;
+ } else {
+ pr_err("failed to unmap VA 0x%llx\n",
+ mem->va);
+ goto unreserve_out;
+ }
+
+ mem->mapped_to_gpu_memory--;
+ pr_debug("\t DEC mapping count %d\n",
+ mem->mapped_to_gpu_memory);
+ }
+ }
+
+ /* If BO is unmapped from all VMs, unfence it. It can be evicted if
+ * required.
+ */
+ if (mem->mapped_to_gpu_memory == 0 &&
+ !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
+ amdgpu_amdkfd_remove_eviction_fence(mem->bo,
+ process_info->eviction_fence,
+ NULL, NULL);
+
+unreserve_out:
+ unreserve_bo_and_vms(&ctx, false, false);
+out:
+ mutex_unlock(&mem->lock);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_sync_memory(
+ struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
+{
+ struct amdgpu_sync sync;
+ int ret;
+
+ amdgpu_sync_create(&sync);
+
+ mutex_lock(&mem->lock);
+ amdgpu_sync_clone(&mem->sync, &sync);
+ mutex_unlock(&mem->lock);
+
+ ret = amdgpu_sync_wait(&sync, intr);
+ amdgpu_sync_free(&sync);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
+ struct kgd_mem *mem, void **kptr, uint64_t *size)
+{
+ int ret;
+ struct amdgpu_bo *bo = mem->bo;
+
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+ pr_err("userptr can't be mapped to kernel\n");
+ return -EINVAL;
+ }
+
+ /* Remove kgd_mem from the kfd_bo_list so that this BO is not
+ * re-validated when BOs are restored after an eviction.
+ */
+ mutex_lock(&mem->process_info->lock);
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ pr_err("Failed to reserve bo. ret %d\n", ret);
+ goto bo_reserve_failed;
+ }
+
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ if (ret) {
+ pr_err("Failed to pin bo. ret %d\n", ret);
+ goto pin_failed;
+ }
+
+ ret = amdgpu_bo_kmap(bo, kptr);
+ if (ret) {
+ pr_err("Failed to map bo to kernel. ret %d\n", ret);
+ goto kmap_failed;
+ }
+
+ amdgpu_amdkfd_remove_eviction_fence(
+ bo, mem->process_info->eviction_fence, NULL, NULL);
+ list_del_init(&mem->validate_list.head);
+
+ if (size)
+ *size = amdgpu_bo_size(bo);
+
+ amdgpu_bo_unreserve(bo);
+
+ mutex_unlock(&mem->process_info->lock);
+ return 0;
+
+kmap_failed:
+ amdgpu_bo_unpin(bo);
+pin_failed:
+ amdgpu_bo_unreserve(bo);
+bo_reserve_failed:
+ mutex_unlock(&mem->process_info->lock);
+
+ return ret;
+}
+
+/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
+ * KFD process identified by process_info
+ *
+ * @process_info: amdkfd_process_info of the KFD process
+ *
+ * After memory eviction, the restore thread calls this function. The function
+ * should be called while the process is still valid. BO restore involves:
+ *
+ * 1. Release the old eviction fence and create a new one
+ * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
+ * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
+ *    BOs that need to be reserved.
+ * 4. Reserve all the BOs
+ * 5. Validate the PD and PT BOs.
+ * 6. Validate all KFD BOs using kfd_bo_list, map them and add the new fence
+ * 7. Add fence to all PD and PT BOs.
+ * 8. Unreserve all BOs
+ */
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+{
+ struct amdgpu_bo_list_entry *pd_bo_list;
+ struct amdkfd_process_info *process_info = info;
+ struct amdgpu_vm *peer_vm;
+ struct kgd_mem *mem;
+ struct bo_vm_reservation_context ctx;
+ struct amdgpu_amdkfd_fence *new_fence;
+ int ret = 0, i;
+ struct list_head duplicate_save;
+ struct amdgpu_sync sync_obj;
+
+ INIT_LIST_HEAD(&duplicate_save);
+ INIT_LIST_HEAD(&ctx.list);
+ INIT_LIST_HEAD(&ctx.duplicates);
+
+ pd_bo_list = kcalloc(process_info->n_vms,
+ sizeof(struct amdgpu_bo_list_entry),
+ GFP_KERNEL);
+ if (!pd_bo_list)
+ return -ENOMEM;
+
+ i = 0;
+ mutex_lock(&process_info->lock);
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node)
+ amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
+
+ /* Reserve all BOs and page tables/directory. Add all BOs from
+ * kfd_bo_list to ctx.list
+ */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list.head) {
+
+ list_add_tail(&mem->resv_list.head, &ctx.list);
+ mem->resv_list.bo = mem->validate_list.bo;
+ mem->resv_list.shared = mem->validate_list.shared;
+ }
+
+ ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
+ false, &duplicate_save);
+ if (ret) {
+ pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
+ goto ttm_reserve_fail;
+ }
+
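+ /* sync_obj collects the PT/PD update fences so they can be waited on below */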
+ amdgpu_sync_create(&sync_obj);
+
+ /* Validate PDs and PTs */
+ ret = process_validate_vms(process_info);
+ if (ret)
+ goto validate_map_fail;
+
+ /* Wait for PD/PTs validate to finish */
+ /* FIXME: I think this isn't needed */
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_bo *bo = peer_vm->root.base.bo;
+
+ ttm_bo_wait(&bo->tbo, false, false);
+ }
+
+ /* Validate BOs and map them to GPUVM (update VM page tables). */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list.head) {
+
+ struct amdgpu_bo *bo = mem->bo;
+ uint32_t domain = mem->domain;
+ struct kfd_bo_va_list *bo_va_entry;
+
+ ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
+ if (ret) {
+ pr_debug("Memory eviction: Validate BOs failed. Try again\n");
+ goto validate_map_fail;
+ }
+
+ list_for_each_entry(bo_va_entry, &mem->bo_va_list,
+ bo_list) {
+ ret = update_gpuvm_pte((struct amdgpu_device *)
+ bo_va_entry->kgd_dev,
+ bo_va_entry,
+ &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: update PTE failed. Try again\n");
+ goto validate_map_fail;
+ }
+ }
+ }
+
+ /* Update page directories */
+ ret = process_update_pds(process_info, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: update PDs failed. Try again\n");
+ goto validate_map_fail;
+ }
+
+ amdgpu_sync_wait(&sync_obj, false);
+
+ /* Release the old eviction fence and create a new one. A fence only
+ * goes from unsignaled to signaled, so it cannot be reused.
+ * Use the context and mm from the old fence.
+ */
+ new_fence = amdgpu_amdkfd_fence_create(
+ process_info->eviction_fence->base.context,
+ process_info->eviction_fence->mm);
+ if (!new_fence) {
+ pr_err("Failed to create eviction fence\n");
+ ret = -ENOMEM;
+ goto validate_map_fail;
+ }
+ dma_fence_put(&process_info->eviction_fence->base);
+ process_info->eviction_fence = new_fence;
+ *ef = dma_fence_get(&new_fence->base);
+
+ /* Wait for validate to finish and attach new eviction fence */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list.head)
+ ttm_bo_wait(&mem->bo->tbo, false, false);
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list.head)
+ amdgpu_bo_fence(mem->bo,
+ &process_info->eviction_fence->base, true);
+
+ /* Attach eviction fence to PD / PT BOs */
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_bo *bo = peer_vm->root.base.bo;
+
+ amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
+ }
+
+validate_map_fail:
+ ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
+ amdgpu_sync_free(&sync_obj);
+ttm_reserve_fail:
+ mutex_unlock(&process_info->lock);
+ kfree(pd_bo_list);
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index f450b69..bf872f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -27,6 +27,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
+#include "amdgpu_atomfirmware.h"
#include "amdgpu_i2c.h"
#include "atom.h"
@@ -690,12 +691,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
/* set a reasonable default for DP */
if (adev->clock.default_dispclk < 53900) {
- DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
- adev->clock.default_dispclk / 100);
+ DRM_DEBUG("Changing default dispclk from %dMhz to 600Mhz\n",
+ adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
} else if (adev->clock.default_dispclk <= 60000) {
- DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
- adev->clock.default_dispclk / 100);
+ DRM_DEBUG("Changing default dispclk from %dMhz to 625Mhz\n",
+ adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 62500;
}
adev->clock.dp_extclk =
@@ -1699,7 +1700,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
+static void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
uint32_t bios_2_scratch, bios_6_scratch;
@@ -1721,28 +1722,6 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
-}
-
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
-{
- int i;
-
- /*
- * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
- * execute ASIC_Init posting via driver
- */
- adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
-}
-
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung)
{
@@ -1798,7 +1777,7 @@ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
#endif
}
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
+static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
@@ -1841,3 +1820,234 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios. The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes them to program specific
+ * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
+ * atombios.h, and atom.c
+ */
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+
+ WREG32(reg, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32(reg);
+ return r;
+}
+
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+
+ WREG32_IO(reg, val);
+}
+
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32_IO(reg);
+ return r;
+}
+
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+}
+
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+ NULL);
+
+/**
+ * amdgpu_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+void amdgpu_atombios_fini(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.atom_context) {
+ kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
+ kfree(adev->mode_info.atom_context);
+ adev->mode_info.atom_context = NULL;
+ kfree(adev->mode_info.atom_card_info);
+ adev->mode_info.atom_card_info = NULL;
+ device_remove_file(adev->dev, &dev_attr_vbios_version);
+}
+
+/**
+ * amdgpu_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+int amdgpu_atombios_init(struct amdgpu_device *adev)
+{
+ struct card_info *atom_card_info =
+ kzalloc(sizeof(struct card_info), GFP_KERNEL);
+ int ret;
+
+ if (!atom_card_info)
+ return -ENOMEM;
+
+ adev->mode_info.atom_card_info = atom_card_info;
+ atom_card_info->dev = adev->ddev;
+ atom_card_info->reg_read = cail_reg_read;
+ atom_card_info->reg_write = cail_reg_write;
+ /* needed for iio ops */
+ if (adev->rio_mem) {
+ atom_card_info->ioreg_read = cail_ioreg_read;
+ atom_card_info->ioreg_write = cail_ioreg_write;
+ } else {
+ DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
+ atom_card_info->ioreg_read = cail_reg_read;
+ atom_card_info->ioreg_write = cail_reg_write;
+ }
+ atom_card_info->mc_read = cail_mc_read;
+ atom_card_info->mc_write = cail_mc_write;
+ atom_card_info->pll_read = cail_pll_read;
+ atom_card_info->pll_write = cail_pll_write;
+
+ adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
+ if (!adev->mode_info.atom_context) {
+ amdgpu_atombios_fini(adev);
+ return -ENOMEM;
+ }
+
+ mutex_init(&adev->mode_info.atom_context->mutex);
+ if (adev->is_atom_fw) {
+ amdgpu_atomfirmware_scratch_regs_init(adev);
+ amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ } else {
+ amdgpu_atombios_scratch_regs_init(adev);
+ amdgpu_atombios_allocate_fb_scratch(adev);
+ }
+
+ ret = device_create_file(adev->dev, &dev_attr_vbios_version);
+ if (ret) {
+ DRM_ERROR("Failed to create device file for VBIOS version\n");
+ return ret;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index b0d5d1d..fd8f180 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -195,9 +195,6 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
@@ -219,6 +216,7 @@ int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
u8 voltage_type,
u8 *svd_gpio_id, u8 *svc_gpio_id);
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev);
+void amdgpu_atombios_fini(struct amdgpu_device *adev);
+int amdgpu_atombios_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index ff8efd0..a0f48cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -114,6 +114,9 @@ union igp_info {
struct atom_integrated_system_info_v1_11 v11;
};
+union umc_info {
+ struct atom_umc_info_v3_1 v31;
+};
/*
* Return vram width from integrated system info table, if available,
* or 0 if not.
@@ -143,6 +146,94 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
return 0;
}
+static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
+ int atom_mem_type)
+{
+ int vram_type;
+
+ if (adev->flags & AMD_IS_APU) {
+ switch (atom_mem_type) {
+ case Ddr2MemType:
+ case LpDdr2MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR2;
+ break;
+ case Ddr3MemType:
+ case LpDdr3MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR3;
+ break;
+ case Ddr4MemType:
+ case LpDdr4MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR4;
+ break;
+ default:
+ vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (atom_mem_type) {
+ case ATOM_DGPU_VRAM_TYPE_GDDR5:
+ vram_type = AMDGPU_VRAM_TYPE_GDDR5;
+ break;
+ case ATOM_DGPU_VRAM_TYPE_HBM:
+ vram_type = AMDGPU_VRAM_TYPE_HBM;
+ break;
+ default:
+ vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ break;
+ }
+ }
+
+ return vram_type;
+}
+/*
+ * Return vram type from either integrated system info table
+ * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
+ */
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union igp_info *igp_info;
+ union umc_info *umc_info;
+ u8 frev, crev;
+ u8 mem_type;
+
+ if (adev->flags & AMD_IS_APU)
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ integratedsysteminfo);
+ else
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ umc_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ if (adev->flags & AMD_IS_APU) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 11:
+ mem_type = igp_info->v11.memorytype;
+ return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ default:
+ return 0;
+ }
+ } else {
+ umc_info = (union umc_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 1:
+ mem_type = umc_info->v31.vram_type;
+ return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ default:
+ return 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
union firmware_info {
struct atom_firmware_info_v3_1 v31;
};
@@ -151,10 +242,6 @@ union smu_info {
struct atom_smu_info_v3_1 v31;
};
-union umc_info {
- struct atom_umc_info_v3_1 v31;
-};
-
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 288b97e..7689c96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -28,6 +28,7 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index c13c51a..1ae5ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -14,6 +14,16 @@
#include "amd_acpi.h"
+#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0)
+
+struct amdgpu_px_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 px_quirk_flags;
+};
+
struct amdgpu_atpx_functions {
bool px_params;
bool power_cntl;
@@ -35,6 +45,7 @@ struct amdgpu_atpx {
static struct amdgpu_atpx_priv {
bool atpx_detected;
bool bridge_pm_usable;
+ unsigned int quirks;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
@@ -205,13 +216,19 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
- printk("ATPX Hybrid Graphics\n");
- /*
- * Disable legacy PM methods only when pcie port PM is usable,
- * otherwise the device might fail to power off or power on.
- */
- atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
- atpx->is_hybrid = true;
+ if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) {
+ printk("ATPX Hybrid Graphics, forcing to ATPX\n");
+ atpx->functions.power_cntl = true;
+ atpx->is_hybrid = false;
+ } else {
+ printk("ATPX Hybrid Graphics\n");
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
+ atpx->is_hybrid = true;
+ }
}
atpx->dgpu_req_power_for_displays = false;
@@ -547,6 +564,32 @@ static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
.get_client_id = amdgpu_atpx_get_client_id,
};
+static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ /* HG _PR3 doesn't seem to work on this A+A weston board */
+ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
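+	/* all-zero sentinel terminates the quirk list */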
+ { 0, 0, 0, 0, 0 },
+};
+
+static void amdgpu_atpx_get_quirks(struct pci_dev *pdev)
+{
+ const struct amdgpu_px_quirk *p = amdgpu_px_quirk_list;
+
+ /* Apply PX quirks */
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device) {
+ amdgpu_atpx_priv.quirks |= p->px_quirk_flags;
+ break;
+ }
+ ++p;
+ }
+}
+
/**
* amdgpu_atpx_detect - detect whether we have PX
*
@@ -570,6 +613,7 @@ static bool amdgpu_atpx_detect(void)
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
@@ -579,6 +623,7 @@ static bool amdgpu_atpx_detect(void)
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
if (has_atpx && vga_count == 2) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 63ec1e1..02b849b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -80,8 +80,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
int time;
n = AMDGPU_BENCHMARK_ITERATIONS;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
- NULL, 0, &sobj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, sdomain, 0,
+ ttm_bo_type_kernel, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -93,8 +93,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
- NULL, 0, &dobj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
+ ttm_bo_type_kernel, NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 057e1ec..a5df80d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -93,7 +93,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
resource_size_t size = 256 * 1024; /* ??? */
if (!(adev->flags & AMD_IS_APU))
- if (amdgpu_need_post(adev))
+ if (amdgpu_device_need_post(adev))
return false;
adev->bios = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 59089e0..92be7f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -233,8 +233,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
for (i = 0; i < list->num_entries; i++) {
unsigned priority = list->array[i].priority;
- list_add_tail(&list->array[i].tv.head,
- &bucket[priority]);
+ if (!list->array[i].robj->parent)
+ list_add_tail(&list->array[i].tv.head,
+ &bucket[priority]);
+
list->array[i].user_pages = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f2b72c7..71a57b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -24,12 +24,10 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
-#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"
@@ -42,152 +40,6 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
-static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
- int (*call_back_func)(struct amd_pp_init *, void **))
-{
- CGS_FUNC_ADEV;
- struct amd_pp_init pp_init;
- struct amd_powerplay *amd_pp;
-
- if (call_back_func == NULL)
- return NULL;
-
- amd_pp = &(adev->powerplay);
- pp_init.chip_family = adev->family;
- pp_init.chip_id = adev->asic_type;
- pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
- pp_init.feature_mask = amdgpu_pp_feature_mask;
- pp_init.device = cgs_device;
- if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
- return NULL;
-
- return adev->powerplay.pp_handle;
-}
-
-static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
- enum cgs_gpu_mem_type type,
- uint64_t size, uint64_t align,
- cgs_handle_t *handle)
-{
- CGS_FUNC_ADEV;
- uint16_t flags = 0;
- int ret = 0;
- uint32_t domain = 0;
- struct amdgpu_bo *obj;
-
- /* fail if the alignment is not a power of 2 */
- if (((align != 1) && (align & (align - 1)))
- || size == 0 || align == 0)
- return -EINVAL;
-
-
- switch(type) {
- case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
- case CGS_GPU_MEM_TYPE__VISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- domain = AMDGPU_GEM_DOMAIN_VRAM;
- break;
- case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
- case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- domain = AMDGPU_GEM_DOMAIN_VRAM;
- break;
- case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
- domain = AMDGPU_GEM_DOMAIN_GTT;
- break;
- case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
- flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- domain = AMDGPU_GEM_DOMAIN_GTT;
- break;
- default:
- return -EINVAL;
- }
-
-
- *handle = 0;
-
- ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
- NULL, NULL, 0, &obj);
- if (ret) {
- DRM_ERROR("(%d) bo create failed\n", ret);
- return ret;
- }
- *handle = (cgs_handle_t)obj;
-
- return ret;
-}
-
-static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
-{
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-
- if (obj) {
- int r = amdgpu_bo_reserve(obj, true);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(obj);
- amdgpu_bo_unpin(obj);
- amdgpu_bo_unreserve(obj);
- }
- amdgpu_bo_unref(&obj);
-
- }
- return 0;
-}
-
-static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
- uint64_t *mcaddr)
-{
- int r;
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-
- WARN_ON_ONCE(obj->placement.num_placement > 1);
-
- r = amdgpu_bo_reserve(obj, true);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
- amdgpu_bo_unreserve(obj);
- return r;
-}
-
-static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
-{
- int r;
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
- r = amdgpu_bo_reserve(obj, true);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_unpin(obj);
- amdgpu_bo_unreserve(obj);
- return r;
-}
-
-static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
- void **map)
-{
- int r;
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
- r = amdgpu_bo_reserve(obj, true);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_kmap(obj, map);
- amdgpu_bo_unreserve(obj);
- return r;
-}
-
-static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
-{
- int r;
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
- r = amdgpu_bo_reserve(obj, true);
- if (unlikely(r != 0))
- return r;
- amdgpu_bo_kunmap(obj);
- amdgpu_bo_unreserve(obj);
- return r;
-}
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
@@ -329,109 +181,6 @@ static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigne
adev->mode_info.atom_context, table, args);
}
-struct cgs_irq_params {
- unsigned src_id;
- cgs_irq_source_set_func_t set;
- cgs_irq_handler_func_t handler;
- void *private_data;
-};
-
-static int cgs_set_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- struct cgs_irq_params *irq_params =
- (struct cgs_irq_params *)src->data;
- if (!irq_params)
- return -EINVAL;
- if (!irq_params->set)
- return -EINVAL;
- return irq_params->set(irq_params->private_data,
- irq_params->src_id,
- type,
- (int)state);
-}
-
-static int cgs_process_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct cgs_irq_params *irq_params =
- (struct cgs_irq_params *)source->data;
- if (!irq_params)
- return -EINVAL;
- if (!irq_params->handler)
- return -EINVAL;
- return irq_params->handler(irq_params->private_data,
- irq_params->src_id,
- entry->iv_entry);
-}
-
-static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
- .set = cgs_set_irq_state,
- .process = cgs_process_irq,
-};
-
-static int amdgpu_cgs_add_irq_source(void *cgs_device,
- unsigned client_id,
- unsigned src_id,
- unsigned num_types,
- cgs_irq_source_set_func_t set,
- cgs_irq_handler_func_t handler,
- void *private_data)
-{
- CGS_FUNC_ADEV;
- int ret = 0;
- struct cgs_irq_params *irq_params;
- struct amdgpu_irq_src *source =
- kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
- if (!source)
- return -ENOMEM;
- irq_params =
- kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
- if (!irq_params) {
- kfree(source);
- return -ENOMEM;
- }
- source->num_types = num_types;
- source->funcs = &cgs_irq_funcs;
- irq_params->src_id = src_id;
- irq_params->set = set;
- irq_params->handler = handler;
- irq_params->private_data = private_data;
- source->data = (void *)irq_params;
- ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
- if (ret) {
- kfree(irq_params);
- kfree(source);
- }
-
- return ret;
-}
-
-static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
- unsigned src_id, unsigned type)
-{
- CGS_FUNC_ADEV;
-
- if (!adev->irq.client[client_id].sources)
- return -EINVAL;
-
- return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
-}
-
-static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
- unsigned src_id, unsigned type)
-{
- CGS_FUNC_ADEV;
-
- if (!adev->irq.client[client_id].sources)
- return -EINVAL;
-
- return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
-}
-
static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
@@ -801,6 +550,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
else
strcpy(fw_name, "amdgpu/vega10_smc.bin");
break;
+ case CHIP_VEGA12:
+ strcpy(fw_name, "amdgpu/vega12_smc.bin");
+ break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
@@ -852,61 +604,6 @@ static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
return amdgpu_sriov_vf(adev);
}
-static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
- struct cgs_system_info *sys_info)
-{
- CGS_FUNC_ADEV;
-
- if (NULL == sys_info)
- return -ENODEV;
-
- if (sizeof(struct cgs_system_info) != sys_info->size)
- return -ENODEV;
-
- switch (sys_info->info_id) {
- case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
- sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
- break;
- case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
- sys_info->value = adev->pm.pcie_gen_mask;
- break;
- case CGS_SYSTEM_INFO_PCIE_MLW:
- sys_info->value = adev->pm.pcie_mlw_mask;
- break;
- case CGS_SYSTEM_INFO_PCIE_DEV:
- sys_info->value = adev->pdev->device;
- break;
- case CGS_SYSTEM_INFO_PCIE_REV:
- sys_info->value = adev->pdev->revision;
- break;
- case CGS_SYSTEM_INFO_CG_FLAGS:
- sys_info->value = adev->cg_flags;
- break;
- case CGS_SYSTEM_INFO_PG_FLAGS:
- sys_info->value = adev->pg_flags;
- break;
- case CGS_SYSTEM_INFO_GFX_CU_INFO:
- sys_info->value = adev->gfx.cu_info.number;
- break;
- case CGS_SYSTEM_INFO_GFX_SE_INFO:
- sys_info->value = adev->gfx.config.max_shader_engines;
- break;
- case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
- sys_info->value = adev->pdev->subsystem_device;
- break;
- case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
- sys_info->value = adev->pdev->subsystem_vendor;
- break;
- case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
- sys_info->value = adev->pdev->devfn;
- break;
- default:
- return -ENODEV;
- }
-
- return 0;
-}
-
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
struct cgs_display_info *info)
{
@@ -917,12 +614,9 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
return -EINVAL;
mode_info = info->mode_info;
- if (mode_info) {
+ if (mode_info)
/* if the displays are off, vblank time is max */
mode_info->vblank_time_us = 0xffffffff;
- /* always set the reference clock */
- mode_info->ref_clock = adev->clock.spll.reference_freq;
- }
if (!amdgpu_device_has_dc_support(adev)) {
struct amdgpu_crtc *amdgpu_crtc;
@@ -948,7 +642,11 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
(amdgpu_crtc->v_border * 2);
mode_info->vblank_time_us = vblank_lines * line_time_us;
mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- mode_info->ref_clock = adev->clock.spll.reference_freq;
+ /* we have issues with mclk switching with refresh rates
+ * over 120 hz on the non-DC code.
+ */
+ if (mode_info->refresh_rate > 120)
+ mode_info->vblank_time_us = 0;
mode_info = NULL;
}
}
@@ -958,7 +656,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
if (mode_info != NULL) {
mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
- mode_info->ref_clock = adev->clock.spll.reference_freq;
}
}
return 0;
@@ -974,223 +671,7 @@ static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool ena
return 0;
}
-/** \brief evaluate acpi namespace object, handle or pathname must be valid
- * \param cgs_device
- * \param info input/output arguments for the control method
- * \return status
- */
-
-#if defined(CONFIG_ACPI)
-static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
- struct cgs_acpi_method_info *info)
-{
- CGS_FUNC_ADEV;
- acpi_handle handle;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *params, *obj;
- uint8_t name[5] = {'\0'};
- struct cgs_acpi_method_argument *argument;
- uint32_t i, count;
- acpi_status status;
- int result;
-
- handle = ACPI_HANDLE(&adev->pdev->dev);
- if (!handle)
- return -ENODEV;
-
- memset(&input, 0, sizeof(struct acpi_object_list));
-
- /* validate input info */
- if (info->size != sizeof(struct cgs_acpi_method_info))
- return -EINVAL;
-
- input.count = info->input_count;
- if (info->input_count > 0) {
- if (info->pinput_argument == NULL)
- return -EINVAL;
- argument = info->pinput_argument;
- for (i = 0; i < info->input_count; i++) {
- if (((argument->type == ACPI_TYPE_STRING) ||
- (argument->type == ACPI_TYPE_BUFFER)) &&
- (argument->pointer == NULL))
- return -EINVAL;
- argument++;
- }
- }
-
- if (info->output_count > 0) {
- if (info->poutput_argument == NULL)
- return -EINVAL;
- argument = info->poutput_argument;
- for (i = 0; i < info->output_count; i++) {
- if (((argument->type == ACPI_TYPE_STRING) ||
- (argument->type == ACPI_TYPE_BUFFER))
- && (argument->pointer == NULL))
- return -EINVAL;
- argument++;
- }
- }
-
- /* The path name passed to acpi_evaluate_object should be null terminated */
- if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
- strncpy(name, (char *)&(info->name), sizeof(uint32_t));
- name[4] = '\0';
- }
-
- /* parse input parameters */
- if (input.count > 0) {
- input.pointer = params =
- kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
- if (params == NULL)
- return -EINVAL;
-
- argument = info->pinput_argument;
-
- for (i = 0; i < input.count; i++) {
- params->type = argument->type;
- switch (params->type) {
- case ACPI_TYPE_INTEGER:
- params->integer.value = argument->value;
- break;
- case ACPI_TYPE_STRING:
- params->string.length = argument->data_length;
- params->string.pointer = argument->pointer;
- break;
- case ACPI_TYPE_BUFFER:
- params->buffer.length = argument->data_length;
- params->buffer.pointer = argument->pointer;
- break;
- default:
- break;
- }
- params++;
- argument++;
- }
- }
-
- /* parse output info */
- count = info->output_count;
- argument = info->poutput_argument;
-
- /* evaluate the acpi method */
- status = acpi_evaluate_object(handle, name, &input, &output);
-
- if (ACPI_FAILURE(status)) {
- result = -EIO;
- goto free_input;
- }
-
- /* return the output info */
- obj = output.pointer;
-
- if (count > 1) {
- if ((obj->type != ACPI_TYPE_PACKAGE) ||
- (obj->package.count != count)) {
- result = -EIO;
- goto free_obj;
- }
- params = obj->package.elements;
- } else
- params = obj;
-
- if (params == NULL) {
- result = -EIO;
- goto free_obj;
- }
-
- for (i = 0; i < count; i++) {
- if (argument->type != params->type) {
- result = -EIO;
- goto free_obj;
- }
- switch (params->type) {
- case ACPI_TYPE_INTEGER:
- argument->value = params->integer.value;
- break;
- case ACPI_TYPE_STRING:
- if ((params->string.length != argument->data_length) ||
- (params->string.pointer == NULL)) {
- result = -EIO;
- goto free_obj;
- }
- strncpy(argument->pointer,
- params->string.pointer,
- params->string.length);
- break;
- case ACPI_TYPE_BUFFER:
- if (params->buffer.pointer == NULL) {
- result = -EIO;
- goto free_obj;
- }
- memcpy(argument->pointer,
- params->buffer.pointer,
- argument->data_length);
- break;
- default:
- break;
- }
- argument++;
- params++;
- }
-
- result = 0;
-free_obj:
- kfree(obj);
-free_input:
- kfree((void *)input.pointer);
- return result;
-}
-#else
-static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
- struct cgs_acpi_method_info *info)
-{
- return -EIO;
-}
-#endif
-
-static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
- uint32_t acpi_method,
- uint32_t acpi_function,
- void *pinput, void *poutput,
- uint32_t output_count,
- uint32_t input_size,
- uint32_t output_size)
-{
- struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
- struct cgs_acpi_method_argument acpi_output = {0};
- struct cgs_acpi_method_info info = {0};
-
- acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
- acpi_input[0].data_length = sizeof(uint32_t);
- acpi_input[0].value = acpi_function;
-
- acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
- acpi_input[1].data_length = input_size;
- acpi_input[1].pointer = pinput;
-
- acpi_output.type = CGS_ACPI_TYPE_BUFFER;
- acpi_output.data_length = output_size;
- acpi_output.pointer = poutput;
-
- info.size = sizeof(struct cgs_acpi_method_info);
- info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
- info.input_count = 2;
- info.name = acpi_method;
- info.pinput_argument = acpi_input;
- info.output_count = output_count;
- info.poutput_argument = &acpi_output;
-
- return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
-}
-
static const struct cgs_ops amdgpu_cgs_ops = {
- .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
- .free_gpu_mem = amdgpu_cgs_free_gpu_mem,
- .gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
- .gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
- .kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
- .kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
.read_register = amdgpu_cgs_read_register,
.write_register = amdgpu_cgs_write_register,
.read_ind_register = amdgpu_cgs_read_ind_register,
@@ -1205,18 +686,9 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
- .call_acpi_method = amdgpu_cgs_call_acpi_method,
- .query_system_info = amdgpu_cgs_query_system_info,
.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
- .register_pp_handle = amdgpu_cgs_register_pp_handle,
-};
-
-static const struct cgs_os_ops amdgpu_cgs_os_ops = {
- .add_irq_source = amdgpu_cgs_add_irq_source,
- .irq_get = amdgpu_cgs_irq_get,
- .irq_put = amdgpu_cgs_irq_put
};
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
@@ -1230,7 +702,6 @@ struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
}
cgs_device->base.ops = &amdgpu_cgs_ops;
- cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
cgs_device->adev = adev;
return (struct cgs_device *)cgs_device;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index df9cbc7..96501ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+ amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
@@ -358,7 +351,6 @@ static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
if (amdgpu_connector->edid) {
drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
- drm_edid_to_eld(connector, amdgpu_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(connector, NULL);
@@ -737,9 +729,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -758,8 +752,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
amdgpu_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -869,16 +867,18 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = amdgpu_connector_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
if (amdgpu_connector->ddc_bus)
- dret = amdgpu_ddc_probe(amdgpu_connector, false);
+ dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
if (dret) {
amdgpu_connector->detected_by_load = false;
amdgpu_connector_free_edid(connector);
@@ -925,8 +925,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -989,9 +991,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -999,7 +1003,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
if (amdgpu_connector->ddc_bus)
- dret = amdgpu_ddc_probe(amdgpu_connector, false);
+ dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
if (dret) {
amdgpu_connector->detected_by_load = false;
amdgpu_connector_free_edid(connector);
@@ -1116,8 +1120,10 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1360,9 +1366,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1402,7 +1410,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
/* setup ddc on the bridge */
amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
/* bridge chips are always aux */
- if (amdgpu_ddc_probe(amdgpu_connector, true)) /* try DDC */
+ /* try DDC */
+ if (amdgpu_display_ddc_probe(amdgpu_connector, true))
ret = connector_status_connected;
else if (amdgpu_connector->dac_load_detect) { /* try load detection */
const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1422,7 +1431,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
} else {
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
- if (amdgpu_ddc_probe(amdgpu_connector, false))
+ if (amdgpu_display_ddc_probe(amdgpu_connector,
+ false))
ret = connector_status_connected;
}
}
@@ -1430,8 +1440,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 57abf7a..dc34b50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -90,6 +90,12 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_chunk;
}
+ /* skip guilty context job */
+ if (atomic_read(&p->ctx->guilty) == 1) {
+ ret = -ECANCELED;
+ goto free_chunk;
+ }
+
mutex_lock(&p->ctx->lock);
/* get chunks */
@@ -251,7 +257,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
return;
}
- total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+ total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
@@ -296,8 +302,8 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
/* Do the same for visible VRAM if half of it is free */
- if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- u64 total_vis_vram = adev->mc.visible_vram_size;
+ if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
+ u64 total_vis_vram = adev->gmc.visible_vram_size;
u64 used_vis_vram =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -337,7 +343,12 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u64 initial_bytes_moved, bytes_moved;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ .resv = bo->tbo.resv,
+ .flags = 0
+ };
uint32_t domain;
int r;
@@ -348,7 +359,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
* to move it. Don't move anything if the threshold is zero.
*/
if (p->bytes_moved < p->bytes_moved_threshold) {
- if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
(bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
/* And don't move a CPU_ACCESS_REQUIRED BO to limited
* visible VRAM if we've depleted our allowance to do
@@ -367,15 +378,13 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- bytes_moved = atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
- p->bytes_moved += bytes_moved;
- if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+ p->bytes_moved += ctx.bytes_moved;
+ if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
- p->bytes_moved_vis += bytes_moved;
+ bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
@@ -390,6 +399,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo *validated)
{
uint32_t domain = validated->allowed_domains;
+ struct ttm_operation_ctx ctx = { true, false };
int r;
if (!p->evictable)
@@ -427,11 +437,11 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
amdgpu_ttm_placement_from_domain(bo, other);
update_bytes_moved_vis =
- adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
+ bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
p->bytes_moved += bytes_moved;
@@ -470,6 +480,7 @@ static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_bo_list_entry *lobj;
int r;
@@ -487,8 +498,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
lobj->user_pages) {
amdgpu_ttm_placement_from_domain(bo,
AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
- false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
@@ -532,7 +542,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
- if (p->uf_entry.robj)
+ if (p->uf_entry.robj && !p->uf_entry.robj->parent)
list_add(&p->uf_entry.tv.head, &p->validated);
while (1) {
@@ -678,7 +688,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (!r && p->uf_entry.robj) {
struct amdgpu_bo *uf = p->uf_entry.robj;
- r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
+ r = amdgpu_ttm_alloc_gart(&uf->tbo);
p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
}
@@ -768,10 +778,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
struct amdgpu_bo *bo;
int i, r;
- r = amdgpu_vm_update_directories(adev, vm);
- if (r)
- return r;
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -781,7 +787,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
r = amdgpu_sync_fence(adev, &p->job->sync,
- fpriv->prt_va->last_pt_update);
+ fpriv->prt_va->last_pt_update, false);
if (r)
return r;
@@ -795,7 +801,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f);
+ r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
if (r)
return r;
}
@@ -818,7 +824,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f);
+ r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
if (r)
return r;
}
@@ -829,7 +835,11 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
+ r = amdgpu_vm_update_directories(adev, vm);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
if (r)
return r;
@@ -865,8 +875,8 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *m;
struct amdgpu_bo *aobj = NULL;
struct amdgpu_cs_chunk *chunk;
+ uint64_t offset, va_start;
struct amdgpu_ib *ib;
- uint64_t offset;
uint8_t *kptr;
chunk = &p->chunks[i];
@@ -876,14 +886,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
continue;
- r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
- &aobj, &m);
+ va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
DRM_ERROR("IB va_start is invalid\n");
return r;
}
- if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
+ if ((va_start + chunk_ib->ib_bytes) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
@@ -896,7 +906,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
}
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
- kptr += chunk_ib->va_start - offset;
+ kptr += va_start - offset;
memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
amdgpu_bo_kunmap(aobj);
@@ -1033,8 +1043,8 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
amdgpu_ctx_put(ctx);
return r;
} else if (fence) {
- r = amdgpu_sync_fence(p->adev, &p->job->sync,
- fence);
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
+ true);
dma_fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
@@ -1053,7 +1063,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
if (r)
return r;
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence);
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
dma_fence_put(fence);
return r;
@@ -1145,7 +1155,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_ring *ring = p->job->ring;
- struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
unsigned i;
uint64_t seq;
@@ -1168,7 +1178,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
- r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+ r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
@@ -1194,11 +1204,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->uf_sequence = seq;
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring,
- amd_sched_get_job_priority(&job->base));
+ amdgpu_ring_priority_get(job->ring, job->base.s_priority);
trace_amdgpu_cs_ioctl(job);
- amd_sched_entity_push_job(&job->base);
+ drm_sched_entity_push_job(&job->base, entity);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
@@ -1570,6 +1579,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
struct amdgpu_bo_va_mapping **map)
{
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
int r;
@@ -1590,11 +1600,10 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
- false);
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
}
- return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
+ return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c184468..09d35051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -28,10 +28,10 @@
#include "amdgpu_sched.h"
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
/* NORMAL and below are accessible by everyone */
- if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+ if (priority <= DRM_SCHED_PRIORITY_NORMAL)
return 0;
if (capable(CAP_SYS_NICE))
@@ -44,14 +44,14 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
- enum amd_sched_priority priority,
+ enum drm_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
unsigned i, j;
int r;
- if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
return -EINVAL;
r = amdgpu_ctx_priority_permit(filp, priority);
@@ -75,22 +75,23 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
}
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ ctx->reset_counter_query = ctx->reset_counter;
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
- ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
+ ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
rq = &ring->sched.sched_rq[priority];
if (ring == &adev->gfx.kiq.ring)
continue;
- r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
- rq, amdgpu_sched_jobs);
+ r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
+ rq, amdgpu_sched_jobs, &ctx->guilty);
if (r)
goto failed;
}
@@ -103,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
failed:
for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
+ drm_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -125,7 +126,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
ctx->fences = NULL;
for (i = 0; i < adev->num_rings; i++)
- amd_sched_entity_fini(&adev->rings[i]->sched,
+ drm_sched_entity_fini(&adev->rings[i]->sched,
&ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
@@ -136,7 +137,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
- enum amd_sched_priority priority,
+ enum drm_sched_priority priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -216,11 +217,45 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
/* determine if a GPU reset has occurred since the last call */
reset_counter = atomic_read(&adev->gpu_reset_counter);
/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
- if (ctx->reset_counter == reset_counter)
+ if (ctx->reset_counter_query == reset_counter)
out->state.reset_status = AMDGPU_CTX_NO_RESET;
else
out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
- ctx->reset_counter = reset_counter;
+ ctx->reset_counter_query = reset_counter;
+
+ mutex_unlock(&mgr->lock);
+ return 0;
+}
+
+static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv, uint32_t id,
+ union drm_amdgpu_ctx_out *out)
+{
+ struct amdgpu_ctx *ctx;
+ struct amdgpu_ctx_mgr *mgr;
+
+ if (!fpriv)
+ return -EINVAL;
+
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ ctx = idr_find(&mgr->ctx_handles, id);
+ if (!ctx) {
+ mutex_unlock(&mgr->lock);
+ return -EINVAL;
+ }
+
+ out->state.flags = 0x0;
+ out->state.hangs = 0x0;
+
+ if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;
+
+ if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;
+
+ if (atomic_read(&ctx->guilty))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
mutex_unlock(&mgr->lock);
return 0;
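
For reference, the AMDGPU_CTX_OP_QUERY_STATE2 operation added above is reached through the existing context ioctl. The sketch below is a hedged userspace illustration only: the fd and ctx_id are assumed to come from an already-opened render node and an earlier ALLOC_CTX call, and the uapi include path may differ between installs.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead() */
#include <drm/amdgpu_drm.h>	/* union drm_amdgpu_ctx, AMDGPU_CTX_* */

/* Sketch: fetch the QUERY_STATE2 flags for an existing context. */
static int query_ctx_state2(int fd, uint32_t ctx_id, uint64_t *flags)
{
	union drm_amdgpu_ctx args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
	args.in.ctx_id = ctx_id;

	r = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r == 0)
		*flags = args.out.state.flags;
	return r;
}

/* A set AMDGPU_CTX_QUERY2_FLAGS_GUILTY bit means the context caused a hang
 * and further submissions on it fail with -ECANCELED (see the amdgpu_cs.c
 * hunk above), so userspace is expected to recreate it; FLAGS_RESET and
 * FLAGS_VRAMLOST report a GPU reset and lost VRAM contents respectively.
 */
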
@@ -231,7 +266,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
- enum amd_sched_priority priority;
+ enum drm_sched_priority priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@@ -243,8 +278,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
- if (priority == AMD_SCHED_PRIORITY_INVALID)
- priority = AMD_SCHED_PRIORITY_NORMAL;
+ if (priority == DRM_SCHED_PRIORITY_INVALID)
+ priority = DRM_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
@@ -257,6 +292,9 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
case AMDGPU_CTX_OP_QUERY_STATE:
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
break;
+ case AMDGPU_CTX_OP_QUERY_STATE2:
+ r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
+ break;
default:
return -EINVAL;
}
@@ -347,18 +385,18 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
}
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
int i;
struct amdgpu_device *adev = ctx->adev;
- struct amd_sched_rq *rq;
- struct amd_sched_entity *entity;
+ struct drm_sched_rq *rq;
+ struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
- enum amd_sched_priority ctx_prio;
+ enum drm_sched_priority ctx_prio;
ctx->override_priority = priority;
- ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+ ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < adev->num_rings; i++) {
@@ -369,7 +407,7 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
continue;
- amd_sched_entity_set_rq(entity, rq);
+ drm_sched_entity_set_rq(entity, rq);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
new file mode 100644
index 0000000..369beb5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -0,0 +1,803 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <drm/drmP.h>
+#include <linux/debugfs.h>
+#include "amdgpu.h"
+
+/*
+ * Debugfs
+ */
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles)
+{
+ unsigned i;
+
+ for (i = 0; i < adev->debugfs_count; i++) {
+ if (adev->debugfs[i].files == files) {
+ /* Already registered */
+ return 0;
+ }
+ }
+
+ i = adev->debugfs_count + 1;
+ if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
+ DRM_ERROR("Reached maximum number of debugfs components.\n");
+ DRM_ERROR("Report so we increase "
+ "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
+ return -EINVAL;
+ }
+ adev->debugfs[adev->debugfs_count].files = files;
+ adev->debugfs[adev->debugfs_count].num_files = nfiles;
+ adev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_create_files(files, nfiles,
+ adev->ddev->primary->debugfs_root,
+ adev->ddev->primary);
+#endif
+ return 0;
+}
+
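
As an illustration of how the rest of the driver is expected to consume this helper, a component passes in a static drm_info_list; every name below is hypothetical and mirrors the pattern used later in this file.

/* Hypothetical example of a component registering one debugfs entry. */
static int example_debugfs_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu_reset_counter: %d\n",
		   atomic_read(&adev->gpu_reset_counter));
	return 0;
}

static const struct drm_info_list example_debugfs_list[] = {
	{"amdgpu_example", example_debugfs_show},
};

/* called once from the component's init path:
 *	amdgpu_debugfs_add_files(adev, example_debugfs_list,
 *				 ARRAY_SIZE(example_debugfs_list));
 */
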
+#if defined(CONFIG_DEBUG_FS)
+
+static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ /* are we reading registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= (1UL << 22) - 1;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ uint32_t value;
+
+ if (*pos > adev->rmmio_size)
+ goto end;
+
+ value = RREG32(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto end;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ return result;
+}
+
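
To make the bit layout consumed above concrete, here is a hedged userspace sketch; the debugfs path, DRI minor and register offset are illustrative assumptions, and the field positions mirror the GENMASK decoding in the read handler.

/* Hedged sketch: read one dword register through an explicit SE/SH/instance
 * bank via the amdgpu_regs debugfs file.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t reg = 0x1234;			/* register byte offset, low 22 bits */
	uint64_t se = 0, sh = 0, instance = 0;	/* 0x3FF would mean "broadcast" */
	uint64_t pos = reg;
	uint32_t value;
	int fd;

	pos |= 1ULL << 62;			/* enable explicit bank selection */
	pos |= (se & 0x3FF) << 24;		/* bits 24-33: shader engine */
	pos |= (sh & 0x3FF) << 34;		/* bits 34-43: shader array   */
	pos |= (instance & 0x3FF) << 44;	/* bits 44-53: instance       */
	/* setting bit 23 would additionally take the PG lock */

	fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
	if (fd < 0)
		return 1;
	if (pread(fd, &value, sizeof(value), pos) == sizeof(value))
		printf("reg 0x%llx = 0x%08x\n", (unsigned long long)reg, value);
	close(fd);
	return 0;
}
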
+static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ /* are we writing registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= (1UL << 22) - 1;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ uint32_t value;
+
+ if (*pos > adev->rmmio_size)
+ return result;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_PCIE(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_PCIE(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_DIDT(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_DIDT(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_SMC(*pos);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_SMC(*pos, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ uint32_t *config, no_regs = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ /* version, increment each time something is added */
+ config[no_regs++] = 3;
+ config[no_regs++] = adev->gfx.config.max_shader_engines;
+ config[no_regs++] = adev->gfx.config.max_tile_pipes;
+ config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+ config[no_regs++] = adev->gfx.config.max_sh_per_se;
+ config[no_regs++] = adev->gfx.config.max_backends_per_se;
+ config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+ config[no_regs++] = adev->gfx.config.max_gprs;
+ config[no_regs++] = adev->gfx.config.max_gs_threads;
+ config[no_regs++] = adev->gfx.config.max_hw_contexts;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+ config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.num_tile_pipes;
+ config[no_regs++] = adev->gfx.config.backend_enable_mask;
+ config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+ config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+ config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+ config[no_regs++] = adev->gfx.config.num_gpus;
+ config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+ config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+ config[no_regs++] = adev->gfx.config.gb_addr_config;
+ config[no_regs++] = adev->gfx.config.num_rbs;
+
+ /* rev==1 */
+ config[no_regs++] = adev->rev_id;
+ config[no_regs++] = adev->pg_flags;
+ config[no_regs++] = adev->cg_flags;
+
+ /* rev==2 */
+ config[no_regs++] = adev->family;
+ config[no_regs++] = adev->external_rev_id;
+
+ /* rev==3 */
+ config[no_regs++] = adev->pdev->device;
+ config[no_regs++] = adev->pdev->revision;
+ config[no_regs++] = adev->pdev->subsystem_device;
+ config[no_regs++] = adev->pdev->subsystem_vendor;
+
+ while (size && (*pos < no_regs * 4)) {
+ uint32_t value;
+
+ value = config[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ kfree(config);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ kfree(config);
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int idx, x, outsize, r, valuesize;
+ uint32_t values[16];
+
+ if (size & 3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (amdgpu_dpm == 0)
+ return -EINVAL;
+
+ /* convert offset to sensor number */
+ idx = *pos >> 2;
+
+ valuesize = sizeof(values);
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+ else
+ return -EINVAL;
+
+ if (size > valuesize)
+ return -EINVAL;
+
+ outsize = 0;
+ x = 0;
+ if (!r) {
+ while (size) {
+ r = put_user(values[x++], (int32_t *)buf);
+ buf += 4;
+ size -= 4;
+ outsize += 4;
+ }
+ }
+
+ return !r ? outsize : r;
+}
+
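
Similarly, a hedged sketch for the amdgpu_sensors file added above; the path and sensor index are assumptions (valid indices come from the AMDGPU_PP_SENSOR_* enum, and some sensors return more than one dword, in which case a larger read is needed).

/* Hedged sketch: read one sensor value; the file offset divided by four is
 * the sensor index.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int idx = 0;				/* assumed sensor number */
	uint32_t value;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_sensors", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &value, sizeof(value), (off_t)idx << 2) == sizeof(value))
		printf("sensor %d = %u\n", idx, value);
	close(fd);
	return 0;
}
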
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r, x;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (!x)
+ return -EINVAL;
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
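
And, purely for illustration, a small helper that packs the amdgpu_wave file position from the fields decoded above; the widths mirror the GENMASK ranges in the read handler.

#include <stdint.h>

/* Sketch: pack se/sh/cu/wave/simd into an amdgpu_wave file offset. */
static uint64_t amdgpu_wave_offset(uint32_t se, uint32_t sh, uint32_t cu,
				   uint32_t wave, uint32_t simd)
{
	uint64_t pos = 0;	/* low 7 bits: byte offset into the wave data */

	pos |= ((uint64_t)se   & 0xFF) << 7;	/* bits 7-14  */
	pos |= ((uint64_t)sh   & 0xFF) << 15;	/* bits 15-22 */
	pos |= ((uint64_t)cu   & 0xFF) << 23;	/* bits 23-30 */
	pos |= ((uint64_t)wave & 0x3F) << 31;	/* bits 31-36 */
	pos |= ((uint64_t)simd & 0xFF) << 37;	/* bits 37-44 */
	return pos;
}
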
+static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
+
+ data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ if (bank == 0) {
+ if (adev->gfx.funcs->read_wave_vgprs)
+ adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
+ } else {
+ if (adev->gfx.funcs->read_wave_sgprs)
+ adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ while (size) {
+ uint32_t value;
+
+ value = data[offset++];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto err;
+ }
+
+ result += 4;
+ buf += 4;
+ size -= 4;
+ }
+
+err:
+ kfree(data);
+ return result;
+}
+
+static const struct file_operations amdgpu_debugfs_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_read,
+ .write = amdgpu_debugfs_regs_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_didt_read,
+ .write = amdgpu_debugfs_regs_didt_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_pcie_read,
+ .write = amdgpu_debugfs_regs_pcie_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_smc_read,
+ .write = amdgpu_debugfs_regs_smc_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gca_config_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_sensor_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_gpr_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gpr_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations *debugfs_regs[] = {
+ &amdgpu_debugfs_regs_fops,
+ &amdgpu_debugfs_regs_didt_fops,
+ &amdgpu_debugfs_regs_pcie_fops,
+ &amdgpu_debugfs_regs_smc_fops,
+ &amdgpu_debugfs_gca_config_fops,
+ &amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
+ &amdgpu_debugfs_gpr_fops,
+};
+
+static const char *debugfs_regs_names[] = {
+ "amdgpu_regs",
+ "amdgpu_regs_didt",
+ "amdgpu_regs_pcie",
+ "amdgpu_regs_smc",
+ "amdgpu_gca_config",
+ "amdgpu_sensors",
+ "amdgpu_wave",
+ "amdgpu_gpr",
+};
+
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+ unsigned i, j;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ ent = debugfs_create_file(debugfs_regs_names[i],
+ S_IFREG | S_IRUGO, root,
+ adev, debugfs_regs[i]);
+ if (IS_ERR(ent)) {
+ for (j = 0; j < i; j++) {
+ debugfs_remove(adev->debugfs_regs[j]);
+ adev->debugfs_regs[j] = NULL;
+ }
+ return PTR_ERR(ent);
+ }
+
+ if (!i)
+ i_size_write(ent->d_inode, adev->rmmio_size);
+ adev->debugfs_regs[i] = ent;
+ }
+
+ return 0;
+}
+
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ if (adev->debugfs_regs[i]) {
+ debugfs_remove(adev->debugfs_regs[i]);
+ adev->debugfs_regs[i] = NULL;
+ }
+ }
+}
+
+static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ int r = 0, i;
+
+ /* hold on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+ kthread_park(ring->sched.thread);
+ }
+
+ seq_printf(m, "run ib test:\n");
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ seq_printf(m, "ib ring tests failed (%d).\n", r);
+ else
+ seq_printf(m, "ib ring tests passed.\n");
+
+ /* go on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+ kthread_unpark(ring->sched.thread);
+ }
+
+ return 0;
+}
+
+static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_write(m, adev->bios, adev->bios_size);
+ return 0;
+}
+
+static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+ return 0;
+}
+
+static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_debugfs_list[] = {
+ {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
+ {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
+ {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
+ {"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
+};
+
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
+ ARRAY_SIZE(amdgpu_debugfs_list));
+}
+
+#else
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
new file mode 100644
index 0000000..8260d80
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Debugfs
+ */
+struct amdgpu_debugfs {
+ const struct drm_info_list *files;
+ unsigned num_files;
+};
+
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
+int amdgpu_debugfs_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles);
+int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3573ecd..34af664 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -28,7 +28,6 @@
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
-#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -60,15 +59,11 @@
#include "amdgpu_pm.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
-
static const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
@@ -89,10 +84,21 @@ static const char *amdgpu_asic_name[] = {
"POLARIS11",
"POLARIS12",
"VEGA10",
+ "VEGA12",
"RAVEN",
"LAST",
};
+static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device is a dGPU with HG/PX power control,
+ * otherwise return false.
+ */
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -105,6 +111,15 @@ bool amdgpu_device_is_px(struct drm_device *dev)
/*
* MMIO register access helper functions.
*/
+/**
+ * amdgpu_mm_rreg - read a memory mapped IO register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @acc_flags: access flags which require special behavior
+ *
+ * Returns the 32 bit value from the offset specified.
+ */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags)
{
@@ -127,6 +142,58 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
return ret;
}
+/*
+ * MMIO register read with bytes helper functions
+ * @offset: byte offset from MMIO start
+ */
+
+/**
+ * amdgpu_mm_rreg8 - read a memory mapped IO register
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: byte aligned register offset
+ *
+ * Returns the 8 bit value from the offset specified.
+ */
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
+ if (offset < adev->rmmio_size)
+ return (readb(adev->rmmio + offset));
+ BUG();
+}
+
+/*
+ * MMIO register write with bytes helper functions
+ * @offset: byte offset from MMIO start
+ * @value: the value to be written to the register
+ */
+/**
+ * amdgpu_mm_wreg8 - write a memory mapped IO register
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: byte aligned register offset
+ * @value: 8 bit value to write
+ *
+ * Writes the value specified to the offset specified.
+ */
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
+ if (offset < adev->rmmio_size)
+ writeb(value, adev->rmmio + offset);
+ else
+ BUG();
+}
+
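A minimal usage sketch of the new byte-wide accessors next to the existing dword helpers (my illustration, not part of this patch; the offsets and the helper name are made-up placeholders):

	static void example_mmio_byte_access(struct amdgpu_device *adev)
	{
		uint8_t  b;
		uint32_t v;

		b = amdgpu_mm_rreg8(adev, 0x0008);   /* byte offset from MMIO start  */
		amdgpu_mm_wreg8(adev, 0x0008, b);    /* write the same byte back     */

		v = amdgpu_mm_rreg(adev, 0x0002, 0); /* dword-aligned register index */
		amdgpu_mm_wreg(adev, 0x0002, v, 0);
	}
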
+/**
+ * amdgpu_mm_wreg - write to a memory mapped IO register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @v: 32 bit value to write to the register
+ * @acc_flags: access flags which require special behavior
+ *
+ * Writes the value specified to the offset specified.
+ */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags)
{
@@ -155,6 +222,14 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
}
}
+/**
+ * amdgpu_io_rreg - read an IO register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ *
+ * Returns the 32 bit value from the offset specified.
+ */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
if ((reg * 4) < adev->rio_mem_size)
@@ -165,6 +240,15 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
}
}
+/**
+ * amdgpu_io_wreg - write to an IO register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @v: 32 bit value to write to the register
+ *
+ * Writes the value specified to the offset specified.
+ */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
@@ -333,7 +417,15 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
BUG();
}
-static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Allocates a scratch page of VRAM for use by various things in the
+ * driver.
+ */
+static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
@@ -342,13 +434,20 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
(void **)&adev->vram_scratch.ptr);
}
-static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Frees the VRAM scratch page.
+ */
+static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
- * amdgpu_program_register_sequence - program an array of registers.
+ * amdgpu_device_program_register_sequence - program an array of registers.
*
* @adev: amdgpu_device pointer
* @registers: pointer to the register array
@@ -357,9 +456,9 @@ static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
* Programs an array or registers with and and or masks.
* This is a helper for setting golden registers.
*/
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
- const u32 *registers,
- const u32 array_size)
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
+ const u32 *registers,
+ const u32 array_size)
{
u32 tmp, reg, and_mask, or_mask;
int i;
@@ -383,7 +482,15 @@ void amdgpu_program_register_sequence(struct amdgpu_device *adev,
}
}
-void amdgpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_pci_config_reset - reset the GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Resets the GPU using the pci config reset sequence.
+ * Only applicable to asics prior to vega10.
+ */
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
@@ -392,14 +499,14 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
* GPU doorbell aperture helpers function.
*/
/**
- * amdgpu_doorbell_init - Init doorbell driver information.
+ * amdgpu_device_doorbell_init - Init doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
-static int amdgpu_doorbell_init(struct amdgpu_device *adev)
+static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
@@ -410,6 +517,9 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
return 0;
}
+ if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
+ return -EINVAL;
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@@ -429,66 +539,35 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_doorbell_fini - Tear down doorbell driver information.
+ * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Tear down doorbell driver information (CIK)
*/
-static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
+static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
iounmap(adev->doorbell.ptr);
adev->doorbell.ptr = NULL;
}
-/**
- * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
- * setup amdkfd
- *
- * @adev: amdgpu_device pointer
- * @aperture_base: output returning doorbell aperture base physical address
- * @aperture_size: output returning doorbell aperture size in bytes
- * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
- *
- * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
- * takes doorbells required for its own rings and reports the setup to amdkfd.
- * amdgpu reserved doorbells are at the start of the doorbell aperture.
- */
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset)
-{
- /*
- * The first num_doorbells are used by amdgpu.
- * amdkfd takes whatever's left in the aperture.
- */
- if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
- *aperture_base = adev->doorbell.base;
- *aperture_size = adev->doorbell.size;
- *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
- } else {
- *aperture_base = 0;
- *aperture_size = 0;
- *start_offset = 0;
- }
-}
+
/*
- * amdgpu_wb_*()
+ * amdgpu_device_wb_*()
* Writeback is the method by which the GPU updates special pages in memory
* with the status of certain GPU events (fences, ring pointers,etc.).
*/
/**
- * amdgpu_wb_fini - Disable Writeback and free memory
+ * amdgpu_device_wb_fini - Disable Writeback and free memory
*
* @adev: amdgpu_device pointer
*
* Disables Writeback and frees the Writeback memory (all asics).
* Used at driver shutdown.
*/
-static void amdgpu_wb_fini(struct amdgpu_device *adev)
+static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
amdgpu_bo_free_kernel(&adev->wb.wb_obj,
@@ -499,7 +578,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_wb_init- Init Writeback driver info and allocate memory
+ * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
*
* @adev: amdgpu_device pointer
*
@@ -507,7 +586,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
* Used at driver startup.
* Returns 0 on success or an -error on failure.
*/
-static int amdgpu_wb_init(struct amdgpu_device *adev)
+static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
int r;
@@ -526,14 +605,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
/* clear wb memory */
- memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
+ memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
}
return 0;
}
/**
- * amdgpu_wb_get - Allocate a wb entry
+ * amdgpu_device_wb_get - Allocate a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
@@ -541,7 +620,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
* Allocate a wb slot for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
@@ -555,61 +634,36 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
}
/**
- * amdgpu_wb_free - Free a wb entry
+ * amdgpu_device_wb_free - Free a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
*
* Free a wb slot allocated for use by the driver (all asics)
*/
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
+ wb >>= 3;
if (wb < adev->wb.num_wb)
- __clear_bit(wb >> 3, adev->wb.used);
+ __clear_bit(wb, adev->wb.used);
}
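To make the new granularity concrete, a hedged caller sketch (my illustration, assuming the usual adev->wb fields; not taken from this patch). The hunk above implies each writeback slot now spans 8 dwords, with amdgpu_device_wb_free() shifting the index back down to its bitmap bit:

	static int example_wb_slot(struct amdgpu_device *adev)
	{
		u32 wb;
		u64 gpu_addr;
		int r;

		r = amdgpu_device_wb_get(adev, &wb);   /* dword index, 8-dword aligned */
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + wb * 4; /* GPU address of the slot      */
		adev->wb.wb[wb] = 0;                   /* CPU view of the same dword   */

		/* ... point a ring/fence at gpu_addr, let the GPU write status ... */

		amdgpu_device_wb_free(adev, wb);       /* clears bit (wb >> 3)         */
		return 0;
	}
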
/**
- * amdgpu_vram_location - try to find VRAM location
+ * amdgpu_device_vram_location - try to find VRAM location
+ *
* @adev: amdgpu device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
* @base: base address at which to put VRAM
*
* Function will try to place VRAM at base address provided
- * as parameter (which is so far either PCI aperture address or
- * for IGP TOM base address).
- *
- * If there is not enough space to fit the unvisible VRAM in the 32bits
- * address space then we limit the VRAM size to the aperture.
- *
- * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
- * this shouldn't be a problem as we are using the PCI aperture as a reference.
- * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
- * not IGP.
- *
- * Note: we use mc_vram_size as on some board we need to program the mc to
- * cover the whole aperture even if VRAM size is inferior to aperture size
- * Novell bug 204882 + along with lots of ubuntu ones
- *
- * Note: when limiting vram it's safe to overwritte real_vram_size because
- * we are not in case where real_vram_size is inferior to mc_vram_size (ie
- * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
- * ones)
- *
- * Note: IGP TOM addr should be the same as the aperture addr, we don't
- * explicitly check for that though.
- *
- * FIXME: when reducing VRAM size align new size on power of 2.
+ * as parameter.
*/
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
+void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc, u64 base)
{
uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
mc->vram_start = base;
- if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
- dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
- mc->real_vram_size = mc->aper_size;
- mc->mc_vram_size = mc->aper_size;
- }
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
if (limit && limit < mc->real_vram_size)
mc->real_vram_size = limit;
@@ -619,7 +673,8 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
}
/**
- * amdgpu_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GTT location
+ *
* @adev: amdgpu device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
*
@@ -630,11 +685,12 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
*
* FIXME: when reducing GTT size align new size on power of 2.
*/
-void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc)
{
u64 size_af, size_bf;
- size_af = adev->mc.mc_mask - mc->vram_end;
+ size_af = adev->gmc.mc_mask - mc->vram_end;
size_bf = mc->vram_start;
if (size_bf > size_af) {
if (mc->gart_size > size_bf) {
@@ -647,93 +703,91 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
dev_warn(adev->dev, "limiting GTT\n");
mc->gart_size = size_af;
}
- mc->gart_start = mc->vram_end + 1;
+ /* VCE doesn't like it when BOs cross a 4GB segment, so align
+ * the GART base on a 4GB boundary as well.
+ */
+ mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
}
mc->gart_end = mc->gart_start + mc->gart_size - 1;
dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
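As a quick arithmetic illustration of the new alignment (mine, not from the patch): with VRAM ending just under 11 GiB, the GART base moves up to the next 4 GiB boundary instead of starting immediately after VRAM.

	/* Illustrative values only. */
	u64 vram_end   = 0x2BFFFFFFFULL;                      /* 11 GiB - 1      */
	u64 gart_start = ALIGN(vram_end + 1, 0x100000000ULL); /* = 0x300000000   */
	/* Without the ALIGN(), gart_start would have been 0x2C0000000 and a BO
	 * near the start of the GART could straddle a 4 GiB segment.
	 */
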
-/*
- * Firmware Reservation functions
- */
/**
- * amdgpu_fw_reserve_vram_fini - free fw reserved vram
+ * amdgpu_device_resize_fb_bar - try to resize FB BAR
*
* @adev: amdgpu_device pointer
*
- * free fw reserved vram if it has been reserved.
+ * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
+ * to fail, but if any of the BARs is not accessible after the resize we abort
+ * driver loading by returning -ENODEV.
*/
-void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
+int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
- amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
- NULL, &adev->fw_vram_usage.va);
-}
+ u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
+ u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
+ struct pci_bus *root;
+ struct resource *res;
+ unsigned i;
+ u16 cmd;
+ int r;
-/**
- * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
- *
- * @adev: amdgpu_device pointer
- *
- * create bo vram reservation from fw.
- */
-int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
-{
- int r = 0;
- u64 gpu_addr;
- u64 vram_size = adev->mc.visible_vram_size;
+ /* Bypass for VF */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
+ /* Check if the root BUS has 64bit memory resources */
+ root = adev->pdev->bus;
+ while (root->parent)
+ root = root->parent;
- if (adev->fw_vram_usage.size > 0 &&
- adev->fw_vram_usage.size <= vram_size) {
+ pci_bus_for_each_resource(root, res, i) {
+ if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
+ res->start > 0x100000000ull)
+ break;
+ }
- r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
- PAGE_SIZE, true, 0,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
- &adev->fw_vram_usage.reserved_bo);
- if (r)
- goto error_create;
+ /* Trying to resize is pointless without a root hub window above 4GB */
+ if (!res)
+ return 0;
- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
- if (r)
- goto error_reserve;
- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- adev->fw_vram_usage.start_offset,
- (adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size), &gpu_addr);
- if (r)
- goto error_pin;
- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
- if (r)
- goto error_kmap;
+ /* Disable memory decoding while we change the BAR addresses and size */
+ pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
+ pci_write_config_word(adev->pdev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
- }
- return r;
+ /* Free the VRAM and doorbell BAR, we most likely need to move both. */
+ amdgpu_device_doorbell_fini(adev);
+ if (adev->asic_type >= CHIP_BONAIRE)
+ pci_release_resource(adev->pdev, 2);
-error_kmap:
- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
- return r;
-}
+ pci_release_resource(adev->pdev, 0);
+
+ r = pci_resize_resource(adev->pdev, 0, rbar_size);
+ if (r == -ENOSPC)
+ DRM_INFO("Not enough PCI address space for a large BAR.");
+ else if (r && r != -ENOTSUPP)
+ DRM_ERROR("Problem resizing BAR0 (%d).", r);
+ pci_assign_unassigned_bus_resources(adev->pdev->bus);
+
+ /* When the doorbell or fb BAR isn't available we have no chance of
+ * using the device.
+ */
+ r = amdgpu_device_doorbell_init(adev);
+ if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
+ return -ENODEV;
+
+ pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
+
+ return 0;
+}
/*
* GPU helpers function.
*/
/**
- * amdgpu_need_post - check if the hw need post or not
+ * amdgpu_device_need_post - check if the hw need post or not
*
* @adev: amdgpu_device pointer
*
@@ -741,7 +795,7 @@ error_create:
* or post is needed if hw reset is performed.
* Returns true if need or false if not.
*/
-bool amdgpu_need_post(struct amdgpu_device *adev)
+bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -786,285 +840,9 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
return true;
}
-/**
- * amdgpu_dummy_page_init - init dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate the dummy page used by the driver (all asics).
- * This dummy page is used by the driver as a filler for gart entries
- * when pages are taken out of the GART
- * Returns 0 on sucess, -ENOMEM on failure.
- */
-int amdgpu_dummy_page_init(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page)
- return 0;
- adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
- if (adev->dummy_page.page == NULL)
- return -ENOMEM;
- adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
- dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-/**
- * amdgpu_dummy_page_fini - free dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the dummy page used by the driver (all asics).
- */
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page == NULL)
- return;
- pci_unmap_page(adev->pdev, adev->dummy_page.addr,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
-}
-
-
-/* ATOM accessor methods */
-/*
- * ATOM is an interpreted byte code stored in tables in the vbios. The
- * driver registers callbacks to access registers and the interpreter
- * in the driver parses the tables and executes then to program specific
- * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
- * atombios.h, and atom.c
- */
-
-/**
- * cail_pll_read - read PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- * Returns the value of the PLL register.
- */
-static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
-{
- return 0;
-}
-
-/**
- * cail_pll_write - write PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- * @val: value to write to the pll register
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- */
-static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
-
-}
-
-/**
- * cail_mc_read - read MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- *
- * Provides an MC register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MC register.
- */
-static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
-{
- return 0;
-}
-
-/**
- * cail_mc_write - write MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- * @val: value to write to the pll register
- *
- * Provides a MC register accessor for the atom interpreter (r4xx+).
- */
-static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
-
-}
-
-/**
- * cail_reg_write - write MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
- * @val: value to write to the pll register
- *
- * Provides a MMIO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
-
- WREG32(reg, val);
-}
-
-/**
- * cail_reg_read - read MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
- *
- * Provides an MMIO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MMIO register.
- */
-static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
-
- r = RREG32(reg);
- return r;
-}
-
-/**
- * cail_ioreg_write - write IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- * @val: value to write to the pll register
- *
- * Provides a IO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
-
- WREG32_IO(reg, val);
-}
-
-/**
- * cail_ioreg_read - read IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- *
- * Provides an IO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the IO register.
- */
-static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
-
- r = RREG32_IO(reg);
- return r;
-}
-
-static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct atom_context *ctx = adev->mode_info.atom_context;
-
- return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
-}
-
-static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
- NULL);
-
-/**
- * amdgpu_atombios_fini - free the driver info and callbacks for atombios
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the driver info and register access callbacks for the ATOM
- * interpreter (r4xx+).
- * Called at driver shutdown.
- */
-static void amdgpu_atombios_fini(struct amdgpu_device *adev)
-{
- if (adev->mode_info.atom_context) {
- kfree(adev->mode_info.atom_context->scratch);
- kfree(adev->mode_info.atom_context->iio);
- }
- kfree(adev->mode_info.atom_context);
- adev->mode_info.atom_context = NULL;
- kfree(adev->mode_info.atom_card_info);
- adev->mode_info.atom_card_info = NULL;
- device_remove_file(adev->dev, &dev_attr_vbios_version);
-}
-
-/**
- * amdgpu_atombios_init - init the driver info and callbacks for atombios
- *
- * @adev: amdgpu_device pointer
- *
- * Initializes the driver info and register access callbacks for the
- * ATOM interpreter (r4xx+).
- * Returns 0 on sucess, -ENOMEM on failure.
- * Called at driver startup.
- */
-static int amdgpu_atombios_init(struct amdgpu_device *adev)
-{
- struct card_info *atom_card_info =
- kzalloc(sizeof(struct card_info), GFP_KERNEL);
- int ret;
-
- if (!atom_card_info)
- return -ENOMEM;
-
- adev->mode_info.atom_card_info = atom_card_info;
- atom_card_info->dev = adev->ddev;
- atom_card_info->reg_read = cail_reg_read;
- atom_card_info->reg_write = cail_reg_write;
- /* needed for iio ops */
- if (adev->rio_mem) {
- atom_card_info->ioreg_read = cail_ioreg_read;
- atom_card_info->ioreg_write = cail_ioreg_write;
- } else {
- DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
- atom_card_info->ioreg_read = cail_reg_read;
- atom_card_info->ioreg_write = cail_reg_write;
- }
- atom_card_info->mc_read = cail_mc_read;
- atom_card_info->mc_write = cail_mc_write;
- atom_card_info->pll_read = cail_pll_read;
- atom_card_info->pll_write = cail_pll_write;
-
- adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
- if (!adev->mode_info.atom_context) {
- amdgpu_atombios_fini(adev);
- return -ENOMEM;
- }
-
- mutex_init(&adev->mode_info.atom_context->mutex);
- if (adev->is_atom_fw) {
- amdgpu_atomfirmware_scratch_regs_init(adev);
- amdgpu_atomfirmware_allocate_fb_scratch(adev);
- } else {
- amdgpu_atombios_scratch_regs_init(adev);
- amdgpu_atombios_allocate_fb_scratch(adev);
- }
-
- ret = device_create_file(adev->dev, &dev_attr_vbios_version);
- if (ret) {
- DRM_ERROR("Failed to create device file for VBIOS version\n");
- return ret;
- }
-
- return 0;
-}
-
/* if we get transitioned to only one device, take VGA back */
/**
- * amdgpu_vga_set_decode - enable/disable vga decode
+ * amdgpu_device_vga_set_decode - enable/disable vga decode
*
* @cookie: amdgpu_device pointer
* @state: enable/disable vga decode
@@ -1072,7 +850,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
-static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
+static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
struct amdgpu_device *adev = cookie;
amdgpu_asic_set_vga_state(adev, state);
@@ -1083,7 +861,17 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-static void amdgpu_check_block_size(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_check_block_size - validate the vm block size
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Validates the vm block size specified via module parameter.
+ * The vm block size defines number of bits in page table versus page directory,
+ * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
+ * page table and the remaining bits are in the page directory.
+ */
+static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
@@ -1094,64 +882,40 @@ static void amdgpu_check_block_size(struct amdgpu_device *adev)
if (amdgpu_vm_block_size < 9) {
dev_warn(adev->dev, "VM page table size (%d) too small\n",
amdgpu_vm_block_size);
- goto def_value;
+ amdgpu_vm_block_size = -1;
}
-
- if (amdgpu_vm_block_size > 24 ||
- (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
- dev_warn(adev->dev, "VM page table size (%d) too large\n",
- amdgpu_vm_block_size);
- goto def_value;
- }
-
- return;
-
-def_value:
- amdgpu_vm_block_size = -1;
}
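A small worked example of the bit budget described in the comment above (my illustration): with 4 KB pages there are 12 offset bits, so the minimum block size of 9 bits means a single page-table block maps 2 MB of GPU virtual address space.

	/* Illustrative arithmetic only. */
	unsigned int page_shift = 12;                  /* 4 KB pages               */
	unsigned int block_size = 9;                   /* module-parameter minimum */
	u64 block_bytes = 1ULL << (page_shift + block_size);   /* = 2 MB          */
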
-static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_check_vm_size - validate the vm size
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Validates the vm size in GB specified via module parameter.
+ * The VM size is the size of the GPU virtual memory space in GB.
+ */
+static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
/* no need to check the default value */
if (amdgpu_vm_size == -1)
return;
- if (!is_power_of_2(amdgpu_vm_size)) {
- dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
- amdgpu_vm_size);
- goto def_value;
- }
-
if (amdgpu_vm_size < 1) {
dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
amdgpu_vm_size);
- goto def_value;
+ amdgpu_vm_size = -1;
}
-
- /*
- * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
- */
- if (amdgpu_vm_size > 1024) {
- dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
- amdgpu_vm_size);
- goto def_value;
- }
-
- return;
-
-def_value:
- amdgpu_vm_size = -1;
}
/**
- * amdgpu_check_arguments - validate module params
+ * amdgpu_device_check_arguments - validate module params
*
* @adev: amdgpu_device pointer
*
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
-static void amdgpu_check_arguments(struct amdgpu_device *adev)
+static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
@@ -1184,9 +948,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_fragment_size = -1;
}
- amdgpu_check_vm_size(adev);
+ amdgpu_device_check_vm_size(adev);
- amdgpu_check_block_size(adev);
+ amdgpu_device_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!is_power_of_2(amdgpu_vram_page_split))) {
@@ -1194,6 +958,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
}
+
+ if (amdgpu_lockup_timeout == 0) {
+ dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
+ amdgpu_lockup_timeout = 10000;
+ }
+
+ adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
}
/**
@@ -1257,9 +1028,20 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
.can_switch = amdgpu_switcheroo_can_switch,
};
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state)
+/**
+ * amdgpu_device_ip_set_clockgating_state - set the CG state
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @state: clockgating state (gate or ungate)
+ *
+ * Sets the requested clockgating state for all instances of
+ * the hardware IP specified.
+ * Returns the error code from the last instance.
+ */
+int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state)
{
int i, r = 0;
@@ -1279,9 +1061,20 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
return r;
}
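A minimal caller sketch for the renamed helper (my illustration; the IP type and state are just examples), matching the "all instances" behaviour in the doc comment above:

	int r;

	/* Gate clocks on every UVD instance; the error code from the last
	 * instance is what gets returned.
	 */
	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						   AMD_CG_STATE_GATE);
	if (r)
		DRM_ERROR("UVD clockgating failed (%d)\n", r);
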
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state)
+/**
+ * amdgpu_device_ip_set_powergating_state - set the PG state
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @state: powergating state (gate or ungate)
+ *
+ * Sets the requested powergating state for all instances of
+ * the hardware IP specified.
+ * Returns the error code from the last instance.
+ */
+int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state)
{
int i, r = 0;
@@ -1301,7 +1094,19 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
return r;
}
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+/**
+ * amdgpu_device_ip_get_clockgating_state - get the CG state
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: clockgating feature flags
+ *
+ * Walks the list of IPs on the device and updates the clockgating
+ * flags for each IP.
+ * Updates @flags with the feature flags for each hardware IP where
+ * clockgating is enabled.
+ */
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
{
int i;
@@ -1313,8 +1118,17 @@ void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
}
}
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+/**
+ * amdgpu_device_ip_wait_for_idle - wait for idle
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Waits for the requested hardware IP to be idle.
+ * Returns 0 for success or a negative error code on failure.
+ */
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i, r;
@@ -1332,8 +1146,17 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
}
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+/**
+ * amdgpu_device_ip_is_idle - is the hardware IP idle
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Check if the hardware IP is idle or not.
+ * Returns true if the IP is idle, false if not.
+ */
+bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i;
@@ -1347,8 +1170,18 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
}
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+/**
+ * amdgpu_device_ip_get_ip_block - get a hw IP pointer
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Returns a pointer to the hardware IP block structure
+ * if it exists for the asic, otherwise NULL.
+ */
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
@@ -1360,7 +1193,7 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
}
/**
- * amdgpu_ip_block_version_cmp
+ * amdgpu_device_ip_block_version_cmp
*
* @adev: amdgpu_device pointer
* @type: enum amd_ip_block_type
@@ -1370,11 +1203,11 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
* return 0 if equal or greater
* return 1 if smaller or the ip_block doesn't exist
*/
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor)
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor)
{
- struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
if (ip_block && ((ip_block->version->major > major) ||
((ip_block->version->major == major) &&
@@ -1385,7 +1218,7 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
}
/**
- * amdgpu_ip_block_add
+ * amdgpu_device_ip_block_add
*
* @adev: amdgpu_device pointer
* @ip_block_version: pointer to the IP to add
@@ -1393,13 +1226,13 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
* Adds the IP block driver information to the collection of IPs
* on the asic.
*/
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version)
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
{
if (!ip_block_version)
return -EINVAL;
- DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
+ DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
@@ -1407,6 +1240,18 @@ int amdgpu_ip_block_add(struct amdgpu_device *adev,
return 0;
}
+/**
+ * amdgpu_device_enable_virtual_display - enable virtual display feature
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enables the virtual display feature if the user has enabled it via
+ * the module parameter virtual_display. This feature provides virtual
+ * display hardware on headless boards or in virtualized environments.
+ * This function parses and validates the configuration string specified by
+ * the user and configures the virtual display configuration (number of
+ * virtual connectors, crtcs, etc.) specified.
+ */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;
@@ -1452,6 +1297,16 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
+/**
+ * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Parses the asic configuration parameters specified in the gpu info
+ * firmware and makes them available to the driver for use in configuring
+ * the asic.
+ * Returns 0 on success, -EINVAL on failure.
+ */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -1489,6 +1344,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_VEGA10:
chip_name = "vega10";
break;
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
case CHIP_RAVEN:
chip_name = "raven";
break;
@@ -1550,7 +1408,17 @@ out:
return err;
}
-static int amdgpu_early_init(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_early_init - run early init for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Early initialization pass for hardware IPs. The hardware IPs that make
+ * up each asic are discovered and each IP's early_init callback is run. This
+ * is the first stage in initializing the asic.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
int i, r;
@@ -1602,8 +1470,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return r;
break;
#endif
- case CHIP_VEGA10:
- case CHIP_RAVEN:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_RAVEN:
if (adev->asic_type == CHIP_RAVEN)
adev->family = AMDGPU_FAMILY_RV;
else
@@ -1622,10 +1491,12 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ amdgpu_amdkfd_device_probe(adev);
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
- return r;
+ return -EAGAIN;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1657,7 +1528,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_init(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_init - run init for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main initialization pass for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the sw_init and hw_init callbacks
+ * are run. sw_init initializes the software state associated with each IP
+ * and hw_init initializes the hardware associated with each IP.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
int i, r;
@@ -1671,9 +1553,10 @@ static int amdgpu_init(struct amdgpu_device *adev)
return r;
}
adev->ip_blocks[i].status.sw = true;
+
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
- r = amdgpu_vram_scratch_init(adev);
+ r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
@@ -1683,9 +1566,9 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
}
- r = amdgpu_wb_init(adev);
+ r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_wb_init failed %d\n", r);
+ DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -1704,8 +1587,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.sw)
continue;
- /* gmc hw init is done early */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
+ if (adev->ip_blocks[i].status.hw)
continue;
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
@@ -1716,30 +1598,69 @@ static int amdgpu_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = true;
}
+ amdgpu_amdkfd_device_init(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, true);
+
return 0;
}
-static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Writes a reset magic value to the gart pointer in VRAM. The driver calls
+ * this function before a GPU reset. If the value is retained after a
+ * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
+ */
+static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}
-static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_check_vram_lost - check if vram is valid
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Checks the reset magic value written to the gart pointer in VRAM.
+ * The driver calls this after a GPU reset to see if the contents of
+ * VRAM is lost or not.
+ * Returns true if VRAM is lost, false if not.
+ */
+static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
return !!memcmp(adev->gart.ptr, adev->reset_magic,
AMDGPU_RESET_MAGIC_NUM);
}
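A hedged sketch of the call order these two helpers imply (illustration only; the actual reset path lives elsewhere in this file):

	/* Before resetting: remember the bytes currently sitting at the start
	 * of the GART table in VRAM.
	 */
	amdgpu_device_fill_reset_magic(adev);

	/* ... GPU reset ... */

	/* Afterwards: if those bytes changed, VRAM contents did not survive
	 * and VRAM-backed buffers must be restored.
	 */
	if (amdgpu_device_check_vram_lost(adev))
		DRM_INFO("VRAM appears to be lost after reset\n");
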
-static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_late_set_cg_state - late init for clockgating
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Late initialization pass enabling clockgating for hardware IPs.
+ * The list of all the hardware IPs that make up the asic is walked and the
+ * set_clockgating_state callbacks are run. This stage is run late
+ * in the init process.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
int i = 0, r;
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_GATE);
@@ -1753,7 +1674,19 @@ static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_late_init(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_late_init - run late init for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Late initialization pass for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the late_init callbacks are run.
+ * late_init covers any special initialization that an IP requires
+ * after all of the IPs have been initialized or something that needs to happen
+ * late in the init process.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
int i = 0, r;
@@ -1774,20 +1707,33 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
mod_delayed_work(system_wq, &adev->late_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
- amdgpu_fill_reset_magic(adev);
+ amdgpu_device_fill_reset_magic(adev);
return 0;
}
-static int amdgpu_fini(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_fini - run fini for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main teardown pass for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
+ * are run. hw_fini tears down the hardware associated with each IP
+ * and sw_fini tears down any software state associated with each IP.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_amdkfd_device_fini(adev);
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
@@ -1810,13 +1756,10 @@ static int amdgpu_fini(struct amdgpu_device *adev)
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
- amdgpu_wb_fini(adev);
- amdgpu_vram_scratch_fini(adev);
- }
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* ungate blocks before hw fini so that we can shutdown the blocks safely */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
@@ -1837,9 +1780,17 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.sw)
continue;
+
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ amdgpu_free_static_csa(adev);
+ amdgpu_device_wb_fini(adev);
+ amdgpu_device_vram_scratch_fini(adev);
+ }
+
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
@@ -1859,19 +1810,40 @@ static int amdgpu_fini(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev))
- amdgpu_virt_release_full_gpu(adev, false);
+ if (amdgpu_virt_release_full_gpu(adev, false))
+ DRM_ERROR("failed to release exclusive mode on fini\n");
return 0;
}
-static void amdgpu_late_init_func_handler(struct work_struct *work)
+/**
+ * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
+ *
+ * @work: work_struct
+ *
+ * Work handler for amdgpu_device_ip_late_set_cg_state. We put the
+ * clockgating setup into a worker thread to speed up driver init and
+ * resume from suspend.
+ */
+static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, late_init_work.work);
- amdgpu_late_set_cg_state(adev);
+ amdgpu_device_ip_late_set_cg_state(adev);
}
-int amdgpu_suspend(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int i, r;
@@ -1879,17 +1851,18 @@ int amdgpu_suspend(struct amdgpu_device *adev)
amdgpu_virt_request_full_gpu(adev, false);
/* ungate SMC block first */
- r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
- AMD_CG_STATE_UNGATE);
+ r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
+ DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
- if (i != AMD_IP_BLOCK_TYPE_SMC) {
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
@@ -1912,7 +1885,7 @@ int amdgpu_suspend(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
+static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -1935,13 +1908,15 @@ static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
r = block->version->funcs->hw_init(adev);
DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ if (r)
+ return r;
}
}
return 0;
}
-static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
+static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -1968,13 +1943,27 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
r = block->version->funcs->hw_init(adev);
DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ if (r)
+ return r;
}
}
return 0;
}
-static int amdgpu_resume_phase1(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * First resume function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run for
+ * COMMON, GMC, and IH. resume puts the hardware into a functional state
+ * after a suspend and updates the software state as necessary. This
+ * function is also used for restoring the GPU after a GPU reset.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
int i, r;
@@ -1982,9 +1971,8 @@ static int amdgpu_resume_phase1(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_IH) {
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
DRM_ERROR("resume of IP block <%s> failed %d\n",
@@ -1997,7 +1985,20 @@ static int amdgpu_resume_phase1(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume_phase2(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Second resume function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run for
+ * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
+ * functional state after a suspend and updates the software state as
+ * necessary. This function is also used for restoring the GPU after a GPU
+ * reset.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
int i, r;
@@ -2005,8 +2006,8 @@ static int amdgpu_resume_phase2(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
continue;
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
@@ -2019,18 +2020,37 @@ static int amdgpu_resume_phase2(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_resume - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main resume function for hardware IPs. The hardware IPs
+ * are split into two resume functions because they are
+ * also used in recovering from a GPU reset and some additional
+ * steps need to be taken between them. In this case (S3/S4) they are
+ * run sequentially.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_resume_phase1(adev);
+ r = amdgpu_device_ip_resume_phase1(adev);
if (r)
return r;
- r = amdgpu_resume_phase2(adev);
+ r = amdgpu_device_ip_resume_phase2(adev);
return r;
}
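For context, a rough sketch (my illustration) of how the two phases are interleaved on the reset-recovery path the comment above alludes to:

	/* Illustrative ordering only; the real recovery code adds more steps. */
	r = amdgpu_device_ip_resume_phase1(adev);     /* COMMON, GMC, IH first   */
	if (!r) {
		bool vram_lost = amdgpu_device_check_vram_lost(adev);

		if (vram_lost)
			DRM_INFO("VRAM was lost, buffers need restoring\n");
		r = amdgpu_device_ip_resume_phase2(adev); /* remaining IP blocks */
	}
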
+/**
+ * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Query the VBIOS data tables to determine if the board supports SR-IOV.
+ */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
@@ -2047,6 +2067,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
}
}
+/**
+ * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
+ *
+ * @asic_type: AMD asic type
+ *
+ * Check if there is DC (new modesetting infrastructure) support for an asic.
+ * Returns true if DC has support, false if not.
+ */
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
switch (asic_type) {
@@ -2054,6 +2082,8 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS11:
@@ -2064,10 +2094,8 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
return amdgpu_dc != 0;
#endif
- case CHIP_KABINI:
- case CHIP_MULLINS:
- return amdgpu_dc > 0;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
#endif
@@ -2121,14 +2149,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->flags = flags;
adev->asic_type = flags & AMD_ASIC_MASK;
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
- adev->mc.gart_size = 512 * 1024 * 1024;
+ if (amdgpu_emu_mode == 1)
+ adev->usec_timeout *= 2;
+ adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_rings = 0;
- adev->gart.gart_funcs = NULL;
+ adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -2163,8 +2193,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
+ mutex_init(&adev->lock_reset);
- amdgpu_check_arguments(adev);
+ amdgpu_device_check_arguments(adev);
spin_lock_init(&adev->mmio_idx_lock);
spin_lock_init(&adev->smc_idx_lock);
@@ -2179,13 +2210,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
- INIT_LIST_HEAD(&adev->gtt_list);
- spin_lock_init(&adev->gtt_list_lock);
-
INIT_LIST_HEAD(&adev->ring_lru_list);
spin_lock_init(&adev->ring_lru_list_lock);
- INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
+ INIT_DELAYED_WORK(&adev->late_init_work,
+ amdgpu_device_ip_late_init_func_handler);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -2205,7 +2234,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
/* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
+ amdgpu_device_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2218,18 +2247,18 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (adev->rio_mem == NULL)
DRM_INFO("PCI I/O BAR is not found.\n");
+ amdgpu_device_get_pcie_info(adev);
+
/* early init functions */
- r = amdgpu_early_init(adev);
+ r = amdgpu_device_ip_early_init(adev);
if (r)
return r;
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
- vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
+ vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
- if (amdgpu_runtime_pm == 1)
- runtime = true;
if (amdgpu_device_is_px(ddev))
runtime = true;
if (!pci_is_thunderbolt_attached(adev->pdev))
@@ -2238,6 +2267,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (runtime)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+ if (amdgpu_emu_mode == 1) {
+ /* post the asic on emulation mode */
+ emu_soc_asic_init(adev);
+ goto fence_driver_init;
+ }
+
/* Read BIOS */
if (!amdgpu_get_bios(adev)) {
r = -EINVAL;
@@ -2255,7 +2290,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (amdgpu_need_post(adev)) {
+ if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
r = -EINVAL;
@@ -2267,8 +2302,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
dev_err(adev->dev, "gpu post error!\n");
goto failed;
}
- } else {
- DRM_INFO("GPU post is not needed\n");
}
if (adev->is_atom_fw) {
@@ -2292,6 +2325,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_atombios_i2c_init(adev);
}
+fence_driver_init:
/* Fence driver */
r = amdgpu_fence_driver_init(adev);
if (r) {
@@ -2303,11 +2337,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* init the mode config */
drm_mode_config_init(adev->ddev);
- r = amdgpu_init(adev);
+ r = amdgpu_device_ip_init(adev);
if (r) {
- dev_err(adev->dev, "amdgpu_init failed\n");
+ /* failed in exclusive mode due to timeout */
+ if (amdgpu_sriov_vf(adev) &&
+ !amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_mmio_blocked(adev) &&
+ !amdgpu_virt_wait_reset(adev)) {
+ dev_err(adev->dev, "VF exclusive mode timeout\n");
+ /* Don't send request since VF is inactive. */
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ adev->virt.ops = NULL;
+ r = -EAGAIN;
+ goto failed;
+ }
+ dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
- amdgpu_fini(adev);
goto failed;
}
@@ -2343,7 +2388,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
- r = amdgpu_gem_debugfs_init(adev);
+ r = amdgpu_debugfs_gem_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -2351,17 +2396,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering register debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_test_ib_ring_init(adev);
- if (r)
- DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
-
r = amdgpu_debugfs_firmware_init(adev);
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_vbios_dump_init(adev);
+ r = amdgpu_debugfs_init(adev);
if (r)
- DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
+ DRM_ERROR("Creating debugfs files failed (%d).\n", r);
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
@@ -2379,9 +2420,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
- r = amdgpu_late_init(adev);
+ r = amdgpu_device_ip_late_init(adev);
if (r) {
- dev_err(adev->dev, "amdgpu_late_init failed\n");
+ dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
@@ -2392,6 +2433,7 @@ failed:
amdgpu_vf_error_trans_all(adev);
if (runtime)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
+
return r;
}
@@ -2409,15 +2451,19 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
DRM_INFO("amdgpu: finishing device.\n");
adev->shutdown = true;
- if (adev->mode_info.mode_config_initialized)
- drm_crtc_force_disable_all(adev->ddev);
- /* evict vram memory */
- amdgpu_bo_evict_vram(adev);
+ /* disable all interrupts */
+ amdgpu_irq_disable_all(adev);
+ if (adev->mode_info.mode_config_initialized){
+ if (!amdgpu_device_has_dc_support(adev))
+ drm_crtc_force_disable_all(adev->ddev);
+ else
+ drm_atomic_helper_shutdown(adev->ddev);
+ }
amdgpu_ib_pool_fini(adev);
- amdgpu_fw_reserve_vram_fini(adev);
amdgpu_fence_driver_fini(adev);
+ amdgpu_pm_sysfs_fini(adev);
amdgpu_fbdev_fini(adev);
- r = amdgpu_fini(adev);
+ r = amdgpu_device_ip_fini(adev);
if (adev->firmware.gpu_info_fw) {
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
@@ -2427,7 +2473,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
- amdgpu_atombios_fini(adev);
+
+ if (amdgpu_emu_mode != 1)
+ amdgpu_atombios_fini(adev);
+
kfree(adev->bios);
adev->bios = NULL;
if (!pci_is_thunderbolt_attached(adev->pdev))
@@ -2440,8 +2489,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
- amdgpu_pm_sysfs_fini(adev);
+ amdgpu_device_doorbell_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2521,7 +2569,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
amdgpu_fence_driver_suspend(adev);
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
@@ -2529,7 +2577,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2578,18 +2625,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
goto unlock;
}
- amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
- if (amdgpu_need_post(adev)) {
+ if (amdgpu_device_need_post(adev)) {
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r)
DRM_ERROR("amdgpu asic init failed\n");
}
- r = amdgpu_resume(adev);
+ r = amdgpu_device_ip_resume(adev);
if (r) {
- DRM_ERROR("amdgpu_resume failed (%d).\n", r);
+ DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
goto unlock;
}
amdgpu_fence_driver_resume(adev);
@@ -2600,7 +2646,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
DRM_ERROR("ib ring test failed (%d).\n", r);
}
- r = amdgpu_late_init(adev);
+ r = amdgpu_device_ip_late_init(adev);
if (r)
goto unlock;
@@ -2637,14 +2683,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
drm_modeset_unlock_all(dev);
- } else {
- /*
- * There is no equivalent atomic helper to turn on
- * display, so we defined our own function for this,
- * once suspend resume is supported by the atomic
- * framework this will be reworked
- */
- amdgpu_dm_display_resume(adev);
}
}
@@ -2680,7 +2718,17 @@ unlock:
return r;
}
-static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_check_soft_reset - did soft reset succeed
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The list of all the hardware IPs that make up the asic is walked and
+ * the check_soft_reset callbacks are run. check_soft_reset determines
+ * if the asic is still hung or not.
+ * Returns true if any of the IPs are still in a hung state, false if not.
+ */
+static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
int i;
bool asic_hang = false;
@@ -2702,7 +2750,18 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
return asic_hang;
}
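
The bodies of these IP-walk helpers are largely elided by this hunk; as a rough, hedged sketch of the pattern the kernel-doc above describes (the status/version field names are taken from amdgpu's IP-block structures, and the body is a simplification, not a copy of this patch):

static bool example_ip_check_soft_reset(struct amdgpu_device *adev)
{
        bool asic_hang = false;
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                /* each IP block may provide an optional check_soft_reset hook */
                if (adev->ip_blocks[i].version->funcs->check_soft_reset)
                        adev->ip_blocks[i].status.hang =
                                adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
                if (adev->ip_blocks[i].status.hang)
                        asic_hang = true;
        }
        return asic_hang;
}

The pre_soft_reset, soft_reset and post_soft_reset helpers renamed below follow the same walk, but call their respective hooks only for blocks whose status.hang flag is set.
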
-static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The list of all the hardware IPs that make up the asic is walked and the
+ * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
+ * handles any IP specific hardware or software state changes that are
+ * necessary for a soft reset to succeed.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2720,7 +2779,16 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
+ * reset is necessary to recover.
+ * Returns true if a full asic reset is required, false if not.
+ */
+static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
int i;
@@ -2741,7 +2809,18 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
return false;
}
-static int amdgpu_soft_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_soft_reset - do a soft reset
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The list of all the hardware IPs that make up the asic is walked and the
+ * soft_reset callbacks are run if the block is hung. soft_reset handles any
+ * IP specific hardware or software state changes that are necessary to soft
+ * reset the IP.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2759,7 +2838,18 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_post_soft_reset - clean up from soft reset
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The list of all the hardware IPs that make up the asic is walked and the
+ * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
+ * handles any IP specific hardware or software state changes that are
+ * necessary after the IP has been soft reset.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2776,18 +2866,23 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
return 0;
}
-bool amdgpu_need_backup(struct amdgpu_device *adev)
-{
- if (adev->flags & AMD_IS_APU)
- return false;
-
- return amdgpu_lockup_timeout > 0 ? true : false;
-}
-
-static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+/**
+ * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: amdgpu_bo buffer whose shadow is being restored
+ * @fence: dma_fence associated with the operation
+ *
+ * Restores the VRAM buffer contents from the shadow in GTT. Used to
+ * restore things like GPUVM page tables after a GPU reset where
+ * the contents of VRAM might be lost.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2820,98 +2915,46 @@ err:
}
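
The restore helper's body is mostly elided here; a minimal sketch of the shape such a shadow restore usually takes (restore_bo_from_shadow() is a placeholder name, not a real amdgpu symbol):

static int example_restore(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                           struct amdgpu_bo *bo, struct dma_fence **fence)
{
        int r;

        if (!bo->shadow)                /* nothing to restore from */
                return 0;

        r = amdgpu_bo_reserve(bo, true);
        if (r)
                return r;

        /* only buffers still resident in VRAM need their contents copied back */
        if (amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) == AMDGPU_GEM_DOMAIN_VRAM)
                r = restore_bo_from_shadow(adev, ring, bo, fence); /* placeholder */

        amdgpu_bo_unreserve(bo);
        return r;
}

The fence returned per buffer is what amdgpu_device_handle_vram_lost() below chains and waits on.
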
/**
- * amdgpu_sriov_gpu_reset - reset the asic
+ * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
*
- * @adev: amdgpu device pointer
- * @job: which job trigger hang
+ * @adev: amdgpu_device pointer
*
- * Attempt the reset the GPU if it has hung (all asics).
- * for SRIOV case.
- * Returns 0 for success or an error on failure.
+ * Restores the contents of VRAM buffers from the shadows in GTT. Used to
+ * restore things like GPUVM page tables after a GPU reset where
+ * the contents of VRAM might be lost.
+ * Returns 0 on success, 1 on failure.
*/
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
+static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
{
- int i, j, r = 0;
- int resched;
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *bo, *tmp;
- struct amdgpu_ring *ring;
struct dma_fence *fence = NULL, *next = NULL;
+ long r = 1;
+ int i = 0;
+ long tmo;
- mutex_lock(&adev->virt.lock_reset);
- atomic_inc(&adev->gpu_reset_counter);
- adev->in_sriov_reset = true;
-
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-
- /* we start from the ring trigger GPU hang */
- j = job ? job->ring->idx : 0;
-
- /* block scheduler */
- for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
- ring = adev->rings[i % AMDGPU_MAX_RINGS];
- if (!ring || !ring->sched.thread)
- continue;
-
- kthread_park(ring->sched.thread);
-
- if (job && j != i)
- continue;
-
- /* here give the last chance to check if job removed from mirror-list
- * since we already pay some time on kthread_park */
- if (job && list_empty(&job->base.node)) {
- kthread_unpark(ring->sched.thread);
- goto give_up_reset;
- }
-
- if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
- amd_sched_job_kickout(&job->base);
-
- /* only do job_reset on the hang ring if @job not NULL */
- amd_sched_hw_job_reset(&ring->sched);
-
- /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
- amdgpu_fence_driver_force_completion_ring(ring);
- }
-
- /* request to take full control of GPU before re-initialization */
- if (job)
- amdgpu_virt_reset_gpu(adev);
+ if (amdgpu_sriov_runtime(adev))
+ tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
else
- amdgpu_virt_request_full_gpu(adev, true);
-
-
- /* Resume IP prior to SMC */
- amdgpu_sriov_reinit_early(adev);
-
- /* we need recover gart prior to run SMC/CP/SDMA resume */
- amdgpu_ttm_recover_gart(adev);
+ tmo = msecs_to_jiffies(100);
- /* now we are okay to resume SMC/CP/SDMA */
- amdgpu_sriov_reinit_late(adev);
-
- amdgpu_irq_gpu_reset_resume_helper(adev);
-
- if (amdgpu_ib_ring_tests(adev))
- dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
-
- /* release full control of GPU after ib test */
- amdgpu_virt_release_full_gpu(adev, true);
-
- DRM_INFO("recover vram bo from shadow\n");
-
- ring = adev->mman.buffer_funcs_ring;
+ DRM_INFO("recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
next = NULL;
- amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+ amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
- r = dma_fence_wait(fence, false);
- if (r) {
- WARN(r, "recovery from shadow isn't completed\n");
+ r = dma_fence_wait_timeout(fence, false, tmo);
+ if (r == 0)
+ pr_err("wait fence %p[%d] timeout\n", fence, i);
+ else if (r < 0)
+ pr_err("wait fence %p[%d] interrupted\n", fence, i);
+ if (r < 1) {
+ dma_fence_put(fence);
+ fence = next;
break;
}
+ i++;
}
dma_fence_put(fence);
@@ -2920,204 +2963,259 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
mutex_unlock(&adev->shadow_list_lock);
if (fence) {
- r = dma_fence_wait(fence, false);
- if (r)
- WARN(r, "recovery from shadow isn't completed\n");
- }
- dma_fence_put(fence);
+ r = dma_fence_wait_timeout(fence, false, tmo);
+ if (r == 0)
+ pr_err("wait fence %p[%d] timeout\n", fence, i);
+ else if (r < 0)
+ pr_err("wait fence %p[%d] interrupted\n", fence, i);
- for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
- ring = adev->rings[i % AMDGPU_MAX_RINGS];
- if (!ring || !ring->sched.thread)
- continue;
-
- if (job && j != i) {
- kthread_unpark(ring->sched.thread);
- continue;
- }
-
- amd_sched_job_recovery(&ring->sched);
- kthread_unpark(ring->sched.thread);
}
+ dma_fence_put(fence);
- drm_helper_resume_force_mode(adev->ddev);
-give_up_reset:
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset failed\n");
- } else {
- dev_info(adev->dev, "GPU reset successed!\n");
- }
+ if (r > 0)
+ DRM_INFO("recover vram bo from shadow done\n");
+ else
+ DRM_ERROR("recover vram bo from shadow failed\n");
- adev->in_sriov_reset = false;
- mutex_unlock(&adev->virt.lock_reset);
- return r;
+ return (r > 0) ? 0 : 1;
}
/**
- * amdgpu_gpu_reset - reset the asic
+ * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
*
* @adev: amdgpu device pointer
*
- * Attempt the reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
+ * Attempt a soft reset, falling back to a full reset if needed, and reinitialize the ASIC.
+ * Returns 0 on success, negative error code on failure.
*/
-int amdgpu_gpu_reset(struct amdgpu_device *adev)
+static int amdgpu_device_reset(struct amdgpu_device *adev)
{
- struct drm_atomic_state *state = NULL;
- int i, r;
- int resched;
- bool need_full_reset, vram_lost = false;
-
- if (!amdgpu_check_soft_reset(adev)) {
- DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
- return 0;
- }
-
- atomic_inc(&adev->gpu_reset_counter);
-
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
- /* store modesetting */
- if (amdgpu_device_has_dc_support(adev))
- state = drm_atomic_helper_suspend(adev->ddev);
-
- /* block scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
- kthread_park(ring->sched.thread);
- amd_sched_hw_job_reset(&ring->sched);
- }
- /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
- amdgpu_fence_driver_force_completion(adev);
+ bool need_full_reset, vram_lost = 0;
+ int r;
- need_full_reset = amdgpu_need_full_reset(adev);
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
if (!need_full_reset) {
- amdgpu_pre_soft_reset(adev);
- r = amdgpu_soft_reset(adev);
- amdgpu_post_soft_reset(adev);
- if (r || amdgpu_check_soft_reset(adev)) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
DRM_INFO("soft reset failed, will fallback to full reset!\n");
need_full_reset = true;
}
}
if (need_full_reset) {
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
retry:
- amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_asic_reset(adev);
- amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (!r) {
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_resume_phase1(adev);
+ r = amdgpu_device_ip_resume_phase1(adev);
if (r)
goto out;
- vram_lost = amdgpu_check_vram_lost(adev);
+
+ vram_lost = amdgpu_device_check_vram_lost(adev);
if (vram_lost) {
DRM_ERROR("VRAM is lost!\n");
atomic_inc(&adev->vram_lost_counter);
}
- r = amdgpu_ttm_recover_gart(adev);
+
+ r = amdgpu_gtt_mgr_recover(
+ &adev->mman.bdev.man[TTM_PL_TT]);
if (r)
goto out;
- r = amdgpu_resume_phase2(adev);
+
+ r = amdgpu_device_ip_resume_phase2(adev);
if (r)
goto out;
+
if (vram_lost)
- amdgpu_fill_reset_magic(adev);
+ amdgpu_device_fill_reset_magic(adev);
}
}
+
out:
if (!r) {
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
if (r) {
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
need_full_reset = true;
goto retry;
}
- /**
- * recovery vm page tables, since we cannot depend on VRAM is
- * consistent after gpu full reset.
- */
- if (need_full_reset && amdgpu_need_backup(adev)) {
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct amdgpu_bo *bo, *tmp;
- struct dma_fence *fence = NULL, *next = NULL;
-
- DRM_INFO("recover vram bo from shadow\n");
- mutex_lock(&adev->shadow_list_lock);
- list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
- next = NULL;
- amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
- if (fence) {
- r = dma_fence_wait(fence, false);
- if (r) {
- WARN(r, "recovery from shadow isn't completed\n");
- break;
- }
- }
+ }
- dma_fence_put(fence);
- fence = next;
- }
- mutex_unlock(&adev->shadow_list_lock);
- if (fence) {
- r = dma_fence_wait(fence, false);
- if (r)
- WARN(r, "recovery from shadow isn't completed\n");
- }
- dma_fence_put(fence);
- }
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
+ if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
+ r = amdgpu_device_handle_vram_lost(adev);
- if (!ring || !ring->sched.thread)
- continue;
+ return r;
+}
- amd_sched_job_recovery(&ring->sched);
- kthread_unpark(ring->sched.thread);
- }
- } else {
- dev_err(adev->dev, "asic resume failed (%d).\n", r);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (adev->rings[i] && adev->rings[i]->sched.thread) {
- kthread_unpark(adev->rings[i]->sched.thread);
- }
- }
+/**
+ * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Perform a VF FLR (function level reset) and reinitialize the ASIC.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ bool from_hypervisor)
+{
+ int r;
+
+ if (from_hypervisor)
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ else
+ r = amdgpu_virt_reset_gpu(adev);
+ if (r)
+ return r;
+
+ /* Resume IP prior to SMC */
+ r = amdgpu_device_ip_reinit_early_sriov(adev);
+ if (r)
+ goto error;
+
+ /* we need recover gart prior to run SMC/CP/SDMA resume */
+ amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+
+ /* now we are okay to resume SMC/CP/SDMA */
+ r = amdgpu_device_ip_reinit_late_sriov(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+ if (r)
+ goto error;
+
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ r = amdgpu_ib_ring_tests(adev);
+
+ if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ atomic_inc(&adev->vram_lost_counter);
+ r = amdgpu_device_handle_vram_lost(adev);
+ }
+
+error:
+
+ return r;
+}
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job triggered the hang
+ * @force: forces reset regardless of amdgpu_gpu_recovery
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job, bool force)
+{
+ struct drm_atomic_state *state = NULL;
+ int i, r, resched;
+
+ if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+ return 0;
+ }
+
+ if (!force && (amdgpu_gpu_recovery == 0 ||
+ (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
+ DRM_INFO("GPU recovery disabled.\n");
+ return 0;
+ }
+
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ mutex_lock(&adev->lock_reset);
+ atomic_inc(&adev->gpu_reset_counter);
+ adev->in_gpu_reset = 1;
+
+ /* block TTM */
+ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+
+ /* store modesetting */
+ if (amdgpu_device_has_dc_support(adev))
+ state = drm_atomic_helper_suspend(adev->ddev);
+
+ /* block all schedulers and reset given job's ring */
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ kthread_park(ring->sched.thread);
+
+ if (job && job->ring->idx != i)
+ continue;
+
+ drm_sched_hw_job_reset(&ring->sched, &job->base);
+
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(ring);
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ r = amdgpu_device_reset_sriov(adev, job ? false : true);
+ else
+ r = amdgpu_device_reset(adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ /* only the scheduler of the given job's ring needs recovery,
+ * or the schedulers of all rings when @job is NULL,
+ * once the reset above has completed
+ */
+ if ((!job || job->ring->idx == i) && !r)
+ drm_sched_job_recovery(&ring->sched);
+
+ kthread_unpark(ring->sched.thread);
}
if (amdgpu_device_has_dc_support(adev)) {
- r = drm_atomic_helper_resume(adev->ddev, state);
- amdgpu_dm_display_resume(adev);
- } else
+ if (drm_atomic_helper_resume(adev->ddev, state))
+ dev_info(adev->dev, "drm resume failed:%d\n", r);
+ } else {
drm_helper_resume_force_mode(adev->ddev);
+ }
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+
if (r) {
/* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset failed\n");
- }
- else {
- dev_info(adev->dev, "GPU reset successed!\n");
+ dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
}
amdgpu_vf_error_trans_all(adev);
+ adev->in_gpu_reset = 0;
+ mutex_unlock(&adev->lock_reset);
return r;
}
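
A hedged sketch of the typical call site: the GPU scheduler's job-timeout path hands the hung job in so only that ring is recovered (the job->adev field and handler shape are assumptions, not part of this hunk):

static void example_job_timedout(struct amdgpu_job *job)
{
        /* non-forced recovery; honours the amdgpu_gpu_recovery parameter */
        amdgpu_device_gpu_recover(job->adev, job, false);
}
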
-void amdgpu_get_pcie_info(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIE slot
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Fetches and stores in the driver the PCIE capabilities (gen speed
+ * and lanes) of the slot the device is in. Handles APUs and
+ * virtualized environments where PCIE config space may not be available.
+ */
+static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
int ret;
@@ -3209,773 +3307,3 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev)
}
}
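
A simplified, hedged sketch of how the gen mask is typically derived; the amdgpu_pcie_gen_cap override, the AMDGPU_DEFAULT_PCIE_GEN_MASK fallback and the drm_pcie_get_speed_cap_mask() query are assumptions about the usual implementation, and the final bit remapping is omitted:

static void example_get_pcie_gen_mask(struct amdgpu_device *adev)
{
        u32 mask;

        if (amdgpu_pcie_gen_cap) {
                /* explicit override from the pcie_gen_cap module parameter */
                adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
                return;
        }

        if (adev->flags & AMD_IS_APU) {
                /* APUs have no discrete PCIe link to query */
                adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
                return;
        }

        if (drm_pcie_get_speed_cap_mask(adev->ddev, &mask) == 0)
                adev->pm.pcie_gen_mask = mask;  /* simplified; real code remaps bits */
}
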
-/*
- * Debugfs
- */
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles)
-{
- unsigned i;
-
- for (i = 0; i < adev->debugfs_count; i++) {
- if (adev->debugfs[i].files == files) {
- /* Already registered */
- return 0;
- }
- }
-
- i = adev->debugfs_count + 1;
- if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
- DRM_ERROR("Reached maximum number of debugfs components.\n");
- DRM_ERROR("Report so we increase "
- "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
- }
- adev->debugfs[adev->debugfs_count].files = files;
- adev->debugfs[adev->debugfs_count].num_files = nfiles;
- adev->debugfs_count = i;
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_create_files(files, nfiles,
- adev->ddev->primary->debugfs_root,
- adev->ddev->primary);
-#endif
- return 0;
-}
-
-#if defined(CONFIG_DEBUG_FS)
-
-static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- goto end;
-
- value = RREG32(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto end;
- }
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
-end:
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- return result;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_PCIE(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_PCIE(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_DIDT(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_DIDT(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_SMC(*pos);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_SMC(*pos, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- uint32_t *config, no_regs = 0;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- /* version, increment each time something is added */
- config[no_regs++] = 3;
- config[no_regs++] = adev->gfx.config.max_shader_engines;
- config[no_regs++] = adev->gfx.config.max_tile_pipes;
- config[no_regs++] = adev->gfx.config.max_cu_per_sh;
- config[no_regs++] = adev->gfx.config.max_sh_per_se;
- config[no_regs++] = adev->gfx.config.max_backends_per_se;
- config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
- config[no_regs++] = adev->gfx.config.max_gprs;
- config[no_regs++] = adev->gfx.config.max_gs_threads;
- config[no_regs++] = adev->gfx.config.max_hw_contexts;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
- config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.num_tile_pipes;
- config[no_regs++] = adev->gfx.config.backend_enable_mask;
- config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
- config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
- config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
- config[no_regs++] = adev->gfx.config.num_gpus;
- config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
- config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
- config[no_regs++] = adev->gfx.config.gb_addr_config;
- config[no_regs++] = adev->gfx.config.num_rbs;
-
- /* rev==1 */
- config[no_regs++] = adev->rev_id;
- config[no_regs++] = adev->pg_flags;
- config[no_regs++] = adev->cg_flags;
-
- /* rev==2 */
- config[no_regs++] = adev->family;
- config[no_regs++] = adev->external_rev_id;
-
- /* rev==3 */
- config[no_regs++] = adev->pdev->device;
- config[no_regs++] = adev->pdev->revision;
- config[no_regs++] = adev->pdev->subsystem_device;
- config[no_regs++] = adev->pdev->subsystem_vendor;
-
- while (size && (*pos < no_regs * 4)) {
- uint32_t value;
-
- value = config[*pos >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- kfree(config);
- return r;
- }
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- kfree(config);
- return result;
-}
-
-static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- int idx, x, outsize, r, valuesize;
- uint32_t values[16];
-
- if (size & 3 || *pos & 0x3)
- return -EINVAL;
-
- if (amdgpu_dpm == 0)
- return -EINVAL;
-
- /* convert offset to sensor number */
- idx = *pos >> 2;
-
- valuesize = sizeof(values);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- else
- return -EINVAL;
-
- if (size > valuesize)
- return -EINVAL;
-
- outsize = 0;
- x = 0;
- if (!r) {
- while (size) {
- r = put_user(values[x++], (int32_t *)buf);
- buf += 4;
- size -= 4;
- outsize += 4;
- }
- }
-
- return !r ? outsize : r;
-}
-
-static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r, x;
- ssize_t result=0;
- uint32_t offset, se, sh, cu, wave, simd, data[32];
-
- if (size & 3 || *pos & 3)
- return -EINVAL;
-
- /* decode offset */
- offset = (*pos & GENMASK_ULL(6, 0));
- se = (*pos & GENMASK_ULL(14, 7)) >> 7;
- sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
- cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
- wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
- simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
-
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
-
- x = 0;
- if (adev->gfx.funcs->read_wave_data)
- adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
-
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- if (!x)
- return -EINVAL;
-
- while (size && (offset < x * 4)) {
- uint32_t value;
-
- value = data[offset >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- offset += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r;
- ssize_t result = 0;
- uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
-
- if (size & 3 || *pos & 3)
- return -EINVAL;
-
- /* decode offset */
- offset = *pos & GENMASK_ULL(11, 0);
- se = (*pos & GENMASK_ULL(19, 12)) >> 12;
- sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
- cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
- wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
- simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
- thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
- bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
-
- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
-
- if (bank == 0) {
- if (adev->gfx.funcs->read_wave_vgprs)
- adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
- } else {
- if (adev->gfx.funcs->read_wave_sgprs)
- adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
- }
-
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- while (size) {
- uint32_t value;
-
- value = data[offset++];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto err;
- }
-
- result += 4;
- buf += 4;
- size -= 4;
- }
-
-err:
- kfree(data);
- return result;
-}
-
-static const struct file_operations amdgpu_debugfs_regs_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_read,
- .write = amdgpu_debugfs_regs_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_didt_read,
- .write = amdgpu_debugfs_regs_didt_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_pcie_read,
- .write = amdgpu_debugfs_regs_pcie_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_smc_read,
- .write = amdgpu_debugfs_regs_smc_write,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_gca_config_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gca_config_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_sensors_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_sensor_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_wave_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_wave_read,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_gpr_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gpr_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations *debugfs_regs[] = {
- &amdgpu_debugfs_regs_fops,
- &amdgpu_debugfs_regs_didt_fops,
- &amdgpu_debugfs_regs_pcie_fops,
- &amdgpu_debugfs_regs_smc_fops,
- &amdgpu_debugfs_gca_config_fops,
- &amdgpu_debugfs_sensors_fops,
- &amdgpu_debugfs_wave_fops,
- &amdgpu_debugfs_gpr_fops,
-};
-
-static const char *debugfs_regs_names[] = {
- "amdgpu_regs",
- "amdgpu_regs_didt",
- "amdgpu_regs_pcie",
- "amdgpu_regs_smc",
- "amdgpu_gca_config",
- "amdgpu_sensors",
- "amdgpu_wave",
- "amdgpu_gpr",
-};
-
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
-{
- struct drm_minor *minor = adev->ddev->primary;
- struct dentry *ent, *root = minor->debugfs_root;
- unsigned i, j;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | S_IRUGO, root,
- adev, debugfs_regs[i]);
- if (IS_ERR(ent)) {
- for (j = 0; j < i; j++) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- return PTR_ERR(ent);
- }
-
- if (!i)
- i_size_write(ent->d_inode, adev->rmmio_size);
- adev->debugfs_regs[i] = ent;
- }
-
- return 0;
-}
-
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- if (adev->debugfs_regs[i]) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- }
-}
-
-static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- int r = 0, i;
-
- /* hold on the scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
- kthread_park(ring->sched.thread);
- }
-
- seq_printf(m, "run ib test:\n");
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- seq_printf(m, "ib ring tests failed (%d).\n", r);
- else
- seq_printf(m, "ib ring tests passed.\n");
-
- /* go on the scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
- kthread_unpark(ring->sched.thread);
- }
-
- return 0;
-}
-
-static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
- {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
-};
-
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
-{
- return amdgpu_debugfs_add_files(adev,
- amdgpu_debugfs_test_ib_ring_list, 1);
-}
-
-int amdgpu_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
-
-static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- seq_write(m, adev->bios, adev->bios_size);
- return 0;
-}
-
-static const struct drm_info_list amdgpu_vbios_dump_list[] = {
- {"amdgpu_vbios",
- amdgpu_debugfs_get_vbios_dump,
- 0, NULL},
-};
-
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
-{
- return amdgpu_debugfs_add_files(adev,
- amdgpu_vbios_dump_list, 1);
-}
-#else
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 138beb5..93f700a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -29,13 +29,16 @@
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
+#include "amdgpu_display.h"
#include <asm/div64.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
-static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+static void amdgpu_display_flip_callback(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
@@ -44,8 +47,8 @@ static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
schedule_work(&work->flip_work.work);
}
-static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
- struct dma_fence **f)
+static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
+ struct dma_fence **f)
{
struct dma_fence *fence= *f;
@@ -54,14 +57,15 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
*f = NULL;
- if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+ if (!dma_fence_add_callback(fence, &work->cb,
+ amdgpu_display_flip_callback))
return true;
dma_fence_put(fence);
return false;
}
-static void amdgpu_flip_work_func(struct work_struct *__work)
+static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
struct delayed_work *delayed_work =
container_of(__work, struct delayed_work, work);
@@ -75,20 +79,20 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
unsigned i;
int vpos, hpos;
- if (amdgpu_flip_handle_fence(work, &work->excl))
+ if (amdgpu_display_flip_handle_fence(work, &work->excl))
return;
for (i = 0; i < work->shared_count; ++i)
- if (amdgpu_flip_handle_fence(work, &work->shared[i]))
+ if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
return;
/* Wait until we're out of the vertical blank period before the one
* targeted by the flip
*/
if (amdgpu_crtc->enabled &&
- (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
- &vpos, &hpos, NULL, NULL,
- &crtc->hwmode)
+ (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
@@ -116,7 +120,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
/*
* Handle unpin events outside the interrupt handler proper.
*/
-static void amdgpu_unpin_work_func(struct work_struct *__work)
+static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
struct amdgpu_flip_work *work =
container_of(__work, struct amdgpu_flip_work, unpin_work);
@@ -138,11 +142,11 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
kfree(work);
}
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags, uint32_t target,
- struct drm_modeset_acquire_ctx *ctx)
+int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -161,8 +165,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
- INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
- INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
+ INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
+ INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);
work->event = event;
work->adev = adev;
@@ -188,7 +192,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
@@ -206,7 +210,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
amdgpu_bo_unreserve(new_abo);
work->base = base;
- work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+ work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
/* we borrow the event spin lock for protecting flip_work */
@@ -227,7 +231,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- amdgpu_flip_work_func(&work->flip_work.work);
+ amdgpu_display_flip_work_func(&work->flip_work.work);
return 0;
pflip_cleanup:
@@ -253,8 +257,8 @@ cleanup:
return r;
}
-int amdgpu_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
+int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev;
struct amdgpu_device *adev;
@@ -351,7 +355,7 @@ static const char *hpd_names[6] = {
"HPD6",
};
-void amdgpu_print_display_setup(struct drm_device *dev)
+void amdgpu_display_print_display_setup(struct drm_device *dev)
{
struct drm_connector *connector;
struct amdgpu_connector *amdgpu_connector;
@@ -428,11 +432,11 @@ void amdgpu_print_display_setup(struct drm_device *dev)
}
/**
- * amdgpu_ddc_probe
+ * amdgpu_display_ddc_probe
*
*/
-bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
- bool use_aux)
+bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ bool use_aux)
{
u8 out = 0x0;
u8 buf[8];
@@ -478,7 +482,7 @@ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
return true;
}
-static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
+static void amdgpu_display_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -487,9 +491,10 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
kfree(amdgpu_fb);
}
-static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
+static int amdgpu_display_user_framebuffer_create_handle(
+ struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
{
struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -497,15 +502,28 @@ static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
}
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
- .destroy = amdgpu_user_framebuffer_destroy,
- .create_handle = amdgpu_user_framebuffer_create_handle,
+ .destroy = amdgpu_display_user_framebuffer_destroy,
+ .create_handle = amdgpu_display_user_framebuffer_create_handle,
};
-int
-amdgpu_framebuffer_init(struct drm_device *dev,
- struct amdgpu_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
+{
+ uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN &&
+ adev->flags & AMD_IS_APU &&
+ amdgpu_device_asic_has_dc_support(adev->asic_type))
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+#endif
+
+ return domain;
+}
+
+int amdgpu_display_framebuffer_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
{
int ret;
rfb->obj = obj;
@@ -519,9 +537,9 @@ amdgpu_framebuffer_init(struct drm_device *dev,
}
struct drm_framebuffer *
-amdgpu_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
@@ -546,7 +564,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
+ ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put_unlocked(obj);
@@ -556,15 +574,9 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return &amdgpu_fb->base;
}
-void amdgpu_output_poll_changed(struct drm_device *dev)
-{
- struct amdgpu_device *adev = dev->dev_private;
- amdgpu_fb_output_poll_changed(adev);
-}
-
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
- .fb_create = amdgpu_user_framebuffer_create,
- .output_poll_changed = amdgpu_output_poll_changed
+ .fb_create = amdgpu_display_user_framebuffer_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
};
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
@@ -585,7 +597,7 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
-int amdgpu_modeset_create_props(struct amdgpu_device *adev)
+int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
int sz;
@@ -634,7 +646,7 @@ int amdgpu_modeset_create_props(struct amdgpu_device *adev)
return 0;
}
-void amdgpu_update_display_priority(struct amdgpu_device *adev)
+void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
/* adjustment options for the display watermarks */
if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
@@ -644,7 +656,7 @@ void amdgpu_update_display_priority(struct amdgpu_device *adev)
}
-static bool is_hdtv_mode(const struct drm_display_mode *mode)
+static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
/* try and guess if this is a tv or a monitor */
if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
@@ -656,9 +668,9 @@ static bool is_hdtv_mode(const struct drm_display_mode *mode)
return false;
}
-bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
@@ -701,7 +713,7 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
- is_hdtv_mode(mode)))) {
+ amdgpu_display_is_hdtv_mode(mode)))) {
if (amdgpu_encoder->underscan_hborder != 0)
amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
else
@@ -769,10 +781,10 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- unsigned int flags, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+ unsigned int pipe, unsigned int flags, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
u32 vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
@@ -864,7 +876,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
return ret;
}
-int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
+int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
return AMDGPU_CRTC_IRQ_NONE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 3cc0ef0..2b11d80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,11 +23,10 @@
#ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__
+uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
struct drm_framebuffer *
-amdgpu_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd);
-
-void amdgpu_output_poll_changed(struct drm_device *dev);
+amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 56caaee..643d008 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -265,9 +265,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
-#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
-
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
@@ -328,8 +325,8 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_set_mclk_od(adev, value) \
((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \
- ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output))
+#define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \
+ ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
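
The dispatch hook now takes a requested user power state rather than separate input/output blobs. A hedged example of a call site (the task id and state value are illustrative, not part of this hunk):

        enum amd_pm_state_type state = POWER_STATE_TYPE_BALANCED;

        amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
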
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
@@ -344,22 +341,36 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request))
-#define amdgpu_dpm_get_power_profile_state(adev, query) \
- ((adev)->powerplay.pp_funcs->get_power_profile_state(\
- (adev)->powerplay.pp_handle, query))
-
-#define amdgpu_dpm_set_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->set_power_profile_state(\
- (adev)->powerplay.pp_handle, request))
-
-#define amdgpu_dpm_switch_power_profile(adev, type) \
+#define amdgpu_dpm_switch_power_profile(adev, type, en) \
((adev)->powerplay.pp_funcs->switch_power_profile(\
- (adev)->powerplay.pp_handle, type))
+ (adev)->powerplay.pp_handle, type, en))
#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
+#define amdgpu_dpm_notify_smu_memory_info(adev, virtual_addr_low, \
+ virtual_addr_hi, mc_addr_low, mc_addr_hi, size) \
+ ((adev)->powerplay.pp_funcs->notify_smu_memory_info)( \
+ (adev)->powerplay.pp_handle, virtual_addr_low, \
+ virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
+
+#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
+ ((adev)->powerplay.pp_funcs->get_power_profile_mode(\
+ (adev)->powerplay.pp_handle, buf))
+
+#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
+ ((adev)->powerplay.pp_funcs->set_power_profile_mode(\
+ (adev)->powerplay.pp_handle, parameter, size))
+
+#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
+ ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
+ (adev)->powerplay.pp_handle, type, parameter, size))
+
+#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
+ ((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
+ (adev)->powerplay.pp_handle))
+
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
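
A hedged sketch of how a caller might drive the new power-profile-mode macros added above; the helper below is hypothetical and only illustrates the expected call pattern (check the pp_funcs pointer before dispatching, then pass a buffer for the mode table).

/* Hypothetical helper, not part of this patch. */
static ssize_t example_show_power_profile(struct amdgpu_device *adev,
					  char *buf)
{
	if (!adev->powerplay.pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	return amdgpu_dpm_get_power_profile_mode(adev, buf);
}
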
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index c2f414f..7379aa5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -73,9 +73,11 @@
* - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
* - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
* - 3.23.0 - Add query for VRAM lost counter
+ * - 3.24.0 - Add high priority compute support for gfx9
+ * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 23
+#define KMS_DRIVER_MINOR 25
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -90,7 +92,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 10000;
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
@@ -119,7 +121,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-uint amdgpu_pp_feature_mask = 0xffffffff;
+uint amdgpu_pp_feature_mask = 0xffffbfff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -128,6 +130,8 @@ int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = -1; /* auto */
+int amdgpu_emu_mode = 0;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -165,7 +169,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -216,7 +220,7 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);
-MODULE_PARM_DESC(dc, "Display Core Log Level (0 = minimal (default), 1 = chatty");
+MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty)");
module_param_named(dc_log, amdgpu_dc_log, int, 0444);
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
@@ -280,6 +284,12 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
+MODULE_PARM_DESC(emu_mode, "Emulation mode (1 = enable, 0 = disable)");
+module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -306,7 +316,6 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
-
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -535,6 +544,12 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ /* Vega 12 */
+ {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
/* Raven */
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
@@ -566,12 +581,18 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
return 0;
}
+
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct drm_device *dev;
unsigned long flags = ent->driver_data;
- int ret;
+ int ret, retry = 0;
+ bool supports_atomic = false;
+
+ if (!amdgpu_virtual_display &&
+ amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
+ supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
DRM_INFO("This hardware requires experimental hardware support.\n"
@@ -592,6 +613,13 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
+ /* warn the user if they mix atomic and non-atomic capable GPUs */
+ if ((kms_driver.driver_features & DRIVER_ATOMIC) && !supports_atomic)
+ DRM_ERROR("Mixing atomic and non-atomic capable GPUs!\n");
+ /* support atomic early so the atomic debugfs stuff gets created */
+ if (supports_atomic)
+ kms_driver.driver_features |= DRIVER_ATOMIC;
+
dev = drm_dev_alloc(&kms_driver, &pdev->dev);
if (IS_ERR(dev))
return PTR_ERR(dev);
@@ -604,8 +632,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+retry_init:
ret = drm_dev_register(dev, ent->driver_data);
- if (ret)
+ if (ret == -EAGAIN && ++retry <= 3) {
+ DRM_INFO("retry init %d\n", retry);
+ /* Don't request EX mode too frequently; the host may treat it as an attack */
+ msleep(5000);
+ goto retry_init;
+ } else if (ret)
goto err_pci;
return 0;
@@ -639,7 +673,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
* unfortunately we can't detect certain
* hypervisors so just do this all the time.
*/
- amdgpu_suspend(adev);
+ amdgpu_device_ip_suspend(adev);
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -710,7 +744,6 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
ret = amdgpu_device_suspend(drm_dev, false, false);
pci_save_state(pdev);
@@ -747,7 +780,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
ret = amdgpu_device_resume(drm_dev, false, false);
drm_kms_helper_poll_enable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
return 0;
}
@@ -825,8 +857,8 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
- return amdgpu_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
- stime, etime, mode);
+ return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
}
static struct drm_driver kms_driver = {
@@ -844,12 +876,6 @@ static struct drm_driver kms_driver = {
.disable_vblank = amdgpu_disable_vblank_kms,
.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
.get_scanout_position = amdgpu_get_crtc_scanout_position,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = amdgpu_debugfs_init,
-#endif
- .irq_preinstall = amdgpu_irq_preinstall,
- .irq_postinstall = amdgpu_irq_postinstall,
- .irq_uninstall = amdgpu_irq_uninstall,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.gem_free_object_unlocked = amdgpu_gem_object_free,
@@ -862,9 +888,7 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_pin = amdgpu_gem_prime_pin,
- .gem_prime_unpin = amdgpu_gem_prime_unpin,
+ .gem_prime_import = amdgpu_gem_prime_import,
.gem_prime_res_obj = amdgpu_gem_prime_res_obj,
.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
@@ -906,10 +930,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
- r = amd_sched_fence_slab_init();
- if (r)
- goto error_sched;
-
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -922,9 +942,6 @@ static int __init amdgpu_init(void)
/* let modprobe override vga console setting */
return pci_register_driver(pdriver);
-error_sched:
- amdgpu_fence_slab_fini();
-
error_fence:
amdgpu_sync_fini();
@@ -938,7 +955,6 @@ static void __exit amdgpu_exit(void)
pci_unregister_driver(pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
- amd_sched_fence_slab_fini();
amdgpu_fence_slab_fini();
}
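
For context, a sketch (not part of this patch) of how a tri-state module parameter such as the new gpu_recovery option is typically resolved at the point of use. The helper name and the auto (-1) policy shown here are assumptions for illustration only.

/* Hypothetical: resolve the -1/0/1 tri-state of amdgpu_gpu_recovery. */
static bool example_gpu_recovery_enabled(struct amdgpu_device *adev)
{
	if (amdgpu_gpu_recovery == 0)
		return false;			/* explicitly disabled */
	if (amdgpu_gpu_recovery == 1)
		return true;			/* explicitly enabled */
	return amdgpu_sriov_vf(adev);		/* auto: assume VFs only */
}
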
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 90fa8e8..1206301 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -38,6 +38,8 @@
#include <linux/vga_switcheroo.h>
+#include "amdgpu_display.h"
+
/* object hierarchy -
this contains a helper + a amdgpu fb
the helper contains a pointer to amdgpu framebuffer baseclass.
@@ -124,7 +126,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *abo = NULL;
bool fb_tiled = false; /* useful for testing */
- u32 tiling_flags = 0;
+ u32 tiling_flags = 0, domain;
int ret;
int aligned_size, size;
int height = mode_cmd->height;
@@ -135,12 +137,12 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
fb_tiled);
+ domain = amdgpu_display_framebuffer_domains(adev);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
- ret = amdgpu_gem_object_create(adev, aligned_size, 0,
- AMDGPU_GEM_DOMAIN_VRAM,
+ ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
@@ -166,7 +168,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
}
- ret = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
+ ret = amdgpu_bo_pin(abo, domain, NULL);
if (ret) {
amdgpu_bo_unreserve(abo);
goto out_unref;
@@ -225,7 +227,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->par = rfbdev;
info->skip_vt_switch = true;
- ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
+ &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out;
@@ -242,8 +245,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->fbops = &amdgpufb_ops;
- tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
- info->fix.smem_start = adev->mc.aper_base + tmp;
+ tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
+ info->fix.smem_start = adev->gmc.aper_base + tmp;
info->fix.smem_len = amdgpu_bo_size(abo);
info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
@@ -252,7 +255,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
- info->apertures->ranges[0].size = adev->mc.aper_size;
+ info->apertures->ranges[0].size = adev->gmc.aper_size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -262,7 +265,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
- DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
+ DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
@@ -283,12 +286,6 @@ out:
return ret;
}
-void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
-{
- if (adev->mode_info.rfbdev)
- drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper);
-}
-
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
@@ -325,7 +322,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
return 0;
/* select 8 bpp console on low vram cards */
- if (adev->mc.real_vram_size <= (32*1024*1024))
+ if (adev->gmc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
@@ -393,24 +390,3 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
return true;
return false;
}
-
-void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
-{
- struct amdgpu_fbdev *afbdev;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- if (!adev)
- return;
-
- afbdev = adev->mode_info.rfbdev;
-
- if (!afbdev)
- return;
-
- fb_helper = &afbdev->helper;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (ret)
- DRM_DEBUG("failed to restore crtc mode\n");
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 2fa95ae..455a81e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -187,7 +187,7 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
seq = ++ring->fence_drv.sync_seq;
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- seq, AMDGPU_FENCE_FLAG_INT);
+ seq, 0);
*s = seq;
@@ -391,9 +391,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.irq_type = irq_type;
ring->fence_drv.initialized = true;
- dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
- "cpu addr 0x%p\n", ring->idx,
- ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+ dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
+ "cpu addr 0x%p\n", ring->idx,
+ ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
return 0;
}
@@ -410,7 +410,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission)
{
- long timeout;
int r;
/* Check that num_hw_submission is a power of two */
@@ -434,20 +433,11 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
- timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
- if (timeout == 0) {
- /*
- * FIXME:
- * Delayed workqueue cannot use it directly,
- * so the scheduler will not use delayed workqueue if
- * MAX_SCHEDULE_TIMEOUT is set.
- * Currently keep it simple and silly.
- */
- timeout = MAX_SCHEDULE_TIMEOUT;
- }
- r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
- num_hw_submission,
- timeout, ring->name);
+ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+ num_hw_submission, amdgpu_job_hang_limit,
+ (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ?
+ MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(amdgpu_lockup_timeout),
+ ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
@@ -499,11 +489,11 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
r = amdgpu_fence_wait_empty(ring);
if (r) {
/* no need to trigger GPU reset as we are unloading */
- amdgpu_fence_driver_force_completion(adev);
+ amdgpu_fence_driver_force_completion(ring);
}
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- amd_sched_fini(&ring->sched);
+ drm_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
@@ -534,7 +524,7 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
r = amdgpu_fence_wait_empty(ring);
if (r) {
/* delay GPU reset to resume */
- amdgpu_fence_driver_force_completion(adev);
+ amdgpu_fence_driver_force_completion(ring);
}
/* disable the interrupt */
@@ -571,30 +561,15 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
}
/**
- * amdgpu_fence_driver_force_completion - force all fence waiter to complete
+ * amdgpu_fence_driver_force_completion - force signal latest fence of ring
*
- * @adev: amdgpu device pointer
+ * @ring: ring whose latest fence should be signaled
*
- * In case of GPU reset failure make sure no process keep waiting on fence
- * that will never complete.
*/
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
- int i;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->fence_drv.initialized)
- continue;
-
- amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
- }
-}
-
-void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring)
-{
- if (ring)
- amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+ amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+ amdgpu_fence_process(ring);
}
/*
@@ -709,25 +684,25 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
}
/**
- * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
+ * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
*
* Manually trigger a gpu reset at the next fence wait.
*/
-static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
+static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- seq_printf(m, "gpu reset\n");
- amdgpu_gpu_reset(adev);
+ seq_printf(m, "gpu recover\n");
+ amdgpu_device_gpu_recover(adev, NULL, true);
return 0;
}
static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
- {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
+ {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};
static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index fe81850..cf0f186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -57,60 +57,45 @@
*/
/**
- * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
+ * amdgpu_gart_dummy_page_init - init dummy page used by the driver
*
* @adev: amdgpu_device pointer
*
- * Allocate system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
- * Returns 0 for success, -ENOMEM for failure.
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for gart entries
+ * when pages are taken out of the GART
+ * Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
+static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- void *ptr;
+ struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
- ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
- &adev->gart.table_addr);
- if (ptr == NULL) {
+ if (adev->dummy_page_addr)
+ return 0;
+ adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
+ dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+ adev->dummy_page_addr = 0;
return -ENOMEM;
}
-#ifdef CONFIG_X86
- if (0) {
- set_memory_uc((unsigned long)ptr,
- adev->gart.table_size >> PAGE_SHIFT);
- }
-#endif
- adev->gart.ptr = ptr;
- memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
return 0;
}
/**
- * amdgpu_gart_table_ram_free - free system ram for gart page table
+ * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
*
* @adev: amdgpu_device pointer
*
- * Free system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
+ * Frees the dummy page used by the driver (all asics).
*/
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
+static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
- if (adev->gart.ptr == NULL) {
+ if (!adev->dummy_page_addr)
return;
- }
-#ifdef CONFIG_X86
- if (0) {
- set_memory_wb((unsigned long)adev->gart.ptr,
- adev->gart.table_size >> PAGE_SHIFT);
- }
-#endif
- pci_free_consistent(adev->pdev, adev->gart.table_size,
- (void *)adev->gart.ptr,
- adev->gart.table_addr);
- adev->gart.ptr = NULL;
- adev->gart.table_addr = 0;
+ pci_unmap_page(adev->pdev, adev->dummy_page_addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ adev->dummy_page_addr = 0;
}
/**
@@ -128,11 +113,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
int r;
if (adev->gart.robj == NULL) {
- r = amdgpu_bo_create(adev, adev->gart.table_size,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+ r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, &adev->gart.robj);
+ ttm_bo_type_kernel, NULL,
+ &adev->gart.robj);
if (r) {
return r;
}
@@ -248,18 +234,19 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = NULL;
#endif
- page_base = adev->dummy_page.addr;
+ page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
- amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
- t, page_base, flags);
+ amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_gmc_flush_gpu_tlb(adev, 0);
return 0;
}
@@ -291,7 +278,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
for (i = 0; i < pages; i++) {
page_base = dma_addr[i];
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
- amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
+ amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
@@ -329,7 +316,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++)
- adev->gart.pages[p] = pagelist[i];
+ adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif
if (!adev->gart.ptr)
@@ -341,7 +328,8 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
return r;
mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_gmc_flush_gpu_tlb(adev, 0);
return 0;
}
@@ -357,7 +345,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
{
int r;
- if (adev->dummy_page.page)
+ if (adev->dummy_page_addr)
return 0;
/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
@@ -365,22 +353,20 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
- r = amdgpu_dummy_page_init(adev);
+ r = amdgpu_gart_dummy_page_init(adev);
if (r)
return r;
/* Compute table size */
- adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE;
- adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE;
+ adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
+ adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
/* Allocate pages table */
adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
- if (adev->gart.pages == NULL) {
- amdgpu_gart_fini(adev);
+ if (adev->gart.pages == NULL)
return -ENOMEM;
- }
#endif
return 0;
@@ -395,14 +381,9 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
*/
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
- if (adev->gart.ready) {
- /* unbind pages */
- amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
- }
- adev->gart.ready = false;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
vfree(adev->gart.pages);
adev->gart.pages = NULL;
#endif
- amdgpu_dummy_page_fini(adev);
+ amdgpu_gart_dummy_page_fini(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index afbe803..456295c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -31,7 +31,6 @@
*/
struct amdgpu_device;
struct amdgpu_bo;
-struct amdgpu_gart_funcs;
#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
@@ -39,7 +38,7 @@ struct amdgpu_gart_funcs;
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
struct amdgpu_gart {
- dma_addr_t table_addr;
+ u64 table_addr;
struct amdgpu_bo *robj;
void *ptr;
unsigned num_gpu_pages;
@@ -52,12 +51,8 @@ struct amdgpu_gart {
/* Asic default pte flags */
uint64_t gart_pte_flags;
-
- const struct amdgpu_gart_funcs *gart_funcs;
};
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e87eedc..28c2706 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
if (robj) {
- if (robj->gem_base.import_attach)
- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
amdgpu_mn_unregister(robj);
amdgpu_bo_unref(&robj);
}
@@ -45,7 +43,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
- u64 flags, bool kernel,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct drm_gem_object **obj)
{
@@ -58,23 +56,11 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
alignment = PAGE_SIZE;
}
-retry:
- r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, resv, 0, &bo);
+ r = amdgpu_bo_create(adev, size, alignment, initial_domain,
+ flags, type, resv, &bo);
if (r) {
- if (r != -ERESTARTSYS) {
- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- }
-
- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
- goto retry;
- }
- DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
- size, initial_domain, alignment, r);
- }
+ DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+ size, initial_domain, alignment, r);
return r;
}
*obj = &bo->gem_base;
@@ -282,6 +268,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_userptr *args = data;
struct drm_gem_object *gobj;
@@ -335,7 +322,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto free_pages;
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)
goto free_pages;
@@ -517,17 +504,18 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (!amdgpu_vm_ready(vm))
return;
- r = amdgpu_vm_update_directories(adev, vm);
- if (r)
- goto error;
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
goto error;
if (operation == AMDGPU_VA_OP_MAP ||
- operation == AMDGPU_VA_OP_REPLACE)
+ operation == AMDGPU_VA_OP_REPLACE) {
r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ goto error;
+ }
+
+ r = amdgpu_vm_update_directories(adev, vm);
error:
if (r && r != -ERESTARTSYS)
@@ -557,14 +545,25 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
- dev_err(&dev->pdev->dev,
+ dev_dbg(&dev->pdev->dev,
"va_address 0x%LX is in reserved area 0x%LX\n",
args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
}
+ if (args->va_address >= AMDGPU_VA_HOLE_START &&
+ args->va_address < AMDGPU_VA_HOLE_END) {
+ dev_dbg(&dev->pdev->dev,
+ "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
+ args->va_address, AMDGPU_VA_HOLE_START,
+ AMDGPU_VA_HOLE_END);
+ return -EINVAL;
+ }
+
+ args->va_address &= AMDGPU_VA_HOLE_MASK;
+
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
- dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+ dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
args->flags);
return -EINVAL;
}
@@ -576,7 +575,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
case AMDGPU_VA_OP_REPLACE:
break;
default:
- dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
args->operation);
return -EINVAL;
}
@@ -622,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (r)
goto error_backoff;
- va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
@@ -642,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (r)
goto error_backoff;
- va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
@@ -839,7 +838,7 @@ static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
};
#endif
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ef04336..239bf2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -179,8 +179,12 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
- /* Using pipes 2/3 from MEC 2 seems cause problems */
- if (mec == 1 && pipe > 1)
+ /*
+ * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
+ * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
+ * can only be issued on queue 0.
+ */
+ if ((mec == 1 && pipe > 1) || queue != 0)
continue;
ring->me = mec + 1;
@@ -203,7 +207,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
+ r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
if (r)
return r;
@@ -229,7 +233,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq)
{
- amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+ amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
amdgpu_ring_fini(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
new file mode 100644
index 0000000..893c249
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+#ifndef __AMDGPU_GMC_H__
+#define __AMDGPU_GMC_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_irq.h"
+
+struct firmware;
+
+/*
+ * VMHUB structures, functions & helpers
+ */
+struct amdgpu_vmhub {
+ uint32_t ctx0_ptb_addr_lo32;
+ uint32_t ctx0_ptb_addr_hi32;
+ uint32_t vm_inv_eng0_req;
+ uint32_t vm_inv_eng0_ack;
+ uint32_t vm_context0_cntl;
+ uint32_t vm_l2_pro_fault_status;
+ uint32_t vm_l2_pro_fault_cntl;
+};
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct amdgpu_gmc_funcs {
+ /* flush the vm tlb via mmio */
+ void (*flush_gpu_tlb)(struct amdgpu_device *adev,
+ uint32_t vmid);
+ /* flush the vm tlb via ring */
+ uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
+ uint64_t pd_addr);
+ /* Change the VMID -> PASID mapping */
+ void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
+ unsigned pasid);
+ /* write pte/pde updates using the cpu */
+ int (*set_pte_pde)(struct amdgpu_device *adev,
+ void *cpu_pt_addr, /* cpu addr of page table */
+ uint32_t gpu_page_idx, /* pte/pde to update */
+ uint64_t addr, /* addr to write into pte/pde */
+ uint64_t flags); /* access flags */
+ /* enable/disable PRT support */
+ void (*set_prt)(struct amdgpu_device *adev, bool enable);
+ /* set pte flags based per asic */
+ uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
+ uint32_t flags);
+ /* get the pde for a given mc addr */
+ void (*get_vm_pde)(struct amdgpu_device *adev, int level,
+ u64 *dst, u64 *flags);
+};
+
+struct amdgpu_gmc {
+ resource_size_t aper_size;
+ resource_size_t aper_base;
+ /* for some chips with <= 32MB we need to lie
+ * about vram size near mc fb location */
+ u64 mc_vram_size;
+ u64 visible_vram_size;
+ u64 gart_size;
+ u64 gart_start;
+ u64 gart_end;
+ u64 vram_start;
+ u64 vram_end;
+ unsigned vram_width;
+ u64 real_vram_size;
+ int vram_mtrr;
+ u64 mc_mask;
+ const struct firmware *fw; /* MC firmware */
+ uint32_t fw_version;
+ struct amdgpu_irq_src vm_fault;
+ uint32_t vram_type;
+ uint32_t srbm_soft_reset;
+ bool prt_warning;
+ uint64_t stolen_size;
+ /* apertures */
+ u64 shared_aperture_start;
+ u64 shared_aperture_end;
+ u64 private_aperture_start;
+ u64 private_aperture_end;
+ /* protects concurrent invalidation */
+ spinlock_t invalidate_lock;
+ bool translate_further;
+
+ const struct amdgpu_gmc_funcs *gmc_funcs;
+};
+
+#endif
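
The new amdgpu_gmc_funcs table is dispatched through small wrapper macros; the gart.c hunks above already call amdgpu_gmc_flush_gpu_tlb() and amdgpu_gmc_set_pte_pde(). The bodies below are an assumed sketch of that wrapper style, not the definitions from amdgpu.h.

/* Illustrative wrappers only; the real macros live elsewhere in the driver. */
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) \
	((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) \
	((adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)))
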
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 00e0ce1..da7b1b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -31,6 +31,11 @@ struct amdgpu_gtt_mgr {
atomic64_t available;
};
+struct amdgpu_gtt_node {
+ struct drm_mm_node node;
+ struct ttm_buffer_object *tbo;
+};
+
/**
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM
*
@@ -51,7 +56,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
return -ENOMEM;
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
- size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
+ size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
atomic64_set(&mgr->available, p_size);
@@ -70,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
-
+ spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
@@ -79,17 +84,17 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
}
/**
- * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
*
* @mem: the mem object to check
*
* Check if a mem object has already address space allocated.
*/
-bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
{
- struct drm_mm_node *node = mem->mm_node;
+ struct amdgpu_gtt_node *node = mem->mm_node;
- return (node->start != AMDGPU_BO_INVALID_OFFSET);
+ return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
}
/**
@@ -109,12 +114,12 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_mm_node *node = mem->mm_node;
+ struct amdgpu_gtt_node *node = mem->mm_node;
enum drm_mm_insert_mode mode;
unsigned long fpfn, lpfn;
int r;
- if (amdgpu_gtt_mgr_is_allocated(mem))
+ if (amdgpu_gtt_mgr_has_gart_addr(mem))
return 0;
if (place)
@@ -132,13 +137,13 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
mode = DRM_MM_INSERT_HIGH;
spin_lock(&mgr->lock);
- r = drm_mm_insert_node_in_range(&mgr->mm, node,
- mem->num_pages, mem->page_alignment, 0,
- fpfn, lpfn, mode);
+ r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
+ mem->page_alignment, 0, fpfn, lpfn,
+ mode);
spin_unlock(&mgr->lock);
if (!r)
- mem->start = node->start;
+ mem->start = node->node.start;
return r;
}
@@ -159,7 +164,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_mm_node *node;
+ struct amdgpu_gtt_node *node;
int r;
spin_lock(&mgr->lock);
@@ -177,8 +182,9 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
goto err_out;
}
- node->start = AMDGPU_BO_INVALID_OFFSET;
- node->size = mem->num_pages;
+ node->node.start = AMDGPU_BO_INVALID_OFFSET;
+ node->node.size = mem->num_pages;
+ node->tbo = tbo;
mem->mm_node = node;
if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -190,7 +196,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
goto err_out;
}
} else {
- mem->start = node->start;
+ mem->start = node->node.start;
}
return 0;
@@ -214,14 +220,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_mm_node *node = mem->mm_node;
+ struct amdgpu_gtt_node *node = mem->mm_node;
if (!node)
return;
spin_lock(&mgr->lock);
- if (node->start != AMDGPU_BO_INVALID_OFFSET)
- drm_mm_remove_node(node);
+ if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
+ drm_mm_remove_node(&node->node);
spin_unlock(&mgr->lock);
atomic64_add(mem->num_pages, &mgr->available);
@@ -244,6 +250,25 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
return (result > 0 ? result : 0) * PAGE_SIZE;
}
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_node *node;
+ struct drm_mm_node *mm_node;
+ int r = 0;
+
+ spin_lock(&mgr->lock);
+ drm_mm_for_each_node(mm_node, &mgr->mm) {
+ node = container_of(mm_node, struct amdgpu_gtt_node, node);
+ r = amdgpu_ttm_recover_gart(node->tbo);
+ if (r)
+ break;
+ }
+ spin_unlock(&mgr->lock);
+
+ return r;
+}
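
For illustration, a hedged sketch of how a GPU-reset path might use the new recover helper to re-write the GART entries of every buffer that still owns a GTT node; the surrounding function is hypothetical.

/* Hypothetical caller, invoked after the GART hardware is reinitialized. */
static int example_restore_gtt_after_reset(struct amdgpu_device *adev)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_TT];

	return amdgpu_gtt_mgr_recover(man);
}
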
+
/**
* amdgpu_gtt_mgr_debug - dump VRAM table
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 659997b..311589e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -149,7 +149,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- if (vm && !job->vm_id) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
@@ -164,7 +164,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
if (ring->funcs->emit_pipeline_sync && job &&
- ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
+ ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
amdgpu_vm_need_pipeline_sync(ring, job))) {
need_pipe_sync = true;
dma_fence_put(tmp);
@@ -181,15 +181,18 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
}
- if (ring->funcs->init_cond_exec)
+ if (job && ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
- if (ring->funcs->emit_hdp_flush
#ifdef CONFIG_X86_64
- && !(adev->flags & AMD_IS_APU)
+ if (!(adev->flags & AMD_IS_APU))
#endif
- )
- amdgpu_ring_emit_hdp_flush(ring);
+ {
+ if (ring->funcs->emit_hdp_flush)
+ amdgpu_ring_emit_hdp_flush(ring);
+ else
+ amdgpu_asic_flush_hdp(adev, ring);
+ }
skip_preamble = ring->current_ctx == fence_ctx;
need_ctx_switch = ring->current_ctx != fence_ctx;
@@ -211,7 +214,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
- amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
+ amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
need_ctx_switch);
need_ctx_switch = false;
}
@@ -219,19 +222,16 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_tmz)
amdgpu_ring_emit_tmz(ring, false);
- if (ring->funcs->emit_hdp_invalidate
#ifdef CONFIG_X86_64
- && !(adev->flags & AMD_IS_APU)
+ if (!(adev->flags & AMD_IS_APU))
#endif
- )
- amdgpu_ring_emit_hdp_invalidate(ring);
+ amdgpu_asic_invalidate_hdp(adev, ring);
r = amdgpu_fence_emit(ring, f);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
- if (job && job->vm_id)
- amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
- job->vm_id);
+ if (job && job->vmid)
+ amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
amdgpu_ring_undo(ring);
return r;
}
@@ -279,11 +279,6 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
return r;
}
- r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
- if (r) {
- return r;
- }
-
adev->ib_pool_ready = true;
if (amdgpu_debugfs_sa_init(adev)) {
dev_err(adev->dev, "failed to register debugfs file for SA\n");
@@ -302,7 +297,6 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
if (adev->ib_pool_ready) {
- amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
adev->ib_pool_ready = false;
}
@@ -322,14 +316,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
unsigned i;
int r, ret = 0;
+ long tmo_gfx, tmo_mm;
+
+ tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
+ if (amdgpu_sriov_vf(adev)) {
+ /* MM engines on the hypervisor side are not scheduled together
+ * with the CP and SDMA engines, so even in exclusive mode an MM
+ * engine could still be running on another VF. The IB test timeout
+ * for MM engines under SR-IOV therefore has to be long; 8 seconds
+ * should be enough for the MM engine to come back to this VF.
+ */
+ tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ }
+
+ if (amdgpu_sriov_runtime(adev)) {
+ /* for CP & SDMA engines since they are scheduled together so
+ * need to make the timeout width enough to cover the time
+ * cost waiting for it coming back under RUNTIME only
+ */
+ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ }
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+ long tmo;
if (!ring || !ring->ready)
continue;
- r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
+ /* MM engine need more time */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ tmo = tmo_mm;
+ else
+ tmo = tmo_gfx;
+
+ r = amdgpu_ring_test_ib(ring, tmo);
if (r) {
ring->ready = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
new file mode 100644
index 0000000..a1c78f9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ids.h"
+
+#include <linux/idr.h>
+#include <linux/dma-fence-array.h>
+#include <drm/drmP.h>
+
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_pasid_ida);
+
+/* Helper to free pasid from a fence callback */
+struct amdgpu_pasid_cb {
+ struct dma_fence_cb cb;
+ unsigned int pasid;
+};
+
+/**
+ * amdgpu_pasid_alloc - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_pasid_alloc(unsigned int bits)
+{
+ int pasid = -EINVAL;
+
+ for (bits = min(bits, 31U); bits > 0; bits--) {
+ pasid = ida_simple_get(&amdgpu_pasid_ida,
+ 1U << (bits - 1), 1U << bits,
+ GFP_KERNEL);
+ if (pasid != -ENOSPC)
+ break;
+ }
+
+ if (pasid >= 0)
+ trace_amdgpu_pasid_allocated(pasid);
+
+ return pasid;
+}
+
+/**
+ * amdgpu_pasid_free - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_pasid_free(unsigned int pasid)
+{
+ trace_amdgpu_pasid_freed(pasid);
+ ida_simple_remove(&amdgpu_pasid_ida, pasid);
+}
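
A small usage sketch of the PASID helpers above, assuming a caller that wants a PASID up to 16 bits wide and will accept a narrower one; the function and its error handling are illustrative, not part of this patch.

/* Hypothetical: allocate a PASID and release it again. */
static int example_pasid_roundtrip(void)
{
	int pasid = amdgpu_pasid_alloc(16);

	if (pasid < 0)
		return pasid;	/* -ENOSPC or -ENOMEM */

	amdgpu_pasid_free(pasid);
	return 0;
}
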
+
+static void amdgpu_pasid_free_cb(struct dma_fence *fence,
+ struct dma_fence_cb *_cb)
+{
+ struct amdgpu_pasid_cb *cb =
+ container_of(_cb, struct amdgpu_pasid_cb, cb);
+
+ amdgpu_pasid_free(cb->pasid);
+ dma_fence_put(fence);
+ kfree(cb);
+}
+
+/**
+ * amdgpu_pasid_free_delayed - free pasid when fences signal
+ *
+ * @resv: reservation object with the fences to wait for
+ * @pasid: pasid to free
+ *
+ * Free the pasid only after all the fences in resv are signaled.
+ */
+void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+ unsigned int pasid)
+{
+ struct dma_fence *fence, **fences;
+ struct amdgpu_pasid_cb *cb;
+ unsigned count;
+ int r;
+
+ r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+ if (r)
+ goto fallback;
+
+ if (count == 0) {
+ amdgpu_pasid_free(pasid);
+ return;
+ }
+
+ if (count == 1) {
+ fence = fences[0];
+ kfree(fences);
+ } else {
+ uint64_t context = dma_fence_context_alloc(1);
+ struct dma_fence_array *array;
+
+ array = dma_fence_array_create(count, fences, context,
+ 1, false);
+ if (!array) {
+ kfree(fences);
+ goto fallback;
+ }
+ fence = &array->base;
+ }
+
+ cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+ if (!cb) {
+ /* Last resort when we are OOM */
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ amdgpu_pasid_free(pasid);
+ } else {
+ cb->pasid = pasid;
+ if (dma_fence_add_callback(fence, &cb->cb,
+ amdgpu_pasid_free_cb))
+ amdgpu_pasid_free_cb(fence, &cb->cb);
+ }
+
+ return;
+
+fallback:
+ /* Not enough memory for the delayed delete, as last resort
+ * block for all the fences to complete.
+ */
+ reservation_object_wait_timeout_rcu(resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ amdgpu_pasid_free(pasid);
+}
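
Illustrative caller for the delayed free: when tearing down a VM, the PASID has to outlive any work still fenced against the VM's root reservation object. The snippet below is a sketch of that pattern, not the actual teardown path.

/* Hypothetical teardown snippet. */
static void example_release_vm_pasid(struct amdgpu_vm *vm,
				     struct reservation_object *root_resv)
{
	/* keep the PASID alive until every fence on the root PD signals */
	if (vm->pasid)
		amdgpu_pasid_free_delayed(root_resv, vm->pasid);
}
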
+
+/*
+ * VMID manager
+ *
+ * VMIDs are per-VMHUB identifiers used for page table handling.
+ */
+
+/**
+ * amdgpu_vmid_had_gpu_reset - check if a reset occurred since last use
+ *
+ * @adev: amdgpu_device pointer
+ * @id: VMID structure
+ *
+ * Check if a GPU reset occurred since the last use of the VMID.
+ */
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id)
+{
+ return id->current_gpu_reset_count !=
+ atomic_read(&adev->gpu_reset_counter);
+}
+
+/**
+ * amdgpu_vmid_grab_idle - grab an idle VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @idle: resulting idle VMID
+ *
+ * Try to find an idle VMID; if none is idle, add a fence to wait on to the sync
+ * object. Returns -ENOMEM when we are out of memory.
+ */
+static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
+ struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync,
+ struct amdgpu_vmid **idle)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct dma_fence **fences;
+ unsigned i;
+ int r;
+
+ if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
+ return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
+
+ fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
+
+ /* Check if we have an idle VMID */
+ i = 0;
+ list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+ fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
+ if (!fences[i])
+ break;
+ ++i;
+ }
+
+ /* If we can't find an idle VMID to use, wait till one becomes available */
+ if (&(*idle)->list == &id_mgr->ids_lru) {
+ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+ struct dma_fence_array *array;
+ unsigned j;
+
+ *idle = NULL;
+ for (j = 0; j < i; ++j)
+ dma_fence_get(fences[j]);
+
+ array = dma_fence_array_create(i, fences, fence_context,
+ seqno, true);
+ if (!array) {
+ for (j = 0; j < i; ++j)
+ dma_fence_put(fences[j]);
+ kfree(fences);
+ return -ENOMEM;
+ }
+
+ r = amdgpu_sync_fence(adev, sync, &array->base, false);
+ dma_fence_put(ring->vmid_wait);
+ ring->vmid_wait = &array->base;
+ return r;
+ }
+ kfree(fences);
+
+ return 0;
+}
+
+/**
+ * amdgpu_vmid_grab_reserved - try to assign reserved VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ * @job: job who wants to use the VMID
+ *
+ * Try to assign a reserved VMID.
+ */
+static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+ struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync,
+ struct dma_fence *fence,
+ struct amdgpu_job *job,
+ struct amdgpu_vmid **id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ uint64_t fence_context = adev->fence_context + ring->idx;
+ struct dma_fence *updates = sync->last_vm_update;
+ bool needs_flush = vm->use_cpu_for_update;
+ int r = 0;
+
+ *id = vm->reserved_vmid[vmhub];
+ if (updates && (*id)->flushed_updates &&
+ updates->context == (*id)->flushed_updates->context &&
+ !dma_fence_is_later(updates, (*id)->flushed_updates))
+ updates = NULL;
+
+ if ((*id)->owner != vm->entity.fence_context ||
+ job->vm_pd_addr != (*id)->pd_gpu_addr ||
+ updates || !(*id)->last_flush ||
+ ((*id)->last_flush->context != fence_context &&
+ !dma_fence_is_signaled((*id)->last_flush))) {
+ struct dma_fence *tmp;
+
+ /* to prevent one context from being starved by another context */
+ (*id)->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+ if (tmp) {
+ *id = NULL;
+ r = amdgpu_sync_fence(adev, sync, tmp, false);
+ return r;
+ }
+ needs_flush = true;
+ }
+
+ /* Good, we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
+ r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+ if (r)
+ return r;
+
+ if (updates) {
+ dma_fence_put((*id)->flushed_updates);
+ (*id)->flushed_updates = dma_fence_get(updates);
+ }
+ job->vm_needs_flush = needs_flush;
+ return 0;
+}
+
+/**
+ * amdgpu_vmid_grab_used - try to reuse a VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ * @job: job who wants to use the VMID
+ * @id: resulting VMID
+ *
+ * Try to reuse a VMID for this submission.
+ */
+static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
+ struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync,
+ struct dma_fence *fence,
+ struct amdgpu_job *job,
+ struct amdgpu_vmid **id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ uint64_t fence_context = adev->fence_context + ring->idx;
+ struct dma_fence *updates = sync->last_vm_update;
+ int r;
+
+ job->vm_needs_flush = vm->use_cpu_for_update;
+
+ /* Check if we can use a VMID already assigned to this VM */
+ list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
+ bool needs_flush = vm->use_cpu_for_update;
+ struct dma_fence *flushed;
+
+ /* Check all the prerequisites to using this VMID */
+ if ((*id)->owner != vm->entity.fence_context)
+ continue;
+
+ if ((*id)->pd_gpu_addr != job->vm_pd_addr)
+ continue;
+
+ if (!(*id)->last_flush ||
+ ((*id)->last_flush->context != fence_context &&
+ !dma_fence_is_signaled((*id)->last_flush)))
+ needs_flush = true;
+
+ flushed = (*id)->flushed_updates;
+ if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+ needs_flush = true;
+
+ /* Concurrent flushes are only possible starting with Vega10 */
+ if (adev->asic_type < CHIP_VEGA10 && needs_flush)
+ continue;
+
+ /* Good, we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
+ r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+ if (r)
+ return r;
+
+ if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
+ dma_fence_put((*id)->flushed_updates);
+ (*id)->flushed_updates = dma_fence_get(updates);
+ }
+
+ job->vm_needs_flush |= needs_flush;
+ return 0;
+ }
+
+ *id = NULL;
+ return 0;
+}
+
+/**
+ * amdgpu_vmid_grab - allocate the next free VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ * @job: job who wants to use the VMID
+ *
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ */
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *idle = NULL;
+ struct amdgpu_vmid *id = NULL;
+ int r = 0;
+
+ mutex_lock(&id_mgr->lock);
+ r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+ if (r || !idle)
+ goto error;
+
+ if (vm->reserved_vmid[vmhub]) {
+ r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
+ if (r || !id)
+ goto error;
+ } else {
+ r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
+ if (r)
+ goto error;
+
+ if (!id) {
+ struct dma_fence *updates = sync->last_vm_update;
+
+ /* Still no ID to use? Then use the idle one found earlier */
+ id = idle;
+
+ /* Remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(ring->adev, &id->active,
+ fence, false);
+ if (r)
+ goto error;
+
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ job->vm_needs_flush = true;
+ }
+
+ list_move_tail(&id->list, &id_mgr->ids_lru);
+ }
+
+ id->pd_gpu_addr = job->vm_pd_addr;
+ id->owner = vm->entity.fence_context;
+
+ if (job->vm_needs_flush) {
+ dma_fence_put(id->last_flush);
+ id->last_flush = NULL;
+ }
+ job->vmid = id - id_mgr->ids;
+ job->pasid = vm->pasid;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+
+error:
+ mutex_unlock(&id_mgr->lock);
+ return r;
+}
+
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub)
+{
+ struct amdgpu_vmid_mgr *id_mgr;
+ struct amdgpu_vmid *idle;
+ int r = 0;
+
+ id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
+ AMDGPU_VM_MAX_RESERVED_VMID) {
+ DRM_ERROR("Over limitation of reserved vmid\n");
+ atomic_dec(&id_mgr->reserved_vmid_num);
+ r = -EINVAL;
+ goto unlock;
+ }
+ /* Select the first entry VMID */
+ idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&idle->list);
+ vm->reserved_vmid[vmhub] = idle;
+ mutex_unlock(&id_mgr->lock);
+
+ return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
+}
+
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub)
+{
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ atomic_dec(&id_mgr->reserved_vmid_num);
+ }
+ mutex_unlock(&id_mgr->lock);
+}
+
+/**
+ * amdgpu_vmid_reset - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ * @vmhub: vmhub to use
+ * @vmid: vmid number to use
+ *
+ * Reset saved GDS, GWS and OA to force a switch on the next flush.
+ */
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid)
+{
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id = &id_mgr->ids[vmid];
+
+ mutex_lock(&id_mgr->lock);
+ id->owner = 0;
+ id->gds_base = 0;
+ id->gds_size = 0;
+ id->gws_base = 0;
+ id->gws_size = 0;
+ id->oa_base = 0;
+ id->oa_size = 0;
+ mutex_unlock(&id_mgr->lock);
+}
+
+/**
+ * amdgpu_vmid_reset_all - reset all VMIDs to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset all VMIDs to force a flush on next use
+ */
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ for (j = 1; j < id_mgr->num_ids; ++j)
+ amdgpu_vmid_reset(adev, i, j);
+ }
+}
+
+/**
+ * amdgpu_vmid_mgr_init - init the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VMID manager structures
+ */
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ mutex_init(&id_mgr->lock);
+ INIT_LIST_HEAD(&id_mgr->ids_lru);
+ atomic_set(&id_mgr->reserved_vmid_num, 0);
+
+ /* skip over VMID 0, since it is the system VM */
+ for (j = 1; j < id_mgr->num_ids; ++j) {
+ amdgpu_vmid_reset(adev, i, j);
+ amdgpu_sync_create(&id_mgr->ids[i].active);
+ list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+ }
+ }
+
+ adev->vm_manager.fence_context =
+ dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ adev->vm_manager.seqno[i] = 0;
+}
+
+/**
+ * amdgpu_vmid_mgr_fini - cleanup the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Clean up the VMID manager and free resources.
+ */
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ mutex_destroy(&id_mgr->lock);
+ for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
+ struct amdgpu_vmid *id = &id_mgr->ids[j];
+
+ amdgpu_sync_free(&id->active);
+ dma_fence_put(id->flushed_updates);
+ dma_fence_put(id->last_flush);
+ dma_fence_put(id->pasid_mapping);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
new file mode 100644
index 0000000..7625419
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_IDS_H__
+#define __AMDGPU_IDS_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/dma-fence.h>
+
+#include "amdgpu_sync.h"
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VMID 16
+
+struct amdgpu_device;
+struct amdgpu_vm;
+struct amdgpu_ring;
+struct amdgpu_sync;
+struct amdgpu_job;
+
+struct amdgpu_vmid {
+ struct list_head list;
+ struct amdgpu_sync active;
+ struct dma_fence *last_flush;
+ uint64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct dma_fence *flushed_updates;
+
+ uint32_t current_gpu_reset_count;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
+
+ unsigned pasid;
+ struct dma_fence *pasid_mapping;
+};
+
+struct amdgpu_vmid_mgr {
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
+ atomic_t reserved_vmid_num;
+};
+
+int amdgpu_pasid_alloc(unsigned int bits);
+void amdgpu_pasid_free(unsigned int pasid);
+void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+ unsigned int pasid);
+
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub);
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job);
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid);
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
+
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev);
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f5f27e4..06373d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -92,15 +92,15 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
}
return 0;
} else {
- r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs);
+ r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs);
+ r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
if (r) {
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
return r;
}
@@ -133,8 +133,8 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
&adev->irq.ih.gpu_addr,
(void **)&adev->irq.ih.ring);
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
- amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index ada89358..0e01f11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -25,51 +25,12 @@
#define __AMDGPU_IH_H__
#include <linux/chash.h>
+#include "soc15_ih_clientid.h"
struct amdgpu_device;
- /*
- * vega10+ IH clients
- */
-enum amdgpu_ih_clientid
-{
- AMDGPU_IH_CLIENTID_IH = 0x00,
- AMDGPU_IH_CLIENTID_ACP = 0x01,
- AMDGPU_IH_CLIENTID_ATHUB = 0x02,
- AMDGPU_IH_CLIENTID_BIF = 0x03,
- AMDGPU_IH_CLIENTID_DCE = 0x04,
- AMDGPU_IH_CLIENTID_ISP = 0x05,
- AMDGPU_IH_CLIENTID_PCIE0 = 0x06,
- AMDGPU_IH_CLIENTID_RLC = 0x07,
- AMDGPU_IH_CLIENTID_SDMA0 = 0x08,
- AMDGPU_IH_CLIENTID_SDMA1 = 0x09,
- AMDGPU_IH_CLIENTID_SE0SH = 0x0a,
- AMDGPU_IH_CLIENTID_SE1SH = 0x0b,
- AMDGPU_IH_CLIENTID_SE2SH = 0x0c,
- AMDGPU_IH_CLIENTID_SE3SH = 0x0d,
- AMDGPU_IH_CLIENTID_SYSHUB = 0x0e,
- AMDGPU_IH_CLIENTID_THM = 0x0f,
- AMDGPU_IH_CLIENTID_UVD = 0x10,
- AMDGPU_IH_CLIENTID_VCE0 = 0x11,
- AMDGPU_IH_CLIENTID_VMC = 0x12,
- AMDGPU_IH_CLIENTID_XDMA = 0x13,
- AMDGPU_IH_CLIENTID_GRBM_CP = 0x14,
- AMDGPU_IH_CLIENTID_ATS = 0x15,
- AMDGPU_IH_CLIENTID_ROM_SMUIO = 0x16,
- AMDGPU_IH_CLIENTID_DF = 0x17,
- AMDGPU_IH_CLIENTID_VCE1 = 0x18,
- AMDGPU_IH_CLIENTID_PWR = 0x19,
- AMDGPU_IH_CLIENTID_UTCL2 = 0x1b,
- AMDGPU_IH_CLIENTID_EA = 0x1c,
- AMDGPU_IH_CLIENTID_UTCL2LOG = 0x1d,
- AMDGPU_IH_CLIENTID_MP0 = 0x1e,
- AMDGPU_IH_CLIENTID_MP1 = 0x1f,
-
- AMDGPU_IH_CLIENTID_MAX,
-
- AMDGPU_IH_CLIENTID_VCN = AMDGPU_IH_CLIENTID_UVD
-};
#define AMDGPU_IH_CLIENTID_LEGACY 0
+#define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
#define AMDGPU_PAGEFAULT_HASH_BITS 8
struct amdgpu_retryfault_hashtable {
@@ -105,11 +66,11 @@ struct amdgpu_iv_entry {
unsigned client_id;
unsigned src_id;
unsigned ring_id;
- unsigned vm_id;
- unsigned vm_id_src;
+ unsigned vmid;
+ unsigned vmid_src;
uint64_t timestamp;
unsigned timestamp_src;
- unsigned pas_id;
+ unsigned pasid;
unsigned pasid_src;
unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
const uint32_t *iv_entry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 47c5ce9..3a5ca46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -88,11 +88,11 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
reset_work);
if (!amdgpu_sriov_vf(adev))
- amdgpu_gpu_reset(adev);
+ amdgpu_device_gpu_recover(adev, NULL, false);
}
/* Disable *all* interrupts */
-static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
unsigned long irqflags;
unsigned i, j, k;
@@ -123,55 +123,6 @@ static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_preinstall - drm irq preinstall callback
- *
- * @dev: drm dev pointer
- *
- * Gets the hw ready to enable irqs (all asics).
- * This function disables all interrupt sources on the GPU.
- */
-void amdgpu_irq_preinstall(struct drm_device *dev)
-{
- struct amdgpu_device *adev = dev->dev_private;
-
- /* Disable *all* interrupts */
- amdgpu_irq_disable_all(adev);
- /* Clear bits */
- amdgpu_ih_process(adev);
-}
-
-/**
- * amdgpu_irq_postinstall - drm irq preinstall callback
- *
- * @dev: drm dev pointer
- *
- * Handles stuff to be done after enabling irqs (all asics).
- * Returns 0 on success.
- */
-int amdgpu_irq_postinstall(struct drm_device *dev)
-{
- dev->max_vblank_count = 0x00ffffff;
- return 0;
-}
-
-/**
- * amdgpu_irq_uninstall - drm irq uninstall callback
- *
- * @dev: drm dev pointer
- *
- * This function disables all interrupt sources on the GPU (all asics).
- */
-void amdgpu_irq_uninstall(struct drm_device *dev)
-{
- struct amdgpu_device *adev = dev->dev_private;
-
- if (adev == NULL) {
- return;
- }
- amdgpu_irq_disable_all(adev);
-}
-
-/**
* amdgpu_irq_handler - irq handler
*
* @int irq, void *arg: args
@@ -232,7 +183,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
int ret = pci_enable_msi(adev->pdev);
if (!ret) {
adev->irq.msi_enabled = true;
- dev_info(adev->dev, "amdgpu: using MSI.\n");
+ dev_dbg(adev->dev, "amdgpu: using MSI.\n");
}
}
@@ -257,12 +208,14 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
if (r) {
adev->irq.installed = false;
- flush_work(&adev->hotplug_work);
+ if (!amdgpu_device_has_dc_support(adev))
+ flush_work(&adev->hotplug_work);
cancel_work_sync(&adev->reset_work);
return r;
}
+ adev->ddev->max_vblank_count = 0x00ffffff;
- DRM_INFO("amdgpu: irq initialized.\n");
+ DRM_DEBUG("amdgpu: irq initialized.\n");
return 0;
}
@@ -282,7 +235,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
adev->irq.installed = false;
if (adev->irq.msi_enabled)
pci_disable_msi(adev->pdev);
- flush_work(&adev->hotplug_work);
+ if (!amdgpu_device_has_dc_support(adev))
+ flush_work(&adev->hotplug_work);
cancel_work_sync(&adev->reset_work);
}
@@ -305,6 +259,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
}
}
kfree(adev->irq.client[i].sources);
+ adev->irq.client[i].sources = NULL;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 0610cc4..3375ad7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -78,9 +78,7 @@ struct amdgpu_irq {
uint32_t srbm_soft_reset;
};
-void amdgpu_irq_preinstall(struct drm_device *dev);
-int amdgpu_irq_postinstall(struct drm_device *dev);
-void amdgpu_irq_uninstall(struct drm_device *dev);
+void amdgpu_irq_disable_all(struct amdgpu_device *adev);
irqreturn_t amdgpu_irq_handler(int irq, void *arg);
int amdgpu_irq_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0cfc68d..2bd5676 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_timedout(struct amd_sched_job *s_job)
+static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -37,10 +37,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
atomic_read(&job->ring->fence_drv.last_seq),
job->ring->fence_drv.sync_seq);
- if (amdgpu_sriov_vf(job->adev))
- amdgpu_sriov_gpu_reset(job->adev, job);
- else
- amdgpu_gpu_reset(job->adev);
+ amdgpu_device_gpu_recover(job->adev, job, false);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +60,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->num_ibs = num_ibs;
amdgpu_sync_create(&(*job)->sync);
- amdgpu_sync_create(&(*job)->dep_sync);
amdgpu_sync_create(&(*job)->sched_sync);
(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
@@ -100,14 +96,13 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
-static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
+ amdgpu_ring_priority_put(job->ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
- amdgpu_sync_free(&job->dep_sync);
amdgpu_sync_free(&job->sched_sync);
kfree(job);
}
@@ -118,13 +113,12 @@ void amdgpu_job_free(struct amdgpu_job *job)
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
- amdgpu_sync_free(&job->dep_sync);
amdgpu_sync_free(&job->sched_sync);
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
+ struct drm_sched_entity *entity, void *owner,
struct dma_fence **f)
{
int r;
@@ -133,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
@@ -141,46 +135,47 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- amdgpu_ring_priority_get(job->ring,
- amd_sched_get_job_priority(&job->base));
- amd_sched_entity_push_job(&job->base);
+ amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+ drm_sched_entity_push_job(&job->base, entity);
return 0;
}
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
-
- struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
+ bool explicit = false;
int r;
-
- if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
- r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
- if (r)
- DRM_ERROR("Error adding fence to sync (%d)\n", r);
+ struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
+
+ if (fence && explicit) {
+ if (drm_sched_dependency_optimized(fence, s_entity)) {
+ r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+ if (r)
+ DRM_ERROR("Error adding fence to sync (%d)\n", r);
+ }
}
- if (!fence)
- fence = amdgpu_sync_get_fence(&job->sync);
- while (fence == NULL && vm && !job->vm_id) {
+
+ while (fence == NULL && vm && !job->vmid) {
struct amdgpu_ring *ring = job->ring;
- r = amdgpu_vm_grab_id(vm, ring, &job->sync,
- &job->base.s_fence->finished,
- job);
+ r = amdgpu_vmid_grab(vm, ring, &job->sync,
+ &job->base.s_fence->finished,
+ job);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
- fence = amdgpu_sync_get_fence(&job->sync);
+ fence = amdgpu_sync_get_fence(&job->sync, NULL);
}
return fence;
}
-static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence = NULL, *finished;
struct amdgpu_device *adev;
struct amdgpu_job *job;
int r;
@@ -190,15 +185,18 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return NULL;
}
job = to_amdgpu_job(sched_job);
+ finished = &job->base.s_fence->finished;
adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- /* skip ib schedule when vram is lost */
- if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
- dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
- DRM_ERROR("Skip scheduling IBs!\n");
+
+ if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+ dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */
+
+ if (finished->error < 0) {
+ DRM_INFO("Skip scheduling IBs!\n");
} else {
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
&fence);
@@ -213,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return fence;
}
-const struct amd_sched_backend_ops amdgpu_sched_ops = {
+const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
.timedout_job = amdgpu_job_timedout,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 720139e..4b7824d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -63,8 +63,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
- amdgpu_amdkfd_device_fini(adev);
-
amdgpu_acpi_fini(adev);
amdgpu_device_fini(adev);
@@ -159,9 +157,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- amdgpu_amdkfd_device_probe(adev);
- amdgpu_amdkfd_device_init(adev);
-
if (amdgpu_device_is_px(dev)) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -171,9 +166,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
pm_runtime_put_autosuspend(dev->dev);
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_release_full_gpu(adev, true);
-
out:
if (r) {
/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -198,8 +190,12 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->uvd.fw_version;
fw_info->feature = 0;
break;
+ case AMDGPU_INFO_FW_VCN:
+ fw_info->ver = adev->vcn.fw_version;
+ fw_info->feature = 0;
+ break;
case AMDGPU_INFO_FW_GMC:
- fw_info->ver = adev->mc.fw_version;
+ fw_info->ver = adev->gmc.fw_version;
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_GFX_ME:
@@ -478,9 +474,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_VRAM_GTT: {
struct drm_amdgpu_info_vram_gtt vram_gtt;
- vram_gtt.vram_size = adev->mc.real_vram_size;
+ vram_gtt.vram_size = adev->gmc.real_vram_size;
vram_gtt.vram_size -= adev->vram_pin_size;
- vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
+ vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE;
@@ -492,17 +488,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_memory_info mem;
memset(&mem, 0, sizeof(mem));
- mem.vram.total_heap_size = adev->mc.real_vram_size;
+ mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size =
- adev->mc.real_vram_size - adev->vram_pin_size;
+ adev->gmc.real_vram_size - adev->vram_pin_size;
mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
- adev->mc.visible_vram_size;
+ adev->gmc.visible_vram_size;
mem.cpu_accessible_vram.usable_heap_size =
- adev->mc.visible_vram_size -
+ adev->gmc.visible_vram_size -
(adev->vram_pin_size - adev->invisible_pin_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -558,6 +554,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info = {};
+ uint64_t vm_size;
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
@@ -585,8 +582,22 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
if (amdgpu_sriov_vf(adev))
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
+
+ vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_SIZE;
+
+ /* Older VCE FW versions are buggy and can handle only 40 bits */
+ if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
+ vm_size = min(vm_size, 1ULL << 40);
+
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
- dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ dev_info.virtual_address_max =
+ min(vm_size, AMDGPU_VA_HOLE_START);
+
+ if (vm_size > AMDGPU_VA_HOLE_START) {
+ dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
+ dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
+ }
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
@@ -597,8 +608,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
sizeof(adev->gfx.cu_info.ao_cu_bitmap));
memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
sizeof(adev->gfx.cu_info.bitmap));
- dev_info.vram_type = adev->mc.vram_type;
- dev_info.vram_bit_width = adev->mc.vram_width;
+ dev_info.vram_type = adev->gmc.vram_type;
+ dev_info.vram_bit_width = adev->gmc.vram_width;
dev_info.vce_harvest_config = adev->vce.harvest_config;
dev_info.gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf;
@@ -756,6 +767,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return -EINVAL;
}
break;
+ case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
+ /* get the stable pstate sclk in MHz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
+ case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
+ /* get the stable pstate mclk in MHz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->sensor_info.type);
@@ -786,9 +815,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
*/
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
-
- amdgpu_fbdev_restore_mode(adev);
+ drm_fb_helper_lastclose(dev);
vga_switcheroo_process_delayed_switch();
}
@@ -805,7 +832,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv;
- int r;
+ int r, pasid;
file_priv->driver_priv = NULL;
@@ -819,28 +846,25 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto out_suspend;
}
- r = amdgpu_vm_init(adev, &fpriv->vm,
- AMDGPU_VM_CONTEXT_GFX, 0);
- if (r) {
- kfree(fpriv);
- goto out_suspend;
+ pasid = amdgpu_pasid_alloc(16);
+ if (pasid < 0) {
+ dev_warn(adev->dev, "No more PASIDs available!");
+ pasid = 0;
}
+ r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
+ if (r)
+ goto error_pasid;
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
r = -ENOMEM;
- amdgpu_vm_fini(adev, &fpriv->vm);
- kfree(fpriv);
- goto out_suspend;
+ goto error_vm;
}
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
- if (r) {
- amdgpu_vm_fini(adev, &fpriv->vm);
- kfree(fpriv);
- goto out_suspend;
- }
+ if (r)
+ goto error_vm;
}
mutex_init(&fpriv->bo_list_lock);
@@ -849,6 +873,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
file_priv->driver_priv = fpriv;
+ goto out_suspend;
+
+error_vm:
+ amdgpu_vm_fini(adev, &fpriv->vm);
+
+error_pasid:
+ if (pasid)
+ amdgpu_pasid_free(pasid);
+
+ kfree(fpriv);
out_suspend:
pm_runtime_mark_last_busy(dev->dev);
@@ -871,6 +905,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_bo_list *list;
+ struct amdgpu_bo *pd;
+ unsigned int pasid;
int handle;
if (!fpriv)
@@ -895,7 +931,13 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unreserve(adev->virt.csa_obj);
}
+ pasid = fpriv->vm.pasid;
+ pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
+
amdgpu_vm_fini(adev, &fpriv->vm);
+ if (pasid)
+ amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
+ amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
amdgpu_bo_list_free(list);
@@ -947,11 +989,11 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
*/
do {
count = amdgpu_display_vblank_get_counter(adev, pipe);
- /* Ask amdgpu_get_crtc_scanoutpos to return vpos as
- * distance to start of vblank, instead of regular
- * vertical scanout pos.
+ /* Ask amdgpu_display_get_crtc_scanoutpos to return
+ * vpos as distance to start of vblank, instead of
+ * regular vertical scanout pos.
*/
- stat = amdgpu_get_crtc_scanoutpos(
+ stat = amdgpu_display_get_crtc_scanoutpos(
dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
&vpos, &hpos, NULL, NULL,
&adev->mode_info.crtcs[pipe]->base.hwmode);
@@ -992,7 +1034,7 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
struct amdgpu_device *adev = dev->dev_private;
- int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
+ int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}
@@ -1008,7 +1050,7 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
struct amdgpu_device *adev = dev->dev_private;
- int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
+ int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
@@ -1160,6 +1202,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
i, fw_info.feature, fw_info.ver);
}
+ /* VCN */
+ query_fw.fw_type = AMDGPU_INFO_FW_VCN;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index ffde1e9..d6416ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -89,7 +89,6 @@ enum amdgpu_hpd_id {
AMDGPU_HPD_4,
AMDGPU_HPD_5,
AMDGPU_HPD_6,
- AMDGPU_HPD_LAST,
AMDGPU_HPD_NONE = 0xff,
};
@@ -106,7 +105,6 @@ enum amdgpu_crtc_irq {
AMDGPU_CRTC_IRQ_VLINE4,
AMDGPU_CRTC_IRQ_VLINE5,
AMDGPU_CRTC_IRQ_VLINE6,
- AMDGPU_CRTC_IRQ_LAST,
AMDGPU_CRTC_IRQ_NONE = 0xff
};
@@ -117,7 +115,6 @@ enum amdgpu_pageflip_irq {
AMDGPU_PAGEFLIP_IRQ_D4,
AMDGPU_PAGEFLIP_IRQ_D5,
AMDGPU_PAGEFLIP_IRQ_D6,
- AMDGPU_PAGEFLIP_IRQ_LAST,
AMDGPU_PAGEFLIP_IRQ_NONE = 0xff
};
@@ -270,8 +267,6 @@ struct amdgpu_display_funcs {
void (*bandwidth_update)(struct amdgpu_device *adev);
/* get frame count */
u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
- /* wait for vblank */
- void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
/* set backlight level */
void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
u8 level);
@@ -355,6 +350,7 @@ struct amdgpu_mode_info {
u16 firmware_flags;
/* pointer to backlight encoder */
struct amdgpu_encoder *bl_encoder;
+ u8 bl_level; /* saved backlight level */
struct amdgpu_audio audio; /* audio stuff */
int num_crtc; /* number of crtcs */
int num_hpd; /* number of hpd pins */
@@ -555,14 +551,6 @@ struct amdgpu_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
- /* number of modes generated from EDID at 'dc_sink' */
- int num_modes;
- /* The 'old' sink - before an HPD.
- * The 'current' sink is in dc_link->sink. */
- struct dc_sink *dc_sink;
- struct dc_link *dc_link;
- struct dc_sink *dc_em_sink;
- const struct dc_stream *stream;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
@@ -573,27 +561,6 @@ struct amdgpu_connector {
enum amdgpu_connector_audio audio;
enum amdgpu_connector_dither dither;
unsigned pixelclock_for_modeset;
-
- struct drm_dp_mst_topology_mgr mst_mgr;
- struct amdgpu_dm_dp_aux dm_dp_aux;
- struct drm_dp_mst_port *port;
- struct amdgpu_connector *mst_port;
- struct amdgpu_encoder *mst_encoder;
- struct semaphore mst_sem;
-
- /* TODO see if we can merge with ddc_bus or make a dm_connector */
- struct amdgpu_i2c_adapter *i2c;
-
- /* Monitor range limits */
- int min_vfreq ;
- int max_vfreq ;
- int pixel_clock_mhz;
-
- /*freesync caps*/
- struct mod_freesync_caps caps;
-
- struct mutex hpd_lock;
-
};
/* TODO: start to use this struct and remove same field from base one */
@@ -611,7 +578,7 @@ struct amdgpu_mst_connector {
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
-/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
+/* Driver internal use only flags of amdgpu_display_get_crtc_scanoutpos() */
#define DRM_SCANOUTPOS_VALID (1 << 0)
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
@@ -630,30 +597,31 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder);
-bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux);
+bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ bool use_aux);
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);
-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- unsigned int flags, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
+int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+ unsigned int pipe, unsigned int flags, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
-int amdgpu_framebuffer_init(struct drm_device *dev,
- struct amdgpu_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
+int amdgpu_display_framebuffer_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void amdgpu_enc_destroy(struct drm_encoder *encoder);
void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
-bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
-int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
+int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
/* fbdev layer */
int amdgpu_fbdev_init(struct amdgpu_device *adev);
@@ -661,23 +629,19 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
-void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
-
-void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
-
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
/* amdgpu_display.c */
-void amdgpu_print_display_setup(struct drm_device *dev);
-int amdgpu_modeset_create_props(struct amdgpu_device *adev);
-int amdgpu_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx);
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags, uint32_t target,
- struct drm_modeset_acquire_ctx *ctx);
+void amdgpu_display_print_display_setup(struct drm_device *dev);
+int amdgpu_display_modeset_create_props(struct amdgpu_device *adev);
+int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx);
+int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx);
extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ea25164..fac4b60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -36,14 +36,32 @@
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
+
+static bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ if (amdgpu_gpu_recovery == 0 ||
+ (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
+ return false;
+
+ return true;
+}
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ if (bo->kfd_bo)
+ amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+
amdgpu_bo_kunmap(bo);
+ if (bo->gem_base.import_attach)
+ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
@@ -71,7 +89,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
- unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0;
places[c].lpfn = 0;
@@ -91,7 +109,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
if (flags & AMDGPU_GEM_CREATE_SHADOW)
- places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+ places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
else
places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_TT;
@@ -157,13 +175,15 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* @size: size for the new BO
* @align: alignment for the new BO
* @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: used to initialize BOs in structures
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
* Allocates and pins a BO for kernel internal use, and returns it still
* reserved.
*
+ * Note: a new BO is only created if *bo_ptr points to NULL.
+ *
* Returns 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
@@ -175,10 +195,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
int r;
if (!*bo_ptr) {
- r = amdgpu_bo_create(adev, size, align, true, domain,
+ r = amdgpu_bo_create(adev, size, align, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, bo_ptr);
+ ttm_bo_type_kernel, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r);
@@ -226,12 +246,14 @@ error_free:
* @size: size for the new BO
* @align: alignment for the new BO
* @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: used to initialize BOs in structures
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
* Allocates and pins a BO for kernel internal use.
*
+ * Note: a new BO is only created if *bo_ptr points to NULL.
+ *
* Returns 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
@@ -281,31 +303,68 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL;
}
-static int amdgpu_bo_do_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
+/* Validate that the BO size is smaller than the total memory in the requested domain */
+static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+ unsigned long size, u32 domain)
+{
+ struct ttm_mem_type_manager *man = NULL;
+
+ /*
+ * If GTT is part of the requested domains, the check must succeed to
+ * allow falling back to GTT
+ */
+ if (domain & AMDGPU_GEM_DOMAIN_GTT) {
+ man = &adev->mman.bdev.man[TTM_PL_TT];
+
+ if (size < (man->size << PAGE_SHIFT))
+ return true;
+ else
+ goto fail;
+ }
+
+ if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+ man = &adev->mman.bdev.man[TTM_PL_VRAM];
+
+ if (size < (man->size << PAGE_SHIFT))
+ return true;
+ else
+ goto fail;
+ }
+
+
+ /* TODO: add more domain checks, such as AMDGPU_GEM_DOMAIN_CPU */
+ return true;
+
+fail:
+ DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+ man->size << PAGE_SHIFT);
+ return false;
+}
+
+static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
- uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = (type != ttm_bo_type_kernel),
+ .no_wait_gpu = false,
+ .resv = resv,
+ .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+ };
struct amdgpu_bo *bo;
- enum ttm_bo_type type;
unsigned long page_align;
- u64 initial_bytes_moved, bytes_moved;
size_t acc_size;
+ u32 domains;
int r;
page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
size = ALIGN(size, PAGE_SIZE);
- if (kernel) {
- type = ttm_bo_type_kernel;
- } else if (sg) {
- type = ttm_bo_type_sg;
- } else {
- type = ttm_bo_type_device;
- }
+ if (!amdgpu_bo_validate_size(adev, size, domain))
+ return -ENOMEM;
+
*bo_ptr = NULL;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
@@ -314,11 +373,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
- if (unlikely(r)) {
- kfree(bo);
- return r;
- }
+ drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -328,7 +383,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA);
bo->allowed_domains = bo->preferred_domains;
- if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+ if (type != ttm_bo_type_kernel &&
+ bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
bo->flags = flags;
@@ -362,33 +418,41 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
#endif
bo->tbo.bdev = &adev->mman.bdev;
- amdgpu_ttm_placement_from_domain(bo, domain);
-
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- /* Kernel allocation are uninterruptible */
+ domains = bo->preferred_domains;
+retry:
+ amdgpu_ttm_placement_from_domain(bo, domains);
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, !kernel, NULL,
- acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
- if (unlikely(r != 0))
+ &bo->placement, page_align, &ctx, acc_size,
+ NULL, resv, &amdgpu_ttm_bo_destroy);
+
+ if (unlikely(r && r != -ERESTARTSYS)) {
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ } else if (domains != bo->preferred_domains) {
+ domains = bo->allowed_domains;
+ goto retry;
+ }
+ }
+ if (unlikely(r))
return r;
- bytes_moved = atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
- if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
- amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+ bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+ ctx.bytes_moved);
else
- amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
- if (kernel)
+ if (type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
+ r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
@@ -425,12 +489,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
if (bo->shadow)
return 0;
- r = amdgpu_bo_do_create(adev, size, byte_align, true,
- AMDGPU_GEM_DOMAIN_GTT,
+ r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW,
- NULL, bo->tbo.resv, 0,
- &bo->shadow);
+ ttm_bo_type_kernel,
+ bo->tbo.resv, &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
@@ -441,22 +504,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
-/* init_value will only take effect when flags contains
- * AMDGPU_GEM_CREATE_VRAM_CLEARED.
- */
-int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
- uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
- r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
- parent_flags, sg, resv, init_value, bo_ptr);
+ r = amdgpu_bo_do_create(adev, size, byte_align, domain,
+ parent_flags, type, resv, bo_ptr);
if (r)
return r;
@@ -511,6 +569,7 @@ err:
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
uint32_t domain;
int r;
@@ -521,7 +580,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
goto retry;
@@ -632,6 +691,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 *gpu_addr)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -647,7 +707,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (bo->pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
- if (domain != amdgpu_mem_type_to_domain(mem_type))
+ if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
bo->pin_count++;
@@ -682,21 +742,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (unlikely(r)) {
+ dev_err(adev->dev, "%p bind failed\n", bo);
+ goto error;
+ }
+
bo->pin_count = 1;
- if (gpu_addr != NULL) {
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p bind failed\n", bo);
- goto error;
- }
+ if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
- }
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
@@ -717,6 +779,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (!bo->pin_count) {
@@ -730,7 +793,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
@@ -766,31 +829,32 @@ static const char *amdgpu_vram_names[] = {
"GDDR4",
"GDDR5",
"HBM",
- "DDR3"
+ "DDR3",
+ "DDR4",
};
int amdgpu_bo_init(struct amdgpu_device *adev)
{
/* reserve PAT memory space to WC for VRAM */
- arch_io_reserve_memtype_wc(adev->mc.aper_base,
- adev->mc.aper_size);
+ arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+ adev->gmc.aper_size);
/* Add an MTRR for the VRAM */
- adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
- adev->mc.aper_size);
+ adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
+ adev->gmc.aper_size);
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
- adev->mc.mc_vram_size >> 20,
- (unsigned long long)adev->mc.aper_size >> 20);
+ adev->gmc.mc_vram_size >> 20,
+ (unsigned long long)adev->gmc.aper_size >> 20);
DRM_INFO("RAM width %dbits %s\n",
- adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
+ adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
return amdgpu_ttm_init(adev);
}
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
- arch_phys_wc_del(adev->mc.vram_mtrr);
- arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
@@ -902,6 +966,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo;
unsigned long offset, size;
int r;
@@ -919,7 +984,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) <= adev->mc.visible_vram_size)
+ if ((offset + size) <= adev->gmc.visible_vram_size)
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -935,14 +1000,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
abo->placement.num_busy_placement = 1;
abo->placement.busy_placement = &abo->placements[1];
- r = ttm_bo_validate(bo, &abo->placement, false, false);
+ r = ttm_bo_validate(bo, &abo->placement, &ctx);
if (unlikely(r != 0))
return r;
offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */
if (bo->mem.mem_type == TTM_PL_VRAM &&
- (offset + size) > adev->mc.visible_vram_size)
+ (offset + size) > adev->gmc.visible_vram_size)
return -EINVAL;
return 0;
@@ -980,7 +1045,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
- !amdgpu_ttm_is_bound(bo->tbo.ttm));
+ !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 428aae0..546f77cb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -92,6 +92,8 @@ struct amdgpu_bo {
struct list_head mn_list;
struct list_head shadow_list;
};
+
+ struct kgd_mem *kfd_bo;
};
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
@@ -187,7 +189,7 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
switch (bo->tbo.mem.mem_type) {
- case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
+ case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
case TTM_PL_VRAM: return true;
default: return false;
}
@@ -201,13 +203,11 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
-int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct reservation_object *resv,
- uint64_t init_value,
- struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
+ struct reservation_object *resv,
+ struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
@@ -282,8 +282,6 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
-int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f8edf54..361975c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -32,7 +32,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include "amd_powerplay.h"
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -117,7 +116,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
}
if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state;
@@ -317,7 +316,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
+ AMD_PP_TASK_ENABLE_USER_STATE, &state);
adev->pp_force_state_enabled = true;
}
}
@@ -361,6 +360,90 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
return count;
}
+static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret = 0;
+ uint32_t parameter_size = 0;
+ long parameter[64];
+ char buf_cpy[128];
+ char *tmp_str;
+ char *sub_str;
+ const char delimiter[3] = {' ', '\n', '\0'};
+ uint32_t type;
+
+ if (count > 127)
+ return -EINVAL;
+
+ if (*buf == 's')
+ type = PP_OD_EDIT_SCLK_VDDC_TABLE;
+ else if (*buf == 'm')
+ type = PP_OD_EDIT_MCLK_VDDC_TABLE;
+ else if(*buf == 'r')
+ type = PP_OD_RESTORE_DEFAULT_TABLE;
+ else if (*buf == 'c')
+ type = PP_OD_COMMIT_DPM_TABLE;
+ else
+ return -EINVAL;
+
+ memcpy(buf_cpy, buf, count+1);
+
+ tmp_str = buf_cpy;
+
+ while (isspace(*++tmp_str));
+
+ while (tmp_str[0]) {
+ sub_str = strsep(&tmp_str, delimiter);
+ ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
+ if (ret)
+ return -EINVAL;
+ parameter_size++;
+
+ while (isspace(*tmp_str))
+ tmp_str++;
+ }
+
+ if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
+ ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+ parameter, parameter_size);
+
+ if (ret)
+ return -EINVAL;
+
+ if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ return count;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t size = 0;
+
+ if (adev->powerplay.pp_funcs->print_clock_levels) {
+ size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
+ return size;
+ } else {
+ return snprintf(buf, PAGE_SIZE, "\n");
+ }
+
+}
+
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -531,7 +614,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
} else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
@@ -575,7 +658,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
} else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
@@ -585,159 +668,70 @@ fail:
return count;
}
-static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
- char *buf, struct amd_pp_profile *query)
+static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- int ret = 0xff;
- if (adev->powerplay.pp_funcs->get_power_profile_state)
- ret = amdgpu_dpm_get_power_profile_state(
- adev, query);
+ if (adev->powerplay.pp_funcs->get_power_profile_mode)
+ return amdgpu_dpm_get_power_profile_mode(adev, buf);
- if (ret)
- return ret;
-
- return snprintf(buf, PAGE_SIZE,
- "%d %d %d %d %d\n",
- query->min_sclk / 100,
- query->min_mclk / 100,
- query->activity_threshold,
- query->up_hyst,
- query->down_hyst);
+ return snprintf(buf, PAGE_SIZE, "\n");
}
-static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amd_pp_profile query = {0};
-
- query.type = AMD_PP_GFX_PROFILE;
-
- return amdgpu_get_pp_power_profile(dev, buf, &query);
-}
-static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev,
+static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
struct device_attribute *attr,
- char *buf)
-{
- struct amd_pp_profile query = {0};
-
- query.type = AMD_PP_COMPUTE_PROFILE;
-
- return amdgpu_get_pp_power_profile(dev, buf, &query);
-}
-
-static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
const char *buf,
- size_t count,
- struct amd_pp_profile *request)
+ size_t count)
{
+ int ret = 0xff;
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- uint32_t loop = 0;
- char *sub_str, buf_cpy[128], *tmp_str;
+ uint32_t parameter_size = 0;
+ long parameter[64];
+ char *sub_str, buf_cpy[128];
+ char *tmp_str;
+ uint32_t i = 0;
+ char tmp[2];
+ long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
- long int value;
- int ret = 0xff;
-
- if (strncmp("reset", buf, strlen("reset")) == 0) {
- if (adev->powerplay.pp_funcs->reset_power_profile_state)
- ret = amdgpu_dpm_reset_power_profile_state(
- adev, request);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- return count;
- }
-
- if (strncmp("set", buf, strlen("set")) == 0) {
- if (adev->powerplay.pp_funcs->set_power_profile_state)
- ret = amdgpu_dpm_set_power_profile_state(
- adev, request);
-
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- return count;
- }
- if (count + 1 >= 128) {
- count = -EINVAL;
+ tmp[0] = *(buf);
+ tmp[1] = '\0';
+ ret = kstrtol(tmp, 0, &profile_mode);
+ if (ret)
goto fail;
- }
-
- memcpy(buf_cpy, buf, count + 1);
- tmp_str = buf_cpy;
-
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
- ret = kstrtol(sub_str, 0, &value);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- switch (loop) {
- case 0:
- /* input unit MHz convert to dpm table unit 10KHz*/
- request->min_sclk = (uint32_t)value * 100;
- break;
- case 1:
- /* input unit MHz convert to dpm table unit 10KHz*/
- request->min_mclk = (uint32_t)value * 100;
- break;
- case 2:
- request->activity_threshold = (uint16_t)value;
- break;
- case 3:
- request->up_hyst = (uint8_t)value;
- break;
- case 4:
- request->down_hyst = (uint8_t)value;
- break;
- default:
- break;
+ if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (count < 2 || count > 127)
+ return -EINVAL;
+ while (isspace(*++buf))
+ i++;
+ memcpy(buf_cpy, buf, count-i);
+ tmp_str = buf_cpy;
+ while (tmp_str[0]) {
+ sub_str = strsep(&tmp_str, delimiter);
+ ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+ parameter_size++;
+ while (isspace(*tmp_str))
+ tmp_str++;
}
-
- loop++;
}
- if (adev->powerplay.pp_funcs->set_power_profile_state)
- ret = amdgpu_dpm_set_power_profile_state(adev, request);
-
- if (ret)
- count = -EINVAL;
+ parameter[parameter_size] = profile_mode;
+ if (adev->powerplay.pp_funcs->set_power_profile_mode)
+ ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
+ if (!ret)
+ return count;
fail:
- return count;
-}
-
-static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amd_pp_profile request = {0};
-
- request.type = AMD_PP_GFX_PROFILE;
-
- return amdgpu_set_pp_power_profile(dev, buf, count, &request);
-}
-
-static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amd_pp_profile request = {0};
-
- request.type = AMD_PP_COMPUTE_PROFILE;
-
- return amdgpu_set_pp_power_profile(dev, buf, count, &request);
+ return -EINVAL;
}
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
@@ -767,12 +761,12 @@ static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
amdgpu_get_pp_mclk_od,
amdgpu_set_pp_mclk_od);
-static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_gfx_power_profile,
- amdgpu_set_pp_gfx_power_profile);
-static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_compute_power_profile,
- amdgpu_set_pp_compute_power_profile);
+static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_power_profile_mode,
+ amdgpu_set_pp_power_profile_mode);
+static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_od_clk_voltage,
+ amdgpu_set_pp_od_clk_voltage);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -780,17 +774,23 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
struct drm_device *ddev = adev->ddev;
- int temp;
+ int r, temp, size = sizeof(temp);
/* Can't get temperature when the card is off */
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->powerplay.pp_funcs->get_temperature)
- temp = 0;
- else
- temp = amdgpu_dpm_get_temperature(adev);
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* get the temperature */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
+ (void *)&temp, &size);
+ if (r)
+ return r;
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
@@ -835,6 +835,11 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err;
int value;
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (!adev->powerplay.pp_funcs->set_fan_control_mode)
return -EINVAL;
@@ -869,6 +874,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
int err;
u32 value;
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
err = kstrtou32(buf, 10, &value);
if (err)
return err;
@@ -892,6 +902,11 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
if (err)
@@ -911,6 +926,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
+ /* Can't adjust fan when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
if (err)
@@ -920,6 +940,175 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return sprintf(buf, "%i\n", speed);
}
+static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ struct drm_device *ddev = adev->ddev;
+ u32 vddgfx;
+ int r, size = sizeof(vddgfx);
+
+ /* Can't get voltage when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* get the voltage */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
+ (void *)&vddgfx, &size);
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
+}
+
+static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "vddgfx\n");
+}
+
+static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ struct drm_device *ddev = adev->ddev;
+ u32 vddnb;
+ int r, size = sizeof(vddnb);
+
+ /* only APUs have vddnb */
+ if (!(adev->flags & AMD_IS_APU))
+ return -EINVAL;
+
+ /* Can't get voltage when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* get the voltage */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
+ (void *)&vddnb, &size);
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
+}
+
+static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "vddnb\n");
+}
+
+static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ struct drm_device *ddev = adev->ddev;
+ struct pp_gpu_power query = {0};
+ int r, size = sizeof(query);
+ unsigned uw;
+
+ /* Can't get power when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* get the power */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
+ (void *)&query, &size);
+ if (r)
+ return r;
+
+ /* convert to microwatts */
+ uw = (query.average_gpu_power >> 8) * 1000000;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", uw);
+}
+
+static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%i\n", 0);
+}
+
+static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ uint32_t limit = 0;
+
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
+ return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else {
+ return snprintf(buf, PAGE_SIZE, "\n");
+ }
+}
+
+static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ uint32_t limit = 0;
+
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
+ return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else {
+ return snprintf(buf, PAGE_SIZE, "\n");
+ }
+}
+
+
+static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err;
+ u32 value;
+
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+
+ value = value / 1000000; /* convert to Watt */
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+ err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
+ if (err)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
@@ -928,6 +1117,14 @@ static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
+static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
+static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
+static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
+static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -938,6 +1135,14 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_label.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_label.dev_attr.attr,
+ &sensor_dev_attr_power1_average.dev_attr.attr,
+ &sensor_dev_attr_power1_cap_max.dev_attr.attr,
+ &sensor_dev_attr_power1_cap_min.dev_attr.attr,
+ &sensor_dev_attr_power1_cap.dev_attr.attr,
NULL
};
@@ -948,9 +1153,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
- /* no skipping for powerplay */
- if (adev->powerplay.cgs_device)
- return effective_mode;
+ /* handle non-powerplay limitations */
+ if (!adev->powerplay.pp_handle) {
+ /* Skip fan attributes if fan is not present */
+ if (adev->pm.no_fan &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ return 0;
+ /* requires powerplay */
+ if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
+ return 0;
+ }
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
@@ -962,14 +1177,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
- /* Skip fan attributes if fan is not present */
- if (adev->pm.no_fan &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
-
/* mask fan attributes if we have no bindings for this asic to expose */
if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
@@ -983,6 +1190,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR;
+ if ((adev->flags & AMD_IS_APU) &&
+ (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
+ return 0;
+
/* hide max/min values if we can't both query and manage the fan */
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
!adev->powerplay.pp_funcs->get_fan_speed_percent) &&
@@ -990,8 +1203,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
- /* requires powerplay */
- if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
+ /* only APUs have vddnb */
+ if (!(adev->flags & AMD_IS_APU) &&
+ (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
return effective_mode;
@@ -1014,13 +1229,15 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
pm.dpm.thermal.work);
/* switch to the thermal state */
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+ int temp, size = sizeof(temp);
if (!adev->pm.dpm_enabled)
return;
- if (adev->powerplay.pp_funcs->get_temperature) {
- int temp = amdgpu_dpm_get_temperature(adev);
-
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor &&
+ !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
+ (void *)&temp, &size)) {
if (temp < adev->pm.dpm.thermal.min_temp)
/* switch back the user state */
dpm_state = adev->pm.dpm.user_state;
@@ -1279,16 +1496,16 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
amdgpu_pm_compute_clocks(adev);
} else {
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = false;
mutex_unlock(&adev->pm.mutex);
@@ -1320,9 +1537,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled == 0)
return 0;
- if (adev->powerplay.pp_funcs->get_temperature == NULL)
- return 0;
-
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
hwmon_groups);
@@ -1392,20 +1606,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
ret = device_create_file(adev->dev,
- &dev_attr_pp_gfx_power_profile);
+ &dev_attr_pp_power_profile_mode);
if (ret) {
DRM_ERROR("failed to create device file "
- "pp_gfx_power_profile\n");
+ "pp_power_profile_mode\n");
return ret;
}
ret = device_create_file(adev->dev,
- &dev_attr_pp_compute_power_profile);
+ &dev_attr_pp_od_clk_voltage);
if (ret) {
DRM_ERROR("failed to create device file "
- "pp_compute_power_profile\n");
+ "pp_od_clk_voltage\n");
return ret;
}
-
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1438,9 +1651,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
device_remove_file(adev->dev,
- &dev_attr_pp_gfx_power_profile);
+ &dev_attr_pp_power_profile_mode);
device_remove_file(adev->dev,
- &dev_attr_pp_compute_power_profile);
+ &dev_attr_pp_od_clk_voltage);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -1463,7 +1676,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
}
if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.new_active_crtcs = 0;
@@ -1513,6 +1726,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDGFX)\n", value);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
@@ -1585,7 +1802,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *ddev = adev->ddev;
u32 flags = 0;
- amdgpu_get_clockgating_state(adev, &flags);
+ amdgpu_device_ip_get_clockgating_state(adev, &flags);
seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
amdgpu_parse_cg_state(m, flags);
seq_printf(m, "\n");
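
The amdgpu_pm.c hunks above expose GPU power draw (power1_average) and a writable power cap (power1_cap, bounded by power1_cap_min/power1_cap_max) through standard hwmon attributes; all of them are in microwatts, as the conversions in amdgpu_hwmon_show_power_avg() and amdgpu_hwmon_set_power_cap() show. Below is a minimal userspace sketch, not part of the patch, that reads the average power and programs a 120 W cap; the hwmon0 index is a placeholder that varies per system, and writing power1_cap requires sufficient privileges.

#include <stdio.h>

#define HWMON "/sys/class/hwmon/hwmon0"	/* placeholder: index varies per system */

int main(void)
{
	char path[256];
	unsigned long uw;
	FILE *f;

	/* power1_average reports averaged GPU power draw in microwatts */
	snprintf(path, sizeof(path), "%s/power1_average", HWMON);
	f = fopen(path, "r");
	if (!f || fscanf(f, "%lu", &uw) != 1) {
		perror("power1_average");
		return 1;
	}
	fclose(f);
	printf("GPU power: %.2f W\n", uw / 1000000.0);

	/* power1_cap is also in microwatts; cap the board at 120 W */
	snprintf(path, sizeof(path), "%s/power1_cap", HWMON);
	f = fopen(path, "w");
	if (!f) {
		perror("power1_cap");
		return 1;
	}
	fprintf(f, "%lu\n", 120UL * 1000000UL);
	fclose(f);
	return 0;
}
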
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
deleted file mode 100644
index 5f5aa5f..0000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "atom.h"
-#include "amdgpu.h"
-#include "amd_shared.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include "amdgpu_pm.h"
-#include <drm/amdgpu_drm.h>
-#include "amdgpu_powerplay.h"
-#include "si_dpm.h"
-#include "cik_dpm.h"
-#include "vi_dpm.h"
-
-static int amdgpu_pp_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amd_powerplay *amd_pp;
- int ret = 0;
-
- amd_pp = &(adev->powerplay);
- amd_pp->pp_handle = (void *)adev;
-
- switch (adev->asic_type) {
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- case CHIP_POLARIS12:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_TOPAZ:
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_VEGA10:
- case CHIP_RAVEN:
- amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
- amd_pp->ip_funcs = &pp_ip_funcs;
- amd_pp->pp_funcs = &pp_dpm_funcs;
- break;
- /* These chips don't have powerplay implemenations */
-#ifdef CONFIG_DRM_AMDGPU_SI
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- amd_pp->ip_funcs = &si_dpm_ip_funcs;
- amd_pp->pp_funcs = &si_dpm_funcs;
- break;
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- if (amdgpu_dpm == -1) {
- amd_pp->ip_funcs = &ci_dpm_ip_funcs;
- amd_pp->pp_funcs = &ci_dpm_funcs;
- } else {
- amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
- amd_pp->ip_funcs = &pp_ip_funcs;
- amd_pp->pp_funcs = &pp_dpm_funcs;
- }
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- case CHIP_KAVERI:
- amd_pp->ip_funcs = &kv_dpm_ip_funcs;
- amd_pp->pp_funcs = &kv_dpm_funcs;
- break;
-#endif
- default:
- ret = -EINVAL;
- break;
- }
-
- if (adev->powerplay.ip_funcs->early_init)
- ret = adev->powerplay.ip_funcs->early_init(
- amd_pp->cgs_device ? amd_pp->cgs_device :
- amd_pp->pp_handle);
-
- return ret;
-}
-
-
-static int amdgpu_pp_late_init(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->late_init)
- ret = adev->powerplay.ip_funcs->late_init(
- adev->powerplay.pp_handle);
-
- return ret;
-}
-
-static int amdgpu_pp_sw_init(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->sw_init)
- ret = adev->powerplay.ip_funcs->sw_init(
- adev->powerplay.pp_handle);
-
- return ret;
-}
-
-static int amdgpu_pp_sw_fini(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->sw_fini)
- ret = adev->powerplay.ip_funcs->sw_fini(
- adev->powerplay.pp_handle);
- if (ret)
- return ret;
-
- return ret;
-}
-
-static int amdgpu_pp_hw_init(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
- amdgpu_ucode_init_bo(adev);
-
- if (adev->powerplay.ip_funcs->hw_init)
- ret = adev->powerplay.ip_funcs->hw_init(
- adev->powerplay.pp_handle);
-
- return ret;
-}
-
-static int amdgpu_pp_hw_fini(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->hw_fini)
- ret = adev->powerplay.ip_funcs->hw_fini(
- adev->powerplay.pp_handle);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
- amdgpu_ucode_fini_bo(adev);
-
- return ret;
-}
-
-static void amdgpu_pp_late_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->late_fini)
- adev->powerplay.ip_funcs->late_fini(
- adev->powerplay.pp_handle);
-
- if (adev->powerplay.cgs_device)
- amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
-}
-
-static int amdgpu_pp_suspend(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->suspend)
- ret = adev->powerplay.ip_funcs->suspend(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_resume(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->resume)
- ret = adev->powerplay.ip_funcs->resume(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->set_clockgating_state)
- ret = adev->powerplay.ip_funcs->set_clockgating_state(
- adev->powerplay.pp_handle, state);
- return ret;
-}
-
-static int amdgpu_pp_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->set_powergating_state)
- ret = adev->powerplay.ip_funcs->set_powergating_state(
- adev->powerplay.pp_handle, state);
- return ret;
-}
-
-
-static bool amdgpu_pp_is_idle(void *handle)
-{
- bool ret = true;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->is_idle)
- ret = adev->powerplay.ip_funcs->is_idle(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_wait_for_idle(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->wait_for_idle)
- ret = adev->powerplay.ip_funcs->wait_for_idle(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_soft_reset(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->soft_reset)
- ret = adev->powerplay.ip_funcs->soft_reset(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
- .name = "amdgpu_powerplay",
- .early_init = amdgpu_pp_early_init,
- .late_init = amdgpu_pp_late_init,
- .sw_init = amdgpu_pp_sw_init,
- .sw_fini = amdgpu_pp_sw_fini,
- .hw_init = amdgpu_pp_hw_init,
- .hw_fini = amdgpu_pp_hw_fini,
- .late_fini = amdgpu_pp_late_fini,
- .suspend = amdgpu_pp_suspend,
- .resume = amdgpu_pp_resume,
- .is_idle = amdgpu_pp_is_idle,
- .wait_for_idle = amdgpu_pp_wait_for_idle,
- .soft_reset = amdgpu_pp_soft_reset,
- .set_clockgating_state = amdgpu_pp_set_clockgating_state,
- .set_powergating_state = amdgpu_pp_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index ae9c106..4b584cb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -26,9 +26,12 @@
#include <drm/drmP.h>
#include "amdgpu.h"
+#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
+static const struct dma_buf_ops amdgpu_dmabuf_ops;
+
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -102,59 +105,95 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
int ret;
ww_mutex_lock(&resv->lock, NULL);
- ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo);
- ww_mutex_unlock(&resv->lock);
+ ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
+ resv, &bo);
if (ret)
- return ERR_PTR(ret);
+ goto error;
- bo->prime_shared_count = 1;
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+ if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+ bo->prime_shared_count = 1;
+
+ ww_mutex_unlock(&resv->lock);
return &bo->gem_base;
+
+error:
+ ww_mutex_unlock(&resv->lock);
+ return ERR_PTR(ret);
}
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
+static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+ struct dma_buf_attachment *attach)
{
+ struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- long ret = 0;
-
- ret = amdgpu_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return ret;
-
- /*
- * Wait for all shared fences to complete before we switch to future
- * use of exclusive fence on this prime shared bo.
- */
- ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (unlikely(ret < 0)) {
- DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
- amdgpu_bo_unreserve(bo);
- return ret;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ long r;
+
+ r = drm_gem_map_attach(dma_buf, target_dev, attach);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ goto error_detach;
+
+
+ if (attach->dev->driver != adev->dev->driver) {
+ /*
+ * Wait for all shared fences to complete before we switch to future
+ * use of exclusive fence on this prime shared bo.
+ */
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(r < 0)) {
+ DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+ goto error_unreserve;
+ }
}
/* pin buffer into GTT */
- ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
- if (likely(ret == 0))
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ if (r)
+ goto error_unreserve;
+
+ if (attach->dev->driver != adev->dev->driver)
bo->prime_shared_count++;
+error_unreserve:
amdgpu_bo_unreserve(bo);
- return ret;
+
+error_detach:
+ if (r)
+ drm_gem_map_detach(dma_buf, attach);
+ return r;
}
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
+static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
{
+ struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int ret = 0;
ret = amdgpu_bo_reserve(bo, true);
if (unlikely(ret != 0))
- return;
+ goto error;
amdgpu_bo_unpin(bo);
- if (bo->prime_shared_count)
+ if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
bo->prime_shared_count--;
amdgpu_bo_unreserve(bo);
+
+error:
+ drm_gem_map_detach(dma_buf, attach);
}
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
@@ -164,6 +203,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
return bo->tbo.resv;
}
+static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction direction)
+{
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { true, false };
+ u32 domain = amdgpu_display_framebuffer_domains(adev);
+ int ret;
+ bool reads = (direction == DMA_BIDIRECTIONAL ||
+ direction == DMA_FROM_DEVICE);
+
+ if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+ return 0;
+
+ /* move to gtt */
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ }
+
+ amdgpu_bo_unreserve(bo);
+ return ret;
+}
+
+static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+ .attach = amdgpu_gem_map_attach,
+ .detach = amdgpu_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .begin_cpu_access = amdgpu_gem_begin_cpu_access,
+ .map = drm_gem_dmabuf_kmap,
+ .map_atomic = drm_gem_dmabuf_kmap_atomic,
+ .unmap = drm_gem_dmabuf_kunmap,
+ .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags)
@@ -176,7 +259,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
return ERR_PTR(-EPERM);
buf = drm_gem_prime_export(dev, gobj, flags);
- if (!IS_ERR(buf))
+ if (!IS_ERR(buf)) {
buf->file->f_mapping = dev->anon_inode->i_mapping;
+ buf->ops = &amdgpu_dmabuf_ops;
+ }
+
return buf;
}
+
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj;
+
+ if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+ obj = dma_buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_get(obj);
+ return obj;
+ }
+ }
+
+ return drm_gem_prime_import(dev, dma_buf);
+}
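
The amdgpu_prime.c changes above install a driver-private amdgpu_dmabuf_ops table on export and, in amdgpu_gem_prime_import(), recognise buffers the device exported itself by comparing dma_buf->ops against that table, taking a GEM reference instead of doing a full foreign import. A standalone sketch of that ops-pointer check follows, using invented types (buf_ops, buffer) rather than the kernel dma-buf API.

#include <stdio.h>

struct buf_ops { const char *name; };
struct buffer { const struct buf_ops *ops; void *priv; };

static const struct buf_ops my_ops = { "my-driver" };
static const struct buf_ops other_ops = { "other-driver" };

static void import_buffer(struct buffer *buf)
{
	if (buf->ops == &my_ops) {
		/* our own export: just take another reference on the backing object */
		printf("self-import fast path (%s)\n", buf->ops->name);
		return;
	}
	/* foreign buffer: fall back to the generic import path */
	printf("generic import (%s)\n", buf->ops->name);
}

int main(void)
{
	struct buffer ours = { &my_ops, NULL };
	struct buffer theirs = { &other_ops, NULL };

	import_buffer(&ours);
	import_buffer(&theirs);
	return 0;
}
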
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 447d446..19e71f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -51,29 +51,11 @@ static int psp_sw_init(void *handle)
switch (adev->asic_type) {
case CHIP_VEGA10:
- psp->init_microcode = psp_v3_1_init_microcode;
- psp->bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv;
- psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos;
- psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
- psp->ring_init = psp_v3_1_ring_init;
- psp->ring_create = psp_v3_1_ring_create;
- psp->ring_stop = psp_v3_1_ring_stop;
- psp->ring_destroy = psp_v3_1_ring_destroy;
- psp->cmd_submit = psp_v3_1_cmd_submit;
- psp->compare_sram_data = psp_v3_1_compare_sram_data;
- psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
- psp->mode1_reset = psp_v3_1_mode1_reset;
+ case CHIP_VEGA12:
+ psp_v3_1_set_psp_funcs(psp);
break;
case CHIP_RAVEN:
- psp->init_microcode = psp_v10_0_init_microcode;
- psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
- psp->ring_init = psp_v10_0_ring_init;
- psp->ring_create = psp_v10_0_ring_create;
- psp->ring_stop = psp_v10_0_ring_stop;
- psp->ring_destroy = psp_v10_0_ring_destroy;
- psp->cmd_submit = psp_v10_0_cmd_submit;
- psp->compare_sram_data = psp_v10_0_compare_sram_data;
- psp->mode1_reset = psp_v10_0_mode1_reset;
+ psp_v10_0_set_psp_funcs(psp);
break;
default:
return -EINVAL;
@@ -81,6 +63,9 @@ static int psp_sw_init(void *handle)
psp->adev = adev;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
ret = psp_init_microcode(psp);
if (ret) {
DRM_ERROR("Failed to load psp firmware!\n");
@@ -94,6 +79,9 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
@@ -264,7 +252,7 @@ static int psp_hw_start(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
int ret;
- if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+ if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
ret = psp_bootloader_load_sysdrv(psp);
if (ret)
return ret;
@@ -334,23 +322,26 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+ goto skip_memalloc;
+
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd)
return -ENOMEM;
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- AMDGPU_GEM_DOMAIN_GTT,
- &psp->fw_pri_bo,
- &psp->fw_pri_mc_addr,
- &psp->fw_pri_buf);
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr,
+ &psp->fw_pri_buf);
if (ret)
goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->fence_buf_bo,
- &psp->fence_buf_mc_addr,
- &psp->fence_buf);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr,
+ &psp->fence_buf);
if (ret)
goto failed_mem2;
@@ -375,6 +366,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (ret)
goto failed_mem;
+skip_memalloc:
ret = psp_hw_start(psp);
if (ret)
goto failed_mem;
@@ -468,6 +460,9 @@ static int psp_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
@@ -508,19 +503,8 @@ failed:
return ret;
}
-static bool psp_check_reset(void* handle)
+int psp_gpu_reset(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->flags & AMD_IS_APU)
- return true;
-
- return false;
-}
-
-static int psp_reset(void* handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return psp_mode1_reset(&adev->psp);
}
@@ -567,9 +551,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
.suspend = psp_suspend,
.resume = psp_resume,
.is_idle = NULL,
- .check_soft_reset = psp_check_reset,
+ .check_soft_reset = NULL,
.wait_for_idle = NULL,
- .soft_reset = psp_reset,
+ .soft_reset = NULL,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index ce465455..1292096 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -33,6 +33,8 @@
#define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
+struct psp_context;
+
enum psp_ring_type
{
PSP_RING_TYPE__INVALID = 0,
@@ -53,12 +55,8 @@ struct psp_ring
uint32_t ring_size;
};
-struct psp_context
+struct psp_funcs
{
- struct amdgpu_device *adev;
- struct psp_ring km_ring;
- struct psp_gfx_cmd_resp *cmd;
-
int (*init_microcode)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
@@ -77,6 +75,15 @@ struct psp_context
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
+};
+
+struct psp_context
+{
+ struct amdgpu_device *adev;
+ struct psp_ring km_ring;
+ struct psp_gfx_cmd_resp *cmd;
+
+ const struct psp_funcs *funcs;
/* fence buffer */
struct amdgpu_bo *fw_pri_bo;
@@ -123,25 +130,25 @@ struct amdgpu_psp_funcs {
enum AMDGPU_UCODE_ID);
};
-#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
-#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
-#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
-#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type))
-#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
+#define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
+#define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
+#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
+#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
+#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
- (psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
+ (psp)->funcs->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
#define psp_compare_sram_data(psp, ucode, type) \
- (psp)->compare_sram_data((psp), (ucode), (type))
+ (psp)->funcs->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \
- ((psp)->init_microcode ? (psp)->init_microcode((psp)) : 0)
+ ((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
#define psp_bootloader_load_sysdrv(psp) \
- ((psp)->bootloader_load_sysdrv ? (psp)->bootloader_load_sysdrv((psp)) : 0)
+ ((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
- ((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
+ ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
- ((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)
+ ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
#define psp_mode1_reset(psp) \
- ((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false)
+ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
extern const struct amd_ip_funcs psp_ip_funcs;
@@ -151,4 +158,6 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
+int psp_gpu_reset(struct amdgpu_device *adev);
+
#endif
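
amdgpu_psp.h above moves the per-ASIC callbacks out of struct psp_context into a separate const struct psp_funcs, and the wrapper macros keep their NULL checks so a chip may leave optional hooks (init_microcode, mode1_reset, ...) unimplemented. A standalone sketch of the same ops-table-plus-guarded-wrapper pattern follows, with invented names (ctx, ctx_funcs) rather than the PSP API.

#include <stdio.h>

struct ctx;

struct ctx_funcs {
	int (*init)(struct ctx *c);	/* optional hook */
	int (*reset)(struct ctx *c);	/* optional hook */
};

struct ctx {
	const struct ctx_funcs *funcs;
	int state;
};

/* NULL-tolerant wrappers, mirroring psp_init_microcode()/psp_mode1_reset() */
#define ctx_init(c)  ((c)->funcs->init  ? (c)->funcs->init(c)  : 0)
#define ctx_reset(c) ((c)->funcs->reset ? (c)->funcs->reset(c) : 0)

static int chip_a_init(struct ctx *c)
{
	c->state = 1;
	return 0;
}

/* chip A only implements init; reset falls back to the wrapper's default */
static const struct ctx_funcs chip_a_funcs = { .init = chip_a_init };

int main(void)
{
	struct ctx c = { .funcs = &chip_a_funcs };
	int ret;

	ret = ctx_init(&c);		/* provided: runs chip_a_init() */
	printf("init: %d state: %d\n", ret, c.state);
	ret = ctx_reset(&c);		/* not provided: wrapper returns 0 */
	printf("reset: %d\n", ret);
	return 0;
}
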
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 93d8661..262c126 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -225,7 +225,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
/* Right now all IPs have only one instance - multiple rings. */
if (instance != 0) {
- DRM_ERROR("invalid ip instance: %d\n", instance);
+ DRM_DEBUG("invalid ip instance: %d\n", instance);
return -EINVAL;
}
@@ -255,13 +255,13 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
ip_num_rings = adev->vcn.num_enc_rings;
break;
default:
- DRM_ERROR("unknown ip type: %d\n", hw_ip);
+ DRM_DEBUG("unknown ip type: %d\n", hw_ip);
return -EINVAL;
}
if (ring >= ip_num_rings) {
- DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
- ring, ip_num_rings, hw_ip);
+ DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
+ ring, ip_num_rings, hw_ip);
return -EINVAL;
}
@@ -292,7 +292,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
default:
*out_ring = NULL;
r = -EINVAL;
- DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
+ DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip);
}
out_unlock:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a98fbbb..d5f526f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -164,7 +164,7 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Release a request for executing at @priority
*/
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
int i;
@@ -175,7 +175,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
return;
/* no need to restore if the job is already at the lowest priority */
- if (priority == AMD_SCHED_PRIORITY_NORMAL)
+ if (priority == DRM_SCHED_PRIORITY_NORMAL)
return;
mutex_lock(&ring->priority_mutex);
@@ -184,8 +184,8 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
goto out_unlock;
/* decay priority to the next level with a job available */
- for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
- if (i == AMD_SCHED_PRIORITY_NORMAL
+ for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ if (i == DRM_SCHED_PRIORITY_NORMAL
|| atomic_read(&ring->num_jobs[i])) {
ring->priority = i;
ring->funcs->set_priority(ring, i);
@@ -206,7 +206,7 @@ out_unlock:
* Request a ring's priority to be raised to @priority (refcounted).
*/
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
if (!ring->funcs->set_priority)
return;
@@ -263,25 +263,25 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_wb_get(adev, &ring->rptr_offs);
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->wptr_offs);
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->fence_offs);
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
if (r) {
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
return r;
@@ -317,12 +317,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
- ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+ ring->priority = DRM_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
INIT_LIST_HEAD(&ring->lru_list);
amdgpu_ring_lru_touch(adev, ring);
- for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+ for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
if (amdgpu_debugfs_ring_init(adev, ring)) {
@@ -348,11 +348,11 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
- amdgpu_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
@@ -360,6 +360,9 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_debugfs_ring_fini(ring);
+ dma_fence_put(ring->vmid_wait);
+ ring->vmid_wait = NULL;
+
ring->adev->rings[ring->idx] = NULL;
}
@@ -481,7 +484,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
result = 0;
if (*pos < 12) {
- early[0] = amdgpu_ring_get_rptr(ring);
+ early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
early[2] = ring->wptr & ring->buf_mask;
for (i = *pos / 4; i < 3 && size; i++) {
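
The amdgpu_ring.c hunks above keep the refcounted ring-priority scheme while switching it to the drm_sched enums: amdgpu_ring_priority_get() raises the ring's effective priority, and amdgpu_ring_priority_put() decays it to the highest level that still has queued jobs, never dropping below NORMAL. A standalone, single-threaded sketch of that decay rule follows, with invented names and plain counters instead of atomics and the priority mutex.

#include <stdio.h>

enum prio { PRIO_MIN, PRIO_LOW = PRIO_MIN, PRIO_NORMAL, PRIO_HIGH, PRIO_MAX };

static int num_jobs[PRIO_MAX];
static enum prio ring_prio = PRIO_NORMAL;

static void priority_get(enum prio p)
{
	num_jobs[p]++;
	if (p > ring_prio)
		ring_prio = p;	/* raise the ring for the new job */
}

static void priority_put(enum prio p)
{
	int i;

	num_jobs[p]--;
	if (p == PRIO_NORMAL)
		return;
	/* decay to the next level that still has work, or back to NORMAL */
	for (i = p; i >= PRIO_MIN; i--) {
		if (i == PRIO_NORMAL || num_jobs[i]) {
			ring_prio = i;
			break;
		}
	}
}

int main(void)
{
	priority_get(PRIO_HIGH);
	priority_get(PRIO_HIGH);
	priority_put(PRIO_HIGH);
	printf("after one put: %d\n", (int)ring_prio);	/* still PRIO_HIGH (2) */
	priority_put(PRIO_HIGH);
	printf("after both puts: %d\n", (int)ring_prio);	/* back to PRIO_NORMAL (1) */
	return 0;
}
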
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index b18c2b96..1a59118 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -25,7 +25,8 @@
#define __AMDGPU_RING_H__
#include <drm/amdgpu_drm.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_print.h>
/* max number of rings */
#define AMDGPU_MAX_RINGS 18
@@ -35,8 +36,9 @@
#define AMDGPU_MAX_UVD_ENC_RINGS 2
/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
+#define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul)
+#define AMDGPU_FENCE_OWNER_VM ((void *)1ul)
+#define AMDGPU_FENCE_OWNER_KFD ((void *)2ul)
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
@@ -79,8 +81,7 @@ struct amdgpu_fence_driver {
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion_ring(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission);
@@ -122,14 +123,13 @@ struct amdgpu_ring_funcs {
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
+ unsigned vmid, bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring);
- void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
@@ -152,17 +152,19 @@ struct amdgpu_ring_funcs {
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+ void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */
void (*set_priority) (struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
};
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
+ struct drm_gpu_scheduler sched;
struct list_head lru_list;
struct amdgpu_bo *ring_obj;
@@ -187,6 +189,7 @@ struct amdgpu_ring {
uint64_t eop_gpu_addr;
u32 doorbell_index;
bool use_doorbell;
+ bool use_pollmem;
unsigned wptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
@@ -195,9 +198,10 @@ struct amdgpu_ring {
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng;
+ struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
- atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
+ atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
@@ -213,9 +217,9 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 3144400..fb1667b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -63,21 +63,27 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
INIT_LIST_HEAD(&sa_manager->flist[i]);
- r = amdgpu_bo_create(adev, size, align, true, domain,
- 0, NULL, NULL, 0, &sa_manager->bo);
+ r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
+ &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
+ memset(sa_manager->cpu_ptr, 0, sa_manager->size);
return r;
}
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager)
+ struct amdgpu_sa_manager *sa_manager)
{
struct amdgpu_sa_bo *sa_bo, *tmp;
+ if (sa_manager->bo == NULL) {
+ dev_err(adev->dev, "no bo for sa manager\n");
+ return;
+ }
+
if (!list_empty(&sa_manager->olist)) {
sa_manager->hole = &sa_manager->olist,
amdgpu_sa_bo_try_free(sa_manager);
@@ -88,55 +94,9 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
amdgpu_sa_bo_remove_locked(sa_bo);
}
- amdgpu_bo_unref(&sa_manager->bo);
- sa_manager->size = 0;
-}
-
-int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager)
-{
- int r;
-
- if (sa_manager->bo == NULL) {
- dev_err(adev->dev, "no bo for sa manager\n");
- return -EINVAL;
- }
- /* map the buffer */
- r = amdgpu_bo_reserve(sa_manager->bo, false);
- if (r) {
- dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
- return r;
- }
- r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(sa_manager->bo);
- dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
- memset(sa_manager->cpu_ptr, 0, sa_manager->size);
- amdgpu_bo_unreserve(sa_manager->bo);
- return r;
-}
-
-int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager)
-{
- int r;
-
- if (sa_manager->bo == NULL) {
- dev_err(adev->dev, "no bo for sa manager\n");
- return -EINVAL;
- }
-
- r = amdgpu_bo_reserve(sa_manager->bo, true);
- if (!r) {
- amdgpu_bo_kunmap(sa_manager->bo);
- amdgpu_bo_unpin(sa_manager->bo);
- amdgpu_bo_unreserve(sa_manager->bo);
- }
- return r;
+ amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
+ sa_manager->size = 0;
}
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
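
The amdgpu_sa.c hunks above drop the manager's separate create/pin/kmap (start) and kunmap/unpin (suspend) paths in favour of amdgpu_bo_create_kernel() and amdgpu_bo_free_kernel(), which return an already pinned and CPU-mapped buffer. Below is a standalone, heap-based sketch, assuming nothing about the kernel API, of folding allocate+map+zero and unmap+free into one pair of helpers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bo { size_t size; void *cpu_ptr; };

/* combined allocate + "pin" + map + zero, like amdgpu_bo_create_kernel() */
static int bo_create_kernel(size_t size, struct bo **bop, void **cpu_ptr)
{
	struct bo *bo = malloc(sizeof(*bo));

	if (!bo)
		return -1;
	bo->size = size;
	bo->cpu_ptr = calloc(1, size);	/* mapped and zeroed */
	if (!bo->cpu_ptr) {
		free(bo);
		return -1;
	}
	*bop = bo;
	*cpu_ptr = bo->cpu_ptr;
	return 0;
}

/* combined unmap + "unpin" + free, like amdgpu_bo_free_kernel() */
static void bo_free_kernel(struct bo **bop, void **cpu_ptr)
{
	if (!*bop)
		return;
	free((*bop)->cpu_ptr);
	free(*bop);
	*bop = NULL;
	*cpu_ptr = NULL;
}

int main(void)
{
	struct bo *bo;
	void *ptr;

	if (bo_create_kernel(4096, &bo, &ptr))
		return 1;
	memset(ptr, 0xaa, 64);		/* use the mapping, as the SA manager does */
	printf("mapped %zu bytes\n", bo->size);
	bo_free_kernel(&bo, &ptr);
	return 0;
}
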
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 290cc3f..86a0715 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -29,29 +29,29 @@
#include "amdgpu_vm.h"
-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
{
switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
- return AMD_SCHED_PRIORITY_HIGH_HW;
+ return DRM_SCHED_PRIORITY_HIGH_HW;
case AMDGPU_CTX_PRIORITY_HIGH:
- return AMD_SCHED_PRIORITY_HIGH_SW;
+ return DRM_SCHED_PRIORITY_HIGH_SW;
case AMDGPU_CTX_PRIORITY_NORMAL:
- return AMD_SCHED_PRIORITY_NORMAL;
+ return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
- return AMD_SCHED_PRIORITY_LOW;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_UNSET:
- return AMD_SCHED_PRIORITY_UNSET;
+ return DRM_SCHED_PRIORITY_UNSET;
default:
WARN(1, "Invalid context priority %d\n", amdgpu_priority);
- return AMD_SCHED_PRIORITY_INVALID;
+ return DRM_SCHED_PRIORITY_INVALID;
}
}
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
struct file *filp = fcheck(fd);
struct drm_file *file;
@@ -86,11 +86,11 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_sched *args = data;
struct amdgpu_device *adev = dev->dev_private;
- enum amd_sched_priority priority;
+ enum drm_sched_priority priority;
int r;
priority = amdgpu_to_sched_priority(args->in.priority);
- if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
+ if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
return -EINVAL;
switch (args->in.op) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index b28c067..2a1a0c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -27,7 +27,7 @@
#include <drm/drmP.h>
-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
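The amd_sched_* to drm_sched_* renames throughout this series track the scheduler's move to the shared drm/scheduler code. A rough sketch of the entity setup pattern as it appears in the ttm and uvd hunks below (ring and entity are placeholders; the trailing NULL fills the extra argument taken by the new drm_sched_entity_init()):

/* Sketch: bind an entity to a ring's scheduler at normal priority. */
struct drm_sched_rq *rq;
int r;

rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &entity, rq,
			  amdgpu_sched_jobs, NULL);
if (r)
	DRM_ERROR("Failed to set up scheduler entity\n");

/* ... submit jobs through the entity ... */

drm_sched_entity_fini(&ring->sched, &entity);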
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a4bf21f..2d6f5ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -31,10 +31,12 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
struct amdgpu_sync_entry {
struct hlist_node node;
struct dma_fence *fence;
+ bool explicit;
};
static struct kmem_cache *amdgpu_sync_slab;
@@ -63,7 +65,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (s_fence) {
struct amdgpu_ring *ring;
@@ -84,11 +86,20 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
*/
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence;
+ struct amdgpu_amdkfd_fence *kfd_fence;
+ if (!f)
+ return AMDGPU_FENCE_OWNER_UNDEFINED;
+
+ s_fence = to_drm_sched_fence(f);
if (s_fence)
return s_fence->owner;
+ kfd_fence = to_amdgpu_amdkfd_fence(f);
+ if (kfd_fence)
+ return AMDGPU_FENCE_OWNER_KFD;
+
return AMDGPU_FENCE_OWNER_UNDEFINED;
}
@@ -119,7 +130,7 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep,
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
{
struct amdgpu_sync_entry *e;
@@ -128,6 +139,10 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
continue;
amdgpu_sync_keep_later(&e->fence, f);
+
+ /* Preserve the explicit flag so we don't lose pipeline sync */
+ e->explicit |= explicit;
+
return true;
}
return false;
@@ -141,24 +156,25 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
*
*/
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f)
+ struct dma_fence *f, bool explicit)
{
struct amdgpu_sync_entry *e;
if (!f)
return 0;
-
if (amdgpu_sync_same_dev(adev, f) &&
amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
amdgpu_sync_keep_later(&sync->last_vm_update, f);
- if (amdgpu_sync_add_later(sync, f))
+ if (amdgpu_sync_add_later(sync, f, explicit))
return 0;
e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
if (!e)
return -ENOMEM;
+ e->explicit = explicit;
+
hash_add(sync->fences, &e->node, f->context);
e->fence = dma_fence_get(f);
return 0;
@@ -189,10 +205,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
/* always sync to the exclusive fence */
f = reservation_object_get_excl(resv);
- r = amdgpu_sync_fence(adev, sync, f);
-
- if (explicit_sync)
- return r;
+ r = amdgpu_sync_fence(adev, sync, f, false);
flist = reservation_object_get_list(resv);
if (!flist || r)
@@ -201,26 +214,33 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
reservation_object_held(resv));
+ /* We only want to trigger KFD eviction fences on
+ * evict or move jobs. Skip KFD fences otherwise.
+ */
+ fence_owner = amdgpu_sync_get_owner(f);
+ if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
+ owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ continue;
+
if (amdgpu_sync_same_dev(adev, f)) {
/* VM updates are only interesting
* for other VM updates and moves.
*/
- fence_owner = amdgpu_sync_get_owner(f);
if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
(fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
((owner == AMDGPU_FENCE_OWNER_VM) !=
(fence_owner == AMDGPU_FENCE_OWNER_VM)))
continue;
- /* Ignore fence from the same owner as
+ /* Ignore fences from the same owner or explicit syncs as
* long as it isn't undefined.
*/
if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
- fence_owner == owner)
+ (fence_owner == owner || explicit_sync))
continue;
}
- r = amdgpu_sync_fence(adev, sync, f);
+ r = amdgpu_sync_fence(adev, sync, f, false);
if (r)
break;
}
@@ -245,7 +265,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct dma_fence *f = e->fence;
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
@@ -275,19 +295,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* amdgpu_sync_get_fence - get the next fence from the sync object
*
* @sync: sync object to use
+ * @explicit: true if the next fence is explicit
*
* Get and removes the next fence from the sync object not signaled yet.
*/
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
struct dma_fence *f;
int i;
-
hash_for_each_safe(sync->fences, i, tmp, e, node) {
f = e->fence;
+ if (explicit)
+ *explicit = e->explicit;
hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e);
@@ -300,6 +322,41 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
return NULL;
}
+/**
+ * amdgpu_sync_clone - clone a sync object
+ *
+ * @source: sync object to clone
+ * @clone: pointer to destination sync object
+ *
+ * Adds references to all unsignaled fences in @source to @clone. Also
+ * removes signaled fences from @source while at it.
+ */
+int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
+{
+ struct amdgpu_sync_entry *e;
+ struct hlist_node *tmp;
+ struct dma_fence *f;
+ int i, r;
+
+ hash_for_each_safe(source->fences, i, tmp, e, node) {
+ f = e->fence;
+ if (!dma_fence_is_signaled(f)) {
+ r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
+ if (r)
+ return r;
+ } else {
+ hash_del(&e->node);
+ dma_fence_put(f);
+ kmem_cache_free(amdgpu_sync_slab, e);
+ }
+ }
+
+ dma_fence_put(clone->last_vm_update);
+ clone->last_vm_update = dma_fence_get(source->last_vm_update);
+
+ return 0;
+}
+
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
struct amdgpu_sync_entry *e;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 70d7e3a..10cf23a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -41,7 +41,7 @@ struct amdgpu_sync {
void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f);
+ struct dma_fence *f, bool explicit);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
@@ -49,7 +49,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit);
+int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
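A rough usage sketch of the extended sync API, assuming adev, a sync object and a fence f are in scope: the per-entry explicit flag survives the de-duplication in amdgpu_sync_add_later(), and amdgpu_sync_get_fence() reports it back so the caller can decide whether a pipeline sync is needed.

/* Sketch only: record an explicit dependency, then drain unsignaled fences. */
struct dma_fence *fence;
bool need_pipe_sync;
int r;

r = amdgpu_sync_fence(adev, &sync, f, true);	/* explicit dependency */
if (r)
	return r;

while ((fence = amdgpu_sync_get_fence(&sync, &need_pipe_sync))) {
	/* need_pipe_sync is true when this fence was added explicitly */
	dma_fence_put(fence);	/* the reference is handed to the caller */
}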
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index ed8c373..2dbe875 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+ n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
if (adev->rings[i])
n -= adev->rings[i]->ring_size;
@@ -59,9 +59,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, NULL, 0, &vram_obj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
+ ttm_bo_type_kernel, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -80,9 +79,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void **vram_start, **vram_end;
struct dma_fence *fence = NULL;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
- NULL, 0, gtt_obj + i);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, 0,
+ ttm_bo_type_kernel, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
@@ -142,10 +141,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
"0x%16llx/0x%16llx)\n",
i, *vram_start, gart_start,
(unsigned long long)
- (gart_addr - adev->mc.gart_start +
+ (gart_addr - adev->gmc.gart_start +
(void*)gart_start - gtt_map),
(unsigned long long)
- (vram_addr - adev->mc.vram_start +
+ (vram_addr - adev->gmc.vram_start +
(void*)gart_start - gtt_map));
amdgpu_bo_kunmap(vram_obj);
goto out_lclean_unpin;
@@ -187,10 +186,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
"0x%16llx/0x%16llx)\n",
i, *gart_start, vram_start,
(unsigned long long)
- (vram_addr - adev->mc.vram_start +
+ (vram_addr - adev->gmc.vram_start +
(void*)vram_start - vram_map),
(unsigned long long)
- (gart_addr - adev->mc.gart_start +
+ (gart_addr - adev->gmc.gart_start +
(void*)vram_start - vram_map));
amdgpu_bo_kunmap(gtt_obj[i]);
goto out_lclean_unpin;
@@ -200,7 +199,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
- gart_addr - adev->mc.gart_start);
+ gart_addr - adev->gmc.gart_start);
continue;
out_lclean_unpin:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 06525f2..532263a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -82,31 +82,31 @@ TRACE_EVENT(amdgpu_iv,
__field(unsigned, client_id)
__field(unsigned, src_id)
__field(unsigned, ring_id)
- __field(unsigned, vm_id)
- __field(unsigned, vm_id_src)
+ __field(unsigned, vmid)
+ __field(unsigned, vmid_src)
__field(uint64_t, timestamp)
__field(unsigned, timestamp_src)
- __field(unsigned, pas_id)
+ __field(unsigned, pasid)
__array(unsigned, src_data, 4)
),
TP_fast_assign(
__entry->client_id = iv->client_id;
__entry->src_id = iv->src_id;
__entry->ring_id = iv->ring_id;
- __entry->vm_id = iv->vm_id;
- __entry->vm_id_src = iv->vm_id_src;
+ __entry->vmid = iv->vmid;
+ __entry->vmid_src = iv->vmid_src;
__entry->timestamp = iv->timestamp;
__entry->timestamp_src = iv->timestamp_src;
- __entry->pas_id = iv->pas_id;
+ __entry->pasid = iv->pasid;
__entry->src_data[0] = iv->src_data[0];
__entry->src_data[1] = iv->src_data[1];
__entry->src_data[2] = iv->src_data[2];
__entry->src_data[3] = iv->src_data[3];
),
- TP_printk("client_id:%u src_id:%u ring:%u vm_id:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
+ TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x\n",
__entry->client_id, __entry->src_id,
- __entry->ring_id, __entry->vm_id,
- __entry->timestamp, __entry->pas_id,
+ __entry->ring_id, __entry->vmid,
+ __entry->timestamp, __entry->pasid,
__entry->src_data[0], __entry->src_data[1],
__entry->src_data[2], __entry->src_data[3])
);
@@ -217,24 +217,24 @@ TRACE_EVENT(amdgpu_vm_grab_id,
struct amdgpu_job *job),
TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
- __field(struct amdgpu_vm *, vm)
+ __field(u32, pasid)
__field(u32, ring)
- __field(u32, vm_id)
+ __field(u32, vmid)
__field(u32, vm_hub)
__field(u64, pd_addr)
__field(u32, needs_flush)
),
TP_fast_assign(
- __entry->vm = vm;
+ __entry->pasid = vm->pasid;
__entry->ring = ring->idx;
- __entry->vm_id = job->vm_id;
+ __entry->vmid = job->vmid;
__entry->vm_hub = ring->funcs->vmhub,
__entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush;
),
- TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
- __entry->vm, __entry->ring, __entry->vm_id,
+ TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+ __entry->pasid, __entry->ring, __entry->vmid,
__entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);
@@ -357,27 +357,49 @@ TRACE_EVENT(amdgpu_vm_copy_ptes,
);
TRACE_EVENT(amdgpu_vm_flush,
- TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
+ TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr),
- TP_ARGS(ring, vm_id, pd_addr),
+ TP_ARGS(ring, vmid, pd_addr),
TP_STRUCT__entry(
__field(u32, ring)
- __field(u32, vm_id)
+ __field(u32, vmid)
__field(u32, vm_hub)
__field(u64, pd_addr)
),
TP_fast_assign(
__entry->ring = ring->idx;
- __entry->vm_id = vm_id;
+ __entry->vmid = vmid;
__entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = pd_addr;
),
TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
- __entry->ring, __entry->vm_id,
+ __entry->ring, __entry->vmid,
__entry->vm_hub,__entry->pd_addr)
);
+DECLARE_EVENT_CLASS(amdgpu_pasid,
+ TP_PROTO(unsigned pasid),
+ TP_ARGS(pasid),
+ TP_STRUCT__entry(
+ __field(unsigned, pasid)
+ ),
+ TP_fast_assign(
+ __entry->pasid = pasid;
+ ),
+ TP_printk("pasid=%u", __entry->pasid)
+);
+
+DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_allocated,
+ TP_PROTO(unsigned pasid),
+ TP_ARGS(pasid)
+);
+
+DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
+ TP_PROTO(unsigned pasid),
+ TP_ARGS(pasid)
+);
+
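These DEFINE_EVENT lines generate trace_amdgpu_pasid_allocated() and trace_amdgpu_pasid_freed(); the call sites are presumably the PASID alloc/free helpers added elsewhere in this series, roughly:

/* Hypothetical call sites, shown for illustration only. */
trace_amdgpu_pasid_allocated(pasid);
/* ... later, when the PASID is released ... */
trace_amdgpu_pasid_freed(pasid);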
TRACE_EVENT(amdgpu_bo_list_set,
TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
TP_ARGS(list, bo),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ad5bf86..205da3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -46,6 +46,7 @@
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
@@ -76,7 +77,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
int r;
adev->mman.mem_global_referenced = false;
@@ -108,9 +109,9 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
- r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
- rq, amdgpu_sched_jobs);
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
+ rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
goto error_entity;
@@ -131,7 +132,7 @@ error_mem:
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
if (adev->mman.mem_global_referenced) {
- amd_sched_entity_fini(adev->mman.entity.sched,
+ drm_sched_entity_fini(adev->mman.entity.sched,
&adev->mman.entity);
mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
@@ -161,7 +162,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_TT:
man->func = &amdgpu_gtt_mgr_func;
- man->gpu_offset = adev->mc.gart_start;
+ man->gpu_offset = adev->gmc.gart_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -169,7 +170,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_VRAM:
/* "On-card" video ram */
man->func = &amdgpu_vram_mgr_func;
- man->gpu_offset = adev->mc.vram_start;
+ man->gpu_offset = adev->gmc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
@@ -203,6 +204,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
+ if (bo->type == ttm_bo_type_sg) {
+ placement->num_placement = 0;
+ placement->num_busy_placement = 0;
+ return;
+ }
+
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
@@ -213,13 +220,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = ttm_to_amdgpu_bo(bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (adev->mman.buffer_funcs &&
- adev->mman.buffer_funcs_ring &&
- adev->mman.buffer_funcs_ring->ready == false) {
+ if (!adev->mman.buffer_funcs_enabled) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
- } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
- unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
struct drm_mm_node *node = bo->mem.mm_node;
unsigned long pages_left;
@@ -260,6 +265,13 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+ /*
+ * Don't verify access for KFD BOs. They don't have a GEM
+ * object associated with them.
+ */
+ if (abo->kfd_bo)
+ return 0;
+
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
return drm_vma_node_verify_access(&abo->gem_base.vma_node,
@@ -282,8 +294,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
{
uint64_t addr = 0;
- if (mem->mem_type != TTM_PL_TT ||
- amdgpu_gtt_mgr_is_allocated(mem)) {
+ if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
addr = mm_node->start << PAGE_SHIFT;
addr += bo->bdev->man[mem->mem_type].gpu_offset;
}
@@ -332,7 +343,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE);
- if (!ring->ready) {
+ if (!adev->mman.buffer_funcs_enabled) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -369,7 +380,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
* dst to window 1
*/
if (src->mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+ !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
r = amdgpu_map_buffer(src->bo, src->mem,
PFN_UP(cur_size + src_page_offset),
src_node_start, 0, ring,
@@ -383,7 +394,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
}
if (dst->mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+ !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
r = amdgpu_map_buffer(dst->bo, dst->mem,
PFN_UP(cur_size + dst_page_offset),
dst_node_start, 1, ring,
@@ -467,9 +478,8 @@ error:
return r;
}
-static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev;
@@ -489,8 +499,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
placements.fpfn = 0;
placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
return r;
}
@@ -500,23 +509,22 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
goto out_cleanup;
}
- r = ttm_tt_bind(bo->ttm, &tmp_mem);
+ r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
if (unlikely(r)) {
goto out_cleanup;
}
- r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
}
-static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev;
@@ -536,16 +544,15 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
placements.fpfn = 0;
placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -554,10 +561,9 @@ out_cleanup:
return r;
}
-static int amdgpu_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
@@ -583,28 +589,24 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
amdgpu_move_null(bo, new_mem);
return 0;
}
- if (adev->mman.buffer_funcs == NULL ||
- adev->mman.buffer_funcs_ring == NULL ||
- !adev->mman.buffer_funcs_ring->ready) {
- /* use memcpy */
+
+ if (!adev->mman.buffer_funcs_enabled)
goto memcpy;
- }
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
- r = amdgpu_move_vram_ram(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
- r = amdgpu_move_ram_vram(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
} else {
- r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
+ new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r) {
return r;
}
@@ -628,6 +630,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+ struct drm_mm_node *mm_node = mem->mm_node;
mem->bus.addr = NULL;
mem->bus.offset = 0;
@@ -645,9 +648,18 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
- if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
+ if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
return -EINVAL;
- mem->bus.base = adev->mc.aper_base;
+ /* Only physically contiguous buffers get a CPU address here. For a
+ * contiguous buffer, the size of the first mm_node matches the number
+ * of pages in the ttm_mem_reg.
+ */
+ if (adev->mman.aper_base_kaddr &&
+ (mm_node->size == mem->num_pages))
+ mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
+ mem->bus.offset;
+
+ mem->bus.base = adev->gmc.aper_base;
mem->bus.is_iomem = true;
break;
default:
@@ -681,7 +693,6 @@ struct amdgpu_ttm_gup_task_list {
struct amdgpu_ttm_tt {
struct ttm_dma_tt ttm;
- struct amdgpu_device *adev;
u64 offset;
uint64_t userptr;
struct mm_struct *usermm;
@@ -690,7 +701,6 @@ struct amdgpu_ttm_tt {
struct list_head guptasks;
atomic_t mmu_invalidations;
uint32_t last_set_pages;
- struct list_head list;
};
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -840,6 +850,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void*)ttm;
uint64_t flags;
int r = 0;
@@ -861,44 +872,35 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
- if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
+ if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+ gtt->offset = AMDGPU_BO_INVALID_OFFSET;
return 0;
+ }
- spin_lock(&gtt->adev->gtt_list_lock);
- flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
- r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+ r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
- if (r) {
+ if (r)
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
- goto error_gart_bind;
- }
-
- list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
- spin_unlock(&gtt->adev->gtt_list_lock);
return r;
}
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
- return gtt && !list_empty(&gtt->list);
-}
-
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- struct ttm_tt *ttm = bo->ttm;
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
struct ttm_mem_reg tmp;
struct ttm_placement placement;
struct ttm_place placements;
+ uint64_t flags;
int r;
- if (!ttm || amdgpu_ttm_is_bound(ttm))
+ if (bo->mem.mem_type != TTM_PL_TT ||
+ amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
return 0;
tmp = bo->mem;
@@ -908,71 +910,67 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
if (unlikely(r))
return r;
- r = ttm_bo_move_ttm(bo, true, false, &tmp);
- if (unlikely(r))
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ gtt->offset = (u64)tmp.start << PAGE_SHIFT;
+ r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
+ bo->ttm->pages, gtt->ttm.dma_address, flags);
+ if (unlikely(r)) {
ttm_bo_mem_put(bo, &tmp);
- else
- bo->offset = (bo->mem.start << PAGE_SHIFT) +
- bo->bdev->man[bo->mem.mem_type].gpu_offset;
+ return r;
+ }
- return r;
+ ttm_bo_mem_put(bo, &bo->mem);
+ bo->mem = tmp;
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
+ bo->bdev->man[bo->mem.mem_type].gpu_offset;
+
+ return 0;
}
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
- struct amdgpu_ttm_tt *gtt, *tmp;
- struct ttm_mem_reg bo_mem;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
uint64_t flags;
int r;
- bo_mem.mem_type = TTM_PL_TT;
- spin_lock(&adev->gtt_list_lock);
- list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
- flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
- r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
- gtt->ttm.ttm.pages, gtt->ttm.dma_address,
- flags);
- if (r) {
- spin_unlock(&adev->gtt_list_lock);
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
- gtt->ttm.ttm.num_pages, gtt->offset);
- return r;
- }
- }
- spin_unlock(&adev->gtt_list_lock);
- return 0;
+ if (!gtt)
+ return 0;
+
+ flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
+ r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+ gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
+ if (r)
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ gtt->ttm.ttm.num_pages, gtt->offset);
+ return r;
}
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
if (gtt->userptr)
amdgpu_ttm_tt_unpin_userptr(ttm);
- if (!amdgpu_ttm_is_bound(ttm))
+ if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
return 0;
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
- spin_lock(&gtt->adev->gtt_list_lock);
- r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
- if (r) {
+ r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
+ if (r)
DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
gtt->ttm.ttm.num_pages, gtt->offset);
- goto error_unbind;
- }
- list_del_init(&gtt->list);
-error_unbind:
- spin_unlock(&gtt->adev->gtt_list_lock);
return r;
}
@@ -990,38 +988,33 @@ static struct ttm_backend_func amdgpu_backend_func = {
.destroy = &amdgpu_ttm_backend_destroy,
};
-static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_ttm_adev(bdev);
+ adev = amdgpu_ttm_adev(bo->bdev);
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
return NULL;
}
gtt->ttm.ttm.func = &amdgpu_backend_func;
- gtt->adev = adev;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
}
- INIT_LIST_HEAD(&gtt->list);
return &gtt->ttm.ttm;
}
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
- if (ttm->state != tt_unpopulated)
- return 0;
-
if (gtt && gtt->userptr) {
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
@@ -1034,18 +1027,19 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ gtt->ttm.dma_address,
+ ttm->num_pages);
ttm->state = tt_unbound;
return 0;
}
#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, adev->dev);
+ if (adev->need_swiotlb && swiotlb_nr_tbl()) {
+ return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
}
#endif
- return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
+ return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -1067,7 +1061,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
adev = amdgpu_ttm_adev(ttm->bdev);
#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
+ if (adev->need_swiotlb && swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, adev->dev);
return;
}
@@ -1192,6 +1186,23 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
{
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
+ struct reservation_object_list *flist;
+ struct dma_fence *f;
+ int i;
+
+ /* If bo is a KFD BO, check whether it belongs to the current process.
+ * If so, return false: a KFD process needs all of its BOs to be
+ * resident to run successfully.
+ */
+ flist = reservation_object_get_list(bo->resv);
+ if (flist) {
+ for (i = 0; i < flist->shared_count; ++i) {
+ f = rcu_dereference_protected(flist->shared[i],
+ reservation_object_held(bo->resv));
+ if (amdkfd_fence_check_mm(f, current->mm))
+ return false;
+ }
+ }
switch (bo->mem.mem_type) {
case TTM_PL_TT:
@@ -1234,7 +1245,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
pos = (nodes->start << PAGE_SHIFT) + offset;
- while (len && pos < adev->mc.mc_vram_size) {
+ while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3;
uint32_t bytes = 4 - (pos & 3);
uint32_t shift = (pos & 3) * 8;
@@ -1292,6 +1303,102 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.access_memory = &amdgpu_ttm_access_memory
};
+/*
+ * Firmware Reservation functions
+ */
+/**
+ * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the firmware-reserved VRAM if it has been reserved.
+ */
+static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
+ NULL, &adev->fw_vram_usage.va);
+}
+
+/**
+ * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Create a BO to reserve the VRAM region requested by the firmware.
+ */
+static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int r = 0;
+ int i;
+ u64 vram_size = adev->gmc.visible_vram_size;
+ u64 offset = adev->fw_vram_usage.start_offset;
+ u64 size = adev->fw_vram_usage.size;
+ struct amdgpu_bo *bo;
+
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+
+ if (adev->fw_vram_usage.size > 0 &&
+ adev->fw_vram_usage.size <= vram_size) {
+
+ r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ ttm_bo_type_kernel, NULL,
+ &adev->fw_vram_usage.reserved_bo);
+ if (r)
+ goto error_create;
+
+ r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+ if (r)
+ goto error_reserve;
+
+ /* remove the original mem node and create a new one at the
+ * request position
+ */
+ bo = adev->fw_vram_usage.reserved_bo;
+ offset = ALIGN(offset, PAGE_SIZE);
+ for (i = 0; i < bo->placement.num_placement; ++i) {
+ bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+ bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+
+ ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+ r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
+ &bo->tbo.mem, &ctx);
+ if (r)
+ goto error_pin;
+
+ r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ adev->fw_vram_usage.start_offset,
+ (adev->fw_vram_usage.start_offset +
+ adev->fw_vram_usage.size), NULL);
+ if (r)
+ goto error_pin;
+ r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+ if (r)
+ goto error_kmap;
+
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+ }
+ return r;
+
+error_kmap:
+ amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+error_pin:
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+error_reserve:
+ amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+error_create:
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+ return r;
+}
+
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
uint64_t gtt_size;
@@ -1314,8 +1421,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
adev->mman.initialized = true;
+
+ /* We opt to avoid OOM on system page allocations */
+ adev->mman.bdev.no_retry = true;
+
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
- adev->mc.real_vram_size >> PAGE_SHIFT);
+ adev->gmc.real_vram_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
@@ -1324,33 +1435,42 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Reduce size of CPU-visible VRAM if requested */
vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
if (amdgpu_vis_vram_limit > 0 &&
- vis_vram_limit <= adev->mc.visible_vram_size)
- adev->mc.visible_vram_size = vis_vram_limit;
+ vis_vram_limit <= adev->gmc.visible_vram_size)
+ adev->gmc.visible_vram_size = vis_vram_limit;
/* Change the size here instead of the init above so only lpfn is affected */
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+#ifdef CONFIG_64BIT
+ adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
+ adev->gmc.visible_vram_size);
+#endif
/*
*The reserved vram for firmware must be pinned to the specified
*place on the VRAM, so reserve it early.
*/
- r = amdgpu_fw_reserve_vram_init(adev);
+ r = amdgpu_ttm_fw_reserve_vram_init(adev);
if (r) {
return r;
}
- r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory,
NULL, NULL);
if (r)
return r;
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
- (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
+ (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
- if (amdgpu_gtt_size == -1)
- gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
+ if (amdgpu_gtt_size == -1) {
+ struct sysinfo si;
+
+ si_meminfo(&si);
+ gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->gmc.mc_vram_size),
+ ((uint64_t)si.totalram * si.mem_unit * 3/4));
+ }
else
gtt_size = (uint64_t)amdgpu_gtt_size << 20;
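With this change the default GTT size is additionally capped at three quarters of system memory. A worked example with illustrative numbers, assuming the usual 3 GiB AMDGPU_DEFAULT_GTT_SIZE_MB default:

/* 16 GiB RAM, 8 GiB VRAM (illustrative):
 *   max(3 GiB default, 8 GiB VRAM)  = 8 GiB
 *   cap: 16 GiB * 3/4               = 12 GiB
 *   gtt_size = min(8 GiB, 12 GiB)   = 8 GiB
 */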
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
@@ -1410,19 +1530,16 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
- int r;
-
if (!adev->mman.initialized)
return;
+
amdgpu_ttm_debugfs_fini(adev);
- if (adev->stolen_vga_memory) {
- r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
- if (r == 0) {
- amdgpu_bo_unpin(adev->stolen_vga_memory);
- amdgpu_bo_unreserve(adev->stolen_vga_memory);
- }
- amdgpu_bo_unref(&adev->stolen_vga_memory);
- }
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+ amdgpu_ttm_fw_reserve_vram_fini(adev);
+ if (adev->mman.aper_base_kaddr)
+ iounmap(adev->mman.aper_base_kaddr);
+ adev->mman.aper_base_kaddr = NULL;
+
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
if (adev->gds.mem.total_size)
@@ -1432,24 +1549,35 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (adev->gds.oa.total_size)
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
- amdgpu_gart_fini(adev);
amdgpu_ttm_global_fini(adev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
-/* this should only be called at bootup or when userspace
- * isn't running */
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
+/**
+ * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true when we can use buffer functions.
+ *
+ * Enable/disable use of buffer functions during suspend/resume. This should
+ * only be called at bootup or when userspace isn't running.
+ */
+void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
- struct ttm_mem_type_manager *man;
+ struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
+ uint64_t size;
- if (!adev->mman.initialized)
+ if (!adev->mman.initialized || adev->in_gpu_reset)
return;
- man = &adev->mman.bdev.man[TTM_PL_VRAM];
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
+ if (enable)
+ size = adev->gmc.real_vram_size;
+ else
+ size = adev->gmc.visible_vram_size;
man->size = size >> PAGE_SHIFT;
+ adev->mman.buffer_funcs_enabled = enable;
}
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1488,7 +1616,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
- *addr = adev->mc.gart_start;
+ *addr = adev->gmc.gart_start;
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
@@ -1548,6 +1676,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
unsigned i;
int r;
+ if (direct_submit && !ring->ready) {
+ DRM_ERROR("Trying to move memory with ring turned off.\n");
+ return -EINVAL;
+ }
+
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
@@ -1606,13 +1739,12 @@ error_free:
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint64_t src_data,
+ uint32_t src_data,
struct reservation_object *resv,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- uint32_t max_bytes = 8 *
- adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
+ uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *mm_node;
@@ -1622,13 +1754,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
struct amdgpu_job *job;
int r;
- if (!ring->ready) {
+ if (!adev->mman.buffer_funcs_enabled) {
DRM_ERROR("Trying to clear memory with ring turned off.\n");
return -EINVAL;
}
if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r)
return r;
}
@@ -1643,9 +1775,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
num_pages -= mm_node->size;
++mm_node;
}
-
- /* num of dwords for each SDMA_OP_PTEPDE cmd */
- num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
+ num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
/* for IB padding */
num_dw += 64;
@@ -1670,16 +1800,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
uint64_t dst_addr;
- WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
-
dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
while (byte_count) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
- amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
- dst_addr, 0,
- cur_size_in_bytes >> 3, 0,
- src_data);
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+ dst_addr, cur_size_in_bytes);
dst_addr += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
@@ -1740,14 +1866,14 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- if (*pos >= adev->mc.mc_vram_size)
+ if (*pos >= adev->gmc.mc_vram_size)
return -ENXIO;
while (size) {
unsigned long flags;
uint32_t value;
- if (*pos >= adev->mc.mc_vram_size)
+ if (*pos >= adev->gmc.mc_vram_size)
return result;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
@@ -1779,14 +1905,14 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- if (*pos >= adev->mc.mc_vram_size)
+ if (*pos >= adev->gmc.mc_vram_size)
return -ENXIO;
while (size) {
unsigned long flags;
uint32_t value;
- if (*pos >= adev->mc.mc_vram_size)
+ if (*pos >= adev->gmc.mc_vram_size)
return result;
r = get_user(value, (uint32_t *)buf);
@@ -1864,38 +1990,98 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
-static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
- int r;
- uint64_t phys;
struct iommu_domain *dom;
+ ssize_t result = 0;
+ int r;
- // always return 8 bytes
- if (size != 8)
- return -EINVAL;
+ dom = iommu_get_domain_for_dev(adev->dev);
- // only accept page addresses
- if (*pos & 0xFFF)
- return -EINVAL;
+ while (size) {
+ phys_addr_t addr = *pos & PAGE_MASK;
+ loff_t off = *pos & ~PAGE_MASK;
+ size_t bytes = PAGE_SIZE - off;
+ unsigned long pfn;
+ struct page *p;
+ void *ptr;
+
+ bytes = bytes < size ? bytes : size;
+
+ addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
+
+ pfn = addr >> PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return -EPERM;
+
+ p = pfn_to_page(pfn);
+ if (p->mapping != adev->mman.bdev.dev_mapping)
+ return -EPERM;
+
+ ptr = kmap(p);
+ r = copy_to_user(buf, ptr + off, bytes);
+ kunmap(p);
+ if (r)
+ return -EFAULT;
+
+ size -= bytes;
+ *pos += bytes;
+ result += bytes;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ struct iommu_domain *dom;
+ ssize_t result = 0;
+ int r;
dom = iommu_get_domain_for_dev(adev->dev);
- if (dom)
- phys = iommu_iova_to_phys(dom, *pos);
- else
- phys = *pos;
- r = copy_to_user(buf, &phys, 8);
- if (r)
- return -EFAULT;
+ while (size) {
+ phys_addr_t addr = *pos & PAGE_MASK;
+ loff_t off = *pos & ~PAGE_MASK;
+ size_t bytes = PAGE_SIZE - off;
+ unsigned long pfn;
+ struct page *p;
+ void *ptr;
+
+ bytes = bytes < size ? bytes : size;
- return 8;
+ addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
+
+ pfn = addr >> PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return -EPERM;
+
+ p = pfn_to_page(pfn);
+ if (p->mapping != adev->mman.bdev.dev_mapping)
+ return -EPERM;
+
+ ptr = kmap(p);
+ r = copy_from_user(ptr + off, buf, bytes);
+ kunmap(p);
+ if (r)
+ return -EFAULT;
+
+ size -= bytes;
+ *pos += bytes;
+ result += bytes;
+ }
+
+ return result;
}
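The rewritten file acts as a byte-addressable window into device-accessible system memory (IOVA space when an IOMMU is present) instead of the old 8-byte IOVA-to-physical lookup. A userspace sketch, assuming the standard DRM debugfs layout and an arbitrary IOVA:

/* Illustrative only: read 16 bytes at IOVA 0x100000 via debugfs (DRM minor 0 assumed). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_iomem", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, buf, sizeof(buf), 0x100000) == (ssize_t)sizeof(buf))
		printf("first byte: 0x%02x\n", buf[0]);
	close(fd);
	return 0;
}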
-static const struct file_operations amdgpu_ttm_iova_fops = {
+static const struct file_operations amdgpu_ttm_iomem_fops = {
.owner = THIS_MODULE,
- .read = amdgpu_iova_to_phys_read,
+ .read = amdgpu_iomem_read,
+ .write = amdgpu_iomem_write,
.llseek = default_llseek
};
@@ -1908,7 +2094,7 @@ static const struct {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
- { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
+ { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};
#endif
@@ -1930,16 +2116,16 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
if (IS_ERR(ent))
return PTR_ERR(ent);
if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
- i_size_write(ent->d_inode, adev->mc.mc_vram_size);
+ i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
- i_size_write(ent->d_inode, adev->mc.gart_size);
+ i_size_write(ent->d_inode, adev->gmc.gart_size);
adev->mman.debugfs_entries[count] = ent;
}
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
- if (!swiotlb_nr_tbl())
+ if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
--count;
#endif
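The interruptible/no_wait_gpu parameter pairs collapse into struct ttm_operation_ctx across this file; as a minimal sketch, the calling convention for validation becomes:

/* Sketch: validate a BO placement with the context-based TTM API. */
struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false };
int r;

r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
	return r;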
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index abd4084..6ea7de8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -25,7 +25,7 @@
#define __AMDGPU_TTM_H__
#include "amdgpu.h"
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
@@ -44,6 +44,7 @@ struct amdgpu_mman {
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
+ void __iomem *aper_base_kaddr;
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_entries[8];
@@ -52,10 +53,11 @@ struct amdgpu_mman {
/* buffer handling */
const struct amdgpu_buffer_funcs *buffer_funcs;
struct amdgpu_ring *buffer_funcs_ring;
+ bool buffer_funcs_enabled;
struct mutex gtt_window_lock;
/* Scheduler entity for buffer moves */
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
};
struct amdgpu_copy_mem {
@@ -67,12 +69,18 @@ struct amdgpu_copy_mem {
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
-bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
+void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
+ bool enable);
+
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct reservation_object *resv,
@@ -85,14 +93,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct reservation_object *resv,
struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint64_t src_data,
+ uint32_t src_data,
struct reservation_object *resv,
struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 6564902..dd6f989 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -271,6 +271,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
return AMDGPU_FW_LOAD_SMU;
case CHIP_VEGA10:
case CHIP_RAVEN:
+ case CHIP_VEGA12:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
@@ -359,7 +360,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
- struct amdgpu_bo **bo = &adev->firmware.fw_buf;
uint64_t fw_offset = 0;
int i, err;
struct amdgpu_firmware_info *ucode = NULL;
@@ -370,36 +370,16 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
return 0;
}
- if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
- err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
+ if (!adev->in_gpu_reset) {
+ err = amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, bo);
+ &adev->firmware.fw_buf,
+ &adev->firmware.fw_buf_mc,
+ &adev->firmware.fw_buf_ptr);
if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
+ dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
goto failed;
}
-
- err = amdgpu_bo_reserve(*bo, false);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
- goto failed_reserve;
- }
-
- err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- &adev->firmware.fw_buf_mc);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
- goto failed_pin;
- }
-
- err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
- goto failed_kmap;
- }
-
- amdgpu_bo_unreserve(*bo);
}
memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
@@ -436,12 +416,6 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
}
return 0;
-failed_kmap:
- amdgpu_bo_unpin(*bo);
-failed_pin:
- amdgpu_bo_unreserve(*bo);
-failed_reserve:
- amdgpu_bo_unref(bo);
failed:
if (err)
adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
@@ -464,8 +438,10 @@ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
ucode->kaddr = NULL;
}
}
- amdgpu_bo_unref(&adev->firmware.fw_buf);
- adev->firmware.fw_buf = NULL;
+
+ amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
+ &adev->firmware.fw_buf_mc,
+ &adev->firmware.fw_buf_ptr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e8bd50c..627542b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -68,6 +68,7 @@
#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
+#define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
@@ -110,13 +111,14 @@ MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGA10);
+MODULE_FIRMWARE(FIRMWARE_VEGA12);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
@@ -161,11 +163,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
case CHIP_VEGA10:
fw_name = FIRMWARE_VEGA10;
break;
- case CHIP_POLARIS12:
- fw_name = FIRMWARE_POLARIS12;
+ case CHIP_VEGA12:
+ fw_name = FIRMWARE_VEGA12;
break;
default:
return -EINVAL;
@@ -230,9 +235,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
}
ring = &adev->uvd.ring;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
- rq, amdgpu_sched_jobs);
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
+ rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up UVD run queue.\n");
return r;
@@ -244,7 +249,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
}
/* from uvd v5.0 HW addressing capacity increased to 64 bits */
- if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
+ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
switch (adev->asic_type) {
@@ -272,7 +277,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int i;
kfree(adev->uvd.saved_bo);
- amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+ drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
&adev->uvd.gpu_addr,
@@ -297,14 +302,17 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (adev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.handles[i]))
- break;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
- if (i == AMDGPU_MAX_UVD_HANDLES)
- return 0;
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
- cancel_delayed_work_sync(&adev->uvd.idle_work);
+ if (i == adev->uvd.max_handles)
+ return 0;
+ }
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
ptr = adev->uvd.cpu_addr;
@@ -346,6 +354,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
memset_io(ptr, 0, size);
+ /* to restore uvd fence seq */
+ amdgpu_fence_driver_force_completion(&adev->uvd.ring);
}
return 0;
@@ -408,6 +418,7 @@ static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
*/
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
+ struct ttm_operation_ctx tctx = { false, false };
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint32_t cmd;
@@ -430,7 +441,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
}
amdgpu_uvd_force_into_uvd_segment(bo);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
}
return r;
@@ -949,36 +960,28 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
bool direct, struct dma_fence **fence)
{
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- struct list_head head;
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct dma_fence *f = NULL;
- struct amdgpu_device *adev = ring->adev;
- uint64_t addr;
uint32_t data[4];
- int i, r;
-
- memset(&tv, 0, sizeof(tv));
- tv.bo = &bo->tbo;
-
- INIT_LIST_HEAD(&head);
- list_add(&tv.head, &head);
+ uint64_t addr;
+ long r;
+ int i;
- r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
- if (r)
- return r;
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unpin(bo);
if (!ring->adev->uvd.address_64_bit) {
+ struct ttm_operation_ctx ctx = { true, false };
+
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ goto err;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- if (r)
- goto err;
-
r = amdgpu_job_alloc_with_ib(adev, 64, &job);
if (r)
goto err;
@@ -1010,6 +1013,14 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ true, false,
+ msecs_to_jiffies(10));
+ if (r == 0)
+ r = -ETIMEDOUT;
+ if (r < 0)
+ goto err_free;
+
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
job->fence = dma_fence_get(f);
if (r)
@@ -1017,17 +1028,23 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
amdgpu_job_free(job);
} else {
+ r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ if (r)
+ goto err_free;
+
r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
if (r)
goto err_free;
}
- ttm_eu_fence_buffer_objects(&ticket, &head, f);
+ amdgpu_bo_fence(bo, f, false);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
if (fence)
*fence = dma_fence_get(f);
- amdgpu_bo_unref(&bo);
dma_fence_put(f);
return 0;
@@ -1036,7 +1053,8 @@ err_free:
amdgpu_job_free(job);
err:
- ttm_eu_backoff_reservation(&ticket, &head);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
@@ -1047,31 +1065,16 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = NULL;
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, &bo);
+ r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, (void **)&msg);
if (r)
return r;
- r = amdgpu_bo_reserve(bo, false);
- if (r) {
- amdgpu_bo_unref(&bo);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, (void **)&msg);
- if (r) {
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
- return r;
- }
-
/* stitch together an UVD create msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000000);
@@ -1087,9 +1090,6 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = 11; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unreserve(bo);
-
return amdgpu_uvd_send_msg(ring, bo, true, fence);
}
@@ -1097,31 +1097,16 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = NULL;
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, &bo);
+ r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, (void **)&msg);
if (r)
return r;
- r = amdgpu_bo_reserve(bo, false);
- if (r) {
- amdgpu_bo_unref(&bo);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, (void **)&msg);
- if (r) {
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
- return r;
- }
-
/* stitch together an UVD destroy msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000002);
@@ -1130,9 +1115,6 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = 4; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unreserve(bo);
-
return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}
@@ -1142,19 +1124,16 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
container_of(work, struct amdgpu_device, uvd.idle_work.work);
unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
- if (amdgpu_sriov_vf(adev))
- return;
-
if (fences == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
/* shutdown the UVD block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1164,27 +1143,29 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
+ bool set_clocks;
if (amdgpu_sriov_vf(adev))
return;
+ set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true);
} else {
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
}
}
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
- schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+ if (!amdgpu_sriov_vf(ring->adev))
+ schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
/**
@@ -1218,7 +1199,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
}
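
The direct-submission path in amdgpu_uvd_send_msg() above now waits on the BO's reservation object with a 10 ms budget and maps a zero return to -ETIMEDOUT before scheduling the IB. Below is a minimal standalone sketch of that return-value convention only; fake_wait() is a stand-in for reservation_object_wait_timeout_rcu(), not a kernel API.

/* sketch: 0 means "timed out", negative means error, positive means signalled */
#include <errno.h>
#include <stdio.h>

static long fake_wait(long timeout)
{
	/* pretend the fences signalled with half of the budget left */
	return timeout / 2;
}

int main(void)
{
	long r = fake_wait(10);

	if (r == 0)
		r = -ETIMEDOUT;
	if (r < 0)
		printf("bail out, error %ld\n", r);
	else
		printf("fences signalled, %ld units of budget left\n", r);
	return 0;
}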
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 3553b92..32ea20b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -31,6 +31,10 @@
#define AMDGPU_UVD_SESSION_SIZE (50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256
+#define AMDGPU_UVD_FIRMWARE_SIZE(adev) \
+ (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
+ 8) - AMDGPU_UVD_FIRMWARE_OFFSET)
+
struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
@@ -47,8 +51,8 @@ struct amdgpu_uvd {
struct amdgpu_irq_src irq;
bool address_64_bit;
bool use_ctx_buf;
- struct amd_sched_entity entity;
- struct amd_sched_entity entity_enc;
+ struct drm_sched_entity entity;
+ struct drm_sched_entity entity_enc;
uint32_t srbm_soft_reset;
unsigned num_enc_rings;
};
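
The new AMDGPU_UVD_FIRMWARE_SIZE() macro page-aligns the ucode size plus 8 bytes and subtracts the 256-byte firmware offset. A standalone sketch of that arithmetic follows; the 4 KiB GPU page size and the ucode size are assumptions used only for illustration.

/* sketch of the firmware-region size computation */
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE     4096u
#define GPU_PAGE_ALIGN(x) (((x) + GPU_PAGE_SIZE - 1) & ~(GPU_PAGE_SIZE - 1))
#define FIRMWARE_OFFSET   256u

int main(void)
{
	uint32_t ucode_size = 254000; /* hypothetical ucode_size_bytes */
	uint32_t fw_size = GPU_PAGE_ALIGN(ucode_size + 8) - FIRMWARE_OFFSET;

	printf("firmware region: %u bytes\n", fw_size);
	return 0;
}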
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 2918de2..a33804b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -55,6 +55,7 @@
#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin"
+#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -72,6 +73,7 @@ MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGA10);
+MODULE_FIRMWARE(FIRMWARE_VEGA12);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
@@ -85,7 +87,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned ucode_version, version_major, version_minor, binary_id;
@@ -127,11 +129,14 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
case CHIP_VEGA10:
fw_name = FIRMWARE_VEGA10;
break;
- case CHIP_POLARIS12:
- fw_name = FIRMWARE_POLARIS12;
+ case CHIP_VEGA12:
+ fw_name = FIRMWARE_VEGA12;
break;
default:
@@ -174,9 +179,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
}
ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
- rq, amdgpu_sched_jobs);
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
+ rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
@@ -207,7 +212,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
- amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+ drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
@@ -300,9 +305,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
container_of(work, struct amdgpu_device, vce.idle_work.work);
unsigned i, count = 0;
- if (amdgpu_sriov_vf(adev))
- return;
-
for (i = 0; i < adev->vce.num_rings; i++)
count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
@@ -311,10 +313,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -343,10 +345,10 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
}
}
@@ -362,7 +364,8 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
*/
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
- schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
+ if (!amdgpu_sriov_vf(ring->adev))
+ schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
/**
@@ -544,6 +547,55 @@ err:
}
/**
+ * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
+ *
+ * @p: parser context
+ * @ib_idx: indirect buffer index
+ * @lo: address of lower dword
+ * @hi: address of higher dword
+ * @size: minimum size
+ * @index: bs/fb index
+ *
+ * Make sure that no BO crosses a 4GB boundary.
+ */
+static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
+ int lo, int hi, unsigned size, int32_t index)
+{
+ int64_t offset = ((uint64_t)size) * ((int64_t)index);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va_mapping *mapping;
+ unsigned i, fpfn, lpfn;
+ struct amdgpu_bo *bo;
+ uint64_t addr;
+ int r;
+
+ addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
+ ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
+ if (index >= 0) {
+ addr += offset;
+ fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
+ lpfn = 0x100000000ULL >> PAGE_SHIFT;
+ } else {
+ fpfn = 0;
+ lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
+ }
+
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+ if (r) {
+ DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
+ addr, lo, hi, size, index);
+ return r;
+ }
+
+ for (i = 0; i < bo->placement.num_placement; ++i) {
+ bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
+ bo->placements[i].lpfn = bo->placements[i].lpfn ?
+ min(bo->placements[i].lpfn, lpfn) : lpfn;
+ }
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+/**
* amdgpu_vce_cs_reloc - command submission relocation
*
* @p: parser context
@@ -648,12 +700,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
- int i, r = 0, idx = 0;
+ unsigned idx;
+ int i, r = 0;
p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
- while (idx < ib->length_dw) {
+ for (idx = 0; idx < ib->length_dw;) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
@@ -664,6 +717,54 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
}
switch (cmd) {
+ case 0x00000002: /* task info */
+ fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
+ bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
+ break;
+
+ case 0x03000001: /* encode */
+ r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
+ idx + 9, 0, 0);
+ if (r)
+ goto out;
+
+ r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
+ idx + 11, 0, 0);
+ if (r)
+ goto out;
+ break;
+
+ case 0x05000001: /* context buffer */
+ r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
+ idx + 2, 0, 0);
+ if (r)
+ goto out;
+ break;
+
+ case 0x05000004: /* video bitstream buffer */
+ tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
+ r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
+ tmp, bs_idx);
+ if (r)
+ goto out;
+ break;
+
+ case 0x05000005: /* feedback buffer */
+ r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
+ 4096, fb_idx);
+ if (r)
+ goto out;
+ break;
+ }
+
+ idx += len / 4;
+ }
+
+ for (idx = 0; idx < ib->length_dw;) {
+ uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+ uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+ switch (cmd) {
case 0x00000001: /* session */
handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
@@ -893,7 +994,7 @@ out:
*
*/
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -954,7 +1055,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed\n",
@@ -999,7 +1100,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
}
error:
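
amdgpu_vce_validate_bo() above keeps indexed VCE buffers below the 4 GB boundary by clamping the BO's placement window (first/last pfn). A standalone sketch of the pfn window for the common index >= 0 case; PAGE_SHIFT of 12 and the slot size/index are illustrative assumptions.

/* sketch: the slot at size * index must still end below 4 GB */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12u
#define PAGE_SIZE  (1ull << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t size = 4096;   /* hypothetical per-slot size */
	int32_t index = 3;      /* hypothetical bs/fb index */
	uint64_t offset = size * (uint64_t)index;

	uint64_t fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
	uint64_t lpfn = 0x100000000ull >> PAGE_SHIFT;

	printf("first pfn %llu, last pfn %llu\n",
	       (unsigned long long)fpfn, (unsigned long long)lpfn);
	return 0;
}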
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 5ce54cd..7178126 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -30,6 +30,8 @@
#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
+#define AMDGPU_VCE_FW_53_45 ((53 << 24) | (45 << 16))
+
struct amdgpu_vce {
struct amdgpu_bo *vcpu_bo;
uint64_t gpu_addr;
@@ -46,7 +48,7 @@ struct amdgpu_vce {
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
unsigned harvest_config;
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
uint32_t srbm_soft_reset;
unsigned num_rings;
};
@@ -63,7 +65,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
+ unsigned vmid, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
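
AMDGPU_VCE_FW_53_45 above packs the firmware major version into bits 31..24 and the minor into bits 23..16, so packed versions order correctly under a plain integer compare. A small sketch with hypothetical version numbers:

/* sketch of the major/minor packing */
#include <stdint.h>
#include <stdio.h>

#define VCE_FW(major, minor) (((uint32_t)(major) << 24) | ((uint32_t)(minor) << 16))
#define VCE_FW_53_45 VCE_FW(53, 45)

int main(void)
{
	uint32_t loaded = VCE_FW(53, 47); /* pretend version read from the header */

	printf("loaded 0x%08x, needs >= 0x%08x: %s\n", loaded, VCE_FW_53_45,
	       loaded >= VCE_FW_53_45 ? "ok" : "too old");
	return 0;
}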
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 041e012..58e4953 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -35,8 +35,7 @@
#include "soc15d.h"
#include "soc15_common.h"
-#include "vega10/soc15ip.h"
-#include "raven1/VCN/vcn_1_0_offset.h"
+#include "vcn/vcn_1_0_offset.h"
/* 1 second timeout */
#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -51,7 +50,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
@@ -104,18 +103,18 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
ring = &adev->vcn.ring_dec;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
- rq, amdgpu_sched_jobs);
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
+ rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN dec run queue.\n");
return r;
}
ring = &adev->vcn.ring_enc[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
- rq, amdgpu_sched_jobs);
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
+ rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN enc run queue.\n");
return r;
@@ -130,9 +129,9 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
kfree(adev->vcn.saved_bo);
- amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
+ drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
- amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
+ drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr,
@@ -261,7 +260,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
@@ -271,33 +270,17 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
return r;
}
-static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
- bool direct, struct dma_fence **fence)
+static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo, bool direct,
+ struct dma_fence **fence)
{
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- struct list_head head;
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct dma_fence *f = NULL;
- struct amdgpu_device *adev = ring->adev;
uint64_t addr;
int i, r;
- memset(&tv, 0, sizeof(tv));
- tv.bo = &bo->tbo;
-
- INIT_LIST_HEAD(&head);
- list_add(&tv.head, &head);
-
- r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
- if (r)
- return r;
-
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- if (r)
- goto err;
-
r = amdgpu_job_alloc_with_ib(adev, 64, &job);
if (r)
goto err;
@@ -330,11 +313,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
goto err_free;
}
- ttm_eu_fence_buffer_objects(&ticket, &head, f);
+ amdgpu_bo_fence(bo, f, false);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
if (fence)
*fence = dma_fence_get(f);
- amdgpu_bo_unref(&bo);
dma_fence_put(f);
return 0;
@@ -343,7 +327,8 @@ err_free:
amdgpu_job_free(job);
err:
- ttm_eu_backoff_reservation(&ticket, &head);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
@@ -351,31 +336,16 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = NULL;
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, &bo);
+ r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, (void **)&msg);
if (r)
return r;
- r = amdgpu_bo_reserve(bo, false);
- if (r) {
- amdgpu_bo_unref(&bo);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, (void **)&msg);
- if (r) {
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
- return r;
- }
-
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000038);
msg[2] = cpu_to_le32(0x00000001);
@@ -393,9 +363,6 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = 14; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unreserve(bo);
-
return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}
@@ -403,31 +370,16 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = NULL;
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, &bo);
+ r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, (void **)&msg);
if (r)
return r;
- r = amdgpu_bo_reserve(bo, false);
- if (r) {
- amdgpu_bo_unref(&bo);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, (void **)&msg);
- if (r) {
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
- return r;
- }
-
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000018);
msg[2] = cpu_to_le32(0x00000000);
@@ -437,9 +389,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = 6; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unreserve(bo);
-
return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}
@@ -467,7 +416,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
}
@@ -500,7 +449,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed\n",
@@ -643,7 +592,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
}
error:
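
With amdgpu_bo_create_reserved() the VCN decode message BOs come back already reserved and CPU-mapped, and the message is written directly as little-endian dwords before the tail is zeroed. A standalone sketch of that buffer layout using only the header dwords visible in the hunks above; the remaining payload words are deliberately omitted, and cpu_to_le32_sketch() is a stand-in, not the kernel helper.

/* sketch: fill a 1024-dword message buffer with LE words and zero the rest */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t cpu_to_le32_sketch(uint32_t v)
{
	/* byte-swap on a big-endian host; identity on little-endian
	 * (__builtin_bswap32 is a GCC/Clang builtin) */
	const union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

	return probe.u8[0] ? v : __builtin_bswap32(v);
}

int main(void)
{
	uint32_t msg[1024];

	memset(msg, 0, sizeof(msg));
	msg[0] = cpu_to_le32_sketch(0x00000028);
	msg[1] = cpu_to_le32_sketch(0x00000038);
	msg[2] = cpu_to_le32_sketch(0x00000001);
	/* ... remaining payload dwords omitted ... */

	printf("message header: %08x %08x %08x\n", msg[0], msg[1], msg[2]);
	return 0;
}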
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d50ba06..2fd7db8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -56,8 +56,8 @@ struct amdgpu_vcn {
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
struct amdgpu_irq_src irq;
- struct amd_sched_entity entity_dec;
- struct amd_sched_entity entity_enc;
+ struct drm_sched_entity entity_dec;
+ struct drm_sched_entity entity_enc;
unsigned num_enc_rings;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 6738df8..21adb1b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,7 +22,29 @@
*/
#include "amdgpu.h"
-#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
+#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
+#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
+#define MAX_KIQ_REG_TRY 20
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+ uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+ addr -= AMDGPU_VA_RESERVED_SIZE;
+
+ if (addr >= AMDGPU_VA_HOLE_START)
+ addr |= AMDGPU_VA_HOLE_END;
+
+ return addr;
+}
+
+bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
+{
+ /* By now all MMIO pages except the mailbox are blocked if blocking
+ * is enabled in the hypervisor. Use SCRATCH_REG0 to test.
+ */
+ return RREG32_NO_KIQ(0xc040) == 0xffffffff;
+}
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
@@ -39,16 +61,22 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_free_static_csa(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->virt.csa_obj,
+ &adev->virt.csa_vmid0_addr,
+ NULL);
+}
+
/*
* amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
- * to this VM, and each command submission of GFX should use this virtual
- * address within META_DATA init package to support SRIOV gfx preemption.
+ * it maps the virtual address returned by amdgpu_csa_vaddr() into this VM;
+ * each GFX command submission should use this virtual address within the
+ * META_DATA init package to support SRIOV GFX preemption.
*/
-
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
+ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
struct ww_acquire_ctx ticket;
struct list_head list;
struct amdgpu_bo_list_entry pd;
@@ -76,7 +104,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return -ENOMEM;
}
- r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
+ r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
@@ -85,7 +113,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
- r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+ r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE);
@@ -107,15 +135,13 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
adev->enable_virtual_display = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
-
- mutex_init(&adev->virt.lock_reset);
}
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
- signed long r;
+ signed long r, cnt = 0;
unsigned long flags;
- uint32_t val, seq;
+ uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
@@ -129,18 +155,39 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld\n", r);
- return ~0;
+
+ /* don't wait any longer in the GPU reset case, because that could
+ * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
+ * can be triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting here, which causes gpu_recover()
+ * to hang.
+ *
+ * also don't wait any longer in IRQ context
+ */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_read;
+
+ if (in_interrupt())
+ might_sleep();
+
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
- val = adev->wb.wb[adev->virt.reg_val_offs];
- return val;
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ return adev->wb.wb[adev->virt.reg_val_offs];
+
+failed_kiq_read:
+ pr_err("failed to read reg:%x\n", reg);
+ return ~0;
}
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
- signed long r;
+ signed long r, cnt = 0;
unsigned long flags;
uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -156,8 +203,34 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- if (r < 1)
- DRM_ERROR("wait for kiq fence error: %ld\n", r);
+
+ /* don't wait any longer in the GPU reset case, because that could
+ * block the gpu_recover() routine forever, e.g. this virt_kiq_wreg
+ * can be triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting here, which causes gpu_recover()
+ * to hang.
+ *
+ * also don't wait any longer in IRQ context
+ */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_write;
+
+ if (in_interrupt())
+ might_sleep();
+
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_write;
+
+ return;
+
+failed_kiq_write:
+ pr_err("failed to write reg:%x\n", reg);
}
/**
@@ -228,6 +301,22 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
}
/**
+ * amdgpu_virt_wait_reset() - wait for a GPU reset to complete
+ * @adev: amdgpu device.
+ * Wait until the GPU reset has completed.
+ * Return: Zero on success, error code otherwise.
+ */
+int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (!virt->ops || !virt->ops->wait_reset)
+ return -EINVAL;
+
+ return virt->ops->wait_reset(adev);
+}
+
+/**
* amdgpu_virt_alloc_mm_table() - alloc memory for mm table
* @amdgpu: amdgpu device.
* MM table is used by UVD and VCE for its initialization
@@ -296,7 +385,6 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
- uint32_t pf2vf_ver = 0;
uint32_t pf2vf_size = 0;
uint32_t checksum = 0;
uint32_t checkval;
@@ -309,9 +397,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_pf2vf =
(struct amdgim_pf2vf_info_header *)(
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
- pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
+ AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
/* pf2vf message must be in 4K */
if (pf2vf_size > 0 && pf2vf_size < 4096) {
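
The KIQ register accessors above now retry a failed fence poll up to MAX_KIQ_REG_TRY times with a short sleep in between, instead of waiting once with a huge timeout. A standalone sketch of that bounded retry; fake_poll() is a stand-in for amdgpu_fence_wait_polling(), and usleep() replaces msleep() for the user-space sketch.

/* sketch: poll once, then retry with a bailout interval, then give up */
#include <stdio.h>
#include <unistd.h>

#define MAX_KIQ_REG_WAIT            5000 /* usecs per poll */
#define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* msecs between retries */
#define MAX_KIQ_REG_TRY               20

static long fake_poll(unsigned usecs)
{
	static int calls;

	(void)usecs;
	return ++calls >= 3 ? 1 : 0; /* pretend the fence signals on try 3 */
}

int main(void)
{
	long r = fake_poll(MAX_KIQ_REG_WAIT);
	long cnt = 0;

	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		usleep(MAX_KIQ_REG_BAILOUT_INTERVAL * 1000);
		r = fake_poll(MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		printf("failed to access register\n");
	else
		printf("fence signalled after %ld retries\n", cnt);
	return 0;
}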
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b89d37f..880ac11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -55,6 +55,7 @@ struct amdgpu_virt_ops {
int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
int (*reset_gpu)(struct amdgpu_device *adev);
+ int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
};
@@ -80,6 +81,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
/* GIM supports feature of loading uCodes */
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
+ /* VRAM LOST by GIM */
+ AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
};
struct amdgim_pf2vf_info_header {
@@ -238,7 +241,6 @@ struct amdgpu_virt {
uint64_t csa_vmid0_addr;
bool chained_ib_support;
uint32_t reg_val_offs;
- struct mutex lock_reset;
struct amdgpu_irq_src ack_irq;
struct amdgpu_irq_src rcv_irq;
struct work_struct flr_work;
@@ -246,10 +248,10 @@ struct amdgpu_virt {
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
+ uint32_t gim_feature;
};
-#define AMDGPU_CSA_SIZE (8 * 1024)
-#define AMDGPU_CSA_VADDR (AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)
+#define AMDGPU_CSA_SIZE (8 * 1024)
#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
@@ -276,16 +278,20 @@ static inline bool is_virtual_machine(void)
}
struct amdgpu_vm;
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
+bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va);
+void amdgpu_free_static_csa(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
+int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
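
amdgpu_csa_vaddr(), declared above and implemented earlier in this patch, places the CSA just below the top of the VM range and sign-extends the address into the upper canonical range when it lands past the VA hole. A standalone sketch of that computation; the reserved size, page shift and hole boundaries below are assumptions for illustration, not values taken from this patch.

/* sketch of the CSA virtual-address placement */
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT   12
#define VA_RESERVED_SIZE (1ull << 20)          /* assumed 1 MiB */
#define VA_HOLE_START    0x0000800000000000ull /* assumed 48-bit VA hole */
#define VA_HOLE_END      0xffff800000000000ull

static uint64_t csa_vaddr(uint64_t max_pfn)
{
	uint64_t addr = max_pfn << GPU_PAGE_SHIFT;

	addr -= VA_RESERVED_SIZE;
	if (addr >= VA_HOLE_START)
		addr |= VA_HOLE_END;
	return addr;
}

int main(void)
{
	/* 48-bit VA space: 2^36 pages of 4 KiB */
	printf("csa at 0x%016llx\n",
	       (unsigned long long)csa_vaddr(1ull << 36));
	return 0;
}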
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c8c26f2..da55a78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -32,52 +32,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
-
-/*
- * PASID manager
- *
- * PASIDs are global address space identifiers that can be shared
- * between the GPU, an IOMMU and the driver. VMs on different devices
- * may use the same PASID if they share the same address
- * space. Therefore PASIDs are allocated using a global IDA. VMs are
- * looked up from the PASID per amdgpu_device.
- */
-static DEFINE_IDA(amdgpu_vm_pasid_ida);
-
-/**
- * amdgpu_vm_alloc_pasid - Allocate a PASID
- * @bits: Maximum width of the PASID in bits, must be at least 1
- *
- * Allocates a PASID of the given width while keeping smaller PASIDs
- * available if possible.
- *
- * Returns a positive integer on success. Returns %-EINVAL if bits==0.
- * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
- * memory allocation failure.
- */
-int amdgpu_vm_alloc_pasid(unsigned int bits)
-{
- int pasid = -EINVAL;
-
- for (bits = min(bits, 31U); bits > 0; bits--) {
- pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
- 1U << (bits - 1), 1U << bits,
- GFP_KERNEL);
- if (pasid != -ENOSPC)
- break;
- }
-
- return pasid;
-}
-
-/**
- * amdgpu_vm_free_pasid - Free a PASID
- * @pasid: PASID to free
- */
-void amdgpu_vm_free_pasid(unsigned int pasid)
-{
- ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
-}
+#include "amdgpu_amdkfd.h"
/*
* GPUVM
@@ -121,7 +76,8 @@ struct amdgpu_pte_update_params {
/* indirect buffer to fill with commands */
struct amdgpu_ib *ib;
/* Function which actually does the update */
- void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
+ void (*func)(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags);
/* The next two are used during VM update by CPU
@@ -139,6 +95,35 @@ struct amdgpu_prt_cb {
};
/**
+ * amdgpu_vm_level_shift - return the addr shift for each level
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the number of bits the pfn needs to be right shifted for a level.
+ */
+static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
+ unsigned level)
+{
+ unsigned shift = 0xff;
+
+ switch (level) {
+ case AMDGPU_VM_PDB2:
+ case AMDGPU_VM_PDB1:
+ case AMDGPU_VM_PDB0:
+ shift = 9 * (AMDGPU_VM_PDB0 - level) +
+ adev->vm_manager.block_size;
+ break;
+ case AMDGPU_VM_PTB:
+ shift = 0;
+ break;
+ default:
+ dev_err(adev->dev, "the level%d isn't supported.\n", level);
+ }
+
+ return shift;
+}
+
+/**
* amdgpu_vm_num_entries - return the number of entries in a PD/PT
*
* @adev: amdgpu_device pointer
@@ -148,17 +133,18 @@ struct amdgpu_prt_cb {
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
unsigned level)
{
- if (level == 0)
+ unsigned shift = amdgpu_vm_level_shift(adev,
+ adev->vm_manager.root_level);
+
+ if (level == adev->vm_manager.root_level)
/* For the root directory */
- return adev->vm_manager.max_pfn >>
- (adev->vm_manager.block_size *
- adev->vm_manager.num_level);
- else if (level == adev->vm_manager.num_level)
+ return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
+ else if (level != AMDGPU_VM_PTB)
+ /* Everything in between */
+ return 512;
+ else
/* For the page tables on the leaves */
return AMDGPU_VM_PTE_COUNT(adev);
- else
- /* Everything in between */
- return 1 << adev->vm_manager.block_size;
}
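
amdgpu_vm_level_shift() and the reworked amdgpu_vm_num_entries() above derive the per-level address shift from the distance to PDB0 plus the block size, and size the root PD from max_pfn rounded up to that shift. A standalone sketch for a Vega-style 4-level layout; the level numbering (PDB2=0 .. PTB=3), block_size of 9 and max_pfn are illustrative assumptions.

/* sketch of the per-level shift and the resulting root-PD entry count */
#include <stdint.h>
#include <stdio.h>

enum { PDB2, PDB1, PDB0, PTB };

static unsigned level_shift(unsigned level, unsigned block_size)
{
	if (level == PTB)
		return 0;
	return 9 * (PDB0 - level) + block_size;
}

int main(void)
{
	const unsigned block_size = 9;
	const uint64_t max_pfn = 1ull << 36; /* 48-bit VA in 4 KiB pages */
	unsigned root_shift = level_shift(PDB2, block_size);
	uint64_t root_entries =
		(max_pfn + (1ull << root_shift) - 1) >> root_shift;

	printf("shifts: PDB2=%u PDB1=%u PDB0=%u PTB=%u, root entries=%llu\n",
	       level_shift(PDB2, block_size), level_shift(PDB1, block_size),
	       level_shift(PDB0, block_size), level_shift(PTB, block_size),
	       (unsigned long long)root_entries);
	return 0;
}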
/**
@@ -273,6 +259,104 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
}
/**
+ * amdgpu_vm_clear_bo - initially clear the PDs/PTs
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: BO to clear
+ * @level: level this BO is at
+ *
+ * Root PD needs to be reserved when calling this.
+ */
+static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, struct amdgpu_bo *bo,
+ unsigned level, bool pte_support_ats)
+{
+ struct ttm_operation_ctx ctx = { true, false };
+ struct dma_fence *fence = NULL;
+ unsigned entries, ats_entries;
+ struct amdgpu_ring *ring;
+ struct amdgpu_job *job;
+ uint64_t addr;
+ int r;
+
+ addr = amdgpu_bo_gpu_offset(bo);
+ entries = amdgpu_bo_size(bo) / 8;
+
+ if (pte_support_ats) {
+ if (level == adev->vm_manager.root_level) {
+ ats_entries = amdgpu_vm_level_shift(adev, level);
+ ats_entries += AMDGPU_GPU_PAGE_SHIFT;
+ ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
+ ats_entries = min(ats_entries, entries);
+ entries -= ats_entries;
+ } else {
+ ats_entries = entries;
+ entries = 0;
+ }
+ } else {
+ ats_entries = 0;
+ }
+
+ ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
+ r = reservation_object_reserve_shared(bo->tbo.resv);
+ if (r)
+ return r;
+
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ goto error;
+
+ r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ if (r)
+ goto error;
+
+ if (ats_entries) {
+ uint64_t ats_value;
+
+ ats_value = AMDGPU_PTE_DEFAULT_ATC;
+ if (level != AMDGPU_VM_PTB)
+ ats_value |= AMDGPU_PDE_PTE;
+
+ amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
+ ats_entries, 0, ats_value);
+ addr += ats_entries * 8;
+ }
+
+ if (entries)
+ amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
+ entries, 0, 0);
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+
+ WARN_ON(job->ibs[0].length_dw > 64);
+ r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ if (r)
+ goto error_free;
+
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ if (r)
+ goto error_free;
+
+ amdgpu_bo_fence(bo, fence, true);
+ dma_fence_put(fence);
+
+ if (bo->shadow)
+ return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
+ level, pte_support_ats);
+
+ return 0;
+
+error_free:
+ amdgpu_job_free(job);
+
+error:
+ return r;
+}
+
+/**
* amdgpu_vm_alloc_levels - allocate the PD/PT levels
*
* @adev: amdgpu_device pointer
@@ -286,14 +370,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_vm_pt *parent,
uint64_t saddr, uint64_t eaddr,
- unsigned level)
+ unsigned level, bool ats)
{
- unsigned shift = (adev->vm_manager.num_level - level) *
- adev->vm_manager.block_size;
+ unsigned shift = amdgpu_vm_level_shift(adev, level);
unsigned pt_idx, from, to;
- int r;
u64 flags;
- uint64_t init_value = 0;
+ int r;
if (!parent->entries) {
unsigned num_entries = amdgpu_vm_num_entries(adev, level);
@@ -312,28 +394,17 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
to >= amdgpu_vm_num_entries(adev, level))
return -EINVAL;
- if (to > parent->last_entry_used)
- parent->last_entry_used = to;
-
++level;
saddr = saddr & ((1 << shift) - 1);
eaddr = eaddr & ((1 << shift) - 1);
- flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
else
flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_SHADOW);
- if (vm->pte_support_ats) {
- init_value = AMDGPU_PTE_DEFAULT_ATC;
- if (level != adev->vm_manager.num_level - 1)
- init_value |= AMDGPU_PDE_PTE;
-
- }
-
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
struct reservation_object *resv = vm->root.base.bo->tbo.resv;
@@ -343,16 +414,23 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
if (!entry->base.bo) {
r = amdgpu_bo_create(adev,
amdgpu_vm_bo_size(adev, level),
- AMDGPU_GPU_PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- flags,
- NULL, resv, init_value, &pt);
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, flags,
+ ttm_bo_type_kernel, resv, &pt);
if (r)
return r;
+ r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
+ if (r) {
+ amdgpu_bo_unref(&pt->shadow);
+ amdgpu_bo_unref(&pt);
+ return r;
+ }
+
if (vm->use_cpu_for_update) {
r = amdgpu_bo_kmap(pt, NULL);
if (r) {
+ amdgpu_bo_unref(&pt->shadow);
amdgpu_bo_unref(&pt);
return r;
}
@@ -369,15 +447,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_add(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
- entry->addr = 0;
}
- if (level < adev->vm_manager.num_level) {
+ if (level < AMDGPU_VM_PTB) {
uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
((1 << shift) - 1);
r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
- sub_eaddr, level);
+ sub_eaddr, level, ats);
if (r)
return r;
}
@@ -400,305 +477,29 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size)
{
- uint64_t last_pfn;
uint64_t eaddr;
+ bool ats = false;
/* validate the parameters */
if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
eaddr = saddr + size - 1;
- last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
- if (last_pfn >= adev->vm_manager.max_pfn) {
- dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
- last_pfn, adev->vm_manager.max_pfn);
- return -EINVAL;
- }
+
+ if (vm->pte_support_ats)
+ ats = saddr < AMDGPU_VA_HOLE_START;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
-}
-
-/**
- * amdgpu_vm_had_gpu_reset - check if reset occured since last use
- *
- * @adev: amdgpu_device pointer
- * @id: VMID structure
- *
- * Check if GPU reset occured since last use of the VMID.
- */
-static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
- struct amdgpu_vm_id *id)
-{
- return id->current_gpu_reset_count !=
- atomic_read(&adev->gpu_reset_counter);
-}
-
-static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
-{
- return !!vm->reserved_vmid[vmhub];
-}
-
-/* idr_mgr->lock must be held */
-static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
- struct amdgpu_ring *ring,
- struct amdgpu_sync *sync,
- struct dma_fence *fence,
- struct amdgpu_job *job)
-{
- struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
- uint64_t fence_context = adev->fence_context + ring->idx;
- struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct dma_fence *updates = sync->last_vm_update;
- int r = 0;
- struct dma_fence *flushed, *tmp;
- bool needs_flush = vm->use_cpu_for_update;
-
- flushed = id->flushed_updates;
- if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
- (atomic64_read(&id->owner) != vm->client_id) ||
- (job->vm_pd_addr != id->pd_gpu_addr) ||
- (updates && (!flushed || updates->context != flushed->context ||
- dma_fence_is_later(updates, flushed))) ||
- (!id->last_flush || (id->last_flush->context != fence_context &&
- !dma_fence_is_signaled(id->last_flush)))) {
- needs_flush = true;
- /* to prevent one context starved by another context */
- id->pd_gpu_addr = 0;
- tmp = amdgpu_sync_peek_fence(&id->active, ring);
- if (tmp) {
- r = amdgpu_sync_fence(adev, sync, tmp);
- return r;
- }
- }
-
- /* Good we can use this VMID. Remember this submission as
- * user of the VMID.
- */
- r = amdgpu_sync_fence(ring->adev, &id->active, fence);
- if (r)
- goto out;
-
- if (updates && (!flushed || updates->context != flushed->context ||
- dma_fence_is_later(updates, flushed))) {
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
- }
- id->pd_gpu_addr = job->vm_pd_addr;
- atomic64_set(&id->owner, vm->client_id);
- job->vm_needs_flush = needs_flush;
- if (needs_flush) {
- dma_fence_put(id->last_flush);
- id->last_flush = NULL;
- }
- job->vm_id = id - id_mgr->ids;
- trace_amdgpu_vm_grab_id(vm, ring, job);
-out:
- return r;
-}
-
-/**
- * amdgpu_vm_grab_id - allocate the next free VMID
- *
- * @vm: vm to allocate id for
- * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
- *
- * Allocate an id for the vm, adding fences to the sync obj as necessary.
- */
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct dma_fence *fence,
- struct amdgpu_job *job)
-{
- struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- uint64_t fence_context = adev->fence_context + ring->idx;
- struct dma_fence *updates = sync->last_vm_update;
- struct amdgpu_vm_id *id, *idle;
- struct dma_fence **fences;
- unsigned i;
- int r = 0;
-
- mutex_lock(&id_mgr->lock);
- if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
- r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
- mutex_unlock(&id_mgr->lock);
- return r;
- }
- fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
- if (!fences) {
- mutex_unlock(&id_mgr->lock);
- return -ENOMEM;
- }
- /* Check if we have an idle VMID */
- i = 0;
- list_for_each_entry(idle, &id_mgr->ids_lru, list) {
- fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
- if (!fences[i])
- break;
- ++i;
- }
-
- /* If we can't find a idle VMID to use, wait till one becomes available */
- if (&idle->list == &id_mgr->ids_lru) {
- u64 fence_context = adev->vm_manager.fence_context + ring->idx;
- unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct dma_fence_array *array;
- unsigned j;
-
- for (j = 0; j < i; ++j)
- dma_fence_get(fences[j]);
-
- array = dma_fence_array_create(i, fences, fence_context,
- seqno, true);
- if (!array) {
- for (j = 0; j < i; ++j)
- dma_fence_put(fences[j]);
- kfree(fences);
- r = -ENOMEM;
- goto error;
- }
-
-
- r = amdgpu_sync_fence(ring->adev, sync, &array->base);
- dma_fence_put(&array->base);
- if (r)
- goto error;
-
- mutex_unlock(&id_mgr->lock);
- return 0;
-
- }
- kfree(fences);
-
- job->vm_needs_flush = vm->use_cpu_for_update;
- /* Check if we can use a VMID already assigned to this VM */
- list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
- struct dma_fence *flushed;
- bool needs_flush = vm->use_cpu_for_update;
-
- /* Check all the prerequisites to using this VMID */
- if (amdgpu_vm_had_gpu_reset(adev, id))
- continue;
-
- if (atomic64_read(&id->owner) != vm->client_id)
- continue;
-
- if (job->vm_pd_addr != id->pd_gpu_addr)
- continue;
-
- if (!id->last_flush ||
- (id->last_flush->context != fence_context &&
- !dma_fence_is_signaled(id->last_flush)))
- needs_flush = true;
-
- flushed = id->flushed_updates;
- if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
- needs_flush = true;
-
- /* Concurrent flushes are only possible starting with Vega10 */
- if (adev->asic_type < CHIP_VEGA10 && needs_flush)
- continue;
-
- /* Good we can use this VMID. Remember this submission as
- * user of the VMID.
- */
- r = amdgpu_sync_fence(ring->adev, &id->active, fence);
- if (r)
- goto error;
-
- if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
- }
-
- if (needs_flush)
- goto needs_flush;
- else
- goto no_flush_needed;
-
- };
-
- /* Still no ID to use? Then use the idle one found earlier */
- id = idle;
-
- /* Remember this submission as user of the VMID */
- r = amdgpu_sync_fence(ring->adev, &id->active, fence);
- if (r)
- goto error;
-
- id->pd_gpu_addr = job->vm_pd_addr;
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
- atomic64_set(&id->owner, vm->client_id);
-
-needs_flush:
- job->vm_needs_flush = true;
- dma_fence_put(id->last_flush);
- id->last_flush = NULL;
-
-no_flush_needed:
- list_move_tail(&id->list, &id_mgr->ids_lru);
-
- job->vm_id = id - id_mgr->ids;
- trace_amdgpu_vm_grab_id(vm, ring, job);
-
-error:
- mutex_unlock(&id_mgr->lock);
- return r;
-}
-
-static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- unsigned vmhub)
-{
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-
- mutex_lock(&id_mgr->lock);
- if (vm->reserved_vmid[vmhub]) {
- list_add(&vm->reserved_vmid[vmhub]->list,
- &id_mgr->ids_lru);
- vm->reserved_vmid[vmhub] = NULL;
- atomic_dec(&id_mgr->reserved_vmid_num);
- }
- mutex_unlock(&id_mgr->lock);
-}
-
-static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- unsigned vmhub)
-{
- struct amdgpu_vm_id_manager *id_mgr;
- struct amdgpu_vm_id *idle;
- int r = 0;
-
- id_mgr = &adev->vm_manager.id_mgr[vmhub];
- mutex_lock(&id_mgr->lock);
- if (vm->reserved_vmid[vmhub])
- goto unlock;
- if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
- AMDGPU_VM_MAX_RESERVED_VMID) {
- DRM_ERROR("Over limitation of reserved vmid\n");
- atomic_dec(&id_mgr->reserved_vmid_num);
- r = -EINVAL;
- goto unlock;
+ if (eaddr >= adev->vm_manager.max_pfn) {
+ dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
+ eaddr, adev->vm_manager.max_pfn);
+ return -EINVAL;
}
- /* Select the first entry VMID */
- idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
- list_del_init(&idle->list);
- vm->reserved_vmid[vmhub] = idle;
- mutex_unlock(&id_mgr->lock);
- return 0;
-unlock:
- mutex_unlock(&id_mgr->lock);
- return r;
+ return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+ adev->vm_manager.root_level, ats);
}
/**
@@ -715,7 +516,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
has_compute_vm_bug = false;
- ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block) {
/* Compute has a VM bug for GFX version < 7.
Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
@@ -741,14 +542,14 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct amdgpu_vm_id *id;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id;
bool gds_switch_needed;
bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
- if (job->vm_id == 0)
+ if (job->vmid == 0)
return false;
- id = &id_mgr->ids[job->vm_id];
+ id = &id_mgr->ids[job->vmid];
gds_switch_needed = ring->funcs->emit_gds_switch && (
id->gds_base != job->gds_base ||
id->gds_size != job->gds_size ||
@@ -757,7 +558,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
- if (amdgpu_vm_had_gpu_reset(adev, id))
+ if (amdgpu_vmid_had_gpu_reset(adev, id))
return true;
return vm_flush_needed || gds_switch_needed;
@@ -765,14 +566,14 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
{
- return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
+ return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
}
/**
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
- * @vm_id: vmid number to use
+ * @vmid: vmid number to use
* @pd_addr: address of the page directory
*
* Emit a VM flush when it is necessary.
@@ -781,8 +582,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
id->gds_base != job->gds_base ||
id->gds_size != job->gds_size ||
@@ -791,14 +592,24 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
bool vm_flush_needed = job->vm_needs_flush;
+ bool pasid_mapping_needed = id->pasid != job->pasid ||
+ !id->pasid_mapping ||
+ !dma_fence_is_signaled(id->pasid_mapping);
+ struct dma_fence *fence = NULL;
unsigned patch_offset = 0;
int r;
- if (amdgpu_vm_had_gpu_reset(adev, id)) {
+ if (amdgpu_vmid_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
+ pasid_mapping_needed = true;
}
+ gds_switch_needed &= !!ring->funcs->emit_gds_switch;
+ vm_flush_needed &= !!ring->funcs->emit_vm_flush;
+ pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
+ ring->funcs->emit_wreg;
+
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
return 0;
@@ -808,23 +619,36 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
- if (ring->funcs->emit_vm_flush && vm_flush_needed) {
- struct dma_fence *fence;
+ if (vm_flush_needed) {
+ trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
+ amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
+ }
- trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
- amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+ if (pasid_mapping_needed)
+ amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
+ if (vm_flush_needed || pasid_mapping_needed) {
r = amdgpu_fence_emit(ring, &fence);
if (r)
return r;
+ }
+ if (vm_flush_needed) {
mutex_lock(&id_mgr->lock);
dma_fence_put(id->last_flush);
- id->last_flush = fence;
- id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+ id->last_flush = dma_fence_get(fence);
+ id->current_gpu_reset_count =
+ atomic_read(&adev->gpu_reset_counter);
mutex_unlock(&id_mgr->lock);
}
+ if (pasid_mapping_needed) {
+ id->pasid = job->pasid;
+ dma_fence_put(id->pasid_mapping);
+ id->pasid_mapping = dma_fence_get(fence);
+ }
+ dma_fence_put(fence);
+
if (ring->funcs->emit_gds_switch && gds_switch_needed) {
id->gds_base = job->gds_base;
id->gds_size = job->gds_size;
@@ -832,7 +656,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
id->gws_size = job->gws_size;
id->oa_base = job->oa_base;
id->oa_size = job->oa_size;
- amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+ amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
job->gds_size, job->gws_base,
job->gws_size, job->oa_base,
job->oa_size);
@@ -850,49 +674,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
}
/**
- * amdgpu_vm_reset_id - reset VMID to zero
- *
- * @adev: amdgpu device structure
- * @vm_id: vmid number to use
- *
- * Reset saved GDW, GWS and OA to force switch on next flush.
- */
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
- unsigned vmid)
-{
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
-
- atomic64_set(&id->owner, 0);
- id->gds_base = 0;
- id->gds_size = 0;
- id->gws_base = 0;
- id->gws_size = 0;
- id->oa_base = 0;
- id->oa_size = 0;
-}
-
-/**
- * amdgpu_vm_reset_all_id - reset VMID to zero
- *
- * @adev: amdgpu device structure
- *
- * Reset VMID to force flush on next use
- */
-void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
-{
- unsigned i, j;
-
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vm_id_manager *id_mgr =
- &adev->vm_manager.id_mgr[i];
-
- for (j = 1; j < id_mgr->num_ids; ++j)
- amdgpu_vm_reset_id(adev, i, j);
- }
-}
-
-/**
* amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
*
* @vm: requested vm
@@ -921,6 +702,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* amdgpu_vm_do_set_ptes - helper to call the right asic function
*
* @params: see amdgpu_pte_update_params definition
+ * @bo: PD/PT to update
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -931,10 +713,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* to setup the page table using the DMA.
*/
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint64_t flags)
{
+ pe += amdgpu_bo_gpu_offset(bo);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
if (count < 3) {
@@ -951,6 +735,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
* amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
*
* @params: see amdgpu_pte_update_params definition
+ * @bo: PD/PT to update
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -960,13 +745,14 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
* Traces the parameters and calls the DMA function to copy the PTEs.
*/
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint64_t flags)
{
uint64_t src = (params->src + (addr >> 12) * 8);
-
+ pe += amdgpu_bo_gpu_offset(bo);
trace_amdgpu_vm_copy_ptes(pe, src, count);
amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
@@ -1000,6 +786,7 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
* amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
*
* @params: see amdgpu_pte_update_params definition
+ * @bo: PD/PT to update
* @pe: kmap addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -1009,6 +796,7 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
* Write count number of PT/PD entries directly.
*/
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint64_t flags)
@@ -1016,14 +804,16 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
unsigned int i;
uint64_t value;
+ pe += (unsigned long)amdgpu_bo_kptr(bo);
+
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
for (i = 0; i < count; i++) {
value = params->pages_addr ?
amdgpu_vm_map_gart(params->pages_addr, addr) :
addr;
- amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
- i, value, flags);
+ amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+ i, value, flags);
addr += incr;
}
}
@@ -1043,162 +833,39 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
/*
- * amdgpu_vm_update_level - update a single level in the hierarchy
+ * amdgpu_vm_update_pde - update a single level in the hierarchy
*
- * @adev: amdgpu_device pointer
+ * @params: parameters for the update
* @vm: requested vm
* @parent: parent directory
+ * @entry: entry to update
*
- * Makes sure all entries in @parent are up to date.
- * Returns 0 for success, error for failure.
+ * Makes sure the requested entry in parent is up to date.
*/
-static int amdgpu_vm_update_level(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ struct amdgpu_vm_pt *entry)
{
- struct amdgpu_bo *shadow;
- struct amdgpu_ring *ring = NULL;
- uint64_t pd_addr, shadow_addr = 0;
- uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
- unsigned count = 0, pt_idx, ndw = 0;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- struct dma_fence *fence = NULL;
- uint32_t incr;
-
- int r;
+ struct amdgpu_bo *bo = parent->base.bo, *pbo;
+ uint64_t pde, pt, flags;
+ unsigned level;
- if (!parent->entries)
- return 0;
-
- memset(&params, 0, sizeof(params));
- params.adev = adev;
- shadow = parent->base.bo->shadow;
-
- if (vm->use_cpu_for_update) {
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
- r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
- if (unlikely(r))
- return r;
-
- params.func = amdgpu_vm_cpu_set_ptes;
- } else {
- ring = container_of(vm->entity.sched, struct amdgpu_ring,
- sched);
-
- /* padding, etc. */
- ndw = 64;
-
- /* assume the worst case */
- ndw += parent->last_entry_used * 6;
-
- pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
-
- if (shadow) {
- shadow_addr = amdgpu_bo_gpu_offset(shadow);
- ndw *= 2;
- } else {
- shadow_addr = 0;
- }
-
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
- if (r)
- return r;
-
- params.ib = &job->ibs[0];
- params.func = amdgpu_vm_do_set_ptes;
- }
-
-
- /* walk over the address space and update the directory */
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- struct amdgpu_bo *bo = entry->base.bo;
- uint64_t pde, pt;
-
- if (bo == NULL)
- continue;
-
- spin_lock(&vm->status_lock);
- list_del_init(&entry->base.vm_status);
- spin_unlock(&vm->status_lock);
-
- pt = amdgpu_bo_gpu_offset(bo);
- pt = amdgpu_gart_get_vm_pde(adev, pt);
- /* Don't update huge pages here */
- if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
- parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
- continue;
-
- parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
-
- pde = pd_addr + pt_idx * 8;
- incr = amdgpu_bo_size(bo);
- if (((last_pde + 8 * count) != pde) ||
- ((last_pt + incr * count) != pt) ||
- (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
-
- if (count) {
- if (shadow)
- params.func(&params,
- last_shadow,
- last_pt, count,
- incr,
- AMDGPU_PTE_VALID);
-
- params.func(&params, last_pde,
- last_pt, count, incr,
- AMDGPU_PTE_VALID);
- }
-
- count = 1;
- last_pde = pde;
- last_shadow = shadow_addr + pt_idx * 8;
- last_pt = pt;
- } else {
- ++count;
- }
- }
-
- if (count) {
- if (vm->root.base.bo->shadow)
- params.func(&params, last_shadow, last_pt,
- count, incr, AMDGPU_PTE_VALID);
-
- params.func(&params, last_pde, last_pt,
- count, incr, AMDGPU_PTE_VALID);
- }
-
- if (!vm->use_cpu_for_update) {
- if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync,
- parent->base.bo->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
- if (shadow)
- amdgpu_sync_resv(adev, &job->sync,
- shadow->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
-
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(parent->base.bo, fence, true);
- dma_fence_put(vm->last_update);
- vm->last_update = fence;
- }
- }
+ /* Don't update huge pages here */
+ if (entry->huge)
+ return;
- return 0;
+ for (level = 0, pbo = bo->parent; pbo; ++level)
+ pbo = pbo->parent;
-error_free:
- amdgpu_job_free(job);
- return r;
+ level += params->adev->vm_manager.root_level;
+ pt = amdgpu_bo_gpu_offset(entry->base.bo);
+ flags = AMDGPU_PTE_VALID;
+ amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
+ pde = (entry - parent->entries) * 8;
+ if (bo->shadow)
+ params->func(params, bo->shadow, pde, pt, 1, 0, flags);
+ params->func(params, bo, pde, pt, 1, 0, flags);
}
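
A side note on the arithmetic above: the function derives the absolute level of the directory being written by walking the BO parent chain and adding the root level, and the PDE offset is simply the entry index times the 8-byte PDE size. A minimal standalone sketch of that walk, with a made-up parent chain and root level (illustration only, not kernel code):

    #include <stdio.h>

    struct pt_bo { struct pt_bo *parent; };

    int main(void)
    {
        /* hypothetical chain: root <- pdb0 <- pd, with root_level = 1 */
        struct pt_bo root = { 0 };
        struct pt_bo pdb0 = { &root };
        struct pt_bo pd   = { &pdb0 };
        unsigned root_level = 1, level = 0;
        struct pt_bo *pbo;

        for (pbo = pd.parent; pbo; ++level)   /* same shape as the loop above */
            pbo = pbo->parent;
        level += root_level;

        printf("directory level = %u, PDE offset for entry 5 = %d bytes\n",
               level, 5 * 8);
        return 0;
    }
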
/*
@@ -1208,27 +875,29 @@ error_free:
*
* Mark all PD level as invalid after an error.
*/
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ unsigned level)
{
- unsigned pt_idx;
+ unsigned pt_idx, num_entries;
/*
* Recurse into the subdirectories. This recursion is harmless because
* we only have a maximum of 5 layers.
*/
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+ num_entries = amdgpu_vm_num_entries(adev, level);
+ for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
if (!entry->base.bo)
continue;
- entry->addr = ~0ULL;
spin_lock(&vm->status_lock);
if (list_empty(&entry->base.vm_status))
list_add(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
- amdgpu_vm_invalidate_level(vm, entry);
+ amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
}
}
@@ -1244,47 +913,103 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
+ struct amdgpu_pte_update_params params;
+ struct amdgpu_job *job;
+ unsigned ndw = 0;
int r = 0;
+ if (list_empty(&vm->relocated))
+ return 0;
+
+restart:
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+
+ if (vm->use_cpu_for_update) {
+ r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ if (unlikely(r))
+ return r;
+
+ params.func = amdgpu_vm_cpu_set_ptes;
+ } else {
+ ndw = 512 * 8;
+ r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+ if (r)
+ return r;
+
+ params.ib = &job->ibs[0];
+ params.func = amdgpu_vm_do_set_ptes;
+ }
+
spin_lock(&vm->status_lock);
while (!list_empty(&vm->relocated)) {
- struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_vm_bo_base *bo_base, *parent;
+ struct amdgpu_vm_pt *pt, *entry;
struct amdgpu_bo *bo;
bo_base = list_first_entry(&vm->relocated,
struct amdgpu_vm_bo_base,
vm_status);
+ list_del_init(&bo_base->vm_status);
spin_unlock(&vm->status_lock);
bo = bo_base->bo->parent;
- if (bo) {
- struct amdgpu_vm_bo_base *parent;
- struct amdgpu_vm_pt *pt;
-
- parent = list_first_entry(&bo->va,
- struct amdgpu_vm_bo_base,
- bo_list);
- pt = container_of(parent, struct amdgpu_vm_pt, base);
-
- r = amdgpu_vm_update_level(adev, vm, pt);
- if (r) {
- amdgpu_vm_invalidate_level(vm, &vm->root);
- return r;
- }
- spin_lock(&vm->status_lock);
- } else {
+ if (!bo) {
spin_lock(&vm->status_lock);
- list_del_init(&bo_base->vm_status);
+ continue;
}
+
+ parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
+ bo_list);
+ pt = container_of(parent, struct amdgpu_vm_pt, base);
+ entry = container_of(bo_base, struct amdgpu_vm_pt, base);
+
+ amdgpu_vm_update_pde(&params, vm, pt, entry);
+
+ spin_lock(&vm->status_lock);
+ if (!vm->use_cpu_for_update &&
+ (ndw - params.ib->length_dw) < 32)
+ break;
}
spin_unlock(&vm->status_lock);
if (vm->use_cpu_for_update) {
/* Flush HDP */
mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ amdgpu_asic_flush_hdp(adev, NULL);
+ } else if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ } else {
+ struct amdgpu_bo *root = vm->root.base.bo;
+ struct amdgpu_ring *ring;
+ struct dma_fence *fence;
+
+ ring = container_of(vm->entity.sched, struct amdgpu_ring,
+ sched);
+
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM, false);
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error;
+
+ amdgpu_bo_fence(root, fence, true);
+ dma_fence_put(vm->last_update);
+ vm->last_update = fence;
}
+ if (!list_empty(&vm->relocated))
+ goto restart;
+
+ return 0;
+
+error:
+ amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+ adev->vm_manager.root_level);
+ amdgpu_job_free(job);
return r;
}
@@ -1302,18 +1027,19 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
struct amdgpu_vm_pt **entry,
struct amdgpu_vm_pt **parent)
{
- unsigned idx, level = p->adev->vm_manager.num_level;
+ unsigned level = p->adev->vm_manager.root_level;
*parent = NULL;
*entry = &p->vm->root;
while ((*entry)->entries) {
- idx = addr >> (p->adev->vm_manager.block_size * level--);
- idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
+ unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
+
*parent = *entry;
- *entry = &(*entry)->entries[idx];
+ *entry = &(*entry)->entries[addr >> shift];
+ addr &= (1ULL << shift) - 1;
}
- if (level)
+ if (level != AMDGPU_VM_PTB)
*entry = NULL;
}
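
The walk above peels one level's worth of index bits off the address at each step, so each directory entry index comes from the top of what is left. A rough standalone illustration of that decomposition, assuming three 9-bit directory levels above a 9-bit page table block (the address value is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x123456789ULL;      /* example address, in GPU pages */
        unsigned shifts[] = { 27, 18, 9 };   /* PDB2, PDB1, PDB0 over a 9-bit PTB */
        unsigned i;

        for (i = 0; i < 3; i++) {
            printf("level %u index = %llu\n", i,
                   (unsigned long long)(addr >> shifts[i]));
            addr &= (1ULL << shifts[i]) - 1; /* keep the remainder for the next level */
        }
        printf("page table index = %llu\n", (unsigned long long)addr);
        return 0;
    }
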
@@ -1335,56 +1061,33 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
unsigned nptes, uint64_t dst,
uint64_t flags)
{
- bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
- uint64_t pd_addr, pde;
+ uint64_t pde;
/* In the case of a mixed PT, the PDE must point to it */
- if (p->adev->asic_type < CHIP_VEGA10 ||
- nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
- p->src ||
- !(flags & AMDGPU_PTE_VALID)) {
-
- dst = amdgpu_bo_gpu_offset(entry->base.bo);
- dst = amdgpu_gart_get_vm_pde(p->adev, dst);
- flags = AMDGPU_PTE_VALID;
- } else {
+ if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
+ nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
/* Set the huge page flag to stop scanning at this PDE */
flags |= AMDGPU_PDE_PTE;
}
- if (entry->addr == (dst | flags))
+ if (!(flags & AMDGPU_PDE_PTE)) {
+ if (entry->huge) {
+ /* Add the entry to the relocated list to update it. */
+ entry->huge = false;
+ spin_lock(&p->vm->status_lock);
+ list_move(&entry->base.vm_status, &p->vm->relocated);
+ spin_unlock(&p->vm->status_lock);
+ }
return;
+ }
- entry->addr = (dst | flags);
-
- if (use_cpu_update) {
- /* In case a huge page is replaced with a system
- * memory mapping, p->pages_addr != NULL and
- * amdgpu_vm_cpu_set_ptes would try to translate dst
- * through amdgpu_vm_map_gart. But dst is already a
- * GPU address (of the page table). Disable
- * amdgpu_vm_map_gart temporarily.
- */
- dma_addr_t *tmp;
-
- tmp = p->pages_addr;
- p->pages_addr = NULL;
-
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
- pde = pd_addr + (entry - parent->entries) * 8;
- amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
+ entry->huge = true;
+ amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
- p->pages_addr = tmp;
- } else {
- if (parent->base.bo->shadow) {
- pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
- pde = pd_addr + (entry - parent->entries) * 8;
- amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
- }
- pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
- pde = pd_addr + (entry - parent->entries) * 8;
- amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
- }
+ pde = (entry - parent->entries) * 8;
+ if (parent->base.bo->shadow)
+ p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
+ p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
}
/**
@@ -1410,7 +1113,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t addr, pe_start;
struct amdgpu_bo *pt;
unsigned nptes;
- bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
/* walk over the address space and update the page tables */
for (addr = start; addr < end; addr += nptes,
@@ -1429,24 +1131,15 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
amdgpu_vm_handle_huge_pages(params, entry, parent,
nptes, dst, flags);
/* We don't need to update PTEs for huge pages */
- if (entry->addr & AMDGPU_PDE_PTE)
+ if (entry->huge)
continue;
pt = entry->base.bo;
- if (use_cpu_update) {
- pe_start = (unsigned long)amdgpu_bo_kptr(pt);
- } else {
- if (pt->shadow) {
- pe_start = amdgpu_bo_gpu_offset(pt->shadow);
- pe_start += (addr & mask) * 8;
- params->func(params, pe_start, dst, nptes,
- AMDGPU_GPU_PAGE_SIZE, flags);
- }
- pe_start = amdgpu_bo_gpu_offset(pt);
- }
-
- pe_start += (addr & mask) * 8;
- params->func(params, pe_start, dst, nptes,
+ pe_start = (addr & mask) * 8;
+ if (pt->shadow)
+ params->func(params, pt->shadow, pe_start, dst, nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
+ params->func(params, pt, pe_start, dst, nptes,
AMDGPU_GPU_PAGE_SIZE, flags);
}
@@ -1588,14 +1281,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
*
* The second command is for the shadow pagetables.
*/
- ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
+ if (vm->root.base.bo->shadow)
+ ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
+ else
+ ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
/* padding, etc. */
ndw = 64;
- /* one PDE write for each huge page */
- ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
-
if (pages_addr) {
/* copy commands needed */
ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
@@ -1607,11 +1300,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
} else {
/* set page commands needed */
- ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
+ ndw += ncmds * 10;
/* extra commands for begin/end fragments */
- ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw
- * adev->vm_manager.fragment_size;
+ ndw += 2 * 10 * adev->vm_manager.fragment_size;
params.func = amdgpu_vm_do_set_ptes;
}
@@ -1639,7 +1331,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
addr = 0;
}
- r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+ r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
if (r)
goto error_free;
@@ -1670,7 +1362,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
error_free:
amdgpu_job_free(job);
- amdgpu_vm_invalidate_level(vm, &vm->root);
return r;
}
@@ -1861,7 +1552,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
if (vm->use_cpu_for_update) {
/* Flush HDP */
mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ amdgpu_asic_flush_hdp(adev, NULL);
}
spin_lock(&vm->status_lock);
@@ -1889,7 +1580,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
enable = !!atomic_read(&adev->vm_manager.num_prt_users);
- adev->gart.gart_funcs->set_prt(adev, enable);
+ adev->gmc.gmc_funcs->set_prt(adev, enable);
spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}
@@ -1898,7 +1589,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
*/
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
- if (!adev->gart.gart_funcs->set_prt)
+ if (!adev->gmc.gmc_funcs->set_prt)
return;
if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1933,7 +1624,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
{
struct amdgpu_prt_cb *cb;
- if (!adev->gart.gart_funcs->set_prt)
+ if (!adev->gmc.gmc_funcs->set_prt)
return;
cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -2027,16 +1718,16 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct dma_fence **fence)
{
struct amdgpu_bo_va_mapping *mapping;
+ uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
int r;
- uint64_t init_pte_value = 0;
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- if (vm->pte_support_ats)
+ if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
@@ -2081,18 +1772,31 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
struct amdgpu_bo_va *bo_va;
+ struct reservation_object *resv;
bo_va = list_first_entry(&vm->moved,
struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
+ resv = bo_va->base.bo->tbo.resv;
+
/* Per VM BOs never need to be cleared in the page tables */
- clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;
+ if (resv == vm->root.base.bo->tbo.resv)
+ clear = false;
+ /* Try to reserve the BO to avoid clearing its ptes */
+ else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+ clear = false;
+ /* Somebody else is using the BO right now */
+ else
+ clear = true;
r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
+ if (!clear && resv != vm->root.base.bo->tbo.resv)
+ reservation_object_unlock(resv);
+
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
@@ -2132,8 +1836,26 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- if (bo)
- list_add_tail(&bo_va->base.bo_list, &bo->va);
+ if (!bo)
+ return bo_va;
+
+ list_add_tail(&bo_va->base.bo_list, &bo->va);
+
+ if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+ return bo_va;
+
+ if (bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+ return bo_va;
+
+ /*
+ * We checked all the prerequisites, but it looks like this per VM BO
+ * is currently evicted. Add the BO to the evicted list to make sure it
+ * is validated on next VM use to avoid faults.
+ */
+ spin_lock(&vm->status_lock);
+ list_move_tail(&bo_va->base.vm_status, &vm->evicted);
+ spin_unlock(&vm->status_lock);
return bo_va;
}
@@ -2556,47 +2278,69 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
}
/**
- * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
- *
- * @adev: amdgpu_device pointer
- * @fragment_size_default: the default fragment size if it's set auto
- */
-void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
- uint32_t fragment_size_default)
-{
- if (amdgpu_vm_fragment_size == -1)
- adev->vm_manager.fragment_size = fragment_size_default;
- else
- adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
-}
-
-/**
* amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
*
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
- uint32_t fragment_size_default)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+ uint32_t fragment_size_default, unsigned max_level,
+ unsigned max_bits)
{
- /* adjust vm size firstly */
- if (amdgpu_vm_size == -1)
- adev->vm_manager.vm_size = vm_size;
- else
- adev->vm_manager.vm_size = amdgpu_vm_size;
+ uint64_t tmp;
- /* block size depends on vm size */
- if (amdgpu_vm_block_size == -1)
+ /* adjust vm size first */
+ if (amdgpu_vm_size != -1) {
+ unsigned max_size = 1 << (max_bits - 30);
+
+ vm_size = amdgpu_vm_size;
+ if (vm_size > max_size) {
+ dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
+ amdgpu_vm_size, max_size);
+ vm_size = max_size;
+ }
+ }
+
+ adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
+
+ tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
+ if (amdgpu_vm_block_size != -1)
+ tmp >>= amdgpu_vm_block_size - 9;
+ tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
+ adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
+ switch (adev->vm_manager.num_level) {
+ case 3:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB2;
+ break;
+ case 2:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB1;
+ break;
+ case 1:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB0;
+ break;
+ default:
+ dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
+ }
+ /* block size depends on vm size and hw setup */
+ if (amdgpu_vm_block_size != -1)
adev->vm_manager.block_size =
- amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
+ min((unsigned)amdgpu_vm_block_size, max_bits
+ - AMDGPU_GPU_PAGE_SHIFT
+ - 9 * adev->vm_manager.num_level);
+ else if (adev->vm_manager.num_level > 1)
+ adev->vm_manager.block_size = 9;
else
- adev->vm_manager.block_size = amdgpu_vm_block_size;
+ adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
- amdgpu_vm_set_fragment_size(adev, fragment_size_default);
+ if (amdgpu_vm_fragment_size == -1)
+ adev->vm_manager.fragment_size = fragment_size_default;
+ else
+ adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
- DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
- adev->vm_manager.vm_size, adev->vm_manager.block_size,
- adev->vm_manager.fragment_size);
+ DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
+ vm_size, adev->vm_manager.num_level + 1,
+ adev->vm_manager.block_size,
+ adev->vm_manager.fragment_size);
}
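
The sizing above boils down to: max_pfn is the VM size converted from GB to 4K pages (<< 18), the number of directory levels is how many 9-bit chunks the page index needs beyond the page table block, and the block size defaults to 9 bits once more than one level is in use. A standalone back-of-the-envelope version of that arithmetic (illustration only; the 256 GB size and 3-level cap are example inputs, not statements about any particular ASIC, and the single-level block-size helper is left out):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned fls64_(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }

    static uint64_t roundup_pow_of_two_(uint64_t x)
    {
        return x <= 1 ? 1 : 1ULL << fls64_(x - 1);
    }

    int main(void)
    {
        unsigned vm_size = 256;                       /* GB, example value  */
        unsigned max_level = 3;                       /* example hw limit   */
        uint64_t max_pfn = (uint64_t)vm_size << 18;   /* 4K pages           */
        uint64_t tmp = roundup_pow_of_two_(max_pfn);
        unsigned num_level = (fls64_(tmp) - 1 + 8) / 9 - 1; /* DIV_ROUND_UP(..., 9) - 1 */

        if (num_level > max_level)
            num_level = max_level;

        printf("max_pfn=%llu, %u levels, block size 9-bit\n",
               (unsigned long long)max_pfn, num_level + 1);
        return 0;
    }
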
/**
@@ -2615,13 +2359,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance;
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
+ unsigned long size;
+ uint64_t flags;
int r, i;
- u64 flags;
- uint64_t init_pde_value = 0;
vm->va = RB_ROOT_CACHED;
- vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
@@ -2635,9 +2378,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
- r = amd_sched_entity_init(&ring->sched, &vm->entity,
- rq, amdgpu_sched_jobs);
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&ring->sched, &vm->entity,
+ rq, amdgpu_sched_jobs, NULL);
if (r)
return r;
@@ -2647,50 +2390,44 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->asic_type == CHIP_RAVEN)
vm->pte_support_ats = true;
- init_pde_value = AMDGPU_PTE_DEFAULT_ATC
- | AMDGPU_PDE_PTE;
-
- }
- } else
+ } else {
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
+ }
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
"CPU update of VM recommended only for large BAR system\n");
vm->last_update = NULL;
- flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
else
- flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW);
+ flags |= AMDGPU_GEM_CREATE_SHADOW;
- r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- flags,
- NULL, NULL, init_pde_value, &vm->root.base.bo);
+ size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
+ r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
+ ttm_bo_type_kernel, NULL, &vm->root.base.bo);
if (r)
goto error_free_sched_entity;
- vm->root.base.vm = vm;
- list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
- INIT_LIST_HEAD(&vm->root.base.vm_status);
+ r = amdgpu_bo_reserve(vm->root.base.bo, true);
+ if (r)
+ goto error_free_root;
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_reserve(vm->root.base.bo, false);
- if (r)
- goto error_free_root;
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
+ adev->vm_manager.root_level,
+ vm->pte_support_ats);
+ if (r)
+ goto error_unreserve;
- r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
- amdgpu_bo_unreserve(vm->root.base.bo);
- if (r)
- goto error_free_root;
- }
+ vm->root.base.vm = vm;
+ list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
+ list_add_tail(&vm->root.base.vm_status, &vm->evicted);
+ amdgpu_bo_unreserve(vm->root.base.bo);
if (pasid) {
unsigned long flags;
@@ -2710,40 +2447,115 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return 0;
+error_unreserve:
+ amdgpu_bo_unreserve(vm->root.base.bo);
+
error_free_root:
amdgpu_bo_unref(&vm->root.base.bo->shadow);
amdgpu_bo_unref(&vm->root.base.bo);
vm->root.base.bo = NULL;
error_free_sched_entity:
- amd_sched_entity_fini(&ring->sched, &vm->entity);
+ drm_sched_entity_fini(&ring->sched, &vm->entity);
return r;
}
/**
+ * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
+ *
+ * This only works on GFX VMs that don't have any BOs added and no
+ * page tables allocated yet.
+ *
+ * Changes the following VM parameters:
+ * - use_cpu_for_update
+ * - pte_support_ats
+ * - pasid (old PASID is released, because compute manages its own PASIDs)
+ *
+ * Reinitializes the page directory to reflect the changed ATS
+ * setting. May leave behind an unused shadow BO for the page
+ * directory when switching from SDMA updates to CPU updates.
+ *
+ * Returns 0 for success, -errno for errors.
+ */
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
+ int r;
+
+ r = amdgpu_bo_reserve(vm->root.base.bo, true);
+ if (r)
+ return r;
+
+ /* Sanity checks */
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
+ r = -EINVAL;
+ goto error;
+ }
+
+ /* Check if PD needs to be reinitialized and do it before
+ * changing any other state, in case it fails.
+ */
+ if (pte_support_ats != vm->pte_support_ats) {
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
+ adev->vm_manager.root_level,
+ pte_support_ats);
+ if (r)
+ goto error;
+ }
+
+ /* Update VM state */
+ vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
+ AMDGPU_VM_USE_CPU_FOR_COMPUTE);
+ vm->pte_support_ats = pte_support_ats;
+ DRM_DEBUG_DRIVER("VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
+ WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+ "CPU update of VM recommended only for large BAR system\n");
+
+ if (vm->pasid) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+
+ vm->pasid = 0;
+ }
+
+error:
+ amdgpu_bo_unreserve(vm->root.base.bo);
+ return r;
+}
+
+/**
* amdgpu_vm_free_levels - free PD/PT levels
*
- * @level: PD/PT starting level to free
+ * @adev: amdgpu device structure
+ * @parent: PD/PT starting level to free
+ * @level: level of parent structure
*
* Free the page directory or page table level and all sub levels.
*/
-static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
+static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt *parent,
+ unsigned level)
{
- unsigned i;
+ unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
- if (level->base.bo) {
- list_del(&level->base.bo_list);
- list_del(&level->base.vm_status);
- amdgpu_bo_unref(&level->base.bo->shadow);
- amdgpu_bo_unref(&level->base.bo);
+ if (parent->base.bo) {
+ list_del(&parent->base.bo_list);
+ list_del(&parent->base.vm_status);
+ amdgpu_bo_unref(&parent->base.bo->shadow);
+ amdgpu_bo_unref(&parent->base.bo);
}
- if (level->entries)
- for (i = 0; i <= level->last_entry_used; i++)
- amdgpu_vm_free_levels(&level->entries[i]);
+ if (parent->entries)
+ for (i = 0; i < num_entries; i++)
+ amdgpu_vm_free_levels(adev, &parent->entries[i],
+ level + 1);
- kvfree(level->entries);
+ kvfree(parent->entries);
}
/**
@@ -2758,11 +2570,13 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
- bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+ bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
u64 fault;
int i, r;
+ amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+
/* Clear pending page faults from IH when the VM is destroyed */
while (kfifo_get(&vm->faults, &fault))
amdgpu_ih_clear_fault(adev, fault);
@@ -2775,7 +2589,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+ drm_sched_entity_fini(vm->entity.sched, &vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2801,13 +2615,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r) {
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
} else {
- amdgpu_vm_free_levels(&vm->root);
+ amdgpu_vm_free_levels(adev, &vm->root,
+ adev->vm_manager.root_level);
amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
- amdgpu_vm_free_reserved_vmid(adev, vm, i);
+ amdgpu_vmid_free_reserved(adev, vm, i);
}
/**
@@ -2826,17 +2641,21 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
spin_lock(&adev->vm_manager.pasid_lock);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- spin_unlock(&adev->vm_manager.pasid_lock);
- if (!vm)
+ if (!vm) {
/* VM not found, can't track fault credit */
+ spin_unlock(&adev->vm_manager.pasid_lock);
return true;
+ }
/* No lock needed. only accessed by IRQ handler */
- if (!vm->fault_credit)
+ if (!vm->fault_credit) {
/* Too many faults in this VM */
+ spin_unlock(&adev->vm_manager.pasid_lock);
return false;
+ }
vm->fault_credit--;
+ spin_unlock(&adev->vm_manager.pasid_lock);
return true;
}
@@ -2849,23 +2668,9 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
*/
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
- unsigned i, j;
-
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vm_id_manager *id_mgr =
- &adev->vm_manager.id_mgr[i];
+ unsigned i;
- mutex_init(&id_mgr->lock);
- INIT_LIST_HEAD(&id_mgr->ids_lru);
- atomic_set(&id_mgr->reserved_vmid_num, 0);
-
- /* skip over VMID 0, since it is the system VM */
- for (j = 1; j < id_mgr->num_ids; ++j) {
- amdgpu_vm_reset_id(adev, i, j);
- amdgpu_sync_create(&id_mgr->ids[i].active);
- list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
- }
- }
+ amdgpu_vmid_mgr_init(adev);
adev->vm_manager.fence_context =
dma_fence_context_alloc(AMDGPU_MAX_RINGS);
@@ -2873,7 +2678,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->vm_manager.seqno[i] = 0;
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
- atomic64_set(&adev->vm_manager.client_counter, 0);
spin_lock_init(&adev->vm_manager.prt_lock);
atomic_set(&adev->vm_manager.num_prt_users, 0);
@@ -2906,24 +2710,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
- unsigned i, j;
-
WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
idr_destroy(&adev->vm_manager.pasid_idr);
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vm_id_manager *id_mgr =
- &adev->vm_manager.id_mgr[i];
-
- mutex_destroy(&id_mgr->lock);
- for (j = 0; j < AMDGPU_NUM_VM; ++j) {
- struct amdgpu_vm_id *id = &id_mgr->ids[j];
-
- amdgpu_sync_free(&id->active);
- dma_fence_put(id->flushed_updates);
- dma_fence_put(id->last_flush);
- }
- }
+ amdgpu_vmid_mgr_fini(adev);
}
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -2936,13 +2726,12 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* currently, we only need to reserve a vmid from the gfxhub */
- r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
- AMDGPU_GFXHUB);
+ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
if (r)
return r;
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
- amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
+ amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index bae7735..30f0803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -24,12 +24,15 @@
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__
-#include <linux/rbtree.h>
#include <linux/idr.h>
+#include <linux/kfifo.h>
+#include <linux/rbtree.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_file.h>
-#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
+#include "amdgpu_ids.h"
struct amdgpu_bo_va;
struct amdgpu_job;
@@ -39,9 +42,6 @@ struct amdgpu_bo_list_entry;
* GPUVM handling
*/
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
@@ -69,6 +69,12 @@ struct amdgpu_bo_list_entry;
/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE (1ULL << 54)
+/* PTE is handled as PDE for VEGA10 (Translate Further) */
+#define AMDGPU_PTE_TF (1ULL << 56)
+
+/* PDE Block Fragment Size for VEGA10 */
+#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)
+
/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
@@ -94,7 +100,20 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB 1
/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20)
+#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
+
+/* VA hole for 48bit addresses on Vega10 */
+#define AMDGPU_VA_HOLE_START 0x0000800000000000ULL
+#define AMDGPU_VA_HOLE_END 0xffff800000000000ULL
+
+/*
+ * Hardware is programmed with start and end address values as if the hole
+ * doesn't exist.
+ *
+ * This mask is used to remove the upper 16bits of the VA and so come up with
+ * the linear addr value.
+ */
+#define AMDGPU_VA_HOLE_MASK 0x0000ffffffffffffULL
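
In practice the mask just strips the sign-extended upper bits, so a VA from the upper range maps onto the same linear value the hardware is actually programmed with. A tiny illustration with a made-up address:

    #include <stdint.h>
    #include <stdio.h>

    #define VA_HOLE_MASK 0x0000ffffffffffffULL   /* mirrors AMDGPU_VA_HOLE_MASK */

    int main(void)
    {
        uint64_t va = 0xffff800000001000ULL;     /* example upper-range VA */
        uint64_t linear = va & VA_HOLE_MASK;     /* drop the upper 16 bits */

        printf("va=0x%016llx -> linear=0x%016llx\n",
               (unsigned long long)va, (unsigned long long)linear);
        return 0;
    }
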
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
@@ -106,6 +125,16 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
+/* VMPT level enumeration; the hierarchy is:
+ * PDB2->PDB1->PDB0->PTB
+ */
+enum amdgpu_vm_level {
+ AMDGPU_VM_PDB2,
+ AMDGPU_VM_PDB1,
+ AMDGPU_VM_PDB0,
+ AMDGPU_VM_PTB
+};
+
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
/* constant after initialization */
@@ -124,11 +153,10 @@ struct amdgpu_vm_bo_base {
struct amdgpu_vm_pt {
struct amdgpu_vm_bo_base base;
- uint64_t addr;
+ bool huge;
/* array of page tables, one for each directory entry */
struct amdgpu_vm_pt *entries;
- unsigned last_entry_used;
};
#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
@@ -162,13 +190,11 @@ struct amdgpu_vm {
spinlock_t freed_lock;
/* Scheduler entity for page table updates */
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
- /* client id and PASID (TODO: replace client_id with PASID) */
- u64 client_id;
unsigned int pasid;
/* dedicated to vm */
- struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
@@ -181,39 +207,20 @@ struct amdgpu_vm {
/* Limit non-retry fault storms */
unsigned int fault_credit;
-};
-
-struct amdgpu_vm_id {
- struct list_head list;
- struct amdgpu_sync active;
- struct dma_fence *last_flush;
- atomic64_t owner;
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct dma_fence *flushed_updates;
+ /* Points to the KFD process VM info */
+ struct amdkfd_process_info *process_info;
- uint32_t current_gpu_reset_count;
-
- uint32_t gds_base;
- uint32_t gds_size;
- uint32_t gws_base;
- uint32_t gws_size;
- uint32_t oa_base;
- uint32_t oa_size;
-};
+ /* List node in amdkfd_process_info.vm_list_head */
+ struct list_head vm_list_node;
-struct amdgpu_vm_id_manager {
- struct mutex lock;
- unsigned num_ids;
- struct list_head ids_lru;
- struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
- atomic_t reserved_vmid_num;
+ /* Valid while the PD is reserved or fenced */
+ uint64_t pd_phys_addr;
};
struct amdgpu_vm_manager {
/* Handling of VMIDs */
- struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
/* Handling of VM fences */
u64 fence_context;
@@ -221,9 +228,9 @@ struct amdgpu_vm_manager {
uint64_t max_pfn;
uint32_t num_level;
- uint64_t vm_size;
uint32_t block_size;
uint32_t fragment_size;
+ enum amdgpu_vm_level root_level;
/* vram base address for page table entry */
u64 vram_base_offset;
/* vm pte handling */
@@ -231,8 +238,6 @@ struct amdgpu_vm_manager {
struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rings;
atomic_t vm_pte_next_ring;
- /* client id counter */
- atomic64_t client_counter;
/* partial resident texture handling */
spinlock_t prt_lock;
@@ -251,12 +256,11 @@ struct amdgpu_vm_manager {
spinlock_t pasid_lock;
};
-int amdgpu_vm_alloc_pasid(unsigned int bits);
-void amdgpu_vm_free_pasid(unsigned int pasid);
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid);
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
unsigned int pasid);
@@ -270,13 +274,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct dma_fence *fence,
- struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
- unsigned vmid);
-void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -312,10 +310,9 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
- uint32_t fragment_size_default);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
- uint32_t fragment_size_default);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+ uint32_t fragment_size_default, unsigned max_level,
+ unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 4acca92..9aca653 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -89,11 +89,11 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
uint64_t start = node->start << PAGE_SHIFT;
uint64_t end = (node->size + node->start) << PAGE_SHIFT;
- if (start >= adev->mc.visible_vram_size)
+ if (start >= adev->gmc.visible_vram_size)
return 0;
- return (end > adev->mc.visible_vram_size ?
- adev->mc.visible_vram_size : end) - start;
+ return (end > adev->gmc.visible_vram_size ?
+ adev->gmc.visible_vram_size : end) - start;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2..d702fb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -34,7 +34,7 @@
#include <linux/backlight.h>
#include "bif/bif_4_1_d.h"
-static u8
+u8
amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
{
u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
return backlight_level;
}
-static void
+void
amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
u8 backlight_level)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40..f77cbde 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
#define __ATOMBIOS_ENCODER_H__
u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+ u8 backlight_level);
+u8
amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
void
amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index b374653..f9b2ce9 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -65,8 +65,15 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
args.ucRegIndex = buf[0];
if (num)
num--;
- if (num)
- memcpy(&out, &buf[1], num);
+ if (num) {
+ if (buf) {
+ memcpy(&out, &buf[1], num);
+ } else {
+ DRM_ERROR("hw i2c: missing buf with num > 1\n");
+ r = -EINVAL;
+ goto done;
+ }
+ }
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 68b505c..47ef3e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -65,6 +65,8 @@ MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100
+static const struct amd_pm_funcs ci_dpm_funcs;
+
static const struct ci_pt_defaults defaults_hawaii_xt =
{
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
@@ -891,12 +893,12 @@ static void ci_dpm_powergate_uvd(void *handle, bool gate)
if (gate) {
/* stop the UVD block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
ci_update_uvd_dpm(adev, gate);
} else {
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
ci_update_uvd_dpm(adev, gate);
}
}
@@ -905,7 +907,7 @@ static bool ci_dpm_vblank_too_short(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
- u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
+ u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
/* disable mclk switching if the refresh is >120Hz, even if the
* blanking period would allow it
@@ -2954,7 +2956,7 @@ static int ci_calculate_mclk_params(struct amdgpu_device *adev,
mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
@@ -3077,7 +3079,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
(memory_clock <= pi->mclk_strobe_mode_threshold))
memory_level->StrobeEnable = 1;
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
memory_level->StrobeRatio =
ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
if (pi->mclk_edc_enable_threshold &&
@@ -3695,40 +3697,6 @@ static int ci_find_boot_level(struct ci_single_dpm_table *table,
return ret;
}
-static void ci_save_default_power_profile(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct SMU7_Discrete_GraphicsLevel *levels =
- pi->smc_state_table.GraphicsLevel;
- uint32_t min_level = 0;
-
- pi->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
- pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
- pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- pi->default_compute_power_profile = pi->default_gfx_power_profile;
- pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- pi->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
-
- pi->default_compute_power_profile.up_hyst = 0;
- pi->default_compute_power_profile.down_hyst = 5;
-
- pi->gfx_power_profile = pi->default_gfx_power_profile;
- pi->compute_power_profile = pi->default_compute_power_profile;
-}
-
static int ci_init_smc_table(struct amdgpu_device *adev)
{
struct ci_power_info *pi = ci_get_pi(adev);
@@ -3752,7 +3720,7 @@ static int ci_init_smc_table(struct amdgpu_device *adev)
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
if (ulv->supported) {
@@ -3874,8 +3842,6 @@ static int ci_init_smc_table(struct amdgpu_device *adev)
if (ret)
return ret;
- ci_save_default_power_profile(adev);
-
return 0;
}
@@ -4540,23 +4506,23 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
}
j++;
+
if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
-
temp_reg = RREG32(mmMC_PMG_CMD_MRS);
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
for (k = 0; k < table->num_entries; k++) {
table->mc_reg_table_entry[k].mc_data[j] =
(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+ if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
- if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+ if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
@@ -4564,8 +4530,6 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
}
j++;
- if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
}
break;
case mmMC_SEQ_RESERVE_M:
@@ -4577,8 +4541,6 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
}
j++;
- if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
break;
default:
break;
@@ -6281,6 +6243,8 @@ static int ci_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->powerplay.pp_funcs = &ci_dpm_funcs;
+ adev->powerplay.pp_handle = adev;
ci_dpm_set_irq_funcs(adev);
return 0;
@@ -6625,9 +6589,9 @@ static int ci_dpm_print_clock_levels(void *handle,
for (i = 0; i < pcie_table->count; i++)
size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
(i == now) ? "*" : "");
break;
default:
@@ -6643,9 +6607,10 @@ static int ci_dpm_force_clock_level(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
- if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
+ if (adev->pm.dpm.forced_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ if (mask == 0)
return -EINVAL;
switch (type) {
@@ -6666,15 +6631,15 @@ static int ci_dpm_force_clock_level(void *handle,
case PP_PCIE:
{
uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
- while (tmp >>= 1)
- level++;
-
- if (!pi->pcie_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ if (!pi->pcie_dpm_key_disabled) {
+ if (fls(tmp) != ffs(tmp))
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ else
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
+ fls(tmp) - 1);
+ }
break;
}
default:
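
The fls()/ffs() comparison above is a compact test for "exactly one PCIe level selected": with a single-bit mask the highest and lowest set bits coincide and that level is forced, otherwise any forced level is released. A small standalone check of that logic (mask values are arbitrary):

    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    static int fls_(unsigned x) { return x ? 32 - __builtin_clz(x) : 0; }

    static void report(unsigned mask)
    {
        if (fls_(mask) != ffs(mask))
            printf("mask 0x%x: more than one level -> unforce\n", mask);
        else
            printf("mask 0x%x: force level %d\n", mask, fls_(mask) - 1);
    }

    int main(void)
    {
        report(0x4);   /* only level 2 enabled */
        report(0x6);   /* levels 1 and 2 enabled */
        return 0;
    }
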
@@ -6756,222 +6721,6 @@ static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
return 0;
}
-static int ci_dpm_get_power_profile_state(void *handle,
- struct amd_pp_profile *query)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi || !query)
- return -EINVAL;
-
- if (query->type == AMD_PP_GFX_PROFILE)
- memcpy(query, &pi->gfx_power_profile,
- sizeof(struct amd_pp_profile));
- else if (query->type == AMD_PP_COMPUTE_PROFILE)
- memcpy(query, &pi->compute_power_profile,
- sizeof(struct amd_pp_profile));
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
- struct amd_pp_profile *request)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &(pi->dpm_table);
- struct SMU7_Discrete_GraphicsLevel *levels =
- pi->smc_state_table.GraphicsLevel;
- uint32_t array = pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
- SMU7_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpH = request->up_hyst;
- levels[i].DownH = request->down_hyst;
- }
-
- return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
- array_size, pi->sram_end);
-}
-
-static void ci_find_min_clock_masks(struct amdgpu_device *adev,
- uint32_t *sclk_mask, uint32_t *mclk_mask,
- uint32_t min_sclk, uint32_t min_mclk)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &(pi->dpm_table);
- uint32_t i;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- if (dpm_table->sclk_table.dpm_levels[i].enabled &&
- dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
- *sclk_mask |= 1 << i;
- }
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- if (dpm_table->mclk_table.dpm_levels[i].enabled &&
- dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
- *mclk_mask |= 1 << i;
- }
-}
-
-static int ci_set_power_profile_state(struct amdgpu_device *adev,
- struct amd_pp_profile *request)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int tmp_result, result = 0;
- uint32_t sclk_mask = 0, mclk_mask = 0;
-
- tmp_result = ci_freeze_sclk_mclk_dpm(adev);
- if (tmp_result) {
- DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
- result = tmp_result;
- }
-
- tmp_result = ci_populate_requested_graphic_levels(adev,
- request);
- if (tmp_result) {
- DRM_ERROR("Failed to populate requested graphic levels!");
- result = tmp_result;
- }
-
- tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
- if (tmp_result) {
- DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
- result = tmp_result;
- }
-
- ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
- request->min_sclk, request->min_mclk);
-
- if (sclk_mask) {
- if (!pi->sclk_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(
- adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.
- sclk_dpm_enable_mask &
- sclk_mask);
- }
-
- if (mclk_mask) {
- if (!pi->mclk_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(
- adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.
- mclk_dpm_enable_mask &
- mclk_mask);
- }
-
-
- return result;
-}
-
-static int ci_dpm_set_power_profile_state(void *handle,
- struct amd_pp_profile *request)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret = -1;
-
- if (!pi || !request)
- return -EINVAL;
-
- if (adev->pm.dpm.forced_level !=
- AMD_DPM_FORCED_LEVEL_AUTO)
- return -EINVAL;
-
- if (request->min_sclk ||
- request->min_mclk ||
- request->activity_threshold ||
- request->up_hyst ||
- request->down_hyst) {
- if (request->type == AMD_PP_GFX_PROFILE)
- memcpy(&pi->gfx_power_profile, request,
- sizeof(struct amd_pp_profile));
- else if (request->type == AMD_PP_COMPUTE_PROFILE)
- memcpy(&pi->compute_power_profile, request,
- sizeof(struct amd_pp_profile));
- else
- return -EINVAL;
-
- if (request->type == pi->current_power_profile)
- ret = ci_set_power_profile_state(
- adev,
- request);
- } else {
- /* set power profile if it exists */
- switch (request->type) {
- case AMD_PP_GFX_PROFILE:
- ret = ci_set_power_profile_state(
- adev,
- &pi->gfx_power_profile);
- break;
- case AMD_PP_COMPUTE_PROFILE:
- ret = ci_set_power_profile_state(
- adev,
- &pi->compute_power_profile);
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (!ret)
- pi->current_power_profile = request->type;
-
- return 0;
-}
-
-static int ci_dpm_reset_power_profile_state(void *handle,
- struct amd_pp_profile *request)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi || !request)
- return -EINVAL;
-
- if (request->type == AMD_PP_GFX_PROFILE) {
- pi->gfx_power_profile = pi->default_gfx_power_profile;
- return ci_dpm_set_power_profile_state(adev,
- &pi->gfx_power_profile);
- } else if (request->type == AMD_PP_COMPUTE_PROFILE) {
- pi->compute_power_profile =
- pi->default_compute_power_profile;
- return ci_dpm_set_power_profile_state(adev,
- &pi->compute_power_profile);
- } else
- return -EINVAL;
-}
-
-static int ci_dpm_switch_power_profile(void *handle,
- enum amd_pp_profile_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amd_pp_profile request = {0};
-
- if (!pi)
- return -EINVAL;
-
- if (pi->current_power_profile != type) {
- request.type = type;
- return ci_dpm_set_power_profile_state(adev, &request);
- }
-
- return 0;
-}
-
static int ci_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
@@ -7015,7 +6764,7 @@ static int ci_dpm_read_sensor(void *handle, int idx,
}
}
-const struct amd_ip_funcs ci_dpm_ip_funcs = {
+static const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
.late_init = ci_dpm_late_init,
@@ -7032,8 +6781,16 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.set_powergating_state = ci_dpm_set_powergating_state,
};
-const struct amd_pm_funcs ci_dpm_funcs = {
- .get_temperature = &ci_dpm_get_temp,
+const struct amdgpu_ip_block_version ci_smu_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ci_dpm_ip_funcs,
+};
+
+static const struct amd_pm_funcs ci_dpm_funcs = {
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
.post_set_power_state = &ci_dpm_post_set_power_state,
@@ -7057,10 +6814,6 @@ const struct amd_pm_funcs ci_dpm_funcs = {
.set_mclk_od = ci_dpm_set_mclk_od,
.check_state_equal = ci_check_state_equal,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
- .get_power_profile_state = ci_dpm_get_power_profile_state,
- .set_power_profile_state = ci_dpm_set_power_profile_state,
- .reset_power_profile_state = ci_dpm_reset_power_profile_state,
- .switch_power_profile = ci_dpm_switch_power_profile,
.read_sensor = ci_dpm_read_sensor,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
index 84cbc9c..91be299 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
@@ -295,13 +295,6 @@ struct ci_power_info {
bool fan_is_controlled_by_smc;
u32 t_min;
u32 fan_ctrl_default_mode;
-
- /* power profile */
- struct amd_pp_profile gfx_power_profile;
- struct amd_pp_profile compute_power_profile;
- struct amd_pp_profile default_gfx_power_profile;
- struct amd_pp_profile default_compute_power_profile;
- enum amd_pp_profile_type current_power_profile;
};
#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index a296f7bb..0df2203 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,7 +67,6 @@
#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
/*
@@ -755,74 +754,74 @@ static void cik_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- amdgpu_program_register_sequence(adev,
- bonaire_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_registers,
- (const u32)ARRAY_SIZE(bonaire_golden_registers));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_common_registers,
- (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_spm_registers,
- (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_mgcg_cgcg_init,
+ ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_registers,
+ ARRAY_SIZE(bonaire_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_common_registers,
+ ARRAY_SIZE(bonaire_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_spm_registers,
+ ARRAY_SIZE(bonaire_golden_spm_registers));
break;
case CHIP_KABINI:
- amdgpu_program_register_sequence(adev,
- kalindi_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_common_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_spm_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_registers,
+ ARRAY_SIZE(kalindi_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_MULLINS:
- amdgpu_program_register_sequence(adev,
- kalindi_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- godavari_golden_registers,
- (const u32)ARRAY_SIZE(godavari_golden_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_common_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_spm_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ godavari_golden_registers,
+ ARRAY_SIZE(godavari_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_KAVERI:
- amdgpu_program_register_sequence(adev,
- spectre_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- spectre_golden_registers,
- (const u32)ARRAY_SIZE(spectre_golden_registers));
- amdgpu_program_register_sequence(adev,
- spectre_golden_common_registers,
- (const u32)ARRAY_SIZE(spectre_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- spectre_golden_spm_registers,
- (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_mgcg_cgcg_init,
+ ARRAY_SIZE(spectre_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_registers,
+ ARRAY_SIZE(spectre_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_common_registers,
+ ARRAY_SIZE(spectre_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_spm_registers,
+ ARRAY_SIZE(spectre_golden_spm_registers));
break;
case CHIP_HAWAII:
- amdgpu_program_register_sequence(adev,
- hawaii_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_registers,
- (const u32)ARRAY_SIZE(hawaii_golden_registers));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_common_registers,
- (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_spm_registers,
- (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_mgcg_cgcg_init,
+ ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_registers,
+ ARRAY_SIZE(hawaii_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_common_registers,
+ ARRAY_SIZE(hawaii_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_spm_registers,
+ ARRAY_SIZE(hawaii_golden_spm_registers));
break;
default:
break;
@@ -1246,7 +1245,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
/* reset */
- amdgpu_pci_config_reset(adev);
+ amdgpu_device_pci_config_reset(adev);
udelay(100);
@@ -1715,6 +1714,27 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
+static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+ RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+ } else {
+ amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+ }
+}
+
+static void cik_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32(mmHDP_DEBUG0, 1);
+ RREG32(mmHDP_DEBUG0);
+ } else {
+ amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+ }
+}
+
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -1726,6 +1746,8 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
.get_config_memsize = &cik_get_config_memsize,
+ .flush_hdp = &cik_flush_hdp,
+ .invalidate_hdp = &cik_invalidate_hdp,
};
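
The cik_flush_hdp()/cik_invalidate_hdp() callbacks added above move HDP maintenance from a per-ring emit_hdp_invalidate hook to asic-level helpers that pick the cheapest path at call time: queue a register write on the ring when it can emit one, otherwise fall back to an immediate MMIO write plus readback. A simplified, self-contained sketch of that dispatch (the types, helpers, and register offset are illustrative, not the driver's):

#include <stdio.h>

struct ring;

struct ring_funcs {
	void (*emit_wreg)(struct ring *ring, unsigned int reg, unsigned int val);
};

struct ring {
	const struct ring_funcs *funcs;
};

/* Stand-in for a direct MMIO register write (WREG32 in the driver). */
static void reg_write_mmio(unsigned int reg, unsigned int val)
{
	printf("MMIO write: reg 0x%x = 0x%x\n", reg, val);
}

/* Stand-in for queuing the write as a packet on a GPU ring. */
static void emit_wreg_on_ring(struct ring *ring, unsigned int reg, unsigned int val)
{
	(void)ring;
	printf("ring packet: reg 0x%x = 0x%x\n", reg, val);
}

/* Mirrors the shape of cik_flush_hdp(): ring path if available, MMIO otherwise. */
static void flush_hdp(struct ring *ring, unsigned int flush_reg)
{
	if (!ring || !ring->funcs->emit_wreg)
		reg_write_mmio(flush_reg, 1);
	else
		ring->funcs->emit_wreg(ring, flush_reg, 1);
}

int main(void)
{
	const struct ring_funcs funcs = { .emit_wreg = emit_wreg_on_ring };
	struct ring sdma = { .funcs = &funcs };

	flush_hdp(NULL, 0x1520);   /* no ring: immediate CPU-side flush */
	flush_hdp(&sdma, 0x1520);  /* ring available: flush ordered with ring work */
	return 0;
}
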
static int cik_common_early_init(void *handle)
@@ -1864,10 +1886,6 @@ static int cik_common_early_init(void *handle)
return -EINVAL;
}
- adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
- amdgpu_get_pcie_info(adev);
-
return 0;
}
@@ -1974,77 +1992,83 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ if (amdgpu_dpm == -1)
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_HAWAII:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ if (amdgpu_dpm == -1)
+ amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KAVERI:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KABINI:
case CHIP_MULLINS:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index c4989f5..e49c6f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,6 +24,8 @@
#ifndef __CIK_H__
#define __CIK_H__
+#define CIK_FLUSH_GPU_TLB_NUM_WREG 3
+
void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index c7b4349..2a08661 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -24,8 +24,7 @@
#ifndef __CIK_DPM_H__
#define __CIK_DPM_H__
-extern const struct amd_ip_funcs ci_dpm_ip_funcs;
-extern const struct amd_ip_funcs kv_dpm_ip_funcs;
-extern const struct amd_pm_funcs ci_dpm_funcs;
-extern const struct amd_pm_funcs kv_dpm_funcs;
+extern const struct amdgpu_ip_block_version ci_smu_ip_block;
+extern const struct amdgpu_ip_block_version kv_smu_ip_block;
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index a870b35..44d10c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -111,7 +111,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
cik_ih_disable_interrupts(adev);
/* setup interrupt control */
- WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+ WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -280,8 +280,8 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
- entry->pas_id = (dw[2] >> 16) & 0xffff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
adev->irq.ih.rptr += 16;
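
For reference, the renamed fields in cik_ih_decode_iv() keep the same bit layout: each IV ring entry is four dwords, which is why rptr advances by 16 bytes, with vmid in bits 15:8 and pasid in bits 31:16 of the third dword. A small stand-alone sketch of that decode (the sample dwords are made up):

#include <stdint.h>
#include <stdio.h>

struct iv_entry {
	unsigned int src_id;
	unsigned int src_data;
	unsigned int ring_id;
	unsigned int vmid;
	unsigned int pasid;
};

/* Same field extraction as the hunk above, applied to one 4-dword entry. */
static void decode_iv(const uint32_t dw[4], struct iv_entry *e)
{
	e->src_id   = dw[0] & 0xff;
	e->src_data = dw[1] & 0xfffffff;
	e->ring_id  = dw[2] & 0xff;
	e->vmid     = (dw[2] >> 8) & 0xff;
	e->pasid    = (dw[2] >> 16) & 0xffff;
	/* dw[3] is not consumed here; the read pointer still moves by 16 bytes. */
}

int main(void)
{
	const uint32_t dw[4] = { 0x2a, 0x7, 0x80010300, 0x0 };
	struct iv_entry e;

	decode_iv(dw, &e);
	printf("src_id %u vmid %u pasid 0x%x\n", e.src_id, e.vmid, e.pasid);
	return 0;
}
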
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 60cecd1..f48ea0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -221,9 +221,9 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 extra_bits = vm_id & 0xf;
+ u32 extra_bits = vmid & 0xf;
/* IB packet must end on a 8 DW boundary */
cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -261,13 +261,6 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
-static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- amdgpu_ring_write(ring, mmHDP_DEBUG0);
- amdgpu_ring_write(ring, 1);
-}
-
/**
* cik_sdma_ring_emit_fence - emit a fence on the DMA ring
*
@@ -317,7 +310,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
if ((adev->mman.buffer_funcs_ring == sdma0) ||
(adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -517,7 +510,7 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
}
if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
@@ -626,7 +619,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -639,7 +632,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
@@ -657,13 +650,13 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -686,7 +679,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -724,7 +717,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
@@ -735,7 +728,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -880,23 +873,12 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (CIK).
*/
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- if (vm_id < 8) {
- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
- } else {
- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
- }
- amdgpu_ring_write(ring, pd_addr >> 12);
-
- /* flush TLB */
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
@@ -906,6 +888,14 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
+static void cik_sdma_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, val);
+}
+
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
@@ -1279,9 +1269,9 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.set_wptr = cik_sdma_ring_set_wptr,
.emit_frame_size =
6 + /* cik_sdma_ring_emit_hdp_flush */
- 3 + /* cik_sdma_ring_emit_hdp_invalidate */
+ 3 + /* hdp invalidate */
6 + /* cik_sdma_ring_emit_pipeline_sync */
- 12 + /* cik_sdma_ring_emit_vm_flush */
+ CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* cik_sdma_ring_emit_vm_flush */
9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
.emit_ib = cik_sdma_ring_emit_ib,
@@ -1289,11 +1279,11 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
- .emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
.test_ring = cik_sdma_ring_test_ring,
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
+ .emit_wreg = cik_sdma_ring_emit_wreg,
};
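
The .emit_frame_size change above is bookkeeping for the new write path: each SDMA register write is a three-dword SRBM_WRITE packet (header, register, value), so the VM flush budget becomes CIK_FLUSH_GPU_TLB_NUM_WREG writes of 3 dwords each plus 6 dwords for the trailing poll. A sketch of that arithmetic only, with the constants copied from the hunks above rather than taken from driver headers:

#include <stdio.h>

/* One SDMA register write: SRBM_WRITE header, register, value. */
#define SDMA_WREG_DWORDS		3
/* From cik.h above: register writes needed for one GPU TLB flush on CIK. */
#define CIK_FLUSH_GPU_TLB_NUM_WREG	3
/* Trailing POLL_REG_MEM waiting on VM_INVALIDATE_REQUEST/RESPONSE. */
#define SDMA_VM_FLUSH_POLL_DWORDS	6

int main(void)
{
	unsigned int vm_flush = CIK_FLUSH_GPU_TLB_NUM_WREG * SDMA_WREG_DWORDS +
				SDMA_VM_FLUSH_POLL_DWORDS;

	/* Matches the .emit_frame_size entry: 3 writes * 3 dwords + 6 = 15 dwords. */
	printf("vm_flush budget: %u dwords\n", vm_flush);
	return 0;
}
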
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1391,9 +1381,6 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
.copy_pte = cik_sdma_vm_copy_pte,
.write_pte = cik_sdma_vm_write_pte,
-
- .set_max_nums_pte_pde = 0x1fffff >> 3,
- .set_pte_pde_num_dw = 10,
.set_pte_pde = cik_sdma_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 6a9e38a..cee6e8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -562,7 +562,7 @@
#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
#define SHARED_BASE(x) ((x) << 16) /* LDS */
-#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
+#define KFD_CIK_SDMA_QUEUE_OFFSET (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL)
/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
enum {
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
index 003a131..567a904 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
@@ -48,7 +48,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
0x00000000, // DB_STENCIL_WRITE_BASE
0x00000000, // DB_STENCIL_WRITE_BASE_HI
0x00000000, // DB_DFSM_CONTROL
- 0x00000000, // DB_RENDER_FILTER
+ 0, // HOLE
0x00000000, // DB_Z_INFO2
0x00000000, // DB_STENCIL_INFO2
0, // HOLE
@@ -259,8 +259,8 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
0x00000000, // PA_SC_RIGHT_VERT_GRID
0x00000000, // PA_SC_LEFT_VERT_GRID
0x00000000, // PA_SC_HORIZ_GRID
- 0x00000000, // PA_SC_FOV_WINDOW_LR
- 0x00000000, // PA_SC_FOV_WINDOW_TB
+ 0, // HOLE
+ 0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
@@ -701,7 +701,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
{
0x00000000, // VGT_GS_MAX_PRIMS_PER_SUBGROUP
0x00000000, // VGT_DRAW_PAYLOAD_CNTL
- 0x00000000, // VGT_INDEX_PAYLOAD_CNTL
+ 0, // HOLE
0x00000000, // VGT_INSTANCE_STEP_RATE_0
0x00000000, // VGT_INSTANCE_STEP_RATE_1
0, // HOLE
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index fa61d64..960c29e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -111,7 +111,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
cz_ih_disable_interrupts(adev);
/* setup interrupt control */
- WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+ WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -259,8 +259,8 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
- entry->pas_id = (dw[2] >> 16) & 0xffff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
adev->irq.ih.rptr += 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4e519dc..452f88e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -145,20 +145,20 @@ static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
default:
break;
@@ -190,66 +190,6 @@ static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
-static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
- if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
- CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
- return true;
- else
- return false;
-}
-
-static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
- u32 pos1, pos2;
-
- pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- if (pos1 != pos2)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v10_0_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
- unsigned i = 100;
-
- if (crtc >= adev->mode_info.num_crtc)
- return;
-
- if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
- return;
-
- /* depending on when we hit vblank, we may be close to active; if so,
- * wait for another frame.
- */
- while (dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v10_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-
- while (!dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v10_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-}
-
static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
if (crtc >= adev->mode_info.num_crtc)
@@ -1205,7 +1145,7 @@ static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
u32 num_heads = 0, lb_size;
int i;
- amdgpu_update_display_priority(adev);
+ amdgpu_display_update_priority(adev);
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2517,9 +2457,9 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.cursor_set2 = dce_v10_0_crtc_cursor_set2,
.cursor_move = dce_v10_0_crtc_cursor_move,
.gamma_set = dce_v10_0_crtc_gamma_set,
- .set_config = amdgpu_crtc_set_config,
+ .set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v10_0_crtc_destroy,
- .page_flip_target = amdgpu_crtc_page_flip_target,
+ .page_flip_target = amdgpu_display_crtc_page_flip_target,
};
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2537,7 +2477,8 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v10_0_vga_enable(crtc, false);
/* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_crtc_vblank_on(crtc);
@@ -2676,7 +2617,7 @@ static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
return false;
}
- if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
return false;
@@ -2773,7 +2714,6 @@ static int dce_v10_0_early_init(void *handle)
adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
dce_v10_0_set_display_funcs(adev);
- dce_v10_0_set_irq_funcs(adev);
adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
@@ -2788,6 +2728,8 @@ static int dce_v10_0_early_init(void *handle)
return -EINVAL;
}
+ dce_v10_0_set_irq_funcs(adev);
+
return 0;
}
@@ -2823,9 +2765,9 @@ static int dce_v10_0_sw_init(void *handle)
adev->ddev->mode_config.preferred_depth = 24;
adev->ddev->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+ adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
- r = amdgpu_modeset_create_props(adev);
+ r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
@@ -2840,7 +2782,7 @@ static int dce_v10_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev->ddev);
else
return -EINVAL;
@@ -2920,6 +2862,11 @@ static int dce_v10_0_hw_fini(void *handle)
static int dce_v10_0_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
return dce_v10_0_hw_fini(handle);
}
@@ -2928,6 +2875,9 @@ static int dce_v10_0_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
ret = dce_v10_0_hw_init(handle);
/* turn on the BL */
@@ -3248,7 +3198,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
{
unsigned crtc = entry->src_id - 1;
uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+ unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
switch (entry->src_data[0]) {
case 0: /* vblank */
@@ -3600,7 +3550,6 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
.bandwidth_update = &dce_v10_0_bandwidth_update,
.vblank_get_counter = &dce_v10_0_vblank_get_counter,
- .vblank_wait = &dce_v10_0_vblank_wait,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v10_0_hpd_sense,
@@ -3635,13 +3584,16 @@ static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ if (adev->mode_info.num_crtc > 0)
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+ else
+ adev->crtc_irq.num_types = 0;
adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
- adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.num_types = adev->mode_info.num_hpd;
adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}
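
The dce_v10_0_set_irq_funcs() hunk above (and the matching dce_v11_0/v6_0/v8_0 hunks below) size each interrupt source from the actual display configuration instead of the AMDGPU_*_LAST enums, so ASICs with fewer CRTCs or HPD pins no longer register types they can never raise. A sketch of the crtc_irq sizing rule, with the enum value assumed for illustration only:

#include <stdio.h>

/* Assumed layout: one vblank type per crtc, then one vline type per crtc,
 * so the first vline type index doubles as the vblank type count. */
#define AMDGPU_CRTC_IRQ_VLINE1	6

static unsigned int crtc_irq_num_types(int num_crtc)
{
	return num_crtc > 0 ? AMDGPU_CRTC_IRQ_VLINE1 + (unsigned int)num_crtc : 0;
}

int main(void)
{
	for (int crtcs = 0; crtcs <= 6; crtcs += 2)
		printf("%d crtcs -> %u crtc irq types\n",
		       crtcs, crtc_irq_num_types(crtcs));
	return 0;
}
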
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 11edc75..a7c1c58 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -154,28 +154,28 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- (const u32)ARRAY_SIZE(cz_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_settings_a11,
+ ARRAY_SIZE(cz_golden_settings_a11));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_settings_a11,
+ ARRAY_SIZE(stoney_golden_settings_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- polaris11_golden_settings_a11,
- (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris11_golden_settings_a11,
+ ARRAY_SIZE(polaris11_golden_settings_a11));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- polaris10_golden_settings_a11,
- (const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris10_golden_settings_a11,
+ ARRAY_SIZE(polaris10_golden_settings_a11));
break;
default:
break;
@@ -207,66 +207,6 @@ static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
-static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
- if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
- CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
- return true;
- else
- return false;
-}
-
-static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
- u32 pos1, pos2;
-
- pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- if (pos1 != pos2)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
- unsigned i = 100;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
- return;
-
- if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
- return;
-
- /* depending on when we hit vblank, we may be close to active; if so,
- * wait for another frame.
- */
- while (dce_v11_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v11_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-
- while (!dce_v11_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v11_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-}
-
static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
@@ -1229,7 +1169,7 @@ static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
u32 num_heads = 0, lb_size;
int i;
- amdgpu_update_display_priority(adev);
+ amdgpu_display_update_priority(adev);
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2592,9 +2532,9 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
.cursor_set2 = dce_v11_0_crtc_cursor_set2,
.cursor_move = dce_v11_0_crtc_cursor_move,
.gamma_set = dce_v11_0_crtc_gamma_set,
- .set_config = amdgpu_crtc_set_config,
+ .set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v11_0_crtc_destroy,
- .page_flip_target = amdgpu_crtc_page_flip_target,
+ .page_flip_target = amdgpu_display_crtc_page_flip_target,
};
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2612,7 +2552,8 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v11_0_vga_enable(crtc, false);
/* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_crtc_vblank_on(crtc);
@@ -2779,7 +2720,7 @@ static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
return false;
}
- if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
return false;
@@ -2876,7 +2817,6 @@ static int dce_v11_0_early_init(void *handle)
adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
dce_v11_0_set_display_funcs(adev);
- dce_v11_0_set_irq_funcs(adev);
adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
@@ -2903,6 +2843,8 @@ static int dce_v11_0_early_init(void *handle)
return -EINVAL;
}
+ dce_v11_0_set_irq_funcs(adev);
+
return 0;
}
@@ -2938,9 +2880,9 @@ static int dce_v11_0_sw_init(void *handle)
adev->ddev->mode_config.preferred_depth = 24;
adev->ddev->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+ adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
- r = amdgpu_modeset_create_props(adev);
+ r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
@@ -2956,7 +2898,7 @@ static int dce_v11_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev->ddev);
else
return -EINVAL;
@@ -3046,6 +2988,11 @@ static int dce_v11_0_hw_fini(void *handle)
static int dce_v11_0_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
return dce_v11_0_hw_fini(handle);
}
@@ -3054,6 +3001,9 @@ static int dce_v11_0_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
ret = dce_v11_0_hw_init(handle);
/* turn on the BL */
@@ -3367,7 +3317,8 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
{
unsigned crtc = entry->src_id - 1;
uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+ unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ crtc);
switch (entry->src_data[0]) {
case 0: /* vblank */
@@ -3724,7 +3675,6 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
.bandwidth_update = &dce_v11_0_bandwidth_update,
.vblank_get_counter = &dce_v11_0_vblank_get_counter,
- .vblank_wait = &dce_v11_0_vblank_wait,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v11_0_hpd_sense,
@@ -3759,13 +3709,16 @@ static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ if (adev->mode_info.num_crtc > 0)
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+ else
+ adev->crtc_irq.num_types = 0;
adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
- adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.num_types = adev->mode_info.num_hpd;
adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index a51e35f..9f67b7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -142,64 +142,6 @@ static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
-static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
- if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
- return true;
- else
- return false;
-}
-
-static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
- u32 pos1, pos2;
-
- pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- if (pos1 != pos2)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v6_0_wait_for_vblank - vblank wait asic callback.
- *
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
- unsigned i = 100;
-
- if (crtc >= adev->mode_info.num_crtc)
- return;
-
- if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
- return;
-
- /* depending on when we hit vblank, we may be close to active; if so,
- * wait for another frame.
- */
- while (dce_v6_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v6_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-
- while (!dce_v6_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v6_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-}
-
static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
if (crtc >= adev->mode_info.num_crtc)
@@ -1108,7 +1050,7 @@ static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
if (!adev->mode_info.mode_config_initialized)
return;
- amdgpu_update_display_priority(adev);
+ amdgpu_display_update_priority(adev);
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2407,9 +2349,9 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
.cursor_set2 = dce_v6_0_crtc_cursor_set2,
.cursor_move = dce_v6_0_crtc_cursor_move,
.gamma_set = dce_v6_0_crtc_gamma_set,
- .set_config = amdgpu_crtc_set_config,
+ .set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v6_0_crtc_destroy,
- .page_flip_target = amdgpu_crtc_page_flip_target,
+ .page_flip_target = amdgpu_display_crtc_page_flip_target,
};
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2425,7 +2367,8 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
/* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_crtc_vblank_on(crtc);
@@ -2562,7 +2505,7 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
return false;
}
- if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
return false;
@@ -2639,7 +2582,6 @@ static int dce_v6_0_early_init(void *handle)
adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
dce_v6_0_set_display_funcs(adev);
- dce_v6_0_set_irq_funcs(adev);
adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
@@ -2658,6 +2600,8 @@ static int dce_v6_0_early_init(void *handle)
return -EINVAL;
}
+ dce_v6_0_set_irq_funcs(adev);
+
return 0;
}
@@ -2692,9 +2636,9 @@ static int dce_v6_0_sw_init(void *handle)
adev->ddev->mode_config.max_height = 16384;
adev->ddev->mode_config.preferred_depth = 24;
adev->ddev->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+ adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
- r = amdgpu_modeset_create_props(adev);
+ r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
@@ -2710,7 +2654,7 @@ static int dce_v6_0_sw_init(void *handle)
ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
if (ret)
- amdgpu_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev->ddev);
else
return -EINVAL;
@@ -2786,6 +2730,11 @@ static int dce_v6_0_hw_fini(void *handle)
static int dce_v6_0_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
return dce_v6_0_hw_fini(handle);
}
@@ -2794,6 +2743,9 @@ static int dce_v6_0_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
ret = dce_v6_0_hw_init(handle);
/* turn on the BL */
@@ -2965,7 +2917,8 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
{
unsigned crtc = entry->src_id - 1;
uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+ unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ crtc);
switch (entry->src_data[0]) {
case 0: /* vblank */
@@ -3092,7 +3045,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
- DRM_INFO("IH: HPD%d\n", hpd + 1);
+ DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
return 0;
@@ -3406,7 +3359,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
.bandwidth_update = &dce_v6_0_bandwidth_update,
.vblank_get_counter = &dce_v6_0_vblank_get_counter,
- .vblank_wait = &dce_v6_0_vblank_wait,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v6_0_hpd_sense,
@@ -3441,13 +3393,16 @@ static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ if (adev->mode_info.num_crtc > 0)
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+ else
+ adev->crtc_irq.num_types = 0;
adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
- adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.num_types = adev->mode_info.num_hpd;
adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 9cf14b8..f55422c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -140,66 +140,6 @@ static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
-static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
- if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
- CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
- return true;
- else
- return false;
-}
-
-static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
- u32 pos1, pos2;
-
- pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- if (pos1 != pos2)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v8_0_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
- unsigned i = 100;
-
- if (crtc >= adev->mode_info.num_crtc)
- return;
-
- if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
- return;
-
- /* depending on when we hit vblank, we may be close to active; if so,
- * wait for another frame.
- */
- while (dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v8_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-
- while (!dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v8_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-}
-
static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
if (crtc >= adev->mode_info.num_crtc)
@@ -1144,7 +1084,7 @@ static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
u32 num_heads = 0, lb_size;
int i;
- amdgpu_update_display_priority(adev);
+ amdgpu_display_update_priority(adev);
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2421,9 +2361,9 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
.cursor_set2 = dce_v8_0_crtc_cursor_set2,
.cursor_move = dce_v8_0_crtc_cursor_move,
.gamma_set = dce_v8_0_crtc_gamma_set,
- .set_config = amdgpu_crtc_set_config,
+ .set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v8_0_crtc_destroy,
- .page_flip_target = amdgpu_crtc_page_flip_target,
+ .page_flip_target = amdgpu_display_crtc_page_flip_target,
};
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2441,7 +2381,8 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v8_0_vga_enable(crtc, false);
/* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_crtc_vblank_on(crtc);
@@ -2587,7 +2528,7 @@ static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
return false;
}
- if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
return false;
@@ -2664,7 +2605,6 @@ static int dce_v8_0_early_init(void *handle)
adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
dce_v8_0_set_display_funcs(adev);
- dce_v8_0_set_irq_funcs(adev);
adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
@@ -2688,6 +2628,8 @@ static int dce_v8_0_early_init(void *handle)
return -EINVAL;
}
+ dce_v8_0_set_irq_funcs(adev);
+
return 0;
}
@@ -2723,9 +2665,9 @@ static int dce_v8_0_sw_init(void *handle)
adev->ddev->mode_config.preferred_depth = 24;
adev->ddev->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+ adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
- r = amdgpu_modeset_create_props(adev);
+ r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
@@ -2740,7 +2682,7 @@ static int dce_v8_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev->ddev);
else
return -EINVAL;
@@ -2818,6 +2760,11 @@ static int dce_v8_0_hw_fini(void *handle)
static int dce_v8_0_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
return dce_v8_0_hw_fini(handle);
}
@@ -2826,6 +2773,9 @@ static int dce_v8_0_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
ret = dce_v8_0_hw_init(handle);
/* turn on the BL */
@@ -3062,7 +3012,8 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
{
unsigned crtc = entry->src_id - 1;
uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+ unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ crtc);
switch (entry->src_data[0]) {
case 0: /* vblank */
@@ -3490,7 +3441,6 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
.bandwidth_update = &dce_v8_0_bandwidth_update,
.vblank_get_counter = &dce_v8_0_vblank_get_counter,
- .vblank_wait = &dce_v8_0_vblank_wait,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v8_0_hpd_sense,
@@ -3525,13 +3475,16 @@ static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ if (adev->mode_info.num_crtc > 0)
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+ else
+ adev->crtc_irq.num_types = 0;
adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
- adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.num_types = adev->mode_info.num_hpd;
adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}
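
The reduced num_types values above are what the generic IRQ code will iterate over; they only make sense once num_crtc and num_hpd are known, which is why dce_v8_0_set_irq_funcs() now runs at the end of early_init. A rough sketch of the sizing, assuming the per-CRTC VBLANK types are laid out consecutively and followed by the matching VLINE types (as the enum names suggest):

	/* Illustrative only: with 6 CRTCs on a DCE 8.x part,
	 *   VBLANK1..VBLANK6 -> indices 0..5
	 *   VLINE1..VLINE6   -> indices 6..11
	 * so AMDGPU_CRTC_IRQ_VLINE1 + num_crtc = 12 types are registered
	 * instead of the full AMDGPU_CRTC_IRQ_LAST range.
	 */
	static unsigned int example_crtc_irq_num_types(struct amdgpu_device *adev)
	{
		if (!adev->mode_info.num_crtc)
			return 0;
		return AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
	}
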
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index a8829af..b51f05d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -44,19 +44,9 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
int index);
-
-/**
- * dce_virtual_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
- return;
-}
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state);
static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
@@ -127,9 +117,9 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
.cursor_set2 = NULL,
.cursor_move = NULL,
.gamma_set = dce_virtual_crtc_gamma_set,
- .set_config = amdgpu_crtc_set_config,
+ .set_config = amdgpu_display_crtc_set_config,
.destroy = dce_virtual_crtc_destroy,
- .page_flip_target = amdgpu_crtc_page_flip_target,
+ .page_flip_target = amdgpu_display_crtc_page_flip_target,
};
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -146,7 +136,8 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
amdgpu_crtc->enabled = true;
/* Make sure VBLANK interrupts are still enabled */
- type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_crtc_vblank_on(crtc);
break;
@@ -403,9 +394,9 @@ static int dce_virtual_sw_init(void *handle)
adev->ddev->mode_config.preferred_depth = 24;
adev->ddev->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+ adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
- r = amdgpu_modeset_create_props(adev);
+ r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
@@ -437,6 +428,8 @@ static int dce_virtual_sw_fini(void *handle)
drm_kms_helper_poll_fini(adev->ddev);
drm_mode_config_cleanup(adev->ddev);
+	/* clear the crtcs pointers so the DCE IRQ finish routine cannot access freed data */
+ memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
adev->mode_info.mode_config_initialized = false;
return 0;
}
@@ -480,6 +473,7 @@ static int dce_virtual_hw_init(void *handle)
/* no DCE */
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
break;
default:
DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
@@ -489,6 +483,13 @@ static int dce_virtual_hw_init(void *handle)
static int dce_virtual_hw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i = 0;
+
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+ if (adev->mode_info.crtcs[i])
+ dce_virtual_set_crtc_vblank_interrupt_state(adev, i, AMDGPU_IRQ_STATE_DISABLE);
+
return 0;
}
@@ -641,7 +642,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.bandwidth_update = &dce_virtual_bandwidth_update,
.vblank_get_counter = &dce_virtual_vblank_get_counter,
- .vblank_wait = &dce_virtual_vblank_wait,
.backlight_set_level = NULL,
.backlight_get_level = NULL,
.hpd_sense = &dce_virtual_hpd_sense,
@@ -723,7 +723,7 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
int crtc,
enum amdgpu_interrupt_state state)
{
- if (crtc >= adev->mode_info.num_crtc) {
+ if (crtc >= adev->mode_info.num_crtc || !adev->mode_info.crtcs[crtc]) {
DRM_DEBUG("invalid crtc %d\n", crtc);
return;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/amdgpu/emu_soc.c
index 8fe8ba9..d72c25c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/amdgpu/emu_soc.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,7 +20,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+
+int emu_soc_asic_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
-bool acpi_atcs_functions_supported(void *device, uint32_t index);
-int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
-bool acpi_atcs_notify_pcie_device_ready(void *device);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index dbbe986..0fff5b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -38,6 +38,7 @@
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "si_enums.h"
+#include "si.h"
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1798,7 +1799,7 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
ring->idx, scratch, tmp);
@@ -1808,17 +1809,6 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
return r;
}
-static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
- /* flush hdp cache */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
- WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 0x1);
-}
-
static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
@@ -1826,24 +1816,6 @@ static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
EVENT_INDEX(0));
}
-/**
- * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
- *
- * @adev: amdgpu_device pointer
- * @ridx: amdgpu ring index
- *
- * Emits an hdp invalidate on the cp.
- */
-static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
- WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, mmHDP_DEBUG0);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 0x1);
-}
-
static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
@@ -1874,7 +1846,7 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
u32 header, control = 0;
@@ -1889,7 +1861,7 @@ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (vm_id << 24);
+ control |= ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -1951,7 +1923,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
@@ -2354,29 +2326,11 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
}
static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
- /* write new base address */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
- WRITE_DATA_DST_SEL(0)));
- if (vm_id < 8) {
- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
- } else {
- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
- }
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, pd_addr >> 12);
-
- /* bits 0-15 are the VM contexts0-15 */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
- WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for the invalidate to complete */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -2401,6 +2355,18 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
}
}
+static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, val);
+}
static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
{
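
With the open-coded page-table-base and VM_INVALIDATE_REQUEST writes gone, the flush goes through amdgpu_gmc_emit_flush_gpu_tlb(), whose SI backend can issue the same two register writes via the ring's new .emit_wreg callback. A minimal sketch of that idea (the real implementation lives in the GMC code and may differ in detail):

	static void example_si_flush_gpu_tlb_on_ring(struct amdgpu_ring *ring,
						     unsigned vmid, uint64_t pd_addr)
	{
		uint32_t reg = (vmid < 8) ?
			(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid) :
			(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8));

		/* the same two writes the removed sequence open-coded */
		ring->funcs->emit_wreg(ring, reg, lower_32_bits(pd_addr >> 12));
		ring->funcs->emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
	}
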
@@ -2962,25 +2928,7 @@ static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
-
- switch (adev->asic_type) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- buffer[count++] = cpu_to_le32(0x2a00126a);
- break;
- case CHIP_VERDE:
- buffer[count++] = cpu_to_le32(0x0000124a);
- break;
- case CHIP_OLAND:
- buffer[count++] = cpu_to_le32(0x00000082);
- break;
- case CHIP_HAINAN:
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -3529,23 +3477,21 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
.emit_frame_size =
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 5 + 5 + /* hdp flush / invalidate */
14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+ SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
- .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v6_0_ring_test_ring,
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
+ .emit_wreg = gfx_v6_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
@@ -3556,21 +3502,19 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
.get_wptr = gfx_v6_0_ring_get_wptr,
.set_wptr = gfx_v6_0_ring_set_wptr_compute,
.emit_frame_size =
- 5 + /* gfx_v6_0_ring_emit_hdp_flush */
- 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 5 + 5 + /* hdp flush / invalidate */
7 + /* gfx_v6_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v6_0_ring_emit_vm_flush */
+ SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
- .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v6_0_ring_test_ring,
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .emit_wreg = gfx_v6_0_ring_emit_wreg,
};
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 419ba0c..e13d9d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -48,6 +48,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */
+
#define GFX7_NUM_GFX_RINGS 1
#define GFX7_MEC_HPD_SIZE 2048
@@ -1944,7 +1946,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
if (i == 0)
sh_mem_base = 0;
else
- sh_mem_base = adev->mc.shared_aperture_start >> 48;
+ sh_mem_base = adev->gmc.shared_aperture_start >> 48;
cik_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
@@ -2085,7 +2087,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
ring->idx, scratch, tmp);
@@ -2145,26 +2147,6 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
EVENT_INDEX(0));
}
-
-/**
- * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
- *
- * @adev: amdgpu_device pointer
- * @ridx: amdgpu ring index
- *
- * Emits an hdp invalidate on the cp.
- */
-static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0) |
- WR_CONFIRM));
- amdgpu_ring_write(ring, mmHDP_DEBUG0);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1);
-}
-
/**
* gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
*
@@ -2252,7 +2234,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
u32 header, control = 0;
@@ -2267,7 +2249,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (vm_id << 24);
+ control |= ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2281,9 +2263,9 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
@@ -2365,7 +2347,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
@@ -2551,29 +2533,8 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- amdgpu_ring_write(ring, 0x16000012);
- amdgpu_ring_write(ring, 0x00000000);
- break;
- case CHIP_KAVERI:
- amdgpu_ring_write(ring, 0x00000000); /* XXX */
- amdgpu_ring_write(ring, 0x00000000);
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- amdgpu_ring_write(ring, 0x00000000); /* XXX */
- amdgpu_ring_write(ring, 0x00000000);
- break;
- case CHIP_HAWAII:
- amdgpu_ring_write(ring, 0x3a00161a);
- amdgpu_ring_write(ring, 0x0000002e);
- break;
- default:
- amdgpu_ring_write(ring, 0x00000000);
- amdgpu_ring_write(ring, 0x00000000);
- break;
- }
+ amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
+ amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -3258,30 +3219,11 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using the CP (CIK).
*/
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
- WRITE_DATA_DST_SEL(0)));
- if (vm_id < 8) {
- amdgpu_ring_write(ring,
- (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
- } else {
- amdgpu_ring_write(ring,
- (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
- }
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, pd_addr >> 12);
-
- /* bits 0-15 are the VM contexts0-15 */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for the invalidate to complete */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -3308,6 +3250,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
}
}
+static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, val);
+}
+
/*
* RLC
* The RLC is a multi-purpose microengine that handles a
@@ -4403,34 +4358,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
case CHIP_KAVERI:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 4;
- if ((adev->pdev->device == 0x1304) ||
- (adev->pdev->device == 0x1305) ||
- (adev->pdev->device == 0x130C) ||
- (adev->pdev->device == 0x130F) ||
- (adev->pdev->device == 0x1310) ||
- (adev->pdev->device == 0x1311) ||
- (adev->pdev->device == 0x131C)) {
- adev->gfx.config.max_cu_per_sh = 8;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1309) ||
- (adev->pdev->device == 0x130A) ||
- (adev->pdev->device == 0x130D) ||
- (adev->pdev->device == 0x1313) ||
- (adev->pdev->device == 0x131D)) {
- adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1306) ||
- (adev->pdev->device == 0x1307) ||
- (adev->pdev->device == 0x130B) ||
- (adev->pdev->device == 0x130E) ||
- (adev->pdev->device == 0x1315) ||
- (adev->pdev->device == 0x131B)) {
- adev->gfx.config.max_cu_per_sh = 4;
- adev->gfx.config.max_backends_per_se = 1;
- } else {
- adev->gfx.config.max_cu_per_sh = 3;
- adev->gfx.config.max_backends_per_se = 1;
- }
+ adev->gfx.config.max_cu_per_sh = 8;
+ adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_texture_channel_caches = 4;
adev->gfx.config.max_gprs = 256;
@@ -5134,10 +5063,10 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_frame_size =
20 + /* gfx_v7_0_ring_emit_gds_switch */
7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 5 + /* hdp invalidate */
12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+ CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
@@ -5146,12 +5075,12 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
+ .emit_wreg = gfx_v7_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5165,9 +5094,9 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.emit_frame_size =
20 + /* gfx_v7_0_ring_emit_gds_switch */
7 + /* gfx_v7_0_ring_emit_hdp_flush */
- 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 5 + /* hdp invalidate */
7 + /* gfx_v7_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v7_0_ring_emit_vm_flush */
+ CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
@@ -5176,11 +5105,11 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_wreg = gfx_v7_0_ring_emit_wreg,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5298,6 +5227,11 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
+ cu_info->simd_per_cu = NUM_SIMD_PER_CU;
+ cu_info->max_waves_per_simd = 10;
+ cu_info->max_scratch_slots_per_cu = 32;
+ cu_info->wave_front_size = 64;
+ cu_info->lds_size = 64;
}
const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
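
The new cu_info fields are consumed by the amdkfd interface for topology reporting; a quick worked example of what the values above imply per CU (the struct name is assumed from the surrounding driver, shown for illustration only):

	static unsigned int example_max_waves_per_cu(const struct amdgpu_cu_info *cu_info)
	{
		/* 4 SIMDs * 10 waves/SIMD = 40 waves resident per CU,
		 * i.e. 40 * 64 (wave_front_size) = 2560 work-items per CU. */
		return cu_info->simd_per_cu * cu_info->max_waves_per_simd;
	}
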
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 9ecdf62..27943e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -679,55 +679,55 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
- amdgpu_program_register_sequence(adev,
- iceland_golden_common_all,
- (const u32)ARRAY_SIZE(iceland_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ ARRAY_SIZE(golden_settings_iceland_a11));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_golden_common_all,
+ ARRAY_SIZE(iceland_golden_common_all));
break;
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
- amdgpu_program_register_sequence(adev,
- fiji_golden_common_all,
- (const u32)ARRAY_SIZE(fiji_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_golden_common_all,
+ ARRAY_SIZE(fiji_golden_common_all));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
- amdgpu_program_register_sequence(adev,
- tonga_golden_common_all,
- (const u32)ARRAY_SIZE(tonga_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_golden_common_all,
+ ARRAY_SIZE(tonga_golden_common_all));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris11_a11,
- (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
- amdgpu_program_register_sequence(adev,
- polaris11_golden_common_all,
- (const u32)ARRAY_SIZE(polaris11_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris11_golden_common_all,
+ ARRAY_SIZE(polaris11_golden_common_all));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris10_a11,
- (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
- amdgpu_program_register_sequence(adev,
- polaris10_golden_common_all,
- (const u32)ARRAY_SIZE(polaris10_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris10_golden_common_all,
+ ARRAY_SIZE(polaris10_golden_common_all));
WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
if (adev->pdev->revision == 0xc7 &&
((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
@@ -738,26 +738,26 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
}
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- (const u32)ARRAY_SIZE(cz_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- cz_golden_common_all,
- (const u32)ARRAY_SIZE(cz_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_settings_a11,
+ ARRAY_SIZE(cz_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_common_all,
+ ARRAY_SIZE(cz_golden_common_all));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- stoney_golden_common_all,
- (const u32)ARRAY_SIZE(stoney_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_settings_a11,
+ ARRAY_SIZE(stoney_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_common_all,
+ ARRAY_SIZE(stoney_golden_common_all));
break;
default:
break;
@@ -804,7 +804,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
@@ -856,7 +856,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
@@ -2114,7 +2114,6 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
- amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
@@ -3797,7 +3796,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32(mmSH_MEM_CONFIG, tmp);
- tmp = adev->mc.shared_aperture_start >> 48;
+ tmp = adev->gmc.shared_aperture_start >> 48;
WREG32(mmSH_MEM_BASES, tmp);
}
@@ -3851,6 +3850,14 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
break;
udelay(1);
}
+ if (k == adev->usec_timeout) {
+ gfx_v8_0_select_se_sh(adev, 0xffffffff,
+ 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
+ i, j);
+ return;
+ }
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -4305,37 +4312,8 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_TONGA:
- case CHIP_POLARIS10:
- amdgpu_ring_write(ring, 0x16000012);
- amdgpu_ring_write(ring, 0x0000002A);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- amdgpu_ring_write(ring, 0x16000012);
- amdgpu_ring_write(ring, 0x00000000);
- break;
- case CHIP_FIJI:
- amdgpu_ring_write(ring, 0x3a00161a);
- amdgpu_ring_write(ring, 0x0000002e);
- break;
- case CHIP_CARRIZO:
- amdgpu_ring_write(ring, 0x00000002);
- amdgpu_ring_write(ring, 0x00000000);
- break;
- case CHIP_TOPAZ:
- amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
- 0x00000000 : 0x00000002);
- amdgpu_ring_write(ring, 0x00000000);
- break;
- case CHIP_STONEY:
- amdgpu_ring_write(ring, 0x00000000);
- amdgpu_ring_write(ring, 0x00000000);
- break;
- default:
- BUG();
- }
+ amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
+ amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -4816,7 +4794,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v8_0_kiq_setting(ring);
- if (adev->in_sriov_reset) { /* for GPU_RESET case */
+ if (adev->in_gpu_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4853,7 +4831,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+ if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4865,7 +4843,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
- } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
+ } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -5080,8 +5058,9 @@ static int gfx_v8_0_hw_fini(void *handle)
gfx_v8_0_cp_enable(adev, false);
gfx_v8_0_rlc_stop(adev);
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
return 0;
}
@@ -5498,8 +5477,9 @@ static int gfx_v8_0_late_init(void *handle)
if (r)
return r;
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
return 0;
}
@@ -5510,10 +5490,10 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12))
/* Send msg to SMU via Powerplay */
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ?
- AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+ enable ?
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
@@ -6246,22 +6226,9 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
EVENT_INDEX(0));
}
-
-static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0) |
- WR_CONFIRM));
- amdgpu_ring_write(ring, mmHDP_DEBUG0);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1);
-
-}
-
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
u32 header, control = 0;
@@ -6270,7 +6237,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (vm_id << 24);
+ control |= ib->length_dw | (vmid << 24);
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
@@ -6291,9 +6258,9 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
@@ -6344,32 +6311,11 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
}
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
- WRITE_DATA_DST_SEL(0)) |
- WR_CONFIRM);
- if (vm_id < 8) {
- amdgpu_ring_write(ring,
- (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
- } else {
- amdgpu_ring_write(ring,
- (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
- }
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, pd_addr >> 12);
-
- /* bits 0-15 are the VM contexts0-15 */
- /* invalidate the cache */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(0)));
- amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for the invalidate to complete */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -6490,10 +6436,10 @@ static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
mutex_unlock(&adev->srbm_mutex);
}
static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
+ bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
return;
@@ -6633,8 +6579,22 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val)
{
+ uint32_t cmd;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
+ break;
+ case AMDGPU_RING_TYPE_KIQ:
+ cmd = 1 << 16; /* no inc addr */
+ break;
+ default:
+ cmd = WR_CONFIRM;
+ break;
+ }
+
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
+ amdgpu_ring_write(ring, cmd);
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val);
@@ -6887,7 +6847,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_frame_size = /* maximum 215dw if count 16 IBs in */
5 + /* COND_EXEC */
7 + /* PIPELINE_SYNC */
- 19 + /* VM_FLUSH */
+ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -6909,7 +6869,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -6918,6 +6877,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
+ .emit_wreg = gfx_v8_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6931,9 +6891,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.emit_frame_size =
20 + /* gfx_v8_0_ring_emit_gds_switch */
7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 5 + /* hdp_invalidate */
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
- 17 + /* gfx_v8_0_ring_emit_vm_flush */
+ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
@@ -6942,12 +6902,12 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.set_priority = gfx_v8_0_ring_set_priority_compute,
+ .emit_wreg = gfx_v8_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
@@ -6961,7 +6921,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.emit_frame_size =
20 + /* gfx_v8_0_ring_emit_gds_switch */
7 + /* gfx_v8_0_ring_emit_hdp_flush */
- 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 5 + /* hdp_invalidate */
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
17 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
@@ -7132,6 +7092,11 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
+ cu_info->simd_per_cu = NUM_SIMD_PER_CU;
+ cu_info->max_waves_per_simd = 10;
+ cu_info->max_scratch_slots_per_cu = 32;
+ cu_info->wave_front_size = 64;
+ cu_info->lds_size = 64;
}
const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
@@ -7162,12 +7127,12 @@ static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
} ce_payload = {};
if (ring->adev->virt.chained_ib_support) {
- ce_payload_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096 +
- offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
+ ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
+ offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
} else {
- ce_payload_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096 +
- offsetof(struct vi_gfx_meta_data, ce_payload);
+ ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
+ offsetof(struct vi_gfx_meta_data, ce_payload);
cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
}
@@ -7190,7 +7155,7 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
struct vi_de_ib_state_chained_ib chained;
} de_payload = {};
- csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
+ csa_addr = amdgpu_csa_vaddr(ring->adev);
gds_addr = csa_addr + 4096;
if (ring->adev->virt.chained_ib_support) {
de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
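
amdgpu_csa_vaddr() replaces the hard-coded "reserved area minus two pages" math with a per-device helper, so the CSA GPU VA no longer has to be a compile-time constant. A minimal sketch of one plausible implementation, assuming the CSA sits at the top of the VM address space (the actual helper is introduced elsewhere in the series and may differ):

	static uint64_t example_csa_vaddr(struct amdgpu_device *adev)
	{
		/* top of the VM range for this device ... */
		uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

		/* ... minus the reserved area that holds the CSA */
		return addr - AMDGPU_VA_RESERVED_SIZE;
	}
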
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5aeb5f8..1ae3de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -28,11 +28,10 @@
#include "soc15.h"
#include "soc15d.h"
-#include "vega10/soc15ip.h"
-#include "vega10/GC/gc_9_0_offset.h"
-#include "vega10/GC/gc_9_0_sh_mask.h"
-#include "vega10/vega10_enum.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "vega10_enum.h"
+#include "hdp/hdp_4_0_offset.h"
#include "soc15_common.h"
#include "clearstate_gfx9.h"
@@ -58,6 +57,13 @@ MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");
+MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
+MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/vega12_me.bin");
+MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
+MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
+
MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
@@ -65,155 +71,122 @@ MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
-static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
-{
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
-};
-
-static const u32 golden_settings_gc_9_0[] =
-{
- SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
- SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
- SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
- SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
- SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
- SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
- SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
- SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
+static const struct soc15_reg_golden golden_settings_gc_9_0[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
-static const u32 golden_settings_gc_9_0_vg10[] =
+static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
- SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
- SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
};
-static const u32 golden_settings_gc_9_1[] =
-{
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
- SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
- SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
- SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
- SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000000ff,
- SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
+static const struct soc15_reg_golden golden_settings_gc_9_1[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
-static const u32 golden_settings_gc_9_1_rv1[] =
+static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x24000042,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x24000042,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x04048000,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_MODE_CNTL_1), 0x06000000, 0x06000000,
- SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
- SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};
-static const u32 golden_settings_gc_9_x_common[] =
+static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
- SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
+#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -230,26 +203,34 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_9_0));
- amdgpu_program_register_sequence(adev,
+ ARRAY_SIZE(golden_settings_gc_9_0));
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_0_vg10,
- (const u32)ARRAY_SIZE(golden_settings_gc_9_0_vg10));
+ ARRAY_SIZE(golden_settings_gc_9_0_vg10));
+ break;
+ case CHIP_VEGA12:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_2_1,
+ ARRAY_SIZE(golden_settings_gc_9_2_1));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_2_1_vg12,
+ ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
break;
case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_1,
- (const u32)ARRAY_SIZE(golden_settings_gc_9_1));
- amdgpu_program_register_sequence(adev,
+ ARRAY_SIZE(golden_settings_gc_9_1));
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rv1,
- (const u32)ARRAY_SIZE(golden_settings_gc_9_1_rv1));
+ ARRAY_SIZE(golden_settings_gc_9_1_rv1));
break;
default:
break;
}
- amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
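
For readers unfamiliar with the golden-settings tables above: each soc15_reg_golden entry pairs a register with an AND mask and an OR value, and programming a table is a read-modify-write loop over those entries. The sketch below illustrates that pattern under assumed field names and a simplified fast path for full-mask entries; it is not the exact soc15_program_register_sequence implementation.

/* Sketch only: apply a golden-settings table as read-modify-write.
 * Struct layout and helper name are assumptions for illustration.
 */
struct reg_golden_sketch {
	uint32_t reg;       /* absolute register offset */
	uint32_t and_mask;  /* bits this entry is allowed to change */
	uint32_t or_mask;   /* new value for the masked bits */
};

static void program_register_sequence_sketch(struct amdgpu_device *adev,
					      const struct reg_golden_sketch *regs,
					      unsigned int count)
{
	unsigned int i;
	uint32_t tmp;

	for (i = 0; i < count; i++) {
		if (regs[i].and_mask == 0xffffffff) {
			/* whole register is replaced */
			tmp = regs[i].or_mask;
		} else {
			tmp = RREG32(regs[i].reg);
			tmp &= ~regs[i].and_mask;                  /* clear masked bits */
			tmp |= regs[i].or_mask & regs[i].and_mask; /* apply golden value */
		}
		WREG32(regs[i].reg, tmp);
	}
}
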
@@ -327,7 +308,7 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
@@ -340,58 +321,65 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_ib ib;
- struct dma_fence *f = NULL;
- uint32_t scratch;
- uint32_t tmp = 0;
- long r;
-
- r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
- return r;
- }
- WREG32(scratch, 0xCAFEDEAD);
- memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
- ib.ptr[2] = 0xDEADBEEF;
- ib.length_dw = 3;
-
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
- if (r)
- goto err2;
-
- r = dma_fence_wait_timeout(f, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
- r = -ETIMEDOUT;
- goto err2;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- goto err2;
- }
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
- r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
- r = -EINVAL;
- }
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct dma_fence *f = NULL;
+
+ unsigned index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
+ long r;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
+ }
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (r)
+ goto err2;
+
+ r = dma_fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ goto err2;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto err2;
+ }
+
+ tmp = adev->wb.wb[index];
+ if (tmp == 0xDEADBEEF) {
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ } else {
+ DRM_ERROR("ib test on ring %d failed\n", ring->idx);
+ r = -EINVAL;
+ }
+
err2:
- amdgpu_ib_free(adev, &ib, NULL);
- dma_fence_put(f);
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
err1:
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
+ amdgpu_device_wb_free(adev, index);
+ return r;
}
@@ -431,6 +419,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
case CHIP_VEGA10:
chip_name = "vega10";
break;
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
case CHIP_RAVEN:
chip_name = "raven";
break;
@@ -1030,6 +1021,15 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_VEGA12:
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
+ DRM_INFO("fix gfx.config for vega12\n");
+ break;
case CHIP_RAVEN:
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -1137,8 +1137,8 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
- adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
- adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;
+ adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
+ adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
/* Primitive Buffer */
r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
@@ -1243,23 +1243,24 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
}
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[0].mem_size,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
(adev->gds.mem.total_size +
adev->gfx.ngg.gds_reserve_size) >>
AMDGPU_GDS_SHIFT);
amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
+ PACKET3_DMA_DATA_DST_SEL(1) |
PACKET3_DMA_DATA_SRC_SEL(2)));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);
-
+ amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
+ adev->gfx.ngg.gds_reserve_size);
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[0].mem_size, 0);
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
amdgpu_ring_commit(ring);
@@ -1310,6 +1311,7 @@ static int gfx_v9_0_sw_init(void *handle)
switch (adev->asic_type) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_RAVEN:
adev->gfx.mec.num_mec = 2;
break;
@@ -1322,23 +1324,23 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
/* KIQ event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
if (r)
return r;
/* EOP Event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -1464,7 +1466,6 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
- amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
@@ -1596,14 +1597,21 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
soc15_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
- tmp = 0;
- tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
- SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
+ if (i == 0) {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
+ } else {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ tmp = adev->gmc.shared_aperture_start >> 48;
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
+ }
}
soc15_grbm_select(adev, 0, 0, 0, 0);
@@ -1645,6 +1653,14 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
break;
udelay(1);
}
+ if (k == adev->usec_timeout) {
+ gfx_v9_0_select_se_sh(adev, 0xffffffff,
+ 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
+ i, j);
+ return;
+ }
}
}
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -2749,7 +2765,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v9_0_kiq_setting(ring);
- if (adev->in_sriov_reset) { /* for GPU_RESET case */
+ if (adev->in_gpu_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -2787,7 +2803,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+ if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -2799,7 +2815,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
- } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
+ } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3008,7 +3024,13 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
if (amdgpu_sriov_vf(adev)) {
- pr_debug("For SRIOV client, shouldn't do anything.\n");
+ gfx_v9_0_cp_gfx_enable(adev, false);
+ /* must disable polling for SRIOV once hw fini is done, otherwise the
+ * CPC engine may keep fetching the WB address, which is already
+ * invalid after sw fini, and trigger a DMAR read error on the
+ * hypervisor side.
+ */
+ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
return 0;
}
gfx_v9_0_cp_enable(adev, false);
@@ -3139,6 +3161,8 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
{
+ struct amdgpu_device *adev = ring->adev;
+
gds_base = gds_base >> AMDGPU_GDS_SHIFT;
gds_size = gds_size >> AMDGPU_GDS_SHIFT;
@@ -3150,22 +3174,22 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
/* GDS Base */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].mem_base,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
gds_base);
/* GDS Size */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].mem_size,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
gds_size);
/* GWS */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].gws,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
/* OA */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].oa,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
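
The GDS switch above drops the amdgpu_gds_reg_offset lookup table and computes per-VMID register offsets directly: the VMID0 BASE/SIZE registers are interleaved (hence the +2*vmid stride), while GWS and OA have one register per VMID. A small sketch of that addressing follows, using only macros that appear in the hunk; the stride assumptions are taken from the arithmetic above rather than from the register spec itself.

/* Sketch: per-VMID GDS register addressing as used in the gds_switch hunk. */
static uint32_t gds_mem_base_reg(unsigned int vmid)
{
	/* BASE and SIZE alternate in the register file, so each VMID is +2 */
	return SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid;
}

static uint32_t gds_mem_size_reg(unsigned int vmid)
{
	return SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid;
}

static uint32_t gds_gws_reg(unsigned int vmid)
{
	/* one GWS and one OA register per VMID */
	return SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid;
}
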
@@ -3521,6 +3545,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_RAVEN:
gfx_v9_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -3610,13 +3635,9 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg;
-
- if (ring->adev->flags & AMD_IS_APU)
- nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
- else
- nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -3636,20 +3657,14 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- nbio_hf_reg->hdp_flush_req_offset,
- nbio_hf_reg->hdp_flush_done_offset,
+ adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio_funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
-static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- gfx_v9_0_write_data_to_reg(ring, 0, true,
- SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
-}
-
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
u32 header, control = 0;
@@ -3658,7 +3673,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (vm_id << 24);
+ control |= ib->length_dw | (vmid << 24);
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
@@ -3680,9 +3695,9 @@ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
@@ -3738,33 +3753,12 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
}
static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- unsigned eng = ring->vm_inv_eng;
-
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
-
- gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
- lower_32_bits(pd_addr));
-
- gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
- upper_32_bits(pd_addr));
-
- gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->vm_inv_eng0_req + eng, req);
-
- /* wait for the invalidate to complete */
- gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
- eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
- if (usepfp) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
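
The open-coded VM flush removed here moves into a shared helper, with its per-register traffic expressed through the emit_wreg and emit_reg_wait ring callbacks added later in this file. A rough sketch of what that shared path is expected to emit, reusing the hub field names from the deleted code; the PDE conversion and the exact invalidate-request encoding are omitted or assumed, and the real helper in amdgpu_gmc.c may differ in detail.

/* Sketch of the generic TLB flush built on the new ring callbacks. */
static uint64_t emit_flush_gpu_tlb_sketch(struct amdgpu_ring *ring,
					  unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
	unsigned int eng = ring->vm_inv_eng;

	/* program the page directory base for this VMID */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + 2 * vmid,
			      lower_32_bits(pd_addr));
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + 2 * vmid,
			      upper_32_bits(pd_addr));

	/* kick the invalidation engine and wait for the ack bit */
	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
	amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
				  1 << vmid, 1 << vmid);

	return pd_addr;
}
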
@@ -3788,6 +3782,105 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
return wptr;
}
+static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
+ bool acquire)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int pipe_num, tmp, reg;
+ int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
+
+ pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
+
+ /* first me only has 2 entries, GFX and HP3D */
+ if (ring->me > 0)
+ pipe_num -= 2;
+
+ reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
+ tmp = RREG32(reg);
+ tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
+ WREG32(reg, tmp);
+}
+
+static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ int i, pipe;
+ bool reserve;
+ struct amdgpu_ring *iring;
+
+ mutex_lock(&adev->gfx.pipe_reserve_mutex);
+ pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
+ if (acquire)
+ set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ else
+ clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+
+ if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
+ /* Clear all reservations - everyone reacquires all resources */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
+ gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
+ true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i)
+ gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
+ true);
+ } else {
+ /* Lower all pipes without a current reservation */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ iring = &adev->gfx.gfx_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v9_0_ring_set_pipe_percent(iring, reserve);
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ iring = &adev->gfx.compute_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v9_0_ring_set_pipe_percent(iring, reserve);
+ }
+ }
+
+ mutex_unlock(&adev->gfx.pipe_reserve_mutex);
+}
+
+static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ uint32_t pipe_priority = acquire ? 0x2 : 0x0;
+ uint32_t queue_priority = acquire ? 0xf : 0x0;
+
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
+ WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
+
+ soc15_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
+ enum drm_sched_priority priority)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return;
+
+ gfx_v9_0_hqd_set_priority(adev, ring, acquire);
+ gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
+}
+
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3804,6 +3897,8 @@ static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned int flags)
{
+ struct amdgpu_device *adev = ring->adev;
+
/* we only allocate 32bit for each seq wb address */
BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
@@ -3839,7 +3934,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
int cnt;
cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
- csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
+ csa_addr = amdgpu_csa_vaddr(ring->adev);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
@@ -3857,7 +3952,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
uint64_t csa_addr, gds_addr;
int cnt;
- csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
+ csa_addr = amdgpu_csa_vaddr(ring->adev);
gds_addr = csa_addr + 4096;
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -3955,15 +4050,34 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val)
+ uint32_t val)
{
+ uint32_t cmd = 0;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
+ break;
+ case AMDGPU_RING_TYPE_KIQ:
+ cmd = (1 << 16); /* no inc addr */
+ break;
+ default:
+ cmd = WR_CONFIRM;
+ break;
+ }
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
+ amdgpu_ring_write(ring, cmd);
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val);
}
+static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -4250,7 +4364,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_frame_size = /* totally 242 maximum if 16 IBs */
5 + /* COND_EXEC */
7 + /* PIPELINE_SYNC */
- 24 + /* VM_FLUSH */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -4272,7 +4388,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -4282,6 +4397,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
.emit_tmz = gfx_v9_0_ring_emit_tmz,
+ .emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -4296,9 +4413,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_frame_size =
20 + /* gfx_v9_0_ring_emit_gds_switch */
7 + /* gfx_v9_0_ring_emit_hdp_flush */
- 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
+ 5 + /* hdp invalidate */
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
- 24 + /* gfx_v9_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -4307,11 +4426,13 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .set_priority = gfx_v9_0_ring_set_priority_compute,
+ .emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -4326,9 +4447,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_frame_size =
20 + /* gfx_v9_0_ring_emit_gds_switch */
7 + /* gfx_v9_0_ring_emit_hdp_flush */
- 5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
+ 5 + /* hdp invalidate */
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
- 24 + /* gfx_v9_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -4339,6 +4462,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v9_0_ring_emit_rreg,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -4393,6 +4517,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_RAVEN:
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break;
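
On the emit_frame_size changes earlier in this file: the flat 24-dword VM_FLUSH estimate is replaced by a budget derived from the new callbacks, where each emitted register write costs 5 dwords (the PACKET3_WRITE_DATA form used by gfx_v9_0_ring_emit_wreg) and each register wait costs 7 dwords (WAIT_REG_MEM), plus 2 fixed dwords. A small illustration of the arithmetic; the SOC15_FLUSH_GPU_TLB_NUM_* constants are defined elsewhere and their values are not shown in this diff.

/* Illustration of the VM-flush frame-size budget used in the ring funcs.
 * Per-packet sizes follow from the packet formats in this file; the
 * NUM_WREG / NUM_REG_WAIT inputs come from soc15.h and are assumed here.
 */
#define WREG_DWORDS      5  /* PACKET3_WRITE_DATA: header + ctl + reg + 0 + val */
#define REG_WAIT_DWORDS  7  /* PACKET3_WAIT_REG_MEM: header + 6 payload dwords */

static unsigned int vm_flush_frame_dwords(unsigned int num_wreg,
					   unsigned int num_reg_wait)
{
	/* mirrors: NUM_WREG * 5 + NUM_REG_WAIT * 7 + 2 */
	return num_wreg * WREG_DWORDS + num_reg_wait * REG_WAIT_DWORDS + 2;
}
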
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index c17996e..acfbd2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -23,11 +23,10 @@
#include "amdgpu.h"
#include "gfxhub_v1_0.h"
-#include "vega10/soc15ip.h"
-#include "vega10/GC/gc_9_0_offset.h"
-#include "vega10/GC/gc_9_0_sh_mask.h"
-#include "vega10/GC/gc_9_0_default.h"
-#include "vega10/vega10_enum.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "gc/gc_9_0_default.h"
+#include "vega10_enum.h"
#include "soc15_common.h"
@@ -41,7 +40,7 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
uint64_t value;
BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
- value = adev->gart.table_addr - adev->mc.vram_start
+ value = adev->gart.table_addr - adev->gmc.vram_start
+ adev->vm_manager.vram_base_offset;
value &= 0x0000FFFFFFFFF000ULL;
value |= 0x1; /*valid bit*/
@@ -58,14 +57,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
gfxhub_v1_0_init_gart_pt_regs(adev);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->mc.gart_start >> 12));
+ (u32)(adev->gmc.gart_start >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->mc.gart_start >> 44));
+ (u32)(adev->gmc.gart_start >> 44));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->mc.gart_end >> 12));
+ (u32)(adev->gmc.gart_end >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->mc.gart_end >> 44));
+ (u32)(adev->gmc.gart_end >> 44));
}
static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -79,12 +78,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->mc.vram_start >> 18);
+ adev->gmc.vram_start >> 18);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->mc.vram_end >> 18);
+ adev->gmc.vram_end >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->mc.vram_start
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ adev->vm_manager.vram_base_offset;
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
@@ -93,9 +92,9 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program "protection fault". */
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- (u32)(adev->dummy_page.addr >> 12));
+ (u32)(adev->dummy_page_addr >> 12));
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- (u32)((u64)adev->dummy_page.addr >> 44));
+ (u32)((u64)adev->dummy_page_addr >> 44));
WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
@@ -144,8 +143,15 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
@@ -183,31 +189,40 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
- int i;
+ unsigned num_level, block_size;
uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
- adev->vm_manager.num_level);
+ num_level);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
@@ -242,9 +257,9 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
* SRIOV driver need to program them
*/
WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE,
- adev->mc.vram_start >> 24);
+ adev->gmc.vram_start >> 24);
WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP,
- adev->mc.vram_end >> 24);
+ adev->gmc.vram_end >> 24);
}
/* GART Enable. */
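
The gfxhub changes above key the L2 cache bank select, big-K fragment size, page-table depth and block size off the new adev->gmc.translate_further flag. The sketch below restates how those parameters are chosen, using the same members the hunks touch; treat it as a summary of the diff, not as additional hardware documentation.

/* Sketch: parameter selection mirrored from the gfxhub_v1_0 hunks above. */
static void vmid_config_params_sketch(struct amdgpu_device *adev,
				      unsigned int *num_level,
				      unsigned int *block_size,
				      unsigned int *bank_select,
				      unsigned int *bigk_fragment_size)
{
	*num_level  = adev->vm_manager.num_level;
	*block_size = adev->vm_manager.block_size;

	if (adev->gmc.translate_further) {
		/* one walker level is absorbed by the larger fragments */
		*num_level -= 1;
		*bank_select = 12;
		*bigk_fragment_size = 9;
	} else {
		/* block size is expressed relative to the 9-bit PTE span */
		*block_size -= 9;
		*bank_select = 9;
		*bigk_fragment_size = 6;
	}
}
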
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index f4603a7..5617cf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -22,6 +22,7 @@
*/
#include <linux/firmware.h>
#include <drm/drmP.h>
+#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
@@ -36,7 +37,7 @@
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);
@@ -136,19 +137,19 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
- err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+ err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->mc.fw);
+ err = amdgpu_ucode_validate(adev->gmc.fw);
out:
if (err) {
dev_err(adev->dev,
"si_mc: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->mc.fw);
- adev->mc.fw = NULL;
+ release_firmware(adev->gmc.fw);
+ adev->gmc.fw = NULL;
}
return err;
}
@@ -161,20 +162,20 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
int i, regs_size, ucode_size;
const struct mc_firmware_header_v1_0 *hdr;
- if (!adev->mc.fw)
+ if (!adev->gmc.fw)
return -EINVAL;
- hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+ hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
amdgpu_ucode_print_mc_hdr(&hdr->header);
- adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
new_io_mc_regs = (const __le32 *)
- (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
new_fw_data = (const __le32 *)
- (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
@@ -217,18 +218,13 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
}
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
- struct amdgpu_mc *mc)
+ struct amdgpu_gmc *mc)
{
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- if (mc->mc_vram_size > 0xFFC0000000ULL) {
- dev_warn(adev->dev, "limiting VRAM\n");
- mc->real_vram_size = 0xFFC0000000ULL;
- mc->mc_vram_size = 0xFFC0000000ULL;
- }
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->gmc, base);
+ amdgpu_device_gart_location(adev, mc);
}
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -264,9 +260,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->mc.vram_start >> 12);
+ adev->gmc.vram_start >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->mc.vram_end >> 12);
+ adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
@@ -283,6 +279,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
u32 tmp;
int chansize, numchan;
+ int r;
tmp = RREG32(mmMC_ARB_RAMCFG);
if (tmp & (1 << 11)) {
@@ -323,51 +320,69 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
numchan = 16;
break;
}
- adev->mc.vram_width = numchan * chansize;
- /* Could aper size report 0 ? */
- adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
- adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+ adev->gmc.vram_width = numchan * chansize;
/* size in MB on si */
- adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->mc.visible_vram_size = adev->mc.aper_size;
+ adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_device_resize_fb_bar(adev);
+ if (r)
+ return r;
+ }
+ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+ adev->gmc.visible_vram_size = adev->gmc.aper_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
switch (adev->asic_type) {
case CHIP_HAINAN: /* no MM engines */
default:
- adev->mc.gart_size = 256ULL << 20;
+ adev->gmc.gart_size = 256ULL << 20;
break;
case CHIP_VERDE: /* UVD, VCE do not support GPUVM */
case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */
case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
case CHIP_OLAND: /* UVD, VCE do not support GPUVM */
- adev->mc.gart_size = 1024ULL << 20;
+ adev->gmc.gart_size = 1024ULL << 20;
break;
}
} else {
- adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
- gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+ gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
return 0;
}
-static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
{
- WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
-
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
-static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
- void *cpu_pt_addr,
- uint32_t gpu_page_idx,
- uint64_t addr,
- uint64_t flags)
+static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ uint32_t reg;
+
+ /* write new base address */
+ if (vmid < 8)
+ reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+ else
+ reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
+ amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+ /* bits 0-15 are the VM contexts0-15 */
+ amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+ return pd_addr;
+}
+
+static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags)
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
@@ -394,10 +409,10 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- BUG_ON(addr & 0xFFFFFF0000000FFFULL);
- return addr;
+ BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
@@ -431,9 +446,9 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
u32 tmp;
- if (enable && !adev->mc.prt_warning) {
+ if (enable && !adev->gmc.prt_warning) {
dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
- adev->mc.prt_warning = true;
+ adev->gmc.prt_warning = true;
}
tmp = RREG32(mmVM_PRT_CNTL);
@@ -453,7 +468,8 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
if (enable) {
uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
- uint32_t high = adev->vm_manager.max_pfn;
+ uint32_t high = adev->vm_manager.max_pfn -
+ (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -513,11 +529,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
(field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
(field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
/* setup context0 */
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
- (u32)(adev->dummy_page.addr >> 12));
+ (u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT0_CNTL2, 0);
WREG32(mmVM_CONTEXT0_CNTL,
VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
@@ -547,7 +563,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
/* enable context1-15 */
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
- (u32)(adev->dummy_page.addr >> 12));
+ (u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT1_CNTL2, 4);
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
@@ -559,9 +575,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v6_0_set_fault_enable_default(adev, true);
- gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+ gmc_v6_0_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gart_size >> 20),
+ (unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -793,7 +809,7 @@ static int gmc_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v6_0_set_gart_funcs(adev);
+ gmc_v6_0_set_gmc_funcs(adev);
gmc_v6_0_set_irq_funcs(adev);
return 0;
@@ -804,7 +820,7 @@ static int gmc_v6_0_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}
@@ -816,27 +832,26 @@ static int gmc_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+ adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
if (r)
return r;
- amdgpu_vm_adjust_size(adev, 64, 9);
- adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
- adev->mc.mc_mask = 0xffffffffffULL;
+ adev->gmc.mc_mask = 0xffffffffffULL;
- adev->mc.stolen_size = 256 * 1024;
+ adev->gmc.stolen_size = 256 * 1024;
adev->need_dma32 = false;
dma_bits = adev->need_dma32 ? 32 : 40;
@@ -851,6 +866,7 @@ static int gmc_v6_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
}
+ adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
r = gmc_v6_0_init_microcode(adev);
if (r) {
@@ -877,7 +893,6 @@ static int gmc_v6_0_sw_init(void *handle)
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
- adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
@@ -897,12 +912,12 @@ static int gmc_v6_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
gmc_v6_0_gart_fini(adev);
- amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->mc.fw);
- adev->mc.fw = NULL;
+ release_firmware(adev->gmc.fw);
+ adev->gmc.fw = NULL;
return 0;
}
@@ -933,7 +948,7 @@ static int gmc_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v6_0_gart_disable(adev);
return 0;
@@ -957,7 +972,7 @@ static int gmc_v6_0_resume(void *handle)
if (r)
return r;
- amdgpu_vm_reset_all_ids(adev);
+ amdgpu_vmid_reset_all(adev);
return 0;
}
@@ -1128,9 +1143,10 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.set_powergating_state = gmc_v6_0_set_powergating_state,
};
-static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
- .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
- .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
+ .flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
+ .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
+ .set_pte_pde = gmc_v6_0_set_pte_pde,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
@@ -1141,16 +1157,16 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
.process = gmc_v6_0_process_interrupt,
};
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gart.gart_funcs == NULL)
- adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+ if (adev->gmc.gmc_funcs == NULL)
+ adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->mc.vm_fault.num_types = 1;
- adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
+ adev->gmc.vm_fault.num_types = 1;
+ adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}
const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index b0528ca..80054f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -22,6 +22,7 @@
*/
#include <linux/firmware.h>
#include <drm/drmP.h>
+#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
@@ -42,7 +43,7 @@
#include "amdgpu_atombios.h"
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
@@ -67,12 +68,12 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
@@ -151,16 +152,16 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
- err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+ err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->mc.fw);
+ err = amdgpu_ucode_validate(adev->gmc.fw);
out:
if (err) {
pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->mc.fw);
- adev->mc.fw = NULL;
+ release_firmware(adev->gmc.fw);
+ adev->gmc.fw = NULL;
}
return err;
}
@@ -181,19 +182,19 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
u32 running;
int i, ucode_size, regs_size;
- if (!adev->mc.fw)
+ if (!adev->gmc.fw)
return -EINVAL;
- hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+ hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
amdgpu_ucode_print_mc_hdr(&hdr->header);
- adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
io_mc_regs = (const __le32 *)
- (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
fw_data = (const __le32 *)
- (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
@@ -235,19 +236,13 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
}
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
- struct amdgpu_mc *mc)
+ struct amdgpu_gmc *mc)
{
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- if (mc->mc_vram_size > 0xFFC0000000ULL) {
- /* leave room for at least 1024M GTT */
- dev_warn(adev->dev, "limiting VRAM\n");
- mc->real_vram_size = 0xFFC0000000ULL;
- mc->mc_vram_size = 0xFFC0000000ULL;
- }
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->gmc, base);
+ amdgpu_device_gart_location(adev, mc);
}
/**
@@ -289,9 +284,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
}
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->mc.vram_start >> 12);
+ adev->gmc.vram_start >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->mc.vram_end >> 12);
+ adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
@@ -322,8 +317,10 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
*/
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
- adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
- if (!adev->mc.vram_width) {
+ int r;
+
+ adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
+ if (!adev->gmc.vram_width) {
u32 tmp;
int chansize, numchan;
@@ -365,33 +362,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
numchan = 16;
break;
}
- adev->mc.vram_width = numchan * chansize;
+ adev->gmc.vram_width = numchan * chansize;
}
- /* Could aper size report 0 ? */
- adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
- adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
- adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_device_resize_fb_bar(adev);
+ if (r)
+ return r;
+ }
+ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU) {
- adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
- adev->mc.aper_size = adev->mc.real_vram_size;
+ adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+ adev->gmc.aper_size = adev->gmc.real_vram_size;
}
#endif
/* In case the PCI BAR is larger than the actual amount of vram */
- adev->mc.visible_vram_size = adev->mc.aper_size;
- if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
- adev->mc.visible_vram_size = adev->mc.real_vram_size;
+ adev->gmc.visible_vram_size = adev->gmc.aper_size;
+ if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+ adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
switch (adev->asic_type) {
case CHIP_TOPAZ: /* no MM engines */
default:
- adev->mc.gart_size = 256ULL << 20;
+ adev->gmc.gart_size = 256ULL << 20;
break;
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
@@ -399,15 +401,15 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */
case CHIP_KABINI: /* UVD, VCE do not support GPUVM */
case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
- adev->mc.gart_size = 1024ULL << 20;
+ adev->gmc.gart_size = 1024ULL << 20;
break;
#endif
}
} else {
- adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
- gmc_v7_0_vram_gtt_location(adev, &adev->mc);
+ gmc_v7_0_vram_gtt_location(adev, &adev->gmc);
return 0;
}
@@ -420,25 +422,44 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
*/
/**
- * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
*
* @adev: amdgpu_device pointer
* @vmid: vm instance to flush
*
* Flush the TLB for the requested page table (CIK).
*/
-static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
{
- /* flush hdp cache */
- WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
-
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
+static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ uint32_t reg;
+
+ if (vmid < 8)
+ reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+ else
+ reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
+ amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+ /* bits 0-15 are the VM contexts0-15 */
+ amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+ return pd_addr;
+}
+
+static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
+ unsigned pasid)
+{
+ amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
+}
+
/**
- * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v7_0_set_pte_pde - update the page tables using MMIO
*
* @adev: amdgpu_device pointer
* @cpu_pt_addr: cpu address of the page table
@@ -448,11 +469,9 @@ static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
*
* Update the page tables using the CPU.
*/
-static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
- void *cpu_pt_addr,
- uint32_t gpu_page_idx,
- uint64_t addr,
- uint64_t flags)
+static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags)
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
@@ -479,10 +498,10 @@ static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static uint64_t gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- BUG_ON(addr & 0xFFFFFF0000000FFFULL);
- return addr;
+ BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
/**
@@ -522,9 +541,9 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
uint32_t tmp;
- if (enable && !adev->mc.prt_warning) {
+ if (enable && !adev->gmc.prt_warning) {
dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
- adev->mc.prt_warning = true;
+ adev->gmc.prt_warning = true;
}
tmp = RREG32(mmVM_PRT_CNTL);
@@ -546,7 +565,8 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
if (enable) {
uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
- uint32_t high = adev->vm_manager.max_pfn;
+ uint32_t high = adev->vm_manager.max_pfn -
+ (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -620,11 +640,11 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
WREG32(mmVM_L2_CNTL3, tmp);
/* setup context0 */
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
- (u32)(adev->dummy_page.addr >> 12));
+ (u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT0_CNTL2, 0);
tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
@@ -654,7 +674,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
/* enable context1-15 */
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
- (u32)(adev->dummy_page.addr >> 12));
+ (u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT1_CNTL2, 4);
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
@@ -673,9 +693,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmCHUB_CONTROL, tmp);
}
- gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
+ gmc_v7_0_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gart_size >> 20),
+ (unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -748,21 +768,21 @@ static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
*
* Print human readable fault information (CIK).
*/
-static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
- u32 status, u32 addr, u32 mc_client)
+static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
+ u32 addr, u32 mc_client, unsigned pasid)
{
- u32 mc_id;
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+ u32 mc_id;
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
- protections, vmid, addr,
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ protections, vmid, pasid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
"write" : "read", block, mc_client, mc_id);
@@ -920,16 +940,16 @@ static int gmc_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v7_0_set_gart_funcs(adev);
+ gmc_v7_0_set_gmc_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
- adev->mc.shared_aperture_start = 0x2000000000000000ULL;
- adev->mc.shared_aperture_end =
- adev->mc.shared_aperture_start + (4ULL << 30) - 1;
- adev->mc.private_aperture_start =
- adev->mc.shared_aperture_end + 1;
- adev->mc.private_aperture_end =
- adev->mc.private_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+ adev->gmc.shared_aperture_end =
+ adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.private_aperture_start =
+ adev->gmc.shared_aperture_end + 1;
+ adev->gmc.private_aperture_end =
+ adev->gmc.private_aperture_start + (4ULL << 30) - 1;
return 0;
}
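The apertures programmed in early_init are two back-to-back 4 GiB (4ULL << 30) windows starting at 0x2000000000000000, the private aperture immediately following the shared one. The resulting ranges, worked out in a small sketch (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t shared_start  = 0x2000000000000000ULL;
	uint64_t shared_end    = shared_start + (4ULL << 30) - 1;	/* 0x20000000FFFFFFFF */
	uint64_t private_start = shared_end + 1;			/* 0x2000000100000000 */
	uint64_t private_end   = private_start + (4ULL << 30) - 1;	/* 0x20000001FFFFFFFF */

	printf("shared  %#llx-%#llx\nprivate %#llx-%#llx\n",
	       (unsigned long long)shared_start, (unsigned long long)shared_end,
	       (unsigned long long)private_start, (unsigned long long)private_end);
	return 0;
}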
@@ -939,7 +959,7 @@ static int gmc_v7_0_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}
@@ -951,18 +971,18 @@ static int gmc_v7_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+ adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
if (r)
return r;
@@ -970,16 +990,15 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64, 9);
- adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
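The hand-rolled max_pfn computation is dropped because amdgpu_vm_adjust_size() now derives it internally; the two extra arguments appear to be the maximum number of page-table levels and the maximum address width (1 level and 40 bits here, versus 3 levels and 48 bits in the gmc_v9_0 hunk below). The removed shift is easy to reconstruct: a VM size given in GiB becomes a page-frame count by multiplying by 2^18, since one GiB holds 2^18 pages of 4 KiB. A quick arithmetic check, sketch only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t vm_size_gib = 64;			/* value passed above   */
	uint64_t max_pfn     = vm_size_gib << 18;	/* GiB -> 4 KiB pages   */

	assert(max_pfn * 4096ULL == vm_size_gib << 30);	/* 64 GiB total         */
	return 0;
}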
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
*/
- adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+ adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
- adev->mc.stolen_size = 256 * 1024;
+ adev->gmc.stolen_size = 256 * 1024;
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
@@ -999,6 +1018,7 @@ static int gmc_v7_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
+ adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
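The new need_swiotlb flag is set when the highest physical I/O address reported by drm_get_max_iomem() exceeds what the device can reach through its DMA mask (40 bits on this generation unless the 32-bit fallback above was taken), i.e. when bounce buffering may be needed. The condition on its own, as an illustrative sketch:

#include <stdint.h>

/* Sketch only: true when some I/O memory lies above the device's DMA reach. */
static int needs_swiotlb(uint64_t max_iomem, unsigned dma_bits)
{
	return max_iomem > (1ULL << dma_bits);
}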
r = gmc_v7_0_init_microcode(adev);
if (r) {
@@ -1026,7 +1046,6 @@ static int gmc_v7_0_sw_init(void *handle)
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
- adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
@@ -1046,12 +1065,12 @@ static int gmc_v7_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
gmc_v7_0_gart_fini(adev);
- amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->mc.fw);
- adev->mc.fw = NULL;
+ release_firmware(adev->gmc.fw);
+ adev->gmc.fw = NULL;
return 0;
}
@@ -1084,7 +1103,7 @@ static int gmc_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v7_0_gart_disable(adev);
return 0;
@@ -1108,7 +1127,7 @@ static int gmc_v7_0_resume(void *handle)
if (r)
return r;
- amdgpu_vm_reset_all_ids(adev);
+ amdgpu_vmid_reset_all(adev);
return 0;
}
@@ -1258,7 +1277,8 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
- gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
+ gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
+ entry->pasid);
}
return 0;
@@ -1307,9 +1327,11 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.set_powergating_state = gmc_v7_0_set_powergating_state,
};
-static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
- .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
- .set_pte_pde = gmc_v7_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
+ .flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+ .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
+ .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
+ .set_pte_pde = gmc_v7_0_set_pte_pde,
.set_prt = gmc_v7_0_set_prt,
.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
.get_vm_pde = gmc_v7_0_get_vm_pde
@@ -1320,16 +1342,16 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
.process = gmc_v7_0_process_interrupt,
};
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gart.gart_funcs == NULL)
- adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
+ if (adev->gmc.gmc_funcs == NULL)
+ adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->mc.vm_fault.num_types = 1;
- adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
+ adev->gmc.vm_fault.num_types = 1;
+ adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}
const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index f368cfe..d71d4cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -22,6 +22,7 @@
*/
#include <linux/firmware.h>
#include <drm/drmP.h>
+#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
@@ -44,7 +45,7 @@
#include "amdgpu_atombios.h"
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);
@@ -120,44 +121,44 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris11_a11,
- (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris10_a11,
- (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_stoney_common,
- (const u32)ARRAY_SIZE(golden_settings_stoney_common));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_stoney_common,
+ ARRAY_SIZE(golden_settings_stoney_common));
break;
default:
break;
@@ -235,16 +236,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+ err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->mc.fw);
+ err = amdgpu_ucode_validate(adev->gmc.fw);
out:
if (err) {
pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->mc.fw);
- adev->mc.fw = NULL;
+ release_firmware(adev->gmc.fw);
+ adev->gmc.fw = NULL;
}
return err;
}
@@ -273,19 +274,19 @@ static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
if (amdgpu_sriov_bios(adev))
return 0;
- if (!adev->mc.fw)
+ if (!adev->gmc.fw)
return -EINVAL;
- hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+ hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
amdgpu_ucode_print_mc_hdr(&hdr->header);
- adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
io_mc_regs = (const __le32 *)
- (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
fw_data = (const __le32 *)
- (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
@@ -349,19 +350,19 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
if (vbios_version == 0)
return 0;
- if (!adev->mc.fw)
+ if (!adev->gmc.fw)
return -EINVAL;
- hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+ hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
amdgpu_ucode_print_mc_hdr(&hdr->header);
- adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
io_mc_regs = (const __le32 *)
- (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
fw_data = (const __le32 *)
- (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
data = RREG32(mmMC_SEQ_MISC0);
data &= ~(0x40);
@@ -397,7 +398,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
}
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
- struct amdgpu_mc *mc)
+ struct amdgpu_gmc *mc)
{
u64 base = 0;
@@ -405,14 +406,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- if (mc->mc_vram_size > 0xFFC0000000ULL) {
- /* leave room for at least 1024M GTT */
- dev_warn(adev->dev, "limiting VRAM\n");
- mc->real_vram_size = 0xFFC0000000ULL;
- mc->mc_vram_size = 0xFFC0000000ULL;
- }
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->gmc, base);
+ amdgpu_device_gart_location(adev, mc);
}
/**
@@ -454,18 +449,18 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
}
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->mc.vram_start >> 12);
+ adev->gmc.vram_start >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->mc.vram_end >> 12);
+ adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
if (amdgpu_sriov_vf(adev)) {
- tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
- tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+ tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
WREG32(mmMC_VM_FB_LOCATION, tmp);
/* XXX double check these! */
- WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+ WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
}
@@ -498,8 +493,10 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
*/
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
- adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
- if (!adev->mc.vram_width) {
+ int r;
+
+ adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
+ if (!adev->gmc.vram_width) {
u32 tmp;
int chansize, numchan;
@@ -541,26 +538,31 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
numchan = 16;
break;
}
- adev->mc.vram_width = numchan * chansize;
+ adev->gmc.vram_width = numchan * chansize;
}
- /* Could aper size report 0 ? */
- adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
- adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
- adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
- adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_device_resize_fb_bar(adev);
+ if (r)
+ return r;
+ }
+ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU) {
- adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
- adev->mc.aper_size = adev->mc.real_vram_size;
+ adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+ adev->gmc.aper_size = adev->gmc.real_vram_size;
}
#endif
/* In case the PCI BAR is larger than the actual amount of vram */
- adev->mc.visible_vram_size = adev->mc.aper_size;
- if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
- adev->mc.visible_vram_size = adev->mc.real_vram_size;
+ adev->gmc.visible_vram_size = adev->gmc.aper_size;
+ if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+ adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -569,20 +571,20 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
case CHIP_POLARIS10: /* all engines support GPUVM */
case CHIP_POLARIS12: /* all engines support GPUVM */
default:
- adev->mc.gart_size = 256ULL << 20;
+ adev->gmc.gart_size = 256ULL << 20;
break;
case CHIP_TONGA: /* UVD, VCE do not support GPUVM */
case CHIP_FIJI: /* UVD, VCE do not support GPUVM */
case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */
- adev->mc.gart_size = 1024ULL << 20;
+ adev->gmc.gart_size = 1024ULL << 20;
break;
}
} else {
- adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
- gmc_v8_0_vram_gtt_location(adev, &adev->mc);
+ gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
return 0;
}
@@ -595,25 +597,45 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
*/
/**
- * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
*
* @adev: amdgpu_device pointer
* @vmid: vm instance to flush
*
* Flush the TLB for the requested page table (CIK).
*/
-static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
uint32_t vmid)
{
- /* flush hdp cache */
- WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
-
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
+static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ uint32_t reg;
+
+ if (vmid < 8)
+ reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+ else
+ reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
+ amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+ /* bits 0-15 are the VM contexts0-15 */
+ amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+ return pd_addr;
+}
+
+static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
+ unsigned pasid)
+{
+ amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
+}
+
/**
- * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v8_0_set_pte_pde - update the page tables using MMIO
*
* @adev: amdgpu_device pointer
* @cpu_pt_addr: cpu address of the page table
@@ -623,11 +645,9 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
*
* Update the page tables using the CPU.
*/
-static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
- void *cpu_pt_addr,
- uint32_t gpu_page_idx,
- uint64_t addr,
- uint64_t flags)
+static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags)
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
@@ -676,10 +696,10 @@ static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static uint64_t gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- BUG_ON(addr & 0xFFFFFF0000000FFFULL);
- return addr;
+ BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
/**
@@ -721,9 +741,9 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
u32 tmp;
- if (enable && !adev->mc.prt_warning) {
+ if (enable && !adev->gmc.prt_warning) {
dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
- adev->mc.prt_warning = true;
+ adev->gmc.prt_warning = true;
}
tmp = RREG32(mmVM_PRT_CNTL);
@@ -745,7 +765,8 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
if (enable) {
uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
- uint32_t high = adev->vm_manager.max_pfn;
+ uint32_t high = adev->vm_manager.max_pfn -
+ (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -835,11 +856,11 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
WREG32(mmVM_L2_CNTL4, tmp);
/* setup context0 */
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
- (u32)(adev->dummy_page.addr >> 12));
+ (u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT0_CNTL2, 0);
tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
@@ -869,7 +890,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
/* enable context1-15 */
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
- (u32)(adev->dummy_page.addr >> 12));
+ (u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT1_CNTL2, 4);
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
@@ -889,9 +910,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v8_0_set_fault_enable_default(adev, true);
- gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
+ gmc_v8_0_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gart_size >> 20),
+ (unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -964,21 +985,21 @@ static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
*
* Print human readable fault information (CIK).
*/
-static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
- u32 status, u32 addr, u32 mc_client)
+static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
+ u32 addr, u32 mc_client, unsigned pasid)
{
- u32 mc_id;
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+ u32 mc_id;
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
- protections, vmid, addr,
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ protections, vmid, pasid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
"write" : "read", block, mc_client, mc_id);
@@ -1010,16 +1031,16 @@ static int gmc_v8_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v8_0_set_gart_funcs(adev);
+ gmc_v8_0_set_gmc_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
- adev->mc.shared_aperture_start = 0x2000000000000000ULL;
- adev->mc.shared_aperture_end =
- adev->mc.shared_aperture_start + (4ULL << 30) - 1;
- adev->mc.private_aperture_start =
- adev->mc.shared_aperture_end + 1;
- adev->mc.private_aperture_end =
- adev->mc.private_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+ adev->gmc.shared_aperture_end =
+ adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.private_aperture_start =
+ adev->gmc.shared_aperture_end + 1;
+ adev->gmc.private_aperture_end =
+ adev->gmc.private_aperture_start + (4ULL << 30) - 1;
return 0;
}
@@ -1029,7 +1050,7 @@ static int gmc_v8_0_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}
@@ -1043,7 +1064,7 @@ static int gmc_v8_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp;
@@ -1052,14 +1073,14 @@ static int gmc_v8_0_sw_init(void *handle)
else
tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+ adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
}
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
if (r)
return r;
@@ -1067,16 +1088,15 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64, 9);
- adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
*/
- adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+ adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
- adev->mc.stolen_size = 256 * 1024;
+ adev->gmc.stolen_size = 256 * 1024;
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
@@ -1096,6 +1116,7 @@ static int gmc_v8_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
+ adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
r = gmc_v8_0_init_microcode(adev);
if (r) {
@@ -1123,7 +1144,6 @@ static int gmc_v8_0_sw_init(void *handle)
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
- adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
@@ -1143,12 +1163,12 @@ static int gmc_v8_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
gmc_v8_0_gart_fini(adev);
- amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->mc.fw);
- adev->mc.fw = NULL;
+ release_firmware(adev->gmc.fw);
+ adev->gmc.fw = NULL;
return 0;
}
@@ -1189,7 +1209,7 @@ static int gmc_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v8_0_gart_disable(adev);
return 0;
@@ -1213,7 +1233,7 @@ static int gmc_v8_0_resume(void *handle)
if (r)
return r;
- amdgpu_vm_reset_all_ids(adev);
+ amdgpu_vmid_reset_all(adev);
return 0;
}
@@ -1269,10 +1289,10 @@ static bool gmc_v8_0_check_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
if (srbm_soft_reset) {
- adev->mc.srbm_soft_reset = srbm_soft_reset;
+ adev->gmc.srbm_soft_reset = srbm_soft_reset;
return true;
} else {
- adev->mc.srbm_soft_reset = 0;
+ adev->gmc.srbm_soft_reset = 0;
return false;
}
}
@@ -1281,7 +1301,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->mc.srbm_soft_reset)
+ if (!adev->gmc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_stop(adev);
@@ -1297,9 +1317,9 @@ static int gmc_v8_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->mc.srbm_soft_reset)
+ if (!adev->gmc.srbm_soft_reset)
return 0;
- srbm_soft_reset = adev->mc.srbm_soft_reset;
+ srbm_soft_reset = adev->gmc.srbm_soft_reset;
if (srbm_soft_reset) {
u32 tmp;
@@ -1327,7 +1347,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->mc.srbm_soft_reset)
+ if (!adev->gmc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_resume(adev);
@@ -1408,7 +1428,8 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
- gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
+ gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
+ entry->pasid);
}
return 0;
@@ -1640,9 +1661,11 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};
-static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
- .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
- .set_pte_pde = gmc_v8_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
+ .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+ .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
+ .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
+ .set_pte_pde = gmc_v8_0_set_pte_pde,
.set_prt = gmc_v8_0_set_prt,
.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
.get_vm_pde = gmc_v8_0_get_vm_pde
@@ -1653,16 +1676,16 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
.process = gmc_v8_0_process_interrupt,
};
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gart.gart_funcs == NULL)
- adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+ if (adev->gmc.gmc_funcs == NULL)
+ adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->mc.vm_fault.num_types = 1;
- adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
+ adev->gmc.vm_fault.num_types = 1;
+ adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c8f1aeb..e687363 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -21,24 +21,25 @@
*
*/
#include <linux/firmware.h>
+#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
-#include "vega10/soc15ip.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "vega10/HDP/hdp_4_0_sh_mask.h"
-#include "vega10/GC/gc_9_0_sh_mask.h"
-#include "vega10/DC/dce_12_0_offset.h"
-#include "vega10/DC/dce_12_0_sh_mask.h"
-#include "vega10/vega10_enum.h"
-#include "vega10/MMHUB/mmhub_1_0_offset.h"
-#include "vega10/ATHUB/athub_1_0_offset.h"
-
+#include "hdp/hdp_4_0_offset.h"
+#include "hdp/hdp_4_0_sh_mask.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "vega10_enum.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "athub/athub_1_0_offset.h"
+#include "oss/osssys_4_0_offset.h"
+
+#include "soc15.h"
#include "soc15_common.h"
+#include "umc/umc_6_0_sh_mask.h"
-#include "nbio_v6_1.h"
-#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
@@ -73,16 +74,131 @@ static const u32 golden_settings_vega10_hdp[] =
0xf6e, 0x0fffffff, 0x00000000,
};
-static const u32 golden_settings_mmhub_1_0_0[] =
+static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
- SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
- SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
+ SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
+ SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
-static const u32 golden_settings_athub_1_0_0[] =
+static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
- SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
- SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
+ SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
+};
+
+/* Ecc related register addresses, (BASE + reg offset) */
+/* Universal Memory Controller caps (may be fused). */
+/* UMCCH:UmcLocalCap */
+#define UMCLOCALCAPS_ADDR0 (0x00014306 + 0x00000000)
+#define UMCLOCALCAPS_ADDR1 (0x00014306 + 0x00000800)
+#define UMCLOCALCAPS_ADDR2 (0x00014306 + 0x00001000)
+#define UMCLOCALCAPS_ADDR3 (0x00014306 + 0x00001800)
+#define UMCLOCALCAPS_ADDR4 (0x00054306 + 0x00000000)
+#define UMCLOCALCAPS_ADDR5 (0x00054306 + 0x00000800)
+#define UMCLOCALCAPS_ADDR6 (0x00054306 + 0x00001000)
+#define UMCLOCALCAPS_ADDR7 (0x00054306 + 0x00001800)
+#define UMCLOCALCAPS_ADDR8 (0x00094306 + 0x00000000)
+#define UMCLOCALCAPS_ADDR9 (0x00094306 + 0x00000800)
+#define UMCLOCALCAPS_ADDR10 (0x00094306 + 0x00001000)
+#define UMCLOCALCAPS_ADDR11 (0x00094306 + 0x00001800)
+#define UMCLOCALCAPS_ADDR12 (0x000d4306 + 0x00000000)
+#define UMCLOCALCAPS_ADDR13 (0x000d4306 + 0x00000800)
+#define UMCLOCALCAPS_ADDR14 (0x000d4306 + 0x00001000)
+#define UMCLOCALCAPS_ADDR15 (0x000d4306 + 0x00001800)
+
+/* Universal Memory Controller Channel config. */
+/* UMCCH:UMC_CONFIG */
+#define UMCCH_UMC_CONFIG_ADDR0 (0x00014040 + 0x00000000)
+#define UMCCH_UMC_CONFIG_ADDR1 (0x00014040 + 0x00000800)
+#define UMCCH_UMC_CONFIG_ADDR2 (0x00014040 + 0x00001000)
+#define UMCCH_UMC_CONFIG_ADDR3 (0x00014040 + 0x00001800)
+#define UMCCH_UMC_CONFIG_ADDR4 (0x00054040 + 0x00000000)
+#define UMCCH_UMC_CONFIG_ADDR5 (0x00054040 + 0x00000800)
+#define UMCCH_UMC_CONFIG_ADDR6 (0x00054040 + 0x00001000)
+#define UMCCH_UMC_CONFIG_ADDR7 (0x00054040 + 0x00001800)
+#define UMCCH_UMC_CONFIG_ADDR8 (0x00094040 + 0x00000000)
+#define UMCCH_UMC_CONFIG_ADDR9 (0x00094040 + 0x00000800)
+#define UMCCH_UMC_CONFIG_ADDR10 (0x00094040 + 0x00001000)
+#define UMCCH_UMC_CONFIG_ADDR11 (0x00094040 + 0x00001800)
+#define UMCCH_UMC_CONFIG_ADDR12 (0x000d4040 + 0x00000000)
+#define UMCCH_UMC_CONFIG_ADDR13 (0x000d4040 + 0x00000800)
+#define UMCCH_UMC_CONFIG_ADDR14 (0x000d4040 + 0x00001000)
+#define UMCCH_UMC_CONFIG_ADDR15 (0x000d4040 + 0x00001800)
+
+/* Universal Memory Controller Channel Ecc config. */
+/* UMCCH:EccCtrl */
+#define UMCCH_ECCCTRL_ADDR0 (0x00014053 + 0x00000000)
+#define UMCCH_ECCCTRL_ADDR1 (0x00014053 + 0x00000800)
+#define UMCCH_ECCCTRL_ADDR2 (0x00014053 + 0x00001000)
+#define UMCCH_ECCCTRL_ADDR3 (0x00014053 + 0x00001800)
+#define UMCCH_ECCCTRL_ADDR4 (0x00054053 + 0x00000000)
+#define UMCCH_ECCCTRL_ADDR5 (0x00054053 + 0x00000800)
+#define UMCCH_ECCCTRL_ADDR6 (0x00054053 + 0x00001000)
+#define UMCCH_ECCCTRL_ADDR7 (0x00054053 + 0x00001800)
+#define UMCCH_ECCCTRL_ADDR8 (0x00094053 + 0x00000000)
+#define UMCCH_ECCCTRL_ADDR9 (0x00094053 + 0x00000800)
+#define UMCCH_ECCCTRL_ADDR10 (0x00094053 + 0x00001000)
+#define UMCCH_ECCCTRL_ADDR11 (0x00094053 + 0x00001800)
+#define UMCCH_ECCCTRL_ADDR12 (0x000d4053 + 0x00000000)
+#define UMCCH_ECCCTRL_ADDR13 (0x000d4053 + 0x00000800)
+#define UMCCH_ECCCTRL_ADDR14 (0x000d4053 + 0x00001000)
+#define UMCCH_ECCCTRL_ADDR15 (0x000d4053 + 0x00001800)
+
+static const uint32_t ecc_umclocalcap_addrs[] = {
+ UMCLOCALCAPS_ADDR0,
+ UMCLOCALCAPS_ADDR1,
+ UMCLOCALCAPS_ADDR2,
+ UMCLOCALCAPS_ADDR3,
+ UMCLOCALCAPS_ADDR4,
+ UMCLOCALCAPS_ADDR5,
+ UMCLOCALCAPS_ADDR6,
+ UMCLOCALCAPS_ADDR7,
+ UMCLOCALCAPS_ADDR8,
+ UMCLOCALCAPS_ADDR9,
+ UMCLOCALCAPS_ADDR10,
+ UMCLOCALCAPS_ADDR11,
+ UMCLOCALCAPS_ADDR12,
+ UMCLOCALCAPS_ADDR13,
+ UMCLOCALCAPS_ADDR14,
+ UMCLOCALCAPS_ADDR15,
+};
+
+static const uint32_t ecc_umcch_umc_config_addrs[] = {
+ UMCCH_UMC_CONFIG_ADDR0,
+ UMCCH_UMC_CONFIG_ADDR1,
+ UMCCH_UMC_CONFIG_ADDR2,
+ UMCCH_UMC_CONFIG_ADDR3,
+ UMCCH_UMC_CONFIG_ADDR4,
+ UMCCH_UMC_CONFIG_ADDR5,
+ UMCCH_UMC_CONFIG_ADDR6,
+ UMCCH_UMC_CONFIG_ADDR7,
+ UMCCH_UMC_CONFIG_ADDR8,
+ UMCCH_UMC_CONFIG_ADDR9,
+ UMCCH_UMC_CONFIG_ADDR10,
+ UMCCH_UMC_CONFIG_ADDR11,
+ UMCCH_UMC_CONFIG_ADDR12,
+ UMCCH_UMC_CONFIG_ADDR13,
+ UMCCH_UMC_CONFIG_ADDR14,
+ UMCCH_UMC_CONFIG_ADDR15,
+};
+
+static const uint32_t ecc_umcch_eccctrl_addrs[] = {
+ UMCCH_ECCCTRL_ADDR0,
+ UMCCH_ECCCTRL_ADDR1,
+ UMCCH_ECCCTRL_ADDR2,
+ UMCCH_ECCCTRL_ADDR3,
+ UMCCH_ECCCTRL_ADDR4,
+ UMCCH_ECCCTRL_ADDR5,
+ UMCCH_ECCCTRL_ADDR6,
+ UMCCH_ECCCTRL_ADDR7,
+ UMCCH_ECCCTRL_ADDR8,
+ UMCCH_ECCCTRL_ADDR9,
+ UMCCH_ECCCTRL_ADDR10,
+ UMCCH_ECCCTRL_ADDR11,
+ UMCCH_ECCCTRL_ADDR12,
+ UMCCH_ECCCTRL_ADDR13,
+ UMCCH_ECCCTRL_ADDR14,
+ UMCCH_ECCCTRL_ADDR15,
};
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
@@ -134,7 +250,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
+ struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
uint32_t status = 0;
u64 addr;
@@ -148,10 +264,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
if (printk_ratelimit()) {
dev_err(adev->dev,
- "[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
- entry->vm_id_src ? "mmhub" : "gfxhub",
- entry->src_id, entry->ring_id, entry->vm_id,
- entry->pas_id);
+ "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
+ entry->vmid_src ? "mmhub" : "gfxhub",
+ entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid);
dev_err(adev->dev, " at page 0x%016llx from %d\n",
addr, entry->client_id);
if (!amdgpu_sriov_vf(adev))
@@ -170,17 +286,17 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->mc.vm_fault.num_types = 1;
- adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
+ adev->gmc.vm_fault.num_types = 1;
+ adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
{
u32 req = 0;
- /* invalidate using legacy mode on vm_id*/
+ /* invalidate using legacy mode on vmid*/
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vm_id);
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
@@ -201,27 +317,21 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
*/
/**
- * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
*
* @adev: amdgpu_device pointer
* @vmid: vm instance to flush
*
* Flush the TLB for the requested page table.
*/
-static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
uint32_t vmid)
{
/* Use register 17 for GART */
const unsigned eng = 17;
unsigned i, j;
- /* flush hdp cache */
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_hdp_flush(adev);
- else
- nbio_v6_1_hdp_flush(adev);
-
- spin_lock(&adev->mc.invalidate_lock);
+ spin_lock(&adev->gmc.invalidate_lock);
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &adev->vmhub[i];
@@ -254,11 +364,52 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
- spin_unlock(&adev->mc.invalidate_lock);
+ spin_unlock(&adev->gmc.invalidate_lock);
+}
+
+static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
+ uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
+ unsigned eng = ring->vm_inv_eng;
+
+ amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
+
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
+ lower_32_bits(pd_addr));
+
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
+ upper_32_bits(pd_addr));
+
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
+
+ /* wait for the invalidate to complete */
+ amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
+ 1 << vmid, 1 << vmid);
+
+ return pd_addr;
+}
+
+static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
+ unsigned pasid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reg;
+
+ if (ring->funcs->vmhub == AMDGPU_GFXHUB)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
+ else
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
+
+ amdgpu_ring_emit_wreg(ring, reg, pasid);
}
/**
- * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v9_0_set_pte_pde - update the page tables using MMIO
*
* @adev: amdgpu_device pointer
* @cpu_pt_addr: cpu address of the page table
@@ -268,11 +419,9 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
*
* Update the page tables using the CPU.
*/
-static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
- void *cpu_pt_addr,
- uint32_t gpu_page_idx,
- uint64_t addr,
- uint64_t flags)
+static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags)
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
@@ -358,37 +507,142 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
+static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
- BUG_ON(addr & 0xFFFF00000000003FULL);
- return addr;
+ if (!(*flags & AMDGPU_PDE_PTE))
+ *addr = adev->vm_manager.vram_base_offset + *addr -
+ adev->gmc.vram_start;
+ BUG_ON(*addr & 0xFFFF00000000003FULL);
+
+ if (!adev->gmc.translate_further)
+ return;
+
+ if (level == AMDGPU_VM_PDB1) {
+ /* Set the block fragment size */
+ if (!(*flags & AMDGPU_PDE_PTE))
+ *flags |= AMDGPU_PDE_BFS(0x9);
+
+ } else if (level == AMDGPU_VM_PDB0) {
+ if (*flags & AMDGPU_PDE_PTE)
+ *flags &= ~AMDGPU_PDE_PTE;
+ else
+ *flags |= AMDGPU_PTE_TF;
+ }
}
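Reading the reworked gmc_v9_0_get_vm_pde() from this hunk alone: non-PTE directory entries are still rebased from the MC address range onto the driver's VRAM offset, and when "translate further" is enabled, PDB1 entries advertise a 9-bit block fragment size while PDB0 entries either drop the PDE_PTE marker or gain the translate-further bit so the walker descends one more level. With a 9-bit fragment, each block spans 2^9 pages of 4 KiB, i.e. 2 MiB. A quick arithmetic check, not driver code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned block_fragment_bits = 9;	/* AMDGPU_PDE_BFS(0x9) above */
	uint64_t block_bytes = (1ULL << block_fragment_bits) * 4096ULL;

	assert(block_bytes == 2ULL << 20);	/* 2 MiB */
	return 0;
}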
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
- .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
- .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
- .get_invalidate_req = gmc_v9_0_get_invalidate_req,
+static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
+ .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+ .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
+ .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
+ .set_pte_pde = gmc_v9_0_set_pte_pde,
.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
.get_vm_pde = gmc_v9_0_get_vm_pde
};
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
- if (adev->gart.gart_funcs == NULL)
- adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
+ if (adev->gmc.gmc_funcs == NULL)
+ adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
static int gmc_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v9_0_set_gart_funcs(adev);
+ gmc_v9_0_set_gmc_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
+ adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+ adev->gmc.shared_aperture_end =
+ adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.private_aperture_start =
+ adev->gmc.shared_aperture_end + 1;
+ adev->gmc.private_aperture_end =
+ adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+
return 0;
}
+static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
+{
+ uint32_t reg_val;
+ uint32_t reg_addr;
+ uint32_t field_val;
+ size_t i;
+ uint32_t fv2;
+ size_t lost_sheep;
+
+ DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
+
+ lost_sheep = 0;
+ for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
+ reg_addr = ecc_umclocalcap_addrs[i];
+ DRM_DEBUG("ecc: "
+ "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
+ i, reg_addr);
+ reg_val = RREG32(reg_addr);
+ field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
+ EccDis);
+ DRM_DEBUG("ecc: "
+ "reg_val: 0x%08x, "
+ "EccDis: 0x%08x, ",
+ reg_val, field_val);
+ if (field_val) {
+ DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
+ ++lost_sheep;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
+ reg_addr = ecc_umcch_umc_config_addrs[i];
+ DRM_DEBUG("ecc: "
+ "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
+ i, reg_addr);
+ reg_val = RREG32(reg_addr);
+ field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
+ DramReady);
+ DRM_DEBUG("ecc: "
+ "reg_val: 0x%08x, "
+ "DramReady: 0x%08x\n",
+ reg_val, field_val);
+
+ if (!field_val) {
+ DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
+ ++lost_sheep;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
+ reg_addr = ecc_umcch_eccctrl_addrs[i];
+ DRM_DEBUG("ecc: "
+ "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
+ i, reg_addr);
+ reg_val = RREG32(reg_addr);
+ field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
+ WrEccEn);
+ fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
+ RdEccEn);
+ DRM_DEBUG("ecc: "
+ "reg_val: 0x%08x, "
+ "WrEccEn: 0x%08x, "
+ "RdEccEn: 0x%08x\n",
+ reg_val, field_val, fv2);
+
+ if (!field_val) {
+ DRM_DEBUG("ecc: WrEccEn is not set\n");
+ ++lost_sheep;
+ }
+ if (!fv2) {
+ DRM_DEBUG("ecc: RdEccEn is not set\n");
+ ++lost_sheep;
+ }
+ }
+
+ DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
+ return lost_sheep == 0;
+}
+
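The ECC probe above walks three banks of per-channel UMC registers and counts "lost sheep": channels with EccDis set, DramReady clear, or write/read ECC disabled; it returns 1 only when every channel passes, which late_init then reports. The sixteen hard-coded addresses in each table follow one pattern: what look like four UMC instances spaced 0x40000 apart, each with four channels spaced 0x800 apart, added to the per-register base. A loop that reproduces the UmcLocalCap table (sketch only; the driver keeps explicit macros):

#include <assert.h>
#include <stdint.h>

/* Reproduces UMCLOCALCAPS_ADDR0..15 from the base offset 0x00014306. */
static uint32_t umclocalcap_addr(unsigned umc, unsigned chan)
{
	return 0x00014306 + umc * 0x40000 + chan * 0x800;
}

int main(void)
{
	assert(umclocalcap_addr(0, 0) == 0x00014306);
	assert(umclocalcap_addr(1, 3) == 0x00054306 + 0x1800);
	assert(umclocalcap_addr(3, 2) == 0x000d4306 + 0x1000);
	return 0;
}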
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -403,6 +657,7 @@ static int gmc_v9_0_late_init(void *handle)
*/
unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
unsigned i;
+ int r;
for(i = 0; i < adev->num_rings; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -418,17 +673,29 @@ static int gmc_v9_0_late_init(void *handle)
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
BUG_ON(vm_inv_eng[i] > 16);
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
+ r = gmc_v9_0_ecc_available(adev);
+ if (r == 1) {
+ DRM_INFO("ECC is active.\n");
+ } else if (r == 0) {
+ DRM_INFO("ECC is not present.\n");
+ } else {
+ DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
+ return r;
+ }
+ }
+
+ return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
- struct amdgpu_mc *mc)
+ struct amdgpu_gmc *mc)
{
u64 base = 0;
if (!amdgpu_sriov_vf(adev))
base = mmhub_v1_0_get_fb_location(adev);
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->gmc, base);
+ amdgpu_device_gart_location(adev, mc);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU)
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -449,11 +716,16 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
u32 tmp;
int chansize, numchan;
+ int r;
- adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- if (!adev->mc.vram_width) {
+ if (amdgpu_emu_mode != 1)
+ adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
+ if (!adev->gmc.vram_width) {
/* hbm memory channel size */
- chansize = 128;
+ if (adev->flags & AMD_IS_APU)
+ chansize = 64;
+ else
+ chansize = 128;
tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
@@ -488,39 +760,50 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
numchan = 2;
break;
}
- adev->mc.vram_width = numchan * chansize;
+ adev->gmc.vram_width = numchan * chansize;
}
- /* Could aper size report 0 ? */
- adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
- adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
- adev->mc.mc_vram_size =
- ((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
- nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
- adev->mc.real_vram_size = adev->mc.mc_vram_size;
- adev->mc.visible_vram_size = adev->mc.aper_size;
+ adev->gmc.mc_vram_size =
+ adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_device_resize_fb_bar(adev);
+ if (r)
+ return r;
+ }
+ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+#ifdef CONFIG_X86_64
+ if (adev->flags & AMD_IS_APU) {
+ adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
+ adev->gmc.aper_size = adev->gmc.real_vram_size;
+ }
+#endif
/* In case the PCI BAR is larger than the actual amount of vram */
- if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
- adev->mc.visible_vram_size = adev->mc.real_vram_size;
+ adev->gmc.visible_vram_size = adev->gmc.aper_size;
+ if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+ adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
switch (adev->asic_type) {
case CHIP_VEGA10: /* all engines support GPUVM */
+ case CHIP_VEGA12: /* all engines support GPUVM */
default:
- adev->mc.gart_size = 256ULL << 20;
+ adev->gmc.gart_size = 512ULL << 20;
break;
case CHIP_RAVEN: /* DCE SG support */
- adev->mc.gart_size = 1024ULL << 20;
+ adev->gmc.gart_size = 1024ULL << 20;
break;
}
} else {
- adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
- gmc_v9_0_vram_gtt_location(adev, &adev->mc);
+ gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
return 0;
}
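Two behavioural points in this mc_init hunk: on discrete boards the FB BAR is resized via amdgpu_device_resize_fb_bar() before the aperture base and size are sampled, presumably so visible VRAM can cover more of real VRAM where the platform allows it, and the default GART becomes 512 MiB on Vega10/12 while Raven keeps 1 GiB for DCE scatter-gather display. The width fallback also changes: when atomfirmware does not report a VRAM width, it is computed from the DF channel count, with a 64-bit channel size on APUs and 128 bits otherwise. A sketch of that rule (not driver code):

/* Sketch only: mirrors the numchan * chansize computation above. */
static unsigned vram_width_bits(int is_apu, unsigned numchan)
{
	return numchan * (is_apu ? 64u : 128u);
}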
@@ -552,66 +835,53 @@ static int gmc_v9_0_sw_init(void *handle)
gfxhub_v1_0_init(adev);
mmhub_v1_0_init(adev);
- spin_lock_init(&adev->mc.invalidate_lock);
+ spin_lock_init(&adev->gmc.invalidate_lock);
+ adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
switch (adev->asic_type) {
case CHIP_RAVEN:
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
- adev->vm_manager.vm_size = 1U << 18;
- adev->vm_manager.block_size = 9;
- adev->vm_manager.num_level = 3;
- amdgpu_vm_set_fragment_size(adev, 9);
+ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
} else {
- /* vm_size is 64GB for legacy 2-level page support */
- amdgpu_vm_adjust_size(adev, 64, 9);
- adev->vm_manager.num_level = 1;
+ /* vm_size is 128TB + 512GB for legacy 3-level page support */
+ amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
+ adev->gmc.translate_further =
+ adev->vm_manager.num_level > 1;
}
break;
case CHIP_VEGA10:
- /* XXX Don't know how to get VRAM type yet. */
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ case CHIP_VEGA12:
/*
* To fulfill 4-level page support,
* vm size is 256TB (48bit), maximum size of Vega10,
* block size 512 (9bit)
*/
- adev->vm_manager.vm_size = 1U << 18;
- adev->vm_manager.block_size = 9;
- adev->vm_manager.num_level = 3;
- amdgpu_vm_set_fragment_size(adev, 9);
+ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
break;
default:
break;
}
- DRM_INFO("vm size is %llu GB, block size is %u-bit,fragment size is %u-bit\n",
- adev->vm_manager.vm_size,
- adev->vm_manager.block_size,
- adev->vm_manager.fragment_size);
-
/* This interrupt is VMC page fault.*/
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
- &adev->mc.vm_fault);
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
- &adev->mc.vm_fault);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
+ &adev->gmc.vm_fault);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
+ &adev->gmc.vm_fault);
if (r)
return r;
- adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
-
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
*/
- adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
+ adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
/*
* It needs to reserve 8M stolen memory for vega10
* TODO: Figure out how to avoid that...
*/
- adev->mc.stolen_size = 8 * 1024 * 1024;
+ adev->gmc.stolen_size = 8 * 1024 * 1024;
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 44-bits.
@@ -631,6 +901,7 @@ static int gmc_v9_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
}
+ adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
r = gmc_v9_0_mc_init(adev);
if (r)
@@ -660,7 +931,7 @@ static int gmc_v9_0_sw_init(void *handle)
}
/**
- * gmc_v8_0_gart_fini - vm fini callback
+ * gmc_v9_0_gart_fini - vm fini callback
*
* @adev: amdgpu_device pointer
*
@@ -676,9 +947,9 @@ static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
gmc_v9_0_gart_fini(adev);
- amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
return 0;
@@ -686,19 +957,22 @@ static int gmc_v9_0_sw_fini(void *handle)
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
+
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
- (const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0));
- amdgpu_program_register_sequence(adev,
+ ARRAY_SIZE(golden_settings_mmhub_1_0_0));
+ soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0,
- (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
+ ARRAY_SIZE(golden_settings_athub_1_0_0));
+ break;
+ case CHIP_VEGA12:
break;
case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0,
- (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
+ ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
default:
break;
@@ -716,9 +990,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
bool value;
u32 tmp;
- amdgpu_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- (const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_vega10_hdp,
+ ARRAY_SIZE(golden_settings_vega10_hdp));
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -751,10 +1025,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* After HDP is initialized, flush HDP.*/
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_hdp_flush(adev);
- else
- nbio_v6_1_hdp_flush(adev);
+ adev->nbio_funcs->hdp_flush(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -763,10 +1034,10 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_set_fault_enable_default(adev, value);
mmhub_v1_0_set_fault_enable_default(adev, value);
- gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
+ gmc_v9_0_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gart_size >> 20),
+ (unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -817,7 +1088,7 @@ static int gmc_v9_0_hw_fini(void *handle)
return 0;
}
- amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v9_0_gart_disable(adev);
return 0;
@@ -839,7 +1110,7 @@ static int gmc_v9_0_resume(void *handle)
if (r)
return r;
- amdgpu_vm_reset_all_ids(adev);
+ amdgpu_vmid_reset_all(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index bd592cb..842c4b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -111,7 +111,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
iceland_ih_disable_interrupts(adev);
/* setup interrupt control */
- WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+ WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -259,8 +259,8 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
- entry->pas_id = (dw[2] >> 16) & 0xffff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
adev->irq.ih.rptr += 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f33d1ff..26ba984 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -42,6 +42,8 @@
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
+static const struct amd_pm_funcs kv_dpm_funcs;
+
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
bool enable);
@@ -1682,8 +1684,8 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
if (gate) {
/* stop the UVD block */
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
kv_update_uvd_dpm(adev, gate);
if (pi->caps_uvd_pg)
/* power off the UVD block */
@@ -1695,8 +1697,8 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
/* re-init the UVD block */
kv_update_uvd_dpm(adev, gate);
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
}
@@ -2960,6 +2962,8 @@ static int kv_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->powerplay.pp_funcs = &kv_dpm_funcs;
+ adev->powerplay.pp_handle = adev;
kv_dpm_set_irq_funcs(adev);
return 0;
@@ -3301,7 +3305,7 @@ static int kv_dpm_read_sensor(void *handle, int idx,
}
}
-const struct amd_ip_funcs kv_dpm_ip_funcs = {
+static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
.late_init = kv_dpm_late_init,
@@ -3318,8 +3322,16 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
.set_powergating_state = kv_dpm_set_powergating_state,
};
-const struct amd_pm_funcs kv_dpm_funcs = {
- .get_temperature = &kv_dpm_get_temp,
+const struct amdgpu_ip_block_version kv_smu_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &kv_dpm_ip_funcs,
+};
+
+static const struct amd_pm_funcs kv_dpm_funcs = {
.pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state,
.post_set_power_state = &kv_dpm_post_set_power_state,
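
For context on the kv_dpm changes above: instead of exporting kv_dpm_ip_funcs for other files to reference, the driver now publishes kv_smu_ip_block and stashes the amd_pm_funcs table in adev->powerplay during early init. A minimal sketch of how a SoC setup file might consume this, assuming the amdgpu_device_ip_block_add() helper (the actual call site in cik.c is not part of this hunk):

/* Sketch only: register the legacy KV dpm code as an SMC IP block.
 * Assumes amdgpu.h and the kv_smu_ip_block declaration from kv_dpm.h.
 */
static int example_add_kv_smu_block(struct amdgpu_device *adev)
{
	/* replaces direct references to the (now static) kv_dpm_ip_funcs */
	return amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
}
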
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index cc21c4b..43f9257 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -23,14 +23,12 @@
#include "amdgpu.h"
#include "mmhub_v1_0.h"
-#include "vega10/soc15ip.h"
-#include "vega10/MMHUB/mmhub_1_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
-#include "vega10/MMHUB/mmhub_1_0_default.h"
-#include "vega10/ATHUB/athub_1_0_offset.h"
-#include "vega10/ATHUB/athub_1_0_sh_mask.h"
-#include "vega10/ATHUB/athub_1_0_default.h"
-#include "vega10/vega10_enum.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
+#include "mmhub/mmhub_1_0_default.h"
+#include "athub/athub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
+#include "vega10_enum.h"
#include "soc15_common.h"
@@ -52,7 +50,7 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
uint64_t value;
BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
- value = adev->gart.table_addr - adev->mc.vram_start +
+ value = adev->gart.table_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
value &= 0x0000FFFFFFFFF000ULL;
value |= 0x1; /* valid bit */
@@ -69,14 +67,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
mmhub_v1_0_init_gart_pt_regs(adev);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->mc.gart_start >> 12));
+ (u32)(adev->gmc.gart_start >> 12));
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->mc.gart_start >> 44));
+ (u32)(adev->gmc.gart_start >> 44));
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->mc.gart_end >> 12));
+ (u32)(adev->gmc.gart_end >> 12));
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->mc.gart_end >> 44));
+ (u32)(adev->gmc.gart_end >> 44));
}
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -91,12 +89,12 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->mc.vram_start >> 18);
+ adev->gmc.vram_start >> 18);
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->mc.vram_end >> 18);
+ adev->gmc.vram_end >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
@@ -105,9 +103,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
/* Program "protection fault". */
WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- (u32)(adev->dummy_page.addr >> 12));
+ (u32)(adev->dummy_page_addr >> 12));
WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- (u32)((u64)adev->dummy_page.addr >> 44));
+ (u32)((u64)adev->dummy_page_addr >> 44));
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
@@ -157,10 +155,15 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
- tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
- WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
@@ -198,32 +201,40 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
- int i;
+ unsigned num_level, block_size;
uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ num_level);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- ENABLE_CONTEXT, 1);
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_DEPTH, adev->vm_manager.num_level);
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
@@ -261,21 +272,21 @@ static const struct pctl_data pctl0_data[] = {
{0x11, 0x6a684},
{0x19, 0xea68e},
{0x29, 0xa69e},
- {0x2b, 0x34a6c0},
- {0x61, 0x83a707},
- {0xe6, 0x8a7a4},
- {0xf0, 0x1a7b8},
- {0xf3, 0xfa7cc},
- {0x104, 0x17a7dd},
- {0x11d, 0xa7dc},
- {0x11f, 0x12a7f5},
- {0x133, 0xa808},
- {0x135, 0x12a810},
- {0x149, 0x7a82c}
+ {0x2b, 0x0010a6c0},
+ {0x3d, 0x83a707},
+ {0xc2, 0x8a7a4},
+ {0xcc, 0x1a7b8},
+ {0xcf, 0xfa7cc},
+ {0xe0, 0x17a7dd},
+ {0xf9, 0xa7dc},
+ {0xfb, 0x12a7f5},
+ {0x10f, 0xa808},
+ {0x111, 0x12a810},
+ {0x125, 0x7a82c}
};
#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
-#define PCTL0_RENG_EXEC_END_PTR 0x151
+#define PCTL0_RENG_EXEC_END_PTR 0x12d
#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
@@ -374,10 +385,9 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return;
+ /****************** pctl0 **********************/
pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC);
pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
- pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
- pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
/* Light sleep must be disabled before writing to pctl0 registers */
pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
@@ -391,12 +401,13 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
pctl0_data[i].data);
}
- /* Set the reng execute end ptr for pctl0 */
- pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
- PCTL0_RENG_EXECUTE,
- RENG_EXECUTE_END_PTR,
- PCTL0_RENG_EXEC_END_PTR);
- WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+ /* Re-enable light sleep */
+ pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
+
+ /****************** pctl1 **********************/
+ pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
+ pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
/* Light sleep must be disabled before writing to pctl1 registers */
pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
@@ -410,20 +421,25 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
pctl1_data[i].data);
}
+ /* Re-enable light sleep */
+ pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
+ WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
+
+ mmhub_v1_0_power_gating_write_save_ranges(adev);
+
+ /* Set the reng execute end ptr for pctl0 */
+ pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+ PCTL0_RENG_EXECUTE,
+ RENG_EXECUTE_END_PTR,
+ PCTL0_RENG_EXEC_END_PTR);
+ WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+
/* Set the reng execute end ptr for pctl1 */
pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
PCTL1_RENG_EXECUTE,
RENG_EXECUTE_END_PTR,
PCTL1_RENG_EXEC_END_PTR);
WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
-
- mmhub_v1_0_power_gating_write_save_ranges(adev);
-
- /* Re-enable light sleep */
- pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
- WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
- pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
- WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
}
void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
@@ -455,6 +471,9 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
RENG_EXECUTE_ON_REG_UPDATE, 1);
WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+ if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu)
+ amdgpu_dpm_set_mmhub_powergating_by_smu(adev);
+
} else {
pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
PCTL0_RENG_EXECUTE,
@@ -483,9 +502,9 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
* SRIOV driver need to program them
*/
WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
- adev->mc.vram_start >> 24);
+ adev->gmc.vram_start >> 24);
WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
- adev->mc.vram_end >> 24);
+ adev->gmc.vram_end >> 24);
}
/* GART Enable. */
@@ -714,6 +733,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->asic_type) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_RAVEN:
mmhub_v1_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index b4906d2..4933486 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -22,11 +22,10 @@
*/
#include "amdgpu.h"
-#include "vega10/soc15ip.h"
-#include "vega10/NBIO/nbio_6_1_offset.h"
-#include "vega10/NBIO/nbio_6_1_sh_mask.h"
-#include "vega10/GC/gc_9_0_offset.h"
-#include "vega10/GC/gc_9_0_sh_mask.h"
+#include "nbio/nbio_6_1_offset.h"
+#include "nbio/nbio_6_1_sh_mask.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
@@ -34,56 +33,34 @@
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
- u32 reg;
- int timeout = AI_MAILBOX_TIMEDOUT;
- u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
-
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL), reg);
-
- /*Wait for RCV_MSG_VALID to be 0*/
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- while (reg & mask) {
- if (timeout <= 0) {
- pr_err("RCV_MSG_VALID is not cleared\n");
- break;
- }
- mdelay(1);
- timeout -=1;
-
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- }
+ WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
- u32 reg;
+ WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
+}
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
- TRN_MSG_VALID, val ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
- reg);
+/*
+ * this peek_msg could *only* be called in the IRQ routine because in the IRQ
+ * routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL must already
+ * have been set to 1 by the host.
+ *
+ * if called outside the IRQ routine, this peek_msg is not guaranteed to
+ * return a correct value, since RCV_DW0 only holds a valid message once
+ * RCV_MSG_VALID has been set by the host.
+ */
+static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
+{
+ return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}
+
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
enum idh_event event)
{
u32 reg;
- u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
-
- if (event != IDH_FLR_NOTIFICATION_CMPL) {
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- if (!(reg & mask))
- return -ENOENT;
- }
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
@@ -95,54 +72,67 @@ static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
return 0;
}
+static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev) {
+ return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
+}
+
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
- int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
- u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
- u32 reg;
+ int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
+ u8 reg;
+
+ do {
+ reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
+ if (reg & 2)
+ return 0;
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- while (!(reg & mask)) {
- if (timeout <= 0) {
- pr_err("Doesn't get ack from pf.\n");
- r = -ETIME;
- break;
- }
mdelay(5);
timeout -= 5;
+ } while (timeout > 1);
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_CONTROL));
- }
+ pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);
- return r;
+ return -ETIME;
}
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
- int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
-
- r = xgpu_ai_mailbox_rcv_msg(adev, event);
- while (r) {
- if (timeout <= 0) {
- pr_err("Doesn't get msg:%d from pf.\n", event);
- r = -ETIME;
- break;
- }
- mdelay(5);
- timeout -= 5;
+ int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;
+ do {
r = xgpu_ai_mailbox_rcv_msg(adev, event);
- }
+ if (!r)
+ return 0;
- return r;
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+
+ return -ETIME;
}
static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3) {
u32 reg;
int r;
+ uint8_t trn;
+
+ /* IMPORTANT:
+ * clear TRN_MSG_VALID first; with it cleared the hw automatically clears
+ * the host's RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK.
+ * Otherwise the xgpu_ai_poll_ack() below would return immediately.
+ */
+ do {
+ xgpu_ai_mailbox_set_valid(adev, false);
+ trn = xgpu_ai_peek_ack(adev);
+ if (trn) {
+ pr_err("trn=%x ACK should not assert! wait again !\n", trn);
+ msleep(1);
+ }
+ } while(trn);
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
@@ -246,15 +236,36 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
-
- /* wait until RCV_MSG become 3 */
- if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
- pr_err("failed to recieve FLR_CMPL\n");
- return;
- }
-
- /* Trigger recovery due to world switch failure */
- amdgpu_sriov_gpu_reset(adev, NULL);
+ int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
+ int locked;
+
+ /* block amdgpu_gpu_recover until the FLR COMPLETE message is received,
+ * otherwise the mailbox msg will be ruined/reset by the VF FLR.
+ *
+ * we can unlock lock_reset to allow "amdgpu_job_timedout" to run
+ * gpu_recover() once FLR_NOTIFICATION_CMPL is received, which means
+ * the host side has finished this VF's FLR.
+ */
+ locked = mutex_trylock(&adev->lock_reset);
+ if (locked)
+ adev->in_gpu_reset = 1;
+
+ do {
+ if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+ goto flr_done;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+flr_done:
+ if (locked)
+ mutex_unlock(&adev->lock_reset);
+
+ /* Trigger recovery for world switch failure if no TDR */
+ if (amdgpu_lockup_timeout == 0)
+ amdgpu_device_gpu_recover(adev, NULL, true);
}
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -275,16 +286,22 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int r;
+ enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
- /* trigger gpu-reset by hypervisor only if TDR disbaled */
- if (amdgpu_lockup_timeout == 0) {
- /* see what event we get */
- r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
-
- /* only handle FLR_NOTIFY now */
- if (!r)
+ switch (event) {
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
schedule_work(&adev->virt.flr_work);
+ break;
+ /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
+ * ignore it for now since the polling thread will handle it; other
+ * msgs such as flr complete are not handled here either.
+ */
+ */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
+ break;
}
return 0;
@@ -312,11 +329,11 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
if (r)
return r;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
@@ -353,5 +370,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.reset_gpu = xgpu_ai_request_reset,
+ .wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 67e7857..b4a9cee 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,9 @@
#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__
-#define AI_MAILBOX_TIMEDOUT 12000
+#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
+#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT 500
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -51,4 +53,7 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
+#define AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4
+#define AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4 + 1
+
#endif
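
The two *_OFFSET_BYTE macros above convert the SOC15 dword register index of BIF_BX_PF0_MAILBOX_CONTROL into CPU byte offsets, so the TRN half (low byte) and RCV half (second byte) of the mailbox control register can be driven with single-byte RREG8/WREG8 accesses instead of dword read-modify-write cycles. A minimal sketch of the resulting access pattern (bit 1 of each byte being the ACK bit is inferred from the WREG8(..., 2) and "& 2" uses in mxgpu_ai.c):

/* Sketch, assuming amdgpu's RREG8()/WREG8() take byte offsets into the
 * register aperture; the dword index therefore has to be scaled by 4.
 */
static void example_ack_host_msg(struct amdgpu_device *adev)
{
	/* byte 1 of MAILBOX_CONTROL holds the RCV flags; bit 1 = RCV_MSG_ACK */
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static bool example_trn_ack_pending(struct amdgpu_device *adev)
{
	/* byte 0 holds the TRN flags; bit 1 = TRN_MSG_ACK from the host */
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}
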
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index c25a831..9fc1c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -279,32 +279,32 @@ void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(
- xgpu_fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_golden_settings_a10,
- (const u32)ARRAY_SIZE(
- xgpu_fiji_golden_settings_a10));
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_golden_common_all,
- (const u32)ARRAY_SIZE(
- xgpu_fiji_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(
+ xgpu_fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_golden_settings_a10,
+ ARRAY_SIZE(
+ xgpu_fiji_golden_settings_a10));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_golden_common_all,
+ ARRAY_SIZE(
+ xgpu_fiji_golden_common_all));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(
- xgpu_tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_golden_settings_a11,
- (const u32)ARRAY_SIZE(
- xgpu_tonga_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_golden_common_all,
- (const u32)ARRAY_SIZE(
- xgpu_tonga_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(
+ xgpu_tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_golden_settings_a11,
+ ARRAY_SIZE(
+ xgpu_tonga_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_golden_common_all,
+ ARRAY_SIZE(
+ xgpu_tonga_golden_common_all));
break;
default:
BUG_ON("Doesn't support chip type.\n");
@@ -446,8 +446,10 @@ static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
request == IDH_REQ_GPU_FINI_ACCESS ||
request == IDH_REQ_GPU_RESET_ACCESS) {
r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
- if (r)
- pr_err("Doesn't get ack from pf, continue\n");
+ if (r) {
+ pr_err("Doesn't get ack from pf, give up\n");
+ return r;
+ }
}
return 0;
@@ -458,6 +460,11 @@ static int xgpu_vi_request_reset(struct amdgpu_device *adev)
return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
+static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
+{
+ return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
+}
+
static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
@@ -514,7 +521,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
}
/* Trigger recovery due to world switch failure */
- amdgpu_sriov_gpu_reset(adev, NULL);
+ amdgpu_device_gpu_recover(adev, NULL, false);
}
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -538,7 +545,7 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
int r;
/* trigger gpu-reset by hypervisor only if TDR disbaled */
- if (amdgpu_lockup_timeout == 0) {
+ if (!amdgpu_gpu_recovery) {
/* see what event we get */
r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
@@ -613,5 +620,6 @@ const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
.req_full_gpu = xgpu_vi_request_full_gpu_access,
.rel_full_gpu = xgpu_vi_release_full_gpu_access,
.reset_gpu = xgpu_vi_request_reset,
+ .wait_reset = xgpu_vi_wait_reset_cmpl,
.trans_msg = NULL, /* Does not need to trans VF errors to host. */
};
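
Note the new .wait_reset member of amdgpu_virt_ops: the VI backend implements it via xgpu_vi_wait_reset_cmpl(), while the AI backend above leaves it NULL, so common code has to treat the hook as optional. A minimal sketch of a guarded caller (the real call site in the shared reset path is assumed, not shown in this diff):

/* Sketch only: wait_reset is optional, so generic SR-IOV code must
 * check for NULL before dispatching through adev->virt.ops.
 */
static int example_wait_for_vf_flr(struct amdgpu_device *adev)
{
	if (adev->virt.ops && adev->virt.ops->wait_reset)
		return adev->virt.ops->wait_reset(adev);

	return 0; /* backend (e.g. mxgpu_ai) handles FLR completion itself */
}
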
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 904a1ba..6f9c549 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -24,17 +24,16 @@
#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"
-#include "vega10/soc15ip.h"
-#include "vega10/NBIO/nbio_6_1_default.h"
-#include "vega10/NBIO/nbio_6_1_offset.h"
-#include "vega10/NBIO/nbio_6_1_sh_mask.h"
-#include "vega10/vega10_enum.h"
+#include "nbio/nbio_6_1_default.h"
+#include "nbio/nbio_6_1_offset.h"
+#include "nbio/nbio_6_1_sh_mask.h"
+#include "vega10_enum.h"
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_CONFIG_CNTL 0x11180044
-u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
+static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -44,19 +43,7 @@ u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
return tmp;
}
-u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx)
-{
- return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
-}
-
-void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val)
-{
- WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
-}
-
-void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
+static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
@@ -66,26 +53,30 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_SOC15_NO_KIQ(NBIO, 0,
+ mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
+ 0);
+ else
+ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
}
-u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
+static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
}
-static const u32 nbio_sdma_doorbell_range_reg[] =
-{
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
-};
-
-void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index)
{
- u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
+ u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
+
+ u32 doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -93,17 +84,18 @@ void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
- WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
+ WREG32(reg, doorbell_range);
+
}
-void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
{
WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
-void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+ bool enable)
{
u32 tmp = 0;
@@ -122,8 +114,8 @@ void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
}
-void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index)
+static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
@@ -136,12 +128,12 @@ void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}
-void nbio_v6_1_ih_control(struct amdgpu_device *adev)
+static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
/* setup interrupt control */
- WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+ WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -152,8 +144,8 @@ void nbio_v6_1_ih_control(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
-void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t def, data;
@@ -180,8 +172,8 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_PCIE(smnCPM_CONTROL, data);
}
-void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t def, data;
@@ -200,7 +192,8 @@ void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_CNTL2, data);
}
-void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
{
int data;
@@ -215,9 +208,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
- .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
- .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
+static u32 nbio_v6_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
+}
+
+static u32 nbio_v6_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
+}
+
+static u32 nbio_v6_1_get_pcie_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
+}
+
+static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
+}
+
+static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -232,12 +243,7 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};
-const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
- .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
- .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
-};
-
-void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
+static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -254,7 +260,7 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
}
}
-void nbio_v6_1_init_registers(struct amdgpu_device *adev)
+static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -265,3 +271,25 @@ void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
}
+
+const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
+ .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
+ .get_rev_id = nbio_v6_1_get_rev_id,
+ .mc_access_enable = nbio_v6_1_mc_access_enable,
+ .hdp_flush = nbio_v6_1_hdp_flush,
+ .get_memsize = nbio_v6_1_get_memsize,
+ .sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
+ .enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v6_1_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v6_1_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v6_1_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v6_1_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v6_1_get_clockgating_state,
+ .ih_control = nbio_v6_1_ih_control,
+ .init_registers = nbio_v6_1_init_registers,
+ .detect_hw_virt = nbio_v6_1_detect_hw_virt,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 14ca8d4..0743a6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,30 +26,6 @@
#include "soc15_common.h"
-extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
-int nbio_v6_1_init(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx);
-void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val);
-void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_hdp_flush(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev);
-void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index);
-void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable);
-void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
- bool enable);
-void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
-void nbio_v6_1_ih_control(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev);
-void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
-void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev);
-void nbio_v6_1_init_registers(struct amdgpu_device *adev);
+extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
#endif
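
With all nbio_v6_1_* entry points made static and the header reduced to a single exported amdgpu_nbio_funcs table, callers stop open-coding per-chip branches (compare the gmc_v9_0_gart_enable hunk earlier, which now calls adev->nbio_funcs->hdp_flush()). A minimal sketch of the intended wiring, assuming the selection happens once during soc15 early init (the assignment site is not part of this diff):

/* Sketch only: choose the NBIO callback table once per ASIC, then use
 * indirect calls everywhere.  Assumes nbio_v6_1_funcs/nbio_v7_0_funcs
 * as exported by the headers in this patch.
 */
static void example_select_nbio_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;	/* Raven */
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;	/* Vega */
}

static void example_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	/* ring == NULL -> direct MMIO write; otherwise emitted on the ring */
	adev->nbio_funcs->hdp_flush(adev, ring);
}
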
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index f802b97..df34dc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -24,15 +24,17 @@
#include "amdgpu_atombios.h"
#include "nbio_v7_0.h"
-#include "vega10/soc15ip.h"
-#include "raven1/NBIO/nbio_7_0_default.h"
-#include "raven1/NBIO/nbio_7_0_offset.h"
-#include "raven1/NBIO/nbio_7_0_sh_mask.h"
-#include "vega10/vega10_enum.h"
+#include "nbio/nbio_7_0_default.h"
+#include "nbio/nbio_7_0_offset.h"
+#include "nbio/nbio_7_0_sh_mask.h"
+#include "vega10_enum.h"
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
-u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
+#define smnCPM_CONTROL 0x11180460
+#define smnPCIE_CNTL2 0x11180070
+
+static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -42,19 +44,7 @@ u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
return tmp;
}
-u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx)
-{
- return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
-}
-
-void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val)
-{
- WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
-}
-
-void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
+static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
@@ -63,26 +53,28 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ else
+ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
}
-u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
+static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}
-static const u32 nbio_sdma_doorbell_range_reg[] =
+static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index)
{
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
-};
+ u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
-void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index)
-{
- u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
+ u32 doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -90,17 +82,23 @@ void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
- WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
+ WREG32(reg, doorbell_range);
}
-void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
{
WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
-void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index)
+static void nbio_v7_0_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+ bool enable)
+{
+
+}
+
+static void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
@@ -130,8 +128,8 @@ static void nbio_7_0_write_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t o
WREG32_SOC15(NBIO, 0, mmSYSHUB_DATA, data);
}
-void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t def, data;
@@ -169,12 +167,48 @@ void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK, data);
}
-void nbio_v7_0_ih_control(struct amdgpu_device *adev)
+static void nbio_v7_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CNTL2);
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
+ data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+ PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+ PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
+ } else {
+ data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+ PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+ PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
+ }
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CNTL2, data);
+}
+
+static void nbio_v7_0_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_BIF_MGCG */
+ data = RREG32_PCIE(smnCPM_CONTROL);
+ if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_PCIE(smnPCIE_CNTL2);
+ if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
+static void nbio_v7_0_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
/* setup interrupt control */
- WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+ WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -185,9 +219,27 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
+static u32 nbio_v7_0_get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
+}
+
+static u32 nbio_v7_0_get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
+}
+
+static u32 nbio_v7_0_get_pcie_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
+}
+
+static u32 nbio_v7_0_get_pcie_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
+}
+
const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
- .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
- .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -202,7 +254,35 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
-const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
- .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
- .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
+static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
+static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
+{
+
+}
+
+const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
+ .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
+ .get_rev_id = nbio_v7_0_get_rev_id,
+ .mc_access_enable = nbio_v7_0_mc_access_enable,
+ .hdp_flush = nbio_v7_0_hdp_flush,
+ .get_memsize = nbio_v7_0_get_memsize,
+ .sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
+ .enable_doorbell_aperture = nbio_v7_0_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v7_0_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v7_0_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v7_0_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_0_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_0_get_clockgating_state,
+ .ih_control = nbio_v7_0_ih_control,
+ .init_registers = nbio_v7_0_init_registers,
+ .detect_hw_virt = nbio_v7_0_detect_hw_virt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index df8fa90..508d549 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,24 +26,6 @@
#include "soc15_common.h"
-extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
-int nbio_v7_0_init(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx);
-void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val);
-void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable);
-void nbio_v7_0_hdp_flush(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev);
-void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index);
-void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable);
-void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
-void nbio_v7_0_ih_control(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev);
-void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable);
+extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 4e20d91..8873d83 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -30,10 +30,9 @@
#include "soc15_common.h"
#include "psp_v10_0.h"
-#include "vega10/soc15ip.h"
-#include "raven1/MP/mp_10_0_offset.h"
-#include "raven1/GC/gc_9_1_offset.h"
-#include "raven1/SDMA0/sdma0_4_1_offset.h"
+#include "mp/mp_10_0_offset.h"
+#include "gc/gc_9_1_offset.h"
+#include "sdma0/sdma0_4_1_offset.h"
MODULE_FIRMWARE("amdgpu/raven_asd.bin");
@@ -88,7 +87,7 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
return 0;
}
-int psp_v10_0_init_microcode(struct psp_context *psp)
+static int psp_v10_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
@@ -134,7 +133,8 @@ out:
return err;
}
-int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd)
+static int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
+ struct psp_gfx_cmd_resp *cmd)
{
int ret;
uint64_t fw_mem_mc_addr = ucode->mc_addr;
@@ -153,7 +153,8 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
return ret;
}
-int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v10_0_ring_init(struct psp_context *psp,
+ enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -178,7 +179,8 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return 0;
}
-int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v10_0_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type)
{
int ret = 0;
unsigned int psp_ring_reg = 0;
@@ -209,7 +211,8 @@ int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v10_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -232,7 +235,8 @@ int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v10_0_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring = &psp->km_ring;
@@ -249,10 +253,10 @@ int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type
return ret;
}
-int psp_v10_0_cmd_submit(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index)
+static int psp_v10_0_cmd_submit(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode,
+ uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
+ int index)
{
unsigned int psp_write_ptr_reg = 0;
struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
@@ -298,9 +302,10 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
}
static int
-psp_v10_0_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
+psp_v10_0_sram_map(struct amdgpu_device *adev,
+ unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+ unsigned int *sram_data_reg_offset,
+ enum AMDGPU_UCODE_ID ucode_id)
{
int ret = 0;
@@ -383,9 +388,9 @@ psp_v10_0_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset
return ret;
}
-bool psp_v10_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
+static bool psp_v10_0_compare_sram_data(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode,
+ enum AMDGPU_UCODE_ID ucode_type)
{
int err = 0;
unsigned int fw_sram_reg_val = 0;
@@ -395,7 +400,7 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
uint32_t *ucode_mem = NULL;
struct amdgpu_device *adev = psp->adev;
- err = psp_v10_0_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+ err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
&fw_sram_data_reg_offset, ucode_type);
if (err)
return false;
@@ -419,8 +424,25 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
}
-int psp_v10_0_mode1_reset(struct psp_context *psp)
+static int psp_v10_0_mode1_reset(struct psp_context *psp)
{
DRM_INFO("psp mode 1 reset not supported now! \n");
return -EINVAL;
}
+
+static const struct psp_funcs psp_v10_0_funcs = {
+ .init_microcode = psp_v10_0_init_microcode,
+ .prep_cmd_buf = psp_v10_0_prep_cmd_buf,
+ .ring_init = psp_v10_0_ring_init,
+ .ring_create = psp_v10_0_ring_create,
+ .ring_stop = psp_v10_0_ring_stop,
+ .ring_destroy = psp_v10_0_ring_destroy,
+ .cmd_submit = psp_v10_0_cmd_submit,
+ .compare_sram_data = psp_v10_0_compare_sram_data,
+ .mode1_reset = psp_v10_0_mode1_reset,
+};
+
+void psp_v10_0_set_psp_funcs(struct psp_context *psp)
+{
+ psp->funcs = &psp_v10_0_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
index 451e830..20c2a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
@@ -27,24 +27,6 @@
#include "amdgpu_psp.h"
-extern int psp_v10_0_init_microcode(struct psp_context *psp);
-extern int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd);
-extern int psp_v10_0_ring_init(struct psp_context *psp,
- enum psp_ring_type ring_type);
-extern int psp_v10_0_ring_create(struct psp_context *psp,
- enum psp_ring_type ring_type);
-extern int psp_v10_0_ring_stop(struct psp_context *psp,
- enum psp_ring_type ring_type);
-extern int psp_v10_0_ring_destroy(struct psp_context *psp,
- enum psp_ring_type ring_type);
-extern int psp_v10_0_cmd_submit(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index);
-extern bool psp_v10_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type);
+void psp_v10_0_set_psp_funcs(struct psp_context *psp);
-extern int psp_v10_0_mode1_reset(struct psp_context *psp);
#endif
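
The PSP v10 backend now exposes only psp_v10_0_set_psp_funcs() and keeps everything else static (mirrored by psp_v3_1_set_psp_funcs() in the hunks below); the common PSP layer is expected to pick a backend by ASIC and then work exclusively through psp->funcs. A minimal sketch of that selection, assuming a psp_early_init-style hook in amdgpu_psp.c (the exact function is not part of these hunks):

/* Sketch only: per-ASIC selection of the PSP backend via the new
 * set_psp_funcs() setters; all later calls go through psp->funcs.
 */
static int example_psp_early_init(struct amdgpu_device *adev)
{
	struct psp_context *psp = &adev->psp;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		psp_v3_1_set_psp_funcs(psp);
		break;
	case CHIP_RAVEN:
		psp_v10_0_set_psp_funcs(psp);
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;
	return 0;
}
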
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index c7bcfe8..196e75d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -31,15 +31,16 @@
#include "soc15_common.h"
#include "psp_v3_1.h"
-#include "vega10/soc15ip.h"
-#include "vega10/MP/mp_9_0_offset.h"
-#include "vega10/MP/mp_9_0_sh_mask.h"
-#include "vega10/GC/gc_9_0_offset.h"
-#include "vega10/SDMA0/sdma0_4_0_offset.h"
-#include "vega10/NBIO/nbio_6_1_offset.h"
+#include "mp/mp_9_0_offset.h"
+#include "mp/mp_9_0_sh_mask.h"
+#include "gc/gc_9_0_offset.h"
+#include "sdma0/sdma0_4_0_offset.h"
+#include "nbio/nbio_6_1_offset.h"
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
+MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
#define smnMP1_FIRMWARE_FLAGS 0x3010028
@@ -94,7 +95,7 @@ psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *t
return 0;
}
-int psp_v3_1_init_microcode(struct psp_context *psp)
+static int psp_v3_1_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
@@ -108,6 +109,9 @@ int psp_v3_1_init_microcode(struct psp_context *psp)
case CHIP_VEGA10:
chip_name = "vega10";
break;
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
default: BUG();
}
@@ -162,7 +166,7 @@ out:
return err;
}
-int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
+static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
@@ -203,7 +207,7 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
return ret;
}
-int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
+static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
{
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
@@ -244,7 +248,8 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd)
+static int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
+ struct psp_gfx_cmd_resp *cmd)
{
int ret;
uint64_t fw_mem_mc_addr = ucode->mc_addr;
@@ -263,7 +268,8 @@ int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd
return ret;
}
-int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v3_1_ring_init(struct psp_context *psp,
+ enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -288,7 +294,8 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return 0;
}
-int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v3_1_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type)
{
int ret = 0;
unsigned int psp_ring_reg = 0;
@@ -319,7 +326,8 @@ int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v3_1_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -342,7 +350,8 @@ int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v3_1_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring = &psp->km_ring;
@@ -359,10 +368,10 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v3_1_cmd_submit(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index)
+static int psp_v3_1_cmd_submit(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode,
+ uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
+ int index)
{
unsigned int psp_write_ptr_reg = 0;
struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
@@ -410,7 +419,8 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
}
static int
-psp_v3_1_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+psp_v3_1_sram_map(struct amdgpu_device *adev,
+ unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
unsigned int *sram_data_reg_offset,
enum AMDGPU_UCODE_ID ucode_id)
{
@@ -495,9 +505,9 @@ psp_v3_1_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
return ret;
}
-bool psp_v3_1_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
+static bool psp_v3_1_compare_sram_data(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode,
+ enum AMDGPU_UCODE_ID ucode_type)
{
int err = 0;
unsigned int fw_sram_reg_val = 0;
@@ -507,7 +517,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
uint32_t *ucode_mem = NULL;
struct amdgpu_device *adev = psp->adev;
- err = psp_v3_1_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+ err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
&fw_sram_data_reg_offset, ucode_type);
if (err)
return false;
@@ -530,7 +540,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
return true;
}
-bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
+static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
uint32_t reg;
@@ -541,7 +551,7 @@ bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
-int psp_v3_1_mode1_reset(struct psp_context *psp)
+static int psp_v3_1_mode1_reset(struct psp_context *psp)
{
int ret;
uint32_t offset;
@@ -574,3 +584,23 @@ int psp_v3_1_mode1_reset(struct psp_context *psp)
return 0;
}
+
+static const struct psp_funcs psp_v3_1_funcs = {
+ .init_microcode = psp_v3_1_init_microcode,
+ .bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
+ .bootloader_load_sos = psp_v3_1_bootloader_load_sos,
+ .prep_cmd_buf = psp_v3_1_prep_cmd_buf,
+ .ring_init = psp_v3_1_ring_init,
+ .ring_create = psp_v3_1_ring_create,
+ .ring_stop = psp_v3_1_ring_stop,
+ .ring_destroy = psp_v3_1_ring_destroy,
+ .cmd_submit = psp_v3_1_cmd_submit,
+ .compare_sram_data = psp_v3_1_compare_sram_data,
+ .smu_reload_quirk = psp_v3_1_smu_reload_quirk,
+ .mode1_reset = psp_v3_1_mode1_reset,
+};
+
+void psp_v3_1_set_psp_funcs(struct psp_context *psp)
+{
+ psp->funcs = &psp_v3_1_funcs;
+}
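Note: with every psp_v3_1_* entry point now static, the common PSP layer reaches this code only through the table installed by psp_v3_1_set_psp_funcs(). A minimal sketch of that dispatch pattern (the wrapper name is illustrative, not an existing amdgpu helper):

	/* Illustrative only: dispatch through the installed vtable instead of
	 * calling psp_v3_1_* symbols directly; assumes psp->funcs was set up
	 * by psp_v3_1_set_psp_funcs(). */
	static int psp_init_microcode_example(struct psp_context *psp)
	{
		if (!psp->funcs || !psp->funcs->init_microcode)
			return -EINVAL;
		return psp->funcs->init_microcode(psp);
	}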
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
index b05dbad..e411e31 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
@@ -32,26 +32,6 @@ enum { PSP_BINARY_ALIGNMENT = 64 };
enum { PSP_BOOTLOADER_1_MEG_ALIGNMENT = 0x100000 };
enum { PSP_BOOTLOADER_8_MEM_ALIGNMENT = 0x800000 };
-extern int psp_v3_1_init_microcode(struct psp_context *psp);
-extern int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp);
-extern int psp_v3_1_bootloader_load_sos(struct psp_context *psp);
-extern int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd);
-extern int psp_v3_1_ring_init(struct psp_context *psp,
- enum psp_ring_type ring_type);
-extern int psp_v3_1_ring_create(struct psp_context *psp,
- enum psp_ring_type ring_type);
-extern int psp_v3_1_ring_stop(struct psp_context *psp,
- enum psp_ring_type ring_type);
-extern int psp_v3_1_ring_destroy(struct psp_context *psp,
- enum psp_ring_type ring_type);
-extern int psp_v3_1_cmd_submit(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
- int index);
-extern bool psp_v3_1_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type);
-extern bool psp_v3_1_smu_reload_quirk(struct psp_context *psp);
-extern int psp_v3_1_mode1_reset(struct psp_context *psp);
+void psp_v3_1_set_psp_funcs(struct psp_context *psp);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 67f375b..6452101 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -93,12 +93,12 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
@@ -246,15 +246,13 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 vmid = vm_id & 0xf;
-
/* IB packet must end on a 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
@@ -291,13 +289,6 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, mmHDP_DEBUG0);
- amdgpu_ring_write(ring, 1);
-}
/**
* sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
*
@@ -348,7 +339,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
if ((adev->mman.buffer_funcs_ring == sdma0) ||
(adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -493,7 +484,7 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
}
if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
@@ -600,7 +591,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -613,7 +604,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -633,13 +624,13 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -662,7 +653,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -704,7 +695,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
@@ -715,7 +706,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -861,22 +852,9 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (VI).
*/
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- if (vm_id < 8) {
- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
- } else {
- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
- }
- amdgpu_ring_write(ring, pd_addr >> 12);
-
- /* flush TLB */
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for flush */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
@@ -890,6 +868,15 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, val);
+}
+
static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1205,9 +1192,9 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.set_wptr = sdma_v2_4_ring_set_wptr,
.emit_frame_size =
6 + /* sdma_v2_4_ring_emit_hdp_flush */
- 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+ 3 + /* hdp invalidate */
6 + /* sdma_v2_4_ring_emit_pipeline_sync */
- 12 + /* sdma_v2_4_ring_emit_vm_flush */
+ VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
.emit_ib = sdma_v2_4_ring_emit_ib,
@@ -1215,11 +1202,11 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
- .emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
.test_ring = sdma_v2_4_ring_test_ring,
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
+ .emit_wreg = sdma_v2_4_ring_emit_wreg,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1318,9 +1305,6 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
.copy_pte = sdma_v2_4_vm_copy_pte,
.write_pte = sdma_v2_4_vm_write_pte,
-
- .set_max_nums_pte_pde = 0x1fffff >> 3,
- .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};
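Note: the open-coded SRBM writes removed from sdma_v2_4_ring_emit_vm_flush() are now emitted by the generic amdgpu_gmc_emit_flush_gpu_tlb() through the new .emit_wreg callback. A rough sketch of the idea, reusing the registers from the deleted code (the helper name is hypothetical, not the real common-code implementation):

	/* Sketch only: generic GMC code picks the registers, the ring only
	 * needs a way to write them (.emit_wreg). */
	static void example_emit_gmc_flush(struct amdgpu_ring *ring,
					   unsigned vmid, uint64_t pd_addr)
	{
		if (vmid < 8)
			ring->funcs->emit_wreg(ring,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid,
				pd_addr >> 12);
		else
			ring->funcs->emit_wreg(ring,
				mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
				pd_addr >> 12);
		/* then request the TLB flush for this VMID */
		ring->funcs->emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
	}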
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6d06f8e..ecaef08 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -192,47 +192,47 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris11_a11,
- (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris10_a11,
- (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- (const u32)ARRAY_SIZE(cz_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_settings_a11,
+ ARRAY_SIZE(cz_golden_settings_a11));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_settings_a11,
+ ARRAY_SIZE(stoney_golden_settings_a11));
break;
default:
break;
@@ -355,7 +355,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 wptr;
- if (ring->use_doorbell) {
+ if (ring->use_doorbell || ring->use_pollmem) {
/* XXX check if swapping is necessary on BE */
wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
} else {
@@ -380,10 +380,13 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
-
/* XXX check if swapping is necessary on BE */
WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
+ } else if (ring->use_pollmem) {
+ u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
+
+ WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
@@ -414,15 +417,13 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 vmid = vm_id & 0xf;
-
/* IB packet must end on a 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
@@ -459,14 +460,6 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, mmHDP_DEBUG0);
- amdgpu_ring_write(ring, 1);
-}
-
/**
* sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
*
@@ -517,7 +510,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
if ((adev->mman.buffer_funcs_ring == sdma0) ||
(adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -718,10 +711,17 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
- if (amdgpu_sriov_vf(adev))
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
- else
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+ if (ring->use_pollmem) {
+ /* wptr polling is not fast enough, directly clear the wptr register */
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ ENABLE, 1);
+ } else {
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ ENABLE, 0);
+ }
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
/* enable DMA RB */
@@ -753,7 +753,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
}
if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
@@ -860,7 +860,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -873,7 +873,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -893,13 +893,13 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -922,7 +922,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -964,7 +964,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
@@ -974,7 +974,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1120,22 +1120,9 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (VI).
*/
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- if (vm_id < 8) {
- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
- } else {
- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
- }
- amdgpu_ring_write(ring, pd_addr >> 12);
-
- /* flush TLB */
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for flush */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
@@ -1149,6 +1136,15 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, val);
+}
+
static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1203,9 +1199,13 @@ static int sdma_v3_0_sw_init(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
- ring->use_doorbell = true;
- ring->doorbell_index = (i == 0) ?
- AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ if (!amdgpu_sriov_vf(adev)) {
+ ring->use_doorbell = true;
+ ring->doorbell_index = (i == 0) ?
+ AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ } else {
+ ring->use_pollmem = true;
+ }
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1628,9 +1628,9 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.set_wptr = sdma_v3_0_ring_set_wptr,
.emit_frame_size =
6 + /* sdma_v3_0_ring_emit_hdp_flush */
- 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+ 3 + /* hdp invalidate */
6 + /* sdma_v3_0_ring_emit_pipeline_sync */
- 12 + /* sdma_v3_0_ring_emit_vm_flush */
+ VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v3_0_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
.emit_ib = sdma_v3_0_ring_emit_ib,
@@ -1638,11 +1638,11 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
.test_ring = sdma_v3_0_ring_test_ring,
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
+ .emit_wreg = sdma_v3_0_ring_emit_wreg,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1741,10 +1741,6 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
.copy_pte = sdma_v3_0_vm_copy_pte,
.write_pte = sdma_v3_0_vm_write_pte,
-
- /* not 0x3fffff due to HW limitation */
- .set_max_nums_pte_pde = 0x3fffe0 >> 3,
- .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};
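Note on the new emit_frame_size arithmetic above (a sketch of the accounting, assuming VI_FLUSH_GPU_TLB_NUM_WREG is the number of register writes the generic VI TLB flush emits):

	/* Each sdma_v3_0_ring_emit_wreg() costs 3 dwords:
	 *   SRBM_WRITE header + register offset + value.
	 * The "wait for flush" POLL_REGMEM packet still emitted by
	 * sdma_v3_0_ring_emit_vm_flush() costs 6 dwords, hence:
	 *   VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6
	 */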
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 46009db..2a81840 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -27,15 +27,12 @@
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
-#include "vega10/soc15ip.h"
-#include "vega10/SDMA0/sdma0_4_0_offset.h"
-#include "vega10/SDMA0/sdma0_4_0_sh_mask.h"
-#include "vega10/SDMA1/sdma1_4_0_offset.h"
-#include "vega10/SDMA1/sdma1_4_0_sh_mask.h"
-#include "vega10/MMHUB/mmhub_1_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "raven1/SDMA0/sdma0_4_1_default.h"
+#include "sdma0/sdma0_4_0_offset.h"
+#include "sdma0/sdma0_4_0_sh_mask.h"
+#include "sdma1/sdma1_4_0_offset.h"
+#include "sdma1/sdma1_4_0_sh_mask.h"
+#include "hdp/hdp_4_0_offset.h"
+#include "sdma0/sdma0_4_1_default.h"
#include "soc15_common.h"
#include "soc15.h"
@@ -43,6 +40,8 @@
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
+MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
@@ -53,97 +52,100 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static const u32 golden_settings_sdma_4[] = {
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0x003ff006, 0x0003c000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), 0xffffffff, 0x3f000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), 0x003ff000, 0x0003c000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0
+static const struct soc15_reg_golden golden_settings_sdma_4[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};
-static const u32 golden_settings_sdma_vg10[] = {
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002
+static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};
-static const u32 golden_settings_sdma_4_1[] =
-{
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0xfc3fffff, 0x40000051,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0111, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0111, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0
+static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
};
-static const u32 golden_settings_sdma_rv1[] =
+static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
{
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00000002,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00000002
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};
-static u32 sdma_v4_0_get_reg_offset(u32 instance, u32 internal_offset)
+static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
{
- u32 base = 0;
-
- switch (instance) {
- case 0:
- base = SDMA0_BASE.instance[0].segment[0];
- break;
- case 1:
- base = SDMA1_BASE.instance[0].segment[0];
- break;
- default:
- BUG();
- break;
- }
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
+};
- return base + internal_offset;
+static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
+ u32 instance, u32 offset)
+{
+ return (instance == 0 ? (adev->reg_offset[SDMA0_HWIP][0][0] + offset) :
+ (adev->reg_offset[SDMA1_HWIP][0][0] + offset));
}
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_4,
- (const u32)ARRAY_SIZE(golden_settings_sdma_4));
- amdgpu_program_register_sequence(adev,
+ ARRAY_SIZE(golden_settings_sdma_4));
+ soc15_program_register_sequence(adev,
golden_settings_sdma_vg10,
- (const u32)ARRAY_SIZE(golden_settings_sdma_vg10));
+ ARRAY_SIZE(golden_settings_sdma_vg10));
+ break;
+ case CHIP_VEGA12:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_4,
+ ARRAY_SIZE(golden_settings_sdma_4));
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_vg12,
+ ARRAY_SIZE(golden_settings_sdma_vg12));
break;
case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
- (const u32)ARRAY_SIZE(golden_settings_sdma_4_1));
- amdgpu_program_register_sequence(adev,
+ ARRAY_SIZE(golden_settings_sdma_4_1));
+ soc15_program_register_sequence(adev,
golden_settings_sdma_rv1,
- (const u32)ARRAY_SIZE(golden_settings_sdma_rv1));
+ ARRAY_SIZE(golden_settings_sdma_rv1));
break;
default:
break;
@@ -177,6 +179,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
case CHIP_VEGA10:
chip_name = "vega10";
break;
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
case CHIP_RAVEN:
chip_name = "raven";
break;
@@ -251,31 +256,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u64 *wptr = NULL;
- uint64_t local_wptr = 0;
+ u64 wptr;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
- wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
- DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
- *wptr = (*wptr) >> 2;
- DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
u32 lowbit, highbit;
int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- wptr = &local_wptr;
- lowbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
+ lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
+ highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
me, highbit, lowbit);
- *wptr = highbit;
- *wptr = (*wptr) << 32;
- *wptr |= lowbit;
+ wptr = highbit;
+ wptr = wptr << 32;
+ wptr |= lowbit;
}
- return *wptr;
+ return wptr >> 2;
}
/**
@@ -315,8 +316,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
}
}
@@ -343,15 +344,13 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 vmid = vm_id & 0xf;
-
/* IB packet must end on a 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
@@ -370,13 +369,9 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
*/
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg;
-
- if (ring->adev->flags & AMD_IS_APU)
- nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
- else
- nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
if (ring == &ring->adev->sdma.instance[0].ring)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -386,22 +381,14 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_done_offset << 2);
- amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_req_offset << 2);
+ amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
-static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
- amdgpu_ring_write(ring, 1);
-}
-
/**
* sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
*
@@ -457,15 +444,15 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
if ((adev->mman.buffer_funcs_ring == sdma0) ||
(adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL));
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
sdma0->ready = false;
@@ -522,18 +509,18 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
+ f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
if (enable && amdgpu_sdma_phase_quantum) {
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE0_QUANTUM),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE1_QUANTUM),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE2_QUANTUM),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
}
}
@@ -557,9 +544,9 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL));
+ f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), f32_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
}
}
@@ -587,48 +574,48 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_HI), 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
/* set the wb address whether it's enabled or not */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
ring->wptr = 0;
/* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
}
- doorbell = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET));
+ doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+ doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
@@ -637,55 +624,53 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
} else {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
}
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
- else
- nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+ adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index);
if (amdgpu_sriov_vf(adev))
sdma_v4_0_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr programed */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
/* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
+ temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), temp);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
if (!amdgpu_sriov_vf(adev)) {
/* unhalt engine */
- temp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL));
+ temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
}
/* setup the wptr shadow polling */
wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
lower_32_bits(wptr_gpu_addr));
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
if (amdgpu_sriov_vf(adev))
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
else
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
ring->ready = true;
@@ -701,7 +686,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
}
if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
@@ -816,12 +801,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
for (j = 0; j < fw_size; j++)
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
}
return 0;
@@ -886,7 +871,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -899,7 +884,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -919,13 +904,13 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -948,7 +933,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u32 tmp = 0;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -990,7 +975,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
@@ -1000,7 +985,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1152,39 +1137,30 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (VEGA10).
*/
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- unsigned eng = ring->vm_inv_eng;
-
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
- amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+}
- /* flush TLB */
+static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng);
- amdgpu_ring_write(ring, req);
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, val);
+}
- /* wait for flush */
+static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+ amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1 << vm_id); /* reference */
- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, val); /* reference */
+ amdgpu_ring_write(ring, mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
@@ -1214,13 +1190,13 @@ static int sdma_v4_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA0, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA1, 224,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
&adev->sdma.trap_irq);
if (r)
return r;
@@ -1317,7 +1293,7 @@ static bool sdma_v4_0_is_idle(void *handle)
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- u32 tmp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_STATUS_REG));
+ u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
@@ -1333,8 +1309,8 @@ static int sdma_v4_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- sdma0 = RREG32(sdma_v4_0_get_reg_offset(0, mmSDMA0_STATUS_REG));
- sdma1 = RREG32(sdma_v4_0_get_reg_offset(1, mmSDMA0_STATUS_REG));
+ sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
+ sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
return 0;
@@ -1358,8 +1334,8 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
- sdma_v4_0_get_reg_offset(0, mmSDMA0_CNTL) :
- sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL);
+ sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
+ sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
@@ -1375,7 +1351,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
{
DRM_DEBUG("IH: SDMA trap\n");
switch (entry->client_id) {
- case AMDGPU_IH_CLIENTID_SDMA0:
+ case SOC15_IH_CLIENTID_SDMA0:
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[0].ring);
@@ -1391,7 +1367,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_IH_CLIENTID_SDMA1:
+ case SOC15_IH_CLIENTID_SDMA1:
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[1].ring);
@@ -1441,7 +1417,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
- if (adev->asic_type == CHIP_VEGA10) {
+ if (adev->sdma.num_instances > 1) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
@@ -1469,7 +1445,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);
- if (adev->asic_type == CHIP_VEGA10) {
+ if (adev->sdma.num_instances > 1) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
@@ -1500,7 +1476,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
/* 1-not override: enable sdma1 mem light sleep */
- if (adev->asic_type == CHIP_VEGA10) {
+ if (adev->sdma.num_instances > 1) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
@@ -1514,7 +1490,7 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
/* 0-override:disable sdma1 mem light sleep */
- if (adev->asic_type == CHIP_VEGA10) {
+ if (adev->sdma.num_instances > 1) {
def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
@@ -1533,6 +1509,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_RAVEN:
sdma_v4_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -1610,9 +1587,11 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.set_wptr = sdma_v4_0_ring_set_wptr,
.emit_frame_size =
6 + /* sdma_v4_0_ring_emit_hdp_flush */
- 3 + /* sdma_v4_0_ring_emit_hdp_invalidate */
+ 3 + /* hdp invalidate */
6 + /* sdma_v4_0_ring_emit_pipeline_sync */
- 18 + /* sdma_v4_0_ring_emit_vm_flush */
+ /* sdma_v4_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
.emit_ib = sdma_v4_0_ring_emit_ib,
@@ -1620,11 +1599,12 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = sdma_v4_0_ring_emit_hdp_invalidate,
.test_ring = sdma_v4_0_ring_test_ring,
.test_ib = sdma_v4_0_ring_test_ib,
.insert_nop = sdma_v4_0_ring_insert_nop,
.pad_ib = sdma_v4_0_ring_pad_ib,
+ .emit_wreg = sdma_v4_0_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
};
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1659,7 +1639,7 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
*
- * Copy GPU buffers using the DMA engine (VEGA10).
+ * Copy GPU buffers using the DMA engine (VEGA10/12).
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
@@ -1686,7 +1666,7 @@ static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
*
- * Fill GPU buffers using the DMA engine (VEGA10).
+ * Fill GPU buffers using the DMA engine (VEGA10/12).
*/
static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
@@ -1723,9 +1703,6 @@ static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
.copy_pte = sdma_v4_0_vm_copy_pte,
.write_pte = sdma_v4_0_vm_write_pte,
-
- .set_max_nums_pte_pde = 0x400000 >> 3,
- .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
};
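The reworked .emit_frame_size above swaps the hard-coded 18-dword VM-flush estimate for the SOC15 GMC constants added later in this series. A worked total, plugging in SOC15_FLUSH_GPU_TLB_NUM_WREG = 4 and SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT = 1 from soc15.h (a sketch of the arithmetic only, not code from the patch):
/* Dword budget reserved per SDMA v4 frame after this change:
 *   hdp flush                               6
 *   hdp invalidate                          3
 *   pipeline sync                           6
 *   vm flush: 4 WREGs    x 3 dw        =   12
 *             1 REG_WAIT x 6 dw        =    6
 *   fence x3 (user fence, vm fence)        30
 *   -----------------------------------------
 *   total                                  63 dwords
 */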
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 8284d5d..b154667 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -31,7 +31,8 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"
-#include "amdgpu_powerplay.h"
+#include "amd_pcie.h"
+#include "si_dpm.h"
#include "sid.h"
#include "si_ih.h"
#include "gfx_v6_0.h"
@@ -1230,6 +1231,27 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
+static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+ RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+ } else {
+ amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+ }
+}
+
+static void si_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32(mmHDP_DEBUG0, 1);
+ RREG32(mmHDP_DEBUG0);
+ } else {
+ amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+ }
+}
+
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
@@ -1241,6 +1263,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.set_uvd_clocks = &si_set_uvd_clocks,
.set_vce_clocks = NULL,
.get_config_memsize = &si_get_config_memsize,
+ .flush_hdp = &si_flush_hdp,
+ .invalidate_hdp = &si_invalidate_hdp,
};
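si_flush_hdp() and si_invalidate_hdp() keep the old MMIO behaviour for CPU-driven paths while letting rings that implement .emit_wreg do the HDP maintenance in-band. A minimal caller-side sketch, assuming only the .flush_hdp/.invalidate_hdp hooks introduced above; the wrapper itself is illustrative, not part of this patch:
/* Illustrative dispatch through the new asic callbacks; on a ring with
 * .emit_wreg this becomes ring packets, otherwise it falls back to the
 * direct register writes in si_flush_hdp()/si_invalidate_hdp(). */
static inline void example_hdp_sync(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
	adev->asic_funcs->flush_hdp(adev, ring);
	adev->asic_funcs->invalidate_hdp(adev, ring);
}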
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -1390,65 +1414,65 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TAHITI:
- amdgpu_program_register_sequence(adev,
- tahiti_golden_registers,
- (const u32)ARRAY_SIZE(tahiti_golden_registers));
- amdgpu_program_register_sequence(adev,
- tahiti_golden_rlc_registers,
- (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- tahiti_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- tahiti_golden_registers2,
- (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_golden_registers,
+ ARRAY_SIZE(tahiti_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_golden_rlc_registers,
+ ARRAY_SIZE(tahiti_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_mgcg_cgcg_init,
+ ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_golden_registers2,
+ ARRAY_SIZE(tahiti_golden_registers2));
break;
case CHIP_PITCAIRN:
- amdgpu_program_register_sequence(adev,
- pitcairn_golden_registers,
- (const u32)ARRAY_SIZE(pitcairn_golden_registers));
- amdgpu_program_register_sequence(adev,
- pitcairn_golden_rlc_registers,
- (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- pitcairn_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ pitcairn_golden_registers,
+ ARRAY_SIZE(pitcairn_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ pitcairn_golden_rlc_registers,
+ ARRAY_SIZE(pitcairn_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ pitcairn_mgcg_cgcg_init,
+ ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
break;
case CHIP_VERDE:
- amdgpu_program_register_sequence(adev,
- verde_golden_registers,
- (const u32)ARRAY_SIZE(verde_golden_registers));
- amdgpu_program_register_sequence(adev,
- verde_golden_rlc_registers,
- (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- verde_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- verde_pg_init,
- (const u32)ARRAY_SIZE(verde_pg_init));
+ amdgpu_device_program_register_sequence(adev,
+ verde_golden_registers,
+ ARRAY_SIZE(verde_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ verde_golden_rlc_registers,
+ ARRAY_SIZE(verde_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ verde_mgcg_cgcg_init,
+ ARRAY_SIZE(verde_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ verde_pg_init,
+ ARRAY_SIZE(verde_pg_init));
break;
case CHIP_OLAND:
- amdgpu_program_register_sequence(adev,
- oland_golden_registers,
- (const u32)ARRAY_SIZE(oland_golden_registers));
- amdgpu_program_register_sequence(adev,
- oland_golden_rlc_registers,
- (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- oland_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ oland_golden_registers,
+ ARRAY_SIZE(oland_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ oland_golden_rlc_registers,
+ ARRAY_SIZE(oland_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ oland_mgcg_cgcg_init,
+ ARRAY_SIZE(oland_mgcg_cgcg_init));
break;
case CHIP_HAINAN:
- amdgpu_program_register_sequence(adev,
- hainan_golden_registers,
- (const u32)ARRAY_SIZE(hainan_golden_registers));
- amdgpu_program_register_sequence(adev,
- hainan_golden_registers2,
- (const u32)ARRAY_SIZE(hainan_golden_registers2));
- amdgpu_program_register_sequence(adev,
- hainan_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ hainan_golden_registers,
+ ARRAY_SIZE(hainan_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hainan_golden_registers2,
+ ARRAY_SIZE(hainan_golden_registers2));
+ amdgpu_device_program_register_sequence(adev,
+ hainan_mgcg_cgcg_init,
+ ARRAY_SIZE(hainan_mgcg_cgcg_init));
break;
@@ -1461,8 +1485,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
int bridge_pos, gpu_pos;
- u32 speed_cntl, mask, current_data_rate;
- int ret, i;
+ u32 speed_cntl, current_data_rate;
+ int i;
u16 tmp16;
if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1498,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return;
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
- } else if (mask & DRM_PCIE_SPEED_50) {
+ } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -1506,7 +1527,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
if (!gpu_pos)
return;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1610,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
- if (mask & DRM_PCIE_SPEED_80)
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
tmp16 |= 3;
- else if (mask & DRM_PCIE_SPEED_50)
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
tmp16 |= 2;
else
tmp16 |= 1;
@@ -1959,42 +1980,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
- amdgpu_ip_block_add(adev, &si_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
- amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_dma_ip_block);
- /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
- /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_OLAND:
- amdgpu_ip_block_add(adev, &si_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
- amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_dma_ip_block);
- /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
- /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
- amdgpu_ip_block_add(adev, &si_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 5892250..06ed721 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -24,6 +24,8 @@
#ifndef __SI_H__
#define __SI_H__
+#define SI_FLUSH_GPU_TLB_NUM_WREG 2
+
void si_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int si_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 3fa2fbf..b75d901 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -24,6 +24,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "si.h"
#include "sid.h"
const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -61,33 +62,19 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
* Pad as necessary with NOPs.
*/
while ((lower_32_bits(ring->wptr) & 7) != 5)
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
- amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+ amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
-static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
- amdgpu_ring_write(ring, 1);
-}
-
-static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
- amdgpu_ring_write(ring, 1);
-}
-
/**
* si_dma_ring_emit_fence - emit a fence on the DMA ring
*
@@ -134,7 +121,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
ring->ready = false;
}
}
@@ -197,7 +184,7 @@ static int si_dma_start(struct amdgpu_device *adev)
}
if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
@@ -221,7 +208,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -234,7 +221,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 4);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -252,13 +239,13 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -281,7 +268,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -317,7 +304,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
@@ -328,7 +315,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -473,29 +460,27 @@ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (SI).
*/
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
- amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- if (vm_id < 8)
- amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
- else
- amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
- amdgpu_ring_write(ring, pd_addr >> 12);
-
- /* bits 0-7 are the VM contexts0-7 */
- amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for invalidate to complete */
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0xff << 16); /* retry */
- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, 1 << vmid); /* mask */
amdgpu_ring_write(ring, 0); /* value */
amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
+static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, (0xf << 16) | reg);
+ amdgpu_ring_write(ring, val);
+}
+
static int si_dma_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -772,22 +757,20 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
.get_wptr = si_dma_ring_get_wptr,
.set_wptr = si_dma_ring_set_wptr,
.emit_frame_size =
- 3 + /* si_dma_ring_emit_hdp_flush */
- 3 + /* si_dma_ring_emit_hdp_invalidate */
+ 3 + 3 + /* hdp flush / invalidate */
6 + /* si_dma_ring_emit_pipeline_sync */
- 12 + /* si_dma_ring_emit_vm_flush */
+ SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
.emit_ib = si_dma_ring_emit_ib,
.emit_fence = si_dma_ring_emit_fence,
.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
.emit_vm_flush = si_dma_ring_emit_vm_flush,
- .emit_hdp_flush = si_dma_ring_emit_hdp_flush,
- .emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
.test_ring = si_dma_ring_test_ring,
.test_ib = si_dma_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = si_dma_ring_pad_ib,
+ .emit_wreg = si_dma_ring_emit_wreg,
};
static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -891,9 +874,6 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
.copy_pte = si_dma_vm_copy_pte,
.write_pte = si_dma_vm_write_pte,
-
- .set_max_nums_pte_pde = 0xffff8 >> 3,
- .set_pte_pde_num_dw = 9,
.set_pte_pde = si_dma_vm_set_pte_pde,
};
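The open-coded page-table write and invalidate request in si_dma_ring_emit_vm_flush() are replaced by the generic amdgpu_gmc_emit_flush_gpu_tlb() helper, which reaches the registers through the new .emit_wreg callback. Rough dword accounting, assuming the helper issues the same two SRBM writes the removed code did:
/* SI_FLUSH_GPU_TLB_NUM_WREG is 2 (see si.h above):
 *   2 writes via emit_wreg x 3 dw  =  6   (page table base, invalidate request)
 *   poll on VM_INVALIDATE_REQUEST  =  6
 *   ---------------------------------------
 *   total                            12 dw, matching the removed
 *                                    hard-coded estimate.
 */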
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 51fd0c9..672eaff 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -26,6 +26,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
+#include "amd_pcie.h"
#include "sid.h"
#include "r600_dpm.h"
#include "si_dpm.h"
@@ -66,6 +67,8 @@ MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+static const struct amd_pm_funcs si_dpm_funcs;
+
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
@@ -3064,7 +3067,7 @@ static bool si_dpm_vblank_too_short(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we never hit the non-gddr5 limit so disable it */
- u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+ u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
if (vblank_time < switch_limit)
return true;
@@ -3331,29 +3334,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
}
}
-static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen)
-{
- switch (asic_gen) {
- case AMDGPU_PCIE_GEN1:
- return AMDGPU_PCIE_GEN1;
- case AMDGPU_PCIE_GEN2:
- return AMDGPU_PCIE_GEN2;
- case AMDGPU_PCIE_GEN3:
- return AMDGPU_PCIE_GEN3;
- default:
- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
- return AMDGPU_PCIE_GEN3;
- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
- return AMDGPU_PCIE_GEN2;
- else
- return AMDGPU_PCIE_GEN1;
- }
- return AMDGPU_PCIE_GEN1;
-}
-
static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u)
{
@@ -3464,6 +3444,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
}
+ if ((adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6665)) {
+ max_sclk = 60000;
+ max_mclk = 80000;
+ }
} else if (adev->asic_type == CHIP_OLAND) {
if ((adev->pdev->revision == 0xC7) ||
(adev->pdev->revision == 0x80) ||
@@ -4345,7 +4330,7 @@ static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
if (mclk <= pi->mclk_strobe_mode_threshold)
strobe_mode = true;
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
else
result = si_get_ddr3_mclk_frequency_ratio(mclk);
@@ -4932,7 +4917,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
table->initialState.levels[0].strobeMode =
si_get_strobe_mode_settings(adev,
initial_state->performance_levels[0].mclk);
@@ -5023,10 +5008,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
table->ACPIState.levels[0].vddc.index,
&table->ACPIState.levels[0].std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- AMDGPU_PCIE_GEN1);
+ table->ACPIState.levels[0].gen2PCIE =
+ (u8)amdgpu_get_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ AMDGPU_PCIE_GEN1);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(adev,
@@ -5203,7 +5189,7 @@ static int si_init_smc_table(struct amdgpu_device *adev)
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
@@ -5380,7 +5366,7 @@ static int si_populate_mclk_value(struct amdgpu_device *adev,
mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
YCLK_POST_DIV(mpll_param.post_div);
@@ -5392,7 +5378,7 @@ static int si_populate_mclk_value(struct amdgpu_device *adev,
u32 tmp;
u32 reference_clock = adev->clock.mpll.reference_freq;
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
freq_nom = memory_clock * 4;
else
freq_nom = memory_clock * 2;
@@ -5484,7 +5470,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
}
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
if (pl->mclk > pi->mclk_edc_enable_threshold)
level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
@@ -5845,9 +5831,9 @@ static int si_set_mc_special_registers(struct amdgpu_device *adev,
((temp_reg & 0xffff0000)) |
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
j++;
+
if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
-
temp_reg = RREG32(MC_PMG_CMD_MRS);
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
@@ -5855,22 +5841,20 @@ static int si_set_mc_special_registers(struct amdgpu_device *adev,
table->mc_reg_table_entry[k].mc_data[j] =
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+ if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
- if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+ if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++)
table->mc_reg_table_entry[k].mc_data[j] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
j++;
- if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
}
break;
case MC_SEQ_RESERVE_M:
@@ -5882,8 +5866,6 @@ static int si_set_mc_special_registers(struct amdgpu_device *adev,
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
j++;
- if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
break;
default:
break;
@@ -7167,10 +7149,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
pl->flags = le32_to_cpu(clock_info->si.ulFlags);
- pl->pcie_gen = r600_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- clock_info->si.ucPCIEGen);
+ pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ clock_info->si.ucPCIEGen);
/* patch up vddc if necessary */
ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7325,7 +7307,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
int ret;
- u32 mask;
si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
if (si_pi == NULL)
@@ -7335,11 +7316,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (ret)
- si_pi->sys_pcie_mask = 0;
- else
- si_pi->sys_pcie_mask = mask;
+ si_pi->sys_pcie_mask =
+ (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+ CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
@@ -7937,6 +7916,8 @@ static int si_dpm_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->powerplay.pp_funcs = &si_dpm_funcs;
+ adev->powerplay.pp_handle = adev;
si_dpm_set_irq_funcs(adev);
return 0;
}
@@ -8037,7 +8018,7 @@ static int si_dpm_read_sensor(void *handle, int idx,
}
}
-const struct amd_ip_funcs si_dpm_ip_funcs = {
+static const struct amd_ip_funcs si_dpm_ip_funcs = {
.name = "si_dpm",
.early_init = si_dpm_early_init,
.late_init = si_dpm_late_init,
@@ -8054,8 +8035,16 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
.set_powergating_state = si_dpm_set_powergating_state,
};
-const struct amd_pm_funcs si_dpm_funcs = {
- .get_temperature = &si_dpm_get_temp,
+const struct amdgpu_ip_block_version si_smu_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dpm_ip_funcs,
+};
+
+static const struct amd_pm_funcs si_dpm_funcs = {
.pre_set_power_state = &si_dpm_pre_set_power_state,
.set_power_state = &si_dpm_set_power_state,
.post_set_power_state = &si_dpm_post_set_power_state,
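With the exported si_dpm_funcs symbol gone, generic power-management code is expected to reach the SI backend through the table and handle that si_dpm_early_init() installs. A hedged sketch of that call path; the two powerplay fields come from this patch, the guard-and-call pattern around them is an assumption:
/* Illustrative only: dispatch through adev->powerplay rather than a
 * direct call into si_dpm. */
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->pre_set_power_state)
	adev->powerplay.pp_funcs->pre_set_power_state(adev->powerplay.pp_handle);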
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
index 9fe343d..6b7d292 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -245,8 +245,7 @@ enum si_display_gap
SI_PM_DISPLAY_GAP_IGNORE = 3,
};
-extern const struct amd_ip_funcs si_dpm_ip_funcs;
-extern const struct amd_pm_funcs si_dpm_funcs;
+extern const struct amdgpu_ip_block_version si_smu_ip_block;
struct ni_leakage_coeffients
{
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index d2c6b80..60dad63 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -146,7 +146,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
adev->irq.ih.rptr += 16;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4e67fe1..51cf8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -34,18 +34,17 @@
#include "atom.h"
#include "amd_pcie.h"
-#include "vega10/soc15ip.h"
-#include "vega10/UVD/uvd_7_0_offset.h"
-#include "vega10/GC/gc_9_0_offset.h"
-#include "vega10/GC/gc_9_0_sh_mask.h"
-#include "vega10/SDMA0/sdma0_4_0_offset.h"
-#include "vega10/SDMA1/sdma1_4_0_offset.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "vega10/HDP/hdp_4_0_sh_mask.h"
-#include "vega10/MP/mp_9_0_offset.h"
-#include "vega10/MP/mp_9_0_sh_mask.h"
-#include "vega10/SMUIO/smuio_9_0_offset.h"
-#include "vega10/SMUIO/smuio_9_0_sh_mask.h"
+#include "uvd/uvd_7_0_offset.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "sdma0/sdma0_4_0_offset.h"
+#include "sdma1/sdma1_4_0_offset.h"
+#include "hdp/hdp_4_0_offset.h"
+#include "hdp/hdp_4_0_sh_mask.h"
+#include "mp/mp_9_0_offset.h"
+#include "mp/mp_9_0_sh_mask.h"
+#include "smuio/smuio_9_0_offset.h"
+#include "smuio/smuio_9_0_sh_mask.h"
#include "soc15.h"
#include "soc15_common.h"
@@ -58,7 +57,6 @@
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
-#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
@@ -101,15 +99,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- const struct nbio_pcie_index_data *nbio_pcie_id;
-
- if (adev->flags & AMD_IS_APU)
- nbio_pcie_id = &nbio_v7_0_pcie_index_data;
- else
- nbio_pcie_id = &nbio_v6_1_pcie_index_data;
-
- address = nbio_pcie_id->index_offset;
- data = nbio_pcie_id->data_offset;
+ address = adev->nbio_funcs->get_pcie_index_offset(adev);
+ data = adev->nbio_funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -122,15 +113,9 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- const struct nbio_pcie_index_data *nbio_pcie_id;
-
- if (adev->flags & AMD_IS_APU)
- nbio_pcie_id = &nbio_v7_0_pcie_index_data;
- else
- nbio_pcie_id = &nbio_v6_1_pcie_index_data;
- address = nbio_pcie_id->index_offset;
- data = nbio_pcie_id->data_offset;
+ address = adev->nbio_funcs->get_pcie_index_offset(adev);
+ data = adev->nbio_funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -242,41 +227,9 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
- if (adev->flags & AMD_IS_APU)
- return nbio_v7_0_get_memsize(adev);
- else
- return nbio_v6_1_get_memsize(adev);
+ return adev->nbio_funcs->get_memsize(adev);
}
-static const u32 vega10_golden_init[] =
-{
-};
-
-static const u32 raven_golden_init[] =
-{
-};
-
-static void soc15_init_golden_registers(struct amdgpu_device *adev)
-{
- /* Some of the registers might be dependent on GRBM_GFX_INDEX */
- mutex_lock(&adev->grbm_idx_mutex);
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
- vega10_golden_init,
- (const u32)ARRAY_SIZE(vega10_golden_init));
- break;
- case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
- raven_golden_init,
- (const u32)ARRAY_SIZE(raven_golden_init));
- break;
- default:
- break;
- }
- mutex_unlock(&adev->grbm_idx_mutex);
-}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
return adev->clock.spll.reference_freq;
@@ -332,25 +285,34 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
- { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
- { SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
- { SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
+struct soc15_allowed_register_entry {
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ bool grbm_indexed;
+};
+
+
+static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
+ { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
+ { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@@ -377,12 +339,9 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
if (indexed) {
return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
} else {
- switch (reg_offset) {
- case SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG):
+ if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
return adev->gfx.config.gb_addr_config;
- default:
- return RREG32(reg_offset);
- }
+ return RREG32(reg_offset);
}
}
@@ -390,10 +349,13 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
uint32_t i;
+ struct soc15_allowed_register_entry *en;
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
- if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
+ en = &soc15_allowed_read_registers[i];
+ if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ + en->reg_offset))
continue;
*value = soc15_get_register_value(adev,
@@ -404,6 +366,43 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
+
+/**
+ * soc15_program_register_sequence - program an array of registers.
+ *
+ * @adev: amdgpu_device pointer
+ * @regs: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+
+void soc15_program_register_sequence(struct amdgpu_device *adev,
+ const struct soc15_reg_golden *regs,
+ const u32 array_size)
+{
+ const struct soc15_reg_golden *entry;
+ u32 tmp, reg;
+ int i;
+
+ for (i = 0; i < array_size; ++i) {
+ entry = &regs[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+
+ if (entry->and_mask == 0xffffffff) {
+ tmp = entry->or_mask;
+ } else {
+ tmp = RREG32(reg);
+ tmp &= ~(entry->and_mask);
+ tmp |= entry->or_mask;
+ }
+ WREG32(reg, tmp);
+ }
+
+}
+
+
static int soc15_asic_reset(struct amdgpu_device *adev)
{
u32 i;
@@ -417,20 +416,14 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
pci_save_state(adev->pdev);
- for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) {
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP){
- adev->ip_blocks[i].version->funcs->soft_reset((void *)adev);
- break;
- }
- }
+ psp_gpu_reset(adev);
pci_restore_state(adev->pdev);
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = (adev->flags & AMD_IS_APU) ?
- nbio_v7_0_get_memsize(adev) :
- nbio_v6_1_get_memsize(adev);
+ u32 memsize = adev->nbio_funcs->get_memsize(adev);
+
if (memsize != 0xffffffff)
break;
udelay(1);
@@ -495,14 +488,10 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
+ bool enable)
{
- if (adev->flags & AMD_IS_APU) {
- nbio_v7_0_enable_doorbell_aperture(adev, enable);
- } else {
- nbio_v6_1_enable_doorbell_aperture(adev, enable);
- nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
- }
+ adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -516,50 +505,66 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
- nbio_v6_1_detect_hw_virt(adev);
+ /* Set IP register base before any HW register access */
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_RAVEN:
+ vega10_reg_base_init(adev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (adev->flags & AMD_IS_APU)
+ adev->nbio_funcs = &nbio_v7_0_funcs;
+ else
+ adev->nbio_funcs = &nbio_v6_1_funcs;
+
+ adev->nbio_funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
- if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
- amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
+ case CHIP_VEGA12:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
- amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
break;
case CHIP_RAVEN:
- amdgpu_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
- amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
- amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
break;
default:
return -EINVAL;
@@ -570,10 +575,22 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
- if (adev->flags & AMD_IS_APU)
- return nbio_v7_0_get_rev_id(adev);
+ return adev->nbio_funcs->get_rev_id(adev);
+}
+
+static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ adev->nbio_funcs->hdp_flush(adev, ring);
+}
+
+static void soc15_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg)
+ WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
else
- return nbio_v6_1_get_rev_id(adev);
+ amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+ HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static const struct amdgpu_asic_funcs soc15_asic_funcs =
@@ -587,11 +604,12 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
+ .flush_hdp = &soc15_flush_hdp,
+ .invalidate_hdp = &soc15_invalidate_hdp,
};
static int soc15_common_early_init(void *handle)
{
- bool psp_enabled = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->smc_rreg = NULL;
@@ -609,10 +627,6 @@ static int soc15_common_early_init(void *handle)
adev->asic_funcs = &soc15_asic_funcs;
- if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
- (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
- psp_enabled = true;
-
adev->rev_id = soc15_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
@@ -639,6 +653,28 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = 0x1;
break;
+ case CHIP_VEGA12:
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x14;
+ break;
case CHIP_RAVEN:
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -659,8 +695,8 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA |
- AMD_PG_SUPPORT_MMHUB;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA;
+
adev->external_rev_id = 0x1;
break;
default:
@@ -673,10 +709,6 @@ static int soc15_common_early_init(void *handle)
xgpu_ai_mailbox_set_irq_funcs(adev);
}
- adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
- amdgpu_get_pcie_info(adev);
-
return 0;
}
@@ -709,15 +741,12 @@ static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* move the golden regs per IP block */
- soc15_init_golden_registers(adev);
/* enable pcie gen2/3 link */
soc15_pcie_gen3_enable(adev);
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
- if (!(adev->flags & AMD_IS_APU))
- nbio_v6_1_init_registers(adev);
+ adev->nbio_funcs->init_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
@@ -878,9 +907,10 @@ static int soc15_common_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
- nbio_v6_1_update_medium_grain_clock_gating(adev,
+ case CHIP_VEGA12:
+ adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- nbio_v6_1_update_medium_grain_light_sleep(adev,
+ adev->nbio_funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -894,9 +924,9 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_RAVEN:
- nbio_v7_0_update_medium_grain_clock_gating(adev,
+ adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- nbio_v6_1_update_medium_grain_light_sleep(adev,
+ adev->nbio_funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -921,7 +951,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- nbio_v6_1_get_clockgating_state(adev, flags);
+ adev->nbio_funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index acb3cdb..f70da8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -27,10 +27,33 @@
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
+#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4
+#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1
+
extern const struct amd_ip_funcs soc15_common_ip_funcs;
+struct soc15_reg_golden {
+ u32 hwip;
+ u32 instance;
+ u32 segment;
+ u32 reg;
+ u32 and_mask;
+ u32 or_mask;
+};
+
+#define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg
+
+#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
+ { ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int soc15_set_ip_blocks(struct amdgpu_device *adev);
+void soc15_program_register_sequence(struct amdgpu_device *adev,
+ const struct soc15_reg_golden *registers,
+ const u32 array_size);
+
+int vega10_reg_base_init(struct amdgpu_device *adev);
+
#endif
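The SOC15_REG_GOLDEN_VALUE() macro pairs with soc15_program_register_sequence() from soc15.c to replace the old flat u32 golden-register arrays. A usage sketch; the register choices and mask values below are placeholders, only the macro and helper usage reflect this series:
/* Hypothetical golden table: an and_mask of 0xffffffff takes the
 * straight-write path in soc15_program_register_sequence(), anything
 * else does a read-modify-write. */
static const struct soc15_reg_golden example_golden_init[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffffffff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000000, 0x00000100),
};
/* ...programmed from an asic-specific init path: */
soc15_program_register_sequence(adev, example_golden_init,
				ARRAY_SIZE(example_golden_init));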
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 7a8e4e2..def8650 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -24,72 +24,28 @@
#ifndef __SOC15_COMMON_H__
#define __SOC15_COMMON_H__
-struct nbio_hdp_flush_reg {
- u32 hdp_flush_req_offset;
- u32 hdp_flush_done_offset;
- u32 ref_and_mask_cp0;
- u32 ref_and_mask_cp1;
- u32 ref_and_mask_cp2;
- u32 ref_and_mask_cp3;
- u32 ref_and_mask_cp4;
- u32 ref_and_mask_cp5;
- u32 ref_and_mask_cp6;
- u32 ref_and_mask_cp7;
- u32 ref_and_mask_cp8;
- u32 ref_and_mask_cp9;
- u32 ref_and_mask_sdma0;
- u32 ref_and_mask_sdma1;
-};
-
-struct nbio_pcie_index_data {
- u32 index_offset;
- u32 data_offset;
-};
-
/* Register Access Macros */
-#define SOC15_REG_OFFSET(ip, inst, reg) (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg)))))
+#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define WREG32_FIELD15(ip, idx, reg, field, val) \
- WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+ WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
+ (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
+ & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define RREG32_SOC15(ip, inst, reg) \
- RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))))
+ RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
- RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))) + offset)
+ RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
#define WREG32_SOC15(ip, inst, reg, value) \
- WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+ WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
- WREG32_NO_KIQ( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+ WREG32_NO_KIQ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
- WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))) + offset, value)
+ WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
#endif
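The register-access macros now index the per-device adev->reg_offset table instead of chaining through compile-time SEG constants. A purely mechanical expansion example, using a register already present in the allowed-read list above:
/* WREG32_SOC15(GC, 0, mmGRBM_STATUS, 0)
 * expands, after token pasting, to:
 *   WREG32((adev->reg_offset[GC_HWIP][0][mmGRBM_STATUS_BASE_IDX]
 *           + mmGRBM_STATUS), 0);
 * so the IP segment base is looked up at runtime per device instead of
 * being baked in at compile time.
 */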
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index aa4e320..52853d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -107,7 +107,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
tonga_ih_disable_interrupts(adev);
/* setup interrupt control */
- WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+ WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -270,8 +270,8 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
- entry->pas_id = (dw[2] >> 16) & 0xffff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
adev->irq.ih.rptr += 16;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 8ab0f78..948bb943 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -464,32 +464,6 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
- * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp flush.
- */
-static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
- amdgpu_ring_write(ring, 0);
-}
-
-/**
- * uvd_v4_2_ring_hdp_invalidate - emit an hdp invalidate
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
- */
-static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
- amdgpu_ring_write(ring, 1);
-}
-
-/**
* uvd_v4_2_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -521,7 +495,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
@@ -541,7 +515,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
*/
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
amdgpu_ring_write(ring, ib->gpu_addr);
@@ -563,7 +537,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
/* programm the VCPU memory controller bits 0-27 */
addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3;
+ size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
@@ -765,14 +739,10 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.set_wptr = uvd_v4_2_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_frame_size =
- 2 + /* uvd_v4_2_ring_emit_hdp_flush */
- 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
- .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
- .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
.test_ring = uvd_v4_2_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index bb6d46e..6445d55 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -258,7 +258,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
upper_32_bits(adev->uvd.gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+ size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
@@ -479,32 +479,6 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
- * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp flush.
- */
-static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
- amdgpu_ring_write(ring, 0);
-}
-
-/**
- * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
- */
-static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
- amdgpu_ring_write(ring, 1);
-}
-
-/**
* uvd_v5_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -536,7 +510,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
@@ -556,7 +530,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
*/
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -873,14 +847,10 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.set_wptr = uvd_v5_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_frame_size =
- 2 + /* uvd_v5_0_ring_emit_hdp_flush */
- 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
- .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v5_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 920910a..f26f515 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -37,6 +37,9 @@
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
+/* Polaris10/11/12 firmware version */
+#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
+
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -58,7 +61,9 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
*/
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
- return ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_POLARIS12));
+ return ((adev->asic_type >= CHIP_POLARIS10) &&
+ (adev->asic_type <= CHIP_POLARIS12) &&
+ (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}
/**
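
The new FW_1_130_16 constant packs major/minor/revision into the same layout as adev->uvd.fw_version, so the Polaris ENC gate above is a single integer compare. A small sketch of that check; the macro and helper names here are assumptions, not taken from this patch.

#define EXAMPLE_UVD_FW_VER(major, minor, rev) \
	(((major) << 24) | ((minor) << 16) | ((rev) << 8))

static bool example_enc_fw_ok(uint32_t fw_version)
{
	/* 0 means the loaded firmware version is not known yet; the hunk
	 * above treats that as supported via !adev->uvd.fw_version */
	return !fw_version || fw_version >= EXAMPLE_UVD_FW_VER(1, 130, 16);
}
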
@@ -184,7 +189,7 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed\n",
@@ -360,7 +365,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
}
error:
@@ -411,12 +416,20 @@ static int uvd_v6_0_sw_init(void *handle)
if (r)
return r;
- if (uvd_v6_0_enc_support(adev)) {
- struct amd_sched_rq *rq;
+ if (!uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ adev->uvd.ring_enc[i].funcs = NULL;
+
+ adev->uvd.irq.num_types = 1;
+ adev->uvd.num_enc_rings = 0;
+
+ DRM_INFO("UVD ENC is disabled\n");
+ } else {
+ struct drm_sched_rq *rq;
ring = &adev->uvd.ring_enc[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
- rq, amdgpu_sched_jobs);
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
return r;
@@ -456,7 +469,7 @@ static int uvd_v6_0_sw_fini(void *handle)
return r;
if (uvd_v6_0_enc_support(adev)) {
- amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+ drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
@@ -603,7 +616,7 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
upper_32_bits(adev->uvd.gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+ size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
@@ -951,32 +964,6 @@ static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
}
/**
- * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp flush.
- */
-static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
- amdgpu_ring_write(ring, 0);
-}
-
-/**
- * uvd_v6_0_ring_hdp_invalidate - emit an hdp invalidate
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
- */
-static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
- amdgpu_ring_write(ring, 1);
-}
-
-/**
* uvd_v6_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -1008,7 +995,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
@@ -1028,10 +1015,10 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
*/
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1050,45 +1037,37 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
-static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
{
- uint32_t reg;
-
- if (vm_id < 8)
- reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
- else
- reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
-
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
- amdgpu_ring_write(ring, pd_addr >> 12);
+ amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 0x8);
+}
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
- amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
- amdgpu_ring_write(ring, 0x8);
+static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, 1 << vmid); /* mask */
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 0xC);
}
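
With the engine-specific flush gone, the ring only needs to know how to write an arbitrary register through its own packet format; the common GMC code can then rebuild the old sequence from that primitive. A rough sketch of the idea, not code from this patch: the helper name and the exact calling convention of the new .emit_wreg hook are assumptions, while the registers are the ones from the removed lines above.

/* Sketch only: generic VI-style TLB flush built on the ring's
 * .emit_wreg callback added in this patch. */
static void example_vi_flush_gpu_tlb(struct amdgpu_ring *ring,
				     unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg = (vmid < 8) ?
		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid :
		mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;

	/* program the page-table base through the ring's own packets */
	ring->funcs->emit_wreg(ring, reg, pd_addr >> 12);
	/* then request a TLB invalidate for this vmid */
	ring->funcs->emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
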
@@ -1127,14 +1106,14 @@ static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
}
static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, pd_addr >> 12);
amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
}
static bool uvd_v6_0_is_idle(void *handle)
@@ -1549,21 +1528,19 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_frame_size =
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 6 + 6 + /* hdp flush / invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
- .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .emit_wreg = uvd_v6_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@@ -1575,24 +1552,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.emit_frame_size =
- 2 + /* uvd_v6_0_ring_emit_hdp_flush */
- 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 6 + 6 + /* hdp flush / invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
- 20 + /* uvd_v6_0_ring_emit_vm_flush */
+ VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
- .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .emit_wreg = uvd_v6_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
@@ -1605,7 +1580,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
.set_wptr = uvd_v6_0_enc_ring_set_wptr,
.emit_frame_size =
4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
- 6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+ 5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
1, /* uvd_v6_0_enc_ring_insert_end */
.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 6634545..eddc57f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -25,20 +25,20 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
+#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"
-#include "vega10/soc15ip.h"
-#include "vega10/UVD/uvd_7_0_offset.h"
-#include "vega10/UVD/uvd_7_0_sh_mask.h"
-#include "vega10/VCE/vce_4_0_offset.h"
-#include "vega10/VCE/vce_4_0_default.h"
-#include "vega10/VCE/vce_4_0_sh_mask.h"
-#include "vega10/NBIF/nbif_6_1_offset.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
+#include "uvd/uvd_7_0_offset.h"
+#include "uvd/uvd_7_0_sh_mask.h"
+#include "vce/vce_4_0_offset.h"
+#include "vce/vce_4_0_default.h"
+#include "vce/vce_4_0_sh_mask.h"
+#include "nbif/nbif_6_1_offset.h"
+#include "hdp/hdp_4_0_offset.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -184,7 +184,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed\n",
@@ -359,7 +359,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
r = 0;
}
error:
@@ -385,18 +385,18 @@ static int uvd_v7_0_early_init(void *handle)
static int uvd_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq);
if (r)
return r;
/* UVD ENC TRAP */
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq);
if (r)
return r;
}
@@ -416,9 +416,9 @@ static int uvd_v7_0_sw_init(void *handle)
}
ring = &adev->uvd.ring_enc[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
- rq, amdgpu_sched_jobs);
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
return r;
@@ -472,7 +472,7 @@ static int uvd_v7_0_sw_fini(void *handle)
if (r)
return r;
- amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+ drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
@@ -616,7 +616,7 @@ static int uvd_v7_0_resume(void *handle)
*/
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+ uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
uint32_t offset;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
@@ -1086,6 +1086,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
+ struct amdgpu_device *adev = ring->adev;
+
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring,
@@ -1123,6 +1125,7 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
+
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
@@ -1133,33 +1136,6 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
}
/**
- * uvd_v7_0_ring_emit_hdp_flush - emit an hdp flush
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp flush.
- */
-static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
- mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
- amdgpu_ring_write(ring, 0);
-}
-
-/**
- * uvd_v7_0_ring_hdp_invalidate - emit an hdp invalidate
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
- */
-static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
- amdgpu_ring_write(ring, 1);
-}
-
-/**
* uvd_v7_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -1192,7 +1168,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
}
if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
@@ -1212,11 +1188,13 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
*/
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
@@ -1238,38 +1216,42 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
-static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
- uint32_t data0, uint32_t data1)
+static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
- amdgpu_ring_write(ring, data0);
+ amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
- amdgpu_ring_write(ring, data1);
+ amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 8);
}
-static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
- uint32_t data0, uint32_t data1, uint32_t mask)
+static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
- amdgpu_ring_write(ring, data0);
+ amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
- amdgpu_ring_write(ring, data1);
+ amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
amdgpu_ring_write(ring, mask);
@@ -1279,39 +1261,28 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
}
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
uint32_t data0, data1, mask;
- unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
- data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
- data1 = upper_32_bits(pd_addr);
- uvd_v7_0_vm_reg_write(ring, data0, data1);
-
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
- data1 = lower_32_bits(pd_addr);
- uvd_v7_0_vm_reg_write(ring, data0, data1);
-
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+ /* wait for reg writes */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
- uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
-
- /* flush TLB */
- data0 = (hub->vm_inv_eng0_req + eng) << 2;
- data1 = req;
- uvd_v7_0_vm_reg_write(ring, data0, data1);
-
- /* wait for flush */
- data0 = (hub->vm_inv_eng0_ack + eng) << 2;
- data1 = 1 << vm_id;
- mask = 1 << vm_id;
- uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
+ uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+ struct amdgpu_device *adev = ring->adev;
+
+ for (i = 0; i < count; i++)
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+
}
static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
@@ -1319,39 +1290,34 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}
+static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val,
+ uint32_t mask)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, val);
+}
+
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- unsigned eng = ring->vm_inv_eng;
-
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
- amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, upper_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
- amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, 0xffffffff);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+ /* wait for reg writes */
+ uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
+ lower_32_bits(pd_addr), 0xffffffff);
+}
- /* flush TLB */
+static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
- amdgpu_ring_write(ring, req);
-
- /* wait for flush */
- amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, val);
}
#if 0
@@ -1681,29 +1647,30 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
- .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
+ .nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
.emit_frame_size =
- 2 + /* uvd_v7_0_ring_emit_hdp_flush */
- 2 + /* uvd_v7_0_ring_emit_hdp_invalidate */
- 34 + /* uvd_v7_0_ring_emit_vm_flush */
+ 6 + 6 + /* hdp flush / invalidate */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* uvd_v7_0_ring_emit_vm_flush */
14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
.emit_ib = uvd_v7_0_ring_emit_ib,
.emit_fence = uvd_v7_0_ring_emit_fence,
.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
- .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v7_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = uvd_v7_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .emit_wreg = uvd_v7_0_ring_emit_wreg,
+ .emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
};
static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
@@ -1716,7 +1683,10 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
.get_wptr = uvd_v7_0_enc_ring_get_wptr,
.set_wptr = uvd_v7_0_enc_ring_set_wptr,
.emit_frame_size =
- 17 + /* uvd_v7_0_enc_ring_emit_vm_flush */
+ 3 + 3 + /* hdp flush / invalidate */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1, /* uvd_v7_0_enc_ring_insert_end */
.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
@@ -1730,6 +1700,8 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
+ .emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
};
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index cf81065..428d192 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -834,24 +834,24 @@ out:
}
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, pd_addr >> 12);
amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, VCE_CMD_END);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 7574554..73fd48d 100644..100755
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -28,16 +28,16 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
+#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"
-#include "vega10/soc15ip.h"
-#include "vega10/VCE/vce_4_0_offset.h"
-#include "vega10/VCE/vce_4_0_default.h"
-#include "vega10/VCE/vce_4_0_sh_mask.h"
-#include "vega10/MMHUB/mmhub_1_0_offset.h"
-#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
+#include "vce/vce_4_0_offset.h"
+#include "vce/vce_4_0_default.h"
+#include "vce/vce_4_0_sh_mask.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
@@ -243,37 +243,49 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+ mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+ mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
} else {
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+ mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
adev->vce.gpu_addr >> 8);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+ mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
+ (adev->vce.gpu_addr >> 40) & 0xff);
+ }
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+ mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
adev->vce.gpu_addr >> 8);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+ mmVCE_LMI_VCPU_CACHE_64BIT_BAR1),
+ (adev->vce.gpu_addr >> 40) & 0xff);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+ mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
adev->vce.gpu_addr >> 8);
- }
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
+ mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
+ (adev->vce.gpu_addr >> 40) & 0xff);
offset = AMDGPU_VCE_FIRMWARE_OFFSET;
size = VCE_V4_0_FW_SIZE;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
- offset & 0x7FFFFFFF);
+ offset & ~0x0f000000);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
- offset += size;
+ offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
size = VCE_V4_0_STACK_SIZE;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
- offset & 0x7FFFFFFF);
+ (offset & ~0x0f000000) | (1 << 24));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
offset += size;
size = VCE_V4_0_DATA_SIZE;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
- offset & 0x7FFFFFFF);
+ (offset & ~0x0f000000) | (2 << 24));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
@@ -408,11 +420,11 @@ static int vce_v4_0_sw_init(void *handle)
unsigned size;
int r, i;
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
if (r)
return r;
- size = (VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE) * 2;
+ size = VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
size += VCE_V4_0_FW_SIZE;
@@ -927,10 +939,10 @@ static int vce_v4_0_set_powergating_state(void *handle,
#endif
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
@@ -953,39 +965,33 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, VCE_CMD_END);
}
+static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, val);
+}
+
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
- amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, upper_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, 0xffffffff);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+ /* wait for reg writes */
+ vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
+ lower_32_bits(pd_addr), 0xffffffff);
+}
- /* flush TLB */
+static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
- amdgpu_ring_write(ring, req);
-
- /* wait for flush */
- amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, val);
}
static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1057,7 +1063,9 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.set_wptr = vce_v4_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
- 17 + /* vce_v4_0_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vce_v4_0_emit_vm_flush */
5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
1, /* vce_v4_0_ring_insert_end */
.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
@@ -1071,6 +1079,8 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
+ .emit_wreg = vce_v4_0_emit_wreg,
+ .emit_reg_wait = vce_v4_0_emit_reg_wait,
};
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 0450ac5..8c13267 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -25,15 +25,15 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
+#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
-#include "vega10/soc15ip.h"
-#include "raven1/VCN/vcn_1_0_offset.h"
-#include "raven1/VCN/vcn_1_0_sh_mask.h"
-#include "vega10/HDP/hdp_4_0_offset.h"
-#include "raven1/MMHUB/mmhub_9_1_offset.h"
-#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
+#include "vcn/vcn_1_0_offset.h"
+#include "vcn/vcn_1_0_sh_mask.h"
+#include "hdp/hdp_4_0_offset.h"
+#include "mmhub/mmhub_9_1_offset.h"
+#include "mmhub/mmhub_9_1_sh_mask.h"
static int vcn_v1_0_start(struct amdgpu_device *adev);
static int vcn_v1_0_stop(struct amdgpu_device *adev);
@@ -75,13 +75,13 @@ static int vcn_v1_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCN DEC TRAP */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
if (r)
return r;
/* VCN ENC TRAP */
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, i + 119,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
&adev->vcn.irq);
if (r)
return r;
@@ -744,6 +744,8 @@ static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
*/
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, 0);
@@ -761,6 +763,8 @@ static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
*/
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
@@ -777,6 +781,8 @@ static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
+ struct amdgpu_device *adev = ring->adev;
+
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring,
@@ -804,19 +810,6 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
}
/**
- * vcn_v1_0_dec_ring_hdp_invalidate - emit an hdp invalidate
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
- */
-static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
- amdgpu_ring_write(ring, 1);
-}
-
-/**
* vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
@@ -826,11 +819,13 @@ static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
*/
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
@@ -843,29 +838,18 @@ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
- uint32_t data0, uint32_t data1)
+static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val,
+ uint32_t mask)
{
- amdgpu_ring_write(ring,
- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
- amdgpu_ring_write(ring, data0);
- amdgpu_ring_write(ring,
- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
- amdgpu_ring_write(ring, data1);
- amdgpu_ring_write(ring,
- PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
- amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
-}
+ struct amdgpu_device *adev = ring->adev;
-static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
- uint32_t data0, uint32_t data1, uint32_t mask)
-{
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
- amdgpu_ring_write(ring, data0);
+ amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
- amdgpu_ring_write(ring, data1);
+ amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
amdgpu_ring_write(ring, mask);
@@ -875,39 +859,34 @@ static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
}
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
uint32_t data0, data1, mask;
- unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
- data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
- data1 = upper_32_bits(pd_addr);
- vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
-
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
- data1 = lower_32_bits(pd_addr);
- vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
-
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+ /* wait for register write */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
- vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
-
- /* flush TLB */
- data0 = (hub->vm_inv_eng0_req + eng) << 2;
- data1 = req;
- vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
-
- /* wait for flush */
- data0 = (hub->vm_inv_eng0_ack + eng) << 2;
- data1 = 1 << vm_id;
- mask = 1 << vm_id;
- vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
+ vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring,
+ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring,
+ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
+ amdgpu_ring_write(ring, val);
+ amdgpu_ring_write(ring,
+ PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
+ amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}
/**
@@ -997,51 +976,43 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
* Write enc ring commands to execute the indirect buffer
*/
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
+static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val,
+ uint32_t mask)
+{
+ amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, val);
+}
+
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- unsigned eng = ring->vm_inv_eng;
-
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
- amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
- amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
- amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, 0xffffffff);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+ /* wait for reg writes */
+ vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
+ lower_32_bits(pd_addr), 0xffffffff);
+}
- /* flush TLB */
+static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
- amdgpu_ring_write(ring, req);
-
- /* wait for flush */
- amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, val);
}
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1077,6 +1048,17 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+ struct amdgpu_device *adev = ring->adev;
+
+ for (i = 0; i < count; i++)
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+
+}
+
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -1100,30 +1082,33 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
- .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
+ .nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
.get_wptr = vcn_v1_0_dec_ring_get_wptr,
.set_wptr = vcn_v1_0_dec_ring_set_wptr,
.emit_frame_size =
- 2 + /* vcn_v1_0_dec_ring_emit_hdp_invalidate */
- 34 + /* vcn_v1_0_dec_ring_emit_vm_flush */
+ 6 + 6 + /* hdp invalidate / flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
6,
.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
.emit_ib = vcn_v1_0_dec_ring_emit_ib,
.emit_fence = vcn_v1_0_dec_ring_emit_fence,
.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
- .emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
.test_ring = amdgpu_vcn_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = vcn_v1_0_ring_insert_nop,
.insert_start = vcn_v1_0_dec_ring_insert_start,
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vcn_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
+ .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
};
static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
@@ -1136,7 +1121,9 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.get_wptr = vcn_v1_0_enc_ring_get_wptr,
.set_wptr = vcn_v1_0_enc_ring_set_wptr,
.emit_frame_size =
- 17 + /* vcn_v1_0_enc_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
1, /* vcn_v1_0_enc_ring_insert_end */
.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
@@ -1150,6 +1137,8 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vcn_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
};
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 6973257..5ae5ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -25,10 +25,8 @@
#include "amdgpu_ih.h"
#include "soc15.h"
-
-#include "vega10/soc15ip.h"
-#include "vega10/OSSSYS/osssys_4_0_offset.h"
-#include "vega10/OSSSYS/osssys_4_0_sh_mask.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
#include "soc15_common.h"
#include "vega10_ih.h"
@@ -46,11 +44,11 @@ static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
*/
static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
{
- u32 ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+ u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
adev->irq.ih.enabled = true;
}
@@ -63,14 +61,14 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
*/
static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
{
- u32 ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+ u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
/* set rptr, wptr to 0 */
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), 0);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR), 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
adev->irq.ih.enabled = false;
adev->irq.ih.rptr = 0;
}
@@ -97,20 +95,17 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
vega10_ih_disable_interrupts(adev);
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_ih_control(adev);
- else
- nbio_v6_1_ih_control(adev);
+ adev->nbio_funcs->ih_control(adev);
- ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
if (adev->irq.ih.use_bus_addr) {
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE), adev->irq.ih.rb_dma_addr >> 8);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI), ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1);
} else {
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE), adev->irq.ih.gpu_addr >> 8);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI), (adev->irq.ih.gpu_addr >> 40) & 0xff);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (adev->irq.ih.gpu_addr >> 40) & 0xff);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 4);
}
rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
@@ -126,21 +121,21 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
if (adev->irq.msi_enabled)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
/* set the writeback address whether it's enabled or not */
if (adev->irq.ih.use_bus_addr)
wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
else
wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO), lower_32_bits(wptr_off));
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI), upper_32_bits(wptr_off) & 0xFF);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
/* set rptr, wptr to 0 */
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), 0);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR), 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- ih_doorbell_rtpr = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR));
+ ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
if (adev->irq.ih.use_doorbell) {
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
OFFSET, adev->irq.ih.doorbell_index);
@@ -150,20 +145,18 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
ENABLE, 0);
}
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR), ih_doorbell_rtpr);
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
- else
- nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+ adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
- tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL));
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL), tmp);
+ WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
- tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_INT_FLOOD_CNTL));
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_INT_FLOOD_CNTL), tmp);
+ WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
pci_set_master(adev->pdev);
@@ -252,8 +245,8 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
* some faults get cleared.
*/
switch (dw0 & 0xff) {
- case AMDGPU_IH_CLIENTID_VMC:
- case AMDGPU_IH_CLIENTID_UTCL2:
+ case SOC15_IH_CLIENTID_VMC:
+ case SOC15_IH_CLIENTID_UTCL2:
break;
default:
/* Not a VM fault */
@@ -285,9 +278,9 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
/* Track retry faults in per-VM fault FIFO. */
spin_lock(&adev->vm_manager.pasid_lock);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- spin_unlock(&adev->vm_manager.pasid_lock);
- if (WARN_ON_ONCE(!vm)) {
+ if (!vm) {
/* VM not found, process it normally */
+ spin_unlock(&adev->vm_manager.pasid_lock);
amdgpu_ih_clear_fault(adev, key);
return true;
}
@@ -295,9 +288,11 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
r = kfifo_put(&vm->faults, key);
if (!r) {
/* FIFO is full. Ignore it until there is space */
+ spin_unlock(&adev->vm_manager.pasid_lock);
amdgpu_ih_clear_fault(adev, key);
goto ignore_iv;
}
+ spin_unlock(&adev->vm_manager.pasid_lock);
/* It's the first fault for this address, process it normally */
return true;
@@ -334,11 +329,11 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
entry->client_id = dw[0] & 0xff;
entry->src_id = (dw[0] >> 8) & 0xff;
entry->ring_id = (dw[0] >> 16) & 0xff;
- entry->vm_id = (dw[0] >> 24) & 0xf;
- entry->vm_id_src = (dw[0] >> 31);
+ entry->vmid = (dw[0] >> 24) & 0xf;
+ entry->vmid_src = (dw[0] >> 31);
entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
entry->timestamp_src = dw[2] >> 31;
- entry->pas_id = dw[3] & 0xffff;
+ entry->pasid = dw[3] & 0xffff;
entry->pasid_src = dw[3] >> 31;
entry->src_data[0] = dw[4];
entry->src_data[1] = dw[5];
@@ -367,7 +362,7 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev)
adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
} else {
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), adev->irq.ih.rptr);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, adev->irq.ih.rptr);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
new file mode 100644
index 0000000..4c45db7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+
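+/* Fill the per-IP, per-instance register base-offset tables that the SOC15
+ * register accessors (RREG32_SOC15/WREG32_SOC15 used elsewhere in this patch)
+ * index into.
+ */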
+int vega10_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks, only initialize the blocks used by our driver */
+ uint32_t i;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
+ adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
+ adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
+
+ }
+ return 0;
+}
+
+
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 3a4c2fa..126f127 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -71,7 +71,6 @@
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
-#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
@@ -282,29 +281,29 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
break;
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
@@ -449,14 +448,18 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
- uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
- /* bit0: 0 means pf and 1 means vf */
- /* bit31: 0 means disable IOV and 1 means enable */
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+ uint32_t reg = 0;
+
+ if (adev->asic_type == CHIP_TONGA ||
+ adev->asic_type == CHIP_FIJI) {
+ reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ /* bit0: 0 means pf and 1 means vf */
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+ /* bit31: 0 means disable IOV and 1 means enable */
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+ }
if (reg == 0) {
 if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
@@ -667,7 +670,7 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
/* reset */
- amdgpu_pci_config_reset(adev);
+ amdgpu_device_pci_config_reset(adev);
udelay(100);
@@ -852,6 +855,27 @@ static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}
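+/* HDP flush/invalidate helpers: when no ring (or a ring that cannot emit
+ * register writes) is given, poke the HDP registers directly via MMIO;
+ * otherwise emit the register write on the ring so it is ordered with the
+ * ring's other commands.
+ */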
+static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+ RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+ } else {
+ amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+ }
+}
+
+static void vi_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32(mmHDP_DEBUG0, 1);
+ RREG32(mmHDP_DEBUG0);
+ } else {
+ amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+ }
+}
+
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,
@@ -863,6 +887,8 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
.get_config_memsize = &vi_get_config_memsize,
+ .flush_hdp = &vi_flush_hdp,
+ .invalidate_hdp = &vi_invalidate_hdp,
};
#define CZ_REV_BRISTOL(rev) \
@@ -870,7 +896,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
static int vi_common_early_init(void *handle)
{
- bool smc_enabled = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU) {
@@ -891,10 +916,6 @@ static int vi_common_early_init(void *handle)
adev->asic_funcs = &vi_asic_funcs;
- if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
- (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
- smc_enabled = true;
-
adev->rev_id = vi_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
@@ -1045,7 +1066,6 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
- AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
@@ -1071,11 +1091,6 @@ static int vi_common_early_init(void *handle)
xgpu_vi_mailbox_set_irq_funcs(adev);
}
- /* vi use smc load by default */
- adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
- amdgpu_get_pcie_info(adev);
-
return 0;
}
@@ -1487,115 +1502,115 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_TOPAZ:
/* topaz has no DCE, UVD, VCE */
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
- amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
break;
case CHIP_FIJI:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
- amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
- amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
}
break;
case CHIP_TONGA:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
- amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
}
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
case CHIP_CARRIZO:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &cz_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
- amdgpu_ip_block_add(adev, &acp_ip_block);
+ amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
break;
case CHIP_STONEY:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &cz_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
- amdgpu_ip_block_add(adev, &acp_ip_block);
+ amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 575d7ae..0429fe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -24,6 +24,8 @@
#ifndef __VI_H__
#define __VI_H__
+#define VI_FLUSH_GPU_TLB_NUM_WREG 3
+
void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index dbf3703..19ddd23 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -27,6 +27,8 @@
#define SDMA1_REGISTER_OFFSET 0x200 /* not a register */
#define SDMA_MAX_INSTANCE 2
+#define KFD_VI_SDMA_QUEUE_OFFSET 0x80 /* not a register */
+
/* crtc instance offsets */
#define CRTC0_REGISTER_OFFSET (0x1b9c - 0x1b9c)
#define CRTC1_REGISTER_OFFSET (0x1d9c - 0x1b9c)
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index bc5a294..ed2f06c 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,6 +4,7 @@
config HSA_AMD
tristate "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && AMD_IOMMU_V2 && X86_64
+ depends on DRM_AMDGPU && X86_64
+ imply AMD_IOMMU_V2
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 342c2d9..0d02422 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -35,6 +35,12 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_process_queue_manager.o kfd_device_queue_manager.o \
kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \
kfd_interrupt.o kfd_events.o cik_event_interrupt.o \
- kfd_dbgdev.o kfd_dbgmgr.o
+ kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o
+
+ifneq ($(CONFIG_AMD_IOMMU_V2),)
+amdkfd-y += kfd_iommu.o
+endif
+
+amdkfd-$(CONFIG_DEBUG_FS) += kfd_debugfs.o
obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
new file mode 100644
index 0000000..997a383d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -0,0 +1,1384 @@
+/*
+ * Copyright 2015-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if 0
+HW (VI) source code for CWSR trap handler
+#Version 18 + multiple trap handler
+
+// this performance-optimal version was originally from Seven Xu at SRDC
+
+// Revision #18 --...
+/* Rev History
+** #1. Branch from gc dv. //gfxip/gfx8/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(merged, skipped-already fixed by PV)
+** #4. SR Memory Layout:
+** 1. VGPR-SGPR-HWREG-{LDS}
+** 2. tba_hi.bits.26 - reconfigured as the first-wave-in-threadgroup bit, used to defer the LDS save for a threadgroup.. performance concern..
+** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
+** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
+** #7. Update: 1. don't barrier if noLDS
+** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
+** 2. Fix SQ issue by s_sleep 2
+** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
+** 2. optimize s_buffer save by burst 16sgprs...
+** #10. Update 1. Optimize restore sgpr by bursting 16 sgprs.
+** #11. Update 1. Add 2 more timestamp for debug version
+** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
+** #13. Integ 1. Always use MUBUF for PV trap shader...
+** #14. Update 1. s_buffer_store soft clause...
+** #15. Update 1. PERF - scalar write with glc:0/mtype0 to allow L2 combining; large perf improvement.
+** #16. Update 1. PERF - UNROLL LDS_DMA saves 2500 cycles in the IP tree
+** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
+** 2. PERF - Save LDS before save VGPR to cover LDS save long latency...
+** #18. Update 1. FUNC - Implicitly restore STATUS.VCCZ, which is not writable by s_setreg_b32
+** 2. FUNC - Handle non-CWSR traps
+*/
+
+var G8SR_WDMEM_HWREG_OFFSET = 0
+var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes
+
+// Keep definitions the same as in the app shader. These 2 timestamps are part of the app shader... They should come before any save and after any restore.
+
+var G8SR_DEBUG_TIMESTAMP = 0
+var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset
+var s_g8sr_ts_save_s = s[34:35] // save start
+var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi
+var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ
+var s_g8sr_ts_save_d = s[40:41] // save end
+var s_g8sr_ts_restore_s = s[42:43] // restore start
+var s_g8sr_ts_restore_d = s[44:45] // restore end
+
+var G8SR_VGPR_SR_IN_DWX4 = 0
+var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes
+var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
+
+
+/*************************************************************************/
+/* control on how to run the shader */
+/*************************************************************************/
+//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
+var EMU_RUN_HACK = 0
+var EMU_RUN_HACK_RESTORE_NORMAL = 0
+var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
+var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+var SAVE_LDS = 1
+var WG_BASE_ADDR_LO = 0x9000a000
+var WG_BASE_ADDR_HI = 0x0
+var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+var CTX_SAVE_CONTROL = 0x0
+var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
+var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+
+/**************************************************************************/
+/* variables */
+/**************************************************************************/
+var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 3 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits
+
+var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
+var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask
+var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
+var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
+var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
+
+var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
+var SQ_WAVE_IB_STS_RCNT_SIZE = 4 //FIXME
+var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
+var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1 //FIXME
+var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
+
+var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
+var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
+
+
+/* Save */
+var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
+var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
+
+var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
+var S_SAVE_SPI_INIT_ATC_SHIFT = 27
+var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
+var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
+var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
+var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_SAVE_PC_HI_RCNT_SHIFT = 28 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used
+var S_SAVE_PC_HI_RCNT_MASK = 0xF0000000 //FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 27 //FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME
+
+var s_save_spi_init_lo = exec_lo
+var s_save_spi_init_hi = exec_hi
+
+ //tba_lo and tba_hi need to be saved/restored
+var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+var s_save_pc_hi = ttmp1
+var s_save_exec_lo = ttmp2
+var s_save_exec_hi = ttmp3
+var s_save_status = ttmp4
+var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine
+var s_save_xnack_mask_lo = ttmp6
+var s_save_xnack_mask_hi = ttmp7
+var s_save_buf_rsrc0 = ttmp8
+var s_save_buf_rsrc1 = ttmp9
+var s_save_buf_rsrc2 = ttmp10
+var s_save_buf_rsrc3 = ttmp11
+
+var s_save_mem_offset = tma_lo
+var s_save_alloc_size = s_save_trapsts //conflict
+var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access with s_save_tmp at the same time)
+var s_save_m0 = tma_hi
+
+/* Restore */
+var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
+
+var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
+var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
+var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
+var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
+var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
+var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
+var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
+var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
+
+var s_restore_spi_init_lo = exec_lo
+var s_restore_spi_init_hi = exec_hi
+
+var s_restore_mem_offset = ttmp2
+var s_restore_alloc_size = ttmp3
+var s_restore_tmp = ttmp6 //tba_lo/hi need to be restored
+var s_restore_mem_offset_save = s_restore_tmp //no conflict
+
+var s_restore_m0 = s_restore_alloc_size //no conflict
+
+var s_restore_mode = ttmp7
+
+var s_restore_pc_lo = ttmp0
+var s_restore_pc_hi = ttmp1
+var s_restore_exec_lo = tma_lo //no conflict
+var s_restore_exec_hi = tma_hi //no conflict
+var s_restore_status = ttmp4
+var s_restore_trapsts = ttmp5
+var s_restore_xnack_mask_lo = xnack_mask_lo
+var s_restore_xnack_mask_hi = xnack_mask_hi
+var s_restore_buf_rsrc0 = ttmp8
+var s_restore_buf_rsrc1 = ttmp9
+var s_restore_buf_rsrc2 = ttmp10
+var s_restore_buf_rsrc3 = ttmp11
+
+/**************************************************************************/
+/* trap handler entry points */
+/**************************************************************************/
+/* Shader Main*/
+
+shader main
+ asic(VI)
+ type(CS)
+
+
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
+ //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+ s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
+ s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
+ s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
+ //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
+ s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
+ else
+ s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
+ end
+
+L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE //restore
+
+L_SKIP_RESTORE:
+
+ s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
+ s_cbranch_scc1 L_SAVE //this is the operation for save
+
+ // ********* Handle non-CWSR traps *******************
+if (!EMU_RUN_HACK)
+ /* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
+ s_load_dwordx4 [ttmp8,ttmp9,ttmp10, ttmp11], [tma_lo,tma_hi], 0
+ s_waitcnt lgkmcnt(0)
+ s_or_b32 ttmp7, ttmp8, ttmp9
+ s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler has not been set
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
+
+L_NO_NEXT_TRAP:
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception
+ s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
+ s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
+ s_addc_u32 ttmp1, ttmp1, 0
+L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ s_rfe_b64 [ttmp0, ttmp1]
+end
+ // ********* End handling of non-CWSR traps *******************
+
+/**************************************************************************/
+/* save routine */
+/**************************************************************************/
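+
+// Memory layout of the saved context (see Rev History #4): VGPRs are stored at
+// offset 0, followed by the SGPRs, the HW registers and, for the first wave in
+// the threadgroup only, the LDS contents. The offsets are computed below with
+// get_vgpr_size_bytes()/get_sgpr_size_bytes()/get_hwreg_size_bytes().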
+
+L_SAVE:
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_save_s
+ s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+end
+
+ //check whether there is mem_viol
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+ s_cbranch_scc0 L_NO_PC_REWIND
+
+ //if so, need to rewind PC, assuming the GDS operation gets NACKed
+ s_mov_b32 s_save_tmp, 0 //clear mem_viol bit
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8
+ s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0 // -scc
+
+L_NO_PC_REWIND:
+ s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+ s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK
+ s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi //save XNACK_MASK; must be before any memory operation
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS
+ s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
+
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp
+
+ /* inform SPI the readiness and wait for SPI's go signal */
+ s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_sq_save_msg
+ s_waitcnt lgkmcnt(0)
+end
+
+ if (EMU_RUN_HACK)
+
+ else
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+ end
+
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD and would cause an SQ hang, since the 7th/8th waves could not get arbitration to execute instructions while the other waves are stuck in the sleep loop waiting for wrexec!=0
+
+ if (EMU_RUN_HACK)
+
+ else
+ s_cbranch_execz L_SLEEP
+ end
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_spi_wrexec
+ s_waitcnt lgkmcnt(0)
+end
+
+ /* setup Resource Constants */
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+ //calculate wd_addr using absolute thread id
+ v_readlane_b32 s_save_tmp, v9, 0
+ s_lshr_b32 s_save_tmp, s_save_tmp, 6
+ s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
+ s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+ s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+ s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+ else
+ end
+ if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+ s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+ s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+ s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+ else
+ end
+
+
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not necessarily initialized
+ s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
+ s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
+ s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
+
+ //FIXME right now s_save_m0/s_save_mem_offset use tma_lo/tma_hi (might need to save them before using them?)
+ s_mov_b32 s_save_m0, m0 //save M0
+
+ /* global mem offset */
+ s_mov_b32 s_save_mem_offset, 0x0 //mem offset initial value = 0
+
+
+
+
+ /* save HW registers */
+ //////////////////////////////
+
+ L_SAVE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_save_mem_offset)
+ get_sgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+
+
+ s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0
+
+ if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
+ s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
+ s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
+ s_mov_b32 tba_lo, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO
+ s_mov_b32 tba_hi, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI
+ end
+
+ write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC
+ write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC
+ write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset) //STATUS
+
+ //s_save_trapsts conflicts with s_save_alloc_size
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset) //TRAPSTS
+
+ write_hwreg_to_mem(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_LO
+ write_hwreg_to_mem(s_save_xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_HI
+
+ //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(tba_lo, s_save_buf_rsrc0, s_save_mem_offset) //TBA_LO
+ write_hwreg_to_mem(tba_hi, s_save_buf_rsrc0, s_save_mem_offset) //TBA_HI
+
+
+
+ /* the first wave in the threadgroup */
+ // save first_wave bit in tba_hi unused bit.26
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK // extract first wave bit
+ //s_or_b32 tba_hi, s_save_tmp, tba_hi // save first wave bit to tba_hi.bits[26]
+ s_mov_b32 s_save_exec_hi, 0x0
+ s_or_b32 s_save_exec_hi, s_save_tmp, s_save_exec_hi // save first wave bit to s_save_exec_hi.bits[26]
+
+
+ /* save SGPRs */
+ // Save SGPR before LDS save, then the s0 to s4 can be used during LDS save...
+ //////////////////////////////
+
+ // SGPR SR memory offset : size(VGPR)
+ get_vgpr_size_bytes(s_save_mem_offset)
+ // TODO, change RSRC word to rearrange memory layout for SGPRS
+
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
+
+ if (SGPR_SAVE_USE_SQC)
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes
+ else
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
+ end
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+
+ // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
+ //s_mov_b64 s_save_pc_lo, s_save_buf_rsrc0
+ s_mov_b64 s_save_xnack_mask_lo, s_save_buf_rsrc0
+ s_add_u32 s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset
+ s_addc_u32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0
+
+ s_mov_b32 m0, 0x0 //SGPR initial index value =0
+ L_SAVE_SGPR_LOOP:
+ // SGPR is allocated in 16 SGPR granularity
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
+ s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
+
+ write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset) //PV: the best performance should be using s_buffer_store_dwordx4
+ s_add_u32 m0, m0, 16 //next sgpr index
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SGPR_LOOP //SGPR save is complete?
+ // restore s_save_buf_rsrc0,1
+ //s_mov_b64 s_save_buf_rsrc0, s_save_pc_lo
+ s_mov_b64 s_save_buf_rsrc0, s_save_xnack_mask_lo
+
+
+
+
+ /* save first 4 VGPR, then LDS save could use */
+ // each wave will alloc 4 vgprs at least...
+ /////////////////////////////////////////////////////////////////////////////////////
+
+ s_mov_b32 s_save_mem_offset, 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+
+ // VGPR Allocated in 4-GPR granularity
+
+if G8SR_VGPR_SR_IN_DWX4
+ // the const stride for DWx4 is 4*4 bytes
+ s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+
+ buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+ s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
+else
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+end
+
+
+
+ /* save LDS */
+ //////////////////////////////
+
+ L_SAVE_LDS:
+
+ // Change EXEC to all threads...
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_LDS_DONE
+
+ s_barrier //LDS is used? wait for other waves in the same TG
+ //s_and_b32 s_save_tmp, tba_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here
+ s_and_b32 s_save_tmp, s_save_exec_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here
+ s_cbranch_scc0 L_SAVE_LDS_DONE
+
+ // first wave do LDS save;
+
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes
+ s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_save_mem_offset)
+ get_sgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+ s_mov_b32 m0, 0x0 //lds_offset initial value = 0
+
+
+var LDS_DMA_ENABLE = 0
+var UNROLL = 0
+if UNROLL==0 && LDS_DMA_ENABLE==1
+ s_mov_b32 s3, 256*2
+ s_nop 0
+ s_nop 0
+ s_nop 0
+ L_SAVE_LDS_LOOP:
+ //TODO: looks like the 2 buffer_store/load clauses for s/r will hurt performance???
+ if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity
+ buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW
+ buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
+ end
+
+ s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete?
+
+elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROLL, has icache miss
+ // store from highest LDS address to lowest
+ s_mov_b32 s3, 256*2
+ s_sub_u32 m0, s_save_alloc_size, s3
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
+ s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128-DW chunks...
+ s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from highest addr to lowest
+ s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block costs 6*4 bytes of instructions
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 // plus the s_addc and s_setpc instructions below
+ s_nop 0
+ s_nop 0
+ s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes
+ s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved
+ s_add_u32 s0, s0,s_save_alloc_size
+ s_addc_u32 s1, s1, 0
+ s_setpc_b64 s[0:1]
+
+
+ for var i =0; i< 128; i++
+ // be careful to make this a 64-byte aligned address, which could improve performance...
+ buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW
+ buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
+
+ if i!=127
+ s_sub_u32 m0, m0, s3 // use an sgpr to shrink a 2DW inst to a 1DW inst to improve performance, i.e. pack more LDS_DMA insts into one cacheline
+ s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3
+ end
+ end
+
+else // BUFFER_STORE
+ v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
+ v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid
+ v_mul_i32_i24 v2, v3, 8 // tid*8
+ v_mov_b32 v3, 256*2
+ s_mov_b32 m0, 0x10000
+ s_mov_b32 s0, s_save_buf_rsrc3
+ s_and_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0xFF7FFFFF // disable add_tid
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0x58000 //DFMT
+
+L_SAVE_LDS_LOOP_VECTOR:
+ ds_read_b64 v[0:1], v2 //x =LDS[a], byte address
+ s_waitcnt lgkmcnt(0)
+ buffer_store_dwordx2 v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset offen:1 glc:1 slc:1
+// s_waitcnt vmcnt(0)
+ v_add_u32 v2, vcc[0:1], v2, v3
+ v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
+ s_cbranch_vccnz L_SAVE_LDS_LOOP_VECTOR
+
+ // restore rsrc3
+ s_mov_b32 s_save_buf_rsrc3, s0
+
+end
+
+L_SAVE_LDS_DONE:
+
+
+ /* save VGPRs - set the Rest VGPRs */
+ //////////////////////////////////////////////////////////////////////////////////////
+ L_SAVE_VGPR:
+ // VGPR SR memory offset: 0
+ // TODO rearrange the RSRC words to use swizzle for VGPR save...
+
+ s_mov_b32 s_save_mem_offset, (0+256*4) // for the rest VGPRs
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+
+ // VGPR Allocated in 4-GPR granularity
+
+if G8SR_VGPR_SR_IN_DWX4
+ // the const stride for DWx4 is 4*4 bytes
+ s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+
+ s_mov_b32 m0, 4 // skip first 4 VGPRs
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs
+
+ s_set_gpr_idx_on m0, 0x1 // This will change M0
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0
+L_SAVE_VGPR_LOOP:
+ v_mov_b32 v0, v0 // v0 = v[0+m0]
+ v_mov_b32 v1, v1
+ v_mov_b32 v2, v2
+ v_mov_b32 v3, v3
+
+
+ buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ s_add_u32 m0, m0, 4
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
+ s_set_gpr_idx_off
+L_SAVE_VGPR_LOOP_END:
+
+ s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
+else
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value = 4 (first 4 VGPRs already saved)
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+
+ s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later
+
+ L_SAVE_VGPR_LOOP:
+ v_mov_b32 v0, v0 //v0 = v[0+m0]
+ v_mov_b32 v1, v1 //v1 = v[1+m0]
+ v_mov_b32 v2, v2 //v2 = v[2+m0]
+ v_mov_b32 v3, v3 //v3 = v[3+m0]
+
+ if(USE_MTBUF_INSTEAD_OF_MUBUF)
+ tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+ else
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+ end
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
+ s_set_gpr_idx_off
+end
+
+L_SAVE_VGPR_END:
+
+
+
+
+
+
+ /* S_PGM_END_SAVED */ //FIXME graphics ONLY
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
+ s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
+ s_rfe_b64 s_save_pc_lo //Return to the main shader program
+ else
+ end
+
+// Save Done timestamp
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_save_d
+ // SGPR SR memory offset : size(VGPR)
+ get_vgpr_size_bytes(s_save_mem_offset)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
+ s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+ // Need reset rsrc2??
+ s_mov_b32 m0, s_save_mem_offset
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1
+end
+
+
+ s_branch L_END_PGM
+
+
+
+/**************************************************************************/
+/* restore routine */
+/**************************************************************************/
+
+L_RESTORE:
+ /* Setup Resource Constants */
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ //calculate wd_addr using absolute thread id
+ v_readlane_b32 s_restore_tmp, v9, 0
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
+ s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
+ s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
+ s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
+ s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
+ else
+ end
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_s
+ s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+ // tma_lo/hi are sgpr 110, 111, which will not be used in the 112-SGPR allocation case...
+ s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
+ s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, since exec will be restored at the very end..
+end
+
+
+
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
+ s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
+ s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
+ s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
+
+ /* global mem offset */
+// s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0
+
+ /* the first wave in the threadgroup */
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_RESTORE_VGPR
+
+ /* restore LDS */
+ //////////////////////////////
+ L_RESTORE_LDS:
+
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes
+ s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ get_sgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow???
+
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+ s_mov_b32 m0, 0x0 //lds_offset initial value = 0
+
+ L_RESTORE_LDS_LOOP:
+ if (SAVE_LDS)
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
+ end
+ s_add_u32 m0, m0, 256*2 // 128 DW
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP //LDS restore is complete?
+
+
+ /* restore VGPRs */
+ //////////////////////////////
+ L_RESTORE_VGPR:
+ // VGPR SR memory offset : 0
+ s_mov_b32 s_restore_mem_offset, 0x0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
+ if (SWIZZLE_EN)
+ s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+if G8SR_VGPR_SR_IN_DWX4
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+
+ // the const stride for DWx4 is 4*4 bytes
+ s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+
+ s_mov_b32 m0, s_restore_alloc_size
+ s_set_gpr_idx_on m0, 0x8 // Note.. This will change m0
+
+L_RESTORE_VGPR_LOOP:
+ buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ s_waitcnt vmcnt(0)
+ s_sub_u32 m0, m0, 4
+ v_mov_b32 v0, v0 // v[0+m0] = v0
+ v_mov_b32 v1, v1
+ v_mov_b32 v2, v2
+ v_mov_b32 v3, v3
+ s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_cmp_eq_u32 m0, 0x8000
+ s_cbranch_scc0 L_RESTORE_VGPR_LOOP
+ s_set_gpr_idx_off
+
+ s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes
+
+else
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later
+
+ L_RESTORE_VGPR_LOOP:
+ if(USE_MTBUF_INSTEAD_OF_MUBUF)
+ tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+ else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
+ end
+ s_waitcnt vmcnt(0) //ensure data ready
+ v_mov_b32 v0, v0 //v[0+m0] = v0
+ v_mov_b32 v1, v1
+ v_mov_b32 v2, v2
+ v_mov_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete?
+ s_set_gpr_idx_off
+ /* VGPR restore on v0 */
+ if(USE_MTBUF_INSTEAD_OF_MUBUF)
+ tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+ else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
+ end
+
+end
+
+ /* restore SGPRs */
+ //////////////////////////////
+
+ // SGPR SR memory offset : size(VGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ get_sgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 16*4 // restore SGPR from S[n] to S[0], by 16 sgprs group
+ // TODO, change RSRC word to rearrange memory layout for SGPRS
+
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //sgpr_size
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
+
+ if (SGPR_SAVE_USE_SQC)
+ s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes
+ else
+ s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
+ end
+ if (SWIZZLE_EN)
+ s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+ /* If 112 SGPRs are allocated, 4 SGPRs are not used: TBA(108,109) and TMA(110,111).
+ However, it is safe to restore these 4 SGPRs anyway, since TBA and TMA will later be restored by HWREG
+ */
+ s_mov_b32 m0, s_restore_alloc_size
+
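+ // read_16sgpr_from_mem() steps s_restore_mem_offset down by 16*4 bytes per
+ // call while m0 steps down by 16, so SGPR groups are read back from the top
+ // of the allocation towards s[0]; s_movreld_b64 writes relative to m0.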
+ L_RESTORE_SGPR_LOOP:
+ read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) //PV: further performance improvement can be made
+ s_waitcnt lgkmcnt(0) //ensure data ready
+
+ s_sub_u32 m0, m0, 16 // Restore from S[n] to S[0]
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+ s_movreld_b64 s8, s8
+ s_movreld_b64 s10, s10
+ s_movreld_b64 s12, s12
+ s_movreld_b64 s14, s14
+
+ s_cmp_eq_u32 m0, 0 //scc = (m0 == 0) ? 1 : 0
+ s_cbranch_scc0 L_RESTORE_SGPR_LOOP //SGPR restore (except s0) is complete?
+
+ /* restore HW registers */
+ //////////////////////////////
+ L_RESTORE_HWREG:
+
+
+if G8SR_DEBUG_TIMESTAMP
+ s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
+ s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
+end
+
+ // HWREG SR memory offset : size(VGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ get_sgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
+ if (SWIZZLE_EN)
+ s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
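+ // read_hwreg_from_mem() advances s_restore_mem_offset by 4 after each call,
+ // so the reads below consume consecutive dwords of the saved HWREG block.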
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC
+ read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //EXEC
+ read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset) //STATUS
+ read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset) //TRAPSTS
+ read_hwreg_from_mem(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //XNACK_MASK_LO
+ read_hwreg_from_mem(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset) //XNACK_MASK_HI
+ read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset) //MODE
+ read_hwreg_from_mem(tba_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //TBA_LO
+ read_hwreg_from_mem(tba_hi, s_restore_buf_rsrc0, s_restore_mem_offset) //TBA_HI
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+
+ //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+ s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
+ end
+ if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal
+ s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
+ end
+
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+
+ s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
+ s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
+ //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
+ s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
+ //reuse s_restore_m0 as a temp register
+ s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
+ s_mov_b32 s_restore_tmp, 0x0 //IB_STS is zero
+ s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
+ s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
+ s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
+ s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+
+ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_d
+ s_waitcnt lgkmcnt(0)
+end
+
+// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+ s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
+
+
+/**************************************************************************/
+/* the END */
+/**************************************************************************/
+L_END_PGM:
+ s_endpgm
+
+end
+
+
+/**************************************************************************/
+/* the helper functions */
+/**************************************************************************/
+
+//Only for save hwreg to mem
+function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
+ s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on
+ s_mov_b32 m0, s_mem_offset
+ s_buffer_store_dword s, s_rsrc, m0 glc:1
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+ s_mov_b32 m0, exec_lo
+end
+
+
+// HWREGs are saved before SGPRs, so all HWREGs can be used.
+function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
+
+ s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1
+ s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1
+ s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1
+ s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1
+ s_add_u32 s_rsrc[0], s_rsrc[0], 4*16
+ s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 // +scc
+end
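+// Note: unlike write_hwreg_to_mem(), this helper stores at fixed offsets and
+// then advances the 48-bit base address held in the first two resource words
+// by 4*16 bytes (with carry), which is why s_mem_offset is left untouched.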
+
+
+function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+end
+
+function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset glc:1
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*16
+end
+
+
+
+function get_lds_size_bytes(s_lds_size_byte)
+ // SQ LDS granularity is 64DW, while PGM_RSRC2.lds_size is in granularity 128DW
+ s_getreg_b32 s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) // lds_size
+ s_lshl_b32 s_lds_size_byte, s_lds_size_byte, 8 //LDS size in dwords = lds_size * 64 *4Bytes // granularity 64DW
+end
+
+function get_vgpr_size_bytes(s_vgpr_size_byte)
+ s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vgpr_size
+ s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
+ s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //Number of VGPRs = (vgpr_size + 1) * 4 * 64 * 4 (non-zero value) //FIXME for GFX, zero is possible
+end
+
+function get_sgpr_size_bytes(s_sgpr_size_byte)
+ s_getreg_b32 s_sgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //sgpr_size
+ s_add_u32 s_sgpr_size_byte, s_sgpr_size_byte, 1
+ s_lshl_b32 s_sgpr_size_byte, s_sgpr_size_byte, 6 //Number of SGPRs = (sgpr_size + 1) * 16 *4 (non-zero value)
+end
+
+function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+end
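+// Save/restore memory layout implied by the offset math above:
+//   [ VGPRs | SGPRs | 128-byte HWREG block | LDS ]
+// which is why the LDS restore offset adds get_hwreg_size_bytes() on top of
+// the VGPR and SGPR sizes.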
+
+
+#endif
+
+static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0xbf820001, 0xbf820123,
+ 0xb8f4f802, 0x89748674,
+ 0xb8f5f803, 0x8675ff75,
+ 0x00000400, 0xbf850011,
+ 0xc00a1e37, 0x00000000,
+ 0xbf8c007f, 0x87777978,
+ 0xbf840002, 0xb974f802,
+ 0xbe801d78, 0xb8f5f803,
+ 0x8675ff75, 0x000001ff,
+ 0xbf850002, 0x80708470,
+ 0x82718071, 0x8671ff71,
+ 0x0000ffff, 0xb974f802,
+ 0xbe801f70, 0xb8f5f803,
+ 0x8675ff75, 0x00000100,
+ 0xbf840006, 0xbefa0080,
+ 0xb97a0203, 0x8671ff71,
+ 0x0000ffff, 0x80f08870,
+ 0x82f18071, 0xbefa0080,
+ 0xb97a0283, 0xbef60068,
+ 0xbef70069, 0xb8fa1c07,
+ 0x8e7a9c7a, 0x87717a71,
+ 0xb8fa03c7, 0x8e7a9b7a,
+ 0x87717a71, 0xb8faf807,
+ 0x867aff7a, 0x00007fff,
+ 0xb97af807, 0xbef2007e,
+ 0xbef3007f, 0xbefe0180,
+ 0xbf900004, 0xbf8e0002,
+ 0xbf88fffe, 0xbef8007e,
+ 0x8679ff7f, 0x0000ffff,
+ 0x8779ff79, 0x00040000,
+ 0xbefa0080, 0xbefb00ff,
+ 0x00807fac, 0x867aff7f,
+ 0x08000000, 0x8f7a837a,
+ 0x877b7a7b, 0x867aff7f,
+ 0x70000000, 0x8f7a817a,
+ 0x877b7a7b, 0xbeef007c,
+ 0xbeee0080, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x806e7a6e,
+ 0xbefa0084, 0xbefa00ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc006e, 0xc0611bfc,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611c3c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611c7c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611cbc,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611cfc,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611d3c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xb8f5f803,
+ 0xbefe007c, 0xbefc006e,
+ 0xc0611d7c, 0x0000007c,
+ 0x806e846e, 0xbefc007e,
+ 0xbefe007c, 0xbefc006e,
+ 0xc0611dbc, 0x0000007c,
+ 0x806e846e, 0xbefc007e,
+ 0xbefe007c, 0xbefc006e,
+ 0xc0611dfc, 0x0000007c,
+ 0x806e846e, 0xbefc007e,
+ 0xb8eff801, 0xbefe007c,
+ 0xbefc006e, 0xc0611bfc,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611b3c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611b7c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbef30080,
+ 0x8773737a, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0xb8f51605, 0x80758175,
+ 0x8e758475, 0x8e7a8275,
+ 0xbefa00ff, 0x01000000,
+ 0xbef60178, 0x80786e78,
+ 0x82798079, 0xbefc0080,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003c, 0x00000000,
+ 0xc06b013c, 0x00000010,
+ 0xc06b023c, 0x00000020,
+ 0xc06b033c, 0x00000030,
+ 0x8078c078, 0x82798079,
+ 0x807c907c, 0xbf0a757c,
+ 0xbf85ffeb, 0xbef80176,
+ 0xbeee0080, 0xbefe00c1,
+ 0xbeff00c1, 0xbefa00ff,
+ 0x01000000, 0xe0724000,
+ 0x6e1e0000, 0xe0724100,
+ 0x6e1e0100, 0xe0724200,
+ 0x6e1e0200, 0xe0724300,
+ 0x6e1e0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8f54306,
+ 0x8675c175, 0xbf84002c,
+ 0xbf8a0000, 0x867aff73,
+ 0x04000000, 0xbf840028,
+ 0x8e758675, 0x8e758275,
+ 0xbefa0075, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x806e7a6e,
+ 0x806eff6e, 0x00000080,
+ 0xbefa00ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe80007b,
+ 0x867bff7b, 0xff7fffff,
+ 0x877bff7b, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8c007f, 0xe0765000,
+ 0x6e1e0002, 0x32040702,
+ 0xd0c9006a, 0x0000eb02,
+ 0xbf87fff7, 0xbefb0000,
+ 0xbeee00ff, 0x00000400,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8f52a05, 0x80758175,
+ 0x8e758275, 0x8e7a8875,
+ 0xbefa00ff, 0x01000000,
+ 0xbefc0084, 0xbf0a757c,
+ 0xbf840015, 0xbf11017c,
+ 0x8075ff75, 0x00001000,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x6e1e0000,
+ 0xe0724100, 0x6e1e0100,
+ 0xe0724200, 0x6e1e0200,
+ 0xe0724300, 0x6e1e0300,
+ 0x807c847c, 0x806eff6e,
+ 0x00000400, 0xbf0a757c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200ca, 0xbef8007e,
+ 0x8679ff7f, 0x0000ffff,
+ 0x8779ff79, 0x00040000,
+ 0xbefa0080, 0xbefb00ff,
+ 0x00807fac, 0x8676ff7f,
+ 0x08000000, 0x8f768376,
+ 0x877b767b, 0x8676ff7f,
+ 0x70000000, 0x8f768176,
+ 0x877b767b, 0x8676ff7f,
+ 0x04000000, 0xbf84001e,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8f34306, 0x8673c173,
+ 0xbf840019, 0x8e738673,
+ 0x8e738273, 0xbefa0073,
+ 0xb8f22a05, 0x80728172,
+ 0x8e728a72, 0xb8f61605,
+ 0x80768176, 0x8e768676,
+ 0x80727672, 0x8072ff72,
+ 0x00000080, 0xbefa00ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x721e0000,
+ 0xe0510100, 0x721e0000,
+ 0x807cff7c, 0x00000200,
+ 0x8072ff72, 0x00000200,
+ 0xbf0a737c, 0xbf85fff6,
+ 0xbef20080, 0xbefe00c1,
+ 0xbeff00c1, 0xb8f32a05,
+ 0x80738173, 0x8e738273,
+ 0x8e7a8873, 0xbefa00ff,
+ 0x01000000, 0xbef60072,
+ 0x8072ff72, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0x8073ff73, 0x00008000,
+ 0xe0524000, 0x721e0000,
+ 0xe0524100, 0x721e0100,
+ 0xe0524200, 0x721e0200,
+ 0xe0524300, 0x721e0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8072ff72, 0x00000400,
+ 0xbf0a737c, 0xbf85ffee,
+ 0xbf9c0000, 0xe0524000,
+ 0x761e0000, 0xe0524100,
+ 0x761e0100, 0xe0524200,
+ 0x761e0200, 0xe0524300,
+ 0x761e0300, 0xb8f22a05,
+ 0x80728172, 0x8e728a72,
+ 0xb8f61605, 0x80768176,
+ 0x8e768676, 0x80727672,
+ 0x80f2c072, 0xb8f31605,
+ 0x80738173, 0x8e738473,
+ 0x8e7a8273, 0xbefa00ff,
+ 0x01000000, 0xbefc0073,
+ 0xc031003c, 0x00000072,
+ 0x80f2c072, 0xbf8c007f,
+ 0x80fc907c, 0xbe802d00,
+ 0xbe822d02, 0xbe842d04,
+ 0xbe862d06, 0xbe882d08,
+ 0xbe8a2d0a, 0xbe8c2d0c,
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff1, 0xb8f22a05,
+ 0x80728172, 0x8e728a72,
+ 0xb8f61605, 0x80768176,
+ 0x8e768676, 0x80727672,
+ 0xbefa0084, 0xbefa00ff,
+ 0x01000000, 0xc0211cfc,
+ 0x00000072, 0x80728472,
+ 0xc0211c3c, 0x00000072,
+ 0x80728472, 0xc0211c7c,
+ 0x00000072, 0x80728472,
+ 0xc0211bbc, 0x00000072,
+ 0x80728472, 0xc0211bfc,
+ 0x00000072, 0x80728472,
+ 0xc0211d3c, 0x00000072,
+ 0x80728472, 0xc0211d7c,
+ 0x00000072, 0x80728472,
+ 0xc0211a3c, 0x00000072,
+ 0x80728472, 0xc0211a7c,
+ 0x00000072, 0x80728472,
+ 0xc0211dfc, 0x00000072,
+ 0x80728472, 0xc0211b3c,
+ 0x00000072, 0x80728472,
+ 0xc0211b7c, 0x00000072,
+ 0x80728472, 0xbf8c007f,
+ 0x8671ff71, 0x0000ffff,
+ 0xbefc0073, 0xbefe006e,
+ 0xbeff006f, 0x867375ff,
+ 0x000003ff, 0xb9734803,
+ 0x867375ff, 0xfffff800,
+ 0x8f738b73, 0xb973a2c3,
+ 0xb977f801, 0x8673ff71,
+ 0xf0000000, 0x8f739c73,
+ 0x8e739073, 0xbef60080,
+ 0x87767376, 0x8673ff71,
+ 0x08000000, 0x8f739b73,
+ 0x8e738f73, 0x87767376,
+ 0x8673ff74, 0x00800000,
+ 0x8f739773, 0xb976f807,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0xb974f802, 0xbf8a0000,
+ 0x95807370, 0xbf810000,
+};
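+/* cwsr_trap_gfx8_hex[] is the pre-assembled binary corresponding to the trap
+ * handler source above (which is guarded out of compilation; note the #endif).
+ * This hex blob is what the driver embeds and loads as the CWSR trap handler.
+ */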
+
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 505d391..cd679cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -24,6 +24,7 @@
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
+#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -117,7 +118,7 @@ static int kfd_open(struct inode *inode, struct file *filep)
return -EPERM;
}
- process = kfd_create_process(current);
+ process = kfd_create_process(filep);
if (IS_ERR(process))
return PTR_ERR(process);
@@ -206,6 +207,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->ctx_save_restore_area_address =
args->ctx_save_restore_address;
q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
+ q_properties->ctl_stack_size = args->ctl_stack_size;
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
@@ -431,6 +433,38 @@ out:
return err;
}
+static int kfd_ioctl_set_trap_handler(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_set_trap_handler_args *args = data;
+ struct kfd_dev *dev;
+ int err = 0;
+ struct kfd_process_device *pdd;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = -ESRCH;
+ goto out;
+ }
+
+ if (dev->dqm->ops.set_trap_handler(dev->dqm,
+ &pdd->qpd,
+ args->tba_addr,
+ args->tma_addr))
+ err = -EINVAL;
+
+out:
+ mutex_unlock(&p->mutex);
+
+ return err;
+}
+
static int kfd_ioctl_dbg_register(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -493,7 +527,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
long status;
dev = kfd_device_by_id(args->gpu_id);
- if (!dev)
+ if (!dev || !dev->dbgmgr)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -792,12 +826,155 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
return 0;
}
+static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_get_process_apertures_new_args *args = data;
+ struct kfd_process_device_apertures *pa;
+ struct kfd_process_device *pdd;
+ uint32_t nodes = 0;
+ int ret;
+
+ dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+
+ if (args->num_of_nodes == 0) {
+ /* Return number of nodes, so that user space can allocate
+ * sufficient memory
+ */
+ mutex_lock(&p->mutex);
+
+ if (!kfd_has_process_device_data(p))
+ goto out_unlock;
+
+ /* Run over all pdd of the process */
+ pdd = kfd_get_first_process_device_data(p);
+ do {
+ args->num_of_nodes++;
+ pdd = kfd_get_next_process_device_data(p, pdd);
+ } while (pdd);
+
+ goto out_unlock;
+ }
+
+ /* Fill in process-aperture information for all available
+ * nodes, but not more than args->num_of_nodes as that is
+ * the amount of memory allocated by user
+ */
+ pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
+ args->num_of_nodes), GFP_KERNEL);
+ if (!pa)
+ return -ENOMEM;
+
+ mutex_lock(&p->mutex);
+
+ if (!kfd_has_process_device_data(p)) {
+ args->num_of_nodes = 0;
+ kfree(pa);
+ goto out_unlock;
+ }
+
+ /* Run over all pdd of the process */
+ pdd = kfd_get_first_process_device_data(p);
+ do {
+ pa[nodes].gpu_id = pdd->dev->id;
+ pa[nodes].lds_base = pdd->lds_base;
+ pa[nodes].lds_limit = pdd->lds_limit;
+ pa[nodes].gpuvm_base = pdd->gpuvm_base;
+ pa[nodes].gpuvm_limit = pdd->gpuvm_limit;
+ pa[nodes].scratch_base = pdd->scratch_base;
+ pa[nodes].scratch_limit = pdd->scratch_limit;
+
+ dev_dbg(kfd_device,
+ "gpu id %u\n", pdd->dev->id);
+ dev_dbg(kfd_device,
+ "lds_base %llX\n", pdd->lds_base);
+ dev_dbg(kfd_device,
+ "lds_limit %llX\n", pdd->lds_limit);
+ dev_dbg(kfd_device,
+ "gpuvm_base %llX\n", pdd->gpuvm_base);
+ dev_dbg(kfd_device,
+ "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+ dev_dbg(kfd_device,
+ "scratch_base %llX\n", pdd->scratch_base);
+ dev_dbg(kfd_device,
+ "scratch_limit %llX\n", pdd->scratch_limit);
+ nodes++;
+
+ pdd = kfd_get_next_process_device_data(p, pdd);
+ } while (pdd && (nodes < args->num_of_nodes));
+ mutex_unlock(&p->mutex);
+
+ args->num_of_nodes = nodes;
+ ret = copy_to_user(
+ (void __user *)args->kfd_process_device_apertures_ptr,
+ pa,
+ (nodes * sizeof(struct kfd_process_device_apertures)));
+ kfree(pa);
+ return ret ? -EFAULT : 0;
+
+out_unlock:
+ mutex_unlock(&p->mutex);
+ return 0;
+}
+
static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_create_event_args *args = data;
int err;
+ /* For dGPUs the event page is allocated in user mode. The
+ * handle is passed to KFD with the first call to this IOCTL
+ * through the event_page_offset field.
+ */
+ if (args->event_page_offset) {
+ struct kfd_dev *kfd;
+ struct kfd_process_device *pdd;
+ void *mem, *kern_addr;
+ uint64_t size;
+
+ if (p->signal_page) {
+ pr_err("Event page is already set\n");
+ return -EINVAL;
+ }
+
+ kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
+ if (!kfd) {
+ pr_err("Getting device by id failed in %s\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&p->mutex);
+ pdd = kfd_bind_process_to_device(kfd, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto out_unlock;
+ }
+
+ mem = kfd_process_device_translate_handle(pdd,
+ GET_IDR_HANDLE(args->event_page_offset));
+ if (!mem) {
+ pr_err("Can't find BO, offset is 0x%llx\n",
+ args->event_page_offset);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ mutex_unlock(&p->mutex);
+
+ err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+ mem, &kern_addr, &size);
+ if (err) {
+ pr_err("Failed to map event page to kernel\n");
+ return err;
+ }
+
+ err = kfd_event_page_set(p, kern_addr, size);
+ if (err) {
+ pr_err("Failed to set event page\n");
+ return err;
+ }
+ }
+
err = kfd_event_create(filp, p, args->event_type,
args->auto_reset != 0, args->node_id,
&args->event_id, &args->event_trigger_data,
@@ -805,6 +982,10 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
&args->event_slot_index);
return err;
+
+out_unlock:
+ mutex_unlock(&p->mutex);
+ return err;
}
static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
@@ -868,7 +1049,8 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
mutex_unlock(&p->mutex);
- if (sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0)
+ if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
+ pdd->qpd.vmid != 0)
dev->kfd2kgd->set_scratch_backing_va(
dev->kgd, args->va_addr, pdd->qpd.vmid);
@@ -921,6 +1103,371 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
return 0;
}
+static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
+ void *data)
+{
+ struct kfd_ioctl_acquire_vm_args *args = data;
+ struct kfd_process_device *pdd;
+ struct kfd_dev *dev;
+ struct file *drm_file;
+ int ret;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ drm_file = fget(args->drm_fd);
+ if (!drm_file)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (pdd->drm_file) {
+ ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
+ goto err_unlock;
+ }
+
+ ret = kfd_process_device_init_vm(pdd, drm_file);
+ if (ret)
+ goto err_unlock;
+ /* On success, the PDD keeps the drm_file reference */
+ mutex_unlock(&p->mutex);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&p->mutex);
+ fput(drm_file);
+ return ret;
+}
+
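+/* A device is treated as "large BAR" when all of its local memory is
+ * host-accessible (no private VRAM), or when the debug_largebar module
+ * option forces the large-BAR path for testing.
+ */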
+bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+{
+ struct kfd_local_mem_info mem_info;
+
+ if (debug_largebar) {
+ pr_debug("Simulate large-bar allocation on non large-bar machine\n");
+ return true;
+ }
+
+ if (dev->device_info->needs_iommu_device)
+ return false;
+
+ dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info);
+ if (mem_info.local_mem_size_private == 0 &&
+ mem_info.local_mem_size_public > 0)
+ return true;
+ return false;
+}
+
+static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
+ struct kfd_process_device *pdd;
+ void *mem;
+ struct kfd_dev *dev;
+ int idr_handle;
+ long err;
+ uint64_t offset = args->mmap_offset;
+ uint32_t flags = args->flags;
+
+ if (args->size == 0)
+ return -EINVAL;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
+ (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
+ !kfd_dev_is_large_bar(dev)) {
+ pr_err("Alloc host visible vram on small bar is not allowed\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto err_unlock;
+ }
+
+ err = dev->kfd2kgd->alloc_memory_of_gpu(
+ dev->kgd, args->va_addr, args->size,
+ pdd->vm, (struct kgd_mem **) &mem, &offset,
+ flags);
+
+ if (err)
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
+ if (idr_handle < 0) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ mutex_unlock(&p->mutex);
+
+ args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
+ args->mmap_offset = offset;
+
+ return 0;
+
+err_free:
+ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+err_unlock:
+ mutex_unlock(&p->mutex);
+ return err;
+}
+
+static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_free_memory_of_gpu_args *args = data;
+ struct kfd_process_device *pdd;
+ void *mem;
+ struct kfd_dev *dev;
+ int ret;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+ pr_err("Process device data doesn't exist\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ mem = kfd_process_device_translate_handle(
+ pdd, GET_IDR_HANDLE(args->handle));
+ if (!mem) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+
+ /* If freeing the buffer failed, leave the handle in place for
+ * clean-up during process tear-down.
+ */
+ if (!ret)
+ kfd_process_device_remove_obj_handle(
+ pdd, GET_IDR_HANDLE(args->handle));
+
+err_unlock:
+ mutex_unlock(&p->mutex);
+ return ret;
+}
+
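+/* args->n_success tracks how many devices have already been mapped, so a
+ * retry of the ioctl after a partial failure can resume from the first
+ * device that has not been processed yet.
+ */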
+static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_map_memory_to_gpu_args *args = data;
+ struct kfd_process_device *pdd, *peer_pdd;
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+ int i;
+ uint32_t *devices_arr = NULL;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+ if (!args->n_devices) {
+ pr_debug("Device IDs array empty\n");
+ return -EINVAL;
+ }
+ if (args->n_success > args->n_devices) {
+ pr_debug("n_success exceeds n_devices\n");
+ return -EINVAL;
+ }
+
+ devices_arr = kmalloc(args->n_devices * sizeof(*devices_arr),
+ GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+ (void __user *)args->device_ids_array_ptr,
+ args->n_devices * sizeof(*devices_arr));
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+ }
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto bind_process_to_device_failed;
+ }
+
+ mem = kfd_process_device_translate_handle(pdd,
+ GET_IDR_HANDLE(args->handle));
+ if (!mem) {
+ err = -ENOMEM;
+ goto get_mem_obj_from_handle_failed;
+ }
+
+ for (i = args->n_success; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ pr_debug("Getting device by id failed for 0x%x\n",
+ devices_arr[i]);
+ err = -EINVAL;
+ goto get_mem_obj_from_handle_failed;
+ }
+
+ peer_pdd = kfd_bind_process_to_device(peer, p);
+ if (IS_ERR(peer_pdd)) {
+ err = PTR_ERR(peer_pdd);
+ goto get_mem_obj_from_handle_failed;
+ }
+ err = peer->kfd2kgd->map_memory_to_gpu(
+ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+ if (err) {
+ pr_err("Failed to map to gpu %d/%d\n",
+ i, args->n_devices);
+ goto map_memory_to_gpu_failed;
+ }
+ args->n_success = i+1;
+ }
+
+ mutex_unlock(&p->mutex);
+
+ err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
+ if (err) {
+ pr_debug("Sync memory failed, wait interrupted by user signal\n");
+ goto sync_memory_failed;
+ }
+
+ /* Flush TLBs after waiting for the page table updates to complete */
+ for (i = 0; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (WARN_ON_ONCE(!peer))
+ continue;
+ peer_pdd = kfd_get_process_device_data(peer, p);
+ if (WARN_ON_ONCE(!peer_pdd))
+ continue;
+ kfd_flush_tlb(peer_pdd);
+ }
+
+ kfree(devices_arr);
+
+ return err;
+
+bind_process_to_device_failed:
+get_mem_obj_from_handle_failed:
+map_memory_to_gpu_failed:
+ mutex_unlock(&p->mutex);
+copy_from_user_failed:
+sync_memory_failed:
+ kfree(devices_arr);
+
+ return err;
+}
+
+static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
+ struct kfd_process_device *pdd, *peer_pdd;
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+ uint32_t *devices_arr = NULL, i;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+ if (!args->n_devices) {
+ pr_debug("Device IDs array empty\n");
+ return -EINVAL;
+ }
+ if (args->n_success > args->n_devices) {
+ pr_debug("n_success exceeds n_devices\n");
+ return -EINVAL;
+ }
+
+ devices_arr = kmalloc(args->n_devices * sizeof(*devices_arr),
+ GFP_KERNEL);
+ if (!devices_arr)
+ return -ENOMEM;
+
+ err = copy_from_user(devices_arr,
+ (void __user *)args->device_ids_array_ptr,
+ args->n_devices * sizeof(*devices_arr));
+ if (err != 0) {
+ err = -EFAULT;
+ goto copy_from_user_failed;
+ }
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+ err = -EINVAL;
+ goto bind_process_to_device_failed;
+ }
+
+ mem = kfd_process_device_translate_handle(pdd,
+ GET_IDR_HANDLE(args->handle));
+ if (!mem) {
+ err = -ENOMEM;
+ goto get_mem_obj_from_handle_failed;
+ }
+
+ for (i = args->n_success; i < args->n_devices; i++) {
+ peer = kfd_device_by_id(devices_arr[i]);
+ if (!peer) {
+ err = -EINVAL;
+ goto get_mem_obj_from_handle_failed;
+ }
+
+ peer_pdd = kfd_get_process_device_data(peer, p);
+ if (!peer_pdd) {
+ err = -ENODEV;
+ goto get_mem_obj_from_handle_failed;
+ }
+ err = peer->kfd2kgd->unmap_memory_to_gpu(
+ peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
+ if (err) {
+ pr_err("Failed to unmap from gpu %d/%d\n",
+ i, args->n_devices);
+ goto unmap_memory_from_gpu_failed;
+ }
+ args->n_success = i+1;
+ }
+ kfree(devices_arr);
+
+ mutex_unlock(&p->mutex);
+
+ return 0;
+
+bind_process_to_device_failed:
+get_mem_obj_from_handle_failed:
+unmap_memory_from_gpu_failed:
+ mutex_unlock(&p->mutex);
+copy_from_user_failed:
+ kfree(devices_arr);
+ return err;
+}
+
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
.cmd_drv = 0, .name = #ioctl}
@@ -979,7 +1526,29 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
kfd_ioctl_set_scratch_backing_va, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
- kfd_ioctl_get_tile_config, 0)
+ kfd_ioctl_get_tile_config, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
+ kfd_ioctl_set_trap_handler, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
+ kfd_ioctl_get_process_apertures_new, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
+ kfd_ioctl_acquire_vm, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
+ kfd_ioctl_alloc_memory_of_gpu, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
+ kfd_ioctl_free_memory_of_gpu, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
+ kfd_ioctl_map_memory_to_gpu, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
+ kfd_ioctl_unmap_memory_from_gpu, 0),
+
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
@@ -1088,6 +1657,10 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
KFD_MMAP_EVENTS_MASK) {
vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_EVENTS_MASK;
return kfd_event_mmap(process, vma);
+ } else if ((vma->vm_pgoff & KFD_MMAP_RESERVED_MEM_MASK) ==
+ KFD_MMAP_RESERVED_MEM_MASK) {
+ vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_RESERVED_MEM_MASK;
+ return kfd_reserved_mem_mmap(process, vma);
}
return -EFAULT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
new file mode 100644
index 0000000..4f126ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -0,0 +1,1262 @@
+/*
+ * Copyright 2015-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include "kfd_crat.h"
+#include "kfd_priv.h"
+#include "kfd_topology.h"
+#include "kfd_iommu.h"
+
+/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
+ * GPU processor IDs are expressed with Bit[31]=1.
+ * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
+ * used in the CRAT.
+ */
+static uint32_t gpu_processor_id_low = 0x80001000;
+
+/* Return the next available gpu_processor_id and increment it for next GPU
+ * @total_cu_count - Total CUs present in the GPU including ones
+ * masked off
+ */
+static inline unsigned int get_and_inc_gpu_processor_id(
+ unsigned int total_cu_count)
+{
+ int current_id = gpu_processor_id_low;
+
+ gpu_processor_id_low += total_cu_count;
+ return current_id;
+}
+
+/* Static table to describe GPU Cache information */
+struct kfd_gpu_cache_info {
+ uint32_t cache_size;
+ uint32_t cache_level;
+ uint32_t flags;
+ /* Indicates how many Compute Units share this cache
+ * Value = 1 indicates the cache is not shared
+ */
+ uint32_t num_cu_shared;
+};
+
+static struct kfd_gpu_cache_info kaveri_cache_info[] = {
+ {
+ /* TCP L1 Cache per CU */
+ .cache_size = 16,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 1,
+
+ },
+ {
+ /* Scalar L1 Instruction Cache (in SQC module) per bank */
+ .cache_size = 16,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_INST_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 2,
+ },
+ {
+ /* Scalar L1 Data Cache (in SQC module) per bank */
+ .cache_size = 8,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 2,
+ },
+
+ /* TODO: Add L2 Cache information */
+};
+
+
+static struct kfd_gpu_cache_info carrizo_cache_info[] = {
+ {
+ /* TCP L1 Cache per CU */
+ .cache_size = 16,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 1,
+ },
+ {
+ /* Scalar L1 Instruction Cache (in SQC module) per bank */
+ .cache_size = 8,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_INST_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 4,
+ },
+ {
+ /* Scalar L1 Data Cache (in SQC module) per bank. */
+ .cache_size = 4,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 4,
+ },
+
+ /* TODO: Add L2 Cache information */
+};
+
+/* NOTE: In future if more information is added to struct kfd_gpu_cache_info
+ * the following ASICs may need a separate table.
+ */
+#define hawaii_cache_info kaveri_cache_info
+#define tonga_cache_info carrizo_cache_info
+#define fiji_cache_info carrizo_cache_info
+#define polaris10_cache_info carrizo_cache_info
+#define polaris11_cache_info carrizo_cache_info
+
+static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+{
+ dev->node_props.cpu_cores_count = cu->num_cpu_cores;
+ dev->node_props.cpu_core_id_base = cu->processor_id_low;
+ if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
+ dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
+
+ pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
+ cu->processor_id_low);
+}
+
+static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+{
+ dev->node_props.simd_id_base = cu->processor_id_low;
+ dev->node_props.simd_count = cu->num_simd_cores;
+ dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
+ dev->node_props.max_waves_per_simd = cu->max_waves_simd;
+ dev->node_props.wave_front_size = cu->wave_front_size;
+ dev->node_props.array_count = cu->array_count;
+ dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
+ dev->node_props.simd_per_cu = cu->num_simd_per_cu;
+ dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
+ if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
+ dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
+ pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
+}
+
+/* kfd_parse_subtype_cu - parse compute unit subtypes and attach it to correct
+ * topology device present in the device_list
+ */
+static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
+ struct list_head *device_list)
+{
+ struct kfd_topology_device *dev;
+
+ pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
+ cu->proximity_domain, cu->hsa_capability);
+ list_for_each_entry(dev, device_list, list) {
+ if (cu->proximity_domain == dev->proximity_domain) {
+ if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
+ kfd_populated_cu_info_cpu(dev, cu);
+
+ if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
+ kfd_populated_cu_info_gpu(dev, cu);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* kfd_parse_subtype_mem - parse memory subtypes and attach it to correct
+ * topology device present in the device_list
+ */
+static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
+ struct list_head *device_list)
+{
+ struct kfd_mem_properties *props;
+ struct kfd_topology_device *dev;
+
+ pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
+ mem->proximity_domain);
+ list_for_each_entry(dev, device_list, list) {
+ if (mem->proximity_domain == dev->proximity_domain) {
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ /* We're on GPU node */
+ if (dev->node_props.cpu_cores_count == 0) {
+ /* APU */
+ if (mem->visibility_type == 0)
+ props->heap_type =
+ HSA_MEM_HEAP_TYPE_FB_PRIVATE;
+ /* dGPU */
+ else
+ props->heap_type = mem->visibility_type;
+ } else
+ props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+
+ if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
+ props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+ if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
+ props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+
+ props->size_in_bytes =
+ ((uint64_t)mem->length_high << 32) +
+ mem->length_low;
+ props->width = mem->width;
+
+ dev->node_props.mem_banks_count++;
+ list_add_tail(&props->list, &dev->mem_props);
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* kfd_parse_subtype_cache - parse cache subtypes and attach it to correct
+ * topology device present in the device_list
+ */
+static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
+ struct list_head *device_list)
+{
+ struct kfd_cache_properties *props;
+ struct kfd_topology_device *dev;
+ uint32_t id;
+ uint32_t total_num_of_cu;
+
+ id = cache->processor_id_low;
+
+ pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
+ list_for_each_entry(dev, device_list, list) {
+ total_num_of_cu = (dev->node_props.array_count *
+ dev->node_props.cu_per_simd_array);
+
+ /* Cache information in CRAT doesn't have proximity_domain
+ * information as it is associated with a CPU core or GPU
+ * Compute Unit. So map the cache using CPU core Id or SIMD
+ * (GPU) ID.
+ * TODO: This works because currently we can safely assume that
+ * Compute Units are parsed before caches are parsed. In
+ * future, remove this dependency
+ */
+ if ((id >= dev->node_props.cpu_core_id_base &&
+ id <= dev->node_props.cpu_core_id_base +
+ dev->node_props.cpu_cores_count) ||
+ (id >= dev->node_props.simd_id_base &&
+ id < dev->node_props.simd_id_base +
+ total_num_of_cu)) {
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ props->processor_id_low = id;
+ props->cache_level = cache->cache_level;
+ props->cache_size = cache->cache_size;
+ props->cacheline_size = cache->cache_line_size;
+ props->cachelines_per_tag = cache->lines_per_tag;
+ props->cache_assoc = cache->associativity;
+ props->cache_latency = cache->cache_latency;
+ memcpy(props->sibling_map, cache->sibling_map,
+ sizeof(props->sibling_map));
+
+ if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_DATA;
+ if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
+ if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_CPU;
+ if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_HSACU;
+
+ dev->cache_count++;
+ dev->node_props.caches_count++;
+ list_add_tail(&props->list, &dev->cache_props);
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* kfd_parse_subtype_iolink - parse iolink subtypes and attach it to correct
+ * topology device present in the device_list
+ */
+static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
+ struct list_head *device_list)
+{
+ struct kfd_iolink_properties *props = NULL, *props2;
+ struct kfd_topology_device *dev, *cpu_dev;
+ uint32_t id_from;
+ uint32_t id_to;
+
+ id_from = iolink->proximity_domain_from;
+ id_to = iolink->proximity_domain_to;
+
+ pr_debug("Found IO link entry in CRAT table with id_from=%d\n",
+ id_from);
+ list_for_each_entry(dev, device_list, list) {
+ if (id_from == dev->proximity_domain) {
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ props->node_from = id_from;
+ props->node_to = id_to;
+ props->ver_maj = iolink->version_major;
+ props->ver_min = iolink->version_minor;
+ props->iolink_type = iolink->io_interface_type;
+
+ if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
+ props->weight = 20;
+ else
+ props->weight = node_distance(id_from, id_to);
+
+ props->min_latency = iolink->minimum_latency;
+ props->max_latency = iolink->maximum_latency;
+ props->min_bandwidth = iolink->minimum_bandwidth_mbs;
+ props->max_bandwidth = iolink->maximum_bandwidth_mbs;
+ props->rec_transfer_size =
+ iolink->recommended_transfer_size;
+
+ dev->io_link_count++;
+ dev->node_props.io_links_count++;
+ list_add_tail(&props->list, &dev->io_link_props);
+ break;
+ }
+ }
+
+ /* CPU topology is created before GPUs are detected, so CPU->GPU
+ * links are not built at that time. If a PCIe type is discovered, it
+ * means a GPU is detected and we are adding a GPU->CPU link to the topology.
+ * At this time, also add the corresponding CPU->GPU link.
+ */
+ if (props && props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS) {
+ cpu_dev = kfd_topology_device_by_proximity_domain(id_to);
+ if (!cpu_dev)
+ return -ENODEV;
+ /* same everything but the other direction */
+ props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
+ if (!props2)
+ return -ENOMEM;
+ props2->node_from = id_to;
+ props2->node_to = id_from;
+ props2->kobj = NULL;
+ cpu_dev->io_link_count++;
+ cpu_dev->node_props.io_links_count++;
+ list_add_tail(&props2->list, &cpu_dev->io_link_props);
+ }
+
+ return 0;
+}
+
+/* kfd_parse_subtype - parse subtypes and attach it to correct topology device
+ * present in the device_list
+ * @sub_type_hdr - subtype section of crat_image
+ * @device_list - list of topology devices present in this crat_image
+ */
+static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
+ struct list_head *device_list)
+{
+ struct crat_subtype_computeunit *cu;
+ struct crat_subtype_memory *mem;
+ struct crat_subtype_cache *cache;
+ struct crat_subtype_iolink *iolink;
+ int ret = 0;
+
+ switch (sub_type_hdr->type) {
+ case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
+ cu = (struct crat_subtype_computeunit *)sub_type_hdr;
+ ret = kfd_parse_subtype_cu(cu, device_list);
+ break;
+ case CRAT_SUBTYPE_MEMORY_AFFINITY:
+ mem = (struct crat_subtype_memory *)sub_type_hdr;
+ ret = kfd_parse_subtype_mem(mem, device_list);
+ break;
+ case CRAT_SUBTYPE_CACHE_AFFINITY:
+ cache = (struct crat_subtype_cache *)sub_type_hdr;
+ ret = kfd_parse_subtype_cache(cache, device_list);
+ break;
+ case CRAT_SUBTYPE_TLB_AFFINITY:
+ /*
+ * For now, nothing to do here
+ */
+ pr_debug("Found TLB entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
+ /*
+ * For now, nothing to do here
+ */
+ pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_IOLINK_AFFINITY:
+ iolink = (struct crat_subtype_iolink *)sub_type_hdr;
+ ret = kfd_parse_subtype_iolink(iolink, device_list);
+ break;
+ default:
+ pr_warn("Unknown subtype %d in CRAT\n",
+ sub_type_hdr->type);
+ }
+
+ return ret;
+}
+
+/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
+ * create a kfd_topology_device and add in to device_list. Also parse
+ * CRAT subtypes and attach it to appropriate kfd_topology_device
+ * @crat_image - input image containing CRAT
+ * @device_list - [OUT] list of kfd_topology_device generated after
+ * parsing crat_image
+ * @proximity_domain - Proximity domain of the first device in the table
+ *
+ * Return - 0 if successful else -ve value
+ */
+int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
+ uint32_t proximity_domain)
+{
+ struct kfd_topology_device *top_dev = NULL;
+ struct crat_subtype_generic *sub_type_hdr;
+ uint16_t node_id;
+ int ret = 0;
+ struct crat_header *crat_table = (struct crat_header *)crat_image;
+ uint16_t num_nodes;
+ uint32_t image_len;
+
+ if (!crat_image)
+ return -EINVAL;
+
+ if (!list_empty(device_list)) {
+ pr_warn("Error device list should be empty\n");
+ return -EINVAL;
+ }
+
+ num_nodes = crat_table->num_domains;
+ image_len = crat_table->length;
+
+ pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+
+ for (node_id = 0; node_id < num_nodes; node_id++) {
+ top_dev = kfd_create_topology_device(device_list);
+ if (!top_dev)
+ break;
+ top_dev->proximity_domain = proximity_domain++;
+ }
+
+ if (!top_dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
+ memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
+ CRAT_OEMTABLEID_LENGTH);
+ top_dev->oem_revision = crat_table->oem_revision;
+
+ sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
+ while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
+ ((char *)crat_image) + image_len) {
+ if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
+ ret = kfd_parse_subtype(sub_type_hdr, device_list);
+ if (ret)
+ break;
+ }
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+ }
+
+err:
+ if (ret)
+ kfd_release_topology_device_list(device_list);
+
+ return ret;
+}
+
+/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
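+/* Returns 0 when a cache entry was written, 1 when the CU block is inactive
+ * and no entry was emitted, or -ENOMEM when there is no room left in pcache.
+ */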
+static int fill_in_pcache(struct crat_subtype_cache *pcache,
+ struct kfd_gpu_cache_info *pcache_info,
+ struct kfd_cu_info *cu_info,
+ int mem_available,
+ int cu_bitmask,
+ int cache_type, unsigned int cu_processor_id,
+ int cu_block)
+{
+ unsigned int cu_sibling_map_mask;
+ int first_active_cu;
+
+ /* First check if enough memory is available */
+ if (sizeof(struct crat_subtype_cache) > mem_available)
+ return -ENOMEM;
+
+ cu_sibling_map_mask = cu_bitmask;
+ cu_sibling_map_mask >>= cu_block;
+ cu_sibling_map_mask &=
+ ((1 << pcache_info[cache_type].num_cu_shared) - 1);
+ first_active_cu = ffs(cu_sibling_map_mask);
+
+ /* The CU could be inactive. In case of a shared cache, find the first
+ * active CU; in case of a non-shared cache, check whether the CU is
+ * inactive and, if so, skip it
+ */
+ if (first_active_cu) {
+ memset(pcache, 0, sizeof(struct crat_subtype_cache));
+ pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
+ pcache->length = sizeof(struct crat_subtype_cache);
+ pcache->flags = pcache_info[cache_type].flags;
+ pcache->processor_id_low = cu_processor_id
+ + (first_active_cu - 1);
+ pcache->cache_level = pcache_info[cache_type].cache_level;
+ pcache->cache_size = pcache_info[cache_type].cache_size;
+
+ /* Sibling map is w.r.t processor_id_low, so shift out
+ * inactive CU
+ */
+ cu_sibling_map_mask =
+ cu_sibling_map_mask >> (first_active_cu - 1);
+
+ pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
+ pcache->sibling_map[1] =
+ (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
+ pcache->sibling_map[2] =
+ (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
+ pcache->sibling_map[3] =
+ (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
+ return 0;
+ }
+ return 1;
+}
+
+/* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
+ * tables
+ *
+ * @kdev - [IN] GPU device
+ * @gpu_processor_id - [IN] GPU processor ID with which these caches
+ * are associated
+ * @available_size - [IN] Amount of memory available in pcache
+ * @cu_info - [IN] Compute Unit info obtained from KGD
+ * @pcache - [OUT] memory into which cache data is to be filled in.
+ * @size_filled - [OUT] amount of data used up in pcache.
+ * @num_of_entries - [OUT] number of caches added
+ */
+static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
+ int gpu_processor_id,
+ int available_size,
+ struct kfd_cu_info *cu_info,
+ struct crat_subtype_cache *pcache,
+ int *size_filled,
+ int *num_of_entries)
+{
+ struct kfd_gpu_cache_info *pcache_info;
+ int num_of_cache_types = 0;
+ int i, j, k;
+ int ct = 0;
+ int mem_available = available_size;
+ unsigned int cu_processor_id;
+ int ret;
+
+ switch (kdev->device_info->asic_family) {
+ case CHIP_KAVERI:
+ pcache_info = kaveri_cache_info;
+ num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
+ break;
+ case CHIP_HAWAII:
+ pcache_info = hawaii_cache_info;
+ num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
+ break;
+ case CHIP_CARRIZO:
+ pcache_info = carrizo_cache_info;
+ num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
+ break;
+ case CHIP_TONGA:
+ pcache_info = tonga_cache_info;
+ num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
+ break;
+ case CHIP_FIJI:
+ pcache_info = fiji_cache_info;
+ num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
+ break;
+ case CHIP_POLARIS10:
+ pcache_info = polaris10_cache_info;
+ num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
+ break;
+ case CHIP_POLARIS11:
+ pcache_info = polaris11_cache_info;
+ num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *size_filled = 0;
+ *num_of_entries = 0;
+
+ /* For each type of cache listed in the kfd_gpu_cache_info table,
+ * go through all available Compute Units.
+ * The [i,j,k] loop:
+ * if kfd_gpu_cache_info.num_cu_shared == 1,
+ * parses through every available CU;
+ * if kfd_gpu_cache_info.num_cu_shared != 1,
+ * considers only one CU from each group of
+ * CUs that share the cache
+ */
+
+ for (ct = 0; ct < num_of_cache_types; ct++) {
+ cu_processor_id = gpu_processor_id;
+ for (i = 0; i < cu_info->num_shader_engines; i++) {
+ for (j = 0; j < cu_info->num_shader_arrays_per_engine;
+ j++) {
+ for (k = 0; k < cu_info->num_cu_per_sh;
+ k += pcache_info[ct].num_cu_shared) {
+
+ ret = fill_in_pcache(pcache,
+ pcache_info,
+ cu_info,
+ mem_available,
+ cu_info->cu_bitmap[i][j],
+ ct,
+ cu_processor_id,
+ k);
+
+ if (ret < 0)
+ break;
+
+ if (!ret) {
+ pcache++;
+ (*num_of_entries)++;
+ mem_available -=
+ sizeof(*pcache);
+ (*size_filled) +=
+ sizeof(*pcache);
+ }
+
+ /* Move to next CU block */
+ cu_processor_id +=
+ pcache_info[ct].num_cu_shared;
+ }
+ }
+ }
+ }
+
+ pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);
+
+ return 0;
+}
+
+/*
+ * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
+ * copies CRAT from ACPI (if available).
+ * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
+ *
+ * @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
+ * crat_image will be NULL
+ * @size: [OUT] size of crat_image
+ *
+ * Return 0 if successful else return error code
+ */
+int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
+{
+ struct acpi_table_header *crat_table;
+ acpi_status status;
+ void *pcrat_image;
+
+ if (!crat_image)
+ return -EINVAL;
+
+ *crat_image = NULL;
+
+ /* Fetch the CRAT table from ACPI */
+ status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
+ if (status == AE_NOT_FOUND) {
+ pr_warn("CRAT table not found\n");
+ return -ENODATA;
+ } else if (ACPI_FAILURE(status)) {
+ const char *err = acpi_format_exception(status);
+
+ pr_err("CRAT table error: %s\n", err);
+ return -EINVAL;
+ }
+
+ if (ignore_crat) {
+ pr_info("CRAT table disabled by module option\n");
+ return -ENODATA;
+ }
+
+ pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+ if (!pcrat_image)
+ return -ENOMEM;
+
+ memcpy(pcrat_image, crat_table, crat_table->length);
+
+ *crat_image = pcrat_image;
+ *size = crat_table->length;
+
+ return 0;
+}
+
+/* Memory required to create Virtual CRAT.
+ * Since there is no easy way to predict the amount of memory required, the
+ * following amounts are allocated for the CPU and GPU Virtual CRATs. This is
+ * expected to cover all known conditions, but to be safe an additional check
+ * is put in the code to ensure we don't overwrite.
+ */
+#define VCRAT_SIZE_FOR_CPU (2 * PAGE_SIZE)
+#define VCRAT_SIZE_FOR_GPU (3 * PAGE_SIZE)
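+/* With 4 KiB pages this amounts to 8 KiB for the CPU VCRAT and 12 KiB per
+ * GPU VCRAT.
+ */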
+
+/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
+ *
+ * @numa_node_id: CPU NUMA node id
+ * @avail_size: Available size remaining in the crat_image buffer
+ * @proximity_domain: Proximity domain of the CPU node
+ * @sub_type_hdr: Memory into which compute info will be filled in
+ *
+ * Return 0 if successful else return -ve value
+ */
+static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
+ int proximity_domain,
+ struct crat_subtype_computeunit *sub_type_hdr)
+{
+ const struct cpumask *cpumask;
+
+ *avail_size -= sizeof(struct crat_subtype_computeunit);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
+
+ /* Fill in subtype header data */
+ sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
+ sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ cpumask = cpumask_of_node(numa_node_id);
+
+ /* Fill in CU data */
+ sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
+ sub_type_hdr->proximity_domain = proximity_domain;
+ sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
+ if (sub_type_hdr->processor_id_low == -1)
+ return -EINVAL;
+
+ sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
+
+ return 0;
+}
+
+/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
+ *
+ * @numa_node_id: CPU NUMA node id
+ * @avail_size: Available size in the memory
+ * @proximity_domain: Proximity domain to report for this node
+ * @sub_type_hdr: Memory into which memory info will be filled in
+ *
+ * Return 0 if successful else return -ve value
+ */
+static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
+ int proximity_domain,
+ struct crat_subtype_memory *sub_type_hdr)
+{
+ uint64_t mem_in_bytes = 0;
+ pg_data_t *pgdat;
+ int zone_type;
+
+ *avail_size -= sizeof(struct crat_subtype_memory);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
+
+ /* Fill in subtype header data */
+ sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_memory);
+ sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ /* Fill in Memory Subunit data */
+
+ /* Unlike si_meminfo, si_meminfo_node is not exported. So
+ * the following lines are duplicated from si_meminfo_node
+ * function
+ */
+ pgdat = NODE_DATA(numa_node_id);
+ for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+ mem_in_bytes += pgdat->node_zones[zone_type].managed_pages;
+ mem_in_bytes <<= PAGE_SHIFT;
+
+ sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
+ sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
+ sub_type_hdr->proximity_domain = proximity_domain;
+
+ return 0;
+}
+
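+/* kfd_fill_iolink_info_for_cpu - Fill in IO links from the given CPU NUMA
+ * node to every other online NUMA node
+ *
+ * @numa_node_id: CPU NUMA node id
+ * @avail_size: Available size in the memory
+ * @num_entries: [OUT] number of IO link entries filled in
+ * @sub_type_hdr: Memory into which io link info will be filled in
+ *
+ * Return 0 if successful else return -ve value
+ */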
+static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
+ uint32_t *num_entries,
+ struct crat_subtype_iolink *sub_type_hdr)
+{
+ int nid;
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ uint8_t link_type;
+
+ if (c->x86_vendor == X86_VENDOR_AMD)
+ link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
+ else
+ link_type = CRAT_IOLINK_TYPE_QPI_1_1;
+
+ *num_entries = 0;
+
+ /* Create IO links from this node to other CPU nodes */
+ for_each_online_node(nid) {
+ if (nid == numa_node_id) /* node itself */
+ continue;
+
+ *avail_size -= sizeof(struct crat_subtype_iolink);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
+
+ /* Fill in subtype header data */
+ sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
+ sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ /* Fill in IO link data */
+ sub_type_hdr->proximity_domain_from = numa_node_id;
+ sub_type_hdr->proximity_domain_to = nid;
+ sub_type_hdr->io_interface_type = link_type;
+
+ (*num_entries)++;
+ sub_type_hdr++;
+ }
+
+ return 0;
+}
+
+/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
+ *
+ * @pcrat_image: Fill in VCRAT for CPU
+ * @size: [IN] allocated size of crat_image.
+ * [OUT] actual size of data filled in crat_image
+ */
+static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
+{
+ struct crat_header *crat_table = (struct crat_header *)pcrat_image;
+ struct acpi_table_header *acpi_table;
+ acpi_status status;
+ struct crat_subtype_generic *sub_type_hdr;
+ int avail_size = *size;
+ int numa_node_id;
+ uint32_t entries = 0;
+ int ret = 0;
+
+ if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
+ return -EINVAL;
+
+ /* Fill in CRAT Header.
+ * Modify length and total_entries as subunits are added.
+ */
+ avail_size -= sizeof(struct crat_header);
+ if (avail_size < 0)
+ return -ENOMEM;
+
+ memset(crat_table, 0, sizeof(struct crat_header));
+ memcpy(&crat_table->signature, CRAT_SIGNATURE,
+ sizeof(crat_table->signature));
+ crat_table->length = sizeof(struct crat_header);
+
+ status = acpi_get_table("DSDT", 0, &acpi_table);
+ if (status != AE_OK)
+ pr_warn("DSDT table not found for OEM information\n");
+ else {
+ crat_table->oem_revision = acpi_table->revision;
+ memcpy(crat_table->oem_id, acpi_table->oem_id,
+ CRAT_OEMID_LENGTH);
+ memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
+ CRAT_OEMTABLEID_LENGTH);
+ }
+ crat_table->total_entries = 0;
+ crat_table->num_domains = 0;
+
+ sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
+
+ for_each_online_node(numa_node_id) {
+ if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
+ continue;
+
+ /* Fill in Subtype: Compute Unit */
+ ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
+ crat_table->num_domains,
+ (struct crat_subtype_computeunit *)sub_type_hdr);
+ if (ret < 0)
+ return ret;
+ crat_table->length += sub_type_hdr->length;
+ crat_table->total_entries++;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+
+ /* Fill in Subtype: Memory */
+ ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
+ crat_table->num_domains,
+ (struct crat_subtype_memory *)sub_type_hdr);
+ if (ret < 0)
+ return ret;
+ crat_table->length += sub_type_hdr->length;
+ crat_table->total_entries++;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+
+ /* Fill in Subtype: IO Link */
+ ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
+ &entries,
+ (struct crat_subtype_iolink *)sub_type_hdr);
+ if (ret < 0)
+ return ret;
+ crat_table->length += (sub_type_hdr->length * entries);
+ crat_table->total_entries += entries;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length * entries);
+
+ crat_table->num_domains++;
+ }
+
+ /* TODO: Add cache Subtype for CPU.
+ * Currently, CPU cache information is available in the function
+ * detect_cache_attributes(cpu) defined in
+ * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
+ * exported, so the code would need to be duplicated to obtain the
+ * same information.
+ */
+
+ *size = crat_table->length;
+ pr_info("Virtual CRAT table created for CPU\n");
+
+ return 0;
+}
+
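+/* kfd_fill_gpu_memory_affinity - Fill in memory affinity subtype for the
+ * GPU local memory
+ *
+ * @avail_size: Available size in the memory
+ * @kdev: [IN] GPU device
+ * @type: Heap visibility type reported for this memory
+ * @size: Size of the memory heap in bytes
+ * @sub_type_hdr: Memory into which memory info will be filled in
+ * @proximity_domain: Proximity domain of the GPU node
+ * @local_mem_info: [IN] local memory info of the GPU device
+ *
+ * Return 0 if successful else return -ve value
+ */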
+static int kfd_fill_gpu_memory_affinity(int *avail_size,
+ struct kfd_dev *kdev, uint8_t type, uint64_t size,
+ struct crat_subtype_memory *sub_type_hdr,
+ uint32_t proximity_domain,
+ const struct kfd_local_mem_info *local_mem_info)
+{
+ *avail_size -= sizeof(struct crat_subtype_memory);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
+ sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_memory);
+ sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ sub_type_hdr->proximity_domain = proximity_domain;
+
+ pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
+ type, size);
+
+ sub_type_hdr->length_low = lower_32_bits(size);
+ sub_type_hdr->length_high = upper_32_bits(size);
+
+ sub_type_hdr->width = local_mem_info->vram_width;
+ sub_type_hdr->visibility_type = type;
+
+ return 0;
+}
+
+/* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU
+ * to its NUMA node
+ *
+ * @avail_size: Available size in the memory
+ * @kdev: [IN] GPU device
+ * @sub_type_hdr: Memory into which io link info will be filled in
+ * @proximity_domain: Proximity domain of the GPU node
+ *
+ * Return 0 if successful else return -ve value
+ */
+static int kfd_fill_gpu_direct_io_link(int *avail_size,
+ struct kfd_dev *kdev,
+ struct crat_subtype_iolink *sub_type_hdr,
+ uint32_t proximity_domain)
+{
+ *avail_size -= sizeof(struct crat_subtype_iolink);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
+
+ /* Fill in subtype header data */
+ sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
+ sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ /* Fill in IOLINK subtype.
+ * TODO: Fill-in other fields of iolink subtype
+ */
+ sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
+ sub_type_hdr->proximity_domain_from = proximity_domain;
+#ifdef CONFIG_NUMA
+ if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
+ sub_type_hdr->proximity_domain_to = 0;
+ else
+ sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
+#else
+ sub_type_hdr->proximity_domain_to = 0;
+#endif
+ return 0;
+}
+
+/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
+ *
+ * @pcrat_image: Fill in VCRAT for GPU
+ * @size: [IN] allocated size of crat_image.
+ * [OUT] actual size of data filled in crat_image
+ * @kdev: [IN] GPU device
+ * @proximity_domain: Proximity domain of the GPU node
+ */
+static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ size_t *size, struct kfd_dev *kdev,
+ uint32_t proximity_domain)
+{
+ struct crat_header *crat_table = (struct crat_header *)pcrat_image;
+ struct crat_subtype_generic *sub_type_hdr;
+ struct crat_subtype_computeunit *cu;
+ struct kfd_cu_info cu_info;
+ int avail_size = *size;
+ uint32_t total_num_of_cu;
+ int num_of_cache_entries = 0;
+ int cache_mem_filled = 0;
+ int ret = 0;
+ struct kfd_local_mem_info local_mem_info;
+
+ if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
+ return -EINVAL;
+
+ /* Fill the CRAT Header.
+ * Modify length and total_entries as subunits are added.
+ */
+ avail_size -= sizeof(struct crat_header);
+ if (avail_size < 0)
+ return -ENOMEM;
+
+ memset(crat_table, 0, sizeof(struct crat_header));
+
+ memcpy(&crat_table->signature, CRAT_SIGNATURE,
+ sizeof(crat_table->signature));
+ /* Change length as we add more subtypes */
+ crat_table->length = sizeof(struct crat_header);
+ crat_table->num_domains = 1;
+ crat_table->total_entries = 0;
+
+ /* Fill in Subtype: Compute Unit
+ * First fill in the sub type header and then sub type data
+ */
+ avail_size -= sizeof(struct crat_subtype_computeunit);
+ if (avail_size < 0)
+ return -ENOMEM;
+
+ sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
+ memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
+
+ sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
+ sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ /* Fill CU subtype data */
+ cu = (struct crat_subtype_computeunit *)sub_type_hdr;
+ cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
+ cu->proximity_domain = proximity_domain;
+
+ kdev->kfd2kgd->get_cu_info(kdev->kgd, &cu_info);
+ cu->num_simd_per_cu = cu_info.simd_per_cu;
+ cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
+ cu->max_waves_simd = cu_info.max_waves_per_simd;
+
+ cu->wave_front_size = cu_info.wave_front_size;
+ cu->array_count = cu_info.num_shader_arrays_per_engine *
+ cu_info.num_shader_engines;
+ total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
+ cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
+ cu->num_cu_per_array = cu_info.num_cu_per_sh;
+ cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
+ cu->num_banks = cu_info.num_shader_engines;
+ cu->lds_size_in_kb = cu_info.lds_size;
+
+ cu->hsa_capability = 0;
+
+ /* Check if this node supports IOMMU. During parsing this flag will
+ * translate to HSA_CAP_ATS_PRESENT
+ */
+ if (!kfd_iommu_check_device(kdev))
+ cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
+
+ crat_table->length += sub_type_hdr->length;
+ crat_table->total_entries++;
+
+ /* Fill in Subtype: Memory. Only on systems with large BAR (no
+ * private FB), report memory as public. On other systems
+ * report the total FB size (public+private) as a single
+ * private heap.
+ */
+ kdev->kfd2kgd->get_local_mem_info(kdev->kgd, &local_mem_info);
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+
+ if (debug_largebar)
+ local_mem_info.local_mem_size_private = 0;
+
+ if (local_mem_info.local_mem_size_private == 0)
+ ret = kfd_fill_gpu_memory_affinity(&avail_size,
+ kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
+ local_mem_info.local_mem_size_public,
+ (struct crat_subtype_memory *)sub_type_hdr,
+ proximity_domain,
+ &local_mem_info);
+ else
+ ret = kfd_fill_gpu_memory_affinity(&avail_size,
+ kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
+ local_mem_info.local_mem_size_public +
+ local_mem_info.local_mem_size_private,
+ (struct crat_subtype_memory *)sub_type_hdr,
+ proximity_domain,
+ &local_mem_info);
+ if (ret < 0)
+ return ret;
+
+ crat_table->length += sizeof(struct crat_subtype_memory);
+ crat_table->total_entries++;
+
+ /* TODO: Fill in cache information. This information is NOT readily
+ * available in KGD
+ */
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+ ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
+ avail_size,
+ &cu_info,
+ (struct crat_subtype_cache *)sub_type_hdr,
+ &cache_mem_filled,
+ &num_of_cache_entries);
+
+ if (ret < 0)
+ return ret;
+
+ crat_table->length += cache_mem_filled;
+ crat_table->total_entries += num_of_cache_entries;
+ avail_size -= cache_mem_filled;
+
+ /* Fill in Subtype: IO_LINKS
+ * Only the direct link from the GPU to its NUMA node is added here.
+ * Indirect links are added by userspace.
+ */
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ cache_mem_filled);
+ ret = kfd_fill_gpu_direct_io_link(&avail_size, kdev,
+ (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
+
+ if (ret < 0)
+ return ret;
+
+ crat_table->length += sub_type_hdr->length;
+ crat_table->total_entries++;
+
+ *size = crat_table->length;
+ pr_info("Virtual CRAT table created for GPU\n");
+
+ return ret;
+}
+
+/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
+ * creates a Virtual CRAT (VCRAT) image
+ *
+ * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
+ *
+ * @crat_image: VCRAT image created because ACPI does not have a
+ * CRAT for this device
+ * @size: [OUT] size of virtual crat_image
+ * @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
+ * COMPUTE_UNIT_GPU - Create VCRAT for GPU
+ * (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
+ * -- this option is not currently implemented.
+ * The assumption is that all AMD APUs will have CRAT
+ * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
+ *
+ * Return 0 if successful else return -ve value
+ */
+int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+ int flags, struct kfd_dev *kdev,
+ uint32_t proximity_domain)
+{
+ void *pcrat_image = NULL;
+ int ret = 0;
+
+ if (!crat_image)
+ return -EINVAL;
+
+ *crat_image = NULL;
+
+ /* Allocate VCRAT_SIZE_FOR_CPU for the CPU virtual CRAT image and
+ * VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT image. This should cover
+ * all current conditions. A check is in place to ensure we don't write
+ * beyond the allocated size.
+ */
+ switch (flags) {
+ case COMPUTE_UNIT_CPU:
+ pcrat_image = kmalloc(VCRAT_SIZE_FOR_CPU, GFP_KERNEL);
+ if (!pcrat_image)
+ return -ENOMEM;
+ *size = VCRAT_SIZE_FOR_CPU;
+ ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
+ break;
+ case COMPUTE_UNIT_GPU:
+ if (!kdev)
+ return -EINVAL;
+ pcrat_image = kmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
+ if (!pcrat_image)
+ return -ENOMEM;
+ *size = VCRAT_SIZE_FOR_GPU;
+ ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
+ proximity_domain);
+ break;
+ case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
+ /* TODO: */
+ ret = -EINVAL;
+ pr_err("VCRAT not implemented for APU\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ *crat_image = pcrat_image;
+ else
+ kfree(pcrat_image);
+
+ return ret;
+}
+
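+/* Usage sketch (illustrative, not part of this patch): a caller would
+ * typically try the ACPI CRAT first, fall back to a virtual CRAT, and
+ * release the image with kfd_destroy_crat_image() when done:
+ *
+ * void *crat = NULL;
+ * size_t size = 0;
+ *
+ * if (kfd_create_crat_image_acpi(&crat, &size))
+ * kfd_create_crat_image_virtual(&crat, &size,
+ * COMPUTE_UNIT_CPU, NULL, 0);
+ * ...
+ * kfd_destroy_crat_image(crat);
+ */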
+
+/* kfd_destroy_crat_image
+ *
+ * @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
+ *
+ */
+void kfd_destroy_crat_image(void *crat_image)
+{
+ kfree(crat_image);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
index a374fa3..b5cd182 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -44,6 +44,10 @@
#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
+/* Compute Unit flags */
+#define COMPUTE_UNIT_CPU (1 << 0) /* Create Virtual CRAT for CPU */
+#define COMPUTE_UNIT_GPU (1 << 1) /* Create Virtual CRAT for GPU */
+
struct crat_header {
uint32_t signature;
uint32_t length;
@@ -105,7 +109,7 @@ struct crat_subtype_computeunit {
uint8_t wave_front_size;
uint8_t num_banks;
uint16_t micro_engine_id;
- uint8_t num_arrays;
+ uint8_t array_count;
uint8_t num_cu_per_array;
uint8_t num_simd_per_cu;
uint8_t max_slots_scatch_cu;
@@ -127,13 +131,14 @@ struct crat_subtype_memory {
uint8_t length;
uint16_t reserved;
uint32_t flags;
- uint32_t promixity_domain;
+ uint32_t proximity_domain;
uint32_t base_addr_low;
uint32_t base_addr_high;
uint32_t length_low;
uint32_t length_high;
uint32_t width;
- uint8_t reserved2[CRAT_MEMORY_RESERVED_LENGTH];
+ uint8_t visibility_type; /* for virtual (dGPU) CRAT */
+ uint8_t reserved2[CRAT_MEMORY_RESERVED_LENGTH - 1];
};
/*
@@ -222,9 +227,12 @@ struct crat_subtype_ccompute {
/*
* HSA IO Link Affinity structure and definitions
*/
-#define CRAT_IOLINK_FLAGS_ENABLED 0x00000001
-#define CRAT_IOLINK_FLAGS_COHERENCY 0x00000002
-#define CRAT_IOLINK_FLAGS_RESERVED 0xfffffffc
+#define CRAT_IOLINK_FLAGS_ENABLED (1 << 0)
+#define CRAT_IOLINK_FLAGS_NON_COHERENT (1 << 1)
+#define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
+#define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
+#define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
+#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0
/*
* IO interface types
@@ -232,10 +240,18 @@ struct crat_subtype_ccompute {
#define CRAT_IOLINK_TYPE_UNDEFINED 0
#define CRAT_IOLINK_TYPE_HYPERTRANSPORT 1
#define CRAT_IOLINK_TYPE_PCIEXPRESS 2
-#define CRAT_IOLINK_TYPE_OTHER 3
+#define CRAT_IOLINK_TYPE_AMBA 3
+#define CRAT_IOLINK_TYPE_MIPI 4
+#define CRAT_IOLINK_TYPE_QPI_1_1 5
+#define CRAT_IOLINK_TYPE_RESERVED1 6
+#define CRAT_IOLINK_TYPE_RESERVED2 7
+#define CRAT_IOLINK_TYPE_RAPID_IO 8
+#define CRAT_IOLINK_TYPE_INFINIBAND 9
+#define CRAT_IOLINK_TYPE_RESERVED3 10
+#define CRAT_IOLINK_TYPE_OTHER 11
#define CRAT_IOLINK_TYPE_MAX 255
-#define CRAT_IOLINK_RESERVED_LENGTH 24
+#define CRAT_IOLINK_RESERVED_LENGTH 24
struct crat_subtype_iolink {
uint8_t type;
@@ -291,4 +307,14 @@ struct cdit_header {
#pragma pack()
+struct kfd_dev;
+
+int kfd_create_crat_image_acpi(void **crat_image, size_t *size);
+void kfd_destroy_crat_image(void *crat_image);
+int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
+ uint32_t proximity_domain);
+int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+ int flags, struct kfd_dev *kdev,
+ uint32_t proximity_domain);
+
#endif /* KFD_CRAT_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index c407f6b..afb26f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -95,7 +95,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
ib_packet->bitfields3.ib_base_hi = largep->u.high_part;
ib_packet->control = (1 << 23) | (1 << 31) |
- ((size_in_bytes / sizeof(uint32_t)) & 0xfffff);
+ ((size_in_bytes / 4) & 0xfffff);
ib_packet->bitfields5.pasid = pasid;
@@ -126,8 +126,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
rm_packet->header.opcode = IT_RELEASE_MEM;
rm_packet->header.type = PM4_TYPE_3;
- rm_packet->header.count = sizeof(struct pm4__release_mem) /
- sizeof(unsigned int) - 2;
+ rm_packet->header.count = sizeof(struct pm4__release_mem) / 4 - 2;
rm_packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
rm_packet->bitfields2.event_index =
@@ -652,8 +651,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
packets_vec[0].header.opcode = IT_SET_UCONFIG_REG;
packets_vec[0].header.type = PM4_TYPE_3;
packets_vec[0].bitfields2.reg_offset =
- GRBM_GFX_INDEX / (sizeof(uint32_t)) -
- USERCONFIG_REG_BASE;
+ GRBM_GFX_INDEX / 4 - USERCONFIG_REG_BASE;
packets_vec[0].bitfields2.insert_vmid = 0;
packets_vec[0].reg_data[0] = reg_gfx_index.u32All;
@@ -661,8 +659,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
packets_vec[1].header.count = 1;
packets_vec[1].header.opcode = IT_SET_CONFIG_REG;
packets_vec[1].header.type = PM4_TYPE_3;
- packets_vec[1].bitfields2.reg_offset = SQ_CMD / (sizeof(uint32_t)) -
- AMD_CONFIG_REG_BASE;
+ packets_vec[1].bitfields2.reg_offset = SQ_CMD / 4 - AMD_CONFIG_REG_BASE;
packets_vec[1].bitfields2.vmid_shift = SQ_CMD_VMID_OFFSET;
packets_vec[1].bitfields2.insert_vmid = 1;
@@ -678,8 +675,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
packets_vec[2].ordinal1 = packets_vec[0].ordinal1;
packets_vec[2].bitfields2.reg_offset =
- GRBM_GFX_INDEX / (sizeof(uint32_t)) -
- USERCONFIG_REG_BASE;
+ GRBM_GFX_INDEX / 4 - USERCONFIG_REG_BASE;
packets_vec[2].bitfields2.insert_vmid = 0;
packets_vec[2].reg_data[0] = reg_gfx_index.u32All;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 3da25f7..9d4af96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -33,6 +33,7 @@
#include "kfd_pm4_headers_diq.h"
#include "kfd_dbgmgr.h"
#include "kfd_dbgdev.h"
+#include "kfd_device_queue_manager.h"
static DEFINE_MUTEX(kfd_dbgmgr_mutex);
@@ -83,7 +84,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
}
/* get actual type of DBGDevice cpsch or not */
- if (sched_policy == KFD_SCHED_POLICY_NO_HWS)
+ if (pdev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
type = DBGDEV_TYPE_NODIQ;
kfd_dbgdev_init(new_buff->dbgdev, pdev, type);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
new file mode 100644
index 0000000..4bd6ebf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "kfd_priv.h"
+
+static struct dentry *debugfs_root;
+
+static int kfd_debugfs_open(struct inode *inode, struct file *file)
+{
+ int (*show)(struct seq_file *, void *) = inode->i_private;
+
+ return single_open(file, show, NULL);
+}
+
+static const struct file_operations kfd_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = kfd_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
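+/* Create the "kfd" debugfs directory with three read-only files:
+ * mqds - MQD contents, listed by process
+ * hqds - HQD registers, listed by device
+ * rls  - HWS runlists, listed by device
+ */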
+void kfd_debugfs_init(void)
+{
+ struct dentry *ent;
+
+ debugfs_root = debugfs_create_dir("kfd", NULL);
+ if (!debugfs_root || debugfs_root == ERR_PTR(-ENODEV)) {
+ pr_warn("Failed to create kfd debugfs dir\n");
+ return;
+ }
+
+ ent = debugfs_create_file("mqds", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_mqds_by_process,
+ &kfd_debugfs_fops);
+ if (!ent)
+ pr_warn("Failed to create mqds in kfd debugfs\n");
+
+ ent = debugfs_create_file("hqds", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_hqds_by_device,
+ &kfd_debugfs_fops);
+ if (!ent)
+ pr_warn("Failed to create hqds in kfd debugfs\n");
+
+ ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_rls_by_device,
+ &kfd_debugfs_fops);
+ if (!ent)
+ pr_warn("Failed to create rls in kfd debugfs\n");
+}
+
+void kfd_debugfs_fini(void)
+{
+ debugfs_remove_recursive(debugfs_root);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 621a3b5..334669996 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -20,16 +20,22 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
#include <linux/amd-iommu.h>
+#endif
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
+#include "cwsr_trap_handler_gfx8.asm"
+#include "kfd_iommu.h"
#define MQD_SIZE_ALIGNED 768
+static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
+#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
.max_pasid_bits = 16,
@@ -38,7 +44,10 @@ static const struct kfd_device_info kaveri_device_info = {
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = false,
+ .needs_iommu_device = true,
+ .needs_pci_atomics = false,
};
static const struct kfd_device_info carrizo_device_info = {
@@ -49,16 +58,127 @@ static const struct kfd_device_info carrizo_device_info = {
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = true,
+ .needs_pci_atomics = false,
};
+#endif
+
+static const struct kfd_device_info hawaii_device_info = {
+ .asic_family = CHIP_HAWAII,
+ .max_pasid_bits = 16,
+ /* max num of queues for Hawaii. TODO: should be a dynamic value */
+ .max_no_of_hqd = 24,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = false,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+};
+
+static const struct kfd_device_info tonga_device_info = {
+ .asic_family = CHIP_TONGA,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = false,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+};
+
+static const struct kfd_device_info tonga_vf_device_info = {
+ .asic_family = CHIP_TONGA,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = false,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+};
+
+static const struct kfd_device_info fiji_device_info = {
+ .asic_family = CHIP_FIJI,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+};
+
+static const struct kfd_device_info fiji_vf_device_info = {
+ .asic_family = CHIP_FIJI,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+};
+
+
+static const struct kfd_device_info polaris10_device_info = {
+ .asic_family = CHIP_POLARIS10,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+};
+
+static const struct kfd_device_info polaris10_vf_device_info = {
+ .asic_family = CHIP_POLARIS10,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+};
+
+static const struct kfd_device_info polaris11_device_info = {
+ .asic_family = CHIP_POLARIS11,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+};
+
struct kfd_deviceid {
unsigned short did;
const struct kfd_device_info *device_info;
};
-/* Please keep this sorted by increasing device id. */
static const struct kfd_deviceid supported_devices[] = {
+#ifdef KFD_SUPPORT_IOMMU_V2
{ 0x1304, &kaveri_device_info }, /* Kaveri */
{ 0x1305, &kaveri_device_info }, /* Kaveri */
{ 0x1306, &kaveri_device_info }, /* Kaveri */
@@ -85,7 +205,51 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x9874, &carrizo_device_info }, /* Carrizo */
{ 0x9875, &carrizo_device_info }, /* Carrizo */
{ 0x9876, &carrizo_device_info }, /* Carrizo */
- { 0x9877, &carrizo_device_info } /* Carrizo */
+ { 0x9877, &carrizo_device_info }, /* Carrizo */
+#endif
+ { 0x67A0, &hawaii_device_info }, /* Hawaii */
+ { 0x67A1, &hawaii_device_info }, /* Hawaii */
+ { 0x67A2, &hawaii_device_info }, /* Hawaii */
+ { 0x67A8, &hawaii_device_info }, /* Hawaii */
+ { 0x67A9, &hawaii_device_info }, /* Hawaii */
+ { 0x67AA, &hawaii_device_info }, /* Hawaii */
+ { 0x67B0, &hawaii_device_info }, /* Hawaii */
+ { 0x67B1, &hawaii_device_info }, /* Hawaii */
+ { 0x67B8, &hawaii_device_info }, /* Hawaii */
+ { 0x67B9, &hawaii_device_info }, /* Hawaii */
+ { 0x67BA, &hawaii_device_info }, /* Hawaii */
+ { 0x67BE, &hawaii_device_info }, /* Hawaii */
+ { 0x6920, &tonga_device_info }, /* Tonga */
+ { 0x6921, &tonga_device_info }, /* Tonga */
+ { 0x6928, &tonga_device_info }, /* Tonga */
+ { 0x6929, &tonga_device_info }, /* Tonga */
+ { 0x692B, &tonga_device_info }, /* Tonga */
+ { 0x692F, &tonga_vf_device_info }, /* Tonga vf */
+ { 0x6938, &tonga_device_info }, /* Tonga */
+ { 0x6939, &tonga_device_info }, /* Tonga */
+ { 0x7300, &fiji_device_info }, /* Fiji */
+ { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
+ { 0x67C0, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C1, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C2, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C4, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C7, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C8, &polaris10_device_info }, /* Polaris10 */
+ { 0x67C9, &polaris10_device_info }, /* Polaris10 */
+ { 0x67CA, &polaris10_device_info }, /* Polaris10 */
+ { 0x67CC, &polaris10_device_info }, /* Polaris10 */
+ { 0x67CF, &polaris10_device_info }, /* Polaris10 */
+ { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
+ { 0x67DF, &polaris10_device_info }, /* Polaris10 */
+ { 0x67E0, &polaris11_device_info }, /* Polaris11 */
+ { 0x67E1, &polaris11_device_info }, /* Polaris11 */
+ { 0x67E3, &polaris11_device_info }, /* Polaris11 */
+ { 0x67E7, &polaris11_device_info }, /* Polaris11 */
+ { 0x67E8, &polaris11_device_info }, /* Polaris11 */
+ { 0x67E9, &polaris11_device_info }, /* Polaris11 */
+ { 0x67EB, &polaris11_device_info }, /* Polaris11 */
+ { 0x67EF, &polaris11_device_info }, /* Polaris11 */
+ { 0x67FF, &polaris11_device_info }, /* Polaris11 */
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
@@ -124,6 +288,21 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
return NULL;
}
+ if (device_info->needs_pci_atomics) {
+ /* Allow BIF to recode atomics to PCIe 3.0
+ * AtomicOps. 32 and 64-bit requests are possible and
+ * must be supported.
+ */
+ if (pci_enable_atomic_ops_to_root(pdev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64) < 0) {
+ dev_info(kfd_device,
+ "skipped device %x:%x, PCI rejects atomics",
+ pdev->vendor, pdev->device);
+ return NULL;
+ }
+ }
+
kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
if (!kfd)
return NULL;
@@ -141,75 +320,15 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
return kfd;
}
-static bool device_iommu_pasid_init(struct kfd_dev *kfd)
+static void kfd_cwsr_init(struct kfd_dev *kfd)
{
- const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
- AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
- AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
-
- struct amd_iommu_device_info iommu_info;
- unsigned int pasid_limit;
- int err;
-
- err = amd_iommu_device_info(kfd->pdev, &iommu_info);
- if (err < 0) {
- dev_err(kfd_device,
- "error getting iommu info. is the iommu enabled?\n");
- return false;
- }
-
- if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
- dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
- (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
- (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
- (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
- != 0);
- return false;
- }
-
- pasid_limit = min_t(unsigned int,
- (unsigned int)(1 << kfd->device_info->max_pasid_bits),
- iommu_info.max_pasids);
+ if (cwsr_enable && kfd->device_info->supports_cwsr) {
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
- if (!kfd_set_pasid_limit(pasid_limit)) {
- dev_err(kfd_device, "error setting pasid limit\n");
- return false;
+ kfd->cwsr_isa = cwsr_trap_gfx8_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
+ kfd->cwsr_enabled = true;
}
-
- return true;
-}
-
-static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
-{
- struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
-
- if (dev)
- kfd_process_iommu_unbind_callback(dev, pasid);
-}
-
-/*
- * This function called by IOMMU driver on PPR failure
- */
-static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
- unsigned long address, u16 flags)
-{
- struct kfd_dev *dev;
-
- dev_warn(kfd_device,
- "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
- PCI_BUS_NUM(pdev->devfn),
- PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn),
- pasid,
- address,
- flags);
-
- dev = kfd_device_by_pci_dev(pdev);
- if (!WARN_ON(!dev))
- kfd_signal_iommu_event(dev, pasid, address,
- flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
-
- return AMD_IOMMU_INV_PRI_RSP_INVALID;
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
@@ -224,6 +343,17 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
- kfd->vm_info.first_vmid_kfd + 1;
+ /* Verify the module parameter for the max number of mapped processes */
+ if ((hws_max_conc_proc < 0)
+ || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
+ dev_err(kfd_device,
+ "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
+ hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
+ kfd->vm_info.vmid_num_kfd);
+ kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
+ } else
+ kfd->max_proc_per_quantum = hws_max_conc_proc;
+
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
@@ -279,13 +409,13 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto device_queue_manager_error;
}
- if (!device_iommu_pasid_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing iommuv2 for device %x:%x\n",
- kfd->pdev->vendor, kfd->pdev->device);
- goto device_iommu_pasid_error;
+ if (kfd_iommu_device_init(kfd)) {
+ dev_err(kfd_device, "Error initializing iommuv2\n");
+ goto device_iommu_error;
}
+ kfd_cwsr_init(kfd);
+
if (kfd_resume(kfd))
goto kfd_resume_error;
@@ -296,12 +426,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->pdev->device);
pr_debug("Starting kfd with the following scheduling policy %d\n",
- sched_policy);
+ kfd->dqm->sched_policy);
goto out;
kfd_resume_error:
-device_iommu_pasid_error:
+device_iommu_error:
device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
kfd_interrupt_exit(kfd);
@@ -340,40 +470,45 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
if (!kfd->init_complete)
return;
- kfd->dqm->ops.stop(kfd->dqm);
+ /* For first KFD device suspend all the KFD processes */
+ if (atomic_inc_return(&kfd_device_suspended) == 1)
+ kfd_suspend_all_processes();
- kfd_unbind_processes_from_device(kfd);
+ kfd->dqm->ops.stop(kfd->dqm);
- amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
- amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
- amd_iommu_free_device(kfd->pdev);
+ kfd_iommu_suspend(kfd);
}
int kgd2kfd_resume(struct kfd_dev *kfd)
{
+ int ret, count;
+
if (!kfd->init_complete)
return 0;
- return kfd_resume(kfd);
+ ret = kfd_resume(kfd);
+ if (ret)
+ return ret;
+ count = atomic_dec_return(&kfd_device_suspended);
+ WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+ if (count == 0)
+ ret = kfd_resume_all_processes();
+
+ return ret;
}
static int kfd_resume(struct kfd_dev *kfd)
{
int err = 0;
- unsigned int pasid_limit = kfd_get_pasid_limit();
-
- err = amd_iommu_init_device(kfd->pdev, pasid_limit);
- if (err)
- return -ENXIO;
- amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
- iommu_pasid_shutdown_callback);
- amd_iommu_set_invalid_ppr_cb(kfd->pdev,
- iommu_invalid_ppr_cb);
- err = kfd_bind_processes_to_device(kfd);
- if (err)
- goto processes_bind_error;
+ err = kfd_iommu_resume(kfd);
+ if (err) {
+ dev_err(kfd_device,
+ "Failed to resume IOMMU for device %x:%x\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ return err;
+ }
err = kfd->dqm->ops.start(kfd->dqm);
if (err) {
@@ -386,9 +521,7 @@ static int kfd_resume(struct kfd_dev *kfd)
return err;
dqm_start_error:
-processes_bind_error:
- amd_iommu_free_device(kfd->pdev);
-
+ kfd_iommu_suspend(kfd);
return err;
}
@@ -408,6 +541,54 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
spin_unlock(&kfd->interrupt_lock);
}
+/** kgd2kfd_schedule_evict_and_restore_process - Schedule delayed work that
+ * prepares for safe eviction of KFD BOs belonging to the specified
+ * process.
+ *
+ * @mm: mm_struct that identifies the specified KFD process
+ * @fence: eviction fence attached to KFD process BOs
+ *
+ */
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+ struct dma_fence *fence)
+{
+ struct kfd_process *p;
+ unsigned long active_time;
+ unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
+
+ if (!fence)
+ return -EINVAL;
+
+ if (dma_fence_is_signaled(fence))
+ return 0;
+
+ p = kfd_lookup_process_by_mm(mm);
+ if (!p)
+ return -ENODEV;
+
+ if (fence->seqno == p->last_eviction_seqno)
+ goto out;
+
+ p->last_eviction_seqno = fence->seqno;
+
+ /* Avoid KFD process starvation. Wait for at least
+ * PROCESS_ACTIVE_TIME_MS before evicting the process again
+ */
+ active_time = get_jiffies_64() - p->last_restore_timestamp;
+ if (delay_jiffies > active_time)
+ delay_jiffies -= active_time;
+ else
+ delay_jiffies = 0;
+
+ /* During process initialization eviction_work.dwork is initialized
+ * to kfd_evict_bo_worker
+ */
+ schedule_delayed_work(&p->eviction_work, delay_jiffies);
+out:
+ kfd_unref_process(p);
+ return 0;
+}
+
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e202921..d55d29d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -21,10 +21,11 @@
*
*/
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
-#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
@@ -118,9 +119,8 @@ static int allocate_vmid(struct device_queue_manager *dqm,
if (dqm->vmid_bitmap == 0)
return -ENOMEM;
- bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap,
- dqm->dev->vm_info.vmid_num_kfd);
- clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
+ bit = ffs(dqm->vmid_bitmap) - 1;
+ dqm->vmid_bitmap &= ~(1 << bit);
allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
pr_debug("vmid allocation %d\n", allocated_vmid);
@@ -130,27 +130,56 @@ static int allocate_vmid(struct device_queue_manager *dqm,
set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
program_sh_mem_settings(dqm, qpd);
+ /* qpd->page_table_base is set earlier when register_process()
+ * is called, i.e. when the first queue is created.
+ */
+ dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
+ qpd->vmid,
+ qpd->page_table_base);
+ /* invalidate the VM context after pasid and vmid mapping is set up */
+ kfd_flush_tlb(qpd_to_pdd(qpd));
+
return 0;
}
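+/* On GFX v7 the CP does not flush the texture cache (TC) when a queue is
+ * dequeued, so build a RELEASE_MEM packet in the process's IB buffer and
+ * submit it on the process VMID to flush the TC explicitly.
+ */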
+static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
+ struct qcm_process_device *qpd)
+{
+ uint32_t len;
+
+ if (!qpd->ib_kaddr)
+ return -ENOMEM;
+
+ len = pm_create_release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
+
+ return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+ qpd->ib_base, (uint32_t *)qpd->ib_kaddr, len);
+}
+
static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
+ /* On GFX v7, CP doesn't flush TC at dequeue */
+ if (q->device->device_info->asic_family == CHIP_HAWAII)
+ if (flush_texture_cache_nocpsch(q->device, qpd))
+ pr_err("Failed to flush TC\n");
+
+ kfd_flush_tlb(qpd_to_pdd(qpd));
+
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
- set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
+ dqm->vmid_bitmap |= (1 << bit);
qpd->vmid = 0;
q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
- struct qcm_process_device *qpd,
- int *allocated_vmid)
+ struct qcm_process_device *qpd)
{
int retval;
@@ -170,8 +199,18 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
goto out_unlock;
}
- *allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
+ /*
+ * Eviction state logic: we only mark active queues as evicted
+ * to avoid the overhead of restoring inactive queues later
+ */
+ if (qpd->evicted)
+ q->properties.is_evicted = (q->properties.queue_size > 0 &&
+ q->properties.queue_percent > 0 &&
+ q->properties.queue_address != 0);
+
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
retval = create_compute_queue_nocpsch(dqm, q, qpd);
@@ -181,10 +220,8 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
retval = -EINVAL;
if (retval) {
- if (list_empty(&qpd->queues_list)) {
+ if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
- *allocated_vmid = 0;
- }
goto out_unlock;
}
@@ -224,12 +261,8 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
continue;
if (dqm->allocated_queues[pipe] != 0) {
- bit = find_first_bit(
- (unsigned long *)&dqm->allocated_queues[pipe],
- get_queues_per_pipe(dqm));
-
- clear_bit(bit,
- (unsigned long *)&dqm->allocated_queues[pipe]);
+ bit = ffs(dqm->allocated_queues[pipe]) - 1;
+ dqm->allocated_queues[pipe] &= ~(1 << bit);
q->pipe = pipe;
q->queue = bit;
set = true;
@@ -250,7 +283,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
static inline void deallocate_hqd(struct device_queue_manager *dqm,
struct queue *q)
{
- set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
+ dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
@@ -372,21 +405,35 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
struct mqd_manager *mqd;
+ struct kfd_process_device *pdd;
bool prev_active = false;
mutex_lock(&dqm->lock);
+ pdd = kfd_get_process_device_data(q->device, q->process);
+ if (!pdd) {
+ retval = -ENODEV;
+ goto out_unlock;
+ }
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
if (!mqd) {
retval = -ENOMEM;
goto out_unlock;
}
+ /*
+ * Eviction state logic: we only mark active queues as evicted
+ * to avoid the overhead of restoring inactive queues later
+ */
+ if (pdd->qpd.evicted)
+ q->properties.is_evicted = (q->properties.queue_size > 0 &&
+ q->properties.queue_percent > 0 &&
+ q->properties.queue_address != 0);
/* Save previous activity state for counters */
prev_active = q->properties.is_active;
/* Make sure the queue is unmapped before updating the MQD */
- if (sched_policy != KFD_SCHED_POLICY_NO_HWS) {
+ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
retval = unmap_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval) {
@@ -418,7 +465,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
else if (!q->properties.is_active && prev_active)
dqm->queue_count--;
- if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
+ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
@@ -452,10 +499,193 @@ static struct mqd_manager *get_mqd_manager(
return mqd;
}
+static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct queue *q;
+ struct mqd_manager *mqd;
+ struct kfd_process_device *pdd;
+ int retval = 0;
+
+ mutex_lock(&dqm->lock);
+ if (qpd->evicted++ > 0) /* already evicted, do nothing */
+ goto out;
+
+ pdd = qpd_to_pdd(qpd);
+ pr_info_ratelimited("Evicting PASID %u queues\n",
+ pdd->process->pasid);
+
+ /* Deactivate all active queues on the qpd */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_active)
+ continue;
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd) { /* should not be here */
+ pr_err("Cannot evict queue, mqd mgr is NULL\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ q->properties.is_evicted = true;
+ q->properties.is_active = false;
+ retval = mqd->destroy_mqd(mqd, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+ if (retval)
+ goto out;
+ dqm->queue_count--;
+ }
+
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct queue *q;
+ struct kfd_process_device *pdd;
+ int retval = 0;
+
+ mutex_lock(&dqm->lock);
+ if (qpd->evicted++ > 0) /* already evicted, do nothing */
+ goto out;
+
+ pdd = qpd_to_pdd(qpd);
+ pr_info_ratelimited("Evicting PASID %u queues\n",
+ pdd->process->pasid);
+
+ /* Deactivate all active queues on the qpd */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_active)
+ continue;
+ q->properties.is_evicted = true;
+ q->properties.is_active = false;
+ dqm->queue_count--;
+ }
+ retval = execute_queues_cpsch(dqm,
+ qpd->is_debug ?
+ KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct queue *q;
+ struct mqd_manager *mqd;
+ struct kfd_process_device *pdd;
+ uint32_t pd_base;
+ int retval = 0;
+
+ pdd = qpd_to_pdd(qpd);
+ /* Retrieve PD base */
+ pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+
+ mutex_lock(&dqm->lock);
+ if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
+ goto out;
+ if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
+ qpd->evicted--;
+ goto out;
+ }
+
+ pr_info_ratelimited("Restoring PASID %u queues\n",
+ pdd->process->pasid);
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+ pr_debug("Updated PD address to 0x%08x\n", pd_base);
+
+ if (!list_empty(&qpd->queues_list)) {
+ dqm->dev->kfd2kgd->set_vm_context_page_table_base(
+ dqm->dev->kgd,
+ qpd->vmid,
+ qpd->page_table_base);
+ kfd_flush_tlb(pdd);
+ }
+
+ /* Re-activate all evicted queues on the qpd */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_evicted)
+ continue;
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd) { /* should not be here */
+ pr_err("Cannot restore queue, mqd mgr is NULL\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ q->properties.is_evicted = false;
+ q->properties.is_active = true;
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ q->queue, &q->properties,
+ q->process->mm);
+ if (retval)
+ goto out;
+ dqm->queue_count++;
+ }
+ qpd->evicted = 0;
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct queue *q;
+ struct kfd_process_device *pdd;
+ uint32_t pd_base;
+ int retval = 0;
+
+ pdd = qpd_to_pdd(qpd);
+ /* Retrieve PD base */
+ pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+
+ mutex_lock(&dqm->lock);
+ if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
+ goto out;
+ if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
+ qpd->evicted--;
+ goto out;
+ }
+
+ pr_info_ratelimited("Restoring PASID %u queues\n",
+ pdd->process->pasid);
+
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+ pr_debug("Updated PD address to 0x%08x\n", pd_base);
+
+ /* Re-activate all evicted queues on the qpd */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_evicted)
+ continue;
+ q->properties.is_evicted = false;
+ q->properties.is_active = true;
+ dqm->queue_count++;
+ }
+ retval = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ if (!retval)
+ qpd->evicted = 0;
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
static int register_process(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct device_process_node *n;
+ struct kfd_process_device *pdd;
+ uint32_t pd_base;
int retval;
n = kzalloc(sizeof(*n), GFP_KERNEL);
@@ -464,9 +694,16 @@ static int register_process(struct device_queue_manager *dqm,
n->qpd = qpd;
+ pdd = qpd_to_pdd(qpd);
+ /* Retrieve PD base */
+ pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+
mutex_lock(&dqm->lock);
list_add(&n->list, &dqm->queues);
+ /* Update PD Base in QPD */
+ qpd->page_table_base = pd_base;
+
retval = dqm->asic_ops.update_qpd(dqm, qpd);
dqm->processes_count++;
@@ -574,11 +811,12 @@ static void uninitialize(struct device_queue_manager *dqm)
static int start_nocpsch(struct device_queue_manager *dqm)
{
init_interrupts(dqm);
- return 0;
+ return pm_init(&dqm->packets, dqm);
}
static int stop_nocpsch(struct device_queue_manager *dqm)
{
+ pm_uninit(&dqm->packets);
return 0;
}
@@ -590,10 +828,8 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
if (dqm->sdma_bitmap == 0)
return -ENOMEM;
- bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
- CIK_SDMA_QUEUES);
-
- clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
+ bit = ffs(dqm->sdma_bitmap) - 1;
+ dqm->sdma_bitmap &= ~(1 << bit);
*sdma_queue_id = bit;
return 0;
@@ -604,7 +840,7 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
{
if (sdma_queue_id >= CIK_SDMA_QUEUES)
return;
- set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
+ dqm->sdma_bitmap |= (1 << sdma_queue_id);
}
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
@@ -809,29 +1045,26 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
- struct qcm_process_device *qpd, int *allocate_vmid)
+ struct qcm_process_device *qpd)
{
int retval;
struct mqd_manager *mqd;
retval = 0;
- if (allocate_vmid)
- *allocate_vmid = 0;
-
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
- goto out;
+ goto out_unlock;
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
retval = allocate_sdma_queue(dqm, &q->sdma_id);
if (retval)
- goto out;
+ goto out_unlock;
q->properties.sdma_queue_id =
q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
q->properties.sdma_engine_id =
@@ -842,14 +1075,25 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (!mqd) {
retval = -ENOMEM;
- goto out;
+ goto out_deallocate_sdma_queue;
}
+ /*
+ * Eviction state logic: we only mark active queues as evicted
+ * to avoid the overhead of restoring inactive queues later
+ */
+ if (qpd->evicted)
+ q->properties.is_evicted = (q->properties.queue_size > 0 &&
+ q->properties.queue_percent > 0 &&
+ q->properties.queue_address != 0);
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
+
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
- goto out;
+ goto out_deallocate_sdma_queue;
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
@@ -870,7 +1114,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
-out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+
+out_deallocate_sdma_queue:
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ deallocate_sdma_queue(dqm, q->sdma_id);
+out_unlock:
mutex_unlock(&dqm->lock);
return retval;
}
@@ -1014,13 +1264,13 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
list_del(&q->list);
qpd->queue_count--;
- if (q->properties.is_active)
+ if (q->properties.is_active) {
dqm->queue_count--;
-
- retval = execute_queues_cpsch(dqm,
+ retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- if (retval == -ETIME)
- qpd->reset_wavefronts = true;
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
+ }
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -1034,7 +1284,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
mutex_unlock(&dqm->lock);
- return 0;
+ return retval;
failed:
failed_try_destroy_debugged_queue:
@@ -1098,7 +1348,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
alternate_aperture_base,
alternate_aperture_size);
- if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
+ if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
@@ -1110,6 +1360,26 @@ out:
return retval;
}
+static int set_trap_handler(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ uint64_t tba_addr,
+ uint64_t tma_addr)
+{
+ uint64_t *tma;
+
+ if (dqm->dev->cwsr_enabled) {
+ /* Jump from CWSR trap handler to user trap */
+ tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
+ tma[0] = tba_addr;
+ tma[1] = tma_addr;
+ } else {
+ qpd->tba_addr = tba_addr;
+ qpd->tma_addr = tma_addr;
+ }
+
+ return 0;
+}
+
static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -1169,8 +1439,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clear all user mode queues */
list_for_each_entry(q, &qpd->queues_list, list) {
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
dqm->sdma_queue_count--;
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ }
if (q->properties.is_active)
dqm->queue_count--;
@@ -1223,8 +1495,24 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
if (!dqm)
return NULL;
+ switch (dev->device_info->asic_family) {
+ /* HWS is not available on Hawaii. */
+ case CHIP_HAWAII:
+ /* HWS depends on CWSR for timely dequeue. CWSR is not
+ * available on Tonga.
+ *
+ * FIXME: This argument also applies to Kaveri.
+ */
+ case CHIP_TONGA:
+ dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
+ break;
+ default:
+ dqm->sched_policy = sched_policy;
+ break;
+ }
+
dqm->dev = dev;
- switch (sched_policy) {
+ switch (dqm->sched_policy) {
case KFD_SCHED_POLICY_HWS:
case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
/* initialize dqm for cp scheduling */
@@ -1241,7 +1529,10 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.set_trap_handler = set_trap_handler;
dqm->ops.process_termination = process_termination_cpsch;
+ dqm->ops.evict_process_queues = evict_process_queues_cpsch;
+ dqm->ops.restore_process_queues = restore_process_queues_cpsch;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
@@ -1256,10 +1547,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.initialize = initialize_nocpsch;
dqm->ops.uninitialize = uninitialize;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.set_trap_handler = set_trap_handler;
dqm->ops.process_termination = process_termination_nocpsch;
+ dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
+ dqm->ops.restore_process_queues =
+ restore_process_queues_nocpsch;
break;
default:
- pr_err("Invalid scheduling policy %d\n", sched_policy);
+ pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
goto out_free;
}
@@ -1271,6 +1566,17 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_KAVERI:
device_queue_manager_init_cik(&dqm->asic_ops);
break;
+
+ case CHIP_HAWAII:
+ device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
+ break;
+
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ device_queue_manager_init_vi_tonga(&dqm->asic_ops);
+ break;
default:
WARN(1, "Unexpected ASIC family %u",
dev->device_info->asic_family);
@@ -1290,3 +1596,74 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
dqm->ops.uninitialize(dqm);
kfree(dqm);
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static void seq_reg_dump(struct seq_file *m,
+ uint32_t (*dump)[2], uint32_t n_regs)
+{
+ uint32_t i, count;
+
+ for (i = 0, count = 0; i < n_regs; i++) {
+ if (count == 0 ||
+ dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
+ seq_printf(m, "%s %08x: %08x",
+ i ? "\n" : "",
+ dump[i][0], dump[i][1]);
+ count = 7;
+ } else {
+ seq_printf(m, " %08x", dump[i][1]);
+ count--;
+ }
+ }
+
+ seq_puts(m, "\n");
+}
+
+int dqm_debugfs_hqds(struct seq_file *m, void *data)
+{
+ struct device_queue_manager *dqm = data;
+ uint32_t (*dump)[2], n_regs;
+ int pipe, queue;
+ int r = 0;
+
+ for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
+ int pipe_offset = pipe * get_queues_per_pipe(dqm);
+
+ for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
+ if (!test_bit(pipe_offset + queue,
+ dqm->dev->shared_resources.queue_bitmap))
+ continue;
+
+ r = dqm->dev->kfd2kgd->hqd_dump(
+ dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ if (r)
+ break;
+
+ seq_printf(m, " CP Pipe %d, Queue %d\n",
+ pipe, queue);
+ seq_reg_dump(m, dump, n_regs);
+
+ kfree(dump);
+ }
+ }
+
+ for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
+ for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
+ r = dqm->dev->kfd2kgd->hqd_sdma_dump(
+ dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ if (r)
+ break;
+
+ seq_printf(m, " SDMA Engine %d, RLC %d\n",
+ pipe, queue);
+ seq_reg_dump(m, dump, n_regs);
+
+ kfree(dump);
+ }
+ }
+
+ return r;
+}
+
+#endif
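
As a hedged restatement of the grouping rule in seq_reg_dump() above: a new "offset: value" line is started whenever the dumped register offset is not contiguous with the previous one (or at the very start), otherwise up to eight values are packed per line. The stand-alone example below uses made-up register data.

#include <stdio.h>
#include <stdint.h>

/* Stand-alone re-statement of the seq_reg_dump() grouping logic. */
static void reg_dump(uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i - 1][0] + sizeof(uint32_t) != dump[i][0]) {
			/* non-contiguous offset: start a new line */
			printf("%s %08x: %08x", i ? "\n" : "",
			       dump[i][0], dump[i][1]);
			count = 7;	/* up to 7 more values on this line */
		} else {
			printf(" %08x", dump[i][1]);
			count--;
		}
	}
	printf("\n");
}

int main(void)
{
	/* made-up (offset, value) pairs: 0x100..0x10c contiguous, then a gap */
	uint32_t regs[][2] = {
		{ 0x100, 1 }, { 0x104, 2 }, { 0x108, 3 }, { 0x10c, 4 },
		{ 0x200, 5 }, { 0x204, 6 },
	};

	reg_dump(regs, 6);
	return 0;
}
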
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 5b77cb6..412beff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -79,13 +79,16 @@ struct device_process_node {
*
* @process_termination: Clears all process queues belonging to that device.
*
+ * @evict_process_queues: Evict all active queues of a process
+ *
+ * @restore_process_queues: Restore all evicted queues of a process
+ *
*/
struct device_queue_manager_ops {
int (*create_queue)(struct device_queue_manager *dqm,
struct queue *q,
- struct qcm_process_device *qpd,
- int *allocate_vmid);
+ struct qcm_process_device *qpd);
int (*destroy_queue)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
@@ -123,8 +126,18 @@ struct device_queue_manager_ops {
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
+ int (*set_trap_handler)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ uint64_t tba_addr,
+ uint64_t tma_addr);
+
int (*process_termination)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
+
+ int (*evict_process_queues)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+ int (*restore_process_queues)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
};
struct device_queue_manager_asic_ops {
@@ -176,12 +189,17 @@ struct device_queue_manager {
unsigned int *fence_addr;
struct kfd_mem_obj *fence_mem;
bool active_runlist;
+ int sched_policy;
};
void device_queue_manager_init_cik(
struct device_queue_manager_asic_ops *asic_ops);
+void device_queue_manager_init_cik_hawaii(
+ struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
struct device_queue_manager_asic_ops *asic_ops);
+void device_queue_manager_init_vi_tonga(
+ struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
unsigned int get_queues_num(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 28e48c9..aed4c21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -34,8 +34,13 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
uint64_t alternate_aperture_size);
static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
+static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
void device_queue_manager_init_cik(
struct device_queue_manager_asic_ops *asic_ops)
@@ -45,6 +50,14 @@ void device_queue_manager_init_cik(
asic_ops->init_sdma_vm = init_sdma_vm;
}
+void device_queue_manager_init_cik_hawaii(
+ struct device_queue_manager_asic_ops *asic_ops)
+{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
+ asic_ops->update_qpd = update_qpd_cik_hawaii;
+ asic_ops->init_sdma_vm = init_sdma_vm_hawaii;
+}
+
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
/* In 64-bit mode, we can only control the top 3 bits of the LDS,
@@ -132,6 +145,36 @@ static int update_qpd_cik(struct device_queue_manager *dqm,
return 0;
}
+static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+ if (qpd->sh_mem_config == 0) {
+ qpd->sh_mem_config =
+ ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
+ DEFAULT_MTYPE(MTYPE_NONCACHED) |
+ APE1_MTYPE(MTYPE_NONCACHED);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ }
+
+ /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
+ * aperture addresses.
+ */
+ temp = get_sh_mem_bases_nybble_64(pdd);
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+
+ pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+
+ return 0;
+}
+
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
@@ -147,3 +190,16 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q->properties.sdma_vm_addr = value;
}
+
+static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
+ * aperture addresses.
+ */
+ q->properties.sdma_vm_addr =
+ ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 2fbce57..fd60a11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -33,10 +33,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
+static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
+static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
void device_queue_manager_init_vi(
struct device_queue_manager_asic_ops *asic_ops)
@@ -46,6 +57,14 @@ void device_queue_manager_init_vi(
asic_ops->init_sdma_vm = init_sdma_vm;
}
+void device_queue_manager_init_vi_tonga(
+ struct device_queue_manager_asic_ops *asic_ops)
+{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
+ asic_ops->update_qpd = update_qpd_vi_tonga;
+ asic_ops->init_sdma_vm = init_sdma_vm_tonga;
+}
+
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
/* In 64-bit mode, we can only control the top 3 bits of the LDS,
@@ -103,6 +122,33 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
return true;
}
+static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size)
+{
+ uint32_t default_mtype;
+ uint32_t ape1_mtype;
+
+ default_mtype = (default_policy == cache_policy_coherent) ?
+ MTYPE_UC :
+ MTYPE_NC;
+
+ ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+ MTYPE_UC :
+ MTYPE_NC;
+
+ qpd->sh_mem_config =
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+ default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+ ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
+
+ return true;
+}
+
static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -144,6 +190,40 @@ static int update_qpd_vi(struct device_queue_manager *dqm,
return 0;
}
+static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct kfd_process_device *pdd;
+ unsigned int temp;
+
+ pdd = qpd_to_pdd(qpd);
+
+ /* check if sh_mem_config register already configured */
+ if (qpd->sh_mem_config == 0) {
+ qpd->sh_mem_config =
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+ MTYPE_UC <<
+ SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+ MTYPE_UC <<
+ SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
+
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ }
+
+ /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
+ * aperture addresses.
+ */
+ temp = get_sh_mem_bases_nybble_64(pdd);
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+
+ pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ temp, qpd->sh_mem_bases);
+
+ return 0;
+}
+
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
@@ -159,3 +239,16 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q->properties.sdma_vm_addr = value;
}
+
+static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
+{
+ /* On dGPU we're always in GPUVM64 addressing mode with 64-bit
+ * aperture addresses.
+ */
+ q->properties.sdma_vm_addr =
+ ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+ SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index feb76c2..ebb4da14 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -116,8 +116,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
pr_debug("doorbell aperture size == 0x%08lX\n",
kfd->shared_resources.doorbell_aperture_size);
- pr_debug("doorbell kernel address == 0x%08lX\n",
- (uintptr_t)kfd->doorbell_kernel_ptr);
+ pr_debug("doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr);
return 0;
}
@@ -194,8 +193,8 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
- " kernel address == 0x%08lX\n",
- *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
+ " kernel address == %p\n",
+ *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
return kfd->doorbell_kernel_ptr + inx;
}
@@ -215,7 +214,7 @@ inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
{
if (db) {
writel(value, db);
- pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
+ pr_debug("Writing %d to doorbell address %p\n", value, db);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index cb92d4b..4890a90 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -30,6 +30,7 @@
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
+#include "kfd_iommu.h"
#include <linux/device.h>
/*
@@ -51,6 +52,7 @@ struct kfd_event_waiter {
struct kfd_signal_page {
uint64_t *kernel_address;
uint64_t __user *user_address;
+ bool need_to_free_pages;
};
@@ -78,6 +80,7 @@ static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
KFD_SIGNAL_EVENT_LIMIT * 8);
page->kernel_address = backing_store;
+ page->need_to_free_pages = true;
pr_debug("Allocated new event signal page at %p, for process %p\n",
page, p);
@@ -268,8 +271,9 @@ static void shutdown_signal_page(struct kfd_process *p)
struct kfd_signal_page *page = p->signal_page;
if (page) {
- free_pages((unsigned long)page->kernel_address,
- get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
+ if (page->need_to_free_pages)
+ free_pages((unsigned long)page->kernel_address,
+ get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
kfree(page);
}
}
@@ -291,6 +295,30 @@ static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
return ev->type == KFD_EVENT_TYPE_SIGNAL;
}
+int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
+ uint64_t size)
+{
+ struct kfd_signal_page *page;
+
+ if (p->signal_page)
+ return -EBUSY;
+
+ page = kzalloc(sizeof(*page), GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ /* Initialize all events to unsignaled */
+ memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
+ KFD_SIGNAL_EVENT_LIMIT * 8);
+
+ page->kernel_address = kernel_address;
+
+ p->signal_page = page;
+ p->signal_mapped_size = size;
+
+ return 0;
+}
+
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
uint32_t event_type, bool auto_reset, uint32_t node_id,
uint32_t *event_id, uint32_t *event_trigger_data,
@@ -441,7 +469,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
/*
* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
- * running so the lookup function returns a locked process.
+ * running so the lookup function increments the process ref count.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -493,7 +521,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
}
mutex_unlock(&p->event_mutex);
- mutex_unlock(&p->mutex);
+ kfd_unref_process(p);
}
static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
@@ -837,6 +865,7 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
}
}
+#ifdef KFD_SUPPORT_IOMMU_V2
void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
unsigned long address, bool is_write_requested,
bool is_execute_requested)
@@ -847,7 +876,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
/*
* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
- * running so the lookup function returns a locked process.
+ * running so the lookup function increments the process ref count.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
struct mm_struct *mm;
@@ -860,7 +889,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
*/
mm = get_task_mm(p->lead_thread);
if (!mm) {
- mutex_unlock(&p->mutex);
+ kfd_unref_process(p);
return; /* Process is exiting */
}
@@ -903,15 +932,16 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
&memory_exception_data);
mutex_unlock(&p->event_mutex);
- mutex_unlock(&p->mutex);
+ kfd_unref_process(p);
}
+#endif /* KFD_SUPPORT_IOMMU_V2 */
void kfd_signal_hw_exception_event(unsigned int pasid)
{
/*
* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
- * running so the lookup function returns a locked process.
+ * running so the lookup function increments the process ref count.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -924,5 +954,5 @@ void kfd_signal_hw_exception_event(unsigned int pasid)
lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
mutex_unlock(&p->event_mutex);
- mutex_unlock(&p->mutex);
+ kfd_unref_process(p);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index c59384b..66852de 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -278,21 +278,28 @@
#define MAKE_GPUVM_APP_BASE(gpu_num) \
(((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
-#define MAKE_GPUVM_APP_LIMIT(base) \
- (((uint64_t)(base) & \
- 0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL)
+#define MAKE_GPUVM_APP_LIMIT(base, size) \
+ (((uint64_t)(base) & 0xFFFFFF0000000000UL) + (size) - 1)
-#define MAKE_SCRATCH_APP_BASE(gpu_num) \
- (((uint64_t)(gpu_num) << 61) + 0x100000000L)
+#define MAKE_SCRATCH_APP_BASE() \
+ (((uint64_t)(0x1UL) << 61) + 0x100000000L)
#define MAKE_SCRATCH_APP_LIMIT(base) \
(((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
-#define MAKE_LDS_APP_BASE(gpu_num) \
- (((uint64_t)(gpu_num) << 61) + 0x0)
+#define MAKE_LDS_APP_BASE() \
+ (((uint64_t)(0x1UL) << 61) + 0x0)
#define MAKE_LDS_APP_LIMIT(base) \
(((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
+/* User mode manages most of the SVM aperture address space. The low
+ * 16MB are reserved for kernel use (CWSR trap handler and kernel IB
+ * for now).
+ */
+#define SVM_USER_BASE 0x1000000ull
+#define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
+#define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
+
int kfd_init_apertures(struct kfd_process *process)
{
uint8_t id = 0;
@@ -300,16 +307,21 @@ int kfd_init_apertures(struct kfd_process *process)
struct kfd_process_device *pdd;
/*Iterating over all devices*/
- while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
+ while (kfd_topology_enum_kfd_devices(id, &dev) == 0 &&
id < NUM_OF_SUPPORTED_GPUS) {
+ if (!dev) {
+ id++; /* Skip non GPU devices */
+ continue;
+ }
+
pdd = kfd_create_process_device_data(dev, process);
if (!pdd) {
pr_err("Failed to create process device data\n");
return -1;
}
/*
- * For 64 bit process aperture will be statically reserved in
+ * For 64 bit process apertures will be statically reserved in
* the x86_64 non canonical process address space
* amdkfd doesn't currently support apertures for 32 bit process
*/
@@ -318,23 +330,35 @@ int kfd_init_apertures(struct kfd_process *process)
pdd->gpuvm_base = pdd->gpuvm_limit = 0;
pdd->scratch_base = pdd->scratch_limit = 0;
} else {
- /*
- * node id couldn't be 0 - the three MSB bits of
- * aperture shoudn't be 0
+ /* Same LDS and scratch apertures can be used
+ * on all GPUs. This allows using more dGPUs
+ * than placement options for apertures.
*/
- pdd->lds_base = MAKE_LDS_APP_BASE(id + 1);
-
+ pdd->lds_base = MAKE_LDS_APP_BASE();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
- pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1);
-
- pdd->gpuvm_limit =
- MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base);
-
- pdd->scratch_base = MAKE_SCRATCH_APP_BASE(id + 1);
-
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE();
pdd->scratch_limit =
MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+
+ if (dev->device_info->needs_iommu_device) {
+ /* APUs: GPUVM aperture in
+ * non-canonical address space
+ */
+ pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1);
+ pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(
+ pdd->gpuvm_base,
+ dev->shared_resources.gpuvm_size);
+ } else {
+ /* dGPUs: SVM aperture starting at 0
+ * with small reserved space for kernel
+ */
+ pdd->gpuvm_base = SVM_USER_BASE;
+ pdd->gpuvm_limit =
+ dev->shared_resources.gpuvm_size - 1;
+ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+ pdd->qpd.ib_base = SVM_IB_BASE;
+ }
}
dev_dbg(kfd_device, "node id %u\n", id);
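
A small worked example of the dGPU aperture arithmetic introduced above, assuming 4 KiB pages and assuming KFD_CWSR_TBA_TMA_SIZE is two pages (only SVM_USER_BASE = 16 MiB is stated by the patch itself):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_ASSUMED	0x1000ull			/* assumption: 4 KiB pages */
#define CWSR_TBA_TMA_ASSUMED	(2 * PAGE_SIZE_ASSUMED)		/* assumption */

int main(void)
{
	uint64_t svm_user_base = 0x1000000ull;	/* 16 MiB, from SVM_USER_BASE */
	uint64_t svm_cwsr_base = svm_user_base - CWSR_TBA_TMA_ASSUMED;
	uint64_t svm_ib_base   = svm_cwsr_base - PAGE_SIZE_ASSUMED;

	/* prints 0x1000000, 0xffe000, 0xffd000 under these assumptions */
	printf("user 0x%llx cwsr 0x%llx ib 0x%llx\n",
	       (unsigned long long)svm_user_base,
	       (unsigned long long)svm_cwsr_base,
	       (unsigned long long)svm_ib_base);
	return 0;
}
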
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
new file mode 100644
index 0000000..c718179
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/printk.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/amd-iommu.h>
+#include "kfd_priv.h"
+#include "kfd_dbgmgr.h"
+#include "kfd_topology.h"
+#include "kfd_iommu.h"
+
+static const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
+ AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
+ AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+
+/** kfd_iommu_check_device - Check whether IOMMU is available for device
+ */
+int kfd_iommu_check_device(struct kfd_dev *kfd)
+{
+ struct amd_iommu_device_info iommu_info;
+ int err;
+
+ if (!kfd->device_info->needs_iommu_device)
+ return -ENODEV;
+
+ iommu_info.flags = 0;
+ err = amd_iommu_device_info(kfd->pdev, &iommu_info);
+ if (err)
+ return err;
+
+ if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags)
+ return -ENODEV;
+
+ return 0;
+}
+
+/** kfd_iommu_device_init - Initialize IOMMU for device
+ */
+int kfd_iommu_device_init(struct kfd_dev *kfd)
+{
+ struct amd_iommu_device_info iommu_info;
+ unsigned int pasid_limit;
+ int err;
+
+ if (!kfd->device_info->needs_iommu_device)
+ return 0;
+
+ iommu_info.flags = 0;
+ err = amd_iommu_device_info(kfd->pdev, &iommu_info);
+ if (err < 0) {
+ dev_err(kfd_device,
+ "error getting iommu info. is the iommu enabled?\n");
+ return -ENODEV;
+ }
+
+ if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
+ dev_err(kfd_device,
+ "error required iommu flags ats %i, pri %i, pasid %i\n",
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+ != 0);
+ return -ENODEV;
+ }
+
+ pasid_limit = min_t(unsigned int,
+ (unsigned int)(1 << kfd->device_info->max_pasid_bits),
+ iommu_info.max_pasids);
+
+ if (!kfd_set_pasid_limit(pasid_limit)) {
+ dev_err(kfd_device, "error setting pasid limit\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/** kfd_iommu_bind_process_to_device - Have the IOMMU bind a process
+ *
+ * Binds the given process to the given device using its PASID. This
+ * enables IOMMUv2 address translation for the process on the device.
+ *
+ * This function assumes that the process mutex is held.
+ */
+int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
+{
+ struct kfd_dev *dev = pdd->dev;
+ struct kfd_process *p = pdd->process;
+ int err;
+
+ if (!dev->device_info->needs_iommu_device || pdd->bound == PDD_BOUND)
+ return 0;
+
+ if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
+ pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
+ return -EINVAL;
+ }
+
+ err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
+ if (!err)
+ pdd->bound = PDD_BOUND;
+
+ return err;
+}
+
+/** kfd_iommu_unbind_process - Unbind process from all devices
+ *
+ * This removes all IOMMU device bindings of the process. To be used
+ * before process termination.
+ */
+void kfd_iommu_unbind_process(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ if (pdd->bound == PDD_BOUND)
+ amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
+}
+
+/* Callback for process shutdown invoked by the IOMMU driver */
+static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
+{
+ struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
+ struct kfd_process *p;
+ struct kfd_process_device *pdd;
+
+ if (!dev)
+ return;
+
+ /*
+ * Look for the process that matches the pasid. If there is no such
+ * process, we either released it in amdkfd's own notifier, or there
+ * is a bug. Unfortunately, there is no way to tell...
+ */
+ p = kfd_lookup_process_by_pasid(pasid);
+ if (!p)
+ return;
+
+ pr_debug("Unbinding process %d from IOMMU\n", pasid);
+
+ mutex_lock(kfd_get_dbgmgr_mutex());
+
+ if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
+ if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+ dev->dbgmgr = NULL;
+ }
+ }
+
+ mutex_unlock(kfd_get_dbgmgr_mutex());
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (pdd)
+ /* For GPUs relying on the IOMMU, we need to dequeue here
+ * while the PASID is still bound.
+ */
+ kfd_process_dequeue_from_device(pdd);
+
+ mutex_unlock(&p->mutex);
+
+ kfd_unref_process(p);
+}
+
+/* This function is called by the IOMMU driver on PPR failure */
+static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
+ unsigned long address, u16 flags)
+{
+ struct kfd_dev *dev;
+
+ dev_warn(kfd_device,
+ "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
+ PCI_BUS_NUM(pdev->devfn),
+ PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn),
+ pasid,
+ address,
+ flags);
+
+ dev = kfd_device_by_pci_dev(pdev);
+ if (!WARN_ON(!dev))
+ kfd_signal_iommu_event(dev, pasid, address,
+ flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
+
+ return AMD_IOMMU_INV_PRI_RSP_INVALID;
+}
+
+/*
+ * Bind processes to the device that have been temporarily unbound
+ * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
+ */
+static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ unsigned int temp;
+ int err = 0;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(kfd, p);
+
+ if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) {
+ mutex_unlock(&p->mutex);
+ continue;
+ }
+
+ err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
+ p->lead_thread);
+ if (err < 0) {
+ pr_err("Unexpected pasid %d binding failure\n",
+ p->pasid);
+ mutex_unlock(&p->mutex);
+ break;
+ }
+
+ pdd->bound = PDD_BOUND;
+ mutex_unlock(&p->mutex);
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ return err;
+}
+
+/*
+ * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
+ * processes will be restored to PDD_BOUND state in
+ * kfd_bind_processes_to_device.
+ */
+static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ unsigned int temp;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(kfd, p);
+
+ if (WARN_ON(!pdd)) {
+ mutex_unlock(&p->mutex);
+ continue;
+ }
+
+ if (pdd->bound == PDD_BOUND)
+ pdd->bound = PDD_BOUND_SUSPENDED;
+ mutex_unlock(&p->mutex);
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+}
+
+/** kfd_iommu_suspend - Prepare IOMMU for suspend
+ *
+ * This unbinds processes from the device and disables the IOMMU for
+ * the device.
+ */
+void kfd_iommu_suspend(struct kfd_dev *kfd)
+{
+ if (!kfd->device_info->needs_iommu_device)
+ return;
+
+ kfd_unbind_processes_from_device(kfd);
+
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
+ amd_iommu_free_device(kfd->pdev);
+}
+
+/** kfd_iommu_resume - Restore IOMMU after resume
+ *
+ * This reinitializes the IOMMU for the device and re-binds previously
+ * suspended processes to the device.
+ */
+int kfd_iommu_resume(struct kfd_dev *kfd)
+{
+ unsigned int pasid_limit;
+ int err;
+
+ if (!kfd->device_info->needs_iommu_device)
+ return 0;
+
+ pasid_limit = kfd_get_pasid_limit();
+
+ err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+ if (err)
+ return -ENXIO;
+
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+ iommu_pasid_shutdown_callback);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev,
+ iommu_invalid_ppr_cb);
+
+ err = kfd_bind_processes_to_device(kfd);
+ if (err) {
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
+ amd_iommu_free_device(kfd->pdev);
+ return err;
+ }
+
+ return 0;
+}
+
+extern bool amd_iommu_pc_supported(void);
+extern u8 amd_iommu_pc_get_max_banks(u16 devid);
+extern u8 amd_iommu_pc_get_max_counters(u16 devid);
+
+/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
+ */
+int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
+{
+ struct kfd_perf_properties *props;
+
+ if (!(kdev->node_props.capability & HSA_CAP_ATS_PRESENT))
+ return 0;
+
+ if (!amd_iommu_pc_supported())
+ return 0;
+
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+ strcpy(props->block_name, "iommu");
+ props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
+ amd_iommu_pc_get_max_counters(0); /* assume one iommu */
+ list_add_tail(&props->list, &kdev->perf_props);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
new file mode 100644
index 0000000..dd23d9f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __KFD_IOMMU_H__
+#define __KFD_IOMMU_H__
+
+#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
+
+#define KFD_SUPPORT_IOMMU_V2
+
+int kfd_iommu_check_device(struct kfd_dev *kfd);
+int kfd_iommu_device_init(struct kfd_dev *kfd);
+
+int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd);
+void kfd_iommu_unbind_process(struct kfd_process *p);
+
+void kfd_iommu_suspend(struct kfd_dev *kfd);
+int kfd_iommu_resume(struct kfd_dev *kfd);
+
+int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev);
+
+#else
+
+static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
+{
+ return -ENODEV;
+}
+static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline int kfd_iommu_bind_process_to_device(
+ struct kfd_process_device *pdd)
+{
+ return 0;
+}
+static inline void kfd_iommu_unbind_process(struct kfd_process *p)
+{
+ /* empty */
+}
+
+static inline void kfd_iommu_suspend(struct kfd_dev *kfd)
+{
+ /* empty */
+}
+static inline int kfd_iommu_resume(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
+{
+ return 0;
+}
+
+#endif /* defined(CONFIG_AMD_IOMMU_V2) */
+
+#endif /* __KFD_IOMMU_H__ */
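
The header above follows the usual kernel pattern: real prototypes when the AMD IOMMU v2 driver is configured, static-inline no-op stubs otherwise, so callers in the device init/suspend/resume paths need no #ifdef blocks. A generic sketch of that pattern (the names are illustrative, not the actual kfd_iommu.h):

/* feature.h -- illustrative stub-header pattern, not the actual kfd_iommu.h */
#ifndef FEATURE_H
#define FEATURE_H

#ifdef CONFIG_FEATURE
int feature_init(void *dev);			/* real version lives in feature.c */
#else
static inline int feature_init(void *dev)	/* stub: succeed, do nothing */
{
	return 0;
}
#endif

#endif /* FEATURE_H */
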
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 8b0c064..69f4964 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -218,7 +218,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
rptr = *kq->rptr_kernel;
wptr = *kq->wptr_kernel;
queue_address = (unsigned int *)kq->pq_kernel_addr;
- queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
+ queue_size_dwords = kq->queue->properties.queue_size / 4;
pr_debug("rptr: %d\n", rptr);
pr_debug("wptr: %d\n", wptr);
@@ -297,10 +297,15 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
switch (dev->device_info->asic_family) {
case CHIP_CARRIZO:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
kernel_queue_init_vi(&kq->ops_asic_specific);
break;
case CHIP_KAVERI:
+ case CHIP_HAWAII:
kernel_queue_init_cik(&kq->ops_asic_specific);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index f744cae..e0c07d2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -43,6 +43,8 @@ static const struct kgd2kfd_calls kgd2kfd = {
.interrupt = kgd2kfd_interrupt,
.suspend = kgd2kfd_suspend,
.resume = kgd2kfd_resume,
+ .schedule_evict_and_restore_process =
+ kgd2kfd_schedule_evict_and_restore_process,
};
int sched_policy = KFD_SCHED_POLICY_HWS;
@@ -50,6 +52,15 @@ module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
+int hws_max_conc_proc = 8;
+module_param(hws_max_conc_proc, int, 0444);
+MODULE_PARM_DESC(hws_max_conc_proc,
+ "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
+
+int cwsr_enable = 1;
+module_param(cwsr_enable, int, 0444);
+MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
@@ -60,6 +71,16 @@ module_param(send_sigterm, int, 0444);
MODULE_PARM_DESC(send_sigterm,
"Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
+int debug_largebar;
+module_param(debug_largebar, int, 0444);
+MODULE_PARM_DESC(debug_largebar,
+ "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)");
+
+int ignore_crat;
+module_param(ignore_crat, int, 0444);
+MODULE_PARM_DESC(ignore_crat,
+ "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+
static int amdkfd_init_completed;
int kgd2kfd_init(unsigned int interface_version,
@@ -112,7 +133,11 @@ static int __init kfd_module_init(void)
if (err < 0)
goto err_topology;
- kfd_process_create_wq();
+ err = kfd_process_create_wq();
+ if (err < 0)
+ goto err_create_wq;
+
+ kfd_debugfs_init();
amdkfd_init_completed = 1;
@@ -120,6 +145,8 @@ static int __init kfd_module_init(void)
return 0;
+err_create_wq:
+ kfd_topology_shutdown();
err_topology:
kfd_chardev_exit();
err_ioctl:
@@ -130,6 +157,7 @@ static void __exit kfd_module_exit(void)
{
amdkfd_init_completed = 0;
+ kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_topology_shutdown();
kfd_chardev_exit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index dfd260e..ee7061e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -29,8 +29,15 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
switch (dev->device_info->asic_family) {
case CHIP_KAVERI:
return mqd_manager_init_cik(type, dev);
+ case CHIP_HAWAII:
+ return mqd_manager_init_cik_hawaii(type, dev);
case CHIP_CARRIZO:
return mqd_manager_init_vi(type, dev);
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ return mqd_manager_init_vi_tonga(type, dev);
default:
WARN(1, "Unexpected ASIC family %u",
dev->device_info->asic_family);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 1f3a6ba..8972bcf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -85,6 +85,10 @@ struct mqd_manager {
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id);
+#if defined(CONFIG_DEBUG_FS)
+ int (*debugfs_show_mqd)(struct seq_file *m, void *data);
+#endif
+
struct mutex mqd_mutex;
struct kfd_dev *dev;
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4728fad..c00c325 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -36,6 +36,11 @@ static inline struct cik_mqd *get_mqd(void *mqd)
return (struct cik_mqd *)mqd;
}
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ return (struct cik_sdma_rlc_registers *)mqd;
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -149,7 +154,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
{
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
- uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
@@ -160,24 +165,30 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ (uint32_t __user *)p->write_ptr,
+ mms);
}
-static int update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+static int __update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q, unsigned int atc_bit)
{
struct cik_mqd *m;
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
- DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
+ DEFAULT_MIN_AVAIL_SIZE;
+ m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
+ if (atc_bit) {
+ m->cp_hqd_pq_control |= PQ_ATC_EN;
+ m->cp_hqd_ib_control |= IB_ATC_EN;
+ }
/*
* Calculating queue size which is log base 2 of actual queue size -1
* dwords and another -1 for ffs
*/
- m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- - 1 - 1;
+ m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
@@ -191,18 +202,31 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0);
+ q->queue_percent > 0 &&
+ !q->is_evicted);
return 0;
}
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ return __update_mqd(mm, mqd, q, 1);
+}
+
+static int update_mqd_hawaii(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ return __update_mqd(mm, mqd, q, 0);
+}
+
static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
struct cik_sdma_rlc_registers *m;
m = get_sdma_mqd(mqd);
- m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+ m->sdma_rlc_rb_cntl = order_base_2(q->queue_size / 4)
<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
@@ -222,7 +246,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0);
+ q->queue_percent > 0 &&
+ !q->is_evicted);
return 0;
}
@@ -343,8 +368,7 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
* Calculating queue size which is log base 2 of actual queue
* size -1 dwords
*/
- m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- - 1 - 1;
+ m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
@@ -355,20 +379,31 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0);
+ q->queue_percent > 0 &&
+ !q->is_evicted);
return 0;
}
-struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
-{
- struct cik_sdma_rlc_registers *m;
+#if defined(CONFIG_DEBUG_FS)
- m = (struct cik_sdma_rlc_registers *)mqd;
+static int debugfs_show_mqd(struct seq_file *m, void *data)
+{
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ data, sizeof(struct cik_mqd), false);
+ return 0;
+}
- return m;
+static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
+{
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ data, sizeof(struct cik_sdma_rlc_registers), false);
+ return 0;
}
+#endif
+
+
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
{
@@ -392,6 +427,9 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
break;
case KFD_MQD_TYPE_HIQ:
mqd->init_mqd = init_mqd_hiq;
@@ -400,6 +438,9 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
break;
case KFD_MQD_TYPE_SDMA:
mqd->init_mqd = init_mqd_sdma;
@@ -408,6 +449,9 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = destroy_mqd_sdma;
mqd->is_occupied = is_occupied_sdma;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+#endif
break;
default:
kfree(mqd);
@@ -417,3 +461,15 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
return mqd;
}
+struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev)
+{
+ struct mqd_manager *mqd;
+
+ mqd = mqd_manager_init_cik(type, dev);
+ if (!mqd)
+ return NULL;
+ if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+ mqd->update_mqd = update_mqd_hawaii;
+ return mqd;
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 4ea854f..89e4242 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -30,7 +30,7 @@
#include "vi_structs.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_enum.h"
-
+#include "oss/oss_3_0_sh_mask.h"
#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
static inline struct vi_mqd *get_mqd(void *mqd)
@@ -38,6 +38,11 @@ static inline struct vi_mqd *get_mqd(void *mqd)
return (struct vi_mqd *)mqd;
}
+static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct vi_sdma_mqd *)mqd;
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -84,6 +89,28 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_iq_rptr = 1;
+ if (q->tba_addr) {
+ m->compute_tba_lo = lower_32_bits(q->tba_addr >> 8);
+ m->compute_tba_hi = upper_32_bits(q->tba_addr >> 8);
+ m->compute_tma_lo = lower_32_bits(q->tma_addr >> 8);
+ m->compute_tma_hi = upper_32_bits(q->tma_addr >> 8);
+ m->compute_pgm_rsrc2 |=
+ (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
+ }
+
+ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ m->cp_hqd_persistent_state |=
+ (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+ m->cp_hqd_ctx_save_base_addr_lo =
+ lower_32_bits(q->ctx_save_restore_area_address);
+ m->cp_hqd_ctx_save_base_addr_hi =
+ upper_32_bits(q->ctx_save_restore_area_address);
+ m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
+ m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
+ m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
+ m->cp_hqd_wg_state_offset = q->ctl_stack_size;
+ }
+
*mqd = m;
if (gart_addr)
*gart_addr = addr;
@@ -98,7 +125,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
{
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
- uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
@@ -116,8 +143,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT |
mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
- m->cp_hqd_pq_control |=
- ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+ m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
@@ -125,6 +151,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
+ m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
m->cp_hqd_pq_doorbell_control =
q->doorbell_off <<
@@ -147,7 +175,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
* is safe, giving a maximum field value of 0xA.
*/
m->cp_hqd_eop_control |= min(0xA,
- ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
+ order_base_2(q->eop_ring_buffer_size / 4) - 1);
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
@@ -163,9 +191,15 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
+ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ m->cp_hqd_ctx_save_control =
+ atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
+ mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0);
+ q->queue_percent > 0 &&
+ !q->is_evicted);
return 0;
}
@@ -177,6 +211,12 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
return __update_mqd(mm, mqd, q, MTYPE_CC, 1);
}
+static int update_mqd_tonga(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ return __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+}
+
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
unsigned int timeout, uint32_t pipe_id,
@@ -234,6 +274,118 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
return retval;
}
+static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ int retval;
+ struct vi_sdma_mqd *m;
+
+
+ retval = kfd_gtt_sa_allocate(mm->dev,
+ sizeof(struct vi_sdma_mqd),
+ mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct vi_sdma_mqd *) (*mqd_mem_obj)->cpu_ptr;
+
+ memset(m, 0, sizeof(struct vi_sdma_mqd));
+
+ *mqd = m;
+ if (gart_addr != NULL)
+ *gart_addr = (*mqd_mem_obj)->gpu_addr;
+
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ (uint32_t __user *)p->write_ptr,
+ mms);
+}
+
+static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct vi_sdma_mqd *m;
+
+ m = get_sdma_mqd(mqd);
+ m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
+ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+ 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+ 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+
+ m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
+ m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
+ m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->sdmax_rlcx_doorbell =
+ q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
+
+ m->sdmax_rlcx_virtual_addr = q->sdma_vm_addr;
+
+ m->sdma_engine_id = q->sdma_engine_id;
+ m->sdma_queue_id = q->sdma_queue_id;
+
+ q->is_active = (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0 &&
+ !q->is_evicted);
+
+ return 0;
+}
+
+/*
+ * The preempt type is ignored here because there is only one way
+ * to preempt an SDMA queue.
+ */
+static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+}
+
+static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int debugfs_show_mqd(struct seq_file *m, void *data)
+{
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ data, sizeof(struct vi_mqd), false);
+ return 0;
+}
+
+static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
+{
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ data, sizeof(struct vi_sdma_mqd), false);
+ return 0;
+}
+
+#endif
+
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
{
@@ -257,6 +409,9 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
break;
case KFD_MQD_TYPE_HIQ:
mqd->init_mqd = init_mqd_hiq;
@@ -265,8 +420,20 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
break;
case KFD_MQD_TYPE_SDMA:
+ mqd->init_mqd = init_mqd_sdma;
+ mqd->uninit_mqd = uninit_mqd_sdma;
+ mqd->load_mqd = load_mqd_sdma;
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+#endif
break;
default:
kfree(mqd);
@@ -275,3 +442,16 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
return mqd;
}
+
+struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev)
+{
+ struct mqd_manager *mqd;
+
+ mqd = mqd_manager_init_vi(type, dev);
+ if (!mqd)
+ return NULL;
+ if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+ mqd->update_mqd = update_mqd_tonga;
+ return mqd;
+}
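
The ffs()-to-order_base_2() conversions in this file and in kfd_mqd_manager_cik.c encode the same field for power-of-two queue sizes: log2 of the queue size in dwords, minus one. A hedged worked example (a 4 KiB queue is 1024 dwords, giving a field value of 9):

#include <stdio.h>
#include <stdint.h>

/* order_base_2(n) for the power-of-two case: log2(n). Illustrative only. */
static unsigned int order_base_2_pow2(uint32_t n)
{
	unsigned int order = 0;

	while (n > 1) {
		n >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	uint32_t queue_size = 4096;			/* bytes, example value */
	uint32_t dwords = queue_size / 4;		/* 1024 */
	unsigned int field = order_base_2_pow2(dwords) - 1;	/* 10 - 1 = 9 */

	/* the old expression, ffs(dwords) - 1 - 1, gives the same 9 here */
	printf("queue-size field for %u bytes: %u\n", queue_size, field);
	return 0;
}
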
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 16da8ad..89ba4c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -45,7 +45,7 @@ static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
header.u32All = 0;
header.opcode = opcode;
- header.count = packet_size/sizeof(uint32_t) - 2;
+ header.count = packet_size / 4 - 2;
header.type = PM4_TYPE_3;
return header.u32All;
@@ -55,15 +55,27 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
bool *over_subscription)
{
- unsigned int process_count, queue_count;
+ unsigned int process_count, queue_count, compute_queue_count;
unsigned int map_queue_size;
+ unsigned int max_proc_per_quantum = 1;
+ struct kfd_dev *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->queue_count;
+ compute_queue_count = queue_count - pm->dqm->sdma_queue_count;
- /* check if there is over subscription*/
+ /* check if there is over subscription
+ * Note: the arbitration between the number of VMIDs and
+ * hws_max_conc_proc has been done in
+ * kgd2kfd_device_init().
+ */
*over_subscription = false;
- if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
+
+ if (dev->max_proc_per_quantum > 1)
+ max_proc_per_quantum = dev->max_proc_per_quantum;
+
+ if ((process_count > max_proc_per_quantum) ||
+ compute_queue_count > get_queues_num(pm->dqm)) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
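
Restated as a stand-alone predicate for clarity (the helper signature is illustrative; the names mirror pm_calc_rlib_size() above): SDMA queues no longer count against the HWS compute-queue budget, and exceeding max_proc_per_quantum processes also forces an over-subscribed, chained runlist.

#include <stdbool.h>

/* Hedged restatement of the over-subscription test in pm_calc_rlib_size(). */
static bool runlist_over_subscribed(unsigned int process_count,
				    unsigned int queue_count,
				    unsigned int sdma_queue_count,
				    unsigned int max_proc_per_quantum,
				    unsigned int hws_queue_slots)
{
	unsigned int compute_queue_count = queue_count - sdma_queue_count;

	return process_count > max_proc_per_quantum ||
	       compute_queue_count > hws_queue_slots;
}
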
@@ -116,10 +128,24 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
struct pm4_mes_runlist *packet;
+ int concurrent_proc_cnt = 0;
+ struct kfd_dev *kfd = pm->dqm->dev;
if (WARN_ON(!ib))
return -EFAULT;
+ /* Determine the number of processes to map together to HW:
+ * it can not exceed the number of VMIDs available to the
+ * scheduler, and it is determined by the smaller of the number
+ * of processes in the runlist and kfd module parameter
+ * hws_max_conc_proc.
+ * Note: the arbitration between the number of VMIDs and
+ * hws_max_conc_proc has been done in
+ * kgd2kfd_device_init().
+ */
+ concurrent_proc_cnt = min(pm->dqm->processes_count,
+ kfd->max_proc_per_quantum);
+
packet = (struct pm4_mes_runlist *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_runlist));
@@ -130,6 +156,7 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields4.chain = chain ? 1 : 0;
packet->bitfields4.offload_polling = 0;
packet->bitfields4.valid = 1;
+ packet->bitfields4.process_cnt = concurrent_proc_cnt;
packet->ordinal2 = lower_32_bits(ib);
packet->bitfields3.ib_base_hi = upper_32_bits(ib);
@@ -161,8 +188,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
- /* TODO: scratch support */
- packet->sh_hidden_private_base_vmid = 0;
+ packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
@@ -251,6 +277,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
return retval;
*rl_size_bytes = alloc_size_bytes;
+ pm->ib_size_bytes = alloc_size_bytes;
pr_debug("Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->queue_count);
@@ -328,6 +355,43 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
return retval;
}
+/* pm_create_release_mem - Create a RELEASE_MEM packet and return the size
+ * of this packet
+ * @gpu_addr - GPU address of the packet. It's a virtual address.
+ * @buffer - buffer to fill up with the packet. It's a CPU kernel pointer.
+ * Return - length of the packet in dwords
+ */
+uint32_t pm_create_release_mem(uint64_t gpu_addr, uint32_t *buffer)
+{
+ struct pm4_mec_release_mem *packet;
+
+ WARN_ON(!buffer);
+
+ packet = (struct pm4_mec_release_mem *)buffer;
+ memset(buffer, 0, sizeof(*packet));
+
+ packet->header.u32All = build_pm4_header(IT_RELEASE_MEM,
+ sizeof(*packet));
+
+ packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
+ packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
+ packet->bitfields2.tcl1_action_ena = 1;
+ packet->bitfields2.tc_action_ena = 1;
+ packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
+ packet->bitfields2.atc = 0;
+
+ packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
+ packet->bitfields3.int_sel =
+ int_sel___release_mem__send_interrupt_after_write_confirm;
+
+ packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
+ packet->address_hi = upper_32_bits(gpu_addr);
+
+ packet->data_lo = 0;
+
+ return sizeof(*packet) / sizeof(unsigned int);
+}
+
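
A hedged usage sketch for the new helper: the caller passes a dword-aligned GPU virtual address (the low two bits are dropped by the >> 2 above) and a buffer large enough for a struct pm4_mec_release_mem, then advances its write pointer by the returned number of dwords. The wrapper and its parameter names below are illustrative, not part of this patch:

	static uint32_t append_release_mem(uint32_t *queue_buf, uint32_t wptr,
					   uint64_t fence_gpu_addr)
	{
		/* pm_create_release_mem() returns the packet length in dwords */
		return wptr + pm_create_release_mem(fence_gpu_addr,
						    queue_buf + wptr);
	}
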
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
pm->dqm = dqm;
@@ -564,3 +628,26 @@ void pm_release_ib(struct packet_manager *pm)
}
mutex_unlock(&pm->lock);
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+int pm_debugfs_runlist(struct seq_file *m, void *data)
+{
+ struct packet_manager *pm = data;
+
+ mutex_lock(&pm->lock);
+
+ if (!pm->allocated) {
+ seq_puts(m, " No active runlist\n");
+ goto out;
+ }
+
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
+
+out:
+ mutex_unlock(&pm->lock);
+ return 0;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index d6a7961..15fff44 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -59,7 +59,7 @@ unsigned int kfd_pasid_alloc(void)
struct kfd_dev *dev = NULL;
unsigned int i = 0;
- while ((dev = kfd_topology_enum_kfd_devices(i)) != NULL) {
+ while ((kfd_topology_enum_kfd_devices(i, &dev)) == 0) {
if (dev && dev->kfd2kgd) {
kfd2kgd = dev->kfd2kgd;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9e4134c..96a9cc0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -33,14 +33,17 @@
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
+#include <linux/seq_file.h>
+#include <linux/kref.h>
#include <kgd_kfd_interface.h>
#include "amd_shared.h"
#define KFD_SYSFS_FILE_MODE 0444
-#define KFD_MMAP_DOORBELL_MASK 0x8000000000000
-#define KFD_MMAP_EVENTS_MASK 0x4000000000000
+#define KFD_MMAP_DOORBELL_MASK 0x8000000000000ull
+#define KFD_MMAP_EVENTS_MASK 0x4000000000000ull
+#define KFD_MMAP_RESERVED_MEM_MASK 0x2000000000000ull
/*
* When working with cp scheduler we should assign the HIQ manually or via
@@ -63,6 +66,15 @@
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/*
+ * Size of the per-process TBA+TMA buffer: 2 pages
+ *
+ * The first page is the TBA used for the CWSR ISA code. The second
+ * page is used as TMA for daisy-chaining a user-mode trap handler.
+ */
+#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
+#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
+
+/*
* Kernel module parameter to specify maximum number of supported queues per
* device
*/
@@ -79,11 +91,31 @@ extern int max_num_of_queues_per_device;
extern int sched_policy;
/*
+ * Kernel module parameter to specify the maximum process
+ * number per HW scheduler
+ */
+extern int hws_max_conc_proc;
+
+extern int cwsr_enable;
+
+/*
* Kernel module parameter to specify whether to send sigterm to HSA process on
* unhandled exception
*/
extern int send_sigterm;
+/*
+ * Kernel module parameter used to simulate a large-BAR machine on
+ * machines that do not have large BAR enabled.
+ */
+extern int debug_largebar;
+
+/*
+ * Ignore CRAT table during KFD initialization, can be used to work around
+ * broken CRAT tables on some AMD systems
+ */
+extern int ignore_crat;
+
/**
* enum kfd_sched_policy
*
@@ -131,6 +163,9 @@ struct kfd_device_info {
size_t ih_ring_entry_size;
uint8_t num_of_watch_points;
uint16_t mqd_size_aligned;
+ bool supports_cwsr;
+ bool needs_iommu_device;
+ bool needs_pci_atomics;
};
struct kfd_mem_obj {
@@ -200,6 +235,14 @@ struct kfd_dev {
/* Debug manager */
struct kfd_dbgmgr *dbgmgr;
+
+ /* Maximum process number mapped to HW scheduler */
+ unsigned int max_proc_per_quantum;
+
+ /* CWSR */
+ bool cwsr_enabled;
+ const void *cwsr_isa;
+ unsigned int cwsr_isa_size;
};
/* KGD2KFD callbacks */
@@ -298,7 +341,11 @@ enum kfd_queue_format {
* @is_interop: Defines if this is a interop queue. Interop queue means that
* the queue can access both graphics and compute resources.
*
- * @is_active: Defines if the queue is active or not.
+ * @is_evicted: Defines if the queue is evicted. Only active queues
+ * can be evicted; eviction renders them inactive.
+ *
+ * @is_active: Defines if the queue is active or not. @is_active and
+ * @is_evicted are protected by the DQM lock.
*
* @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
* of the queue.
@@ -320,6 +367,7 @@ struct queue_properties {
uint32_t __iomem *doorbell_ptr;
uint32_t doorbell_off;
bool is_interop;
+ bool is_evicted;
bool is_active;
/* Not relevant for user mode queues in cp scheduling */
unsigned int vmid;
@@ -332,6 +380,9 @@ struct queue_properties {
uint32_t eop_ring_buffer_size;
uint64_t ctx_save_restore_area_address;
uint32_t ctx_save_restore_area_size;
+ uint32_t ctl_stack_size;
+ uint64_t tba_addr;
+ uint64_t tma_addr;
};
/**
@@ -420,6 +471,7 @@ struct qcm_process_device {
unsigned int queue_count;
unsigned int vmid;
bool is_debug;
+ unsigned int evicted; /* eviction counter, 0=active */
/* This flag tells if we should reset all wavefronts on
* process termination
@@ -439,8 +491,37 @@ struct qcm_process_device {
uint32_t num_gws;
uint32_t num_oac;
uint32_t sh_hidden_private_base;
+
+ /* CWSR memory */
+ void *cwsr_kaddr;
+ uint64_t cwsr_base;
+ uint64_t tba_addr;
+ uint64_t tma_addr;
+
+ /* IB memory */
+ uint64_t ib_base;
+ void *ib_kaddr;
};
+/* KFD Memory Eviction */
+
+/* Approx. wait time before attempting to restore evicted BOs */
+#define PROCESS_RESTORE_TIME_MS 100
+/* Approx. back off time if restore fails due to lack of memory */
+#define PROCESS_BACK_OFF_TIME_MS 100
+/* Approx. time before evicting the process again */
+#define PROCESS_ACTIVE_TIME_MS 10
+
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+ struct dma_fence *fence);
+
+/* 8-byte handle containing the GPU ID in the most significant 4 bytes and
+ * idr_handle in the least significant 4 bytes
+ */
+#define MAKE_HANDLE(gpu_id, idr_handle) \
+ (((uint64_t)(gpu_id) << 32) + idr_handle)
+#define GET_GPU_ID(handle) (handle >> 32)
+#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
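
A worked example of the handle packing, using made-up values:

	/* Illustrative values only */
	uint64_t handle = MAKE_HANDLE(0x1002, 7);	/* 0x0000100200000007 */
	uint32_t gpu_id = GET_GPU_ID(handle);		/* 0x1002 */
	uint32_t idr_handle = GET_IDR_HANDLE(handle);	/* 7 */
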
enum kfd_pdd_bound {
PDD_UNBOUND = 0,
@@ -473,8 +554,12 @@ struct kfd_process_device {
uint64_t scratch_base;
uint64_t scratch_limit;
- /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
- enum kfd_pdd_bound bound;
+ /* VM context for GPUVM allocations */
+ struct file *drm_file;
+ void *vm;
+
+ /* GPUVM allocations storage */
+ struct idr alloc_idr;
/* Flag used to tell the pdd has dequeued from the dqm.
* This is used to prevent dev->dqm->ops.process_termination() from
@@ -482,6 +567,9 @@ struct kfd_process_device {
* function.
*/
bool already_dequeued;
+
+ /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
+ enum kfd_pdd_bound bound;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -501,6 +589,9 @@ struct kfd_process {
*/
void *mm;
+ struct kref ref;
+ struct work_struct release_work;
+
struct mutex mutex;
/*
@@ -541,8 +632,30 @@ struct kfd_process {
size_t signal_mapped_size;
size_t signal_event_count;
bool signal_event_limit_reached;
+
+ /* Information used for memory eviction */
+ void *kgd_process_info;
+ /* Eviction fence that is attached to all the BOs of this process. The
+ * fence will be triggered during eviction and new one will be created
+ * during restore
+ */
+ struct dma_fence *ef;
+
+ /* Work items for evicting and restoring BOs */
+ struct delayed_work eviction_work;
+ struct delayed_work restore_work;
+ /* seqno of the last scheduled eviction */
+ unsigned int last_eviction_seqno;
+ /* Approx. the last timestamp (in jiffies) when the process was
+ * restored after an eviction
+ */
+ unsigned long last_restore_timestamp;
};
+#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
+extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
+extern struct srcu_struct kfd_processes_srcu;
+
/**
* Ioctl function type.
*
@@ -561,22 +674,36 @@ struct amdkfd_ioctl_desc {
const char *name;
};
-void kfd_process_create_wq(void);
+int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
-struct kfd_process *kfd_create_process(const struct task_struct *);
+struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
+struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
+void kfd_unref_process(struct kfd_process *p);
+void kfd_suspend_all_processes(void);
+int kfd_resume_all_processes(void);
+int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
struct kfd_process *p);
-int kfd_bind_processes_to_device(struct kfd_dev *dev);
-void kfd_unbind_processes_from_device(struct kfd_dev *dev);
-void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process *p);
+int kfd_reserved_mem_mmap(struct kfd_process *process,
+ struct vm_area_struct *vma);
+
+/* KFD process API for creating and translating handles */
+int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem);
+void *kfd_process_device_translate_handle(struct kfd_process_device *p,
+ int handle);
+void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
+ int handle);
+
/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
struct kfd_process *p);
@@ -624,9 +751,12 @@ int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+ uint32_t proximity_domain);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
-struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
+int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
+int kfd_numa_node_to_apic_id(int numa_node_id);
/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
@@ -643,8 +773,6 @@ int kgd2kfd_resume(struct kfd_dev *kfd);
int kfd_init_apertures(struct kfd_process *process);
/* Queue Context Management */
-struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
-
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
@@ -654,8 +782,12 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
struct kfd_dev *dev);
+struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
struct kfd_dev *dev);
+struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
@@ -699,6 +831,7 @@ struct packet_manager {
struct mutex lock;
bool allocated;
struct kfd_mem_obj *ib_buffer_obj;
+ unsigned int ib_size_bytes;
};
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
@@ -716,6 +849,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
void pm_release_ib(struct packet_manager *pm);
+uint32_t pm_create_release_mem(uint64_t gpu_addr, uint32_t *buffer);
+
uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
/* Events */
@@ -737,12 +872,35 @@ void kfd_signal_iommu_event(struct kfd_dev *dev,
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
+int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
+ uint64_t size);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
uint32_t event_type, bool auto_reset, uint32_t node_id,
uint32_t *event_id, uint32_t *event_trigger_data,
uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+void kfd_flush_tlb(struct kfd_process_device *pdd);
+
int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+/* Debugfs */
+#if defined(CONFIG_DEBUG_FS)
+
+void kfd_debugfs_init(void);
+void kfd_debugfs_fini(void);
+int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
+int pqm_debugfs_mqds(struct seq_file *m, void *data);
+int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
+int dqm_debugfs_hqds(struct seq_file *m, void *data);
+int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
+int pm_debugfs_runlist(struct seq_file *m, void *data);
+
+#else
+
+static inline void kfd_debugfs_init(void) {}
+static inline void kfd_debugfs_fini(void) {}
+
+#endif
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 1f5ccd28..1711ad0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -24,40 +24,63 @@
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
+#include <linux/mman.h>
+#include <linux/file.h>
struct mm_struct;
#include "kfd_priv.h"
+#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
+#include "kfd_iommu.h"
/*
* List of struct kfd_process (field kfd_process).
* Unique/indexed by mm_struct*
*/
-#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
-static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
+DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);
-DEFINE_STATIC_SRCU(kfd_processes_srcu);
+DEFINE_SRCU(kfd_processes_srcu);
+/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;
-struct kfd_process_release_work {
- struct work_struct kfd_work;
- struct kfd_process *p;
-};
+/* Ordered, single-threaded workqueue for restoring evicted
+ * processes. Restoring multiple processes concurrently under memory
+ * pressure can lead to processes blocking each other from validating
+ * their BOs and result in a live-lock situation where processes
+ * remain evicted indefinitely.
+ */
+static struct workqueue_struct *kfd_restore_wq;
static struct kfd_process *find_process(const struct task_struct *thread);
-static struct kfd_process *create_process(const struct task_struct *thread);
+static void kfd_process_ref_release(struct kref *ref);
+static struct kfd_process *create_process(const struct task_struct *thread,
+ struct file *filep);
+
+static void evict_process_worker(struct work_struct *work);
+static void restore_process_worker(struct work_struct *work);
+
-void kfd_process_create_wq(void)
+int kfd_process_create_wq(void)
{
if (!kfd_process_wq)
kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
+ if (!kfd_restore_wq)
+ kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
+
+ if (!kfd_process_wq || !kfd_restore_wq) {
+ kfd_process_destroy_wq();
+ return -ENOMEM;
+ }
+
+ return 0;
}
void kfd_process_destroy_wq(void)
@@ -66,11 +89,122 @@ void kfd_process_destroy_wq(void)
destroy_workqueue(kfd_process_wq);
kfd_process_wq = NULL;
}
+ if (kfd_restore_wq) {
+ destroy_workqueue(kfd_restore_wq);
+ kfd_restore_wq = NULL;
+ }
+}
+
+static void kfd_process_free_gpuvm(struct kgd_mem *mem,
+ struct kfd_process_device *pdd)
+{
+ struct kfd_dev *dev = pdd->dev;
+
+ dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
+ dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
+}
+
+/* kfd_process_alloc_gpuvm - Allocate GPUVM memory for the KFD process
+ * This function should only be called right after the process
+ * is created and while kfd_processes_mutex is still being held,
+ * to avoid concurrency. Because of that exclusiveness, we do
+ * not need to take p->mutex.
+ */
+static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
+ uint64_t gpu_va, uint32_t size,
+ uint32_t flags, void **kptr)
+{
+ struct kfd_dev *kdev = pdd->dev;
+ struct kgd_mem *mem = NULL;
+ int handle;
+ int err;
+
+ err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+ pdd->vm, &mem, NULL, flags);
+ if (err)
+ goto err_alloc_mem;
+
+ err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
+ if (err)
+ goto err_map_mem;
+
+ err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
+ if (err) {
+ pr_debug("Sync memory failed, wait interrupted by user signal\n");
+ goto sync_memory_failed;
+ }
+
+ /* Create an obj handle so kfd_process_device_remove_obj_handle
+ * will take care of the bo removal when the process finishes.
+ * We do not need to take p->mutex, because the process is just
+ * created and the ioctls have not had the chance to run.
+ */
+ handle = kfd_process_device_create_obj_handle(pdd, mem);
+
+ if (handle < 0) {
+ err = handle;
+ goto free_gpuvm;
+ }
+
+ if (kptr) {
+ err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
+ (struct kgd_mem *)mem, kptr, NULL);
+ if (err) {
+ pr_debug("Map GTT BO to kernel failed\n");
+ goto free_obj_handle;
+ }
+ }
+
+ return err;
+
+free_obj_handle:
+ kfd_process_device_remove_obj_handle(pdd, handle);
+free_gpuvm:
+sync_memory_failed:
+ kfd_process_free_gpuvm(mem, pdd);
+ return err;
+
+err_map_mem:
+ kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
+err_alloc_mem:
+ *kptr = NULL;
+ return err;
}
-struct kfd_process *kfd_create_process(const struct task_struct *thread)
+/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
+ * process for IB usage. The memory reserved is for KFD to submit
+ * IBs to AMDGPU from kernel space. If the memory is reserved
+ * successfully, ib_kaddr will hold the CPU/kernel
+ * address. Check ib_kaddr before accessing the memory.
+ */
+static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
+{
+ struct qcm_process_device *qpd = &pdd->qpd;
+ uint32_t flags = ALLOC_MEM_FLAGS_GTT |
+ ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
+ ALLOC_MEM_FLAGS_WRITABLE |
+ ALLOC_MEM_FLAGS_EXECUTABLE;
+ void *kaddr;
+ int ret;
+
+ if (qpd->ib_kaddr || !qpd->ib_base)
+ return 0;
+
+ /* ib_base is only set for dGPU */
+ ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
+ &kaddr);
+ if (ret)
+ return ret;
+
+ qpd->ib_kaddr = kaddr;
+
+ return 0;
+}
+
+struct kfd_process *kfd_create_process(struct file *filep)
{
struct kfd_process *process;
+ struct task_struct *thread = current;
if (!thread->mm)
return ERR_PTR(-EINVAL);
@@ -79,9 +213,6 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
if (thread->group_leader->mm != thread->mm)
return ERR_PTR(-EINVAL);
- /* Take mmap_sem because we call __mmu_notifier_register inside */
- down_write(&thread->mm->mmap_sem);
-
/*
* take kfd processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
@@ -93,14 +224,11 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
process = find_process(thread);
if (process)
pr_debug("Process already found\n");
-
- if (!process)
- process = create_process(thread);
+ else
+ process = create_process(thread, filep);
mutex_unlock(&kfd_processes_mutex);
- up_write(&thread->mm->mmap_sem);
-
return process;
}
@@ -144,63 +272,114 @@ static struct kfd_process *find_process(const struct task_struct *thread)
return p;
}
-static void kfd_process_wq_release(struct work_struct *work)
+void kfd_unref_process(struct kfd_process *p)
{
- struct kfd_process_release_work *my_work;
- struct kfd_process_device *pdd, *temp;
- struct kfd_process *p;
+ kref_put(&p->ref, kfd_process_ref_release);
+}
- my_work = (struct kfd_process_release_work *) work;
+static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
+{
+ struct kfd_process *p = pdd->process;
+ void *mem;
+ int id;
- p = my_work->p;
+ /*
+ * Remove all handles from idr and release appropriate
+ * local memory object
+ */
+ idr_for_each_entry(&pdd->alloc_idr, mem, id) {
+ struct kfd_process_device *peer_pdd;
+
+ list_for_each_entry(peer_pdd, &p->per_device_data,
+ per_device_list) {
+ if (!peer_pdd->vm)
+ continue;
+ peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
+ peer_pdd->dev->kgd, mem, peer_pdd->vm);
+ }
- pr_debug("Releasing process (pasid %d) in workqueue\n",
- p->pasid);
+ pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
+ kfd_process_device_remove_obj_handle(pdd, id);
+ }
+}
- mutex_lock(&p->mutex);
+static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ kfd_process_device_free_bos(pdd);
+}
+
+static void kfd_process_destroy_pdds(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd, *temp;
list_for_each_entry_safe(pdd, temp, &p->per_device_data,
- per_device_list) {
- pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
+ per_device_list) {
+ pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
pdd->dev->id, p->pasid);
- if (pdd->bound == PDD_BOUND)
- amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
+ if (pdd->drm_file)
+ fput(pdd->drm_file);
+ else if (pdd->vm)
+ pdd->dev->kfd2kgd->destroy_process_vm(
+ pdd->dev->kgd, pdd->vm);
list_del(&pdd->per_device_list);
+
+ if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
+ free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
+ get_order(KFD_CWSR_TBA_TMA_SIZE));
+
+ idr_destroy(&pdd->alloc_idr);
+
kfree(pdd);
}
+}
+
+/* No process locking is needed in this function, because the process
+ * is not findable any more. We must assume that no other thread is
+ * using it any more, otherwise we couldn't safely free the process
+ * structure in the end.
+ */
+static void kfd_process_wq_release(struct work_struct *work)
+{
+ struct kfd_process *p = container_of(work, struct kfd_process,
+ release_work);
+
+ kfd_iommu_unbind_process(p);
+
+ kfd_process_free_outstanding_kfd_bos(p);
+
+ kfd_process_destroy_pdds(p);
+ dma_fence_put(p->ef);
kfd_event_free_process(p);
kfd_pasid_free(p->pasid);
kfd_free_process_doorbells(p);
- mutex_unlock(&p->mutex);
-
mutex_destroy(&p->mutex);
- kfree(p);
+ put_task_struct(p->lead_thread);
- kfree(work);
+ kfree(p);
}
-static void kfd_process_destroy_delayed(struct rcu_head *rcu)
+static void kfd_process_ref_release(struct kref *ref)
{
- struct kfd_process_release_work *work;
- struct kfd_process *p;
-
- p = container_of(rcu, struct kfd_process, rcu);
+ struct kfd_process *p = container_of(ref, struct kfd_process, ref);
- mmdrop(p->mm);
+ INIT_WORK(&p->release_work, kfd_process_wq_release);
+ queue_work(kfd_process_wq, &p->release_work);
+}
- work = kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
+static void kfd_process_destroy_delayed(struct rcu_head *rcu)
+{
+ struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);
- if (work) {
- INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
- work->p = p;
- queue_work(kfd_process_wq, (struct work_struct *) work);
- }
+ kfd_unref_process(p);
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
@@ -222,6 +401,9 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
mutex_unlock(&kfd_processes_mutex);
synchronize_srcu(&kfd_processes_srcu);
+ cancel_delayed_work_sync(&p->eviction_work);
+ cancel_delayed_work_sync(&p->restore_work);
+
mutex_lock(&p->mutex);
/* Iterate over all process device data structures and if the
@@ -244,15 +426,12 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
kfd_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm);
+ /* Indicate to other users that MM is no longer valid */
+ p->mm = NULL;
+
mutex_unlock(&p->mutex);
- /*
- * Because we drop mm_count inside kfd_process_destroy_delayed
- * and because the mmu_notifier_unregister function also drop
- * mm_count we need to take an extra count here.
- */
- mmgrab(p->mm);
- mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
+ mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}
@@ -260,7 +439,74 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.release = kfd_process_notifier_release,
};
-static struct kfd_process *create_process(const struct task_struct *thread)
+static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+{
+ unsigned long offset;
+ struct kfd_process_device *pdd;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ struct kfd_dev *dev = pdd->dev;
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
+ continue;
+
+ offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
+ qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
+ KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
+ MAP_SHARED, offset);
+
+ if (IS_ERR_VALUE(qpd->tba_addr)) {
+ int err = qpd->tba_addr;
+
+ pr_err("Failure to set tba address. error %d.\n", err);
+ qpd->tba_addr = 0;
+ qpd->cwsr_kaddr = NULL;
+ return err;
+ }
+
+ memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+
+ qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
+ pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
+ qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
+ }
+
+ return 0;
+}
+
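
The mmap offset built above folds the device id together with KFD_MMAP_RESERVED_MEM_MASK before shifting by PAGE_SHIFT, so the page offset the kernel later sees in vma->vm_pgoff carries both pieces of information. A small sketch of the round trip, assuming a 64-bit page offset and a made-up device id:

	u64 dev_id = 3;					/* made-up device id */
	u64 offset = (dev_id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
	u64 pgoff = offset >> PAGE_SHIFT;		/* what later shows up in vma->vm_pgoff */
	/* pgoff & ~KFD_MMAP_RESERVED_MEM_MASK recovers dev_id (3 here) */
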
+static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
+{
+ struct kfd_dev *dev = pdd->dev;
+ struct qcm_process_device *qpd = &pdd->qpd;
+ uint32_t flags = ALLOC_MEM_FLAGS_GTT |
+ ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ void *kaddr;
+ int ret;
+
+ if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
+ return 0;
+
+ /* cwsr_base is only set for dGPU */
+ ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
+ KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
+ if (ret)
+ return ret;
+
+ qpd->cwsr_kaddr = kaddr;
+ qpd->tba_addr = qpd->cwsr_base;
+
+ memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+
+ qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
+ pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
+ qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
+
+ return 0;
+}
+
+static struct kfd_process *create_process(const struct task_struct *thread,
+ struct file *filep)
{
struct kfd_process *process;
int err = -ENOMEM;
@@ -277,13 +523,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (kfd_alloc_process_doorbells(process) < 0)
goto err_alloc_doorbells;
+ kref_init(&process->ref);
+
mutex_init(&process->mutex);
process->mm = thread->mm;
/* register notifier */
process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
- err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
+ err = mmu_notifier_register(&process->mmu_notifier, process->mm);
if (err)
goto err_mmu_notifier;
@@ -291,6 +539,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
(uintptr_t)process->mm);
process->lead_thread = thread->group_leader;
+ get_task_struct(process->lead_thread);
INIT_LIST_HEAD(&process->per_device_data);
@@ -306,8 +555,19 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err != 0)
goto err_init_apertures;
+ INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
+ INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
+ process->last_restore_timestamp = get_jiffies_64();
+
+ err = kfd_process_init_cwsr_apu(process, filep);
+ if (err)
+ goto err_init_cwsr;
+
return process;
+err_init_cwsr:
+ kfd_process_free_outstanding_kfd_bos(process);
+ kfd_process_destroy_pdds(process);
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
@@ -343,20 +603,86 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process_device *pdd = NULL;
pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
- if (pdd != NULL) {
- pdd->dev = dev;
- INIT_LIST_HEAD(&pdd->qpd.queues_list);
- INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
- pdd->qpd.dqm = dev->dqm;
- pdd->process = p;
- pdd->bound = PDD_UNBOUND;
- pdd->already_dequeued = false;
- list_add(&pdd->per_device_list, &p->per_device_data);
- }
+ if (!pdd)
+ return NULL;
+
+ pdd->dev = dev;
+ INIT_LIST_HEAD(&pdd->qpd.queues_list);
+ INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+ pdd->qpd.dqm = dev->dqm;
+ pdd->qpd.pqm = &p->pqm;
+ pdd->qpd.evicted = 0;
+ pdd->process = p;
+ pdd->bound = PDD_UNBOUND;
+ pdd->already_dequeued = false;
+ list_add(&pdd->per_device_list, &p->per_device_data);
+
+ /* Init idr used for memory handle translation */
+ idr_init(&pdd->alloc_idr);
return pdd;
}
+/**
+ * kfd_process_device_init_vm - Initialize a VM for a process-device
+ *
+ * @pdd: The process-device
+ * @drm_file: Optional pointer to a DRM file descriptor
+ *
+ * If @drm_file is specified, it will be used to acquire the VM from
+ * that file descriptor. If successful, the @pdd takes ownership of
+ * the file descriptor.
+ *
+ * If @drm_file is NULL, a new VM is created.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+int kfd_process_device_init_vm(struct kfd_process_device *pdd,
+ struct file *drm_file)
+{
+ struct kfd_process *p;
+ struct kfd_dev *dev;
+ int ret;
+
+ if (pdd->vm)
+ return drm_file ? -EBUSY : 0;
+
+ p = pdd->process;
+ dev = pdd->dev;
+
+ if (drm_file)
+ ret = dev->kfd2kgd->acquire_process_vm(
+ dev->kgd, drm_file,
+ &pdd->vm, &p->kgd_process_info, &p->ef);
+ else
+ ret = dev->kfd2kgd->create_process_vm(
+ dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef);
+ if (ret) {
+ pr_err("Failed to create process VM object\n");
+ return ret;
+ }
+
+ ret = kfd_process_device_reserve_ib_mem(pdd);
+ if (ret)
+ goto err_reserve_ib_mem;
+ ret = kfd_process_device_init_cwsr_dgpu(pdd);
+ if (ret)
+ goto err_init_cwsr;
+
+ pdd->drm_file = drm_file;
+
+ return 0;
+
+err_init_cwsr:
+err_reserve_ib_mem:
+ kfd_process_device_free_bos(pdd);
+ if (!drm_file)
+ dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
+ pdd->vm = NULL;
+
+ return ret;
+}
+
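
A short sketch of the two call paths documented above, following the early-return behaviour visible in the function body; 'pdd', 'drm_file' and 'err' are illustrative:

	err = kfd_process_device_init_vm(pdd, NULL);		/* creates a new VM */
	err = kfd_process_device_init_vm(pdd, NULL);		/* no-op, returns 0 */
	err = kfd_process_device_init_vm(pdd, drm_file);	/* -EBUSY: a VM already exists */
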
/*
* Direct the IOMMU to bind the process (specifically the pasid->mm)
* to the device.
@@ -376,161 +702,368 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
- if (pdd->bound == PDD_BOUND) {
- return pdd;
- } else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
- pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
- return ERR_PTR(-EINVAL);
- }
-
- err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
- if (err < 0)
+ err = kfd_iommu_bind_process_to_device(pdd);
+ if (err)
return ERR_PTR(err);
- pdd->bound = PDD_BOUND;
+ err = kfd_process_device_init_vm(pdd, NULL);
+ if (err)
+ return ERR_PTR(err);
return pdd;
}
-/*
- * Bind processes do the device that have been temporarily unbound
- * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
+struct kfd_process_device *kfd_get_first_process_device_data(
+ struct kfd_process *p)
+{
+ return list_first_entry(&p->per_device_data,
+ struct kfd_process_device,
+ per_device_list);
+}
+
+struct kfd_process_device *kfd_get_next_process_device_data(
+ struct kfd_process *p,
+ struct kfd_process_device *pdd)
+{
+ if (list_is_last(&pdd->per_device_list, &p->per_device_data))
+ return NULL;
+ return list_next_entry(pdd, per_device_list);
+}
+
+bool kfd_has_process_device_data(struct kfd_process *p)
+{
+ return !(list_empty(&p->per_device_data));
+}
+
+/* Create specific handle mapped to mem from process local memory idr
+ * Assumes that the process lock is held.
*/
-int kfd_bind_processes_to_device(struct kfd_dev *dev)
+int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
+ void *mem)
{
- struct kfd_process_device *pdd;
- struct kfd_process *p;
+ return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
+}
+
+/* Translate specific handle from process local memory idr
+ * Assumes that the process lock is held.
+ */
+void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
+ int handle)
+{
+ if (handle < 0)
+ return NULL;
+
+ return idr_find(&pdd->alloc_idr, handle);
+}
+
+/* Remove specific handle from process local memory idr
+ * Assumes that the process lock is held.
+ */
+void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
+ int handle)
+{
+ if (handle >= 0)
+ idr_remove(&pdd->alloc_idr, handle);
+}
+
+/* This increments the process->ref counter. */
+struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
+{
+ struct kfd_process *p, *ret_p = NULL;
unsigned int temp;
- int err = 0;
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- mutex_lock(&p->mutex);
- pdd = kfd_get_process_device_data(dev, p);
- if (pdd->bound != PDD_BOUND_SUSPENDED) {
- mutex_unlock(&p->mutex);
- continue;
- }
-
- err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
- p->lead_thread);
- if (err < 0) {
- pr_err("Unexpected pasid %d binding failure\n",
- p->pasid);
- mutex_unlock(&p->mutex);
+ if (p->pasid == pasid) {
+ kref_get(&p->ref);
+ ret_p = p;
break;
}
-
- pdd->bound = PDD_BOUND;
- mutex_unlock(&p->mutex);
}
srcu_read_unlock(&kfd_processes_srcu, idx);
- return err;
+ return ret_p;
}
-/*
- * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
- * processes will be restored to PDD_BOUND state in
- * kfd_bind_processes_to_device.
- */
-void kfd_unbind_processes_from_device(struct kfd_dev *dev)
+/* This increments the process->ref counter. */
+struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
- struct kfd_process_device *pdd;
struct kfd_process *p;
- unsigned int temp;
int idx = srcu_read_lock(&kfd_processes_srcu);
- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- mutex_lock(&p->mutex);
- pdd = kfd_get_process_device_data(dev, p);
-
- if (pdd->bound == PDD_BOUND)
- pdd->bound = PDD_BOUND_SUSPENDED;
- mutex_unlock(&p->mutex);
- }
+ p = find_process_by_mm(mm);
+ if (p)
+ kref_get(&p->ref);
srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ return p;
}
-void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
+/* process_evict_queues - Evict all user queues of a process
+ *
+ * Eviction is reference-counted per process-device. This means multiple
+ * evictions from different sources can be nested safely.
+ */
+static int process_evict_queues(struct kfd_process *p)
{
- struct kfd_process *p;
struct kfd_process_device *pdd;
+ int r = 0;
+ unsigned int n_evicted = 0;
- /*
- * Look for the process that matches the pasid. If there is no such
- * process, we either released it in amdkfd's own notifier, or there
- * is a bug. Unfortunately, there is no way to tell...
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
+ &pdd->qpd);
+ if (r) {
+ pr_err("Failed to evict process queues\n");
+ goto fail;
+ }
+ n_evicted++;
+ }
+
+ return r;
+
+fail:
+ /* To keep state consistent, roll back partial eviction by
+ * restoring queues
*/
- p = kfd_lookup_process_by_pasid(pasid);
- if (!p)
- return;
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ if (n_evicted == 0)
+ break;
+ if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
+ &pdd->qpd))
+ pr_err("Failed to restore queues\n");
- pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ n_evicted--;
+ }
- mutex_lock(kfd_get_dbgmgr_mutex());
+ return r;
+}
- if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
- if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
- kfd_dbgmgr_destroy(dev->dbgmgr);
- dev->dbgmgr = NULL;
+/* process_restore_queues - Restore all user queues of a process */
+static int process_restore_queues(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+ int r, ret = 0;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
+ &pdd->qpd);
+ if (r) {
+ pr_err("Failed to restore process queues\n");
+ if (!ret)
+ ret = r;
}
}
- mutex_unlock(kfd_get_dbgmgr_mutex());
+ return ret;
+}
- pdd = kfd_get_process_device_data(dev, p);
- if (pdd)
- /* For GPU relying on IOMMU, we need to dequeue here
- * when PASID is still bound.
- */
- kfd_process_dequeue_from_device(pdd);
+static void evict_process_worker(struct work_struct *work)
+{
+ int ret;
+ struct kfd_process *p;
+ struct delayed_work *dwork;
- mutex_unlock(&p->mutex);
+ dwork = to_delayed_work(work);
+
+ /* Process termination destroys this worker thread. So during the
+ * lifetime of this thread, kfd_process p will be valid
+ */
+ p = container_of(dwork, struct kfd_process, eviction_work);
+ WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
+ "Eviction fence mismatch\n");
+
+ /* A narrow window of overlap between the restore and evict work
+ * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
+ * unreserves the KFD BOs, the process can be evicted again. But
+ * restore still has a few more steps to finish. So let's wait for
+ * any previous restore work to complete.
+ */
+ flush_delayed_work(&p->restore_work);
+
+ pr_debug("Started evicting pasid %d\n", p->pasid);
+ ret = process_evict_queues(p);
+ if (!ret) {
+ dma_fence_signal(p->ef);
+ dma_fence_put(p->ef);
+ p->ef = NULL;
+ queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+
+ pr_debug("Finished evicting pasid %d\n", p->pasid);
+ } else
+ pr_err("Failed to evict queues of pasid %d\n", p->pasid);
}
-struct kfd_process_device *kfd_get_first_process_device_data(
- struct kfd_process *p)
+static void restore_process_worker(struct work_struct *work)
{
- return list_first_entry(&p->per_device_data,
- struct kfd_process_device,
- per_device_list);
+ struct delayed_work *dwork;
+ struct kfd_process *p;
+ struct kfd_process_device *pdd;
+ int ret = 0;
+
+ dwork = to_delayed_work(work);
+
+ /* Process termination destroys this worker thread. So during the
+ * lifetime of this thread, kfd_process p will be valid
+ */
+ p = container_of(dwork, struct kfd_process, restore_work);
+
+ /* Call restore_process_bos on the first KGD device. This function
+ * takes care of restoring the whole process including other devices.
+ * Restore can fail if enough memory is not available. If so,
+ * reschedule again.
+ */
+ pdd = list_first_entry(&p->per_device_data,
+ struct kfd_process_device,
+ per_device_list);
+
+ pr_debug("Started restoring pasid %d\n", p->pasid);
+
+ /* Set last_restore_timestamp before the restore has actually
+ * succeeded. Otherwise this would have to be set by KGD (restore_process_bos)
+ * before KFD BOs are unreserved. If not, the process can be evicted
+ * again before the timestamp is set.
+ * If restore fails, the timestamp will be set again in the next
+ * attempt. This means that the minimum GPU quantum would be
+ * PROCESS_ACTIVE_TIME_MS minus (the time it takes to execute the
+ * following two functions)
+ */
+
+ p->last_restore_timestamp = get_jiffies_64();
+ ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
+ &p->ef);
+ if (ret) {
+ pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
+ p->pasid, PROCESS_BACK_OFF_TIME_MS);
+ ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
+ WARN(!ret, "reschedule restore work failed\n");
+ return;
+ }
+
+ ret = process_restore_queues(p);
+ if (!ret)
+ pr_debug("Finished restoring pasid %d\n", p->pasid);
+ else
+ pr_err("Failed to restore queues of pasid %d\n", p->pasid);
}
-struct kfd_process_device *kfd_get_next_process_device_data(
- struct kfd_process *p,
- struct kfd_process_device *pdd)
+void kfd_suspend_all_processes(void)
{
- if (list_is_last(&pdd->per_device_list, &p->per_device_data))
- return NULL;
- return list_next_entry(pdd, per_device_list);
+ struct kfd_process *p;
+ unsigned int temp;
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ cancel_delayed_work_sync(&p->eviction_work);
+ cancel_delayed_work_sync(&p->restore_work);
+
+ if (process_evict_queues(p))
+ pr_err("Failed to suspend process %d\n", p->pasid);
+ dma_fence_signal(p->ef);
+ dma_fence_put(p->ef);
+ p->ef = NULL;
+ }
+ srcu_read_unlock(&kfd_processes_srcu, idx);
}
-bool kfd_has_process_device_data(struct kfd_process *p)
+int kfd_resume_all_processes(void)
{
- return !(list_empty(&p->per_device_data));
+ struct kfd_process *p;
+ unsigned int temp;
+ int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
+ pr_err("Restore process %d failed during resume\n",
+ p->pasid);
+ ret = -EFAULT;
+ }
+ }
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+ return ret;
}
-/* This returns with process->mutex locked. */
-struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
+int kfd_reserved_mem_mmap(struct kfd_process *process,
+ struct vm_area_struct *vma)
+{
+ struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
+ struct kfd_process_device *pdd;
+ struct qcm_process_device *qpd;
+
+ if (!dev)
+ return -EINVAL;
+ if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
+ pr_err("Incorrect CWSR mapping size.\n");
+ return -EINVAL;
+ }
+
+ pdd = kfd_get_process_device_data(dev, process);
+ if (!pdd)
+ return -EINVAL;
+ qpd = &pdd->qpd;
+
+ qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(KFD_CWSR_TBA_TMA_SIZE));
+ if (!qpd->cwsr_kaddr) {
+ pr_err("Error allocating per process CWSR buffer.\n");
+ return -ENOMEM;
+ }
+
+ vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+ | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+ /* Mapping pages to user process */
+ return remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(__pa(qpd->cwsr_kaddr)),
+ KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
+}
+
+void kfd_flush_tlb(struct kfd_process_device *pdd)
+{
+ struct kfd_dev *dev = pdd->dev;
+ const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
+
+ if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ /* Nothing to flush until a VMID is assigned, which
+ * only happens when the first queue is created.
+ */
+ if (pdd->qpd.vmid)
+ f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
+ } else {
+ f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
+ }
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
struct kfd_process *p;
unsigned int temp;
+ int r = 0;
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- if (p->pasid == pasid) {
- mutex_lock(&p->mutex);
+ seq_printf(m, "Process %d PASID %d:\n",
+ p->lead_thread->tgid, p->pasid);
+
+ mutex_lock(&p->mutex);
+ r = pqm_debugfs_mqds(m, &p->pqm);
+ mutex_unlock(&p->mutex);
+
+ if (r)
break;
- }
}
srcu_read_unlock(&kfd_processes_srcu, idx);
- return p;
+ return r;
}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index a3f1e62..7817e32 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -178,10 +178,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
return retval;
if (list_empty(&pdd->qpd.queues_list) &&
- list_empty(&pdd->qpd.priv_queue_list)) {
- pdd->qpd.pqm = pqm;
+ list_empty(&pdd->qpd.priv_queue_list))
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
- }
pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) {
@@ -203,15 +201,15 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
- retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
- &q->properties.vmid);
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
- if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+ if ((dev->dqm->sched_policy ==
+ KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
@@ -224,8 +222,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
- retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
- &q->properties.vmid);
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
@@ -315,6 +312,10 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (pqn->q) {
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
+ if (retval) {
+ pr_debug("Destroy queue failed, returned %d\n", retval);
+ goto err_destroy_queue;
+ }
uninit_queue(pqn->q);
}
@@ -326,6 +327,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
list_empty(&pdd->qpd.priv_queue_list))
dqm->ops.unregister_process(dqm, &pdd->qpd);
+err_destroy_queue:
return retval;
}
@@ -367,4 +369,67 @@ struct kernel_queue *pqm_get_kernel_queue(
return NULL;
}
+#if defined(CONFIG_DEBUG_FS)
+
+int pqm_debugfs_mqds(struct seq_file *m, void *data)
+{
+ struct process_queue_manager *pqm = data;
+ struct process_queue_node *pqn;
+ struct queue *q;
+ enum KFD_MQD_TYPE mqd_type;
+ struct mqd_manager *mqd_manager;
+ int r = 0;
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (pqn->q) {
+ q = pqn->q;
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_SDMA:
+ seq_printf(m, " SDMA queue on device %x\n",
+ q->device->id);
+ mqd_type = KFD_MQD_TYPE_SDMA;
+ break;
+ case KFD_QUEUE_TYPE_COMPUTE:
+ seq_printf(m, " Compute queue on device %x\n",
+ q->device->id);
+ mqd_type = KFD_MQD_TYPE_CP;
+ break;
+ default:
+ seq_printf(m,
+ " Bad user queue type %d on device %x\n",
+ q->properties.type, q->device->id);
+ continue;
+ }
+ mqd_manager = q->device->dqm->ops.get_mqd_manager(
+ q->device->dqm, mqd_type);
+ } else if (pqn->kq) {
+ q = pqn->kq->queue;
+ mqd_manager = pqn->kq->mqd;
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ seq_printf(m, " DIQ on device %x\n",
+ pqn->kq->dev->id);
+ mqd_type = KFD_MQD_TYPE_HIQ;
+ break;
+ default:
+ seq_printf(m,
+ " Bad kernel queue type %d on device %x\n",
+ q->properties.type,
+ pqn->kq->dev->id);
+ continue;
+ }
+ } else {
+ seq_printf(m,
+ " Weird: Queue node with neither kernel nor user queue\n");
+ continue;
+ }
+
+ r = mqd_manager->debugfs_show_mqd(m, q->mqd);
+ if (r != 0)
+ break;
+ }
+
+ return r;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 19ce590..ac28abc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -28,27 +28,33 @@
#include <linux/hash.h>
#include <linux/cpufreq.h>
#include <linux/log2.h>
+#include <linux/dmi.h>
+#include <linux/atomic.h>
#include "kfd_priv.h"
#include "kfd_crat.h"
#include "kfd_topology.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_iommu.h"
+/* topology_device_list - Master list of all topology devices */
static struct list_head topology_device_list;
-static int topology_crat_parsed;
static struct kfd_system_properties sys_props;
static DECLARE_RWSEM(topology_lock);
+static atomic_t topology_crat_proximity_domain;
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+ uint32_t proximity_domain)
{
struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
+ struct kfd_topology_device *device = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu_id == gpu_id) {
- device = top_dev->gpu;
+ if (top_dev->proximity_domain == proximity_domain) {
+ device = top_dev;
break;
}
@@ -57,7 +63,7 @@ struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
return device;
}
-struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
{
struct kfd_topology_device *top_dev;
struct kfd_dev *device = NULL;
@@ -65,7 +71,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu->pdev == pdev) {
+ if (top_dev->gpu_id == gpu_id) {
device = top_dev->gpu;
break;
}
@@ -75,282 +81,31 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
return device;
}
-static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
-{
- struct acpi_table_header *crat_table;
- acpi_status status;
-
- if (!size)
- return -EINVAL;
-
- /*
- * Fetch the CRAT table from ACPI
- */
- status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
- if (status == AE_NOT_FOUND) {
- pr_warn("CRAT table not found\n");
- return -ENODATA;
- } else if (ACPI_FAILURE(status)) {
- const char *err = acpi_format_exception(status);
-
- pr_err("CRAT table error: %s\n", err);
- return -EINVAL;
- }
-
- if (*size >= crat_table->length && crat_image != NULL)
- memcpy(crat_image, crat_table, crat_table->length);
-
- *size = crat_table->length;
-
- return 0;
-}
-
-static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
- struct crat_subtype_computeunit *cu)
-{
- dev->node_props.cpu_cores_count = cu->num_cpu_cores;
- dev->node_props.cpu_core_id_base = cu->processor_id_low;
- if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
- dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
-
- pr_info("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
- cu->processor_id_low);
-}
-
-static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
- struct crat_subtype_computeunit *cu)
-{
- dev->node_props.simd_id_base = cu->processor_id_low;
- dev->node_props.simd_count = cu->num_simd_cores;
- dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
- dev->node_props.max_waves_per_simd = cu->max_waves_simd;
- dev->node_props.wave_front_size = cu->wave_front_size;
- dev->node_props.mem_banks_count = cu->num_banks;
- dev->node_props.array_count = cu->num_arrays;
- dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
- dev->node_props.simd_per_cu = cu->num_simd_per_cu;
- dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
- if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
- dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
- pr_info("CU GPU: simds=%d id_base=%d\n", cu->num_simd_cores,
- cu->processor_id_low);
-}
-
-/* kfd_parse_subtype_cu is called when the topology mutex is already acquired */
-static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu)
-{
- struct kfd_topology_device *dev;
- int i = 0;
-
- pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
- cu->proximity_domain, cu->hsa_capability);
- list_for_each_entry(dev, &topology_device_list, list) {
- if (cu->proximity_domain == i) {
- if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
- kfd_populated_cu_info_cpu(dev, cu);
-
- if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
- kfd_populated_cu_info_gpu(dev, cu);
- break;
- }
- i++;
- }
-
- return 0;
-}
-
-/*
- * kfd_parse_subtype_mem is called when the topology mutex is
- * already acquired
- */
-static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
-{
- struct kfd_mem_properties *props;
- struct kfd_topology_device *dev;
- int i = 0;
-
- pr_info("Found memory entry in CRAT table with proximity_domain=%d\n",
- mem->promixity_domain);
- list_for_each_entry(dev, &topology_device_list, list) {
- if (mem->promixity_domain == i) {
- props = kfd_alloc_struct(props);
- if (props == NULL)
- return -ENOMEM;
-
- if (dev->node_props.cpu_cores_count == 0)
- props->heap_type = HSA_MEM_HEAP_TYPE_FB_PRIVATE;
- else
- props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
-
- if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
- props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
- if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
- props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
-
- props->size_in_bytes =
- ((uint64_t)mem->length_high << 32) +
- mem->length_low;
- props->width = mem->width;
-
- dev->mem_bank_count++;
- list_add_tail(&props->list, &dev->mem_props);
-
- break;
- }
- i++;
- }
-
- return 0;
-}
-
-/*
- * kfd_parse_subtype_cache is called when the topology mutex
- * is already acquired
- */
-static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
-{
- struct kfd_cache_properties *props;
- struct kfd_topology_device *dev;
- uint32_t id;
-
- id = cache->processor_id_low;
-
- pr_info("Found cache entry in CRAT table with processor_id=%d\n", id);
- list_for_each_entry(dev, &topology_device_list, list)
- if (id == dev->node_props.cpu_core_id_base ||
- id == dev->node_props.simd_id_base) {
- props = kfd_alloc_struct(props);
- if (props == NULL)
- return -ENOMEM;
-
- props->processor_id_low = id;
- props->cache_level = cache->cache_level;
- props->cache_size = cache->cache_size;
- props->cacheline_size = cache->cache_line_size;
- props->cachelines_per_tag = cache->lines_per_tag;
- props->cache_assoc = cache->associativity;
- props->cache_latency = cache->cache_latency;
-
- if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
- props->cache_type |= HSA_CACHE_TYPE_DATA;
- if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
- props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
- if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
- props->cache_type |= HSA_CACHE_TYPE_CPU;
- if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
- props->cache_type |= HSA_CACHE_TYPE_HSACU;
-
- dev->cache_count++;
- dev->node_props.caches_count++;
- list_add_tail(&props->list, &dev->cache_props);
-
- break;
- }
-
- return 0;
-}
-
-/*
- * kfd_parse_subtype_iolink is called when the topology mutex
- * is already acquired
- */
-static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
+struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
{
- struct kfd_iolink_properties *props;
- struct kfd_topology_device *dev;
- uint32_t i = 0;
- uint32_t id_from;
- uint32_t id_to;
-
- id_from = iolink->proximity_domain_from;
- id_to = iolink->proximity_domain_to;
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
- pr_info("Found IO link entry in CRAT table with id_from=%d\n", id_from);
- list_for_each_entry(dev, &topology_device_list, list) {
- if (id_from == i) {
- props = kfd_alloc_struct(props);
- if (props == NULL)
- return -ENOMEM;
-
- props->node_from = id_from;
- props->node_to = id_to;
- props->ver_maj = iolink->version_major;
- props->ver_min = iolink->version_minor;
-
- /*
- * weight factor (derived from CDIR), currently always 1
- */
- props->weight = 1;
-
- props->min_latency = iolink->minimum_latency;
- props->max_latency = iolink->maximum_latency;
- props->min_bandwidth = iolink->minimum_bandwidth_mbs;
- props->max_bandwidth = iolink->maximum_bandwidth_mbs;
- props->rec_transfer_size =
- iolink->recommended_transfer_size;
-
- dev->io_link_count++;
- dev->node_props.io_links_count++;
- list_add_tail(&props->list, &dev->io_link_props);
+ down_read(&topology_lock);
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu->pdev == pdev) {
+ device = top_dev->gpu;
break;
}
- i++;
- }
-
- return 0;
-}
-static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr)
-{
- struct crat_subtype_computeunit *cu;
- struct crat_subtype_memory *mem;
- struct crat_subtype_cache *cache;
- struct crat_subtype_iolink *iolink;
- int ret = 0;
-
- switch (sub_type_hdr->type) {
- case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
- cu = (struct crat_subtype_computeunit *)sub_type_hdr;
- ret = kfd_parse_subtype_cu(cu);
- break;
- case CRAT_SUBTYPE_MEMORY_AFFINITY:
- mem = (struct crat_subtype_memory *)sub_type_hdr;
- ret = kfd_parse_subtype_mem(mem);
- break;
- case CRAT_SUBTYPE_CACHE_AFFINITY:
- cache = (struct crat_subtype_cache *)sub_type_hdr;
- ret = kfd_parse_subtype_cache(cache);
- break;
- case CRAT_SUBTYPE_TLB_AFFINITY:
- /*
- * For now, nothing to do here
- */
- pr_info("Found TLB entry in CRAT table (not processing)\n");
- break;
- case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
- /*
- * For now, nothing to do here
- */
- pr_info("Found CCOMPUTE entry in CRAT table (not processing)\n");
- break;
- case CRAT_SUBTYPE_IOLINK_AFFINITY:
- iolink = (struct crat_subtype_iolink *)sub_type_hdr;
- ret = kfd_parse_subtype_iolink(iolink);
- break;
- default:
- pr_warn("Unknown subtype (%d) in CRAT\n",
- sub_type_hdr->type);
- }
+ up_read(&topology_lock);
- return ret;
+ return device;
}
+/* Called with write topology_lock acquired */
static void kfd_release_topology_device(struct kfd_topology_device *dev)
{
struct kfd_mem_properties *mem;
struct kfd_cache_properties *cache;
struct kfd_iolink_properties *iolink;
+ struct kfd_perf_properties *perf;
list_del(&dev->list);
@@ -375,25 +130,35 @@ static void kfd_release_topology_device(struct kfd_topology_device *dev)
kfree(iolink);
}
- kfree(dev);
+ while (dev->perf_props.next != &dev->perf_props) {
+ perf = container_of(dev->perf_props.next,
+ struct kfd_perf_properties, list);
+ list_del(&perf->list);
+ kfree(perf);
+ }
- sys_props.num_devices--;
+ kfree(dev);
}
-static void kfd_release_live_view(void)
+void kfd_release_topology_device_list(struct list_head *device_list)
{
struct kfd_topology_device *dev;
- while (topology_device_list.next != &topology_device_list) {
- dev = container_of(topology_device_list.next,
- struct kfd_topology_device, list);
+ while (!list_empty(device_list)) {
+ dev = list_first_entry(device_list,
+ struct kfd_topology_device, list);
kfd_release_topology_device(dev);
+ }
}
+static void kfd_release_live_view(void)
+{
+ kfd_release_topology_device_list(&topology_device_list);
memset(&sys_props, 0, sizeof(sys_props));
}
-static struct kfd_topology_device *kfd_create_topology_device(void)
+struct kfd_topology_device *kfd_create_topology_device(
+ struct list_head *device_list)
{
struct kfd_topology_device *dev;
@@ -406,65 +171,13 @@ static struct kfd_topology_device *kfd_create_topology_device(void)
INIT_LIST_HEAD(&dev->mem_props);
INIT_LIST_HEAD(&dev->cache_props);
INIT_LIST_HEAD(&dev->io_link_props);
+ INIT_LIST_HEAD(&dev->perf_props);
- list_add_tail(&dev->list, &topology_device_list);
- sys_props.num_devices++;
+ list_add_tail(&dev->list, device_list);
return dev;
}
-static int kfd_parse_crat_table(void *crat_image)
-{
- struct kfd_topology_device *top_dev;
- struct crat_subtype_generic *sub_type_hdr;
- uint16_t node_id;
- int ret;
- struct crat_header *crat_table = (struct crat_header *)crat_image;
- uint16_t num_nodes;
- uint32_t image_len;
-
- if (!crat_image)
- return -EINVAL;
-
- num_nodes = crat_table->num_domains;
- image_len = crat_table->length;
-
- pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
-
- for (node_id = 0; node_id < num_nodes; node_id++) {
- top_dev = kfd_create_topology_device();
- if (!top_dev) {
- kfd_release_live_view();
- return -ENOMEM;
- }
- }
-
- sys_props.platform_id =
- (*((uint64_t *)crat_table->oem_id)) & CRAT_OEMID_64BIT_MASK;
- sys_props.platform_oem = *((uint64_t *)crat_table->oem_table_id);
- sys_props.platform_rev = crat_table->revision;
-
- sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
- while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
- ((char *)crat_image) + image_len) {
- if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
- ret = kfd_parse_subtype(sub_type_hdr);
- if (ret != 0) {
- kfd_release_live_view();
- return ret;
- }
- }
-
- sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
- sub_type_hdr->length);
- }
-
- sys_props.generation_count++;
- topology_crat_parsed = 1;
-
- return 0;
-}
-
#define sysfs_show_gen_prop(buffer, fmt, ...) \
snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
@@ -501,11 +214,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
return ret;
}
+static void kfd_topology_kobj_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
static const struct sysfs_ops sysprops_ops = {
.show = sysprops_show,
};
static struct kobj_type sysprops_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &sysprops_ops,
};
@@ -541,6 +260,7 @@ static const struct sysfs_ops iolink_ops = {
};
static struct kobj_type iolink_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &iolink_ops,
};
@@ -568,6 +288,7 @@ static const struct sysfs_ops mem_ops = {
};
static struct kobj_type mem_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &mem_ops,
};
@@ -575,7 +296,7 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
ssize_t ret;
- uint32_t i;
+ uint32_t i, j;
struct kfd_cache_properties *cache;
/* Making sure that the buffer is an empty string */
@@ -593,12 +314,18 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, "latency", cache->cache_latency);
sysfs_show_32bit_prop(buffer, "type", cache->cache_type);
snprintf(buffer, PAGE_SIZE, "%ssibling_map ", buffer);
- for (i = 0; i < KFD_TOPOLOGY_CPU_SIBLINGS; i++)
- ret = snprintf(buffer, PAGE_SIZE, "%s%d%s",
- buffer, cache->sibling_map[i],
- (i == KFD_TOPOLOGY_CPU_SIBLINGS-1) ?
- "\n" : ",");
-
+ for (i = 0; i < CRAT_SIBLINGMAP_SIZE; i++)
+ for (j = 0; j < sizeof(cache->sibling_map[0])*8; j++) {
+ /* Check each bit */
+ if (cache->sibling_map[i] & (1 << j))
+ ret = snprintf(buffer, PAGE_SIZE,
+ "%s%d%s", buffer, 1, ",");
+ else
+ ret = snprintf(buffer, PAGE_SIZE,
+ "%s%d%s", buffer, 0, ",");
+ }
+ /* Replace the last "," with end of line */
+ *(buffer + strlen(buffer) - 1) = 0xA;
return ret;
}
@@ -607,9 +334,43 @@ static const struct sysfs_ops cache_ops = {
};
static struct kobj_type cache_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &cache_ops,
};
+/****** Sysfs of Performance Counters ******/
+
+struct kfd_perf_attr {
+ struct kobj_attribute attr;
+ uint32_t data;
+};
+
+static ssize_t perf_show(struct kobject *kobj, struct kobj_attribute *attrs,
+ char *buf)
+{
+ struct kfd_perf_attr *attr;
+
+ buf[0] = 0;
+ attr = container_of(attrs, struct kfd_perf_attr, attr);
+ if (!attr->data) /* invalid data for PMC */
+ return 0;
+ else
+ return sysfs_show_32bit_val(buf, attr->data);
+}
+
+#define KFD_PERF_DESC(_name, _data) \
+{ \
+ .attr = __ATTR(_name, 0444, perf_show, NULL), \
+ .data = _data, \
+}
+
+static struct kfd_perf_attr perf_attr_iommu[] = {
+ KFD_PERF_DESC(max_concurrent, 0),
+ KFD_PERF_DESC(num_counters, 0),
+ KFD_PERF_DESC(counter_ids, 0),
+};
+/****************************************/
+
static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
@@ -646,18 +407,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
dev->node_props.simd_count);
-
- if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_info_once("mem_banks_count truncated from %d to %d\n",
- dev->node_props.mem_banks_count,
- dev->mem_bank_count);
- sysfs_show_32bit_prop(buffer, "mem_banks_count",
- dev->mem_bank_count);
- } else {
- sysfs_show_32bit_prop(buffer, "mem_banks_count",
- dev->node_props.mem_banks_count);
- }
-
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, "caches_count",
dev->node_props.caches_count);
sysfs_show_32bit_prop(buffer, "io_links_count",
@@ -690,6 +441,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.device_id);
sysfs_show_32bit_prop(buffer, "location_id",
dev->node_props.location_id);
+ sysfs_show_32bit_prop(buffer, "drm_render_minor",
+ dev->node_props.drm_render_minor);
if (dev->gpu) {
log_max_watch_addr =
@@ -705,9 +458,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
}
+ if (dev->gpu->device_info->asic_family == CHIP_TONGA)
+ dev->node_props.capability |=
+ HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
+
sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
- dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
- dev->gpu->kgd));
+ dev->node_props.max_engine_clk_fcompute);
sysfs_show_64bit_prop(buffer, "local_mem_size",
(unsigned long long int) 0);
@@ -729,6 +485,7 @@ static const struct sysfs_ops node_ops = {
};
static struct kobj_type node_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &node_ops,
};
@@ -744,6 +501,7 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
struct kfd_iolink_properties *iolink;
struct kfd_cache_properties *cache;
struct kfd_mem_properties *mem;
+ struct kfd_perf_properties *perf;
if (dev->kobj_iolink) {
list_for_each_entry(iolink, &dev->io_link_props, list)
@@ -780,6 +538,16 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
dev->kobj_mem = NULL;
}
+ if (dev->kobj_perf) {
+ list_for_each_entry(perf, &dev->perf_props, list) {
+ kfree(perf->attr_group);
+ perf->attr_group = NULL;
+ }
+ kobject_del(dev->kobj_perf);
+ kobject_put(dev->kobj_perf);
+ dev->kobj_perf = NULL;
+ }
+
if (dev->kobj_node) {
sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
sysfs_remove_file(dev->kobj_node, &dev->attr_name);
@@ -796,8 +564,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
struct kfd_iolink_properties *iolink;
struct kfd_cache_properties *cache;
struct kfd_mem_properties *mem;
+ struct kfd_perf_properties *perf;
int ret;
- uint32_t i;
+ uint32_t i, num_attrs;
+ struct attribute **attrs;
if (WARN_ON(dev->kobj_node))
return -EEXIST;
@@ -826,6 +596,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
if (!dev->kobj_iolink)
return -ENOMEM;
+ dev->kobj_perf = kobject_create_and_add("perf", dev->kobj_node);
+ if (!dev->kobj_perf)
+ return -ENOMEM;
+
/*
* Creating sysfs files for node properties
*/
@@ -903,11 +677,38 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
if (ret < 0)
return ret;
i++;
-}
+ }
+
+ /* All hardware blocks have the same number of attributes. */
+ num_attrs = ARRAY_SIZE(perf_attr_iommu);
+ list_for_each_entry(perf, &dev->perf_props, list) {
+ perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
+ * num_attrs + sizeof(struct attribute_group),
+ GFP_KERNEL);
+ if (!perf->attr_group)
+ return -ENOMEM;
+
+ attrs = (struct attribute **)(perf->attr_group + 1);
+ if (!strcmp(perf->block_name, "iommu")) {
+ /* Information of IOMMU's num_counters and counter_ids is shown
+			/* Information about the IOMMU's num_counters and counter_ids
+			 * is shown under /sys/bus/event_source/devices/amd_iommu. We
+			 * don't duplicate it here.
+ perf_attr_iommu[0].data = perf->max_concurrent;
+ for (i = 0; i < num_attrs; i++)
+ attrs[i] = &perf_attr_iommu[i].attr.attr;
+ }
+ perf->attr_group->name = perf->block_name;
+ perf->attr_group->attrs = attrs;
+ ret = sysfs_create_group(dev->kobj_perf, perf->attr_group);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
+/* Called with write topology lock acquired */
static int kfd_build_sysfs_node_tree(void)
{
struct kfd_topology_device *dev;
@@ -924,6 +725,7 @@ static int kfd_build_sysfs_node_tree(void)
return 0;
}
+/* Called with write topology lock acquired */
static void kfd_remove_sysfs_node_tree(void)
{
struct kfd_topology_device *dev;
@@ -995,75 +797,235 @@ static void kfd_topology_release_sysfs(void)
}
}
+/* Called with write topology_lock acquired */
+static void kfd_topology_update_device_list(struct list_head *temp_list,
+ struct list_head *master_list)
+{
+ while (!list_empty(temp_list)) {
+ list_move_tail(temp_list->next, master_list);
+ sys_props.num_devices++;
+ }
+}
+
+static void kfd_debug_print_topology(void)
+{
+ struct kfd_topology_device *dev;
+
+ down_read(&topology_lock);
+
+ dev = list_last_entry(&topology_device_list,
+ struct kfd_topology_device, list);
+ if (dev) {
+ if (dev->node_props.cpu_cores_count &&
+ dev->node_props.simd_count) {
+ pr_info("Topology: Add APU node [0x%0x:0x%0x]\n",
+ dev->node_props.device_id,
+ dev->node_props.vendor_id);
+ } else if (dev->node_props.cpu_cores_count)
+ pr_info("Topology: Add CPU node\n");
+ else if (dev->node_props.simd_count)
+ pr_info("Topology: Add dGPU node [0x%0x:0x%0x]\n",
+ dev->node_props.device_id,
+ dev->node_props.vendor_id);
+ }
+ up_read(&topology_lock);
+}
+
+/* Helper function for initializing platform_xx members of
+ * kfd_system_properties. Uses OEM info from the last CPU/APU node.
+ */
+static void kfd_update_system_properties(void)
+{
+ struct kfd_topology_device *dev;
+
+ down_read(&topology_lock);
+ dev = list_last_entry(&topology_device_list,
+ struct kfd_topology_device, list);
+ if (dev) {
+ sys_props.platform_id =
+ (*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
+ sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
+ sys_props.platform_rev = dev->oem_revision;
+ }
+ up_read(&topology_lock);
+}
+
+static void find_system_memory(const struct dmi_header *dm,
+ void *private)
+{
+ struct kfd_mem_properties *mem;
+ u16 mem_width, mem_clock;
+ struct kfd_topology_device *kdev =
+ (struct kfd_topology_device *)private;
+ const u8 *dmi_data = (const u8 *)(dm + 1);
+
+ if (dm->type == DMI_ENTRY_MEM_DEVICE && dm->length >= 0x15) {
+ mem_width = (u16)(*(const u16 *)(dmi_data + 0x6));
+ mem_clock = (u16)(*(const u16 *)(dmi_data + 0x11));
+ list_for_each_entry(mem, &kdev->mem_props, list) {
+ if (mem_width != 0xFFFF && mem_width != 0)
+ mem->width = mem_width;
+ if (mem_clock != 0)
+ mem->mem_clk_max = mem_clock;
+ }
+ }
+}
+
+/*
+ * Performance counter information is not part of CRAT, but we would like to
+ * put it in sysfs under the topology directory for the Thunk to get the data.
+ * This function is called before updating the sysfs.
+ */
+static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev)
+{
+ /* These are the only counters supported so far */
+ return kfd_iommu_add_perf_counters(kdev);
+}
+
+/* kfd_add_non_crat_information - Add information that is not currently
+ * defined in CRAT but is necessary for KFD topology
+ * @kdev - topology device to which additional info is added
+ */
+static void kfd_add_non_crat_information(struct kfd_topology_device *kdev)
+{
+	/* Check if this is a CPU-only node. */
+ if (!kdev->gpu) {
+ /* Add system memory information */
+ dmi_walk(find_system_memory, kdev);
+ }
+ /* TODO: For GPU node, rearrange code from kfd_topology_add_device */
+}
+
+/* kfd_is_acpi_crat_invalid - CRAT from ACPI is valid only for AMD APU devices.
+ * Ignore CRAT for all other devices. AMD APU is identified if both CPU
+ * and GPU cores are present.
+ * @device_list - topology device list created by parsing ACPI CRAT table.
+ * @return - TRUE if invalid, FALSE if valid.
+ */
+static bool kfd_is_acpi_crat_invalid(struct list_head *device_list)
+{
+ struct kfd_topology_device *dev;
+
+ list_for_each_entry(dev, device_list, list) {
+ if (dev->node_props.cpu_cores_count &&
+ dev->node_props.simd_count)
+ return false;
+ }
+ pr_info("Ignoring ACPI CRAT on non-APU system\n");
+ return true;
+}
+
int kfd_topology_init(void)
{
void *crat_image = NULL;
size_t image_size = 0;
int ret;
-
- /*
- * Initialize the head for the topology device list
+ struct list_head temp_topology_device_list;
+ int cpu_only_node = 0;
+ struct kfd_topology_device *kdev;
+ int proximity_domain;
+
+ /* topology_device_list - Master list of all topology devices
+ * temp_topology_device_list - temporary list created while parsing CRAT
+	 * or VCRAT. Once parsing is complete, the contents of the list are
+	 * moved to topology_device_list
*/
+
+	/* Initialize the heads for both lists */
INIT_LIST_HEAD(&topology_device_list);
+ INIT_LIST_HEAD(&temp_topology_device_list);
init_rwsem(&topology_lock);
- topology_crat_parsed = 0;
memset(&sys_props, 0, sizeof(sys_props));
+ /* Proximity domains in ACPI CRAT tables start counting at
+ * 0. The same should be true for virtual CRAT tables created
+ * at this stage. GPUs added later in kfd_topology_add_device
+ * use a counter.
+ */
+ proximity_domain = 0;
+
/*
- * Get the CRAT image from the ACPI
+ * Get the CRAT image from the ACPI. If ACPI doesn't have one
+	 * or if the ACPI CRAT is invalid, create a virtual CRAT.
+ * NOTE: The current implementation expects all AMD APUs to have
+ * CRAT. If no CRAT is available, it is assumed to be a CPU
*/
- ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
- if (ret == 0 && image_size > 0) {
- pr_info("Found CRAT image with size=%zd\n", image_size);
- crat_image = kmalloc(image_size, GFP_KERNEL);
- if (!crat_image) {
- ret = -ENOMEM;
- pr_err("No memory for allocating CRAT image\n");
- goto err;
+ ret = kfd_create_crat_image_acpi(&crat_image, &image_size);
+ if (!ret) {
+ ret = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (ret ||
+ kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
+ kfd_release_topology_device_list(
+ &temp_topology_device_list);
+ kfd_destroy_crat_image(crat_image);
+ crat_image = NULL;
}
- ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
-
- if (ret == 0) {
- down_write(&topology_lock);
- ret = kfd_parse_crat_table(crat_image);
- if (ret == 0)
- ret = kfd_topology_update_sysfs();
- up_write(&topology_lock);
- } else {
- pr_err("Couldn't get CRAT table size from ACPI\n");
+ }
+
+ if (!crat_image) {
+ ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
+ COMPUTE_UNIT_CPU, NULL,
+ proximity_domain);
+ cpu_only_node = 1;
+ if (ret) {
+ pr_err("Error creating VCRAT table for CPU\n");
+ return ret;
+ }
+
+ ret = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (ret) {
+ pr_err("Error parsing VCRAT table for CPU\n");
+ goto err;
}
- kfree(crat_image);
- } else if (ret == -ENODATA) {
- ret = 0;
- } else {
- pr_err("Couldn't get CRAT table size from ACPI\n");
+ }
+
+ kdev = list_first_entry(&temp_topology_device_list,
+ struct kfd_topology_device, list);
+ kfd_add_perf_to_topology(kdev);
+
+ down_write(&topology_lock);
+ kfd_topology_update_device_list(&temp_topology_device_list,
+ &topology_device_list);
+ atomic_set(&topology_crat_proximity_domain, sys_props.num_devices-1);
+ ret = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+ if (!ret) {
+ sys_props.generation_count++;
+ kfd_update_system_properties();
+ kfd_debug_print_topology();
+ pr_info("Finished initializing topology\n");
+ } else
+ pr_err("Failed to update topology in sysfs ret=%d\n", ret);
+
+ /* For nodes with GPU, this information gets added
+ * when GPU is detected (kfd_topology_add_device).
+ */
+ if (cpu_only_node) {
+ /* Add additional information to CPU only node created above */
+ down_write(&topology_lock);
+ kdev = list_first_entry(&topology_device_list,
+ struct kfd_topology_device, list);
+ up_write(&topology_lock);
+ kfd_add_non_crat_information(kdev);
}
err:
- pr_info("Finished initializing topology ret=%d\n", ret);
+ kfd_destroy_crat_image(crat_image);
return ret;
}
void kfd_topology_shutdown(void)
{
+ down_write(&topology_lock);
kfd_topology_release_sysfs();
kfd_release_live_view();
-}
-
-static void kfd_debug_print_topology(void)
-{
- struct kfd_topology_device *dev;
- uint32_t i = 0;
-
- pr_info("DEBUG PRINT OF TOPOLOGY:");
- list_for_each_entry(dev, &topology_device_list, list) {
- pr_info("Node: %d\n", i);
- pr_info("\tGPU assigned: %s\n", (dev->gpu ? "yes" : "no"));
- pr_info("\tCPU count: %d\n", dev->node_props.cpu_cores_count);
- pr_info("\tSIMD count: %d", dev->node_props.simd_count);
- i++;
- }
+ up_write(&topology_lock);
}
static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
@@ -1072,11 +1034,15 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
uint32_t buf[7];
uint64_t local_mem_size;
int i;
+ struct kfd_local_mem_info local_mem_info;
if (!gpu)
return 0;
- local_mem_size = gpu->kfd2kgd->get_vmem_size(gpu->kgd);
+ gpu->kfd2kgd->get_local_mem_info(gpu->kgd, &local_mem_info);
+
+ local_mem_size = local_mem_info.local_mem_size_private +
+ local_mem_info.local_mem_size_public;
buf[0] = gpu->pdev->devfn;
buf[1] = gpu->pdev->subsystem_vendor;
@@ -1091,19 +1057,26 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
return hashout;
}
-
+/* kfd_assign_gpu - Attach @gpu to the correct kfd topology device. If
+ * the GPU device is not already present in the topology device
+ * list then return NULL. This means a new topology device has to
+ * be created for this GPU.
+ * TODO: Rather than assigning @gpu to the first topology device without
+ * a gpu attached, it would be better to have a more stringent check.
+ */
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
+ down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list)
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
}
-
+ up_write(&topology_lock);
return out_dev;
}
@@ -1115,84 +1088,198 @@ static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
*/
}
+/* kfd_fill_mem_clk_max_info - Since CRAT doesn't have memory clock info,
+ * patch this after CRAT parsing.
+ */
+static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
+{
+ struct kfd_mem_properties *mem;
+ struct kfd_local_mem_info local_mem_info;
+
+ if (!dev)
+ return;
+
+	/* Currently, the amdgpu driver (amdgpu_mc) deals only with GPUs that
+	 * have a single bank of VRAM local memory.
+	 * For dGPUs - VCRAT reports only one bank of local memory
+	 * For APUs - if CRAT from ACPI reports more than one bank, then
+	 *	all the banks will report the same mem_clk_max information
+ */
+ dev->gpu->kfd2kgd->get_local_mem_info(dev->gpu->kgd,
+ &local_mem_info);
+
+ list_for_each_entry(mem, &dev->mem_props, list)
+ mem->mem_clk_max = local_mem_info.mem_clk_max;
+}
+
+static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
+{
+ struct kfd_iolink_properties *link;
+
+ if (!dev || !dev->gpu)
+ return;
+
+	/* The GPU only creates direct links, so apply the flags setting to all of them */
+ if (dev->gpu->device_info->asic_family == CHIP_HAWAII)
+ list_for_each_entry(link, &dev->io_link_props, list)
+ link->flags = CRAT_IOLINK_FLAGS_ENABLED |
+ CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
+ CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
+}
+
int kfd_topology_add_device(struct kfd_dev *gpu)
{
uint32_t gpu_id;
struct kfd_topology_device *dev;
- int res;
+ struct kfd_cu_info cu_info;
+ int res = 0;
+ struct list_head temp_topology_device_list;
+ void *crat_image = NULL;
+ size_t image_size = 0;
+ int proximity_domain;
+
+ INIT_LIST_HEAD(&temp_topology_device_list);
gpu_id = kfd_generate_gpu_id(gpu);
pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
- down_write(&topology_lock);
- /*
- * Try to assign the GPU to existing topology device (generated from
- * CRAT table
+ proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
+
+ /* Check to see if this gpu device exists in the topology_device_list.
+ * If so, assign the gpu to that device,
+ * else create a Virtual CRAT for this gpu device and then parse that
+	 * CRAT to create a new topology device. Once created, assign the gpu to
+ * that topology device
*/
dev = kfd_assign_gpu(gpu);
if (!dev) {
- pr_info("GPU was not found in the current topology. Extending.\n");
- kfd_debug_print_topology();
- dev = kfd_create_topology_device();
- if (!dev) {
- res = -ENOMEM;
+ res = kfd_create_crat_image_virtual(&crat_image, &image_size,
+ COMPUTE_UNIT_GPU, gpu,
+ proximity_domain);
+ if (res) {
+ pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
+ return res;
+ }
+ res = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (res) {
+ pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
goto err;
}
- dev->gpu = gpu;
- /*
- * TODO: Make a call to retrieve topology information from the
- * GPU vBIOS
- */
+ down_write(&topology_lock);
+ kfd_topology_update_device_list(&temp_topology_device_list,
+ &topology_device_list);
/* Update the SYSFS tree, since we added another topology
* device
*/
- if (kfd_topology_update_sysfs() < 0)
- kfd_topology_release_sysfs();
-
+ res = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+ if (!res)
+ sys_props.generation_count++;
+ else
+ pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
+ gpu_id, res);
+ dev = kfd_assign_gpu(gpu);
+ if (WARN_ON(!dev)) {
+ res = -ENODEV;
+ goto err;
+ }
}
dev->gpu_id = gpu_id;
gpu->id = gpu_id;
+
+ /* TODO: Move the following lines to function
+ * kfd_add_non_crat_information
+ */
+
+	/* Fill in additional information that is not available in CRAT but
+	 * is needed for the topology
+ */
+
+ dev->gpu->kfd2kgd->get_cu_info(dev->gpu->kgd, &cu_info);
+ dev->node_props.simd_arrays_per_engine =
+ cu_info.num_shader_arrays_per_engine;
+
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
- dev->node_props.location_id = (gpu->pdev->bus->number << 24) +
- (gpu->pdev->devfn & 0xffffff);
- /*
- * TODO: Retrieve max engine clock values from KGD
- */
+ dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
+ gpu->pdev->devfn);
+ dev->node_props.max_engine_clk_fcompute =
+ dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(dev->gpu->kgd);
+ dev->node_props.max_engine_clk_ccompute =
+ cpufreq_quick_get_max(0) / 1000;
+ dev->node_props.drm_render_minor =
+ gpu->shared_resources.drm_render_minor;
+
+ kfd_fill_mem_clk_max_info(dev);
+ kfd_fill_iolink_non_crat_info(dev);
+
+ switch (dev->gpu->device_info->asic_family) {
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
+ case CHIP_TONGA:
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_PRE_1_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ pr_debug("Adding doorbell packet type capability\n");
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+ dev->gpu->device_info->asic_family);
+ }
+ /* Fix errors in CZ CRAT.
+ * simd_count: Carrizo CRAT reports wrong simd_count, probably
+ * because it doesn't consider masked out CUs
+ * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
+ * capability flag: Carrizo CRAT doesn't report IOMMU flags
+ */
if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
- dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
- pr_info("Adding doorbell packet type capability\n");
+ dev->node_props.simd_count =
+ cu_info.simd_per_cu * cu_info.cu_active_number;
+ dev->node_props.max_waves_per_simd = 10;
+ dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
}
- res = 0;
-
-err:
- up_write(&topology_lock);
+ kfd_debug_print_topology();
- if (res == 0)
+ if (!res)
kfd_notify_gpu_change(gpu_id, 1);
-
+err:
+ kfd_destroy_crat_image(crat_image);
return res;
}
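
The location_id change above swaps the old open-coded (bus << 24) + devfn packing for PCI_DEVID(). A minimal userspace sketch, not part of the patch, of the 16-bit bus:devfn encoding PCI_DEVID() is expected to produce; the helper name pci_devid_example is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's PCI_DEVID(): bus number in the
 * high byte, devfn (device/function) in the low byte of a 16-bit ID.
 */
static uint16_t pci_devid_example(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
	/* e.g. a GPU at 0000:03:00.0 gives bus 0x03, devfn 0x00 */
	printf("location_id = 0x%04x\n", pci_devid_example(0x03, 0x00));
	return 0;
}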
int kfd_topology_remove_device(struct kfd_dev *gpu)
{
- struct kfd_topology_device *dev;
+ struct kfd_topology_device *dev, *tmp;
uint32_t gpu_id;
int res = -ENODEV;
down_write(&topology_lock);
- list_for_each_entry(dev, &topology_device_list, list)
+ list_for_each_entry_safe(dev, tmp, &topology_device_list, list)
if (dev->gpu == gpu) {
gpu_id = dev->gpu_id;
kfd_remove_sysfs_node_entry(dev);
kfd_release_topology_device(dev);
+ sys_props.num_devices--;
res = 0;
if (kfd_topology_update_sysfs() < 0)
kfd_topology_release_sysfs();
@@ -1201,28 +1288,32 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
up_write(&topology_lock);
- if (res == 0)
+ if (!res)
kfd_notify_gpu_change(gpu_id, 0);
return res;
}
-/*
- * When idx is out of bounds, the function will return NULL
+/* kfd_topology_enum_kfd_devices - Enumerate through all devices in KFD
+ * topology. If a GPU device is found at @idx, then a valid kfd_dev pointer is
+ * returned through @kdev
+ * Return -	0: On success (@kdev will be NULL for non-GPU nodes)
+ * -1: If end of list
*/
-struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx)
+int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
{
struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
uint8_t device_idx = 0;
+ *kdev = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list) {
if (device_idx == idx) {
- device = top_dev->gpu;
- break;
+ *kdev = top_dev->gpu;
+ up_read(&topology_lock);
+ return 0;
}
device_idx++;
@@ -1230,6 +1321,88 @@ struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx)
up_read(&topology_lock);
- return device;
+ return -1;
+
+}
+
+static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
+{
+ const struct cpuinfo_x86 *cpuinfo;
+ int first_cpu_of_numa_node;
+
+ if (!cpumask || cpumask == cpu_none_mask)
+ return -1;
+ first_cpu_of_numa_node = cpumask_first(cpumask);
+ if (first_cpu_of_numa_node >= nr_cpu_ids)
+ return -1;
+ cpuinfo = &cpu_data(first_cpu_of_numa_node);
+
+ return cpuinfo->apicid;
+}
+
+/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
+ * of the given NUMA node (numa_node_id)
+ * Return -1 on failure
+ */
+int kfd_numa_node_to_apic_id(int numa_node_id)
+{
+ if (numa_node_id == -1) {
+ pr_warn("Invalid NUMA Node. Use online CPU mask\n");
+ return kfd_cpumask_to_apic_id(cpu_online_mask);
+ }
+ return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id));
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
+{
+ struct kfd_topology_device *dev;
+ unsigned int i = 0;
+ int r = 0;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (!dev->gpu) {
+ i++;
+ continue;
+ }
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = dqm_debugfs_hqds(m, dev->gpu->dqm);
+ if (r)
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return r;
+}
+
+int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
+{
+ struct kfd_topology_device *dev;
+ unsigned int i = 0;
+ int r = 0;
+ down_read(&topology_lock);
+
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (!dev->gpu) {
+ i++;
+ continue;
+ }
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets);
+ if (r)
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return r;
}
+
+#endif
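
kfd_topology_enum_kfd_devices() now returns a status and hands the device back through @kdev, so callers have to tell CPU-only nodes (return 0 with *kdev == NULL) apart from the end of the list (return -1). A minimal caller sketch, assuming only the semantics documented above; the function name for_each_kfd_gpu_example is illustrative:

static void for_each_kfd_gpu_example(void)
{
	struct kfd_dev *kdev;
	uint8_t idx = 0;

	/* Walk every topology node until the enumerator reports end of list */
	while (kfd_topology_enum_kfd_devices(idx, &kdev) == 0) {
		if (kdev) {
			/* GPU node: kdev points to a valid kfd_dev */
		} else {
			/* CPU-only node: kdev is NULL, nothing to do */
		}
		idx++;
	}
}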
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index c3ddb9b..eb54cfc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -25,7 +25,7 @@
#include <linux/types.h>
#include <linux/list.h>
-#include "kfd_priv.h"
+#include "kfd_crat.h"
#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128
@@ -39,8 +39,13 @@
#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
-#define HSA_CAP_RESERVED 0xfffff000
-#define HSA_CAP_DOORBELL_PACKET_TYPE 0x00001000
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
+#define HSA_CAP_RESERVED 0xffffc000
+
+#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
+#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
+#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
struct kfd_node_properties {
uint32_t cpu_cores_count;
@@ -66,6 +71,7 @@ struct kfd_node_properties {
uint32_t location_id;
uint32_t max_engine_clk_fcompute;
uint32_t max_engine_clk_ccompute;
+ int32_t drm_render_minor;
uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
@@ -91,8 +97,6 @@ struct kfd_mem_properties {
struct attribute attr;
};
-#define KFD_TOPOLOGY_CPU_SIBLINGS 256
-
#define HSA_CACHE_TYPE_DATA 0x00000001
#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
#define HSA_CACHE_TYPE_CPU 0x00000004
@@ -109,7 +113,7 @@ struct kfd_cache_properties {
uint32_t cache_assoc;
uint32_t cache_latency;
uint32_t cache_type;
- uint8_t sibling_map[KFD_TOPOLOGY_CPU_SIBLINGS];
+ uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
struct kobject *kobj;
struct attribute attr;
};
@@ -132,24 +136,36 @@ struct kfd_iolink_properties {
struct attribute attr;
};
+struct kfd_perf_properties {
+ struct list_head list;
+ char block_name[16];
+ uint32_t max_concurrent;
+ struct attribute_group *attr_group;
+};
+
struct kfd_topology_device {
struct list_head list;
uint32_t gpu_id;
+ uint32_t proximity_domain;
struct kfd_node_properties node_props;
- uint32_t mem_bank_count;
struct list_head mem_props;
uint32_t cache_count;
struct list_head cache_props;
uint32_t io_link_count;
struct list_head io_link_props;
+ struct list_head perf_props;
struct kfd_dev *gpu;
struct kobject *kobj_node;
struct kobject *kobj_mem;
struct kobject *kobj_cache;
struct kobject *kobj_iolink;
+ struct kobject *kobj_perf;
struct attribute attr_gpuid;
struct attribute attr_name;
struct attribute attr_props;
+ uint8_t oem_id[CRAT_OEMID_LENGTH];
+ uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
+ uint32_t oem_revision;
};
struct kfd_system_properties {
@@ -164,6 +180,8 @@ struct kfd_system_properties {
struct attribute attr_props;
};
-
+struct kfd_topology_device *kfd_create_topology_device(
+ struct list_head *device_list);
+void kfd_release_topology_device_list(struct list_head *device_list);
#endif /* __KFD_TOPOLOGY_H__ */
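
The header change above replaces the single HSA_CAP_DOORBELL_PACKET_TYPE flag with a two-bit doorbell type field in the capability word. A small sketch, not part of the patch, of how a doorbell type would be packed into and read back out of node_props.capability with the new mask and shift:

static uint32_t pack_doorbell_type(uint32_t capability, uint32_t type)
{
	/* Clear bits 12..13, then place the type (e.g. HSA_CAP_DOORBELL_TYPE_1_0) */
	capability &= ~HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK;
	capability |= (type << HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
		      HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK;
	return capability;
}

static uint32_t unpack_doorbell_type(uint32_t capability)
{
	return (capability & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK) >>
	       HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT;
}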
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index ec3285f..5b124a6 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -11,7 +11,7 @@ config DRM_AMD_DC
config DRM_AMD_DC_PRE_VEGA
bool "DC support for Polaris and older ASICs"
- default n
+ default y
help
Choose this option to enable the new DC support for older asics
by default. This includes Polaris, Carrizo, Tonga, Bonaire,
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index c27c81c..a2c5be4 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -26,17 +26,16 @@
AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
-subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
-
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
-DAL_LIBS = amdgpu_dm dc modules/freesync
+DAL_LIBS = amdgpu_dm dc modules/freesync modules/color
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
index 4646467..357d5964 100644
--- a/drivers/gpu/drm/amd/display/TODO
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -105,3 +105,6 @@ useless with filtering output. dynamic debug printing might be an option.
20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
retimer that we need to program to pass PHY compliance. Currently that's
bypassing the i2c device and goes directly to HW. This should be changed.
+
+21. Remove vector.c from dc/basics. It's used in DDC code which can probably
+be simplified enough to no longer need a vector implementation.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 2b72009..af16973 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -25,12 +25,16 @@
-AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
+AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
endif
+ifneq ($(CONFIG_DEBUG_FS),)
+AMDGPUDM += amdgpu_dm_crc.o
+endif
+
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bb5fa89..e42a28e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -59,9 +59,10 @@
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"
-#include "raven1/DCN/dcn_1_0_offset.h"
-#include "raven1/DCN/dcn_1_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
#include "soc15_common.h"
#endif
@@ -319,6 +320,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
crtc_index = acrtc->crtc_id;
drm_handle_vblank(adev->ddev, crtc_index);
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
}
static int dm_set_clockgating_state(void *handle,
@@ -345,23 +347,43 @@ static void hotplug_notify_work_func(struct work_struct *work)
}
#if defined(CONFIG_DRM_AMD_DC_FBC)
-#include "dal_asic_id.h"
/* Allocate memory for FBC compressed data */
-/* TODO: Dynamic allocation */
-#define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
-
-static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
+static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
- int r;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct dm_comressor_info *compressor = &adev->dm.compressor;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
+ struct drm_display_mode *mode;
+ unsigned long max_size = 0;
+
+ if (adev->dm.dc->fbc_compressor == NULL)
+ return;
+
+ if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
- if (!compressor->bo_ptr) {
- r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
- &compressor->gpu_addr, &compressor->cpu_addr);
+ if (compressor->bo_ptr)
+ return;
+
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (max_size < mode->htotal * mode->vtotal)
+ max_size = mode->htotal * mode->vtotal;
+ }
+
+ if (max_size) {
+ int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
+ &compressor->gpu_addr, &compressor->cpu_addr);
if (r)
- DRM_ERROR("DM: Failed to initialize fbc\n");
+ DRM_ERROR("DM: Failed to initialize FBC\n");
+ else {
+ adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
+ DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+ }
+
}
}
@@ -381,12 +403,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
- /* initialize DAL's lock (for SYNC context use) */
- spin_lock_init(&adev->dm.dal_lock);
-
- /* initialize DAL's mutex */
- mutex_init(&adev->dm.dal_mutex);
-
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
@@ -397,7 +413,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.asic_id.pci_revision_id = adev->rev_id;
init_data.asic_id.hw_internal_rev = adev->external_rev_id;
- init_data.asic_id.vram_width = adev->mc.vram_width;
+ init_data.asic_id.vram_width = adev->gmc.vram_width;
/* TODO: initialize init_data.asic_id.vram_type here!!!! */
init_data.asic_id.atombios_base_address =
adev->mode_info.atom_context->bios;
@@ -422,18 +438,21 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
else
init_data.log_mask = DC_MIN_LOG_MASK;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
- if (adev->family == FAMILY_CZ)
- amdgpu_dm_initialize_fbc(adev);
- init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
-#endif
+ /*
+ * TODO debug why this doesn't work on Raven
+ */
+ if (adev->flags & AMD_IS_APU &&
+ adev->asic_type >= CHIP_CARRIZO &&
+ adev->asic_type < CHIP_RAVEN)
+ init_data.flags.gpu_vm_support = true;
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core initialized!\n");
+ DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
} else {
- DRM_INFO("Display Core failed to initialize!\n");
+ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
@@ -447,6 +466,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
+ amdgpu_dm_init_color_mod();
+
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -540,9 +561,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
static int dm_late_init(void *handle)
{
- struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return detect_mst_link_for_all_connectors(dev);
+ return detect_mst_link_for_all_connectors(adev->ddev);
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
@@ -629,11 +650,13 @@ static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
- return 0;
+ ret = amdgpu_dm_display_resume(adev);
+ return ret;
}
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
@@ -791,8 +814,8 @@ dm_atomic_state_alloc_free(struct drm_atomic_state *state)
}
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
- .fb_create = amdgpu_user_framebuffer_create,
- .output_poll_changed = amdgpu_output_poll_changed,
+ .fb_create = amdgpu_display_user_framebuffer_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = amdgpu_dm_atomic_commit,
.atomic_state_alloc = dm_atomic_state_alloc,
@@ -1035,6 +1058,10 @@ static void handle_hpd_rx_irq(void *param)
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
amdgpu_dm_update_connector_after_detect(aconnector);
@@ -1103,8 +1130,9 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA12 ||
adev->asic_type == CHIP_RAVEN)
- client_id = AMDGPU_IH_CLIENTID_DCE;
+ client_id = SOC15_IH_CLIENTID_DCE;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -1204,7 +1232,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
if (r) {
DRM_ERROR("Failed to add crtc irq id!\n");
@@ -1228,7 +1256,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
DRM_ERROR("Failed to add page flip irq id!\n");
return r;
@@ -1249,7 +1277,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
}
/* HPD */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
if (r) {
DRM_ERROR("Failed to add hpd irq id!\n");
@@ -1279,9 +1307,9 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
/* indicate support of immediate flip */
adev->ddev->mode_config.async_page_flip = true;
- adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+ adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
- r = amdgpu_modeset_create_props(adev);
+ r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
@@ -1338,6 +1366,43 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
#endif
+static int initialize_plane(struct amdgpu_display_manager *dm,
+ struct amdgpu_mode_info *mode_info,
+ int plane_id)
+{
+ struct amdgpu_plane *plane;
+ unsigned long possible_crtcs;
+ int ret = 0;
+
+ plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+ mode_info->planes[plane_id] = plane;
+
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ return -ENOMEM;
+ }
+ plane->base.type = mode_info->plane_type[plane_id];
+
+ /*
+	 * HACK: IGT tests expect that each plane can only have
+ * one possible CRTC. For now, set one CRTC for each
+ * plane that is not an underlay, but still allow multiple
+ * CRTCs for underlay planes.
+ */
+ possible_crtcs = 1 << plane_id;
+ if (plane_id >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
+
+ ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
+
+ if (ret) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ return ret;
+ }
+
+ return ret;
+}
+
/* In this architecture, the association
* connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
@@ -1348,12 +1413,12 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
struct amdgpu_display_manager *dm = &adev->dm;
- uint32_t i;
+ int32_t i;
struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
- unsigned long possible_crtcs;
+ int32_t total_overlay_planes, total_primary_planes;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1361,30 +1426,22 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
return -1;
}
- for (i = 0; i < dm->dc->caps.max_planes; i++) {
- struct amdgpu_plane *plane;
+ /* Identify the number of planes to be initialized */
+ total_overlay_planes = dm->dc->caps.max_slave_planes;
+ total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
- plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
- mode_info->planes[i] = plane;
-
- if (!plane) {
- DRM_ERROR("KMS: Failed to allocate plane\n");
+ /* First initialize overlay planes, index starting after primary planes */
+ for (i = (total_overlay_planes - 1); i >= 0; i--) {
+ if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
goto fail;
}
- plane->base.type = mode_info->plane_type[i];
-
- /*
- * HACK: IGT tests expect that each plane can only have one
- * one possible CRTC. For now, set one CRTC for each
- * plane that is not an underlay, but still allow multiple
- * CRTCs for underlay planes.
- */
- possible_crtcs = 1 << i;
- if (i >= dm->dc->caps.max_streams)
- possible_crtcs = 0xff;
+ }
- if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
- DRM_ERROR("KMS: Failed to initialize plane\n");
+ /* Initialize primary planes */
+ for (i = (total_primary_planes - 1); i >= 0; i--) {
+ if (initialize_plane(dm, mode_info, i)) {
+ DRM_ERROR("KMS: Failed to initialize primary plane\n");
goto fail;
}
}
@@ -1445,6 +1502,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_POLARIS10:
case CHIP_POLARIS12:
case CHIP_VEGA10:
+ case CHIP_VEGA12:
if (dce110_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -1538,7 +1596,6 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
- .vblank_wait = NULL,
.backlight_set_level =
dm_set_backlight_level,/* called unconditionally */
.backlight_get_level =
@@ -1589,9 +1646,6 @@ static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
- amdgpu_dm_set_irq_funcs(adev);
-
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -1646,6 +1700,7 @@ static int dm_early_init(void *handle)
adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
@@ -1664,6 +1719,8 @@ static int dm_early_init(void *handle)
return -EINVAL;
}
+ amdgpu_dm_set_irq_funcs(adev);
+
if (adev->mode_info.funcs == NULL)
adev->mode_info.funcs = &dm_display_funcs;
@@ -1679,18 +1736,6 @@ static int dm_early_init(void *handle)
return 0;
}
-struct dm_connector_state {
- struct drm_connector_state base;
-
- enum amdgpu_rmx_type scaling;
- uint8_t underscan_vborder;
- uint8_t underscan_hborder;
- bool underscan_enable;
-};
-
-#define to_dm_connector_state(x)\
- container_of((x), struct dm_connector_state, base)
-
static bool modeset_required(struct drm_crtc_state *crtc_state,
struct dc_stream_state *new_stream,
struct dc_stream_state *old_stream)
@@ -1773,8 +1818,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
return true;
}
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
- uint64_t *tiling_flags,
- uint64_t *fb_location)
+ uint64_t *tiling_flags)
{
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
int r = amdgpu_bo_reserve(rbo, false);
@@ -1786,9 +1830,6 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
return r;
}
- if (fb_location)
- *fb_location = amdgpu_bo_gpu_offset(rbo);
-
if (tiling_flags)
amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
@@ -1799,12 +1840,9 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
struct dc_plane_state *plane_state,
- const struct amdgpu_framebuffer *amdgpu_fb,
- bool addReq)
+ const struct amdgpu_framebuffer *amdgpu_fb)
{
uint64_t tiling_flags;
- uint64_t fb_location = 0;
- uint64_t chroma_addr = 0;
unsigned int awidth;
const struct drm_framebuffer *fb = &amdgpu_fb->base;
int ret = 0;
@@ -1812,8 +1850,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
ret = get_fb_info(
amdgpu_fb,
- &tiling_flags,
- addReq == true ? &fb_location:NULL);
+ &tiling_flags);
if (ret)
return ret;
@@ -1851,8 +1888,6 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
- plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
- plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
plane_state->plane_size.grph.surface_size.x = 0;
plane_state->plane_size.grph.surface_size.y = 0;
plane_state->plane_size.grph.surface_size.width = fb->width;
@@ -1865,15 +1900,6 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
} else {
awidth = ALIGN(fb->width, 64);
plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
- plane_state->address.video_progressive.luma_addr.low_part
- = lower_32_bits(fb_location);
- plane_state->address.video_progressive.luma_addr.high_part
- = upper_32_bits(fb_location);
- chroma_addr = fb_location + (u64)(awidth * fb->height);
- plane_state->address.video_progressive.chroma_addr.low_part
- = lower_32_bits(chroma_addr);
- plane_state->address.video_progressive.chroma_addr.high_part
- = upper_32_bits(chroma_addr);
plane_state->plane_size.video.luma_size.x = 0;
plane_state->plane_size.video.luma_size.y = 0;
plane_state->plane_size.video.luma_size.width = awidth;
@@ -1922,6 +1948,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA12 ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
plane_state->tiling_info.gfx9.num_pipes =
@@ -1954,37 +1981,10 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
}
-static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
- struct dc_plane_state *plane_state)
-{
- int i;
- struct dc_gamma *gamma;
- struct drm_color_lut *lut =
- (struct drm_color_lut *) crtc_state->gamma_lut->data;
-
- gamma = dc_create_gamma();
-
- if (gamma == NULL) {
- WARN_ON(1);
- return;
- }
-
- gamma->type = GAMMA_RGB_256;
- gamma->num_entries = GAMMA_RGB_256_ENTRIES;
- for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
- gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
- gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
- gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
- }
-
- plane_state->gamma_correction = gamma;
-}
-
static int fill_plane_attributes(struct amdgpu_device *adev,
struct dc_plane_state *dc_plane_state,
struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state,
- bool addrReq)
+ struct drm_crtc_state *crtc_state)
{
const struct amdgpu_framebuffer *amdgpu_fb =
to_amdgpu_framebuffer(plane_state->fb);
@@ -1998,8 +1998,7 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
ret = fill_plane_attributes_from_fb(
crtc->dev->dev_private,
dc_plane_state,
- amdgpu_fb,
- addrReq);
+ amdgpu_fb);
if (ret)
return ret;
@@ -2009,14 +2008,17 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
if (input_tf == NULL)
return -ENOMEM;
- input_tf->type = TF_TYPE_PREDEFINED;
- input_tf->tf = TRANSFER_FUNCTION_SRGB;
-
dc_plane_state->in_transfer_func = input_tf;
- /* In case of gamma set, update gamma value */
- if (crtc_state->gamma_lut)
- fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
+ /*
+ * Always set input transfer function, since plane state is refreshed
+ * every time.
+ */
+ ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
+ if (ret) {
+ dc_transfer_func_release(dc_plane_state->in_transfer_func);
+ dc_plane_state->in_transfer_func = NULL;
+ }
return ret;
}
@@ -2042,30 +2044,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
dst.width = stream->timing.h_addressable;
dst.height = stream->timing.v_addressable;
- rmx_type = dm_state->scaling;
- if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
- if (src.width * dst.height <
- src.height * dst.width) {
- /* height needs less upscaling/more downscaling */
- dst.width = src.width *
- dst.height / src.height;
- } else {
- /* width needs less upscaling/more downscaling */
- dst.height = src.height *
- dst.width / src.width;
+ if (dm_state) {
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
}
- } else if (rmx_type == RMX_CENTER) {
- dst = src;
- }
- dst.x = (stream->timing.h_addressable - dst.width) / 2;
- dst.y = (stream->timing.v_addressable - dst.height) / 2;
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
- if (dm_state->underscan_enable) {
- dst.x += dm_state->underscan_hborder / 2;
- dst.y += dm_state->underscan_vborder / 2;
- dst.width -= dm_state->underscan_hborder;
- dst.height -= dm_state->underscan_vborder;
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
}
stream->src = src;
@@ -2174,6 +2178,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_connector *connector)
{
struct dc_crtc_timing *timing_out = &stream->timing;
+ struct dc_transfer_func *tf = dc_create_transfer_func();
memset(timing_out, 0, sizeof(struct dc_crtc_timing));
@@ -2217,13 +2222,9 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
stream->output_color_space = get_output_color_space(timing_out);
- {
- struct dc_transfer_func *tf = dc_create_transfer_func();
-
- tf->type = TF_TYPE_PREDEFINED;
- tf->tf = TRANSFER_FUNCTION_SRGB;
- stream->out_transfer_func = tf;
- }
+ tf->type = TF_TYPE_PREDEFINED;
+ tf->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func = tf;
}
static void fill_audio_info(struct audio_info *audio_info,
@@ -2330,6 +2331,56 @@ static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
return 0;
}
+static void set_multisync_trigger_params(
+ struct dc_stream_state *stream)
+{
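+ /* For streams with triggered CRTC reset enabled, fire the reset on the
+ * rising edge of VSYNC, delayed to the next line.
+ */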
+ if (stream->triggered_crtc_reset.enabled) {
+ stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
+ stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
+ }
+}
+
+static void set_master_stream(struct dc_stream_state *stream_set[],
+ int stream_count)
+{
+ int j, highest_rfr = 0, master_stream = 0;
+
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
+ int refresh_rate = 0;
+
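+ /* Refresh rate in Hz: pixel clock (in Hz) divided by the total number
+ * of pixels per frame (h_total * v_total).
+ */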
+ refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/
+ (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
+ if (refresh_rate > highest_rfr) {
+ highest_rfr = refresh_rate;
+ master_stream = j;
+ }
+ }
+ }
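+ /* Point every stream's CRTC reset event source at the chosen master. */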
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j])
+ stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
+ }
+}
+
+static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
+{
+ int i = 0;
+
+ if (context->stream_count < 2)
+ return;
+ for (i = 0; i < context->stream_count ; i++) {
+ if (!context->streams[i])
+ continue;
+ /* TODO: add a function to read the AMD VSDB bits and set the
+ * crtc_sync_master.multi_sync_enabled flag accordingly.
+ * For now it is set to false.
+ */
+ set_multisync_trigger_params(context->streams[i]);
+ }
+ set_master_stream(context->streams, context->stream_count);
+}
+
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_display_mode *drm_mode,
@@ -2343,12 +2394,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
- goto drm_connector_null;
- }
-
- if (dm_state == NULL) {
- DRM_ERROR("dm_state is NULL!\n");
- goto dm_state_null;
+ return stream;
}
drm_connector = &aconnector->base;
@@ -2360,18 +2406,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
if (aconnector->mst_port) {
dm_dp_mst_dc_sink_create(drm_connector);
- goto mst_dc_sink_create_done;
+ return stream;
}
if (create_fake_sink(aconnector))
- goto stream_create_fail;
+ return stream;
}
stream = dc_create_stream_for_sink(aconnector->dc_sink);
if (stream == NULL) {
DRM_ERROR("Failed to create stream for sink!\n");
- goto stream_create_fail;
+ return stream;
}
list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
@@ -2397,9 +2443,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
} else {
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode,
- dm_state->scaling != RMX_OFF);
+ dm_state ? (dm_state->scaling != RMX_OFF) : false);
}
+ if (!dm_state)
+ drm_mode_set_crtcinfo(&mode, 0);
+
fill_stream_properties_from_drm_display_mode(stream,
&mode, &aconnector->base);
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -2409,10 +2458,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
drm_connector,
aconnector->dc_sink);
-stream_create_fail:
-dm_state_null:
-drm_connector_null:
-mst_dc_sink_create_done:
+ update_stream_signal(stream);
+
return stream;
}
@@ -2480,6 +2527,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}
+
+static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+
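+ /* Each CRTC's vblank interrupt source is indexed by its OTG instance. */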
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+}
+
+static int dm_enable_vblank(struct drm_crtc *crtc)
+{
+ return dm_set_vblank(crtc, true);
+}
+
+static void dm_disable_vblank(struct drm_crtc *crtc)
+{
+ dm_set_vblank(crtc, false);
+}
+
/* Implemented only the options currently availible for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = dm_crtc_reset_state,
@@ -2489,6 +2557,9 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = dm_crtc_duplicate_state,
.atomic_destroy_state = dm_crtc_destroy_state,
+ .set_crc_source = amdgpu_dm_crtc_set_crc_source,
+ .enable_vblank = dm_enable_vblank,
+ .disable_vblank = dm_disable_vblank,
};
static enum drm_connector_status
@@ -2713,8 +2784,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
};
struct edid *edid;
- if (!aconnector->base.edid_blob_ptr ||
- !aconnector->base.edid_blob_ptr->data) {
+ if (!aconnector->base.edid_blob_ptr) {
DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
aconnector->base.name);
@@ -2765,6 +2835,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
/* TODO: Unhardcode stream count */
struct dc_stream_state *stream;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ enum dc_status dc_result = DC_OK;
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN))
@@ -2784,21 +2855,22 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
goto fail;
}
- stream = dc_create_stream_for_sink(dc_sink);
+ stream = create_stream_for_sink(aconnector, mode, NULL);
if (stream == NULL) {
DRM_ERROR("Failed to create stream for sink!\n");
goto fail;
}
- drm_mode_set_crtcinfo(mode, 0);
- fill_stream_properties_from_drm_display_mode(stream, mode, connector);
-
- stream->src.width = mode->hdisplay;
- stream->src.height = mode->vdisplay;
- stream->dst = stream->src;
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
- if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
+ if (dc_result == DC_OK)
result = MODE_OK;
+ else
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+ mode->hdisplay,
+ mode->vdisplay,
+ mode->clock,
+ dc_result);
dc_stream_release(stream);
@@ -2940,11 +3012,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
{
struct amdgpu_framebuffer *afb;
struct drm_gem_object *obj;
+ struct amdgpu_device *adev;
struct amdgpu_bo *rbo;
uint64_t chroma_addr = 0;
- int r;
struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
unsigned int awidth;
+ uint32_t domain;
+ int r;
dm_plane_state_old = to_dm_plane_state(plane->state);
dm_plane_state_new = to_dm_plane_state(new_state);
@@ -2958,12 +3032,17 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
obj = afb->obj;
rbo = gem_to_amdgpu_bo(obj);
+ adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ domain = amdgpu_display_framebuffer_domains(adev);
+ else
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
+ r = amdgpu_bo_pin(rbo, domain, &afb->address);
amdgpu_bo_unreserve(rbo);
@@ -2989,7 +3068,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
= lower_32_bits(afb->address);
plane_state->address.video_progressive.luma_addr.high_part
= upper_32_bits(afb->address);
- chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
+ chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
plane_state->address.video_progressive.chroma_addr.low_part
= lower_32_bits(chroma_addr);
plane_state->address.video_progressive.chroma_addr.high_part
@@ -3044,6 +3123,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
if (!dm_plane_state->dc_state)
return 0;
+ if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
+ return -EINVAL;
+
if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
return 0;
@@ -3090,8 +3172,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
switch (aplane->base.type) {
case DRM_PLANE_TYPE_PRIMARY:
- aplane->base.format_default = true;
-
res = drm_universal_plane_init(
dm->adev->ddev,
&aplane->base,
@@ -3176,7 +3256,9 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
acrtc->base.enabled = false;
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
+ drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
+ true, MAX_COLOR_LUT_ENTRIES);
+ drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
return 0;
@@ -3336,8 +3418,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
amdgpu_dm_connector->num_modes =
drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
-
amdgpu_dm_get_native_mode(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
@@ -3354,9 +3434,12 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
struct edid *edid = amdgpu_dm_connector->edid;
encoder = helper->best_encoder(connector);
-
amdgpu_dm_connector_ddc_get_modes(connector, edid);
amdgpu_dm_connector_add_common_modes(encoder, connector);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ amdgpu_dm_fbc_init(connector);
+#endif
return amdgpu_dm_connector->num_modes;
}
@@ -3629,7 +3712,7 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
* constant is the same as PFLIP
*/
int irq_type =
- amdgpu_crtc_idx_to_irq_type(
+ amdgpu_display_crtc_idx_to_irq_type(
adev,
acrtc->crtc_id);
@@ -3824,7 +3907,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
/* Prepare wait for target vblank early - before the fence-waits */
- target_vblank = target - drm_crtc_vblank_count(crtc) +
+ target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
/* TODO This might fail and hence better not used, wait
@@ -3848,9 +3931,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
* targeted by the flip
*/
while ((acrtc->enabled &&
- (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
- &vpos, &hpos, NULL, NULL,
- &crtc->hwmode)
+ (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id,
+ 0, &vpos, &hpos, NULL,
+ NULL, &crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(target_vblank -
@@ -3970,7 +4053,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_dm_do_flip(
crtc,
fb,
- drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
dm_state->context);
}
@@ -3999,6 +4082,19 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
}
+/**
+ * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
+ * @crtc_state: the DRM CRTC state
+ * @stream_state: the DC stream state.
+ *
+ * Copy the mirrored transient state flags from DRM to DC. This is used to keep
+ * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
+ */
+static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *stream_state)
+{
+ stream_state->mode_changed = crtc_state->mode_changed;
+}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -4038,11 +4134,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
uint32_t i, j;
- uint32_t new_crtcs_count = 0;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
- struct dc_stream_state *new_stream = NULL;
unsigned long flags;
bool wait_for_vblank = true;
struct drm_connector *connector;
@@ -4072,6 +4165,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_crtc_state->active_changed,
new_crtc_state->connectors_changed);
+ /* Copy all transient state flags into dc state */
+ if (dm_new_crtc_state->stream) {
+ amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
+ dm_new_crtc_state->stream);
+ }
+
/* handles headless hotplug case, updating new_state and
* aconnector as needed
*/
@@ -4101,25 +4200,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
continue;
}
-
if (dm_old_crtc_state->stream)
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
-
- /*
- * this loop saves set mode crtcs
- * we needed to enable vblanks once all
- * resources acquired in dc after dc_commit_streams
- */
-
- /*TODO move all this into dm_crtc_state, get rid of
- * new_crtcs array and use old and new atomic states
- * instead
- */
- new_crtcs[new_crtcs_count] = acrtc;
- new_crtcs_count++;
-
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
acrtc->enabled = true;
acrtc->hw_mode = new_crtc_state->mode;
crtc->hwmode = new_crtc_state->mode;
@@ -4137,31 +4220,61 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* are removed from freesync module
*/
if (adev->dm.freesync_module) {
- for (i = 0; i < new_crtcs_count; i++) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
struct amdgpu_dm_connector *aconnector = NULL;
+ struct dm_connector_state *dm_new_con_state = NULL;
+ struct amdgpu_crtc *acrtc = NULL;
+ bool modeset_needed;
- new_crtc_state = drm_atomic_get_new_crtc_state(state,
- &new_crtcs[i]->base);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ modeset_needed = modeset_required(
+ new_crtc_state,
+ dm_new_crtc_state->stream,
+ dm_old_crtc_state->stream);
+ /* We add stream to freesync if:
+ * 1. Said stream is not null, and
+ * 2. A modeset is requested. This means that the
+ * stream was removed previously, and needs to be
+ * replaced.
+ */
+ if (dm_new_crtc_state->stream == NULL ||
+ !modeset_needed)
+ continue;
- new_stream = dm_new_crtc_state->stream;
- aconnector = amdgpu_dm_find_first_crtc_matching_connector(
- state,
- &new_crtcs[i]->base);
+ acrtc = to_amdgpu_crtc(crtc);
+
+ aconnector =
+ amdgpu_dm_find_first_crtc_matching_connector(
+ state, crtc);
if (!aconnector) {
- DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
- "skipping freesync init\n",
- new_crtcs[i]->crtc_id);
+ DRM_DEBUG_DRIVER("Atomic commit: Failed to "
+ "find connector for acrtc "
+ "id:%d skipping freesync "
+ "init\n",
+ acrtc->crtc_id);
continue;
}
mod_freesync_add_stream(adev->dm.freesync_module,
- new_stream, &aconnector->caps);
+ dm_new_crtc_state->stream,
+ &aconnector->caps);
+ new_con_state = drm_atomic_get_new_connector_state(
+ state, &aconnector->base);
+ dm_new_con_state = to_dm_connector_state(new_con_state);
+
+ mod_freesync_set_user_enable(adev->dm.freesync_module,
+ &dm_new_crtc_state->stream,
+ 1,
+ &dm_new_con_state->user_enable);
}
}
- if (dm_state->context)
+ if (dm_state->context) {
+ dm_enable_per_frame_crtc_master_sync(dm_state->context);
WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+ }
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -4219,18 +4332,28 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_error("%s: Failed to update stream scaling!\n", __func__);
}
- for (i = 0; i < new_crtcs_count; i++) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
/*
* loop to enable interrupts on newly arrived crtc
*/
- struct amdgpu_crtc *acrtc = new_crtcs[i];
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ bool modeset_needed;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ modeset_needed = modeset_required(
+ new_crtc_state,
+ dm_new_crtc_state->stream,
+ dm_old_crtc_state->stream);
+
+ if (dm_new_crtc_state->stream == NULL || !modeset_needed)
+ continue;
if (adev->dm.freesync_module)
mod_freesync_notify_mode_change(
- adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
+ adev->dm.freesync_module,
+ &dm_new_crtc_state->stream, 1);
manage_dm_interrupts(adev, acrtc, true);
}
@@ -4464,18 +4587,15 @@ static int dm_update_crtcs_state(struct dc *dc,
__func__, acrtc->base.base.id);
break;
}
- }
- if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
- dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
-
- new_crtc_state->mode_changed = false;
-
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
- new_crtc_state->mode_changed);
+ if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+ new_crtc_state->mode_changed = false;
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
}
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto next_crtc;
@@ -4532,6 +4652,7 @@ static int dm_update_crtcs_state(struct dc *dc,
WARN_ON(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = new_stream;
+
dc_stream_retain(new_stream);
DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
@@ -4553,6 +4674,30 @@ next_crtc:
/* Release extra reference */
if (new_stream)
dc_stream_release(new_stream);
+
+ /*
+ * We want to do dc stream updates that do not require a
+ * full modeset below.
+ */
+ if (!enable || !aconnector || modereset_required(new_crtc_state))
+ continue;
+ /*
+ * Given the above conditions, the dc state cannot be NULL because:
+ * 1. We're attempting to enable a CRTC. Which has a...
+ * 2. Valid connector attached, and
+ * 3. User does not want to reset it (disable or mark inactive,
+ * which can happen on a CRTC that's already disabled).
+ * => It currently exists.
+ */
+ BUG_ON(dm_new_crtc_state->stream == NULL);
+
+ /* Color management settings */
+ if (dm_new_crtc_state->base.color_mgmt_changed) {
+ ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
+ if (ret)
+ goto fail;
+ amdgpu_dm_set_ctm(dm_new_crtc_state);
+ }
}
return ret;
@@ -4580,11 +4725,9 @@ static int dm_update_planes_state(struct dc *dc,
bool pflip_needed = !state->allow_modeset;
int ret = 0;
- if (pflip_needed)
- return ret;
- /* Add new planes */
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ /* Add new planes, in reverse order as DC expectation */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
new_plane_crtc = new_plane_state->crtc;
old_plane_crtc = old_plane_state->crtc;
dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -4596,6 +4739,8 @@ static int dm_update_planes_state(struct dc *dc,
/* Remove any changed/removed planes */
if (!enable) {
+ if (pflip_needed)
+ continue;
if (!old_plane_crtc)
continue;
@@ -4627,6 +4772,7 @@ static int dm_update_planes_state(struct dc *dc,
*lock_and_validation_needed = true;
} else { /* Add new planes */
+ struct dc_plane_state *dc_new_plane_state;
if (drm_atomic_plane_disabling(plane->state, new_plane_state))
continue;
@@ -4640,39 +4786,52 @@ static int dm_update_planes_state(struct dc *dc,
if (!dm_new_crtc_state->stream)
continue;
+ if (pflip_needed)
+ continue;
WARN_ON(dm_new_plane_state->dc_state);
- dm_new_plane_state->dc_state = dc_create_plane_state(dc);
+ dc_new_plane_state = dc_create_plane_state(dc);
+ if (!dc_new_plane_state)
+ return -ENOMEM;
DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, new_plane_crtc->base.id);
- if (!dm_new_plane_state->dc_state) {
- ret = -EINVAL;
- return ret;
- }
-
ret = fill_plane_attributes(
new_plane_crtc->dev->dev_private,
- dm_new_plane_state->dc_state,
+ dc_new_plane_state,
new_plane_state,
- new_crtc_state,
- false);
- if (ret)
+ new_crtc_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
return ret;
+ }
-
+ /*
+ * Any atomic check errors that occur after this will
+ * not need a release. The plane state will be attached
+ * to the stream, and therefore part of the atomic
+ * state. It'll be released when the atomic state is
+ * cleaned.
+ */
if (!dc_add_plane_to_context(
dc,
dm_new_crtc_state->stream,
- dm_new_plane_state->dc_state,
+ dc_new_plane_state,
dm_state->context)) {
- ret = -EINVAL;
- return ret;
+ dc_plane_state_release(dc_new_plane_state);
+ return -EINVAL;
}
+ dm_new_plane_state->dc_state = dc_new_plane_state;
+
+ /* Tell DC to do a full surface update every time there
+ * is a plane change. Inefficient, but works for now.
+ */
+ dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
+
*lock_and_validation_needed = true;
}
}
@@ -4681,11 +4840,36 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
+static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ struct drm_crtc_state *crtc_state;
+
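+ /* Pull each plane on this CRTC into the atomic state and require a
+ * framebuffer on the primary plane of an active CRTC.
+ */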
+ WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+
+ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(plane_state))
+ return -EDEADLK;
+
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (crtc->primary == plane && crtc_state->active) {
+ if (!plane_state->fb)
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int i;
- int ret;
struct amdgpu_device *adev = dev->dev_private;
struct dc *dc = adev->dm.dc;
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
@@ -4693,6 +4877,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_connector_state *old_con_state, *new_con_state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int ret, i;
/*
* This bool will be set for true for any modeset/reset
@@ -4704,37 +4889,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- /*
- * legacy_cursor_update should be made false for SoC's having
- * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
- * otherwise for software cursor plane,
- * we should not add it to list of affected planes.
- */
- if (state->legacy_cursor_update) {
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (new_crtc_state->color_mgmt_changed) {
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- goto fail;
- }
- }
- } else {
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
- !new_crtc_state->color_mgmt_changed)
- continue;
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_atomic_check_plane_state_fb(state, crtc);
+ if (ret)
+ goto fail;
- if (!new_crtc_state->enable)
- continue;
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed)
+ continue;
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- return ret;
+ if (!new_crtc_state->enable)
+ continue;
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- goto fail;
- }
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
}
dm_state->context = dc_create_state();
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 0230250..b68400c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -85,8 +85,6 @@ struct amdgpu_display_manager {
struct dal *dal;
struct dc *dc;
struct cgs_device *cgs_device;
- /* lock to be used when DAL is called from SYNC IRQ context */
- spinlock_t dal_lock;
struct amdgpu_device *adev; /*AMD base driver*/
struct drm_device *ddev; /*DRM base driver*/
@@ -119,17 +117,6 @@ struct amdgpu_display_manager {
/* this spin lock synchronizes access to 'irq_handler_list_table' */
spinlock_t irq_handler_list_table_lock;
- /* Timer-related data. */
- struct list_head timer_handler_list;
- struct workqueue_struct *timer_workqueue;
-
- /* Use dal_mutex for any activity which is NOT syncronized by
- * DRM mode setting locks.
- * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
- * DRM mode setting locks being acquired. This is where dal_mutex
- * is acquired before calling into DAL. */
- struct mutex dal_mutex;
-
struct backlight_device *backlight_dev;
const struct dc_link *backlight_link;
@@ -210,6 +197,9 @@ struct dm_plane_state {
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
+
+ int crc_skip_count;
+ bool crc_enabled;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
@@ -222,6 +212,18 @@ struct dm_atomic_state {
#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
+struct dm_connector_state {
+ struct drm_connector_state base;
+
+ enum amdgpu_rmx_type scaling;
+ uint8_t underscan_vborder;
+ uint8_t underscan_hborder;
+ bool underscan_enable;
+ struct mod_freesync_user_enable user_enable;
+};
+
+#define to_dm_connector_state(x)\
+ container_of((x), struct dm_connector_state, base)
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
@@ -256,6 +258,26 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
void
amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector);
+/* amdgpu_dm_crc.c */
+#ifdef CONFIG_DEBUG_FS
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt);
+void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
+#else
+#define amdgpu_dm_crtc_set_crc_source NULL
+#define amdgpu_dm_crtc_handle_crc_irq(x)
+#endif
+
+#define MAX_COLOR_LUT_ENTRIES 4096
+/* Legacy gamma LUT users such as X don't like large LUT sizes */
+#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
+
+void amdgpu_dm_init_color_mod(void);
+int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
+ struct dc_plane_state *dc_plane_state);
+void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc);
+int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc);
+
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
new file mode 100644
index 0000000..f6cb502
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "amdgpu_mode.h"
+#include "amdgpu_dm.h"
+#include "modules/color/color_gamma.h"
+
+#define MAX_DRM_LUT_VALUE 0xFFFF
+
+/*
+ * Initialize the color module.
+ *
+ * We're not using the full color module, only certain components.
+ * Only call setup functions for components that we need.
+ */
+void amdgpu_dm_init_color_mod(void)
+{
+ setup_x_points_distribution();
+}
+
+
+/*
+ * Return true if the given lut is a linear mapping of values, i.e. it acts
+ * like a bypass LUT.
+ *
+ * It is considered linear if the lut represents:
+ * f(a) = a * MAX_DRM_LUT_VALUE / (size - 1); for integer a in
+ * [0, size)
+ */
+static bool __is_lut_linear(struct drm_color_lut *lut, uint32_t size)
+{
+ int i;
+ uint32_t expected;
+ int delta;
+
+ for (i = 0; i < size; i++) {
+ /* All color channels should be equal */
+ if ((lut[i].red != lut[i].green) || (lut[i].green != lut[i].blue))
+ return false;
+
+ expected = i * MAX_DRM_LUT_VALUE / (size-1);
+
+ /* Allow a +/-1 error. */
+ delta = lut[i].red - expected;
+ if (delta < -1 || 1 < delta)
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Convert the drm_color_lut to dc_gamma. The conversion depends on the size
+ * of the lut - whether or not it's legacy.
+ */
+static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
+ struct dc_gamma *gamma,
+ bool is_legacy)
+{
+ uint32_t r, g, b;
+ int i;
+
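+ /* Legacy (256-entry) LUTs keep their extracted 16-bit values as integers;
+ * full-size LUTs are normalized to fractions of MAX_DRM_LUT_VALUE.
+ */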
+ if (is_legacy) {
+ for (i = 0; i < MAX_COLOR_LEGACY_LUT_ENTRIES; i++) {
+ r = drm_color_lut_extract(lut[i].red, 16);
+ g = drm_color_lut_extract(lut[i].green, 16);
+ b = drm_color_lut_extract(lut[i].blue, 16);
+
+ gamma->entries.red[i] = dal_fixed31_32_from_int(r);
+ gamma->entries.green[i] = dal_fixed31_32_from_int(g);
+ gamma->entries.blue[i] = dal_fixed31_32_from_int(b);
+ }
+ return;
+ }
+
+ /* else */
+ for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
+ r = drm_color_lut_extract(lut[i].red, 16);
+ g = drm_color_lut_extract(lut[i].green, 16);
+ b = drm_color_lut_extract(lut[i].blue, 16);
+
+ gamma->entries.red[i] = dal_fixed31_32_from_fraction(r, MAX_DRM_LUT_VALUE);
+ gamma->entries.green[i] = dal_fixed31_32_from_fraction(g, MAX_DRM_LUT_VALUE);
+ gamma->entries.blue[i] = dal_fixed31_32_from_fraction(b, MAX_DRM_LUT_VALUE);
+ }
+}
+
+/**
+ * amdgpu_dm_set_regamma_lut: Set regamma lut for the given CRTC.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * Update the underlying dc_stream_state's output transfer function (OTF) in
+ * preparation for hardware commit. If no lut is specified by user, we default
+ * to SRGB.
+ *
+ * RETURNS:
+ * 0 on success, -ENOMEM if memory cannot be allocated to calculate the OTF.
+ */
+int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
+{
+ struct drm_property_blob *blob = crtc->base.gamma_lut;
+ struct dc_stream_state *stream = crtc->stream;
+ struct drm_color_lut *lut;
+ uint32_t lut_size;
+ struct dc_gamma *gamma;
+ enum dc_transfer_func_type old_type = stream->out_transfer_func->type;
+
+ bool ret;
+
+ if (!blob) {
+ /* By default, use the SRGB predefined curve. */
+ stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ return 0;
+ }
+
+ lut = (struct drm_color_lut *)blob->data;
+ lut_size = blob->length / sizeof(struct drm_color_lut);
+
+ if (__is_lut_linear(lut, lut_size)) {
+ /* Set to bypass if lut is set to linear */
+ stream->out_transfer_func->type = TF_TYPE_BYPASS;
+ stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+ return 0;
+ }
+
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
+ gamma->type = GAMMA_RGB_256;
+ else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
+ gamma->type = GAMMA_CS_TFM_1D;
+ else {
+ /* Invalid lut size */
+ dc_gamma_release(&gamma);
+ return -EINVAL;
+ }
+
+ /* Convert drm_lut into dc_gamma */
+ __drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
+
+ /* Call color module to translate into something DC understands. Namely
+ * a transfer function.
+ */
+ stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
+ gamma, true);
+ dc_gamma_release(&gamma);
+ if (!ret) {
+ stream->out_transfer_func->type = old_type;
+ DRM_ERROR("Out of memory when calculating regamma params\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_dm_set_ctm: Set the color transform matrix for the given CRTC.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * Update the underlying dc_stream_state's gamut remap matrix in preparation
+ * for hardware commit. If no matrix is specified by user, gamut remap will be
+ * disabled.
+ */
+void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
+{
+
+ struct drm_property_blob *blob = crtc->base.ctm;
+ struct dc_stream_state *stream = crtc->stream;
+ struct drm_color_ctm *ctm;
+ int64_t val;
+ int i;
+
+ if (!blob) {
+ stream->gamut_remap_matrix.enable_remap = false;
+ return;
+ }
+
+ stream->gamut_remap_matrix.enable_remap = true;
+ ctm = (struct drm_color_ctm *)blob->data;
+ /*
+ * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating
+ * with homogeneous coordinates, augment the matrix with 0's.
+ *
+ * The format provided is S31.32, using signed-magnitude representation.
+ * Our fixed31_32 is also S31.32, but uses 2's complement. We have
+ * to convert from signed-magnitude to 2's complement.
+ */
+ for (i = 0; i < 12; i++) {
+ /* Skip 4th element */
+ if (i % 4 == 3) {
+ stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero;
+ continue;
+ }
+
+ /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
+ val = ctm->matrix[i - (i/4)];
+ /* If negative, convert to 2's complement. */
+ if (val & (1ULL << 63))
+ val = -(val & ~(1ULL << 63));
+
+ stream->gamut_remap_matrix.matrix[i].value = val;
+ }
+}
+
+
+/**
+ * amdgpu_dm_set_degamma_lut: Set degamma lut for the given CRTC.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * Update the underlying dc_stream_state's input transfer function (ITF) in
+ * preparation for hardware commit. If no lut is specified by user, we default
+ * to SRGB degamma.
+ *
+ * Currently, we only support degamma bypass, or preprogrammed SRGB degamma.
+ * Programmable degamma is not supported, and attempting to program one will
+ * return -EINVAL.
+ *
+ * RETURNS:
+ * 0 on success, -EINVAL if custom degamma curve is given.
+ */
+int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct drm_property_blob *blob = crtc_state->degamma_lut;
+ struct drm_color_lut *lut;
+
+ if (!blob) {
+ /* Default to SRGB */
+ dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
+ dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ return 0;
+ }
+
+ lut = (struct drm_color_lut *)blob->data;
+ if (__is_lut_linear(lut, MAX_COLOR_LUT_ENTRIES)) {
+ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+ return 0;
+ }
+
+ /* Otherwise, assume SRGB, since programmable degamma is not
+ * supported.
+ */
+ dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
+ dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ return -EINVAL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
new file mode 100644
index 0000000..52f2c01
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drm_crtc.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "dc.h"
+
+enum amdgpu_dm_pipe_crc_source {
+ AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0,
+ AMDGPU_DM_PIPE_CRC_SOURCE_AUTO,
+ AMDGPU_DM_PIPE_CRC_SOURCE_MAX,
+ AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1,
+};
+
+static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
+{
+ if (!source || !strcmp(source, "none"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
+ if (!strcmp(source, "auto"))
+ return AMDGPU_DM_PIPE_CRC_SOURCE_AUTO;
+
+ return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
+}
+
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt)
+{
+ struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
+ struct dc_stream_state *stream_state = crtc_state->stream;
+
+ enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
+ src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ /* When enabling CRC, we should also disable dithering. */
+ if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
+ if (dc_stream_configure_crc(stream_state->ctx->dc,
+ stream_state,
+ true, true)) {
+ crtc_state->crc_enabled = true;
+ dc_stream_set_dither_option(stream_state,
+ DITHER_OPTION_TRUN8);
+ }
+ else
+ return -EINVAL;
+ } else {
+ if (dc_stream_configure_crc(stream_state->ctx->dc,
+ stream_state,
+ false, false)) {
+ crtc_state->crc_enabled = false;
+ dc_stream_set_dither_option(stream_state,
+ DITHER_OPTION_DEFAULT);
+ }
+ else
+ return -EINVAL;
+ }
+
+ *values_cnt = 3;
+ /* Reset crc_skipped on dm state */
+ crtc_state->crc_skip_count = 0;
+ return 0;
+}
+
+/**
+ * amdgpu_dm_crtc_handle_crc_irq: Report to DRM the CRC on given CRTC.
+ * @crtc: DRM CRTC object.
+ *
+ * This function should be called at the end of a vblank, when the fb has been
+ * fully processed through the pipe.
+ */
+void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
+ struct dc_stream_state *stream_state = crtc_state->stream;
+ uint32_t crcs[3];
+
+ /* Early return if CRC capture is not enabled. */
+ if (!crtc_state->crc_enabled)
+ return;
+
+ /*
+ * Since flipping and crc enablement happen asynchronously, we - more
+ * often than not - will be returning an 'uncooked' crc on first frame.
+ * Probably because hw isn't ready yet. For added security, skip the
+ * first two CRC values.
+ */
+ if (crtc_state->crc_skip_count < 2) {
+ crtc_state->crc_skip_count += 1;
+ return;
+ }
+
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
+ &crcs[0], &crcs[1], &crcs[2]))
+ return;
+
+ drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 9bd142f..ca0b08b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
struct cea_sad *sad = &sads[i];
edid_caps->audio_modes[i].format_code = sad->format;
- edid_caps->audio_modes[i].channel_count = sad->channels;
+ edid_caps->audio_modes[i].channel_count = sad->channels + 1;
edid_caps->audio_modes[i].sample_rate = sad->freq;
edid_caps->audio_modes[i].sample_size = sad->byte2;
}
@@ -258,6 +258,15 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
return true;
}
+
+/*
+ * Clear the payload allocation table before enabling the MST DP link.
+ */
+void dm_helpers_dp_mst_clear_payload_allocation_table(
+ struct dc_context *ctx,
+ const struct dc_link *link)
+{}
+
/*
* Polls for ACT (allocation change trigger) handled and sends
* ALLOCATE_PAYLOAD message.
@@ -496,3 +505,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
return edid_status;
}
+
+void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
+{
+ /* TODO: something */
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index ca5d0d15..490017d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -51,11 +51,6 @@ struct amdgpu_dm_irq_handler_data {
enum dc_irq_source irq_source;
};
-struct amdgpu_dm_timer_handler_data {
- struct handler_common_data hcd;
- struct delayed_work d_work;
-};
-
#define DM_IRQ_TABLE_LOCK(adev, flags) \
spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
@@ -169,62 +164,6 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
return hnd_list;
}
-/* If 'handler_in == NULL' then remove ALL handlers. */
-static void remove_timer_handler(struct amdgpu_device *adev,
- struct amdgpu_dm_timer_handler_data *handler_in)
-{
- struct amdgpu_dm_timer_handler_data *handler_temp;
- struct list_head *handler_list;
- struct list_head *entry, *tmp;
- unsigned long irq_table_flags;
- bool handler_removed = false;
-
- DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-
- handler_list = &adev->dm.timer_handler_list;
-
- list_for_each_safe(entry, tmp, handler_list) {
- /* Note that list_for_each_safe() guarantees that
- * handler_temp is NOT null. */
- handler_temp = list_entry(entry,
- struct amdgpu_dm_timer_handler_data, hcd.list);
-
- if (handler_in == NULL || handler_in == handler_temp) {
- list_del(&handler_temp->hcd.list);
- DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-
- DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
- handler_temp);
-
- if (handler_in == NULL) {
- /* Since it is still in the queue, it must
- * be cancelled. */
- cancel_delayed_work_sync(&handler_temp->d_work);
- }
-
- kfree(handler_temp);
- handler_removed = true;
-
- DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
- }
-
- /* Remove ALL handlers. */
- if (handler_in == NULL)
- continue;
-
- /* Remove a SPECIFIC handler.
- * Found our handler - we can stop here. */
- if (handler_in == handler_temp)
- break;
- }
-
- DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-
- if (handler_in != NULL && handler_removed == false)
- DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
- handler_in);
-}
-
static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
void (*ih)(void *))
@@ -382,16 +321,6 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
}
- INIT_LIST_HEAD(&adev->dm.timer_handler_list);
-
- /* allocate and initialize the workqueue for DM timer */
- adev->dm.timer_workqueue = create_singlethread_workqueue(
- "dm_timer_queue");
- if (adev->dm.timer_workqueue == NULL) {
- DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
- return -1;
- }
-
return 0;
}
@@ -410,11 +339,6 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
lh = &adev->dm.irq_handler_list_low_tab[src];
flush_work(&lh->work);
}
-
- /* Cancel ALL timers and release handlers (if any). */
- remove_timer_handler(adev, NULL);
- /* Release the queue itself. */
- destroy_workqueue(adev->dm.timer_workqueue);
}
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
@@ -683,13 +607,14 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+
+ adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
- adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.num_types = adev->mode_info.num_hpd;
adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 638c2c2..8291d74 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -83,17 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
I2C_MOT_TRUE : I2C_MOT_FALSE;
enum ddc_result res;
+ ssize_t read_bytes;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
- res = dal_ddc_service_read_dpcd_data(
+ read_bytes = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
false,
I2C_MOT_UNDEF,
msg->address,
msg->buffer,
msg->size);
- break;
+ return read_bytes;
case DP_AUX_NATIVE_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
@@ -104,14 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
msg->size);
break;
case DP_AUX_I2C_READ:
- res = dal_ddc_service_read_dpcd_data(
+ read_bytes = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
true,
mot,
msg->address,
msg->buffer,
msg->size);
- break;
+ return read_bytes;
case DP_AUX_I2C_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
@@ -174,17 +178,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
-static int dm_connector_update_modes(struct drm_connector *connector,
- struct edid *edid)
-{
- int ret;
-
- ret = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
-
- return ret;
-}
-
void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -194,6 +187,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ /*
+ * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
+ */
+ if (!aconnector->port || !aconnector->port->aux.ddc.algo)
+ return;
+
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
if (!edid) {
@@ -227,7 +226,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
int ret = 0;
if (!aconnector)
- return dm_connector_update_modes(connector, NULL);
+ return drm_add_edid_modes(connector, NULL);
if (!aconnector->edid) {
struct edid *edid;
@@ -263,7 +262,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
&aconnector->base, edid);
}
- ret = dm_connector_update_modes(connector, aconnector->edid);
+ ret = drm_add_edid_modes(connector, aconnector->edid);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 5df8fd5..89342b4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -41,6 +41,10 @@ unsigned long long dm_get_timestamp(struct dc_context *ctx)
return 0;
}
+void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
+{
+}
+
bool dm_write_persistent_data(struct dc_context *ctx,
const struct dc_sink *sink,
const char *module_name,
@@ -67,15 +71,6 @@ bool dm_read_persistent_data(struct dc_context *ctx,
/**** power component interfaces ****/
-bool dm_pp_pre_dce_clock_change(
- struct dc_context *ctx,
- struct dm_pp_gpu_clock_range *requested_state,
- struct dm_pp_gpu_clock_range *actual_state)
-{
- /*TODO*/
- return false;
-}
-
bool dm_pp_apply_display_requirements(
const struct dc_context *ctx,
const struct dm_pp_display_configuration *pp_display_cfg)
@@ -131,11 +126,12 @@ bool dm_pp_apply_display_requirements(
adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
/* TODO: complete implementation of
- * amd_powerplay_display_configuration_change().
+ * pp_display_configuration_change().
* Follow example of:
* PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
* PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
- amd_powerplay_display_configuration_change(
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
@@ -146,30 +142,6 @@ bool dm_pp_apply_display_requirements(
return true;
}
-bool dc_service_get_system_clocks_range(
- const struct dc_context *ctx,
- struct dm_pp_gpu_clock_range *sys_clks)
-{
- struct amdgpu_device *adev = ctx->driver_context;
-
- /* Default values, in case PPLib is not compiled-in. */
- sys_clks->mclk.max_khz = 800000;
- sys_clks->mclk.min_khz = 800000;
-
- sys_clks->sclk.max_khz = 600000;
- sys_clks->sclk.min_khz = 300000;
-
- if (adev->pm.dpm_enabled) {
- sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
- sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
-
- sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
- sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
- }
-
- return true;
-}
-
static void get_default_clock_levels(
enum dm_pp_clock_type clk_type,
struct dm_pp_clock_levels *clks)
@@ -264,22 +236,26 @@ bool dm_pp_get_clock_levels_by_type(
struct amd_pp_simple_clock_info validation_clks = { 0 };
uint32_t i;
- if (amd_powerplay_get_clock_by_type(pp_handle,
+ if (adev->powerplay.pp_funcs->get_clock_by_type) {
+ if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
dc_to_pp_clock_type(clk_type), &pp_clks)) {
/* Error in pplib. Provide default values. */
- get_default_clock_levels(clk_type, dc_clks);
- return true;
+ get_default_clock_levels(clk_type, dc_clks);
+ return true;
+ }
}
pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
- if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
- &validation_clks)) {
- /* Error in pplib. Provide default values. */
- DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
- validation_clks.engine_max_clock = 72000;
- validation_clks.memory_max_clock = 80000;
- validation_clks.level = 0;
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+ if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
+ pp_handle, &validation_clks)) {
+ /* Error in pplib. Provide default values. */
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
}
DRM_INFO("DM_PPLIB: Validation clocks:\n");
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index 6af8c8a..bca33bd 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -24,7 +24,7 @@
# It provides the general basic services required by other DAL
# subcomponents.
-BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
+BASICS = conversion.o fixpt31_32.o fixpt32_32.o \
logger.o log_helpers.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 23c9a0e..3109649 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -46,7 +46,7 @@ uint16_t fixed_point_to_int_frac(
arg));
if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
- numerator = (uint16_t)dal_fixed31_32_floor(
+ numerator = (uint16_t)dal_fixed31_32_round(
dal_fixed31_32_mul_int(
arg,
divisor));
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 2693689..8a9bba8 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -554,6 +554,22 @@ static inline uint32_t ux_dy(
return result | fractional_part;
}
+static inline uint32_t clamp_ux_dy(
+ int64_t value,
+ uint32_t integer_bits,
+ uint32_t fractional_bits,
+ uint32_t min_clamp)
+{
+ uint32_t truncated_val = ux_dy(value, integer_bits, fractional_bits);
+
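+ /* Saturate to the largest representable value on integer overflow;
+ * otherwise clamp the truncated result to at least min_clamp.
+ */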
+ if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
+ return (1 << (integer_bits + fractional_bits)) - 1;
+ else if (truncated_val > min_clamp)
+ return truncated_val;
+ else
+ return min_clamp;
+}
+
uint32_t dal_fixed31_32_u2d19(
struct fixed31_32 arg)
{
@@ -565,3 +581,24 @@ uint32_t dal_fixed31_32_u0d19(
{
return ux_dy(arg.value, 0, 19);
}
+
+uint32_t dal_fixed31_32_clamp_u0d14(
+ struct fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 14, 1);
+}
+
+uint32_t dal_fixed31_32_clamp_u0d10(
+ struct fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 10, 1);
+}
+
+int32_t dal_fixed31_32_s4d19(
+ struct fixed31_32 arg)
+{
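+ /* s4.19 conversion; negative values go through the absolute value, since
+ * ux_dy() only handles non-negative input.
+ */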
+ if (arg.value < 0)
+ return -(int32_t)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
+ else
+ return ux_dy(arg.value, 4, 19);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
index 6e43168..854678a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -83,15 +83,11 @@ void dc_conn_log(struct dc_context *ctx,
link->link_index);
va_start(args, msg);
- entry.buf_offset += dm_log_to_buffer(
- &entry.buf[entry.buf_offset],
- LOG_MAX_LINE_SIZE - entry.buf_offset,
- msg, args);
+ dm_logger_append_va(&entry, msg, args);
- if (entry.buf[strlen(entry.buf) - 1] == '\n') {
- entry.buf[strlen(entry.buf) - 1] = '\0';
+ if (entry.buf_offset > 0 &&
+ entry.buf[entry.buf_offset - 1] == '\n')
entry.buf_offset--;
- }
if (hex_data)
for (i = 0; i < hex_data_count; i++)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
index e04e8ec..31bee05 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/logger.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -60,7 +60,8 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
{LOG_EVENT_LINK_LOSS, "LinkLoss"},
{LOG_EVENT_UNDERFLOW, "Underflow"},
{LOG_IF_TRACE, "InterfaceTrace"},
- {LOG_DTN, "DTN"}
+ {LOG_DTN, "DTN"},
+ {LOG_PROFILING, "Profiling"}
};
@@ -70,9 +71,8 @@ static bool construct(struct dc_context *ctx, struct dal_logger *logger,
{
/* malloc buffer and init offsets */
logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
- logger->log_buffer = (char *)kzalloc(logger->log_buffer_size * sizeof(char),
- GFP_KERNEL);
-
+ logger->log_buffer = kcalloc(logger->log_buffer_size, sizeof(char),
+ GFP_KERNEL);
if (!logger->log_buffer)
return false;
@@ -313,6 +313,18 @@ void dm_logger_append(
const char *msg,
...)
{
+ va_list args;
+
+ va_start(args, msg);
+ dm_logger_append_va(entry, msg, args);
+ va_end(args);
+}
+
+void dm_logger_append_va(
+ struct log_entry *entry,
+ const char *msg,
+ va_list args)
+{
struct dal_logger *logger;
if (!entry) {
@@ -326,11 +338,8 @@ void dm_logger_append(
dal_logger_should_log(logger, entry->type)) {
uint32_t size;
- va_list args;
char buffer[LOG_MAX_LINE_SIZE];
- va_start(args, msg);
-
size = dm_log_to_buffer(
buffer, LOG_MAX_LINE_SIZE, msg, args);
@@ -339,8 +348,6 @@ void dm_logger_append(
} else {
append_entry(entry, "LOG_ERROR, line too long\n", 27);
}
-
- va_end(args);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 86e6438..c7f0b27 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -49,6 +49,9 @@
#define LAST_RECORD_TYPE 0xff
+#define DC_LOGGER \
+ bp->base.ctx->logger
+
/* GUID to validate external display connection info table (aka OPM module) */
static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
0x91, 0x6E, 0x57, 0x09,
@@ -190,6 +193,7 @@ static struct graphics_object_id bios_parser_get_connector_id(
struct bios_parser *bp = BP_FROM_DCB(dcb);
struct graphics_object_id object_id = dal_graphics_object_id_init(
0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+ uint16_t id;
uint32_t connector_table_offset = bp->object_info_tbl_offset
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
@@ -197,12 +201,19 @@ static struct graphics_object_id bios_parser_get_connector_id(
ATOM_OBJECT_TABLE *tbl =
GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
- if (tbl && tbl->ucNumberOfObjects > i) {
- const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+ if (!tbl) {
+ dm_error("Can't get connector table from atom bios.\n");
+ return object_id;
+ }
- object_id = object_id_from_bios_object_id(id);
+ if (tbl->ucNumberOfObjects <= i) {
+ dm_error("Can't find connector id %d in connector table of size %d.\n",
+ i, tbl->ucNumberOfObjects);
+ return object_id;
}
+ id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+ object_id = object_id_from_bios_object_id(id);
return object_id;
}
@@ -2254,6 +2265,52 @@ static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
return BP_RESULT_OK;
}
+static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
+{
+ bool rc = true;
+
+ switch (id.type) {
+ case OBJECT_TYPE_UNKNOWN:
+ rc = false;
+ break;
+ case OBJECT_TYPE_GPU:
+ case OBJECT_TYPE_ENGINE:
+ /* do NOT check for id.id == 0 */
+ if (id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ default:
+ if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool dal_graphics_object_id_is_equal(
+ struct graphics_object_id id1,
+ struct graphics_object_id id2)
+{
+ if (false == dal_graphics_object_id_is_valid(id1)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id1'!\n", __func__);
+ return false;
+ }
+
+ if (false == dal_graphics_object_id_is_valid(id2)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id2'!\n", __func__);
+ return false;
+ }
+
+ if (id1.id == id2.id && id1.enum_id == id2.enum_id
+ && id1.type == id2.type)
+ return true;
+
+ return false;
+}
+
static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
struct graphics_object_id id)
{
@@ -3025,8 +3082,7 @@ static enum bp_result patch_bios_image_from_ext_display_connection_info(
opm_object,
&ext_display_connection_info_tbl) != BP_RESULT_OK) {
- dm_logger_write(bp->base.ctx->logger, LOG_WARNING,
- "%s: Failed to read Connection Info Table", __func__);
+ DC_LOG_WARNING("%s: Failed to read Connection Info Table", __func__);
return BP_RESULT_UNSUPPORTED;
}
@@ -3741,14 +3797,11 @@ static const struct dc_vbios_funcs vbios_funcs = {
.get_gpio_pin_info = bios_parser_get_gpio_pin_info,
- .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
-
- .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
-
.get_encoder_cap_info = bios_parser_get_encoder_cap_info,
/* bios scratch register communication */
.is_accelerated_mode = bios_is_accelerated_mode,
+ .get_vga_enabled_displays = bios_get_vga_enabled_displays,
.set_scratch_critical_state = bios_parser_set_scratch_critical_state,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 1ee1717..985fe8c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -44,7 +44,7 @@
#include "bios_parser_common.h"
#define LAST_RECORD_TYPE 0xff
-
+#define SMU9_SYSPLL0_ID 0
struct i2c_id_config_access {
uint8_t bfI2C_LineMux:4;
@@ -1220,7 +1220,7 @@ static unsigned int bios_parser_get_smu_clock_info(
if (!bp->cmd_tbl.get_smu_clock_info)
return BP_RESULT_FAILURE;
- return bp->cmd_tbl.get_smu_clock_info(bp);
+ return bp->cmd_tbl.get_smu_clock_info(bp, 0);
}
static enum bp_result bios_parser_program_crtc_timing(
@@ -1280,6 +1280,12 @@ static bool bios_parser_is_accelerated_mode(
return bios_is_accelerated_mode(dcb);
}
+static uint32_t bios_parser_get_vga_enabled_displays(
+ struct dc_bios *bios)
+{
+ return bios_get_vga_enabled_displays(bios);
+}
+
/**
* bios_parser_set_scratch_critical_state
@@ -1315,6 +1321,7 @@ static enum bp_result bios_parser_get_firmware_info(
case 3:
switch (revision.minor) {
case 1:
+ case 2:
result = get_firmware_info_v3_1(bp, info);
break;
default:
@@ -1370,7 +1377,7 @@ static enum bp_result get_firmware_info_v3_1(
if (bp->cmd_tbl.get_smu_clock_info != NULL) {
/* VBIOS gives in 10KHz */
info->smu_gpu_pll_output_freq =
- bp->cmd_tbl.get_smu_clock_info(bp) * 10;
+ bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
}
return BP_RESULT_OK;
@@ -1800,6 +1807,7 @@ static const struct dc_vbios_funcs vbios_funcs = {
.is_accelerated_mode = bios_parser_is_accelerated_mode,
+ .get_vga_enabled_displays = bios_parser_get_vga_enabled_displays,
.set_scratch_critical_state = bios_parser_set_scratch_critical_state,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
index 5c9e510..d458947 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -78,5 +78,13 @@ void bios_set_scratch_critical_state(
REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state);
}
+uint32_t bios_get_vga_enabled_displays(
+ struct dc_bios *bios)
+{
+ uint32_t active_disp = 1;
+ if (bios->regs->BIOS_SCRATCH_3) /* TODO: follow up on other ASICs */

+ active_disp = REG_READ(BIOS_SCRATCH_3) & 0XFFFF;
+ return active_disp;
+}
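
bios_get_vga_enabled_displays() above returns the low 16 bits of BIOS_SCRATCH_3 as a bitmask of displays the VBIOS left enabled, falling back to 0x1 when the register is not defined for the ASIC. A hedged sketch of how such a bitmask might be consumed, with hypothetical bit assignments (the real mapping is ASIC/VBIOS specific):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout for illustration only. */
#define VGA_DISPLAY_CRT1  (1u << 0)
#define VGA_DISPLAY_LCD1  (1u << 1)
#define VGA_DISPLAY_DFP1  (1u << 3)

static void report_vga_displays(uint32_t scratch3)
{
	uint32_t active = scratch3 & 0xFFFF;	/* same mask as the patch */

	printf("VGA-enabled display mask: 0x%04x\n", (unsigned)active);
	if (active & VGA_DISPLAY_CRT1)
		printf("  CRT1 active\n");
	if (active & VGA_DISPLAY_LCD1)
		printf("  LCD1 active\n");
	if (active & VGA_DISPLAY_DFP1)
		printf("  DFP1 active\n");
}

int main(void)
{
	report_vga_displays(0x00010009);	/* upper 16 bits are ignored */
	return 0;
}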
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
index c0047ef..75a29e6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -34,6 +34,7 @@ uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset,
bool bios_is_accelerated_mode(struct dc_bios *bios);
void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
+uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 3f7b2da..4b5fdd5 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -387,6 +387,7 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
break;
default:
+ dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = NULL;
break;
}
@@ -910,6 +911,8 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
+ dm_output_to_console("Don't have set_pixel_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
bp->cmd_tbl.set_pixel_clock = NULL;
break;
}
@@ -1227,6 +1230,8 @@ static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
enable_spread_spectrum_on_ppll_v3;
break;
default:
+ dm_output_to_console("Don't have enable_spread_spectrum_on_ppll for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL));
bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
break;
}
@@ -1422,6 +1427,8 @@ static void init_adjust_display_pll(struct bios_parser *bp)
bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
break;
default:
+ dm_output_to_console("Don't have adjust_display_pll for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll));
bp->cmd_tbl.adjust_display_pll = NULL;
break;
}
@@ -1695,6 +1702,8 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
+ dm_output_to_console("Don't have set_crtc_timing for dtd v%d\n",
+ dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -1704,6 +1713,8 @@ static void init_set_crtc_timing(struct bios_parser *bp)
bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
break;
default:
+ dm_output_to_console("Don't have set_crtc_timing for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing));
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -1890,6 +1901,8 @@ static void init_select_crtc_source(struct bios_parser *bp)
bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
break;
default:
+ dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source));
bp->cmd_tbl.select_crtc_source = NULL;
break;
}
@@ -1997,6 +2010,8 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
+ dm_output_to_console("Don't have enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -2103,6 +2118,8 @@ static void init_program_clock(struct bios_parser *bp)
bp->cmd_tbl.program_clock = program_clock_v6;
break;
default:
+ dm_output_to_console("Don't have program_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
bp->cmd_tbl.program_clock = NULL;
break;
}
@@ -2324,6 +2341,8 @@ static void init_enable_disp_power_gating(
enable_disp_power_gating_v2_1;
break;
default:
+ dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating));
bp->cmd_tbl.enable_disp_power_gating = NULL;
break;
}
@@ -2371,6 +2390,8 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
+ dm_output_to_console("Don't have set_dce_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index ba68693..3f63f71 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -34,6 +34,8 @@
#include "command_table_helper2.h"
#include "bios_parser_helper.h"
#include "bios_parser_types_internal2.h"
+#define DC_LOGGER \
+ bp->base.ctx->logger
#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
(((char *)(&((\
@@ -118,6 +120,7 @@ static void init_dig_encoder_control(struct bios_parser *bp)
bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
break;
default:
+ dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
bp->cmd_tbl.dig_encoder_control = NULL;
break;
}
@@ -205,6 +208,7 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
break;
default:
+ dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = NULL;
break;
}
@@ -237,8 +241,7 @@ static enum bp_result transmitter_control_v1_6(
if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
cntl->action == TRANSMITTER_CONTROL_ACTIAVATE ||
cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) {
- dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\
- "%s:ps.param.symclk_10khz = %d\n",\
+ DC_LOG_BIOS("%s:ps.param.symclk_10khz = %d\n",\
__func__, ps.param.symclk_10khz);
}
@@ -268,6 +271,8 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
+ dm_output_to_console("Don't have set_pixel_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
bp->cmd_tbl.set_pixel_clock = NULL;
break;
}
@@ -327,8 +332,7 @@ static enum bp_result set_pixel_clock_v7(
(uint8_t) bp->cmd_helper->
transmitter_color_depth_to_atom(
bp_params->color_depth);
- dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\
- "%s:program display clock = %d"\
+ DC_LOG_BIOS("%s:program display clock = %d"\
"colorDepth = %d\n", __func__,\
bp_params->target_pixel_clock, bp_params->color_depth);
@@ -379,6 +383,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
+ dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -498,6 +503,8 @@ static void init_select_crtc_source(struct bios_parser *bp)
bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
break;
default:
+ dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source));
bp->cmd_tbl.select_crtc_source = NULL;
break;
}
@@ -565,6 +572,8 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
+ dm_output_to_console("Don't have enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -661,6 +670,8 @@ static void init_enable_disp_power_gating(
enable_disp_power_gating_v2_1;
break;
default:
+ dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating));
bp->cmd_tbl.enable_disp_power_gating = NULL;
break;
}
@@ -710,6 +721,8 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
+ dm_output_to_console("Don't have set_dce_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
@@ -759,8 +772,7 @@ static enum bp_result set_dce_clock_v2_1(
*/
params.param.dceclk_10khz = cpu_to_le32(
bp_params->target_clock_frequency / 10);
- dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
- "%s:target_clock_frequency = %d"\
+ DC_LOG_BIOS("%s:target_clock_frequency = %d"\
"clock_type = %d \n", __func__,\
bp_params->target_clock_frequency,\
bp_params->clock_type);
@@ -784,7 +796,7 @@ static enum bp_result set_dce_clock_v2_1(
******************************************************************************
*****************************************************************************/
-static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp);
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id);
static void init_get_smu_clock_info(struct bios_parser *bp)
{
@@ -793,12 +805,13 @@ static void init_get_smu_clock_info(struct bios_parser *bp)
}
-static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp)
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
{
struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
+ smu_input.syspll_id = id;
/* Get Specific Clock */
if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index 59061b8..ec1c0c9 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -96,7 +96,7 @@ struct cmd_tbl {
struct bios_parser *bp,
struct bp_set_dce_clock_parameters *bp_params);
unsigned int (*get_smu_clock_info)(
- struct bios_parser *bp);
+ struct bios_parser *bp, uint8_t id);
};
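
The hook change above threads a system PLL id through get_smu_clock_info(), with callers passing SMU9_SYSPLL0_ID instead of implicitly querying SYSPLL0. A standalone sketch of the same callback shape, with hypothetical names and a fabricated return value purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define SYSPLL0_ID 0	/* mirrors the new SMU9_SYSPLL0_ID constant */

/* Hypothetical, trimmed-down command-table vtable: the clock query now
 * takes the system PLL id as an explicit parameter. */
struct cmd_tbl_sketch {
	unsigned int (*get_smu_clock_info)(void *bp, uint8_t syspll_id);
};

static unsigned int fake_get_smu_clock_info(void *bp, uint8_t syspll_id)
{
	(void)bp;
	/* pretend SYSPLL0 reports 2850 (in 10 kHz units), others report 0 */
	return syspll_id == SYSPLL0_ID ? 2850 : 0;
}

int main(void)
{
	struct cmd_tbl_sketch tbl = {
		.get_smu_clock_info = fake_get_smu_clock_info,
	};
	/* callers, as in get_firmware_info_v3_1(), multiply by 10 for kHz */
	unsigned int pll_khz = tbl.get_smu_clock_info(NULL, SYSPLL0_ID) * 10;

	printf("SYSPLL0 output: %u kHz\n", pll_khz);
	return 0;
}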
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
index 1fab634..4c3789df 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -29,38 +29,7 @@
#include "dce80/command_table_helper_dce80.h"
#include "dce110/command_table_helper_dce110.h"
#include "dce112/command_table_helper_dce112.h"
-
-struct command_table_helper {
- bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
- uint8_t (*encoder_action_to_atom)(
- enum bp_encoder_control_action action);
- uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
- bool enable_dp_audio);
- bool (*engine_bp_to_atom)(enum engine_id engine_id,
- uint32_t *atom_engine_id);
- void (*assign_control_parameter)(
- const struct command_table_helper *h,
- struct bp_encoder_control *control,
- DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
- bool (*clock_source_id_to_atom)(enum clock_source_id id,
- uint32_t *atom_pll_id);
- bool (*clock_source_id_to_ref_clk_src)(
- enum clock_source_id id,
- uint32_t *ref_clk_src_id);
- uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
- uint8_t (*encoder_id_to_atom)(enum encoder_id id);
- uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
- enum clock_source_id id);
- uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
- uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
- uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
- uint8_t (*phy_id_to_atom)(enum transmitter t);
- uint8_t (*disp_power_gating_action_to_atom)(
- enum bp_pipe_control_action action);
- bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
- uint32_t *atom_clock_type);
- uint8_t (*transmitter_color_depth_to_atom)(enum transmitter_color_depth id);
-};
+#include "command_table_helper_struct.h"
bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h,
enum dce_version dce);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
index 9f587c9..785fcb2 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
@@ -29,35 +29,7 @@
#include "dce80/command_table_helper_dce80.h"
#include "dce110/command_table_helper_dce110.h"
#include "dce112/command_table_helper2_dce112.h"
-
-struct command_table_helper {
- bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
- uint8_t (*encoder_action_to_atom)(
- enum bp_encoder_control_action action);
- uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
- bool enable_dp_audio);
- bool (*engine_bp_to_atom)(enum engine_id engine_id,
- uint32_t *atom_engine_id);
- bool (*clock_source_id_to_atom)(enum clock_source_id id,
- uint32_t *atom_pll_id);
- bool (*clock_source_id_to_ref_clk_src)(
- enum clock_source_id id,
- uint32_t *ref_clk_src_id);
- uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
- uint8_t (*encoder_id_to_atom)(enum encoder_id id);
- uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
- enum clock_source_id id);
- uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
- uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
- uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
- uint8_t (*phy_id_to_atom)(enum transmitter t);
- uint8_t (*disp_power_gating_action_to_atom)(
- enum bp_pipe_control_action action);
- bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
- uint32_t *atom_clock_type);
- uint8_t (*transmitter_color_depth_to_atom)(
- enum transmitter_color_depth id);
-};
+#include "command_table_helper_struct.h"
bool dal_bios_parser_init_cmd_tbl_helper2(const struct command_table_helper **h,
enum dce_version dce);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h
new file mode 100644
index 0000000..1f2c0a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_STRUCT_H__
+#define __DAL_COMMAND_TABLE_HELPER_STRUCT_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper_dce112.h"
+
+struct _DIG_ENCODER_CONTROL_PARAMETERS_V2;
+struct command_table_helper {
+ bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+ uint8_t (*encoder_action_to_atom)(
+ enum bp_encoder_control_action action);
+ uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+ bool enable_dp_audio);
+ bool (*engine_bp_to_atom)(enum engine_id engine_id,
+ uint32_t *atom_engine_id);
+ void (*assign_control_parameter)(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+ struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+ bool (*clock_source_id_to_atom)(enum clock_source_id id,
+ uint32_t *atom_pll_id);
+ bool (*clock_source_id_to_ref_clk_src)(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+ uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+ uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+ uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+ enum clock_source_id id);
+ uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+ uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+ uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+ uint8_t (*phy_id_to_atom)(enum transmitter t);
+ uint8_t (*disp_power_gating_action_to_atom)(
+ enum bp_pipe_control_action action);
+ bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type);
+ uint8_t (*transmitter_color_depth_to_atom)(enum transmitter_color_depth id);
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 7959e38..95f332e 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -24,9 +24,17 @@
# It calculates Bandwidth and Watermarks values for HW programming
#
-CFLAGS_dcn_calcs.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_dcn_calc_auto.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_dcn_calc_math.o := -mhard-float -msse -mpreferred-stack-boundary=4 -Wno-tautological-compare
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+ cc_stack_align := -mpreferred-stack-boundary=4
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+ cc_stack_align := -mstack-alignment=16
+endif
+
+calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+
+CFLAGS_dcn_calcs.o := $(calcs_ccflags)
+CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
+CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 6347712..0cbab81 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -29,6 +29,15 @@
#include "core_types.h"
#include "dal_asic_id.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes does things in odd
+ * ways. Unless something is clearly wrong with it, the code should remain
+ * as-is, as it provides us with a guarantee from HW that it is correct.
+ */
+
/*******************************************************************************
* Private Functions
******************************************************************************/
@@ -76,7 +85,6 @@ static void calculate_bandwidth(
const uint32_t s_mid5 = 5;
const uint32_t s_mid6 = 6;
const uint32_t s_high = 7;
- const uint32_t bus_efficiency = 1;
const uint32_t dmif_chunk_buff_margin = 1;
uint32_t max_chunks_fbc_mode;
@@ -583,7 +591,12 @@ static void calculate_bandwidth(
/* 1 = use channel 0 and 1*/
/* 2 = use channel 0,1,2,3*/
if ((fbc_enabled == 1 && lpt_enabled == 1)) {
- data->dram_efficiency = bw_int_to_fixed(1);
+ if (vbios->memory_type == bw_def_hbm)
+ data->dram_efficiency = bw_frc_to_fixed(5, 10);
+ else
+ data->dram_efficiency = bw_int_to_fixed(1);
+
+
if (dceip->low_power_tiling_mode == 0) {
data->number_of_dram_channels = 1;
}
@@ -598,7 +611,10 @@ static void calculate_bandwidth(
}
}
else {
- data->dram_efficiency = bw_frc_to_fixed(8, 10);
+ if (vbios->memory_type == bw_def_hbm)
+ data->dram_efficiency = bw_frc_to_fixed(5, 10);
+ else
+ data->dram_efficiency = bw_frc_to_fixed(8, 10);
}
/*memory request size and latency hiding:*/
/*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
@@ -614,7 +630,7 @@ static void calculate_bandwidth(
}
else {
/*graphics portrait tiling mode*/
- if ((data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling)) {
+ if (data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling) {
data->orthogonal_rotation[i] = 0;
}
else {
@@ -625,7 +641,7 @@ static void calculate_bandwidth(
else {
if ((i < 4)) {
/*underlay landscape tiling mode is only supported*/
- if ((data->underlay_micro_tile_mode == bw_def_display_micro_tiling)) {
+ if (data->underlay_micro_tile_mode == bw_def_display_micro_tiling) {
data->orthogonal_rotation[i] = 0;
}
else {
@@ -634,7 +650,7 @@ static void calculate_bandwidth(
}
else {
/*graphics landscape tiling mode*/
- if ((data->graphics_micro_tile_mode == bw_def_display_micro_tiling)) {
+ if (data->graphics_micro_tile_mode == bw_def_display_micro_tiling) {
data->orthogonal_rotation[i] = 0;
}
else {
@@ -938,14 +954,14 @@ static void calculate_bandwidth(
}
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
- if ((data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0)) {
+ if (data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0) {
/*set maximum chunk limit if only one graphic pipe is enabled*/
data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127);
}
else {
data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1));
/*clamp maximum chunk limit in the graphic display pipe*/
- if ((i >= 4)) {
+ if (i >= 4) {
data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]);
}
}
@@ -1162,9 +1178,9 @@ static void calculate_bandwidth(
}
for (i = 0; i <= 2; i++) {
for (j = 0; j <= 7; j++) {
- data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))));
if (data->d1_display_write_back_dwb_enable == 1) {
- data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width))));
}
}
}
@@ -1249,6 +1265,16 @@ static void calculate_bandwidth(
/* / (dispclk - display bw)*/
/*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
/*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
+
+ /*initialize variables*/
+ number_of_displays_enabled = 0;
+ number_of_displays_enabled_with_margin = 0;
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ number_of_displays_enabled = number_of_displays_enabled + 1;
+ }
+ data->display_pstate_change_enable[k] = 0;
+ }
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
@@ -1267,7 +1293,10 @@ static void calculate_bandwidth(
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
- data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ if (number_of_displays_enabled > 2)
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ else
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
}
else {
data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
@@ -1328,25 +1357,16 @@ static void calculate_bandwidth(
/*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/
for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
if (data->enable[i]) {
- if ((dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
- data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1) {
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
}
else {
/*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
- data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency));
}
data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
}
}
- /*initialize variables*/
- number_of_displays_enabled = 0;
- number_of_displays_enabled_with_margin = 0;
- for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
- if (data->enable[k]) {
- number_of_displays_enabled = number_of_displays_enabled + 1;
- }
- data->display_pstate_change_enable[k] = 0;
- }
for (i = 0; i <= 2; i++) {
for (j = 0; j <= 7; j++) {
data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
@@ -1361,10 +1381,11 @@ static void calculate_bandwidth(
/*determine the minimum dram clock change margin for each set of clock frequencies*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
/*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
- data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
- if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
}
}
}
@@ -1374,10 +1395,11 @@ static void calculate_bandwidth(
/*determine the minimum dram clock change margin for each display pipe*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
/*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
- data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
- if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]);
}
}
}
@@ -1387,7 +1409,7 @@ static void calculate_bandwidth(
}
/*determine the number of displays with margin to switch in the v_active region*/
for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
- if ((data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1)) {
+ if (data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1) {
number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1;
}
}
@@ -1411,7 +1433,7 @@ static void calculate_bandwidth(
data->displays_with_same_mode[i] = bw_int_to_fixed(0);
if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
- if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
+ if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
}
}
@@ -1426,19 +1448,38 @@ static void calculate_bandwidth(
/*aligned displays with the same timing.*/
/*the display(s) with the negative margin can be switched in the v_blank region while the other*/
/*displays are in v_blank or v_active.*/
- if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) {
+ if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) {
data->nbp_state_change_enable = bw_def_yes;
}
else {
data->nbp_state_change_enable = bw_def_no;
}
/*dram clock change is possible only in vblank if all displays are aligned and have no margin*/
- if ((number_of_aligned_displays_with_no_margin == number_of_displays_enabled)) {
+ if (number_of_aligned_displays_with_no_margin == number_of_displays_enabled) {
nbp_state_change_enable_blank = bw_def_yes;
}
else {
nbp_state_change_enable_blank = bw_def_no;
}
+
+ /*average bandwidth*/
+ /*the average bandwidth with no compression over the vertical active time is the source width times the bytes per pixel, divided by the line time, multiplied by the vertical scale ratio and by the ratio of bytes per request to useful bytes per request.*/
+ /*the average bandwidth with compression is the same, divided by the compression ratio*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
+ }
+ }
+ data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
+ data->total_average_bandwidth = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
+ data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
+ }
+ }
+
/*required yclk(pclk)*/
/*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queque size*/
/*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
@@ -1461,7 +1502,7 @@ static void calculate_bandwidth(
}
}
/*compute minimum time to read one chunk from the dmif buffer*/
- if ((number_of_displays_enabled > 2)) {
+ if (number_of_displays_enabled > 2) {
data->chunk_request_delay = 0;
}
else {
@@ -1488,17 +1529,20 @@ static void calculate_bandwidth(
}
else {
data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
- if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
+ if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
+ && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
yclk_message = bw_fixed_to_int(vbios->low_yclk);
data->y_clk_level = low;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
- else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
+ && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
yclk_message = bw_fixed_to_int(vbios->mid_yclk);
data->y_clk_level = mid;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
}
- else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels)))
+ && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
yclk_message = bw_fixed_to_int(vbios->high_yclk);
data->y_clk_level = high;
data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
@@ -1514,8 +1558,8 @@ static void calculate_bandwidth(
/*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in foresaking dram speed/nb p-state change.*/
/*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/
/*for dmif, pte and cursor requests have to be included.*/
- data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
- data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
+ data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width);
if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
data->required_sclk = bw_int_to_fixed(9999);
sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
@@ -1528,42 +1572,56 @@ static void calculate_bandwidth(
}
else {
data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
- if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
+ if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[low]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_low;
data->sclk_level = s_low;
data->required_sclk = vbios->low_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[mid]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid1;
data->required_sclk = vbios->mid1_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid2;
data->required_sclk = vbios->mid2_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid3;
data->required_sclk = vbios->mid3_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid4;
data->required_sclk = vbios->mid4_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid5;
data->required_sclk = vbios->mid5_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
sclk_message = bw_def_mid;
data->sclk_level = s_mid6;
data->required_sclk = vbios->mid6_sclk;
}
- else if (bw_ltn(data->required_sclk, sclk[s_high])) {
+ else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_high])) {
+ sclk_message = bw_def_high;
+ data->sclk_level = s_high;
+ data->required_sclk = vbios->high_sclk;
+ }
+ else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width))
+ && bw_ltn(data->required_sclk, sclk[s_high])) {
sclk_message = bw_def_high;
data->sclk_level = s_high;
data->required_sclk = vbios->high_sclk;
@@ -1672,7 +1730,7 @@ static void calculate_bandwidth(
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
}
- if (data->nbp_state_change_enable == bw_def_yes) {
+ if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) {
data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
}
@@ -1795,7 +1853,7 @@ static void calculate_bandwidth(
data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
/*unconditionally remove black out time from the nb p_state watermark*/
- if ((data->display_pstate_change_enable[i] == 1)) {
+ if (data->display_pstate_change_enable[i] == 1) {
data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
}
else {
@@ -1807,7 +1865,7 @@ static void calculate_bandwidth(
data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
- if ((data->display_pstate_change_enable[i] == 1)) {
+ if (data->display_pstate_change_enable[i] == 1) {
data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
}
else {
@@ -1852,23 +1910,6 @@ static void calculate_bandwidth(
else {
data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
}
- /*average bandwidth*/
- /*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/
- /*the average bandwidth with compression is the same, divided by the compression ratio*/
- for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
- if (data->enable[i]) {
- data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
- data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
- }
- }
- data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
- data->total_average_bandwidth = bw_int_to_fixed(0);
- for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
- if (data->enable[i]) {
- data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
- data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
- }
- }
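
The loop removed here is the per-surface average-bandwidth computation the deleted comment describes; since the sclk selection above now consumes total_average_bandwidth_no_compression, the same loop presumably runs earlier in calculate_bandwidth (in a hunk not shown here). A standalone restatement of the formula with plain doubles, illustrative names and numbers:

#include <stdio.h>

/* average bandwidth (no compression) = source width * bytes/pixel / line time
 *                                      * vertical scale ratio
 *                                      * bytes per request / useful bytes per request */
static double average_bw_no_compression(double src_width_chunks, double bytes_per_pixel,
					double h_total, double pixel_rate_mhz,
					double vsr, double bytes_per_request,
					double useful_bytes_per_request)
{
	double line_time_us = h_total / pixel_rate_mhz;

	return src_width_chunks * bytes_per_pixel / line_time_us * vsr
	       * bytes_per_request / useful_bytes_per_request;
}

int main(void)
{
	/* 1920-wide 4-byte/pixel surface, 2200 htotal at 148.5 MHz, no scaling. */
	double bw = average_bw_no_compression(1920, 4, 2200, 148.5, 1.0, 64, 64);

	printf("average bandwidth: %.0f MB/s (uncompressed)\n", bw);
	/* The compressed figure is this value divided by the compression rate. */
	return 0;
}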
/*stutter efficiency*/
/*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
/*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
@@ -1896,7 +1937,7 @@ static void calculate_bandwidth(
data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
}
}
- data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32))));
+ data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width));
data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
data->time_in_self_refresh = data->min_stutter_refresh_duration;
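
The stutter burst is now assumed to drain at the raw return-bus rate (sclk times data_return_bus_width) rather than the minimum of that and the derated DRAM bandwidth. A toy calculation of how the burst time feeds the stutter-efficiency figure, with plain doubles and made-up numbers:

#include <stdio.h>

int main(void)
{
	double stutter_buffer_bytes = 49152.0; /* total stutter DMIF buffer size */
	double sclk_mhz = 600.0;
	double return_bus_width = 32.0;        /* bytes per sclk */
	double refresh_duration_us = 100.0;    /* frame-average time in self-refresh */
	double sr_exit_latency_us = 10.0;

	/* New formula: burst time = buffer size / (sclk * return bus width). */
	double burst_us = stutter_buffer_bytes / (sclk_mhz * return_bus_width);
	double cycle_us = refresh_duration_us + sr_exit_latency_us + burst_us;

	printf("stutter burst: %.2f us, efficiency: %.1f%%\n",
	       burst_us, 100.0 * refresh_duration_us / cycle_us);
	return 0;
}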
@@ -1948,7 +1989,7 @@ static void calculate_bandwidth(
for (i = 1; i <= 5; i++) {
data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
- data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
}
else {
data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
@@ -2024,9 +2065,12 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.cursor_width = 32;
vbios.average_compression_rate = 4;
vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
- vbios.blackout_duration = bw_int_to_fixed(18); /* us */
- vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2137,6 +2181,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2250,6 +2297,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2357,9 +2407,12 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.cursor_width = 32;
vbios.average_compression_rate = 4;
vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
- vbios.blackout_duration = bw_int_to_fixed(18); /* us */
- vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
dceip.dmif_pipe_en_fbc_chunk_tracker = false;
@@ -2470,6 +2523,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
vbios.blackout_duration = bw_int_to_fixed(0); /* us */
vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
dceip.large_cursor = false;
dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
dceip.dmif_pipe_en_fbc_chunk_tracker = true;
@@ -2588,6 +2644,7 @@ static void populate_initial_data(
data->graphics_tiling_mode = bw_def_tiled;
data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
+ data->increase_voltage_to_support_mclk_switch = true;
/* Pipes with underlay first */
for (i = 0; i < pipe_count; i++) {
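
populate_initial_data now defaults increase_voltage_to_support_mclk_switch to true; when a caller clears it, the sclk and dispclk selection earlier in this file no longer raises the voltage level just to preserve an mclk-switch margin. A minimal sketch of how the flag short-circuits the nbp-state clause (plain C, illustrative reduction of the real condition):

#include <stdbool.h>
#include <stdio.h>

static bool nbp_clause_ok(bool increase_voltage_to_support_mclk_switch,
			  bool nbp_state_change_enabled,
			  bool margin_ok_at_this_level)
{
	/* With the flag cleared, the clause passes regardless of margin,
	 * so the lower voltage level can still be chosen. */
	return !increase_voltage_to_support_mclk_switch ||
	       !nbp_state_change_enabled ||
	       margin_ok_at_this_level;
}

int main(void)
{
	printf("%d\n", nbp_clause_ok(true, true, false));  /* 0: must move up a level */
	printf("%d\n", nbp_clause_ok(false, true, false)); /* 1: stay low, skip the switch */
	return 0;
}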
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
index 626f9cf..5e2ea12 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -27,6 +27,15 @@
#include "dcn_calc_auto.h"
#include "dcn_calc_math.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
/*REVISION#250*/
void scaler_settings_calculation(struct dcn_bw_internal_vars *v)
{
@@ -773,11 +782,11 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
v->dst_y_after_scaler = 0.0;
}
v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep;
- v->v_update_offset[k] =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->v_update_offset[k][j] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]);
- v->v_update_width[k] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
- v->v_ready_offset[k] =dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
- v->time_setup = (v->v_update_offset[k] + v->v_update_width[k] + v->v_ready_offset[k]) / v->pixel_clock[k];
+ v->v_update_width[k][j] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
+ v->v_ready_offset[k][j] = dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
+ v->time_setup = (v->v_update_offset[k][j] + v->v_update_width[k][j] + v->v_ready_offset[k][j]) / v->pixel_clock[k];
v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / v->return_bw_per_state[i];
if (v->pte_enable == dcn_bw_yes) {
v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i];
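
The v_update_offset, v_update_width and v_ready_offset arrays a few lines up gain a second index [j], which in this loop presumably distinguishes the single-DPP and split-DPP configurations (the dcn_calcs.c consumer later selects index 1 only when dpp_per_plane is 2), so the two cases no longer overwrite each other. A tiny illustrative sketch of that selection, with made-up values:

#include <stdio.h>

#define MAX_PLANES 6

int main(void)
{
	/* One row per plane, one column per split configuration
	 * (0 = single DPP, 1 = pipe split across two DPPs). */
	double v_update_offset[MAX_PLANES][2] = { { 550.0, 560.0 } };
	int plane = 0;
	int dpp_per_plane = 2;

	/* Same selection the consumer uses: column 1 only when the plane is split. */
	double chosen = v_update_offset[plane][dpp_per_plane == 2 ? 1 : 0];

	printf("vupdate offset for plane %d: %.0f\n", plane, chosen);
	return 0;
}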
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
index b6abe0f..7600a4a 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
@@ -25,37 +25,44 @@
#include "dcn_calc_math.h"
+#define isNaN(number) ((number) != (number))
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
float dcn_bw_mod(const float arg1, const float arg2)
{
- if (arg1 != arg1)
+ if (isNaN(arg1))
return arg2;
- if (arg2 != arg2)
+ if (isNaN(arg2))
return arg1;
return arg1 - arg1 * ((int) (arg1 / arg2));
}
float dcn_bw_min2(const float arg1, const float arg2)
{
- if (arg1 != arg1)
+ if (isNaN(arg1))
return arg2;
- if (arg2 != arg2)
+ if (isNaN(arg2))
return arg1;
return arg1 < arg2 ? arg1 : arg2;
}
unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2)
{
- if (arg1 != arg1)
- return arg2;
- if (arg2 != arg2)
- return arg1;
return arg1 > arg2 ? arg1 : arg2;
}
float dcn_bw_max2(const float arg1, const float arg2)
{
- if (arg1 != arg1)
+ if (isNaN(arg1))
return arg2;
- if (arg2 != arg2)
+ if (isNaN(arg2))
return arg1;
return arg1 > arg2 ? arg1 : arg2;
}
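
The new isNaN macro relies on the standard self-comparison idiom: an IEEE 754 NaN is unordered even against itself, so (x) != (x) is true only for NaN, without needing a libm isnan() in kernel context. dcn_bw_max drops the check entirely because an unsigned int can never be NaN. A small user-space sketch of the same idiom:

#include <stdio.h>
#include <math.h>

#define isNaN(number) ((number) != (number))

/* Same NaN-tolerant min as dcn_bw_min2: a NaN argument yields the other one. */
static float nan_safe_min(float a, float b)
{
	if (isNaN(a))
		return b;
	if (isNaN(b))
		return a;
	return a < b ? a : b;
}

int main(void)
{
	printf("%.1f\n", nan_safe_min(3.0f, 7.0f)); /* 3.0 */
	printf("%.1f\n", nan_safe_min(NAN, 7.0f));  /* 7.0 */
	return 0;
}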
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index b142629..4bb43a3 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -33,6 +33,17 @@
#include "dcn10/dcn10_resource.h"
#include "dcn_calc_math.h"
+#define DC_LOGGER \
+ dc->ctx->logger
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
/* Defaults from spreadsheet rev#247 */
const struct dcn_soc_bounding_box dcn10_soc_defaults = {
/* latencies */
@@ -432,25 +443,13 @@ static void dcn_bw_calc_rq_dlg_ttu(
input.clks_cfg.dcfclk_mhz = v->dcfclk;
input.clks_cfg.dispclk_mhz = v->dispclk;
input.clks_cfg.dppclk_mhz = v->dppclk;
- input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+ input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz / 1000.0;
input.clks_cfg.socclk_mhz = v->socclk;
input.clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;
input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
//input[in_idx].dout.output_standard;
- switch (v->output_deep_color[in_idx]) {
- case dcn_bw_encoder_12bpc:
- input.dout.output_bpc = dm_out_12;
- break;
- case dcn_bw_encoder_10bpc:
- input.dout.output_bpc = dm_out_10;
- break;
- case dcn_bw_encoder_8bpc:
- default:
- input.dout.output_bpc = dm_out_8;
- break;
- }
/*todo: soc->sr_enter_plus_exit_time??*/
dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
@@ -489,6 +488,7 @@ static void split_stream_across_pipes(
secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
if (primary_pipe->bottom_pipe) {
ASSERT(primary_pipe->bottom_pipe != secondary_pipe);
secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
@@ -628,7 +628,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
return updated;
}
-void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
+static void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
{
/*
* disable optional pipe split by lower dispclk bounding box
@@ -637,7 +637,7 @@ void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
v->max_dispclk[0] = v->max_dppclk_vmin0p65;
}
-void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
+static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
unsigned int pixel_rate_khz)
{
float pixel_rate_mhz = pixel_rate_khz / 1000;
@@ -650,25 +650,20 @@ void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
v->max_dppclk[0] = pixel_rate_mhz;
}
-void hack_bounding_box(struct dcn_bw_internal_vars *v,
+static void hack_bounding_box(struct dcn_bw_internal_vars *v,
struct dc_debug *dbg,
struct dc_state *context)
{
- if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) {
+ if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
hack_disable_optional_pipe_split(v);
- }
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP &&
- context->stream_count >= 2) {
+ context->stream_count >= 2)
hack_disable_optional_pipe_split(v);
- }
if (context->stream_count == 1 &&
- dbg->force_single_disp_pipe_split) {
- struct dc_stream_state *stream0 = context->streams[0];
-
- hack_force_pipe_split(v, stream0->timing.pix_clk_khz);
- }
+ dbg->force_single_disp_pipe_split)
+ hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_khz);
}
bool dcn_validate_bandwidth(
@@ -802,23 +797,10 @@ bool dcn_validate_bandwidth(
v->phyclk_per_state[2] = v->phyclkv_nom0p8;
v->phyclk_per_state[1] = v->phyclkv_mid0p72;
v->phyclk_per_state[0] = v->phyclkv_min0p65;
-
- hack_bounding_box(v, &dc->debug, context);
-
- if (v->voltage_override == dcn_bw_v_max0p9) {
- v->voltage_override_level = number_of_states - 1;
- } else if (v->voltage_override == dcn_bw_v_nom0p8) {
- v->voltage_override_level = number_of_states - 2;
- } else if (v->voltage_override == dcn_bw_v_mid0p72) {
- v->voltage_override_level = number_of_states - 3;
- } else {
- v->voltage_override_level = 0;
- }
v->synchronized_vblank = dcn_bw_no;
v->ta_pscalculation = dcn_bw_override;
v->allow_different_hratio_vratio = dcn_bw_yes;
-
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -890,6 +872,17 @@ bool dcn_validate_bandwidth(
+ pipe->bottom_pipe->plane_res.scl_data.recout.width;
}
+ if (pipe->plane_state->rotation % 2 == 0) {
+ ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
+ || v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]);
+ ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
+ || v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]);
+ } else {
+ ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
+ || v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]);
+ ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
+ || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
+ }
v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
pipe->plane_state->format);
@@ -940,8 +933,19 @@ bool dcn_validate_bandwidth(
v->number_of_active_planes = input_idx;
scaler_settings_calculation(v);
+
+ hack_bounding_box(v, &dc->debug, context);
+
mode_support_and_system_configuration(v);
+ /* Unhack dppclk: don't bother with trying to pipe split if we cannot maintain dpm0 */
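+	/* Note: this retries mode support once with the vmin dppclk cap restored,
+	 * trading the optional pipe split for staying at the lowest voltage level. */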
+ if (v->voltage_level != 0
+ && context->stream_count == 1
+ && dc->debug.force_single_disp_pipe_split) {
+ v->max_dppclk[0] = v->max_dppclk_vmin0p65;
+ mode_support_and_system_configuration(v);
+ }
+
if (v->voltage_level == 0 &&
(dc->debug.sr_exit_time_dpm0_ns
|| dc->debug.sr_enter_plus_exit_time_dpm0_ns)) {
@@ -979,8 +983,6 @@ bool dcn_validate_bandwidth(
context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
- context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
@@ -994,7 +996,26 @@ bool dcn_validate_bandwidth(
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.calc_clk.dppclk_div = (int)(v->dispclk_dppclk_ratio) == 2;
+ context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
+
+ switch (v->voltage_level) {
+ case 0:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
+ break;
+ case 1:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
+ break;
+ case 2:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
+ break;
+ default:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
+ break;
+ }
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1006,9 +1027,9 @@ bool dcn_validate_bandwidth(
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
continue;
- pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
- pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
- pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
@@ -1034,6 +1055,8 @@ bool dcn_validate_bandwidth(
if (pipe->plane_state) {
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
+ pipe->plane_state->update_flags.bits.full_update = 1;
+
if (v->dpp_per_plane[input_idx] == 2 ||
((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
@@ -1045,9 +1068,9 @@ bool dcn_validate_bandwidth(
TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
/* update previously split pipe */
- hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
- hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
- hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
@@ -1073,6 +1096,9 @@ bool dcn_validate_bandwidth(
hsplit_pipe->stream = NULL;
hsplit_pipe->top_pipe = NULL;
hsplit_pipe->bottom_pipe = NULL;
+ /* Clear plane_res and stream_res */
+ memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
+ memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
resource_build_scaling_params(pipe);
}
/* for now important to do this after pipe split for building e2e params */
@@ -1235,45 +1261,66 @@ unsigned int dcn_find_dcfclk_suits_all(
else
dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;
- dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
- "\tdcf_clk for voltage = %d\n", dcf_clk);
+ DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk);
return dcf_clk;
}
+static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+{
+ int i;
+
+ if (clks->num_levels == 0)
+ return false;
+
+ for (i = 0; i < clks->num_levels; i++)
+ /* Ensure that the result is sane */
+ if (clks->data[i].clocks_in_khz == 0)
+ return false;
+
+ return true;
+}
+
void dcn_bw_update_from_pplib(struct dc *dc)
{
struct dc_context *ctx = dc->ctx;
- struct dm_pp_clock_levels_with_voltage clks = {0};
+ struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+ bool res;
kernel_fpu_begin();
/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
-
- if (dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_FCLK, &clks) &&
- clks.num_levels != 0) {
- ASSERT(clks.num_levels >= 3);
- dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (clks.data[0].clocks_in_khz / 1000.0) / 1000.0;
- if (clks.num_levels > 2) {
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
- (clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
- } else {
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
- (clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
- }
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+
+ if (res)
+ res = verify_clock_values(&fclks);
+
+ if (res) {
+ ASSERT(fclks.num_levels >= 3);
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+ (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
- (clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
- (clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
} else
BREAK_TO_DEBUGGER();
- if (dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_DCFCLK, &clks) &&
- clks.num_levels >= 3) {
- dc->dcn_soc->dcfclkv_min0p65 = clks.data[0].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_mid0p72 = clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_nom0p8 = clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_max0p9 = clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0;
+
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+
+ if (res)
+ res = verify_clock_values(&dcfclks);
+
+ if (res && dcfclks.num_levels >= 3) {
+ dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
} else
BREAK_TO_DEBUGGER();
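
verify_clock_values rejects empty tables and any zero clocks_in_khz entry, so a bogus pplib report now falls through to BREAK_TO_DEBUGGER() instead of seeding the bounding box with zeros. The fclk expression also folds the old two-branch mid-level selection into one line; a sketch of the index it picks (assuming it mirrors the expression above):

#include <stdio.h>

/* fclk level used for the vmid0p72 bound: third-from-last when more than two
 * levels are reported, otherwise second-from-last (then shared with vnom). */
static int vmid_fclk_index(int num_levels)
{
	return num_levels - (num_levels > 2 ? 3 : 2);
}

int main(void)
{
	printf("%d\n", vmid_fclk_index(4)); /* 1 */
	printf("%d\n", vmid_fclk_index(3)); /* 0 */
	printf("%d\n", vmid_fclk_index(2)); /* 0 */
	return 0;
}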
@@ -1412,8 +1459,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
kernel_fpu_begin();
- dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
- "sr_exit_time: %d ns\n"
+ DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %d ns\n"
"sr_enter_plus_exit_time: %d ns\n"
"urgent_latency: %d ns\n"
"write_back_latency: %d ns\n"
@@ -1481,8 +1527,7 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dcn_soc->vmm_page_size,
dc->dcn_soc->dram_clock_change_latency * 1000,
dc->dcn_soc->return_bus_width);
- dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
- "rob_buffer_size_in_kbyte: %d\n"
+ DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %d\n"
"det_buffer_size_in_kbyte: %d\n"
"dpp_output_buffer_pixels: %d\n"
"opp_output_buffer_lines: %d\n"
@@ -1550,35 +1595,6 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one,
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed,
dc->dcn_ip->dcfclk_cstate_latency);
- dc->dml.soc.vmin.socclk_mhz = dc->dcn_soc->socclk;
- dc->dml.soc.vmid.socclk_mhz = dc->dcn_soc->socclk;
- dc->dml.soc.vnom.socclk_mhz = dc->dcn_soc->socclk;
- dc->dml.soc.vmax.socclk_mhz = dc->dcn_soc->socclk;
-
- dc->dml.soc.vmin.dcfclk_mhz = dc->dcn_soc->dcfclkv_min0p65;
- dc->dml.soc.vmid.dcfclk_mhz = dc->dcn_soc->dcfclkv_mid0p72;
- dc->dml.soc.vnom.dcfclk_mhz = dc->dcn_soc->dcfclkv_nom0p8;
- dc->dml.soc.vmax.dcfclk_mhz = dc->dcn_soc->dcfclkv_max0p9;
-
- dc->dml.soc.vmin.dispclk_mhz = dc->dcn_soc->max_dispclk_vmin0p65;
- dc->dml.soc.vmid.dispclk_mhz = dc->dcn_soc->max_dispclk_vmid0p72;
- dc->dml.soc.vnom.dispclk_mhz = dc->dcn_soc->max_dispclk_vnom0p8;
- dc->dml.soc.vmax.dispclk_mhz = dc->dcn_soc->max_dispclk_vmax0p9;
-
- dc->dml.soc.vmin.dppclk_mhz = dc->dcn_soc->max_dppclk_vmin0p65;
- dc->dml.soc.vmid.dppclk_mhz = dc->dcn_soc->max_dppclk_vmid0p72;
- dc->dml.soc.vnom.dppclk_mhz = dc->dcn_soc->max_dppclk_vnom0p8;
- dc->dml.soc.vmax.dppclk_mhz = dc->dcn_soc->max_dppclk_vmax0p9;
-
- dc->dml.soc.vmin.phyclk_mhz = dc->dcn_soc->phyclkv_min0p65;
- dc->dml.soc.vmid.phyclk_mhz = dc->dcn_soc->phyclkv_mid0p72;
- dc->dml.soc.vnom.phyclk_mhz = dc->dcn_soc->phyclkv_nom0p8;
- dc->dml.soc.vmax.phyclk_mhz = dc->dcn_soc->phyclkv_max0p9;
-
- dc->dml.soc.vmin.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
- dc->dml.soc.vmid.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
- dc->dml.soc.vnom.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
- dc->dml.soc.vmax.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7240db2..63a3d46 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -29,6 +29,7 @@
#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
+#include "dce/dce_hwseq.h"
#include "resource.h"
@@ -38,8 +39,10 @@
#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
+#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
+#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "link_hwss.h"
@@ -49,11 +52,20 @@
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"
+#define DC_LOGGER \
+ dc->ctx->logger
/*******************************************************************************
* Private functions
******************************************************************************/
+
+static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+{
+ if (new > *original)
+ *original = new;
+}
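
A hedged standalone sketch of how the new helper behaves; the real enum lives in the dc headers and is assumed here to be ordered FAST < MED < FULL:

enum surface_update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

static inline void elevate_update_type(enum surface_update_type *original,
					enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

int main(void)
{
	enum surface_update_type type = UPDATE_TYPE_FAST;

	elevate_update_type(&type, UPDATE_TYPE_MED);  /* escalates to MED */
	elevate_update_type(&type, UPDATE_TYPE_FAST); /* stays MED: never downgrades */
	return (int)type;
}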
+
static void destroy_links(struct dc *dc)
{
uint32_t i;
@@ -157,7 +169,7 @@ failed_alloc:
return false;
}
-static bool stream_adjust_vmin_vmax(struct dc *dc,
+bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state **streams, int num_streams,
int vmin, int vmax)
{
@@ -182,7 +194,7 @@ static bool stream_adjust_vmin_vmax(struct dc *dc,
return ret;
}
-static bool stream_get_crtc_position(struct dc *dc,
+bool dc_stream_get_crtc_position(struct dc *dc,
struct dc_stream_state **streams, int num_streams,
unsigned int *v_pos, unsigned int *nom_v_pos)
{
@@ -207,136 +219,92 @@ static bool stream_get_crtc_position(struct dc *dc,
return ret;
}
-static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
-{
- int i = 0;
- bool ret = false;
- struct pipe_ctx *pipes;
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
- pipes = &dc->current_state->res_ctx.pipe_ctx[i];
- dc->hwss.program_gamut_remap(pipes);
- ret = true;
- }
- }
-
- return ret;
-}
-
-static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
+/**
+ * dc_stream_configure_crc: Configure CRC capture for the given stream.
+ * @dc: DC Object
+ * @stream: The stream to configure CRC on.
+ * @enable: Enable CRC if true, disable otherwise.
+ * @continuous: Capture CRC on every frame if true. Otherwise, only capture
+ * once.
+ *
+ * By default, only CRC0 is configured, and the entire frame is used to
+ * calculate the crc.
+ */
+bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
+ bool enable, bool continuous)
{
- int i = 0;
- bool ret = false;
- struct pipe_ctx *pipes;
+ int i;
+ struct pipe_ctx *pipe;
+ struct crc_params param;
+ struct timing_generator *tg;
for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream
- == stream) {
-
- pipes = &dc->current_state->res_ctx.pipe_ctx[i];
- dc->hwss.program_csc_matrix(pipes,
- stream->output_color_space,
- stream->csc_color_matrix.matrix);
- ret = true;
- }
- }
-
- return ret;
-}
-
-static void set_static_screen_events(struct dc *dc,
- struct dc_stream_state **streams,
- int num_streams,
- const struct dc_static_screen_events *events)
-{
- int i = 0;
- int j = 0;
- struct pipe_ctx *pipes_affected[MAX_PIPES];
- int num_pipes_affected = 0;
-
- for (i = 0; i < num_streams; i++) {
- struct dc_stream_state *stream = streams[i];
-
- for (j = 0; j < MAX_PIPES; j++) {
- if (dc->current_state->res_ctx.pipe_ctx[j].stream
- == stream) {
- pipes_affected[num_pipes_affected++] =
- &dc->current_state->res_ctx.pipe_ctx[j];
- }
- }
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream)
+ break;
}
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
- dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+ /* Always capture the full frame */
+ param.windowa_x_start = 0;
+ param.windowa_y_start = 0;
+ param.windowa_x_end = pipe->stream->timing.h_addressable;
+ param.windowa_y_end = pipe->stream->timing.v_addressable;
+ param.windowb_x_start = 0;
+ param.windowb_y_start = 0;
+ param.windowb_x_end = pipe->stream->timing.h_addressable;
+ param.windowb_y_end = pipe->stream->timing.v_addressable;
+
+ /* Default to the union of both windows */
+ param.selection = UNION_WINDOW_A_B;
+ param.continuous_mode = continuous;
+ param.enable = enable;
+
+ tg = pipe->stream_res.tg;
+
+ /* Only call if supported */
+ if (tg->funcs->configure_crc)
+ return tg->funcs->configure_crc(tg, &param);
+ DC_LOG_WARNING("CRC capture not supported.");
+ return false;
}
-static void set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link)
+/**
+ * dc_stream_get_crc: Get CRC values for the given stream.
+ * @dc: DC object
+ * @stream: The DC stream state of the stream to get CRCs from.
+ * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
+ *
+ * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
+ * Return false if stream is not found, or if CRCs are not enabled.
+ */
+bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
-
int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i] == link)
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream)
break;
}
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
- if (i >= dc->link_count)
- ASSERT_CRITICAL(false);
-
- dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
-}
-
-static void perform_link_training(struct dc *dc,
- struct dc_link_settings *link_setting,
- bool skip_video_pattern)
-{
- int i;
-
- for (i = 0; i < dc->link_count; i++)
- dc_link_dp_perform_link_training(
- dc->links[i],
- link_setting,
- skip_video_pattern);
-}
-
-static void set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link)
-{
- link->preferred_link_setting = *link_setting;
- dp_retrain_link_dp_test(link, link_setting, false);
-}
-
-static void enable_hpd(const struct dc_link *link)
-{
- dc_link_dp_enable_hpd(link);
-}
-
-static void disable_hpd(const struct dc_link *link)
-{
- dc_link_dp_disable_hpd(link);
-}
-
+ tg = pipe->stream_res.tg;
-static void set_test_pattern(
- struct dc_link *link,
- enum dp_test_pattern test_pattern,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size)
-{
- if (link != NULL)
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- p_link_settings,
- p_custom_pattern,
- cust_pattern_size);
+ if (tg->funcs->get_crc)
+ return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
+ DC_LOG_WARNING("CRC capture not supported.");
+ return false;
}
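
A hedged caller sketch, not part of the patch, showing how the two new entry points fit together; it assumes the dc headers are already included and elides error handling and frame synchronisation:

static void example_crc_readback(struct dc *dc, struct dc_stream_state *stream)
{
	uint32_t r_cr, g_y, b_cb;

	/* Full-frame CRC0, recomputed every frame. */
	if (!dc_stream_configure_crc(dc, stream, true, true))
		return; /* timing generator has no CRC support */

	/* Read back after at least one frame has been generated. */
	if (!dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
		return; /* CRCs not enabled or stream not found */

	/* r_cr, g_y and b_cb now hold the per-channel CRCs of the last frame. */
}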
-static void set_dither_option(struct dc_stream_state *stream,
+void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option)
{
struct bit_depth_reduction_params params;
@@ -352,7 +320,6 @@ static void set_dither_option(struct dc_stream_state *stream,
}
}
- memset(&params, 0, sizeof(params));
if (!pipes)
return;
if (option > DITHER_OPTION_MAX)
@@ -360,85 +327,45 @@ static void set_dither_option(struct dc_stream_state *stream,
stream->dither_option = option;
- resource_build_bit_depth_reduction_params(stream,
- &params);
+ memset(&params, 0, sizeof(params));
+ resource_build_bit_depth_reduction_params(stream, &params);
stream->bit_depth_params = params;
- pipes->stream_res.opp->funcs->
- opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
-}
-
-void set_dpms(
- struct dc *dc,
- struct dc_stream_state *stream,
- bool dpms_off)
-{
- struct pipe_ctx *pipe_ctx = NULL;
- int i;
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
- pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- break;
- }
- }
- if (!pipe_ctx) {
- ASSERT(0);
- return;
+ if (pipes->plane_res.xfm &&
+ pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
+ pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
+ pipes->plane_res.xfm,
+ pipes->plane_res.scl_data.lb_params.depth,
+ &stream->bit_depth_params);
}
- if (stream->dpms_off != dpms_off) {
- stream->dpms_off = dpms_off;
- if (dpms_off)
- core_link_disable_stream(pipe_ctx,
- KEEP_ACQUIRED_RESOURCE);
- else
- core_link_enable_stream(dc->current_state, pipe_ctx);
- }
+ pipes->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}
-static void allocate_dc_stream_funcs(struct dc *dc)
+void dc_stream_set_static_screen_events(struct dc *dc,
+ struct dc_stream_state **streams,
+ int num_streams,
+ const struct dc_static_screen_events *events)
{
- if (dc->hwss.set_drr != NULL) {
- dc->stream_funcs.adjust_vmin_vmax =
- stream_adjust_vmin_vmax;
- }
-
- dc->stream_funcs.set_static_screen_events =
- set_static_screen_events;
-
- dc->stream_funcs.get_crtc_position =
- stream_get_crtc_position;
-
- dc->stream_funcs.set_gamut_remap =
- set_gamut_remap;
-
- dc->stream_funcs.program_csc_matrix =
- program_csc_matrix;
-
- dc->stream_funcs.set_dither_option =
- set_dither_option;
-
- dc->stream_funcs.set_dpms =
- set_dpms;
-
- dc->link_funcs.set_drive_settings =
- set_drive_settings;
-
- dc->link_funcs.perform_link_training =
- perform_link_training;
-
- dc->link_funcs.set_preferred_link_settings =
- set_preferred_link_settings;
+ int i = 0;
+ int j = 0;
+ struct pipe_ctx *pipes_affected[MAX_PIPES];
+ int num_pipes_affected = 0;
- dc->link_funcs.enable_hpd =
- enable_hpd;
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream = streams[i];
- dc->link_funcs.disable_hpd =
- disable_hpd;
+ for (j = 0; j < MAX_PIPES; j++) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].stream
+ == stream) {
+ pipes_affected[num_pipes_affected++] =
+ &dc->current_state->res_ctx.pipe_ctx[j];
+ }
+ }
+ }
- dc->link_funcs.set_test_pattern =
- set_test_pattern;
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}
static void destruct(struct dc *dc)
@@ -485,19 +412,17 @@ static bool construct(struct dc *dc,
const struct dc_init_data *init_params)
{
struct dal_logger *logger;
- struct dc_context *dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
- struct bw_calcs_dceip *dc_dceip = kzalloc(sizeof(*dc_dceip),
- GFP_KERNEL);
- struct bw_calcs_vbios *dc_vbios = kzalloc(sizeof(*dc_vbios),
- GFP_KERNEL);
+ struct dc_context *dc_ctx;
+ struct bw_calcs_dceip *dc_dceip;
+ struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
- struct dcn_soc_bounding_box *dcn_soc = kzalloc(sizeof(*dcn_soc),
- GFP_KERNEL);
- struct dcn_ip_params *dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
#endif
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
if (!dc_dceip) {
dm_error("%s: failed to create dceip\n", __func__);
goto fail;
@@ -505,6 +430,7 @@ static bool construct(struct dc *dc,
dc->bw_dceip = dc_dceip;
+ dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
if (!dc_vbios) {
dm_error("%s: failed to create vbios\n", __func__);
goto fail;
@@ -512,6 +438,7 @@ static bool construct(struct dc *dc,
dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__);
goto fail;
@@ -519,6 +446,7 @@ static bool construct(struct dc *dc,
dc->dcn_soc = dcn_soc;
+ dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
if (!dcn_ip) {
dm_error("%s: failed to create dcn_ip\n", __func__);
goto fail;
@@ -527,11 +455,18 @@ static bool construct(struct dc *dc,
dc->dcn_ip = dcn_ip;
#endif
+ dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
if (!dc_ctx) {
dm_error("%s: failed to create ctx\n", __func__);
goto fail;
}
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+ dc->ctx = dc_ctx;
+
dc->current_state = dc_create_state();
if (!dc->current_state) {
@@ -539,11 +474,6 @@ static bool construct(struct dc *dc,
goto fail;
}
- dc_ctx->cgs_device = init_params->cgs_device;
- dc_ctx->driver_context = init_params->driver;
- dc_ctx->dc = dc;
- dc_ctx->asic_id = init_params->asic_id;
-
/* Create logger */
logger = dal_logger_create(dc_ctx, init_params->log_mask);
@@ -553,14 +483,11 @@ static bool construct(struct dc *dc,
goto fail;
}
dc_ctx->logger = logger;
- dc->ctx = dc_ctx;
- dc->ctx->dce_environment = init_params->dce_environment;
+ dc_ctx->dce_environment = init_params->dce_environment;
dc_version = resource_parse_asic_id(init_params->asic_id);
- dc->ctx->dce_version = dc_version;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
- dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
-#endif
+ dc_ctx->dce_version = dc_version;
+
/* Resource should construct all asic specific resources.
* This should be the only place where we need to parse the asic id
*/
@@ -616,8 +543,6 @@ static bool construct(struct dc *dc,
if (!create_links(dc, init_params->num_virtual_links))
goto fail;
- allocate_dc_stream_funcs(dc);
-
return true;
fail:
@@ -686,11 +611,17 @@ struct dc *dc_create(const struct dc_init_data *init_params)
dc->caps.max_links = dc->link_count;
dc->caps.max_audios = dc->res_pool->audio_count;
+ dc->caps.linear_pitch_alignment = 64;
+
+ /* Populate versioning information */
+ dc->versions.dc_ver = DC_VER;
+
+ if (dc->res_pool->dmcu != NULL)
+ dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
dc->config = init_params->flags;
- dm_logger_write(dc->ctx->logger, LOG_DC,
- "Display Core initialized\n");
+ DC_LOG_DC("Display Core initialized\n");
/* TODO: missing feature to be enabled */
@@ -712,6 +643,30 @@ void dc_destroy(struct dc **dc)
*dc = NULL;
}
+static void enable_timing_multisync(
+ struct dc *dc,
+ struct dc_state *ctx)
+{
+ int i = 0, multisync_count = 0;
+ int pipe_count = dc->res_pool->pipe_count;
+ struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+ if (!ctx->res_ctx.pipe_ctx[i].stream ||
+ !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
+ continue;
+ if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
+ continue;
+ multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
+ multisync_count++;
+ }
+
+ if (multisync_count > 0) {
+ dc->hwss.enable_per_frame_crtc_position_reset(
+ dc, multisync_count, multisync_pipes);
+ }
+}
+
static void program_timing_sync(
struct dc *dc,
struct dc_state *ctx)
@@ -758,7 +713,7 @@ static void program_timing_sync(
for (j = 0; j < group_size; j++) {
struct pipe_ctx *temp;
- if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
if (j == 0)
break;
@@ -771,7 +726,7 @@ static void program_timing_sync(
/* remove any other unblanked pipes as they have already been synced */
for (j = j + 1; j < group_size; j++) {
- if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
@@ -828,7 +783,6 @@ bool dc_enable_stereo(
return ret;
}
-
/*
* Applies given context to HW and copy it into current context.
* It's up to the user to release the src context afterwards.
@@ -838,7 +792,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
struct dc_bios *dcb = dc->ctx->dc_bios;
enum dc_status result = DC_ERROR_UNEXPECTED;
struct pipe_ctx *pipe;
- int i, j, k, l;
+ int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
disable_dangling_plane(dc, context);
@@ -847,11 +801,48 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_streams[i] = context->streams[i];
if (!dcb->funcs->is_accelerated_mode(dcb))
- dc->hwss.enable_accelerated_mode(dc);
+ dc->hwss.enable_accelerated_mode(dc, context);
+
+ dc->hwss.set_bandwidth(dc, context, false);
+
+ /* re-program planes for existing stream, in case we need to
+ * free up plane resource for later use
+ */
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->mode_changed)
+ continue;
+
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context); /* use new pipe config in new context */
+ }
+
+ /* Program hardware */
+ dc->hwss.ready_shared_resources(dc, context);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+ }
+
+ result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (result != DC_OK)
+ return result;
+
+ if (context->stream_count > 1) {
+ enable_timing_multisync(dc, context);
+ program_timing_sync(dc, context);
+ }
+
+ /* Program all planes within new context*/
for (i = 0; i < context->stream_count; i++) {
const struct dc_sink *sink = context->streams[i]->sink;
+ if (!context->streams[i]->mode_changed)
+ continue;
+
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
@@ -880,26 +871,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->streams[i]->timing.pix_clk_khz);
}
- dc->hwss.ready_shared_resources(dc, context);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &context->res_ctx.pipe_ctx[i];
- dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
- }
- result = dc->hwss.apply_ctx_to_hw(dc, context);
-
- program_timing_sync(dc, context);
-
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- for (i = 0; i < context->stream_count; i++) {
- for (j = 0; j < MAX_PIPES; j++) {
- pipe = &context->res_ctx.pipe_ctx[j];
-
- if (!pipe->top_pipe && pipe->stream == context->streams[i])
- dc->hwss.pipe_control_lock(dc, pipe, false);
- }
- }
+ /* pplib is notified if disp_num changed */
+ dc->hwss.set_bandwidth(dc, context, true);
dc_release_state(dc->current_state);
@@ -920,7 +895,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
if (false == context_changed(dc, context))
return DC_OK;
- dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
+ DC_LOG_DC("%s: %d streams\n",
__func__, context->stream_count);
for (i = 0; i < context->stream_count; i++) {
@@ -936,7 +911,6 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
-
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
@@ -945,9 +919,13 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].stream == NULL
- || context->res_ctx.pipe_ctx[i].plane_state == NULL)
- dc->hwss.power_down_front_end(dc, i);
+ if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+ context->res_ctx.pipe_ctx[i].pipe_idx = i;
+ dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+ }
+
+ dc->optimized_required = false;
/* 3rd param should be true, temp w/a for RV*/
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -1014,6 +992,7 @@ bool dc_commit_planes_to_stream(
flip_addr[i].address = plane_states[i]->address;
flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
plane_info[i].color_space = plane_states[i]->color_space;
+ plane_info[i].input_tf = plane_states[i]->input_tf;
plane_info[i].format = plane_states[i]->format;
plane_info[i].plane_size = plane_states[i]->plane_size;
plane_info[i].rotation = plane_states[i]->rotation;
@@ -1118,79 +1097,105 @@ static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
}
}
-static enum surface_update_type get_plane_info_update_type(
- const struct dc_surface_update *u,
- int surface_index)
+static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
- struct dc_plane_info temp_plane_info;
- memset(&temp_plane_info, 0, sizeof(temp_plane_info));
+ union surface_update_flags *update_flags = &u->surface->update_flags;
if (!u->plane_info)
return UPDATE_TYPE_FAST;
- temp_plane_info = *u->plane_info;
+ if (u->plane_info->color_space != u->surface->color_space)
+ update_flags->bits.color_space_change = 1;
- /* Copy all parameters that will cause a full update
- * from current surface, the rest of the parameters
- * from provided plane configuration.
- * Perform memory compare and special validation
- * for those that can cause fast/medium updates
- */
+ if (u->plane_info->input_tf != u->surface->input_tf)
+ update_flags->bits.input_tf_change = 1;
- /* Full update parameters */
- temp_plane_info.color_space = u->surface->color_space;
- temp_plane_info.dcc = u->surface->dcc;
- temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
- temp_plane_info.plane_size = u->surface->plane_size;
- temp_plane_info.rotation = u->surface->rotation;
- temp_plane_info.stereo_format = u->surface->stereo_format;
-
- if (surface_index == 0)
- temp_plane_info.visible = u->plane_info->visible;
- else
- temp_plane_info.visible = u->surface->visible;
-
- if (memcmp(u->plane_info, &temp_plane_info,
- sizeof(struct dc_plane_info)) != 0)
- return UPDATE_TYPE_FULL;
+ if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
+ update_flags->bits.horizontal_mirror_change = 1;
+
+ if (u->plane_info->rotation != u->surface->rotation)
+ update_flags->bits.rotation_change = 1;
+
+ if (u->plane_info->format != u->surface->format)
+ update_flags->bits.pixel_format_change = 1;
+
+ if (u->plane_info->stereo_format != u->surface->stereo_format)
+ update_flags->bits.stereo_format_change = 1;
+
+ if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
+ update_flags->bits.per_pixel_alpha_change = 1;
+
+ if (u->plane_info->dcc.enable != u->surface->dcc.enable
+ || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
+ || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
+ update_flags->bits.dcc_change = 1;
if (pixel_format_to_bpp(u->plane_info->format) !=
- pixel_format_to_bpp(u->surface->format)) {
+ pixel_format_to_bpp(u->surface->format))
/* different bytes per element will require full bandwidth
* and DML calculation
*/
- return UPDATE_TYPE_FULL;
- }
+ update_flags->bits.bpp_change = 1;
+
+ if (u->gamma && dce_use_lut(u->plane_info->format))
+ update_flags->bits.gamma_change = 1;
if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
sizeof(union dc_tiling_info)) != 0) {
+ update_flags->bits.swizzle_change = 1;
/* todo: below are HW dependent, we should add a hook to
* DCE/N resource and validated there.
*/
- if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
/* swizzled mode requires RQ to be setup properly,
* thus need to run DML to calculate RQ settings
*/
- return UPDATE_TYPE_FULL;
- }
+ update_flags->bits.bandwidth_change = 1;
}
+ if (update_flags->bits.rotation_change
+ || update_flags->bits.stereo_format_change
+ || update_flags->bits.pixel_format_change
+ || update_flags->bits.gamma_change
+ || update_flags->bits.bpp_change
+ || update_flags->bits.bandwidth_change
+ || update_flags->bits.output_tf_change)
+ return UPDATE_TYPE_FULL;
+
return UPDATE_TYPE_MED;
}
-static enum surface_update_type get_scaling_info_update_type(
+static enum surface_update_type get_scaling_info_update_type(
const struct dc_surface_update *u)
{
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+
if (!u->scaling_info)
return UPDATE_TYPE_FAST;
- if (u->scaling_info->src_rect.width != u->surface->src_rect.width
- || u->scaling_info->src_rect.height != u->surface->src_rect.height
- || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
+ if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
- || u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
- return UPDATE_TYPE_FULL;
+ || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
+ update_flags->bits.scaling_change = 1;
+
+ if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
+ || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
+ && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
+ || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
+ /* Making dst rect smaller requires a bandwidth change */
+ update_flags->bits.bandwidth_change = 1;
+ }
+
+ if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+ || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
+
+ update_flags->bits.scaling_change = 1;
+ if (u->scaling_info->src_rect.width > u->surface->src_rect.width
+ && u->scaling_info->src_rect.height > u->surface->src_rect.height)
+ /* Making src rect bigger requires a bandwidth change */
+ update_flags->bits.clock_change = 1;
+ }
if (u->scaling_info->src_rect.x != u->surface->src_rect.x
|| u->scaling_info->src_rect.y != u->surface->src_rect.y
@@ -1198,41 +1203,56 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ update_flags->bits.position_change = 1;
+
+ if (update_flags->bits.clock_change
+ || update_flags->bits.bandwidth_change)
+ return UPDATE_TYPE_FULL;
+
+ if (update_flags->bits.scaling_change
+ || update_flags->bits.position_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
}
-static enum surface_update_type det_surface_update(
- const struct dc *dc,
- const struct dc_surface_update *u,
- int surface_index)
+static enum surface_update_type det_surface_update(const struct dc *dc,
+ const struct dc_surface_update *u)
{
const struct dc_state *context = dc->current_state;
- enum surface_update_type type = UPDATE_TYPE_FAST;
+ enum surface_update_type type;
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ union surface_update_flags *update_flags = &u->surface->update_flags;
- if (!is_surface_in_context(context, u->surface))
+ update_flags->raw = 0; // Reset all flags
+
+ if (!is_surface_in_context(context, u->surface)) {
+ update_flags->bits.new_plane = 1;
return UPDATE_TYPE_FULL;
+ }
- type = get_plane_info_update_type(u, surface_index);
- if (overall_type < type)
- overall_type = type;
+ type = get_plane_info_update_type(u);
+ elevate_update_type(&overall_type, type);
type = get_scaling_info_update_type(u);
- if (overall_type < type)
- overall_type = type;
+ elevate_update_type(&overall_type, type);
- if (u->in_transfer_func ||
- u->hdr_static_metadata) {
- if (overall_type < UPDATE_TYPE_MED)
- overall_type = UPDATE_TYPE_MED;
+ if (u->in_transfer_func)
+ update_flags->bits.in_transfer_func_change = 1;
+
+ if (u->input_csc_color_matrix)
+ update_flags->bits.input_csc_change = 1;
+
+ if (update_flags->bits.in_transfer_func_change
+ || update_flags->bits.input_csc_change) {
+ type = UPDATE_TYPE_MED;
+ elevate_update_type(&overall_type, type);
}
return overall_type;
}
-enum surface_update_type dc_check_update_surfaces_for_stream(
+static enum surface_update_type check_update_surfaces_for_stream(
struct dc *dc,
struct dc_surface_update *updates,
int surface_count,
@@ -1250,18 +1270,38 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
for (i = 0 ; i < surface_count; i++) {
enum surface_update_type type =
- det_surface_update(dc, &updates[i], i);
+ det_surface_update(dc, &updates[i]);
if (type == UPDATE_TYPE_FULL)
return type;
- if (overall_type < type)
- overall_type = type;
+ elevate_update_type(&overall_type, type);
}
return overall_type;
}
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status)
+{
+ int i;
+ enum surface_update_type type;
+
+ for (i = 0; i < surface_count; i++)
+ updates[i].surface->update_flags.raw = 0;
+
+ type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
+ if (type == UPDATE_TYPE_FULL)
+ for (i = 0; i < surface_count; i++)
+ updates[i].surface->update_flags.bits.full_update = 1;
+
+ return type;
+}
+
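/*
 * Illustrative sketch (not part of this patch): the classification above
 * depends on UPDATE_TYPE_FAST < UPDATE_TYPE_MED < UPDATE_TYPE_FULL forming
 * an ordered severity scale, and on elevate_update_type() only ever raising
 * the overall type.  The enum ordering and helper body below are assumptions
 * based on how this hunk uses them, not copied from the driver headers.
 */
enum surface_update_type {
	UPDATE_TYPE_FAST,	/* flip/address update only */
	UPDATE_TYPE_MED,	/* partial front-end reprogramming */
	UPDATE_TYPE_FULL,	/* full revalidation and reprogramming */
};

/* Raise *overall to at least new_type; never lower it. */
static void elevate_update_type(enum surface_update_type *overall,
				enum surface_update_type new_type)
{
	if (new_type > *overall)
		*overall = new_type;
}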
static struct dc_stream_status *stream_get_status(
struct dc_state *ctx,
struct dc_stream_state *stream)
@@ -1289,20 +1329,13 @@ static void commit_planes_for_stream(struct dc *dc,
struct dc_state *context)
{
int i, j;
+ struct pipe_ctx *top_pipe_to_program = NULL;
if (update_type == UPDATE_TYPE_FULL) {
dc->hwss.set_bandwidth(dc, context, false);
context_clock_trace(dc, context);
}
- if (update_type > UPDATE_TYPE_FAST) {
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
- }
- }
-
if (surface_count == 0) {
/*
* In case of turning off screen, no need to program front end a second time.
@@ -1312,103 +1345,83 @@ static void commit_planes_for_stream(struct dc *dc,
return;
}
- /* Lock pipes for provided surfaces, or all active if full update*/
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *plane_state = srf_updates[i].surface;
-
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
- continue;
- if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
- continue;
-
- dc->hwss.pipe_control_lock(
- dc,
- pipe_ctx,
- true);
- }
- if (update_type == UPDATE_TYPE_FULL)
- break;
- }
-
/* Full fe update*/
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
- if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->plane_state)
- continue;
+ if (!pipe_ctx->top_pipe &&
+ pipe_ctx->stream &&
+ pipe_ctx->stream == stream) {
+ struct dc_stream_status *stream_status = NULL;
- if (!pipe_ctx->top_pipe && pipe_ctx->stream) {
- struct dc_stream_status *stream_status = stream_get_status(context, pipe_ctx->stream);
+ top_pipe_to_program = pipe_ctx;
+
+ if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
+ continue;
+
+ stream_status =
+ stream_get_status(context, pipe_ctx->stream);
dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);
+
+ if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
+ // if otg funcs defined check if blanked before programming
+ if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(
+ pipe_ctx->stream_res.abm, stream->abm_level);
+ } else
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(
+ pipe_ctx->stream_res.abm, stream->abm_level);
+ }
}
}
- if (update_type > UPDATE_TYPE_FAST)
+ if (update_type == UPDATE_TYPE_FULL)
context_timing_trace(dc, &context->res_ctx);
- /* Perform requested Updates */
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *plane_state = srf_updates[i].surface;
-
- if (update_type == UPDATE_TYPE_MED)
- dc->hwss.apply_ctx_for_surface(
- dc, stream, surface_count, context);
-
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- if (pipe_ctx->plane_state != plane_state)
- continue;
+ /* Lock the top pipe while updating plane addrs, since freesync requires
+ * plane addr update event triggers to be synchronized.
+ * top_pipe_to_program is expected to never be NULL
+ */
+ if (update_type == UPDATE_TYPE_FAST) {
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
- if (srf_updates[i].flip_addr)
- dc->hwss.update_plane_addr(dc, pipe_ctx);
+ /* Perform requested Updates */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
- if (update_type == UPDATE_TYPE_FAST)
- continue;
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
- /* work around to program degamma regs for split pipe after set mode. */
- if (srf_updates[i].in_transfer_func || (pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state))
- dc->hwss.set_input_transfer_func(
- pipe_ctx, pipe_ctx->plane_state);
+ if (pipe_ctx->stream != stream)
+ continue;
- if (stream_update != NULL &&
- stream_update->out_transfer_func != NULL) {
- dc->hwss.set_output_transfer_func(
- pipe_ctx, pipe_ctx->stream);
- }
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
- if (srf_updates[i].hdr_static_metadata) {
- resource_build_info_frame(pipe_ctx);
- dc->hwss.update_info_frame(pipe_ctx);
+ if (srf_updates[i].flip_addr)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}
+
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
- /* Unlock pipes */
- for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[j];
- for (j = 0; j < surface_count; j++) {
- if (update_type != UPDATE_TYPE_FULL &&
- srf_updates[j].surface != pipe_ctx->plane_state)
- continue;
- if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+ if (pipe_ctx->stream != stream)
continue;
- dc->hwss.pipe_control_lock(
- dc,
- pipe_ctx,
- false);
-
- break;
+ if (stream_update->hdr_static_metadata) {
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
+ }
}
- }
}
void dc_commit_updates_for_stream(struct dc *dc,
@@ -1480,10 +1493,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
stream_update,
update_type,
context);
-
- if (update_type >= UPDATE_TYPE_FULL)
- dc_post_update_surfaces_to_stream(dc);
-
+	/* update current_state */
if (dc->current_state != context) {
struct dc_state *old = dc->current_state;
@@ -1492,6 +1502,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc_release_state(old);
}
+	/* use current_state to update watermarks, etc. */
+ if (update_type >= UPDATE_TYPE_FULL)
+ dc_post_update_surfaces_to_stream(dc);
return;
@@ -1517,13 +1530,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
if (dc == NULL)
- return;
+ return false;
- dal_irq_service_set(dc->res_pool->irqs, src, enable);
+ return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
@@ -1646,12 +1659,17 @@ struct dc_sink *dc_link_add_remote_sink(
&dc_sink->dc_edid,
&dc_sink->edid_caps);
- if (edid_status != EDID_OK)
- goto fail;
+ /*
+	 * Treat the device as having no EDID if EDID
+ * parsing fails
+ */
+ if (edid_status != EDID_OK) {
+ dc_sink->dc_edid.length = 0;
+		dm_error("Bad EDID, status %d!\n", edid_status);
+ }
return dc_sink;
-fail:
- dc_link_remove_remote_sink(link, dc_sink);
+
fail_add_sink:
dc_sink_release(dc_sink);
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 43c7a7f..5a552cb3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -36,26 +36,22 @@
#include "hw_sequencer.h"
#include "resource.h"
+#define DC_LOGGER \
+ logger
#define SURFACE_TRACE(...) do {\
if (dc->debug.surface_trace) \
- dm_logger_write(logger, \
- LOG_IF_TRACE, \
- ##__VA_ARGS__); \
+ DC_LOG_IF_TRACE(__VA_ARGS__); \
} while (0)
#define TIMING_TRACE(...) do {\
if (dc->debug.timing_trace) \
- dm_logger_write(logger, \
- LOG_SYNC, \
- ##__VA_ARGS__); \
+ DC_LOG_SYNC(__VA_ARGS__); \
} while (0)
#define CLOCK_TRACE(...) do {\
if (dc->debug.clock_trace) \
- dm_logger_write(logger, \
- LOG_BANDWIDTH_CALCS, \
- ##__VA_ARGS__); \
+ DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
} while (0)
void pre_surface_trace(
@@ -159,6 +155,7 @@ void pre_surface_trace(
"plane_state->tiling_info.gfx8.pipe_config = %d;\n"
"plane_state->tiling_info.gfx8.array_mode = %d;\n"
"plane_state->color_space = %d;\n"
+ "plane_state->input_tf = %d;\n"
"plane_state->dcc.enable = %d;\n"
"plane_state->format = %d;\n"
"plane_state->rotation = %d;\n"
@@ -166,6 +163,7 @@ void pre_surface_trace(
plane_state->tiling_info.gfx8.pipe_config,
plane_state->tiling_info.gfx8.array_mode,
plane_state->color_space,
+ plane_state->input_tf,
plane_state->dcc.enable,
plane_state->format,
plane_state->rotation,
@@ -206,6 +204,7 @@ void update_surface_trace(
if (update->plane_info) {
SURFACE_TRACE(
"plane_info->color_space = %d;\n"
+ "plane_info->input_tf = %d;\n"
"plane_info->format = %d;\n"
"plane_info->plane_size.grph.surface_pitch = %d;\n"
"plane_info->plane_size.grph.surface_size.height = %d;\n"
@@ -214,6 +213,7 @@ void update_surface_trace(
"plane_info->plane_size.grph.surface_size.y = %d;\n"
"plane_info->rotation = %d;\n",
update->plane_info->color_space,
+ update->plane_info->input_tf,
update->plane_info->format,
update->plane_info->plane_size.grph.surface_pitch,
update->plane_info->plane_size.grph.surface_size.height,
@@ -357,25 +357,20 @@ void context_clock_trace(
struct dc *core_dc = dc;
struct dal_logger *logger = core_dc->ctx->logger;
- CLOCK_TRACE("Current: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
- "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
- "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_div,
+ context->bw.dcn.calc_clk.dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.dram_ccm_us,
- context->bw.dcn.calc_clk.min_active_dram_ccm_us);
- CLOCK_TRACE("Calculated: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
- "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
- "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ context->bw.dcn.calc_clk.socclk_khz);
+ CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_div,
+ context->bw.dcn.calc_clk.dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.dram_ccm_us,
- context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+			context->bw.dcn.calc_clk.fclk_khz,
+			context->bw.dcn.calc_clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 71993d5..ebc96b7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -28,6 +28,8 @@
#include "timing_generator.h"
#include "hw_sequencer.h"
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
/* used as index in array of black_color_format */
enum black_color_format {
BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
@@ -38,6 +40,15 @@ enum black_color_format {
BLACK_COLOR_FORMAT_DEBUG,
};
+enum dc_color_space_type {
+ COLOR_SPACE_RGB_TYPE,
+ COLOR_SPACE_RGB_LIMITED_TYPE,
+ COLOR_SPACE_YCBCR601_TYPE,
+ COLOR_SPACE_YCBCR709_TYPE,
+ COLOR_SPACE_YCBCR601_LIMITED_TYPE,
+ COLOR_SPACE_YCBCR709_LIMITED_TYPE
+};
+
static const struct tg_color black_color_format[] = {
/* BlackColorFormat_RGB_FullRange */
{0, 0, 0},
@@ -53,6 +64,140 @@ static const struct tg_color black_color_format[] = {
{0xff, 0xff, 0},
};
+struct out_csc_color_matrix_type {
+ enum dc_color_space_type color_space_type;
+ uint16_t regval[12];
+};
+
+static const struct out_csc_color_matrix_type output_csc_matrix[] = {
+ { COLOR_SPACE_RGB_TYPE,
+ { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ { COLOR_SPACE_RGB_LIMITED_TYPE,
+ { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 0x201} },
+ { COLOR_SPACE_YCBCR601_TYPE,
+ { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 0xFB45,
+ 0xF6B7, 0xE04, 0x1004} },
+ { COLOR_SPACE_YCBCR709_TYPE,
+ { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
+ 0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
+
+ /* TODO: correct values below */
+ { COLOR_SPACE_YCBCR601_LIMITED_TYPE,
+ { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+ 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+ { COLOR_SPACE_YCBCR709_LIMITED_TYPE,
+ { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+};
+
+static bool is_rgb_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_SRGB ||
+ color_space == COLOR_SPACE_XR_RGB ||
+ color_space == COLOR_SPACE_MSREF_SCRGB ||
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
+ color_space == COLOR_SPACE_ADOBERGB ||
+ color_space == COLOR_SPACE_DCIP3 ||
+ color_space == COLOR_SPACE_DOLBYVISION)
+ ret = true;
+ return ret;
+}
+
+static bool is_rgb_limited_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr601_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR601 ||
+ color_space == COLOR_SPACE_XV_YCC_601)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr601_limited_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR601_LIMITED)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr709_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR709 ||
+ color_space == COLOR_SPACE_XV_YCC_709)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr709_limited_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR709_LIMITED)
+ ret = true;
+ return ret;
+}
+enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
+{
+ enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
+
+ if (is_rgb_type(color_space))
+ type = COLOR_SPACE_RGB_TYPE;
+ else if (is_rgb_limited_type(color_space))
+ type = COLOR_SPACE_RGB_LIMITED_TYPE;
+ else if (is_ycbcr601_type(color_space))
+ type = COLOR_SPACE_YCBCR601_TYPE;
+ else if (is_ycbcr709_type(color_space))
+ type = COLOR_SPACE_YCBCR709_TYPE;
+ else if (is_ycbcr601_limited_type(color_space))
+ type = COLOR_SPACE_YCBCR601_LIMITED_TYPE;
+ else if (is_ycbcr709_limited_type(color_space))
+ type = COLOR_SPACE_YCBCR709_LIMITED_TYPE;
+
+ return type;
+}
+
+const uint16_t *find_color_matrix(enum dc_color_space color_space,
+ uint32_t *array_size)
+{
+ int i;
+ enum dc_color_space_type type;
+ const uint16_t *val = NULL;
+ int arr_size = NUM_ELEMENTS(output_csc_matrix);
+
+ type = get_color_space_type(color_space);
+ for (i = 0; i < arr_size; i++)
+ if (output_csc_matrix[i].color_space_type == type) {
+ val = output_csc_matrix[i].regval;
+ *array_size = 12;
+ break;
+ }
+
+ return val;
+}
+
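/*
 * Illustrative usage sketch (not part of this patch): judging from the
 * identity and limited-range entries above, each regval[] holds three rows
 * of [coef, coef, coef, offset] in hardware fixed-point form (0x2000 acting
 * as 1.0).  A caller would bucket the stream's color space and copy the 12
 * values into the output-CSC registers, roughly as follows.
 */
static void example_program_out_csc(enum dc_color_space color_space)
{
	uint32_t num_regs = 0;
	const uint16_t *regval = find_color_matrix(color_space, &num_regs);
	uint16_t regs[12];

	if (regval == NULL || num_regs != 12)
		return;	/* no matrix available for this color space */

	memcpy(regs, regval, sizeof(regs));
	/* regs[] would then be written to the OPP/output CSC registers */
}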
void color_space_to_black_color(
const struct dc *dc,
enum dc_color_space colorspace,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 42a111b..eeb0447 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,9 +45,11 @@
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_enum.h"
#include "dce/dce_11_0_sh_mask.h"
+#define DC_LOGGER \
+ dc_ctx->logger
#define LINK_INFO(...) \
- dm_logger_write(dc_ctx->logger, LOG_HW_HOTPLUG, \
+ DC_LOG_HW_HOTPLUG( \
__VA_ARGS__)
/*******************************************************************************
@@ -126,6 +128,8 @@ static bool program_hpd_filter(
int delay_on_connect_in_ms = 0;
int delay_on_disconnect_in_ms = 0;
+ if (link->is_hpd_filter_disabled)
+ return false;
/* Verify feature is supported */
switch (link->connector_signal) {
case SIGNAL_TYPE_DVI_SINGLE_LINK:
@@ -464,7 +468,7 @@ static void link_disconnect_sink(struct dc_link *link)
link->dpcd_sink_count = 0;
}
-static void detect_dp(
+static bool detect_dp(
struct dc_link *link,
struct display_sink_capability *sink_caps,
bool *converter_disable_audio,
@@ -478,7 +482,8 @@ static void detect_dp(
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
- detect_dp_sink_caps(link);
+ if (!detect_dp_sink_caps(link))
+ return false;
if (is_mst_supported(link)) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
@@ -529,7 +534,7 @@ static void detect_dp(
* active dongle unplug processing for short irq
*/
link_disconnect_sink(link);
- return;
+ return true;
}
if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
@@ -541,6 +546,8 @@ static void detect_dp(
sink_caps,
audio_support);
}
+
+ return true;
}
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
@@ -604,11 +611,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
case SIGNAL_TYPE_DISPLAY_PORT: {
- detect_dp(
+ if (!detect_dp(
link,
&sink_caps,
&converter_disable_audio,
- aud_support, reason);
+ aud_support, reason))
+ return false;
/* Active dongle downstream unplug */
if (link->type == dc_connection_active_dongle
@@ -671,14 +679,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
switch (edid_status) {
case EDID_BAD_CHECKSUM:
- dm_logger_write(link->ctx->logger, LOG_ERROR,
- "EDID checksum invalid.\n");
+ DC_LOG_ERROR("EDID checksum invalid.\n");
break;
case EDID_NO_RESPONSE:
- dm_logger_write(link->ctx->logger, LOG_ERROR,
- "No EDID read.\n");
- return false;
-
+ DC_LOG_ERROR("No EDID read.\n");
default:
break;
}
@@ -708,8 +712,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
"%s: [Block %d] ", sink->edid_caps.display_name, i);
}
- dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
- "%s: "
+ DC_LOG_DETECTION_EDID_PARSER("%s: "
"manufacturer_id = %X, "
"product_id = %X, "
"serial_number = %X, "
@@ -729,8 +732,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
sink->edid_caps.audio_mode_count);
for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
- dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
- "%s: mode number = %d, "
+ DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, "
"format_code = %d, "
"channel_count = %d, "
"sample_rate = %d, "
@@ -938,8 +940,9 @@ static bool construct(
link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
- dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
- __func__, init_params->connector_index);
+ dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
+ __func__, init_params->connector_index,
+ link->link_id.type, OBJECT_TYPE_CONNECTOR);
goto create_fail;
}
@@ -979,8 +982,7 @@ static bool construct(
}
break;
default:
- dm_logger_write(dc_ctx->logger, LOG_WARNING,
- "Unsupported Connector type:%d!\n", link->link_id.id);
+ DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id);
goto create_fail;
}
@@ -1133,7 +1135,7 @@ static void dpcd_configure_panel_mode(
{
union dpcd_edp_config edp_config_set;
bool panel_mode_edp = false;
-
+ struct dc_context *dc_ctx = link->ctx;
memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
if (DP_PANEL_MODE_DEFAULT != panel_mode) {
@@ -1170,8 +1172,7 @@ static void dpcd_configure_panel_mode(
ASSERT(result == DDC_RESULT_SUCESSFULL);
}
}
- dm_logger_write(link->ctx->logger, LOG_DETECTION_DP_CAPS,
- "Link: %d eDP panel mode supported: %d "
+ DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
"eDP panel mode enabled: %d \n",
link->link_index,
link->dpcd_caps.panel_mode_edp,
@@ -1247,6 +1248,12 @@ static enum dc_status enable_link_dp(
pipe_ctx->clock_source->id,
&link_settings);
+ if (stream->sink->edid_caps.panel_patch.dppowerup_delay > 0) {
+ int delay_dp_power_up_in_ms = stream->sink->edid_caps.panel_patch.dppowerup_delay;
+
+ msleep(delay_dp_power_up_in_ms);
+ }
+
panel_mode = dp_get_panel_mode(link);
dpcd_configure_panel_mode(link, panel_mode);
@@ -1271,6 +1278,23 @@ static enum dc_status enable_link_dp(
return status;
}
+static enum dc_status enable_link_edp(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum dc_status status;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+	/* in case it is not already on */
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+
+ status = enable_link_dp(state, pipe_ctx);
+
+ return status;
+}
+
static enum dc_status enable_link_dp_mst(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
@@ -1283,6 +1307,9 @@ static enum dc_status enable_link_dp_mst(
if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
return DC_OK;
+ /* clear payload table */
+ dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+
/* set the sink to MST mode before enabling the link */
dp_enable_mst_on_sink(link, true);
@@ -1730,8 +1757,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
link->link_enc,
pipe_ctx->clock_source->id,
display_color_depth,
- pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
- pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+ pipe_ctx->stream->signal,
stream->phy_pix_clk);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
@@ -1746,9 +1772,11 @@ static enum dc_status enable_link(
enum dc_status status = DC_ERROR_UNEXPECTED;
switch (pipe_ctx->stream->signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_EDP:
status = enable_link_dp(state, pipe_ctx);
break;
+ case SIGNAL_TYPE_EDP:
+ status = enable_link_edp(state, pipe_ctx);
+ break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
status = enable_link_dp_mst(state, pipe_ctx);
msleep(200);
@@ -1767,9 +1795,21 @@ static enum dc_status enable_link(
}
if (pipe_ctx->stream_res.audio && status == DC_OK) {
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
/* notify audio driver for audio modes of monitor */
+ struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+ unsigned int i, num_audio = 1;
+ for (i = 0; i < MAX_PIPES; i++) {
+ /*current_state not updated yet*/
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+ num_audio++;
+ }
+
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+ if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* un-mute audio */
/* TODO: audio should be per stream rather than per link */
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
@@ -1798,7 +1838,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
else
dp_disable_link_phy_mst(link, signal);
} else
- link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+ link->link_enc->funcs->disable_output(link->link_enc, signal);
}
static bool dp_active_dongle_validate_timing(
@@ -1871,7 +1911,7 @@ enum dc_status dc_link_validate_mode_timing(
const struct dc_crtc_timing *timing)
{
uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
- struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
+ struct dc_dongle_caps *dongle_caps = &link->dpcd_caps.dongle_caps;
/* A hack to avoid failing any modes for EDID override feature on
* topology change such as lower quality cable for DP or different dongle
@@ -1909,14 +1949,27 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ struct dc_context *dc_ctx = link->ctx;
unsigned int controller_id = 0;
+ bool use_smooth_brightness = true;
int i;
- if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+ if ((dmcu == NULL) ||
+ (abm == NULL) ||
+ (abm->funcs->set_backlight_level == NULL))
return false;
- dm_logger_write(link->ctx->logger, LOG_BACKLIGHT,
- "New Backlight level: %d (0x%X)\n", level, level);
+ if (stream) {
+ if (stream->bl_pwm_level == 0)
+ frame_ramp = 0;
+
+ ((struct dc_stream_state *)stream)->bl_pwm_level = level;
+ }
+
+ use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
+ DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
if (dc_is_embedded_signal(link->connector_signal)) {
if (stream != NULL) {
@@ -1937,7 +1990,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
abm,
level,
frame_ramp,
- controller_id);
+ controller_id,
+ use_smooth_brightness);
}
return true;
@@ -1954,144 +2008,6 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
return true;
}
-bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
-{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
- if (dmcu != NULL && link->psr_enabled)
- dmcu->funcs->get_psr_state(dmcu, psr_state);
-
- return true;
-}
-
-bool dc_link_setup_psr(struct dc_link *link,
- const struct dc_stream_state *stream, struct psr_config *psr_config,
- struct psr_context *psr_context)
-{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
- int i;
-
- psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
-
- if (link != NULL &&
- dmcu != NULL) {
- /* updateSinkPsrDpcdConfig*/
- union dpcd_psr_configuration psr_configuration;
-
- memset(&psr_configuration, 0, sizeof(psr_configuration));
-
- psr_configuration.bits.ENABLE = 1;
- psr_configuration.bits.CRC_VERIFICATION = 1;
- psr_configuration.bits.FRAME_CAPTURE_INDICATION =
- psr_config->psr_frame_capture_indication_req;
-
- /* Check for PSR v2*/
- if (psr_config->psr_version == 0x2) {
- /* For PSR v2 selective update.
- * Indicates whether sink should start capturing
- * immediately following active scan line,
- * or starting with the 2nd active scan line.
- */
- psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
- /*For PSR v2, determines whether Sink should generate
- * IRQ_HPD when CRC mismatch is detected.
- */
- psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
- }
-
- dm_helpers_dp_write_dpcd(
- link->ctx,
- link,
- 368,
- &psr_configuration.raw,
- sizeof(psr_configuration.raw));
-
- psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
- psr_context->transmitterId = link->link_enc->transmitter;
- psr_context->engineId = link->link_enc->preferred_engine;
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
- == stream) {
- /* dmcu -1 for all controller id values,
- * therefore +1 here
- */
- psr_context->controllerId =
- core_dc->current_state->res_ctx.
- pipe_ctx[i].stream_res.tg->inst + 1;
- break;
- }
- }
-
- /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
- psr_context->phyType = PHY_TYPE_UNIPHY;
- /*PhyId is associated with the transmitter id*/
- psr_context->smuPhyId = link->link_enc->transmitter;
-
- psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
- psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
- timing.pix_clk_khz * 1000),
- stream->timing.v_total),
- stream->timing.h_total);
-
- psr_context->psrSupportedDisplayConfig = true;
- psr_context->psrExitLinkTrainingRequired =
- psr_config->psr_exit_link_training_required;
- psr_context->sdpTransmitLineNumDeadline =
- psr_config->psr_sdp_transmit_line_num_deadline;
- psr_context->psrFrameCaptureIndicationReq =
- psr_config->psr_frame_capture_indication_req;
-
- psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
-
- psr_context->numberOfControllers =
- link->dc->res_pool->res_cap->num_timing_generator;
-
- psr_context->rfb_update_auto_en = true;
-
- /* 2 frames before enter PSR. */
- psr_context->timehyst_frames = 2;
- /* half a frame
- * (units in 100 lines, i.e. a value of 1 represents 100 lines)
- */
- psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
- psr_context->aux_repeats = 10;
-
- psr_context->psr_level.u32all = 0;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- /*skip power down the single pipe since it blocks the cstate*/
- if (ASIC_REV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
-
- /* SMU will perform additional powerdown sequence.
- * For unsupported ASICs, set psr_level flag to skip PSR
- * static screen notification to SMU.
- * (Always set for DAL2, did not check ASIC)
- */
- psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION = 1;
-
- /* Complete PSR entry before aborting to prevent intermittent
- * freezes on certain eDPs
- */
- psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
-
- /* Controls additional delay after remote frame capture before
- * continuing power down, default = 0
- */
- psr_context->frame_delay = 0;
-
- link->psr_enabled = true;
- dmcu->funcs->setup_psr(dmcu, link, psr_context);
- return true;
- } else
- return false;
-
-}
-
const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
{
return &link->link_status;
@@ -2220,6 +2136,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
+ struct dc_context *dc_ctx = link->ctx;
uint8_t i;
/* enable_link_dp_mst already check link->enabled_stream_count
@@ -2237,21 +2154,18 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
}
else
- dm_logger_write(link->ctx->logger, LOG_WARNING,
- "Failed to update"
+		DC_LOG_WARNING("Failed to update "
"MST allocation table for"
"pipe idx:%d\n",
pipe_ctx->pipe_idx);
- dm_logger_write(link->ctx->logger, LOG_MST,
- "%s "
+ DC_LOG_MST("%s "
"stream_count: %d: \n ",
__func__,
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- dm_logger_write(link->ctx->logger, LOG_MST,
- "stream_enc[%d]: 0x%x "
+ DC_LOG_MST("stream_enc[%d]: 0x%x "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
@@ -2302,6 +2216,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
uint8_t i;
bool mst_mode = (link->type == dc_connection_mst_branch);
+ struct dc_context *dc_ctx = link->ctx;
/* deallocate_mst_payload is called before disable link. When mode or
* disable/enable monitor, new stream is created which is not in link
@@ -2327,23 +2242,20 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
}
else {
- dm_logger_write(link->ctx->logger, LOG_WARNING,
- "Failed to update"
+			DC_LOG_WARNING("Failed to update "
"MST allocation table for"
"pipe idx:%d\n",
pipe_ctx->pipe_idx);
}
}
- dm_logger_write(link->ctx->logger, LOG_MST,
- "%s"
+	DC_LOG_MST("%s "
"stream_count: %d: ",
__func__,
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- dm_logger_write(link->ctx->logger, LOG_MST,
- "stream_enc[%d]: 0x%x "
+ DC_LOG_MST("stream_enc[%d]: 0x%x "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
@@ -2377,12 +2289,24 @@ void core_link_enable_stream(
struct pipe_ctx *pipe_ctx)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc_context *dc_ctx = pipe_ctx->stream->ctx;
+ enum dc_status status;
- enum dc_status status = enable_link(state, pipe_ctx);
+ /* eDP lit up by bios already, no need to enable again. */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ core_dc->apply_edp_fast_boot_optimization) {
+ core_dc->apply_edp_fast_boot_optimization = false;
+ pipe_ctx->stream->dpms_off = false;
+ return;
+ }
+
+ if (pipe_ctx->stream->dpms_off)
+ return;
+
+ status = enable_link(state, pipe_ctx);
if (status != DC_OK) {
- dm_logger_write(pipe_ctx->stream->ctx->logger,
- LOG_WARNING, "enabling link %u failed: %d\n",
+ DC_LOG_WARNING("enabling link %u failed: %d\n",
pipe_ctx->stream->sink->link->link_index,
status);
@@ -2408,9 +2332,8 @@ void core_link_enable_stream(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
allocate_mst_payload(pipe_ctx);
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- core_dc->hwss.unblank_stream(pipe_ctx,
- &pipe_ctx->stream->sink->link->cur_link_settings);
+ core_dc->hwss.unblank_stream(pipe_ctx,
+ &pipe_ctx->stream->sink->link->cur_link_settings);
}
void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
@@ -2420,6 +2343,8 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
+ core_dc->hwss.blank_stream(pipe_ctx);
+
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
@@ -2435,3 +2360,36 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
core_dc->hwss.set_avmute(pipe_ctx, enable);
}
+void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+{
+ struct gpio *hpd;
+
+ if (enable) {
+ link->is_hpd_filter_disabled = false;
+ program_hpd_filter(link);
+ } else {
+ link->is_hpd_filter_disabled = true;
+ /* Obtain HPD handle */
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (!hpd)
+ return;
+
+ /* Setup HPD filtering */
+ if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+ struct gpio_hpd_config config;
+
+ config.delay_on_connect = 0;
+ config.delay_on_disconnect = 0;
+
+ dal_irq_setup_hpd_filter(hpd, &config);
+
+ dal_gpio_close(hpd);
+ } else {
+ ASSERT_CRITICAL(false);
+ }
+ /* Release HPD handle */
+ dal_gpio_destroy_irq(&hpd);
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index d5294798b..49c2fac 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -629,7 +629,7 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
@@ -660,8 +660,9 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
if (dal_i2caux_submit_aux_command(
ddc->ctx->i2caux,
ddc->ddc_pin,
- &command))
- return DDC_RESULT_SUCESSFULL;
+ &command)) {
+ return (ssize_t)command.payloads->length;
+ }
return DDC_RESULT_FAILED_OPERATION;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index e6bf05d..3b50535 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -11,6 +11,8 @@
#include "dpcd_defs.h"
#include "resource.h"
+#define DC_LOGGER \
+ link->ctx->logger
/* maximum pre emphasis level allowed for each voltage swing level*/
static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
@@ -63,8 +65,7 @@ static void wait_for_training_aux_rd_interval(
udelay(default_wait_in_micro_secs);
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
- "%s:\n wait = %d\n",
+ DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
default_wait_in_micro_secs);
}
@@ -79,8 +80,7 @@ static void dpcd_set_training_pattern(
&dpcd_pattern.raw,
1);
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
- "%s\n %x pattern = %x\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
__func__,
DP_TRAINING_PATTERN_SET,
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
@@ -116,8 +116,7 @@ static void dpcd_set_link_settings(
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
- "%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
__func__,
DP_LINK_BW_SET,
lt_settings->link_settings.link_rate,
@@ -151,8 +150,7 @@ static enum dpcd_training_patterns
break;
default:
ASSERT(0);
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
- "%s: Invalid HW Training pattern: %d\n",
+ DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
__func__, pattern);
break;
}
@@ -184,8 +182,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
= dpcd_pattern.raw;
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
- "%s\n %x pattern = %x\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
__func__,
DP_TRAINING_PATTERN_SET,
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
@@ -219,9 +216,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lane,
size_in_bytes);
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
- "%s:\n %x VS set = %x PE set = %x \
- max VS Reached = %x max PE Reached = %x\n",
+ DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
DP_TRAINING_LANE0_SET,
dpcd_lane[0].bits.VOLTAGE_SWING_SET,
@@ -457,14 +452,12 @@ static void get_lane_status_and_drive_settings(
ln_status_updated->raw = dpcd_buf[2];
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
- "%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
+ DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
__func__,
DP_LANE0_1_STATUS, dpcd_buf[0],
DP_LANE2_3_STATUS, dpcd_buf[1]);
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
- "%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
+ DC_LOG_HW_LINK_TRAINING("%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
__func__,
DP_ADJUST_REQUEST_LANE0_1,
dpcd_buf[4],
@@ -557,9 +550,7 @@ static void dpcd_set_lane_settings(
}
*/
- dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
- "%s\n %x VS set = %x PE set = %x \
- max VS Reached = %x max PE Reached = %x\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
DP_TRAINING_LANE0_SET,
dpcd_lane[0].bits.VOLTAGE_SWING_SET,
@@ -671,16 +662,14 @@ static bool perform_post_lt_adj_req_sequence(
}
if (!req_drv_setting_changed) {
- dm_logger_write(link->ctx->logger, LOG_WARNING,
- "%s: Post Link Training Adjust Request Timed out\n",
+ DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n",
__func__);
ASSERT(0);
return true;
}
}
- dm_logger_write(link->ctx->logger, LOG_WARNING,
- "%s: Post Link Training Adjust Request limit reached\n",
+ DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n",
__func__);
ASSERT(0);
@@ -711,6 +700,22 @@ static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
return HW_DP_TRAINING_PATTERN_2;
}
+static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE0;
+ else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE1;
+ else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE23;
+ else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE23;
+ return result;
+}
+
static enum link_training_result perform_channel_equalization_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings)
@@ -720,7 +725,7 @@ static enum link_training_result perform_channel_equalization_sequence(
uint32_t retries_ch_eq;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = {{0}};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
hw_tr_pattern = get_supported_tp(link);
@@ -773,7 +778,7 @@ static enum link_training_result perform_channel_equalization_sequence(
}
-static bool perform_clock_recovery_sequence(
+static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings)
{
@@ -848,11 +853,11 @@ static bool perform_clock_recovery_sequence(
/* 5. check CR done*/
if (is_cr_done(lane_count, dpcd_lane_status))
- return true;
+ return LINK_TRAINING_SUCCESS;
/* 6. max VS reached*/
if (is_max_vs_reached(lt_settings))
- return false;
+ break;
/* 7. same voltage*/
/* Note: VS same for all lanes,
@@ -871,21 +876,19 @@ static bool perform_clock_recovery_sequence(
if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
ASSERT(0);
- dm_logger_write(link->ctx->logger, LOG_ERROR,
- "%s: Link Training Error, could not \
- get CR after %d tries. \
- Possibly voltage swing issue", __func__,
+ DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
+ __func__,
LINK_TRAINING_MAX_CR_RETRY);
}
- return false;
+ return get_cr_failure(lane_count, dpcd_lane_status);
}
-static inline bool perform_link_training_int(
+static inline enum link_training_result perform_link_training_int(
struct dc_link *link,
struct link_training_settings *lt_settings,
- bool status)
+ enum link_training_result status)
{
union lane_count_set lane_count_set = { {0} };
union dpcd_training_pattern dpcd_pattern = { {0} };
@@ -906,9 +909,9 @@ static inline bool perform_link_training_int(
get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
return status;
- if (status &&
+ if (status == LINK_TRAINING_SUCCESS &&
perform_post_lt_adj_req_sequence(link, lt_settings) == false)
- status = false;
+ status = LINK_TRAINING_LQA_FAIL;
lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
lane_count_set.bits.ENHANCED_FRAMING = 1;
@@ -931,6 +934,8 @@ enum link_training_result dc_link_dp_perform_link_training(
enum link_training_result status = LINK_TRAINING_SUCCESS;
char *link_rate = "Unknown";
+ char *lt_result = "Unknown";
+
struct link_training_settings lt_settings;
memset(&lt_settings, '\0', sizeof(lt_settings));
@@ -954,22 +959,16 @@ enum link_training_result dc_link_dp_perform_link_training(
/* 2. perform link training (set link training done
* to false is done as well)*/
- if (!perform_clock_recovery_sequence(link, &lt_settings)) {
- status = LINK_TRAINING_CR_FAIL;
- } else {
+ status = perform_clock_recovery_sequence(link, &lt_settings);
+ if (status == LINK_TRAINING_SUCCESS) {
status = perform_channel_equalization_sequence(link,
&lt_settings);
}
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
- if (!perform_link_training_int(link,
+ status = perform_link_training_int(link,
&lt_settings,
- status == LINK_TRAINING_SUCCESS)) {
- /* the next link training setting in this case
- * would be the same as CR failure case.
- */
- status = LINK_TRAINING_CR_FAIL;
- }
+ status);
}
/* 6. print status message*/
@@ -994,13 +993,37 @@ enum link_training_result dc_link_dp_perform_link_training(
break;
}
+ switch (status) {
+ case LINK_TRAINING_SUCCESS:
+ lt_result = "pass";
+ break;
+ case LINK_TRAINING_CR_FAIL_LANE0:
+ lt_result = "CR failed lane0";
+ break;
+ case LINK_TRAINING_CR_FAIL_LANE1:
+ lt_result = "CR failed lane1";
+ break;
+ case LINK_TRAINING_CR_FAIL_LANE23:
+ lt_result = "CR failed lane23";
+ break;
+ case LINK_TRAINING_EQ_FAIL_CR:
+ lt_result = "CR failed in EQ";
+ break;
+ case LINK_TRAINING_EQ_FAIL_EQ:
+ lt_result = "EQ failed";
+ break;
+ case LINK_TRAINING_LQA_FAIL:
+ lt_result = "LQA failed";
+ break;
+ default:
+ break;
+ }
+
/* Connectivity log: link training */
CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d",
link_rate,
lt_settings.link_settings.lane_count,
- (status == LINK_TRAINING_SUCCESS) ? "pass" :
- ((status == LINK_TRAINING_CR_FAIL) ? "CR failed" :
- "EQ failed"),
+ lt_result,
lt_settings.lane_settings[0].VOLTAGE_SWING,
lt_settings.lane_settings[0].PRE_EMPHASIS);
@@ -1118,6 +1141,7 @@ bool dp_hbr_verify_link_cap(
dp_cs_id,
cur);
+
if (skip_link_training)
success = true;
else {
@@ -1282,7 +1306,10 @@ static bool decide_fallback_link_setting(
return false;
switch (training_result) {
- case LINK_TRAINING_CR_FAIL:
+ case LINK_TRAINING_CR_FAIL_LANE0:
+ case LINK_TRAINING_CR_FAIL_LANE1:
+ case LINK_TRAINING_CR_FAIL_LANE23:
+ case LINK_TRAINING_LQA_FAIL:
{
if (!reached_minimum_link_rate
(current_link_setting->link_rate)) {
@@ -1293,8 +1320,18 @@ static bool decide_fallback_link_setting(
(current_link_setting->lane_count)) {
current_link_setting->link_rate =
initial_link_settings.link_rate;
- current_link_setting->lane_count =
- reduce_lane_count(
+ if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
+ return false;
+ else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
+ current_link_setting->lane_count =
+ LANE_COUNT_ONE;
+ else if (training_result ==
+ LINK_TRAINING_CR_FAIL_LANE23)
+ current_link_setting->lane_count =
+ LANE_COUNT_TWO;
+ else
+ current_link_setting->lane_count =
+ reduce_lane_count(
current_link_setting->lane_count);
} else {
return false;
@@ -1468,7 +1505,13 @@ void decide_link_settings(struct dc_stream_state *stream,
/* MST doesn't perform link training for now
* TODO: add MST specific link training routine
*/
- if (is_mst_supported(link)) {
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
+	/* eDP uses the link cap setting */
+ if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) {
*link_setting = link->verified_link_cap;
return;
}
@@ -1553,8 +1596,7 @@ static bool hpd_rx_irq_check_link_loss_status(
if (sink_status_changed ||
!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
- dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
- "%s: Link Status changed.\n", __func__);
+ DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
return_code = true;
@@ -1567,8 +1609,7 @@ static bool hpd_rx_irq_check_link_loss_status(
sizeof(irq_reg_rx_power_state));
if (dpcd_result != DC_OK) {
- dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
- "%s: DPCD read failed to obtain power state.\n",
+ DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
__func__);
} else {
if (irq_reg_rx_power_state != DP_SET_POWER_D0)
@@ -1929,8 +1970,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
* PSR and device auto test, refer to function handle_sst_hpd_irq
* in DAL2.1*/
- dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
- "%s: Got short pulse HPD on link %d\n",
+ DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n",
__func__, link->link_index);
@@ -1944,8 +1984,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
*out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
if (result != DC_OK) {
- dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
- "%s: DPCD read failed to obtain irq data\n",
+ DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n",
__func__);
return false;
}
@@ -1963,8 +2002,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
}
if (!allow_hpd_rx_irq(link)) {
- dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
- "%s: skipping HPD handling on %d\n",
+ DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
__func__, link->link_index);
return false;
}
@@ -2127,7 +2165,7 @@ static void get_active_converter_info(
union dwnstream_port_caps_byte3_hdmi
hdmi_caps = {.raw = det_caps[3] };
- union dwnstream_port_caps_byte1
+ union dwnstream_port_caps_byte2
hdmi_color_caps = {.raw = det_caps[2] };
link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
det_caps[1] * 25000;
@@ -2232,13 +2270,14 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
link->wa_flags.dp_keep_receiver_powered = false;
}
-static void retrieve_link_cap(struct dc_link *link)
+static bool retrieve_link_cap(struct dc_link *link)
{
uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1];
union down_stream_port_count down_strm_port_count;
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
+ enum dc_status status = DC_ERROR_UNEXPECTED;
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(&down_strm_port_count,
@@ -2246,11 +2285,16 @@ static void retrieve_link_cap(struct dc_link *link)
memset(&edp_config_cap, '\0',
sizeof(union edp_configuration_cap));
- core_link_read_dpcd(
- link,
- DP_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+
+ if (status != DC_OK) {
+ dm_error("%s: Read dpcd data failed.\n", __func__);
+ return false;
+ }
{
union training_aux_rd_interval aux_rd_interval;
@@ -2312,11 +2356,13 @@ static void retrieve_link_cap(struct dc_link *link)
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+
+ return true;
}
-void detect_dp_sink_caps(struct dc_link *link)
+bool detect_dp_sink_caps(struct dc_link *link)
{
- retrieve_link_cap(link);
+ return retrieve_link_cap(link);
/* dc init_hw has power encoder using default
* signal for connector. For native DP, no
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 9a33b47..7c866a7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -88,15 +88,7 @@ void dp_enable_link_phy(
}
if (dc_is_dp_sst_signal(signal)) {
- if (signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_power_control(link->link_enc, true);
- link_enc->funcs->enable_dp_output(
- link_enc,
- link_settings,
- clock_source);
- link->dc->hwss.edp_backlight_control(link, true);
- } else
- link_enc->funcs->enable_dp_output(
+ link_enc->funcs->enable_dp_output(
link_enc,
link_settings,
clock_source);
@@ -110,7 +102,7 @@ void dp_enable_link_phy(
dp_receiver_power_ctrl(link, true);
}
-static bool edp_receiver_ready_T9(struct dc_link *link)
+bool edp_receiver_ready_T9(struct dc_link *link)
{
unsigned int tries = 0;
unsigned char sinkstatus = 0;
@@ -131,6 +123,28 @@ static bool edp_receiver_ready_T9(struct dc_link *link)
} while (++tries < 50);
return result;
}
+bool edp_receiver_ready_T7(struct dc_link *link)
+{
+ unsigned int tries = 0;
+ unsigned char sinkstatus = 0;
+ unsigned char edpRev = 0;
+ enum dc_status result = DC_OK;
+
+ result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+ if (result == DC_OK && edpRev < DP_EDP_12)
+ return true;
+	/* starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ do {
+ sinkstatus = 0;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 1)
+ break;
+ if (result != DC_OK)
+ break;
+		udelay(25); //Max T7 is 50ms
+ } while (++tries < 300);
+ return result;
+}
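edp_receiver_ready_T7() is exported here alongside edp_receiver_ready_T9(). A minimal usage sketch, assuming the hwss callback signature used by dp_disable_link_phy() below; the caller itself is hypothetical and not part of this patch:

/* Illustrative only: give the eDP sink up to T7 after panel power-up,
 * before moving on to link training. */
static void example_edp_power_up(struct dc_link *link)
{
	link->dc->hwss.edp_power_control(link, true);	/* panel VDD on */
	edp_receiver_ready_T7(link);	/* poll DP_SINK_STATUS, up to ~50 ms */
	/* ... dp_enable_link_phy() and link training follow ... */
}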
void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
@@ -138,12 +152,10 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
dp_receiver_power_ctrl(link, false);
if (signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, false);
- edp_receiver_ready_T9(link);
- link->link_enc->funcs->disable_output(link->link_enc, signal, link);
- link->dc->hwss.edp_power_control(link->link_enc, false);
+ link->link_enc->funcs->disable_output(link->link_enc, signal);
+ link->dc->hwss.edp_power_control(link, false);
} else
- link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+ link->link_enc->funcs->disable_output(link->link_enc, signal);
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
@@ -267,6 +279,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
for (i = 0; i < MAX_PIPES; i++) {
if (pipes[i].stream != NULL &&
+ !pipes[i].top_pipe &&
pipes[i].stream->sink != NULL &&
pipes[i].stream->sink->link != NULL &&
pipes[i].stream_res.stream_enc != NULL &&
@@ -286,8 +299,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
link->link_enc->funcs->disable_output(
link->link_enc,
- SIGNAL_TYPE_DISPLAY_PORT,
- link);
+ SIGNAL_TYPE_DISPLAY_PORT);
/* Clear current link setting. */
memset(&link->cur_link_settings, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 9288958..ba3487e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -35,6 +35,7 @@
#include "core_types.h"
#include "set_mode_types.h"
#include "virtual/virtual_stream_encoder.h"
+#include "dpcd_defs.h"
#include "dce80/dce80_resource.h"
#include "dce100/dce100_resource.h"
@@ -44,7 +45,8 @@
#include "dcn10/dcn10_resource.h"
#endif
#include "dce120/dce120_resource.h"
-
+#define DC_LOGGER \
+ ctx->logger
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -426,15 +428,8 @@ static enum pixel_format convert_pixel_format_to_dalsurface(
static void rect_swap_helper(struct rect *rect)
{
- uint32_t temp = 0;
-
- temp = rect->height;
- rect->height = rect->width;
- rect->width = temp;
-
- temp = rect->x;
- rect->x = rect->y;
- rect->y = temp;
+ swap(rect->height, rect->width);
+ swap(rect->x, rect->y);
}
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
@@ -505,26 +500,15 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
/* Handle hsplit */
- if (pri_split || sec_split) {
- /* HMirror XOR Secondary_pipe XOR Rotation_180 */
- bool right_view = (sec_split != plane_state->horizontal_mirror) !=
- (plane_state->rotation == ROTATION_ANGLE_180);
-
- if (plane_state->rotation == ROTATION_ANGLE_90
- || plane_state->rotation == ROTATION_ANGLE_270)
- /* Secondary_pipe XOR Rotation_270 */
- right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
-
- if (right_view) {
- data->viewport.x += data->viewport.width / 2;
- data->viewport_c.x += data->viewport_c.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- data->viewport_c.width = (data->viewport_c.width + 1) / 2;
- } else {
- data->viewport.width /= 2;
- data->viewport_c.width /= 2;
- }
+ if (sec_split) {
+ data->viewport.x += data->viewport.width / 2;
+ data->viewport_c.x += data->viewport_c.width / 2;
+ /* Ceil offset pipe */
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ data->viewport_c.width = (data->viewport_c.width + 1) / 2;
+ } else if (pri_split) {
+ data->viewport.width /= 2;
+ data->viewport_c.width /= 2;
}
if (plane_state->rotation == ROTATION_ANGLE_90 ||
@@ -541,6 +525,11 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
struct rect surf_src = plane_state->src_rect;
struct rect surf_clip = plane_state->clip_rect;
int recout_full_x, recout_full_y;
+ bool pri_split = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
@@ -575,33 +564,43 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
- pipe_ctx->plane_res.scl_data.recout.y;
/* Handle h & vsplit */
- if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
- pipe_ctx->plane_state) {
- if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
- /* Floor primary pipe, ceil 2ndary pipe */
- pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
+ if (sec_split && top_bottom_split) {
+ pipe_ctx->plane_res.scl_data.recout.y +=
+ pipe_ctx->plane_res.scl_data.recout.height / 2;
+ /* Floor primary pipe, ceil 2ndary pipe */
+ pipe_ctx->plane_res.scl_data.recout.height =
+ (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
+ } else if (pri_split && top_bottom_split)
+ pipe_ctx->plane_res.scl_data.recout.height /= 2;
+ else if (pri_split || sec_split) {
+ /* HMirror XOR Secondary_pipe XOR Rotation_180 */
+ bool right_view = (sec_split != plane_state->horizontal_mirror) !=
+ (plane_state->rotation == ROTATION_ANGLE_180);
+
+ if (plane_state->rotation == ROTATION_ANGLE_90
+ || plane_state->rotation == ROTATION_ANGLE_270)
+ /* Secondary_pipe XOR Rotation_270 */
+ right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
+
+ if (right_view) {
+ pipe_ctx->plane_res.scl_data.recout.x +=
+ pipe_ctx->plane_res.scl_data.recout.width / 2;
+ /* Ceil offset pipe */
+ pipe_ctx->plane_res.scl_data.recout.width =
+ (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
} else {
- pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
- pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- }
- } else if (pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
- if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
- pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else
pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ }
}
-
/* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
* * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
* ratio)
*/
- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
* stream->dst.width / stream->src.width -
surf_src.x * plane_state->dst_rect.width / surf_src.width
* stream->dst.width / stream->src.width;
- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
surf_src.y * plane_state->dst_rect.height / surf_src.height
* stream->dst.height / stream->src.height;
@@ -657,7 +656,20 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
struct rect src = pipe_ctx->plane_state->src_rect;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
+ /*
+ * Need to calculate the scan direction for viewport to make adjustments
+ */
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
+ flip_vert_scan_dir = true;
+ flip_horz_scan_dir = true;
+ } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
+ flip_vert_scan_dir = true;
+ else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ flip_horz_scan_dir = true;
+ if (pipe_ctx->plane_state->horizontal_mirror)
+ flip_horz_scan_dir = !flip_horz_scan_dir;
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
@@ -686,7 +698,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
/* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
int int_part = dal_fixed31_32_floor(
dal_fixed31_32_sub(data->inits.h, data->ratios.horz));
@@ -694,7 +706,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
int_part = int_part > 0 ? int_part : 0;
data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
}
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
int int_part = dal_fixed31_32_floor(
dal_fixed31_32_sub(data->inits.v, data->ratios.vert));
@@ -702,7 +714,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
int_part = int_part > 0 ? int_part : 0;
data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
}
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
int vp_clip = (src.x + src.width) / vpc_div -
data->viewport_c.width - data->viewport_c.x;
int int_part = dal_fixed31_32_floor(
@@ -711,7 +723,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
int_part = int_part > 0 ? int_part : 0;
data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
}
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
int vp_clip = (src.y + src.height) / vpc_div -
data->viewport_c.height - data->viewport_c.y;
int int_part = dal_fixed31_32_floor(
@@ -722,7 +734,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
}
/* Adjust for non-0 viewport offset */
- if (data->viewport.x) {
+ if (data->viewport.x && !flip_horz_scan_dir) {
int int_part;
data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
@@ -743,7 +755,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
}
- if (data->viewport_c.x) {
+ if (data->viewport_c.x && !flip_horz_scan_dir) {
int int_part;
data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
@@ -764,7 +776,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
}
- if (data->viewport.y) {
+ if (data->viewport.y && !flip_vert_scan_dir) {
int int_part;
data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
@@ -785,7 +797,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
}
- if (data->viewport_c.y) {
+ if (data->viewport_c.y && !flip_vert_scan_dir) {
int int_part;
data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
@@ -823,7 +835,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct view recout_skip = { 0 };
bool res = false;
-
+ struct dc_context *ctx = pipe_ctx->stream->ctx;
/* Important: scaling ratio calculation requires pixel format,
* lb depth calculation requires recout and taps require scaling ratios.
* Inits require viewport, taps, ratios and recout of split pipe
@@ -882,7 +894,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
/* May need to re-check lb size after this in some obscure scenario */
calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
- dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
+ DC_LOG_SCALER(
"%s: Viewport:\nheight:%d width:%d x:%d "
"y:%d\n dst_rect:\nheight:%d width:%d x:%d "
"y:%d\n",
@@ -1044,6 +1056,7 @@ static int acquire_first_split_pipe(
pipe_ctx->plane_res.ipp = pool->ipps[i];
pipe_ctx->plane_res.dpp = pool->dpps[i];
pipe_ctx->stream_res.opp = pool->opps[i];
+ pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
pipe_ctx->pipe_idx = i;
pipe_ctx->stream = stream;
@@ -1111,6 +1124,7 @@ bool dc_add_plane_to_context(
ASSERT(tail_pipe);
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
+ free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
@@ -1350,9 +1364,6 @@ bool dc_is_stream_scaling_unchanged(
return true;
}
-/* Maximum TMDS single link pixel clock 165MHz */
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
-
static void update_stream_engine_usage(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1399,6 +1410,8 @@ static int acquire_first_free_pipe(
pipe_ctx->plane_res.xfm = pool->transforms[i];
pipe_ctx->plane_res.dpp = pool->dpps[i];
pipe_ctx->stream_res.opp = pool->opps[i];
+ if (pool->dpps[i])
+ pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
pipe_ctx->pipe_idx = i;
@@ -1545,6 +1558,9 @@ enum dc_status dc_remove_stream_from_ctx(
dc->res_pool,
del_pipe->clock_source);
+ if (dc->res_pool->funcs->remove_stream_from_ctx)
+ dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);
+
memset(del_pipe, 0, sizeof(*del_pipe));
break;
@@ -1721,6 +1737,10 @@ enum dc_status resource_map_pool_resources(
pipe_ctx->stream_res.audio, true);
}
+ /* Add ABM to the resource if on EDP */
+ if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.abm = pool->abm;
+
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
@@ -2319,20 +2339,13 @@ static void set_spd_info_packet(
static void set_hdr_static_info_packet(
struct encoder_info_packet *info_packet,
- struct dc_plane_state *plane_state,
struct dc_stream_state *stream)
{
uint16_t i = 0;
enum signal_type signal = stream->signal;
- struct dc_hdr_static_metadata hdr_metadata;
uint32_t data;
- if (!plane_state)
- return;
-
- hdr_metadata = plane_state->hdr_static_ctx;
-
- if (!hdr_metadata.hdr_supported)
+ if (!stream->hdr_static_metadata.hdr_supported)
return;
if (dc_is_hdmi_signal(signal)) {
@@ -2352,55 +2365,55 @@ static void set_hdr_static_info_packet(
i = 2;
}
- data = hdr_metadata.is_hdr;
+ data = stream->hdr_static_metadata.is_hdr;
info_packet->sb[i++] = data ? 0x02 : 0x00;
info_packet->sb[i++] = 0x00;
- data = hdr_metadata.chromaticity_green_x / 2;
+ data = stream->hdr_static_metadata.chromaticity_green_x / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.chromaticity_green_y / 2;
+ data = stream->hdr_static_metadata.chromaticity_green_y / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.chromaticity_blue_x / 2;
+ data = stream->hdr_static_metadata.chromaticity_blue_x / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.chromaticity_blue_y / 2;
+ data = stream->hdr_static_metadata.chromaticity_blue_y / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.chromaticity_red_x / 2;
+ data = stream->hdr_static_metadata.chromaticity_red_x / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.chromaticity_red_y / 2;
+ data = stream->hdr_static_metadata.chromaticity_red_y / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.chromaticity_white_point_x / 2;
+ data = stream->hdr_static_metadata.chromaticity_white_point_x / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.chromaticity_white_point_y / 2;
+ data = stream->hdr_static_metadata.chromaticity_white_point_y / 2;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.max_luminance;
+ data = stream->hdr_static_metadata.max_luminance;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.min_luminance;
+ data = stream->hdr_static_metadata.min_luminance;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.maximum_content_light_level;
+ data = stream->hdr_static_metadata.maximum_content_light_level;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
- data = hdr_metadata.maximum_frame_average_light_level;
+ data = stream->hdr_static_metadata.maximum_frame_average_light_level;
info_packet->sb[i++] = data & 0xFF;
info_packet->sb[i++] = (data & 0xFF00) >> 8;
@@ -2428,7 +2441,8 @@ static void set_vsc_info_packet(
unsigned int vscPacketRevision = 0;
unsigned int i;
- if (stream->sink->link->psr_enabled) {
+	/* VSC packet revision is set to 2 when DP revision >= 1.2 */
+ if (stream->psr_version != 0) {
vscPacketRevision = 2;
}
@@ -2551,16 +2565,14 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_spd_info_packet(&info->spd, pipe_ctx->stream);
- set_hdr_static_info_packet(&info->hdrsmd,
- pipe_ctx->plane_state, pipe_ctx->stream);
+ set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
} else if (dc_is_dp_signal(signal)) {
set_vsc_info_packet(&info->vsc, pipe_ctx->stream);
set_spd_info_packet(&info->spd, pipe_ctx->stream);
- set_hdr_static_info_packet(&info->hdrsmd,
- pipe_ctx->plane_state, pipe_ctx->stream);
+ set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
}
patch_gamut_packet_checksum(&info->gamut);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index e230cc4..ce0747e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,23 +33,20 @@
/*******************************************************************************
* Private functions
******************************************************************************/
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
-static void update_stream_signal(struct dc_stream_state *stream)
+void update_stream_signal(struct dc_stream_state *stream)
{
- if (stream->output_signal == SIGNAL_TYPE_NONE) {
- struct dc_sink *dc_sink = stream->sink;
- if (dc_sink->sink_signal == SIGNAL_TYPE_NONE)
- stream->signal = stream->sink->link->connector_signal;
- else
- stream->signal = dc_sink->sink_signal;
- } else {
- stream->signal = stream->output_signal;
- }
+ struct dc_sink *dc_sink = stream->sink;
+
+ if (dc_sink->sink_signal == SIGNAL_TYPE_NONE)
+ stream->signal = stream->sink->link->connector_signal;
+ else
+ stream->signal = dc_sink->sink_signal;
if (dc_is_dvi_signal(stream->signal)) {
- if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
- stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ if (stream->ctx->dc->caps.dual_link_dvi &&
+ stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
+ stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
else
stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
@@ -196,44 +193,19 @@ bool dc_stream_set_cursor_attributes(
core_dc = stream->ctx->dc;
res_ctx = &core_dc->current_state->res_ctx;
+ stream->cursor_attributes = *attributes;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ if (pipe_ctx->stream != stream)
continue;
if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
continue;
- if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
- pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
- pipe_ctx->plane_res.ipp, attributes);
-
- if (pipe_ctx->plane_res.hubp != NULL &&
- pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
- pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.hubp, attributes);
-
- if (pipe_ctx->plane_res.mi != NULL &&
- pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
- pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.mi, attributes);
-
-
- if (pipe_ctx->plane_res.xfm != NULL &&
- pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
- pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.xfm, attributes);
-
- if (pipe_ctx->plane_res.dpp != NULL &&
- pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
- pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.dpp, attributes);
+ core_dc->hwss.set_cursor_attribute(pipe_ctx);
}
-
- stream->cursor_attributes = *attributes;
-
return true;
}
@@ -257,51 +229,19 @@ bool dc_stream_set_cursor_position(
core_dc = stream->ctx->dc;
res_ctx = &core_dc->current_state->res_ctx;
+ stream->cursor_position = *position;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
- struct mem_input *mi = pipe_ctx->plane_res.mi;
- struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dc_cursor_position pos_cpy = *position;
- struct dc_cursor_mi_param param = {
- .pixel_clk_khz = stream->timing.pix_clk_khz,
- .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
- };
if (pipe_ctx->stream != stream ||
(!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
!pipe_ctx->plane_state ||
- (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
+ !pipe_ctx->plane_res.ipp)
continue;
- if (pipe_ctx->plane_state->address.type
- == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- pos_cpy.enable = false;
-
- if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
- pos_cpy.enable = false;
-
-
- if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
- ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
-
- if (mi != NULL && mi->funcs->set_cursor_position != NULL)
- mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
-
- if (!hubp)
- continue;
-
- if (hubp->funcs->set_cursor_position != NULL)
- hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
-
- if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
- dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
-
+ core_dc->hwss.set_cursor_position(pipe_ctx);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 9d8f4a5..fa4b3c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.07"
+#define DC_VER "3.1.38"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -48,6 +48,18 @@
/*******************************************************************************
* Display Core Interfaces
******************************************************************************/
+struct dmcu_version {
+ unsigned int date;
+ unsigned int month;
+ unsigned int year;
+ unsigned int interface_version;
+};
+
+struct dc_versions {
+ const char *dc_ver;
+ struct dmcu_version dmcu_version;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -58,8 +70,11 @@ struct dc_caps {
uint32_t i2c_speed_in_khz;
unsigned int max_cursor_size;
unsigned int max_video_width;
+ int linear_pitch_alignment;
bool dcc_const_color;
bool dynamic_audio;
+ bool is_apu;
+ bool dual_link_dvi;
};
struct dc_dcc_surface_param {
@@ -92,74 +107,59 @@ struct dc_surface_dcc_cap {
};
struct dc_static_screen_events {
+ bool force_trigger;
bool cursor_update;
bool surface_update;
bool overlay_update;
};
+
+/* Surface update type is used by dc_update_surfaces_and_stream
+ * The update type is determined at the very beginning of the function based
+ * on parameters passed in and decides how much programming (or updating) is
+ * going to be done during the call.
+ *
+ * UPDATE_TYPE_FAST is used for really fast updates that do not require much
+ * logical calculations or hardware register programming. This update MUST be
+ * ISR safe on windows. Currently fast update will only be used to flip surface
+ * address.
+ *
+ * UPDATE_TYPE_MED is used for slower updates which require significant hw
+ * re-programming however do not affect bandwidth consumption or clock
+ * requirements. At present, this is the level at which front end updates
+ * that do not require us to run bw_calcs happen. These are in/out transfer func
+ * updates, viewport offset changes, recout size changes and pixel depth changes.
+ * This update can be done at ISR, but we want to minimize how often this happens.
+ *
+ * UPDATE_TYPE_FULL is slow. Really slow. This requires us to recalculate our
+ * bandwidth and clocks, possibly rearrange some pipes and reprogram anything front
+ * end related. Any time viewport dimensions, recout dimensions, scaling ratios or
+ * gamma need to be adjusted or pipe needs to be turned on (or disconnected) we do
+ * a full update. This cannot be done at ISR level and should be a rare event.
+ * Unless someone is stress testing mpo enter/exit, playing with colour or adjusting
+ * underscan we don't expect to see this call at all.
+ */
+
+enum surface_update_type {
+ UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
+ UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
+ UPDATE_TYPE_FULL, /* may need to shuffle resources */
+};
+
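The block comment above describes the three update classes. A minimal sketch of how a caller could bucket a dc_surface_update into one of them, using only fields declared later in this header; the helper name is hypothetical and this is not the driver's actual classification logic:

/* Illustrative only: coarse classification of a surface update. */
static enum surface_update_type example_classify_update(
		const struct dc_surface_update *u)
{
	if (u->plane_info || u->scaling_info)
		return UPDATE_TYPE_FULL;	/* may change bandwidth, clocks or pipes */
	if (u->in_transfer_func || u->gamma)
		return UPDATE_TYPE_MED;		/* front-end reprogramming, no bw/clk change */
	return UPDATE_TYPE_FAST;		/* e.g. flip_addr-only update */
}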
/* Forward declaration*/
struct dc;
struct dc_plane_state;
struct dc_state;
+
struct dc_cap_funcs {
bool (*get_dcc_compression_cap)(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output);
};
-struct dc_stream_state_funcs {
- bool (*adjust_vmin_vmax)(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- int vmin,
- int vmax);
- bool (*get_crtc_position)(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- unsigned int *v_pos,
- unsigned int *nom_v_pos);
-
- bool (*set_gamut_remap)(struct dc *dc,
- const struct dc_stream_state *stream);
-
- bool (*program_csc_matrix)(struct dc *dc,
- struct dc_stream_state *stream);
-
- void (*set_static_screen_events)(struct dc *dc,
- struct dc_stream_state **stream,
- int num_streams,
- const struct dc_static_screen_events *events);
-
- void (*set_dither_option)(struct dc_stream_state *stream,
- enum dc_dither_option option);
-
- void (*set_dpms)(struct dc *dc,
- struct dc_stream_state *stream,
- bool dpms_off);
-};
-
struct link_training_settings;
-struct dc_link_funcs {
- void (*set_drive_settings)(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link);
- void (*perform_link_training)(struct dc *dc,
- struct dc_link_settings *link_setting,
- bool skip_video_pattern);
- void (*set_preferred_link_settings)(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link);
- void (*enable_hpd)(const struct dc_link *link);
- void (*disable_hpd)(const struct dc_link *link);
- void (*set_test_pattern)(
- struct dc_link *link,
- enum dp_test_pattern test_pattern,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size);
-};
/* Structure to hold configuration flags set by dm at dc creation. */
struct dc_config {
@@ -184,6 +184,16 @@ enum wm_report_mode {
WM_REPORT_OVERRIDE = 1,
};
+struct dc_clocks {
+ int dispclk_khz;
+ int max_supported_dppclk_khz;
+ int dppclk_khz;
+ int dcfclk_khz;
+ int socclk_khz;
+ int dcfclk_deep_sleep_khz;
+ int fclk_khz;
+};
+
struct dc_debug {
bool surface_visual_confirm;
bool sanity_checks;
@@ -225,15 +235,17 @@ struct dc_debug {
bool disable_stereo_support;
bool vsr_support;
bool performance_trace;
+ bool az_endpoint_mute_only;
+ bool always_use_regamma;
+ bool p010_mpo_support;
};
struct dc_state;
struct resource_pool;
struct dce_hwseq;
struct dc {
+ struct dc_versions versions;
struct dc_caps caps;
struct dc_cap_funcs cap_funcs;
- struct dc_stream_state_funcs stream_funcs;
- struct dc_link_funcs link_funcs;
struct dc_config config;
struct dc_debug debug;
@@ -266,6 +278,10 @@ struct dc {
*/
struct dm_pp_display_configuration prev_display_config;
+ bool optimized_required;
+
+ bool apply_edp_fast_boot_optimization;
+
/* FBC compressor */
#if defined(CONFIG_DRM_AMD_DC_FBC)
struct compressor *fbc_compressor;
@@ -302,9 +318,6 @@ struct dc_init_data {
struct dc_config flags;
uint32_t log_mask;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
- uint64_t fbc_gpu_addr;
-#endif
};
struct dc *dc_create(const struct dc_init_data *init_params);
@@ -333,24 +346,6 @@ enum color_transfer_func {
transfer_func_gamma_26
};
-enum color_color_space {
- color_space_unsupported,
- color_space_srgb,
- color_space_bt601,
- color_space_bt709,
- color_space_xv_ycc_bt601,
- color_space_xv_ycc_bt709,
- color_space_xr_rgb,
- color_space_bt2020,
- color_space_adobe,
- color_space_dci_p3,
- color_space_sc_rgb_ms_ref,
- color_space_display_native,
- color_space_app_ctrl,
- color_space_dolby_vision,
- color_space_custom_coordinates
-};
-
struct dc_hdr_static_metadata {
/* display chromaticities and white point in units of 0.00001 */
unsigned int chromaticity_green_x;
@@ -374,7 +369,7 @@ struct dc_hdr_static_metadata {
enum dc_transfer_func_type {
TF_TYPE_PREDEFINED,
TF_TYPE_DISTRIBUTED_POINTS,
- TF_TYPE_BYPASS
+ TF_TYPE_BYPASS,
};
struct dc_transfer_func_distributed_points {
@@ -393,6 +388,7 @@ enum dc_transfer_func_predefined {
TRANSFER_FUNCTION_BT709,
TRANSFER_FUNCTION_PQ,
TRANSFER_FUNCTION_LINEAR,
+ TRANSFER_FUNCTION_UNITY,
};
struct dc_transfer_func {
@@ -400,6 +396,8 @@ struct dc_transfer_func {
struct dc_transfer_func_distributed_points tf_pts;
enum dc_transfer_func_type type;
enum dc_transfer_func_predefined tf;
+ /* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
+ uint32_t sdr_ref_white_level;
struct dc_context *ctx;
};
@@ -415,8 +413,40 @@ struct dc_plane_status {
bool is_right_eye;
};
+union surface_update_flags {
+
+ struct {
+ /* Medium updates */
+ uint32_t dcc_change:1;
+ uint32_t color_space_change:1;
+ uint32_t input_tf_change:1;
+ uint32_t horizontal_mirror_change:1;
+ uint32_t per_pixel_alpha_change:1;
+ uint32_t rotation_change:1;
+ uint32_t swizzle_change:1;
+ uint32_t scaling_change:1;
+ uint32_t position_change:1;
+ uint32_t in_transfer_func_change:1;
+ uint32_t input_csc_change:1;
+ uint32_t output_tf_change:1;
+ uint32_t pixel_format_change:1;
+
+ /* Full updates */
+ uint32_t new_plane:1;
+ uint32_t bpp_change:1;
+ uint32_t gamma_change:1;
+ uint32_t bandwidth_change:1;
+ uint32_t clock_change:1;
+ uint32_t stereo_format_change:1;
+ uint32_t full_update:1;
+ } bits;
+
+ uint32_t raw;
+};
+
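A short sketch of how the new update_flags union is meant to be read: .raw answers whether anything changed at all, while the named bits separate medium from full updates. The helper below is hypothetical:

/* Illustrative only: check whether a plane's pending changes force a full update. */
static bool example_plane_needs_full_update(const struct dc_plane_state *plane)
{
	union surface_update_flags flags = plane->update_flags;

	if (!flags.raw)
		return false;	/* no pending changes */

	return flags.bits.full_update ||
	       flags.bits.new_plane ||
	       flags.bits.bandwidth_change ||
	       flags.bits.clock_change;
}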
struct dc_plane_state {
struct dc_plane_address address;
+ struct dc_plane_flip_time time;
struct scaling_taps scaling_quality;
struct rect src_rect;
struct rect dst_rect;
@@ -426,27 +456,31 @@ struct dc_plane_state {
union dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
- struct dc_hdr_static_metadata hdr_static_ctx;
struct dc_gamma *gamma_correction;
struct dc_transfer_func *in_transfer_func;
+ struct dc_bias_and_scale *bias_and_scale;
+ struct csc_transform input_csc_color_matrix;
+ struct fixed31_32 coeff_reduction_factor;
+ uint32_t sdr_white_level;
- // sourceContentAttribute cache
- bool is_source_input_valid;
- struct dc_hdr_static_metadata source_input_mastering_info;
- enum color_color_space source_input_color_space;
- enum color_transfer_func source_input_tf;
+ // TODO: No longer used, remove
+ struct dc_hdr_static_metadata hdr_static_ctx;
enum dc_color_space color_space;
+ enum color_transfer_func input_tf;
+
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
+ bool is_tiling_rotated;
bool per_pixel_alpha;
bool visible;
bool flip_immediate;
bool horizontal_mirror;
+ union surface_update_flags update_flags;
/* private to DC core */
struct dc_plane_status status;
struct dc_context *ctx;
@@ -463,10 +497,13 @@ struct dc_plane_info {
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
- enum dc_color_space color_space; /*todo: wrong place, fits in scaling info*/
+ enum dc_color_space color_space;
+ enum color_transfer_func input_tf;
+ unsigned int sdr_white_level;
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
+ bool input_csc_enabled;
};
struct dc_scaling_info {
@@ -483,13 +520,16 @@ struct dc_surface_update {
struct dc_flip_addrs *flip_addr;
struct dc_plane_info *plane_info;
struct dc_scaling_info *scaling_info;
+
/* following updates require alloc/sleep/spin that is not isr safe,
* null means no updates
*/
- /* gamma TO BE REMOVED */
struct dc_gamma *gamma;
+ enum color_transfer_func color_input_tf;
struct dc_transfer_func *in_transfer_func;
- struct dc_hdr_static_metadata *hdr_static_metadata;
+
+ struct csc_transform *input_csc_color_matrix;
+ struct fixed31_32 *coeff_reduction_factor;
};
/*
@@ -517,6 +557,7 @@ struct dc_transfer_func *dc_create_transfer_func(void);
*/
struct dc_flip_addrs {
struct dc_plane_address address;
+ unsigned int flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
};
@@ -524,197 +565,7 @@ struct dc_flip_addrs {
bool dc_post_update_surfaces_to_stream(
struct dc *dc);
-/* Surface update type is used by dc_update_surfaces_and_stream
- * The update type is determined at the very beginning of the function based
- * on parameters passed in and decides how much programming (or updating) is
- * going to be done during the call.
- *
- * UPDATE_TYPE_FAST is used for really fast updates that do not require much
- * logical calculations or hardware register programming. This update MUST be
- * ISR safe on windows. Currently fast update will only be used to flip surface
- * address.
- *
- * UPDATE_TYPE_MED is used for slower updates which require significant hw
- * re-programming however do not affect bandwidth consumption or clock
- * requirements. At present, this is the level at which front end updates
- * that do not require us to run bw_calcs happen. These are in/out transfer func
- * updates, viewport offset changes, recout size changes and pixel depth changes.
- * This update can be done at ISR, but we want to minimize how often this happens.
- *
- * UPDATE_TYPE_FULL is slow. Really slow. This requires us to recalculate our
- * bandwidth and clocks, possibly rearrange some pipes and reprogram anything front
- * end related. Any time viewport dimensions, recout dimensions, scaling ratios or
- * gamma need to be adjusted or pipe needs to be turned on (or disconnected) we do
- * a full update. This cannot be done at ISR level and should be a rare event.
- * Unless someone is stress testing mpo enter/exit, playing with colour or adjusting
- * underscan we don't expect to see this call at all.
- */
-
-enum surface_update_type {
- UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
- UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
- UPDATE_TYPE_FULL, /* may need to shuffle resources */
-};
-
-/*******************************************************************************
- * Stream Interfaces
- ******************************************************************************/
-
-struct dc_stream_status {
- int primary_otg_inst;
- int stream_enc_inst;
- int plane_count;
- struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
-
- /*
- * link this stream passes through
- */
- struct dc_link *link;
-};
-
-struct dc_stream_state {
- struct dc_sink *sink;
- struct dc_crtc_timing timing;
-
- struct rect src; /* composition area */
- struct rect dst; /* stream addressable area */
-
- struct audio_info audio_info;
-
- struct freesync_context freesync_ctx;
-
- struct dc_transfer_func *out_transfer_func;
- struct colorspace_transform gamut_remap_matrix;
- struct csc_transform csc_color_matrix;
-
- enum signal_type output_signal;
-
- enum dc_color_space output_color_space;
- enum dc_dither_option dither_option;
-
- enum view_3d_format view_format;
-
- bool ignore_msa_timing_param;
- /* TODO: custom INFO packets */
- /* TODO: ABM info (DMCU) */
- /* TODO: PSR info */
- /* TODO: CEA VIC */
-
- /* from core_stream struct */
- struct dc_context *ctx;
-
- /* used by DCP and FMT */
- struct bit_depth_reduction_params bit_depth_params;
- struct clamping_and_pixel_encoding_params clamping;
-
- int phy_pix_clk;
- enum signal_type signal;
- bool dpms_off;
-
- struct dc_stream_status status;
-
- struct dc_cursor_attributes cursor_attributes;
-
- /* from stream struct */
- struct kref refcount;
-};
-
-struct dc_stream_update {
- struct rect src;
- struct rect dst;
- struct dc_transfer_func *out_transfer_func;
-};
-
-bool dc_is_stream_unchanged(
- struct dc_stream_state *old_stream, struct dc_stream_state *stream);
-bool dc_is_stream_scaling_unchanged(
- struct dc_stream_state *old_stream, struct dc_stream_state *stream);
-
-/*
- * Set up surface attributes and associate to a stream
- * The surfaces parameter is an absolute set of all surface active for the stream.
- * If no surfaces are provided, the stream will be blanked; no memory read.
- * Any flip related attribute changes must be done through this interface.
- *
- * After this call:
- * Surfaces attributes are programmed and configured to be composed into stream.
- * This does not trigger a flip. No surface address is programmed.
- */
-
-bool dc_commit_planes_to_stream(
- struct dc *dc,
- struct dc_plane_state **plane_states,
- uint8_t new_plane_count,
- struct dc_stream_state *dc_stream,
- struct dc_state *state);
-
-void dc_commit_updates_for_stream(struct dc *dc,
- struct dc_surface_update *srf_updates,
- int surface_count,
- struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_plane_state **plane_states,
- struct dc_state *state);
-/*
- * Log the current stream state.
- */
-void dc_stream_log(
- const struct dc_stream_state *stream,
- struct dal_logger *dc_logger,
- enum dc_log_type log_type);
-
-uint8_t dc_get_current_stream_count(struct dc *dc);
-struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
-
-/*
- * Return the current frame counter.
- */
-uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
-
-/* TODO: Return parsed values rather than direct register read
- * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos)
- * being refactored properly to be dce-specific
- */
-bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
- uint32_t *v_blank_start,
- uint32_t *v_blank_end,
- uint32_t *h_position,
- uint32_t *v_position);
-
-enum dc_status dc_add_stream_to_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream);
-
-enum dc_status dc_remove_stream_from_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream);
-
-
-bool dc_add_plane_to_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context);
-
-bool dc_remove_plane_from_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context);
-
-bool dc_rem_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context);
-
-bool dc_add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state * const *plane_states,
- int plane_count,
- struct dc_state *context);
+#include "dc_stream.h"
/*
* Structure to store surface/stream associations for validation
@@ -725,22 +576,12 @@ struct dc_validation_set {
uint8_t plane_count;
};
-enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
-
enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx);
-/*
- * This function takes a stream and checks if it is guaranteed to be supported.
- * Guaranteed means that MAX_COFUNC similar streams are supported.
- *
- * After this call:
- * No hardware is programmed for call. Only validation is done.
- */
-
void dc_resource_state_construct(
const struct dc *dc,
@@ -767,42 +608,6 @@ void dc_resource_state_destruct(struct dc_state *context);
*/
bool dc_commit_state(struct dc *dc, struct dc_state *context);
-/*
- * Set up streams and links associated to drive sinks
- * The streams parameter is an absolute set of all active streams.
- *
- * After this call:
- * Phy, Encoder, Timing Generator are programmed and enabled.
- * New streams are enabled with blank stream; no memory read.
- */
-/*
- * Enable stereo when commit_streams is not required,
- * for example, frame alternate.
- */
-bool dc_enable_stereo(
- struct dc *dc,
- struct dc_state *context,
- struct dc_stream_state *streams[],
- uint8_t stream_count);
-
-/**
- * Create a new default stream for the requested sink
- */
-struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
-
-void dc_stream_retain(struct dc_stream_state *dc_stream);
-void dc_stream_release(struct dc_stream_state *dc_stream);
-
-struct dc_stream_status *dc_stream_get_status(
- struct dc_stream_state *dc_stream);
-
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
- struct dc_surface_update *updates,
- int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status);
-
struct dc_state *dc_create_state(void);
void dc_retain_state(struct dc_state *context);
@@ -835,171 +640,7 @@ struct dpcd_caps {
bool dpcd_display_control_capable;
};
-struct dc_link_status {
- struct dpcd_caps *dpcd_caps;
-};
-
-/* DP MST stream allocation (payload bandwidth number) */
-struct link_mst_stream_allocation {
- /* DIG front */
- const struct stream_encoder *stream_enc;
- /* associate DRM payload table with DC stream encoder */
- uint8_t vcp_id;
- /* number of slots required for the DP stream in transport packet */
- uint8_t slot_count;
-};
-
-/* DP MST stream allocation table */
-struct link_mst_stream_allocation_table {
- /* number of DP video streams */
- int stream_count;
- /* array of stream allocations */
- struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
-};
-
-/*
- * A link contains one or more sinks and their connected status.
- * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
- */
-struct dc_link {
- struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
- unsigned int sink_count;
- struct dc_sink *local_sink;
- unsigned int link_index;
- enum dc_connection_type type;
- enum signal_type connector_signal;
- enum dc_irq_source irq_source_hpd;
- enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
- /* caps is the same as reported_link_cap. link_traing use
- * reported_link_cap. Will clean up. TODO
- */
- struct dc_link_settings reported_link_cap;
- struct dc_link_settings verified_link_cap;
- struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting;
- struct dc_link_settings preferred_link_setting;
-
- uint8_t ddc_hw_inst;
-
- uint8_t hpd_src;
-
- uint8_t link_enc_hw_inst;
-
- bool test_pattern_enabled;
- union compliance_test_state compliance_test_state;
-
- void *priv;
-
- struct ddc_service *ddc;
-
- bool aux_mode;
-
- /* Private to DC core */
-
- const struct dc *dc;
-
- struct dc_context *ctx;
-
- struct link_encoder *link_enc;
- struct graphics_object_id link_id;
- union ddi_channel_mapping ddi_channel_mapping;
- struct connector_device_tag_info device_tag;
- struct dpcd_caps dpcd_caps;
- unsigned short chip_caps;
- unsigned int dpcd_sink_count;
- enum edp_revision edp_revision;
- bool psr_enabled;
-
- /* MST record stream using this link */
- struct link_flags {
- bool dp_keep_receiver_powered;
- } wa_flags;
- struct link_mst_stream_allocation_table mst_stream_alloc_table;
-
- struct dc_link_status link_status;
-
-};
-
-const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
-
-/*
- * Return an enumerated dc_link. dc_link order is constant and determined at
- * boot time. They cannot be created or destroyed.
- * Use dc_get_caps() to get number of links.
- */
-static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
-{
- return dc->links[link_index];
-}
-
-/* Set backlight level of an embedded panel (eDP, LVDS). */
-bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
- uint32_t frame_ramp, const struct dc_stream_state *stream);
-
-bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
-
-bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
-
-bool dc_link_setup_psr(struct dc_link *dc_link,
- const struct dc_stream_state *stream, struct psr_config *psr_config,
- struct psr_context *psr_context);
-
-/* Request DC to detect if there is a Panel connected.
- * boot - If this call is during initial boot.
- * Return false for any type of detection failure or MST detection
- * true otherwise. True meaning further action is required (status update
- * and OS notification).
- */
-enum dc_detect_reason {
- DETECT_REASON_BOOT,
- DETECT_REASON_HPD,
- DETECT_REASON_HPDRX,
-};
-
-bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
-
-/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
- * Return:
- * true - Downstream port status changed. DM should call DC to do the
- * detection.
- * false - no change in Downstream port status. No further action required
- * from DM. */
-bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
- union hpd_irq_data *hpd_irq_dpcd_data);
-
-struct dc_sink_init_data;
-
-struct dc_sink *dc_link_add_remote_sink(
- struct dc_link *dc_link,
- const uint8_t *edid,
- int len,
- struct dc_sink_init_data *init_data);
-
-void dc_link_remove_remote_sink(
- struct dc_link *link,
- struct dc_sink *sink);
-
-/* Used by diagnostics for virtual link at the moment */
-
-void dc_link_dp_set_drive_settings(
- struct dc_link *link,
- struct link_training_settings *lt_settings);
-
-enum link_training_result dc_link_dp_perform_link_training(
- struct dc_link *link,
- const struct dc_link_settings *link_setting,
- bool skip_video_pattern);
-
-void dc_link_dp_enable_hpd(const struct dc_link *link);
-
-void dc_link_dp_disable_hpd(const struct dc_link *link);
-
-bool dc_link_dp_set_test_pattern(
- struct dc_link *link,
- enum dp_test_pattern test_pattern,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size);
+#include "dc_link.h"
/*******************************************************************************
* Sink Interfaces - A sink corresponds to a display output device
@@ -1037,6 +678,7 @@ struct dc_sink {
/* private to dc_sink.c */
struct kref refcount;
+
};
void dc_sink_retain(struct dc_sink *sink);
@@ -1051,18 +693,6 @@ struct dc_sink_init_data {
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
-/*******************************************************************************
- * Cursor interfaces - To manages the cursor within a stream
- ******************************************************************************/
-/* TODO: Deprecated once we switch to dc_set_cursor_position */
-bool dc_stream_set_cursor_attributes(
- struct dc_stream_state *stream,
- const struct dc_cursor_attributes *attributes);
-
-bool dc_stream_set_cursor_position(
- struct dc_stream_state *stream,
- const struct dc_cursor_position *position);
-
/* Newer interfaces */
struct dc_cursor {
struct dc_plane_address address;
@@ -1076,7 +706,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
struct dc *dc,
uint32_t src_id,
uint32_t ext_id);
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
enum dc_irq_source dc_get_hpd_irq_source_at_index(
struct dc *dc, uint32_t link_index);
@@ -1090,14 +720,4 @@ void dc_set_power_state(
enum dc_acpi_cm_power_state power_state);
void dc_resume(struct dc *dc);
-/*
- * DPCD access interfaces
- */
-
-bool dc_submit_i2c(
- struct dc *dc,
- uint32_t link_index,
- struct i2c_command *cmd);
-
-
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 273d80a..d9b84ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -111,6 +111,8 @@ struct dc_vbios_funcs {
struct dc_bios *bios);
bool (*is_accelerated_mode)(
struct dc_bios *bios);
+ uint32_t (*get_vga_enabled_displays)(
+ struct dc_bios *bios);
void (*get_bios_event_info)(
struct dc_bios *bios,
struct bios_event_info *info);
@@ -199,6 +201,7 @@ struct dc_vbios_funcs {
};
struct bios_registers {
+ uint32_t BIOS_SCRATCH_3;
uint32_t BIOS_SCRATCH_6;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 77e2de6..2726b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -255,7 +255,7 @@ enum dpcd_downstream_port_detailed_type {
DOWN_STREAM_DETAILED_DP_PLUS_PLUS
};
-union dwnstream_port_caps_byte1 {
+union dwnstream_port_caps_byte2 {
struct {
uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
uint8_t RESERVED:6;
@@ -298,6 +298,32 @@ union dwnstream_port_caps_byte3_hdmi {
/*4-byte structure for detailed capabilities of a down-stream port
(DP-to-TMDS converter).*/
+union dwnstream_portxcaps {
+ struct {
+ union dwnstream_port_caps_byte0 byte0;
+ unsigned char max_TMDS_clock; //byte1
+ union dwnstream_port_caps_byte2 byte2;
+
+ union {
+ union dwnstream_port_caps_byte3_dvi byteDVI;
+ union dwnstream_port_caps_byte3_hdmi byteHDMI;
+ } byte3;
+ } bytes;
+
+ unsigned char raw[4];
+};
+
+union downstream_port {
+ struct {
+ unsigned char present:1;
+ unsigned char type:2;
+ unsigned char format_conv:1;
+ unsigned char detailed_caps:1;
+ unsigned char reserved:3;
+ } bits;
+ unsigned char raw;
+};
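/*
 * Illustrative sketch (editorial, not part of this patch): decoding the
 * downstream-port-present DPCD byte with the union added above.  Here
 * dpcd_data is assumed to be a buffer of raw receiver-capability bytes
 * already read by the DM, indexed by the standard DRM DP helper offset
 * DP_DOWNSTREAMPORT_PRESENT; parse_downstream_detailed_caps() is a
 * hypothetical DM helper.
 *
 *	union downstream_port port;
 *
 *	port.raw = dpcd_data[DP_DOWNSTREAMPORT_PRESENT];
 *	if (port.bits.present && port.bits.detailed_caps) {
 *		// the detailed caps bytes can then be parsed into a
 *		// union dwnstream_portxcaps
 *		parse_downstream_detailed_caps(link, dpcd_data);
 *	}
 */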
+
union sink_status {
struct {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 90e81f7..48e1fcf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -178,8 +178,13 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
field_value = get_reg_field_value_ex(reg_val, mask, shift);
- if (field_value == condition_value)
+ if (field_value == condition_value) {
+ if (i * delay_between_poll_us > 1000)
+ dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n",
+ delay_between_poll_us * i / 1000,
+ func_name, line);
return reg_val;
+ }
}
dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 1a9f57f..b83a7dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -413,12 +413,14 @@ struct dc_cursor_mi_param {
enum {
GAMMA_RGB_256_ENTRIES = 256,
GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
- GAMMA_MAX_ENTRIES = 1024
+ GAMMA_CS_TFM_1D_ENTRIES = 4096,
+ GAMMA_MAX_ENTRIES = 4096
};
enum dc_gamma_type {
GAMMA_RGB_256 = 1,
- GAMMA_RGB_FLOAT_1024 = 2
+ GAMMA_RGB_FLOAT_1024 = 2,
+ GAMMA_CS_TFM_1D = 3,
};
struct dc_gamma {
@@ -434,6 +436,8 @@ struct dc_gamma {
/* private to DC core */
struct dc_context *ctx;
+
+ bool is_identity;
};
/* Used by both ipp amd opp functions*/
@@ -492,15 +496,24 @@ struct dc_cursor_attributes {
enum dc_color_space {
COLOR_SPACE_UNKNOWN,
COLOR_SPACE_SRGB,
+ COLOR_SPACE_XR_RGB,
COLOR_SPACE_SRGB_LIMITED,
+ COLOR_SPACE_MSREF_SCRGB,
COLOR_SPACE_YCBCR601,
COLOR_SPACE_YCBCR709,
+ COLOR_SPACE_XV_YCC_709,
+ COLOR_SPACE_XV_YCC_601,
COLOR_SPACE_YCBCR601_LIMITED,
COLOR_SPACE_YCBCR709_LIMITED,
COLOR_SPACE_2020_RGB_FULLRANGE,
COLOR_SPACE_2020_RGB_LIMITEDRANGE,
COLOR_SPACE_2020_YCBCR,
COLOR_SPACE_ADOBERGB,
+ COLOR_SPACE_DCIP3,
+ COLOR_SPACE_DISPLAYNATIVE,
+ COLOR_SPACE_DOLBYVISION,
+ COLOR_SPACE_APPCTRL,
+ COLOR_SPACE_CUSTOMPOINTS,
};
enum dc_dither_option {
@@ -570,8 +583,6 @@ enum dc_timing_standard {
TIMING_STANDARD_MAX
};
-
-
enum dc_color_depth {
COLOR_DEPTH_UNDEFINED,
COLOR_DEPTH_666,
@@ -664,9 +675,35 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
+enum trigger_delay {
+ TRIGGER_DELAY_NEXT_PIXEL = 0,
+ TRIGGER_DELAY_NEXT_LINE,
+};
-struct dc_crtc_timing {
+enum crtc_event {
+ CRTC_EVENT_VSYNC_RISING = 0,
+ CRTC_EVENT_VSYNC_FALLING
+};
+struct crtc_trigger_info {
+ bool enabled;
+ struct dc_stream_state *event_source;
+ enum crtc_event event;
+ enum trigger_delay delay;
+};
+
+enum vrr_state {
+ VRR_STATE_OFF = 0,
+ VRR_STATE_VARIABLE,
+ VRR_STATE_FIXED,
+};
+
+struct dc_crtc_timing_adjust {
+ uint32_t v_total_min;
+ uint32_t v_total_max;
+};
+
+struct dc_crtc_timing {
uint32_t h_total;
uint32_t h_border_left;
uint32_t h_addressable;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
new file mode 100644
index 0000000..fb4d9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2012-14 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_LINK_H_
+#define DC_LINK_H_
+
+#include "dc_types.h"
+#include "grph_object_defs.h"
+
+struct dc_link_status {
+ struct dpcd_caps *dpcd_caps;
+};
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct link_mst_stream_allocation {
+ /* DIG front */
+ const struct stream_encoder *stream_enc;
+ /* associate DRM payload table with DC stream encoder */
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in transport packet */
+ uint8_t slot_count;
+};
+
+/* DP MST stream allocation table */
+struct link_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ bool is_hpd_filter_disabled;
+
+ /* caps is the same as reported_link_cap. link_training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting;
+ struct dc_link_settings preferred_link_setting;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+
+ bool test_pattern_enabled;
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ enum edp_revision edp_revision;
+ bool psr_enabled;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ } wa_flags;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+
+};
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
+
+/*
+ * Return an enumerated dc_link. dc_link order is constant and determined at
+ * boot time. They cannot be created or destroyed.
+ * Use dc_get_caps() to get number of links.
+ */
+static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+ return dc->links[link_index];
+}
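/*
 * Illustrative sketch (editorial, not part of this patch): enumerating the
 * links at boot and running detection on each.  The link_count field is an
 * assumption about struct dc; the number of links can equally be obtained
 * as the comment above describes.
 *
 *	int i;
 *
 *	for (i = 0; i < dc->link_count; i++) {
 *		struct dc_link *link = dc_get_link_at_index(dc, i);
 *
 *		if (dc_link_detect(link, DETECT_REASON_BOOT)) {
 *			// status changed: update sinks and notify the OS
 *		}
 *	}
 */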
+
+/* Set backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream);
+
+bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+
+bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
+
+bool dc_link_setup_psr(struct dc_link *dc_link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+
+/* Request DC to detect whether a panel is connected.
+ * reason - DETECT_REASON_BOOT if this call is made during initial boot.
+ * Return false for any type of detection failure or for MST detection;
+ * true otherwise. True means further action is required (status update
+ * and OS notification).
+ */
+enum dc_detect_reason {
+ DETECT_REASON_BOOT,
+ DETECT_REASON_HPD,
+ DETECT_REASON_HPDRX,
+};
+
+bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
+
+/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM. */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
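/*
 * Illustrative sketch (editorial, not part of this patch): the DM-side
 * short-pulse flow the two comments above describe, assuming 'link' was
 * obtained from dc_get_link_at_index(); handle_hpd_rx_change() is a
 * hypothetical DM hook.
 *
 *	union hpd_irq_data hpd_irq_data;
 *
 *	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 *	if (dc_link_handle_hpd_rx_irq(link, &hpd_irq_data)) {
 *		// downstream port status changed; re-run detection
 *		if (dc_link_detect(link, DETECT_REASON_HPDRX))
 *			handle_hpd_rx_change(link);
 *	}
 */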
+
+struct dc_sink_init_data;
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *dc_link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+
+void dc_link_remove_remote_sink(
+ struct dc_link *link,
+ struct dc_sink *sink);
+
+/* Used by diagnostics for virtual link at the moment */
+
+void dc_link_dp_set_drive_settings(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+enum link_training_result dc_link_dp_perform_link_training(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+void dc_link_dp_enable_hpd(const struct dc_link *link);
+
+void dc_link_dp_disable_hpd(const struct dc_link *link);
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+
+void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
+
+/*
+ * DPCD access interfaces
+ */
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd);
+
+#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
new file mode 100644
index 0000000..d017df5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2012-14 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_STREAM_H_
+#define DC_STREAM_H_
+
+#include "dc_types.h"
+#include "grph_object_defs.h"
+
+/*******************************************************************************
+ * Stream Interfaces
+ ******************************************************************************/
+
+struct dc_stream_status {
+ int primary_otg_inst;
+ int stream_enc_inst;
+ int plane_count;
+ struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+
+ /*
+ * link this stream passes through
+ */
+ struct dc_link *link;
+};
+
+struct dc_stream_state {
+ struct dc_sink *sink;
+ struct dc_crtc_timing timing;
+ struct dc_crtc_timing_adjust timing_adjust;
+ struct vrr_params vrr_params;
+
+ struct rect src; /* composition area */
+ struct rect dst; /* stream addressable area */
+
+ struct audio_info audio_info;
+
+ struct freesync_context freesync_ctx;
+
+ struct dc_hdr_static_metadata hdr_static_metadata;
+ struct dc_transfer_func *out_transfer_func;
+ struct colorspace_transform gamut_remap_matrix;
+ struct csc_transform csc_color_matrix;
+
+ enum dc_color_space output_color_space;
+ enum dc_dither_option dither_option;
+
+ enum view_3d_format view_format;
+ enum color_transfer_func output_tf;
+
+ bool ignore_msa_timing_param;
+ /* TODO: custom INFO packets */
+ /* TODO: ABM info (DMCU) */
+ /* PSR info */
+ unsigned char psr_version;
+ /* TODO: CEA VIC */
+
+ /* DMCU info */
+ unsigned int abm_level;
+ unsigned int bl_pwm_level;
+
+ /* from core_stream struct */
+ struct dc_context *ctx;
+
+ /* used by DCP and FMT */
+ struct bit_depth_reduction_params bit_depth_params;
+ struct clamping_and_pixel_encoding_params clamping;
+
+ int phy_pix_clk;
+ enum signal_type signal;
+ bool dpms_off;
+
+ struct dc_stream_status status;
+
+ struct dc_cursor_attributes cursor_attributes;
+ struct dc_cursor_position cursor_position;
+
+ /* from stream struct */
+ struct kref refcount;
+
+ struct crtc_trigger_info triggered_crtc_reset;
+
+ /* Computed state bits */
+ bool mode_changed : 1;
+
+};
+
+struct dc_stream_update {
+ struct rect src;
+ struct rect dst;
+ struct dc_transfer_func *out_transfer_func;
+ struct dc_hdr_static_metadata *hdr_static_metadata;
+ enum color_transfer_func color_output_tf;
+ unsigned int *abm_level;
+};
+
+bool dc_is_stream_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+bool dc_is_stream_scaling_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+
+/*
+ * Set up surface attributes and associate them to a stream.
+ * The surfaces parameter is an absolute set of all surfaces active for the stream.
+ * If no surfaces are provided, the stream will be blanked; no memory read.
+ * Any flip related attribute changes must be done through this interface.
+ *
+ * After this call:
+ * Surface attributes are programmed and configured to be composed into the stream.
+ * This does not trigger a flip. No surface address is programmed.
+ */
+
+bool dc_commit_planes_to_stream(
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *state);
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_plane_state **plane_states,
+ struct dc_state *state);
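/*
 * Illustrative sketch (editorial, not part of this patch): pushing a
 * stream-level property change through dc_commit_updates_for_stream().
 * Only the ABM level is updated here; abm_level is a DM-owned variable,
 * and passing a zero-length surface update list is an assumption about
 * what the implementation accepts.
 *
 *	struct dc_stream_update stream_update;
 *
 *	memset(&stream_update, 0, sizeof(stream_update));
 *	stream_update.abm_level = &abm_level;
 *
 *	dc_commit_updates_for_stream(dc, NULL, 0, stream, &stream_update,
 *				     stream->status.plane_states, state);
 */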
+/*
+ * Log the current stream state.
+ */
+void dc_stream_log(
+ const struct dc_stream_state *stream,
+ struct dal_logger *dc_logger,
+ enum dc_log_type log_type);
+
+uint8_t dc_get_current_stream_count(struct dc *dc);
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
+
+/*
+ * Return the current frame counter.
+ */
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
+
+/* TODO: Return parsed values rather than direct register read
+ * This has a dependency on the caller (amdgpu_display_get_crtc_scanoutpos)
+ * being refactored properly to be dce-specific
+ */
+bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+enum dc_status dc_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
+
+enum dc_status dc_remove_stream_from_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
+
+
+bool dc_add_plane_to_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+
+bool dc_remove_plane_from_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+
+bool dc_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context);
+
+bool dc_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *context);
+
+enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
+
+/*
+ * This function takes a stream and checks if it is guaranteed to be supported.
+ * Guaranteed means that MAX_COFUNC similar streams are supported.
+ *
+ * After this call:
+ * No hardware is programmed by this call. Only validation is done.
+ */
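/*
 * Illustrative sketch (editorial, not part of this patch): the usual
 * validate-then-add ordering, assuming DC_OK is the success value of
 * enum dc_status.
 *
 *	if (dc_validate_stream(dc, stream) != DC_OK)
 *		return false;
 *
 *	if (dc_add_stream_to_ctx(dc, new_ctx, stream) != DC_OK)
 *		return false;
 */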
+
+/*
+ * Set up streams and the links associated with them to drive sinks.
+ * The streams parameter is an absolute set of all active streams.
+ *
+ * After this call:
+ * Phy, Encoder, Timing Generator are programmed and enabled.
+ * New streams are enabled with blank stream; no memory read.
+ */
+/*
+ * Enable stereo when commit_streams is not required,
+ * for example, frame alternate.
+ */
+bool dc_enable_stereo(
+ struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count);
+
+
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status);
+
+/**
+ * Create a new default stream for the requested sink
+ */
+struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
+
+void update_stream_signal(struct dc_stream_state *stream);
+
+void dc_stream_retain(struct dc_stream_state *dc_stream);
+void dc_stream_release(struct dc_stream_state *dc_stream);
+
+struct dc_stream_status *dc_stream_get_status(
+ struct dc_stream_state *dc_stream);
+
+/*******************************************************************************
+ * Cursor interfaces - To manage the cursor within a stream
+ ******************************************************************************/
+/* TODO: Deprecated once we switch to dc_set_cursor_position */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes);
+
+bool dc_stream_set_cursor_position(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position);
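/*
 * Illustrative sketch (editorial, not part of this patch): programming the
 * cursor for a stream.  Attributes (dimensions, format, surface address)
 * are typically set before the position; both structs are filled in by the
 * DM beforehand.
 *
 *	if (dc_stream_set_cursor_attributes(stream, &attributes))
 *		dc_stream_set_cursor_position(stream, &position);
 */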
+
+bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ int vmin,
+ int vmax);
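/*
 * Illustrative sketch (editorial, not part of this patch): entering a
 * variable-refresh window on a single stream, assuming vmin/vmax are the
 * V_TOTAL bounds in lines (see dc_crtc_timing_adjust in dc_hw_types.h).
 * vrr_margin_in_lines is a hypothetical, DM-chosen headroom value.
 *
 *	struct dc_stream_state *streams[1] = { stream };
 *	int vmin = stream->timing.v_total;
 *	int vmax = vmin + vrr_margin_in_lines;
 *
 *	dc_stream_adjust_vmin_vmax(dc, streams, 1, vmin, vmax);
 */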
+
+bool dc_stream_get_crtc_position(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ unsigned int *v_pos,
+ unsigned int *nom_v_pos);
+
+bool dc_stream_configure_crc(struct dc *dc,
+ struct dc_stream_state *stream,
+ bool enable,
+ bool continuous);
+
+bool dc_stream_get_crc(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t *r_cr,
+ uint32_t *g_y,
+ uint32_t *b_cb);
+
+void dc_stream_set_static_screen_events(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ const struct dc_static_screen_events *events);
+
+void dc_stream_set_dither_option(struct dc_stream_state *stream,
+ enum dc_dither_option option);
+
+#endif /* DC_STREAM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index a8698e3..9441305 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -193,6 +193,10 @@ union display_content_support {
} bits;
};
+struct dc_panel_patch {
+ unsigned int dppowerup_delay;
+};
+
struct dc_edid_caps {
/* sink identification */
uint16_t manufacturer_id;
@@ -218,6 +222,9 @@ struct dc_edid_caps {
bool lte_340mcsc_scramble;
bool edid_hdmi;
+ bool hdr_supported;
+
+ struct dc_panel_patch panel_patch;
};
struct view {
@@ -514,6 +521,24 @@ struct audio_info {
struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
};
+struct vrr_params {
+ enum vrr_state state;
+ uint32_t window_min;
+ uint32_t window_max;
+ uint32_t inserted_frame_duration_in_us;
+ uint32_t frames_to_insert;
+ uint32_t frame_counter;
+};
+
+#define DC_PLANE_UPDATE_TIMES_MAX 10
+
+struct dc_plane_flip_time {
+ unsigned int time_elapsed_in_us[DC_PLANE_UPDATE_TIMES_MAX];
+ unsigned int index;
+ unsigned int prev_update_time_in_us;
+};
+
+// Will combine with vrr_params at some point.
struct freesync_context {
bool supported;
bool enabled;
@@ -638,11 +663,6 @@ struct colorspace_transform {
bool enable_remap;
};
-struct csc_transform {
- uint16_t matrix[12];
- bool enable_adjustment;
-};
-
enum i2c_mot_mode {
I2C_MOT_UNDEF,
I2C_MOT_TRUE,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 0e0336c..fe92a12 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -42,6 +42,8 @@
#define FN(reg_name, field_name) \
abm_dce->abm_shift->field_name, abm_dce->abm_mask->field_name
+#define DC_LOGGER \
+ abm->ctx->logger
#define CTX \
abm_dce->base.ctx
@@ -51,16 +53,6 @@
#define MCP_DISABLE_ABM_IMMEDIATELY 255
-struct abm_backlight_registers {
- unsigned int BL_PWM_CNTL;
- unsigned int BL_PWM_CNTL2;
- unsigned int BL_PWM_PERIOD_CNTL;
- unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
-};
-
-/* registers setting needs to be save and restored used at InitBacklight */
-static struct abm_backlight_registers stored_backlight_registers = {0};
-
static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
{
@@ -333,6 +325,15 @@ static bool dce_abm_immediate_disable(struct abm *abm)
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ abm->stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ abm->stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
return true;
}
@@ -347,16 +348,16 @@ static bool dce_abm_init_backlight(struct abm *abm)
*/
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
if (value == 0 || value == 1) {
- if (stored_backlight_registers.BL_PWM_CNTL != 0) {
+ if (abm->stored_backlight_registers.BL_PWM_CNTL != 0) {
REG_WRITE(BL_PWM_CNTL,
- stored_backlight_registers.BL_PWM_CNTL);
+ abm->stored_backlight_registers.BL_PWM_CNTL);
REG_WRITE(BL_PWM_CNTL2,
- stored_backlight_registers.BL_PWM_CNTL2);
+ abm->stored_backlight_registers.BL_PWM_CNTL2);
REG_WRITE(BL_PWM_PERIOD_CNTL,
- stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+ abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
BL_PWM_REF_DIV,
- stored_backlight_registers.
+ abm->stored_backlight_registers.
LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
} else {
/* TODO: Note: This should not really happen since VBIOS
@@ -366,15 +367,15 @@ static bool dce_abm_init_backlight(struct abm *abm)
REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
}
} else {
- stored_backlight_registers.BL_PWM_CNTL =
+ abm->stored_backlight_registers.BL_PWM_CNTL =
REG_READ(BL_PWM_CNTL);
- stored_backlight_registers.BL_PWM_CNTL2 =
+ abm->stored_backlight_registers.BL_PWM_CNTL2 =
REG_READ(BL_PWM_CNTL2);
- stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
- &stored_backlight_registers.
+ &abm->stored_backlight_registers.
LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
}
@@ -395,30 +396,20 @@ static bool dce_abm_init_backlight(struct abm *abm)
return true;
}
-static bool is_dmcu_initialized(struct abm *abm)
-{
- struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- unsigned int dmcu_uc_reset;
-
- REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
-
- return !dmcu_uc_reset;
-}
-
static bool dce_abm_set_backlight_level(
struct abm *abm,
unsigned int backlight_level,
unsigned int frame_ramp,
- unsigned int controller_id)
+ unsigned int controller_id,
+ bool use_smooth_brightness)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- dm_logger_write(abm->ctx->logger, LOG_BACKLIGHT,
- "New Backlight level: %d (0x%X)\n",
+ DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_level, backlight_level);
/* If DMCU is in reset state, DMCU is uninitialized */
- if (is_dmcu_initialized(abm))
+ if (use_smooth_brightness)
dmcu_set_backlight_level(abm_dce,
backlight_level,
frame_ramp,
@@ -435,8 +426,7 @@ static const struct abm_funcs dce_funcs = {
.init_backlight = dce_abm_init_backlight,
.set_backlight_level = dce_abm_set_backlight_level,
.get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
- .set_abm_immediate_disable = dce_abm_immediate_disable,
- .is_dmcu_initialized = is_dmcu_initialized
+ .set_abm_immediate_disable = dce_abm_immediate_disable
};
static void dce_abm_construct(
@@ -450,6 +440,10 @@ static void dce_abm_construct(
base->ctx = ctx;
base->funcs = &dce_funcs;
+ base->stored_backlight_registers.BL_PWM_CNTL = 0;
+ base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
+ base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
+ base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
abm_dce->regs = regs;
abm_dce->abm_shift = abm_shift;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 59e909e..ff94369 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -37,8 +37,7 @@
SR(LVTMA_PWRSEQ_REF_DIV), \
SR(MASTER_COMM_CNTL_REG), \
SR(MASTER_COMM_CMD_REG), \
- SR(MASTER_COMM_DATA_REG1), \
- SR(DMCU_STATUS)
+ SR(MASTER_COMM_DATA_REG1)
#define ABM_DCE110_COMMON_REG_LIST() \
ABM_COMMON_REG_LIST_DCE_BASE(), \
@@ -84,8 +83,7 @@
ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
- ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh), \
- ABM_SF(DMCU_STATUS, UC_IN_RESET, mask_sh)
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh)
#define ABM_MASK_SH_LIST_DCE110(mask_sh) \
ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
@@ -174,7 +172,6 @@
type MASTER_COMM_CMD_REG_BYTE2; \
type BL_PWM_REF_DIV; \
type BL_PWM_EN; \
- type UC_IN_RESET; \
type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
type BL_PWM_GRP1_REG_LOCK; \
type BL_PWM_GRP1_REG_UPDATE_PENDING
@@ -206,7 +203,6 @@ struct dce_abm_registers {
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_DATA_REG1;
uint32_t BIOS_SCRATCH_2;
- uint32_t DMCU_STATUS;
uint32_t BL_PWM_GRP1_REG_LOCK;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 0df9ecb..6d5cdcd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -33,6 +33,8 @@
#define CTX \
aud->base.ctx
+#define DC_LOGGER \
+ aud->base.ctx->logger
#define REG(reg)\
(aud->regs->reg)
@@ -63,8 +65,7 @@ static void write_indirect_azalia_reg(struct audio *audio,
REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
AZALIA_ENDPOINT_REG_DATA, reg_data);
- dm_logger_write(CTX->logger, LOG_HW_AUDIO,
- "AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
+ DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
reg_index, reg_data);
}
@@ -81,8 +82,7 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index
/* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
- dm_logger_write(CTX->logger, LOG_HW_AUDIO,
- "AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
+ DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
reg_index, value);
return value;
@@ -359,10 +359,12 @@ void dce_aud_az_enable(struct audio *audio)
AUDIO_ENABLED);
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
- value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ set_reg_field_value(value, 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
- dm_logger_write(CTX->logger, LOG_HW_AUDIO,
- "\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n",
+ DC_LOG_HW_AUDIO("\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n",
audio->inst, value);
}
@@ -372,6 +374,10 @@ void dce_aud_az_disable(struct audio *audio)
struct dce_audio *aud = DCE_AUD(audio);
value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
set_reg_field_value(value, 0,
AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
@@ -383,8 +389,7 @@ void dce_aud_az_disable(struct audio *audio)
CLOCK_GATING_DISABLE);
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
- dm_logger_write(CTX->logger, LOG_HW_AUDIO,
- "\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n",
+ DC_LOG_HW_AUDIO("\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n",
audio->inst, value);
}
@@ -716,6 +721,11 @@ void dce_aud_az_configure(
DESCRIPTION17);
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value);
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ set_reg_field_value(value, 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
}
/*
@@ -783,8 +793,7 @@ void dce_aud_wall_dto_setup(
crtc_info->calculated_pixel_clock,
&clock_info);
- dm_logger_write(audio->ctx->logger, LOG_HW_AUDIO,\
- "\n%s:Input::requested_pixel_clock = %d"\
+ DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock = %d"\
"calculated_pixel_clock =%d\n"\
"audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\
crtc_info->requested_pixel_clock,\
@@ -897,6 +906,10 @@ void dce_aud_hw_init(
REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES,
CLKSTOP, 1,
EPSS, 1);
+ set_reg_field_value(value, 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
}
static const struct audio_funcs funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 31280d2..0aa2cda 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -41,7 +41,8 @@
#define CTX \
clk_src->base.ctx
-
+#define DC_LOGGER \
+ calc_pll_cs->ctx->logger
#undef FN
#define FN(reg_name, field_name) \
clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
@@ -288,7 +289,7 @@ static uint32_t calculate_pixel_clock_pll_dividers(
uint32_t max_ref_divider;
if (pll_settings->adjusted_pix_clk == 0) {
- dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ DC_LOG_ERROR(
"%s Bad requested pixel clock", __func__);
return MAX_PLL_CALC_ERROR;
}
@@ -349,13 +350,13 @@ static uint32_t calculate_pixel_clock_pll_dividers(
* ## SVS Wed 15 Jul 2009 */
if (min_post_divider > max_post_divider) {
- dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ DC_LOG_ERROR(
"%s Post divider range is invalid", __func__);
return MAX_PLL_CALC_ERROR;
}
if (min_ref_divider > max_ref_divider) {
- dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ DC_LOG_ERROR(
"%s Reference divider range is invalid", __func__);
return MAX_PLL_CALC_ERROR;
}
@@ -466,7 +467,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
{
uint32_t field = 0;
uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
-
+ struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
/* Check if reference clock is external (not pcie/xtalin)
* HW Dce80 spec:
* 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB
@@ -493,7 +494,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) {
/* Should never happen, ASSERT and fill up values to be able
* to continue. */
- dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+ DC_LOG_ERROR(
"%s: Failed to adjust pixel clock!!", __func__);
pll_settings->actual_pix_clk =
pix_clk_params->requested_pix_clk;
@@ -556,11 +557,12 @@ static uint32_t dce110_get_pix_clk_dividers(
struct pll_settings *pll_settings)
{
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+ struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
if (pix_clk_params == NULL || pll_settings == NULL
|| pix_clk_params->requested_pix_clk == 0) {
- dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+ DC_LOG_ERROR(
"%s: Invalid parameters!!\n", __func__);
return pll_calc_error;
}
@@ -908,19 +910,9 @@ static bool dce110_program_pix_clk(
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
- unsigned dp_dto_ref_kHz = 600000;
- /* DPREF clock from FPGA TODO: Does FPGA have this value? */
+ unsigned dp_dto_ref_kHz = 700000;
unsigned clock_kHz = pll_settings->actual_pix_clk;
- /* For faster simulation, if mode pixe clock less than 290MHz,
- * pixel clock can be hard coded to 290Mhz. For 4K mode, pixel clock
- * is greater than 500Mhz, need real pixel clock
- * clock_kHz = 290000;
- */
- /* TODO: un-hardcode when we can set display clock properly*/
- /*clock_kHz = pix_clk_params->requested_pix_clk;*/
- clock_kHz = 290000;
-
/* Set DTO values: phase = target clock, modulo = reference clock */
REG_WRITE(PHASE[inst], clock_kHz);
REG_WRITE(MODULO[inst], dp_dto_ref_kHz);
@@ -1062,14 +1054,14 @@ static void get_ss_info_from_atombios(
struct spread_spectrum_info *ss_info_cur;
struct spread_spectrum_data *ss_data_cur;
uint32_t i;
-
+ struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
if (ss_entries_num == NULL) {
- dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ DC_LOG_SYNC(
"Invalid entry !!!\n");
return;
}
if (spread_spectrum_data == NULL) {
- dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ DC_LOG_SYNC(
"Invalid array pointer!!!\n");
return;
}
@@ -1114,7 +1106,7 @@ static void get_ss_info_from_atombios(
++i, ++ss_info_cur, ++ss_data_cur) {
if (ss_info_cur->type.STEP_AND_DELAY_INFO != false) {
- dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ DC_LOG_SYNC(
"Invalid ATOMBIOS SS Table!!!\n");
goto out_free_data;
}
@@ -1124,9 +1116,9 @@ static void get_ss_info_from_atombios(
if (as_signal == AS_SIGNAL_TYPE_HDMI
&& ss_info_cur->spread_spectrum_percentage > 6){
/* invalid input, do nothing */
- dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ DC_LOG_SYNC(
"Invalid SS percentage ");
- dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ DC_LOG_SYNC(
"for HDMI in ATOMBIOS info Table!!!\n");
continue;
}
@@ -1238,12 +1230,12 @@ static bool calc_pll_max_vco_construct(
if (init_data->num_fract_fb_divider_decimal_point == 0 ||
init_data->num_fract_fb_divider_decimal_point_precision >
init_data->num_fract_fb_divider_decimal_point) {
- dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ DC_LOG_ERROR(
"The dec point num or precision is incorrect!");
return false;
}
if (init_data->num_fract_fb_divider_decimal_point_precision == 0) {
- dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ DC_LOG_ERROR(
"Incorrect fract feedback divider precision num!");
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 9031d22..78e6beb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -29,12 +29,12 @@
#include "fixed32_32.h"
#include "bios_parser_interface.h"
#include "dc.h"
-#include "dce_abm.h"
#include "dmcu.h"
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn_calcs.h"
#endif
#include "core_types.h"
+#include "dc_types.h"
#define TO_DCE_CLOCKS(clocks)\
@@ -49,6 +49,8 @@
#define CTX \
clk_dce->base.ctx
+#define DC_LOGGER \
+ clk->ctx->logger
/* Max clock values for each state indexed by "enum clocks_state": */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
@@ -292,8 +294,10 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
low_req_clk = i + 1;
if (low_req_clk > clk->max_clks_state) {
- dm_logger_write(clk->ctx->logger, LOG_WARNING,
- "%s: clocks unsupported", __func__);
+ DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d",
+ __func__,
+ req_clocks->display_clk_khz,
+ req_clocks->pixel_clk_khz);
low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
}
@@ -309,8 +313,7 @@ static bool dce_clock_set_min_clocks_state(
if (clocks_state > clk->max_clks_state) {
/*Requested state exceeds max supported state.*/
- dm_logger_write(clk->ctx->logger, LOG_WARNING,
- "Requested state exceeds max supported state");
+ DC_LOG_WARNING("Requested state exceeds max supported state");
return false;
} else if (clocks_state == clk->cur_min_clks_state) {
/*if we're trying to set the same state, we can just return
@@ -384,7 +387,6 @@ static int dce112_set_clock(
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk->ctx->dc_bios;
struct dc *core_dc = clk->ctx->dc;
- struct abm *abm = core_dc->res_pool->abm;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
@@ -417,9 +419,12 @@ static int dce112_set_clock(
bp->funcs->set_dce_clock(bp, &dce_clk_params);
- if (abm->funcs->is_dmcu_initialized(abm) && clk_dce->dfs_bypass_disp_clk != actual_clock)
- dmcu->funcs->set_psr_wait_loop(dmcu,
- actual_clock / 1000 / 7);
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (clk_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ }
+
clk_dce->dfs_bypass_disp_clk = actual_clock;
return actual_clock;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index fd77df5..2ee3d9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -49,8 +49,16 @@
#define PSR_EXIT 0x21
#define PSR_SET 0x23
#define PSR_SET_WAITLOOP 0x31
+#define MCP_INIT_DMCU 0x88
+#define MCP_INIT_IRAM 0x89
+#define MCP_DMCU_VERSION 0x90
#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
-unsigned int cached_wait_loop_number = 0;
+
+static bool dce_dmcu_init(struct dmcu *dmcu)
+{
+ // Do nothing
+ return true;
+}
bool dce_dmcu_load_iram(struct dmcu *dmcu,
unsigned int start_offset,
@@ -84,7 +92,7 @@ static void dce_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
- uint32_t psrStateOffset = 0xf0;
+ uint32_t psr_state_offset = 0xf0;
/* Enable write access to IRAM */
REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
@@ -92,7 +100,7 @@ static void dce_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
REG_WAIT(DCI_MEM_PWR_STATUS, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
/* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
- REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
+ REG_WRITE(DMCU_IRAM_RD_CTRL, psr_state_offset);
/* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
*psr_state = REG_READ(DMCU_IRAM_RD_DATA);
@@ -255,13 +263,33 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
}
+static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int dmcu_uc_reset;
+
+ /* microcontroller is not running */
+ REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
+
+ /* DMCU is not running */
+ if (dmcu_uc_reset)
+ return false;
+
+ return true;
+}
+
static void dce_psr_wait_loop(
struct dmcu *dmcu,
unsigned int wait_loop_number)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
- if (cached_wait_loop_number == wait_loop_number)
+
+ if (dmcu->cached_wait_loop_number == wait_loop_number)
+ return;
+
+ /* DMCU is not running */
+ if (!dce_is_dmcu_initialized(dmcu))
return;
/* waitDMCUReadyForCmd */
@@ -269,7 +297,7 @@ static void dce_psr_wait_loop(
masterCmdData1.u32 = 0;
masterCmdData1.bits.wait_loop = wait_loop_number;
- cached_wait_loop_number = wait_loop_number;
+ dmcu->cached_wait_loop_number = wait_loop_number;
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32);
/* setDMCUParam_Cmd */
@@ -279,14 +307,136 @@ static void dce_psr_wait_loop(
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
}
-static void dce_get_psr_wait_loop(unsigned int *psr_wait_loop_number)
+static void dce_get_psr_wait_loop(
+ struct dmcu *dmcu, unsigned int *psr_wait_loop_number)
{
- *psr_wait_loop_number = cached_wait_loop_number;
+ *psr_wait_loop_number = dmcu->cached_wait_loop_number;
return;
}
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
+static void dcn10_get_dmcu_state(struct dmcu *dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ uint32_t dmcu_state_offset = 0xf6;
+
+ /* Enable write access to IRAM */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 1,
+ IRAM_RD_ADDR_AUTO_INC, 1);
+
+ REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
+ REG_WRITE(DMCU_IRAM_RD_CTRL, dmcu_state_offset);
+
+ /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
+ dmcu->dmcu_state = REG_READ(DMCU_IRAM_RD_DATA);
+
+ /* Disable write access to IRAM to allow dynamic sleep state */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 0,
+ IRAM_RD_ADDR_AUTO_INC, 0);
+}
+
+static void dcn10_get_dmcu_version(struct dmcu *dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ uint32_t dmcu_version_offset = 0xf1;
+
+ /* Clear scratch */
+ REG_WRITE(DC_DMCU_SCRATCH, 0);
+
+ /* Enable write access to IRAM */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 1,
+ IRAM_RD_ADDR_AUTO_INC, 1);
+
+ REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ /* Write address to IRAM_RD_ADDR and read from DATA register */
+ REG_WRITE(DMCU_IRAM_RD_CTRL, dmcu_version_offset);
+ dmcu->dmcu_version.interface_version = REG_READ(DMCU_IRAM_RD_DATA);
+ dmcu->dmcu_version.year = ((REG_READ(DMCU_IRAM_RD_DATA) << 8) |
+ REG_READ(DMCU_IRAM_RD_DATA));
+ dmcu->dmcu_version.month = REG_READ(DMCU_IRAM_RD_DATA);
+ dmcu->dmcu_version.date = REG_READ(DMCU_IRAM_RD_DATA);
+
+ /* Disable write access to IRAM to allow dynamic sleep state */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 0,
+ IRAM_RD_ADDR_AUTO_INC, 0);
+
+ /* Send MCP command message to DMCU to get version reply from FW.
+ * We expect this version should match the one in IRAM, otherwise
+ * something is wrong with DMCU and we should fail and disable UC.
+ */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
+ /* Set command to get DMCU version from microcontroller */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ MCP_DMCU_VERSION);
+
+ /* Notify microcontroller of new command */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* Ensure command has been executed before continuing */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
+ /* Somehow version does not match, so fail and return version 0 */
+ if (dmcu->dmcu_version.interface_version != REG_READ(DC_DMCU_SCRATCH))
+ dmcu->dmcu_version.interface_version = 0;
+}
+
+static bool dcn10_dmcu_init(struct dmcu *dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ /* DMCU FW should populate the scratch register if running */
+ if (REG_READ(DC_DMCU_SCRATCH) == 0)
+ return false;
+
+ /* Check state is uninitialized */
+ dcn10_get_dmcu_state(dmcu);
+
+ /* If microcontroller is already initialized, do nothing */
+ if (dmcu->dmcu_state == DMCU_RUNNING)
+ return true;
+
+ /* Retrieve and cache the DMCU firmware version. */
+ dcn10_get_dmcu_version(dmcu);
+
+ /* Check interface version to confirm firmware is loaded and running */
+ if (dmcu->dmcu_version.interface_version == 0)
+ return false;
+
+ /* Wait until microcontroller is ready to process interrupt */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
+ /* Set initialized ramping boundary value */
+ REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF);
+
+ /* Set command to initialize microcontroller */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ MCP_INIT_DMCU);
+
+ /* Notify microcontroller of new command */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* Ensure command has been executed before continuing */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
+ // Check state is initialized
+ dcn10_get_dmcu_state(dmcu);
+
+ // If microcontroller is not in running state, fail
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return false;
+
+ return true;
+}
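/*
 * Note (editorial, not part of this patch) on the MCP handshake used above
 * and in the IRAM load below: any command data goes into
 * MASTER_COMM_DATA_REG1 first, the driver waits for MASTER_COMM_INTERRUPT
 * to read back 0, writes the command byte into MASTER_COMM_CMD_REG, raises
 * MASTER_COMM_INTERRUPT to notify the microcontroller, and then waits for
 * the bit to clear again before treating the command as complete.
 */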
+
+static bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
unsigned int start_offset,
const char *src,
unsigned int bytes)
@@ -294,7 +444,9 @@ bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
unsigned int count = 0;
- REG_UPDATE(DMCU_CTRL, DMCU_ENABLE, 1);
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return false;
/* Enable write access to IRAM */
REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
@@ -313,6 +465,19 @@ bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
IRAM_HOST_ACCESS_EN, 0,
IRAM_WR_ADDR_AUTO_INC, 0);
+ /* Wait until microcontroller is ready to process interrupt */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
+ /* Set command to signal IRAM is loaded and to initialize IRAM */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ MCP_INIT_IRAM);
+
+ /* Notify microcontroller of new command */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* Ensure command has been executed before continuing */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
return true;
}
@@ -320,7 +485,11 @@ static void dcn10_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
- uint32_t psrStateOffset = 0xf0;
+ uint32_t psr_state_offset = 0xf0;
+
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return;
/* Enable write access to IRAM */
REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
@@ -328,7 +497,7 @@ static void dcn10_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
/* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
- REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
+ REG_WRITE(DMCU_IRAM_RD_CTRL, psr_state_offset);
/* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
*psr_state = REG_READ(DMCU_IRAM_RD_DATA);
@@ -348,6 +517,13 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
unsigned int retryCount;
uint32_t psr_state = 0;
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return;
+
+ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+ if (psr_state == 0 && !enable)
+ return;
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
dmcu_wait_reg_ready_interval,
@@ -399,6 +575,10 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
union dce_dmcu_psr_config_data_reg2 masterCmdData2;
union dce_dmcu_psr_config_data_reg3 masterCmdData3;
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return;
+
link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
psr_context->psrExitLinkTrainingRequired);
@@ -505,13 +685,18 @@ static void dcn10_psr_wait_loop(
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
+
+ /* If microcontroller is not running, do nothing */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return;
+
if (wait_loop_number != 0) {
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
masterCmdData1.u32 = 0;
masterCmdData1.bits.wait_loop = wait_loop_number;
- cached_wait_loop_number = wait_loop_number;
+ dmcu->cached_wait_loop_number = wait_loop_number;
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32);
/* setDMCUParam_Cmd */
@@ -522,31 +707,44 @@ static void dcn10_psr_wait_loop(
}
}
-static void dcn10_get_psr_wait_loop(unsigned int *psr_wait_loop_number)
+static void dcn10_get_psr_wait_loop(
+ struct dmcu *dmcu, unsigned int *psr_wait_loop_number)
{
- *psr_wait_loop_number = cached_wait_loop_number;
+ *psr_wait_loop_number = dmcu->cached_wait_loop_number;
return;
}
+static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)
+{
+ /* microcontroller is not running */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return false;
+ return true;
+}
+
#endif
static const struct dmcu_funcs dce_funcs = {
+ .dmcu_init = dce_dmcu_init,
.load_iram = dce_dmcu_load_iram,
.set_psr_enable = dce_dmcu_set_psr_enable,
.setup_psr = dce_dmcu_setup_psr,
.get_psr_state = dce_get_dmcu_psr_state,
.set_psr_wait_loop = dce_psr_wait_loop,
- .get_psr_wait_loop = dce_get_psr_wait_loop
+ .get_psr_wait_loop = dce_get_psr_wait_loop,
+ .is_dmcu_initialized = dce_is_dmcu_initialized
};
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
static const struct dmcu_funcs dcn10_funcs = {
+ .dmcu_init = dcn10_dmcu_init,
.load_iram = dcn10_dmcu_load_iram,
.set_psr_enable = dcn10_dmcu_set_psr_enable,
.setup_psr = dcn10_dmcu_setup_psr,
.get_psr_state = dcn10_get_dmcu_psr_state,
.set_psr_wait_loop = dcn10_psr_wait_loop,
- .get_psr_wait_loop = dcn10_get_psr_wait_loop
+ .get_psr_wait_loop = dcn10_get_psr_wait_loop,
+ .is_dmcu_initialized = dcn10_is_dmcu_initialized
};
#endif
@@ -561,6 +759,7 @@ static void dce_dmcu_construct(
base->ctx = ctx;
base->funcs = &dce_funcs;
+ base->cached_wait_loop_number = 0;
dmcu_dce->regs = regs;
dmcu_dce->dmcu_shift = dmcu_shift;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index b85f53c..c24c0e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -31,6 +31,7 @@
#define DMCU_COMMON_REG_LIST_DCE_BASE() \
SR(DMCU_CTRL), \
+ SR(DMCU_STATUS), \
SR(DMCU_RAM_ACCESS_CTRL), \
SR(DMCU_IRAM_WR_CTRL), \
SR(DMCU_IRAM_WR_DATA), \
@@ -42,7 +43,25 @@
SR(DMCU_IRAM_RD_CTRL), \
SR(DMCU_IRAM_RD_DATA), \
SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
- SR(SMU_INTERRUPT_CONTROL)
+ SR(SMU_INTERRUPT_CONTROL), \
+ SR(DC_DMCU_SCRATCH)
+
+#define DMCU_DCE80_REG_LIST() \
+ SR(DMCU_CTRL), \
+ SR(DMCU_STATUS), \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_WR_CTRL), \
+ SR(DMCU_IRAM_WR_DATA), \
+ SR(MASTER_COMM_DATA_REG1), \
+ SR(MASTER_COMM_DATA_REG2), \
+ SR(MASTER_COMM_DATA_REG3), \
+ SR(MASTER_COMM_CMD_REG), \
+ SR(MASTER_COMM_CNTL_REG), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SR(SMU_INTERRUPT_CONTROL), \
+ SR(DC_DMCU_SCRATCH)
#define DMCU_DCE110_COMMON_REG_LIST() \
DMCU_COMMON_REG_LIST_DCE_BASE(), \
@@ -58,10 +77,16 @@
#define DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
DMCU_SF(DMCU_CTRL, \
DMCU_ENABLE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_STOP_MODE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_RESET, mask_sh), \
DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
IRAM_HOST_ACCESS_EN, mask_sh), \
DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
IRAM_WR_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_RD_ADDR_AUTO_INC, mask_sh), \
DMCU_SF(MASTER_COMM_CMD_REG, \
MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
@@ -75,6 +100,24 @@
STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
+#define DMCU_MASK_SH_LIST_DCE80(mask_sh) \
+ DMCU_SF(DMCU_CTRL, \
+ DMCU_ENABLE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_STOP_MODE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_RESET, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_HOST_ACCESS_EN, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_WR_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_RD_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(MASTER_COMM_CMD_REG, \
+ MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+ DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
+
#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \
DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
DMCU_SF(DCI_MEM_PWR_STATUS, \
@@ -89,7 +132,10 @@
type DMCU_IRAM_MEM_PWR_STATE; \
type IRAM_HOST_ACCESS_EN; \
type IRAM_WR_ADDR_AUTO_INC; \
+ type IRAM_RD_ADDR_AUTO_INC; \
type DMCU_ENABLE; \
+ type UC_IN_STOP_MODE; \
+ type UC_IN_RESET; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_INTERRUPT; \
type DPHY_RX_FAST_TRAINING_CAPABLE; \
@@ -112,6 +158,7 @@ struct dce_dmcu_mask {
struct dce_dmcu_registers {
uint32_t DMCU_CTRL;
+ uint32_t DMCU_STATUS;
uint32_t DMCU_RAM_ACCESS_CTRL;
uint32_t DCI_MEM_PWR_STATUS;
uint32_t DMU_MEM_PWR_CNTL;
@@ -127,6 +174,7 @@ struct dce_dmcu_registers {
uint32_t DMCU_IRAM_RD_DATA;
uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
uint32_t SMU_INTERRUPT_CONTROL;
+ uint32_t DC_DMCU_SCRATCH;
};
struct dce_dmcu {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index d2e66b1..4877243 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -56,7 +56,7 @@ void dce_pipe_control_lock(struct dc *dc,
if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
return;
- val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->pipe_idx],
+ val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
BLND_SCL_V_UPDATE_LOCK, &scl,
BLND_BLND_V_UPDATE_LOCK, &blnd,
@@ -67,19 +67,19 @@ void dce_pipe_control_lock(struct dc *dc,
blnd = lock_val;
update_lock_mode = lock_val;
- REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
BLND_SCL_V_UPDATE_LOCK, scl);
if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
- REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], val,
BLND_BLND_V_UPDATE_LOCK, blnd,
BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
if (hws->wa.blnd_crtc_trigger) {
if (!lock) {
- uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->pipe_idx]);
- REG_WRITE(CRTC_H_BLANK_START_END[pipe->pipe_idx], value);
+ uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst]);
+ REG_WRITE(CRTC_H_BLANK_START_END[pipe->stream_res.tg->inst], value);
}
}
}
@@ -197,9 +197,9 @@ void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
}
/* Only use LUT for 8 bit formats */
-bool dce_use_lut(const struct dc_plane_state *plane_state)
+bool dce_use_lut(enum surface_pixel_format format)
{
- switch (plane_state->format) {
+ switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
return true;
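dce_use_lut() now takes only the pixel format, so callers pass plane_state->format instead of the whole plane state. A self-contained sketch of the narrowed helper and a call site (the enum values are stand-ins; only the two 8-bit formats return true, matching the switch above):

#include <stdbool.h>
#include <stdio.h>

enum surface_pixel_format {
	SURFACE_PIXEL_FORMAT_GRPH_ARGB8888,
	SURFACE_PIXEL_FORMAT_GRPH_ABGR8888,
	SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010,
};

struct plane_state {
	enum surface_pixel_format format;
};

/* only the 8-bit-per-channel formats use the legacy input LUT */
static bool use_lut(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	struct plane_state ps = { SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010 };

	/* call sites now hand over just the format */
	printf("%d\n", use_lut(ps.format));	/* prints 0 */
	return 0;
}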
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 5250615..057b8af 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -140,46 +140,8 @@
BL_REG_LIST()
#define HWSEQ_DCN_REG_LIST()\
- SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 0), \
- SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 1), \
- SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 2), \
- SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 3), \
- SRII(DCHUBP_CNTL, HUBP, 0), \
- SRII(DCHUBP_CNTL, HUBP, 1), \
- SRII(DCHUBP_CNTL, HUBP, 2), \
- SRII(DCHUBP_CNTL, HUBP, 3), \
- SRII(HUBP_CLK_CNTL, HUBP, 0), \
- SRII(HUBP_CLK_CNTL, HUBP, 1), \
- SRII(HUBP_CLK_CNTL, HUBP, 2), \
- SRII(HUBP_CLK_CNTL, HUBP, 3), \
- SRII(DPP_CONTROL, DPP_TOP, 0), \
- SRII(DPP_CONTROL, DPP_TOP, 1), \
- SRII(DPP_CONTROL, DPP_TOP, 2), \
- SRII(DPP_CONTROL, DPP_TOP, 3), \
- SRII(OPP_PIPE_CONTROL, OPP_PIPE, 0), \
- SRII(OPP_PIPE_CONTROL, OPP_PIPE, 1), \
- SRII(OPP_PIPE_CONTROL, OPP_PIPE, 2), \
- SRII(OPP_PIPE_CONTROL, OPP_PIPE, 3), \
SR(REFCLK_CNTL), \
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
- SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
- SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
- SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
- SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
- SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
- SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
- SR(DCHUBBUB_ARB_SAT_LEVEL),\
- SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
- SR(DCHUBBUB_TEST_DEBUG_INDEX), \
- SR(DCHUBBUB_TEST_DEBUG_DATA), \
SR(DIO_MEM_PWR_CTRL), \
SR(DCCG_GATE_DISABLE_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL2), \
@@ -199,22 +161,10 @@
MMHUB_SR(MC_VM_SYSTEM_APERTURE_LOW_ADDR),\
MMHUB_SR(MC_VM_SYSTEM_APERTURE_HIGH_ADDR)
-#define HWSEQ_SR_WATERMARK_REG_LIST()\
- SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
- SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
- SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B),\
- SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B),\
- SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C),\
- SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C),\
- SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D),\
- SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
-
#define HWSEQ_DCN1_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
- HWSEQ_SR_WATERMARK_REG_LIST(), \
HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
HWSEQ_PHYPLL_REG_LIST(OTG), \
- SR(DCHUBBUB_SDPIF_FB_TOP),\
SR(DCHUBBUB_SDPIF_FB_BASE),\
SR(DCHUBBUB_SDPIF_FB_OFFSET),\
SR(DCHUBBUB_SDPIF_AGP_BASE),\
@@ -240,6 +190,7 @@
SR(D2VGA_CONTROL), \
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
+ SR(VGA_TEST_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST()
@@ -264,40 +215,9 @@ struct dce_hwseq_registers {
uint32_t DCHUB_AGP_BOT;
uint32_t DCHUB_AGP_TOP;
- uint32_t OTG_GLOBAL_SYNC_STATUS[4];
- uint32_t DCHUBP_CNTL[4];
- uint32_t HUBP_CLK_CNTL[4];
- uint32_t DPP_CONTROL[4];
- uint32_t OPP_PIPE_CONTROL[4];
uint32_t REFCLK_CNTL;
- uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;
- uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A;
- uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;
- uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;
- uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;
- uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;
- uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B;
- uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;
- uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;
- uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;
- uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;
- uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C;
- uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;
- uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;
- uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;
- uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;
- uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D;
- uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;
- uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D;
- uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;
- uint32_t DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL;
- uint32_t DCHUBBUB_ARB_SAT_LEVEL;
- uint32_t DCHUBBUB_ARB_DF_REQ_OUTSTAND;
+
uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL;
- uint32_t DCHUBBUB_ARB_DRAM_STATE_CNTL;
- uint32_t DCHUBBUB_TEST_DEBUG_INDEX;
- uint32_t DCHUBBUB_TEST_DEBUG_DATA;
- uint32_t DCHUBBUB_SDPIF_FB_TOP;
uint32_t DCHUBBUB_SDPIF_FB_BASE;
uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
uint32_t DCHUBBUB_SDPIF_AGP_BASE;
@@ -342,6 +262,7 @@ struct dce_hwseq_registers {
uint32_t D2VGA_CONTROL;
uint32_t D3VGA_CONTROL;
uint32_t D4VGA_CONTROL;
+ uint32_t VGA_TEST_CONTROL;
/* MMHUB registers. read only. temporary hack */
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -408,6 +329,8 @@ struct dce_hwseq_registers {
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
@@ -438,33 +361,17 @@ struct dce_hwseq_registers {
#define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
- HWS_SF(OTG0_, OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR, mask_sh), \
- HWS_SF(OTG0_, OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_OCCURRED, mask_sh), \
- HWS_SF(HUBP0_, DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh), \
- HWS_SF(HUBP0_, HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh), \
- HWS_SF(DPP_TOP0_, DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
- HWS_SF(OPP_PIPE0_, OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh),\
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
- HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
- HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
- HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
- HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \
- HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
- HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
- HWS_SF(, DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
- HWS_SF(, DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh), \
- HWS_SF(, DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \
HWS_SF(, DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \
HWS_SF(, DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \
HWS_SF(, DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \
HWS_SF(, DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \
HWS_SF(, DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh), \
- HWS_SF(DPP_TOP0_, DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh), \
/* todo: get these from GVM instead of reading registers ourselves */\
HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, PAGE_DIRECTORY_ENTRY_HI32, mask_sh),\
HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, PAGE_DIRECTORY_ENTRY_LO32, mask_sh),\
@@ -500,7 +407,15 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
+ HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
+ HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
+ HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
+ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
+ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
#define HWSEQ_REG_FIELD_LIST(type) \
@@ -533,16 +448,14 @@ struct dce_hwseq_registers {
type ENABLE_L1_TLB;\
type SYSTEM_ACCESS_MODE;\
type LVTMA_BLON;\
- type LVTMA_PWRSEQ_TARGET_STATE_R;
+ type LVTMA_PWRSEQ_TARGET_STATE_R;\
+ type LVTMA_DIGON;\
+ type LVTMA_DIGON_OVRD;
#define HWSEQ_DCN_REG_FIELD_LIST(type) \
- type VUPDATE_NO_LOCK_EVENT_CLEAR; \
- type VUPDATE_NO_LOCK_EVENT_OCCURRED; \
type HUBP_VTG_SEL; \
type HUBP_CLOCK_ENABLE; \
type DPP_CLOCK_ENABLE; \
- type DPPCLK_RATE_CONTROL; \
- type SDPIF_FB_TOP;\
type SDPIF_FB_BASE;\
type SDPIF_FB_OFFSET;\
type SDPIF_AGP_BASE;\
@@ -555,14 +468,6 @@ struct dce_hwseq_registers {
type AGP_BOT;\
type AGP_TOP;\
type DCHUBBUB_GLOBAL_TIMER_ENABLE; \
- type DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST;\
- type DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE;\
- type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE;\
- type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE;\
- type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE;\
- type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE;\
- type DCHUBBUB_ARB_SAT_LEVEL;\
- type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\
type OPP_PIPE_CLOCK_EN;\
type IP_REQUEST_EN; \
type DOMAIN0_POWER_FORCEON; \
@@ -591,7 +496,14 @@ struct dce_hwseq_registers {
type DOMAIN7_PGFSM_PWR_STATUS; \
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
- type DENTIST_DPPCLK_WDIVIDER;
+ type DENTIST_DPPCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_WDIVIDER; \
+ type VGA_TEST_ENABLE; \
+ type VGA_TEST_RENDER_START; \
+ type D1VGA_MODE_ENABLE; \
+ type D2VGA_MODE_ENABLE; \
+ type D3VGA_MODE_ENABLE; \
+ type D4VGA_MODE_ENABLE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
@@ -627,5 +539,5 @@ void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
struct clock_source *clk_src,
unsigned int tg_inst);
-bool dce_use_lut(const struct dc_plane_state *plane_state);
+bool dce_use_lut(enum surface_pixel_format format);
#endif /*__DCE_HWSEQ_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
index d618fdd..d737e91 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
@@ -135,36 +135,34 @@ static void dce_ipp_cursor_set_attributes(
}
-static void dce_ipp_program_prescale(
- struct input_pixel_processor *ipp,
- struct ipp_prescale_params *params)
+static void dce_ipp_program_prescale(struct input_pixel_processor *ipp,
+ struct ipp_prescale_params *params)
{
struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
/* set to bypass mode first before change */
REG_UPDATE(PRESCALE_GRPH_CONTROL,
- GRPH_PRESCALE_BYPASS,
- 1);
+ GRPH_PRESCALE_BYPASS, 1);
REG_SET_2(PRESCALE_VALUES_GRPH_R, 0,
- GRPH_PRESCALE_SCALE_R, params->scale,
- GRPH_PRESCALE_BIAS_R, params->bias);
+ GRPH_PRESCALE_SCALE_R, params->scale,
+ GRPH_PRESCALE_BIAS_R, params->bias);
REG_SET_2(PRESCALE_VALUES_GRPH_G, 0,
- GRPH_PRESCALE_SCALE_G, params->scale,
- GRPH_PRESCALE_BIAS_G, params->bias);
+ GRPH_PRESCALE_SCALE_G, params->scale,
+ GRPH_PRESCALE_BIAS_G, params->bias);
REG_SET_2(PRESCALE_VALUES_GRPH_B, 0,
- GRPH_PRESCALE_SCALE_B, params->scale,
- GRPH_PRESCALE_BIAS_B, params->bias);
+ GRPH_PRESCALE_SCALE_B, params->scale,
+ GRPH_PRESCALE_BIAS_B, params->bias);
if (params->mode != IPP_PRESCALE_MODE_BYPASS) {
REG_UPDATE(PRESCALE_GRPH_CONTROL,
- GRPH_PRESCALE_BYPASS, 0);
+ GRPH_PRESCALE_BYPASS, 0);
/* If prescale is in use, then legacy lut should be bypassed */
REG_UPDATE(INPUT_GAMMA_CONTROL,
- GRPH_INPUT_GAMMA_MODE, 1);
+ GRPH_INPUT_GAMMA_MODE, 1);
}
}
@@ -223,13 +221,12 @@ static void dce_ipp_set_degamma(
struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
uint32_t degamma_type = (mode == IPP_DEGAMMA_MODE_HW_sRGB) ? 1 : 0;
- ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS ||
- mode == IPP_DEGAMMA_MODE_HW_sRGB);
+ ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS || mode == IPP_DEGAMMA_MODE_HW_sRGB);
REG_SET_3(DEGAMMA_CONTROL, 0,
- GRPH_DEGAMMA_MODE, degamma_type,
- CURSOR_DEGAMMA_MODE, degamma_type,
- CURSOR2_DEGAMMA_MODE, degamma_type);
+ GRPH_DEGAMMA_MODE, degamma_type,
+ CURSOR_DEGAMMA_MODE, degamma_type,
+ CURSOR2_DEGAMMA_MODE, degamma_type);
}
static const struct ipp_funcs dce_ipp_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index fe88852..8167cad 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -56,6 +56,8 @@
#define CTX \
enc110->base.ctx
+#define DC_LOGGER \
+ enc110->base.ctx->logger
#define REG(reg)\
(enc110->link_regs->reg)
@@ -82,13 +84,6 @@
#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
-/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
-#define TMDS_MIN_PIXEL_CLOCK 25000
-/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
-#define TMDS_MAX_PIXEL_CLOCK 165000
-/* For current ASICs pixel clock - 600MHz */
-#define MAX_ENCODER_CLOCK 600000
-
enum {
DP_MST_UPDATE_MAX_RETRY = 50
};
@@ -683,6 +678,7 @@ void dce110_link_encoder_construct(
{
struct bp_encoder_cap_info bp_cap_info = {0};
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
enc110->base.funcs = &dce110_lnk_enc_funcs;
enc110->base.ctx = init_data->ctx;
@@ -757,15 +753,23 @@ void dce110_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
+ /* default to one to mirror Windows behavior */
+ enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
+ enc110->base.id, &bp_cap_info);
+
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
- enc110->base.ctx->dc_bios, enc110->base.id,
- &bp_cap_info)) {
+ if (BP_RESULT_OK == result) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
}
}
@@ -816,7 +820,6 @@ void dce110_link_encoder_hw_init(
struct link_encoder *enc)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
- struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
@@ -828,11 +831,13 @@ void dce110_link_encoder_hw_init(
cntl.coherent = false;
cntl.hpd_sel = enc110->base.hpd_source;
+ if (enc110->base.connector.id == CONNECTOR_ID_EDP)
+ cntl.signal = SIGNAL_TYPE_EDP;
+
result = link_transmitter_control(enc110, &cntl);
if (result != BP_RESULT_OK) {
- dm_logger_write(ctx->logger, LOG_ERROR,
- "%s: Failed to execute VBIOS command table!\n",
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
return;
@@ -845,8 +850,6 @@ void dce110_link_encoder_hw_init(
ASSERT(result == BP_RESULT_OK);
- } else if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
- ctx->dc->hwss.edp_power_control(enc, true);
}
aux_initialize(enc110);
@@ -906,12 +909,10 @@ void dce110_link_encoder_enable_tmds_output(
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
- bool hdmi,
- bool dual_link,
+ enum signal_type signal,
uint32_t pixel_clock)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
- struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
@@ -921,16 +922,12 @@ void dce110_link_encoder_enable_tmds_output(
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
cntl.pll_id = clock_source;
- if (hdmi) {
- cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
- cntl.lanes_number = 4;
- } else if (dual_link) {
- cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ cntl.signal = signal;
+ if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
cntl.lanes_number = 8;
- } else {
- cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ else
cntl.lanes_number = 4;
- }
+
cntl.hpd_sel = enc110->base.hpd_source;
cntl.pixel_clock = pixel_clock;
@@ -939,8 +936,7 @@ void dce110_link_encoder_enable_tmds_output(
result = link_transmitter_control(enc110, &cntl);
if (result != BP_RESULT_OK) {
- dm_logger_write(ctx->logger, LOG_ERROR,
- "%s: Failed to execute VBIOS command table!\n",
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
}
@@ -953,7 +949,6 @@ void dce110_link_encoder_enable_dp_output(
enum clock_source_id clock_source)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
- struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
@@ -980,8 +975,7 @@ void dce110_link_encoder_enable_dp_output(
result = link_transmitter_control(enc110, &cntl);
if (result != BP_RESULT_OK) {
- dm_logger_write(ctx->logger, LOG_ERROR,
- "%s: Failed to execute VBIOS command table!\n",
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
}
@@ -994,7 +988,6 @@ void dce110_link_encoder_enable_dp_mst_output(
enum clock_source_id clock_source)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
- struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
@@ -1021,8 +1014,7 @@ void dce110_link_encoder_enable_dp_mst_output(
result = link_transmitter_control(enc110, &cntl);
if (result != BP_RESULT_OK) {
- dm_logger_write(ctx->logger, LOG_ERROR,
- "%s: Failed to execute VBIOS command table!\n",
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
}
@@ -1033,11 +1025,9 @@ void dce110_link_encoder_enable_dp_mst_output(
*/
void dce110_link_encoder_disable_output(
struct link_encoder *enc,
- enum signal_type signal,
- struct dc_link *link)
+ enum signal_type signal)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
- struct dc_context *ctx = enc110->base.ctx;
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
@@ -1045,8 +1035,6 @@ void dce110_link_encoder_disable_output(
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
return;
}
- if (enc110->base.connector.id == CONNECTOR_ID_EDP)
- ctx->dc->hwss.edp_backlight_control(link, false);
/* Power-down RX and disable GPU PHY should be paired.
* Disabling PHY without powering down RX may cause
* symbol lock loss, on which we will get DP Sink interrupt. */
@@ -1067,8 +1055,7 @@ void dce110_link_encoder_disable_output(
result = link_transmitter_control(enc110, &cntl);
if (result != BP_RESULT_OK) {
- dm_logger_write(ctx->logger, LOG_ERROR,
- "%s: Failed to execute VBIOS command table!\n",
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
__func__);
BREAK_TO_DEBUGGER();
return;
@@ -1077,20 +1064,6 @@ void dce110_link_encoder_disable_output(
/* disable encoder */
if (dc_is_dp_signal(signal))
link_encoder_disable(enc110);
-
- if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
- /* power down eDP panel */
- /* TODO: Power control cause regression, we should implement
- * it properly, for now just comment it.
- *
- * link_encoder_edp_wait_for_hpd_ready(
- link_enc,
- link_enc->connector,
- false);
-
- * link_encoder_edp_power_control(
- link_enc, false); */
- }
}
void dce110_link_encoder_dp_set_lane_settings(
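dce110_link_encoder_enable_tmds_output() above replaces the hdmi/dual_link boolean pair with a single enum signal_type, and the lane count now follows the signal. A small stand-alone sketch of that mapping (enum values are stand-ins):

#include <stdint.h>
#include <stdio.h>

enum signal_type {
	SIGNAL_TYPE_DVI_SINGLE_LINK,
	SIGNAL_TYPE_DVI_DUAL_LINK,
	SIGNAL_TYPE_HDMI_TYPE_A,
};

/* dual-link DVI drives 8 lanes, everything else handled here drives 4 */
static uint32_t lanes_for_signal(enum signal_type signal)
{
	return signal == SIGNAL_TYPE_DVI_DUAL_LINK ? 8 : 4;
}

int main(void)
{
	printf("%u %u\n",
	       lanes_for_signal(SIGNAL_TYPE_HDMI_TYPE_A),	/* 4 */
	       lanes_for_signal(SIGNAL_TYPE_DVI_DUAL_LINK));	/* 8 */
	return 0;
}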
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 494067d..0ec3433 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output(
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
- bool hdmi,
- bool dual_link,
+ enum signal_type signal,
uint32_t pixel_clock);
/* enables DP PHY output */
@@ -228,9 +227,8 @@ void dce110_link_encoder_enable_dp_mst_output(
/* disable PHY output */
void dce110_link_encoder_disable_output(
- struct link_encoder *link_enc,
- enum signal_type signal,
- struct dc_link *link);
+ struct link_encoder *enc,
+ enum signal_type signal);
/* set DP lane settings */
void dce110_link_encoder_dp_set_lane_settings(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index 3931412..8709389 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -128,23 +128,22 @@ static void set_truncation(
return;
}
/* on other format-to do */
- if (params->flags.TRUNCATE_ENABLED == 0 ||
- params->flags.TRUNCATE_DEPTH == 2)
+ if (params->flags.TRUNCATE_ENABLED == 0)
return;
/*Set truncation depth and Enable truncation*/
REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
FMT_TRUNCATE_EN, 1,
FMT_TRUNCATE_DEPTH,
- params->flags.TRUNCATE_MODE,
+ params->flags.TRUNCATE_DEPTH,
FMT_TRUNCATE_MODE,
- params->flags.TRUNCATE_DEPTH);
+ params->flags.TRUNCATE_MODE);
}
/**
* set_spatial_dither
* 1) set spatial dithering mode: pattern of seed
- * 2) set spatical dithering depth: 0 for 18bpp or 1 for 24bpp
+ * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
* 3) set random seed
* 4) set random mode
* lfsr is reset every frame or not reset
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index e42b6eb..444558c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -26,7 +26,8 @@
#include "dc_bios_types.h"
#include "dce_stream_encoder.h"
#include "reg_helper.h"
-
+#define DC_LOGGER \
+ enc110->base.ctx->logger
enum DP_PIXEL_ENCODING {
DP_PIXEL_ENCODING_RGB444 = 0x00000000,
DP_PIXEL_ENCODING_YCBCR422 = 0x00000001,
@@ -197,7 +198,6 @@ static void dce110_update_hdmi_info_packet(
uint32_t packet_index,
const struct encoder_info_packet *info_packet)
{
- struct dc_context *ctx = enc110->base.ctx;
uint32_t cont, send, line;
if (info_packet->valid) {
@@ -277,8 +277,7 @@ static void dce110_update_hdmi_info_packet(
#endif
default:
/* invalid HW packet index */
- dm_logger_write(
- ctx->logger, LOG_WARNING,
+ DC_LOG_WARNING(
"Invalid HW packet index: %s()\n",
__func__);
return;
@@ -300,6 +299,8 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
uint32_t h_back_porch;
uint8_t synchronous_clock = 0; /* asynchronous mode */
uint8_t colorimetry_bpc;
+ uint8_t dynamic_range_rgb = 0; /*full range*/
+ uint8_t dynamic_range_ycbcr = 1; /*bt709*/
#endif
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
@@ -380,11 +381,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
}
/* set dynamic range and YCbCr range */
- if (enc110->se_mask->DP_DYN_RANGE && enc110->se_mask->DP_YCBCR_RANGE)
- REG_UPDATE_2(
- DP_PIXEL_FORMAT,
- DP_DYN_RANGE, 0,
- DP_YCBCR_RANGE, 0);
+
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
switch (crtc_timing->display_color_depth) {
@@ -413,37 +410,57 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_SRGB:
misc0 = misc0 | 0x0;
misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ dynamic_range_rgb = 0; /*full range*/
break;
case COLOR_SPACE_SRGB_LIMITED:
misc0 = misc0 | 0x8; /* bit3=1 */
misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ dynamic_range_rgb = 1; /*limited range*/
break;
case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ dynamic_range_ycbcr = 0; /*bt601*/
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
break;
case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ dynamic_range_ycbcr = 1; /*bt709*/
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
break;
- case COLOR_SPACE_2020_RGB_FULLRANGE:
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ dynamic_range_rgb = 1; /*limited range*/
+ break;
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_XR_RGB:
+ case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
+ case COLOR_SPACE_DCIP3:
+ case COLOR_SPACE_XV_YCC_709:
+ case COLOR_SPACE_XV_YCC_601:
+ case COLOR_SPACE_DISPLAYNATIVE:
+ case COLOR_SPACE_DOLBYVISION:
+ case COLOR_SPACE_APPCTRL:
+ case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
- case COLOR_SPACE_YCBCR601_LIMITED:
- case COLOR_SPACE_YCBCR709_LIMITED:
/* do nothing */
break;
}
+ if (enc110->se_mask->DP_DYN_RANGE && enc110->se_mask->DP_YCBCR_RANGE)
+ REG_UPDATE_2(
+ DP_PIXEL_FORMAT,
+ DP_DYN_RANGE, dynamic_range_rgb,
+ DP_YCBCR_RANGE, dynamic_range_ycbcr);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (REG(DP_MSA_COLORIMETRY))
@@ -902,6 +919,7 @@ static void dce110_stream_encoder_dp_blank(
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
uint32_t retries = 0;
+ uint32_t reg1 = 0;
uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
/* Note: For CZ, we are changing driver default to disable
@@ -910,7 +928,10 @@ static void dce110_stream_encoder_dp_blank(
* handful of panels that cannot handle disable stream at
* HBLANK and will result in a white line flash across the
* screen on stream disable. */
-
+ REG_GET(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, &reg1);
+ if ((reg1 & 0x1) == 0)
+ /* stream not enabled */
+ return;
/* Specify the video stream disable point
* (2 = start of the next vertical blank) */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
@@ -1364,7 +1385,7 @@ static void dce110_se_setup_hdmi_audio(
crtc_info->requested_pixel_clock,
crtc_info->calculated_pixel_clock,
&audio_clock_info);
- dm_logger_write(enc->ctx->logger, LOG_HW_AUDIO,
+ DC_LOG_HW_AUDIO(
"\n%s:Input::requested_pixel_clock = %d" \
"calculated_pixel_clock = %d \n", __func__, \
crtc_info->requested_pixel_clock, \
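The stream-encoder hunk defers the DP_DYN_RANGE/DP_YCBCR_RANGE programming until after the color-space switch, so both values can be derived per color space instead of being hard-coded to zero. A condensed stand-alone sketch of that mapping (only a few color spaces shown; the defaults mirror the new initializers in the hunk):

#include <stdint.h>
#include <stdio.h>

enum color_space {
	COLOR_SPACE_SRGB,
	COLOR_SPACE_SRGB_LIMITED,
	COLOR_SPACE_YCBCR601,
	COLOR_SPACE_YCBCR709,
};

struct dp_ranges {
	uint8_t dynamic_range_rgb;	/* 0 = full range, 1 = limited range */
	uint8_t dynamic_range_ycbcr;	/* 0 = bt601, 1 = bt709 */
};

/* condensed form of the mapping the hunk builds alongside MISC0/MISC1 */
static struct dp_ranges ranges_for(enum color_space cs)
{
	struct dp_ranges r = { 0, 1 };	/* defaults match the new initializers */

	switch (cs) {
	case COLOR_SPACE_SRGB_LIMITED:
		r.dynamic_range_rgb = 1;
		break;
	case COLOR_SPACE_YCBCR601:
		r.dynamic_range_ycbcr = 0;
		break;
	default:
		break;
	}
	return r;
}

int main(void)
{
	struct dp_ranges r = ranges_for(COLOR_SPACE_YCBCR601);

	printf("rgb=%u ycbcr=%u\n", r.dynamic_range_rgb, r.dynamic_range_ycbcr);
	return 0;
}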
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index ae32af3..832c5da 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -38,6 +38,8 @@
#define CTX \
xfm_dce->base.ctx
+#define DC_LOGGER \
+ xfm_dce->base.ctx->logger
#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
#define GAMUT_MATRIX_SIZE 12
@@ -618,80 +620,48 @@ static void program_bit_depth_reduction(
enum dc_color_depth depth,
const struct bit_depth_reduction_params *bit_depth_params)
{
- enum dcp_bit_depth_reduction_mode depth_reduction_mode;
- enum dcp_spatial_dither_mode spatial_dither_mode;
- bool frame_random_enable;
- bool rgb_random_enable;
- bool highpass_random_enable;
+ enum dcp_out_trunc_round_depth trunc_round_depth;
+ enum dcp_out_trunc_round_mode trunc_mode;
+ bool spatial_dither_enable;
ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
- if (bit_depth_params->flags.SPATIAL_DITHER_ENABLED) {
- depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DITHER;
- frame_random_enable = true;
- rgb_random_enable = true;
- highpass_random_enable = true;
-
- } else {
- depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED;
- frame_random_enable = false;
- rgb_random_enable = false;
- highpass_random_enable = false;
+ spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED;
+ /* Default to 12 bit truncation without rounding */
+ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT;
+ trunc_mode = DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE;
+
+ if (bit_depth_params->flags.TRUNCATE_ENABLED) {
+ /* Don't enable dithering if truncation is enabled */
+ spatial_dither_enable = false;
+ trunc_mode = bit_depth_params->flags.TRUNCATE_MODE ?
+ DCP_OUT_TRUNC_ROUND_MODE_ROUND :
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE;
+
+ if (bit_depth_params->flags.TRUNCATE_DEPTH == 0 ||
+ bit_depth_params->flags.TRUNCATE_DEPTH == 1)
+ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_8BIT;
+ else if (bit_depth_params->flags.TRUNCATE_DEPTH == 2)
+ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_10BIT;
+ else {
+ /*
+ * Invalid truncate/round depth. Setting here to 12bit
+ * to prevent use-before-initialize errors.
+ */
+ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT;
+ BREAK_TO_DEBUGGER();
+ }
}
- spatial_dither_mode = DCP_SPATIAL_DITHER_MODE_A_AA_A;
-
set_clamp(xfm_dce, depth);
-
- switch (depth_reduction_mode) {
- case DCP_BIT_DEPTH_REDUCTION_MODE_DITHER:
- /* Spatial Dither: Set round/truncate to bypass (12bit),
- * enable Dither (30bpp) */
- set_round(xfm_dce,
- DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
- DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
-
- set_dither(xfm_dce, true, spatial_dither_mode,
- DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
- rgb_random_enable, highpass_random_enable);
- break;
- case DCP_BIT_DEPTH_REDUCTION_MODE_ROUND:
- /* Round: Enable round (10bit), disable Dither */
- set_round(xfm_dce,
- DCP_OUT_TRUNC_ROUND_MODE_ROUND,
- DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
-
- set_dither(xfm_dce, false, spatial_dither_mode,
- DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
- rgb_random_enable, highpass_random_enable);
- break;
- case DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE: /* Truncate */
- /* Truncate: Enable truncate (10bit), disable Dither */
- set_round(xfm_dce,
- DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
- DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
-
- set_dither(xfm_dce, false, spatial_dither_mode,
- DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
- rgb_random_enable, highpass_random_enable);
- break;
-
- case DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED: /* Disabled */
- /* Truncate: Set round/truncate to bypass (12bit),
- * disable Dither */
- set_round(xfm_dce,
- DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
- DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
-
- set_dither(xfm_dce, false, spatial_dither_mode,
- DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
- rgb_random_enable, highpass_random_enable);
- break;
- default:
- /* Invalid DCP Depth reduction mode */
- BREAK_TO_DEBUGGER();
- break;
- }
+ set_round(xfm_dce, trunc_mode, trunc_round_depth);
+ set_dither(xfm_dce,
+ spatial_dither_enable,
+ DCP_SPATIAL_DITHER_MODE_A_AA_A,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP,
+ bit_depth_params->flags.FRAME_RANDOM,
+ bit_depth_params->flags.RGB_RANDOM,
+ bit_depth_params->flags.HIGHPASS_RANDOM);
}
static int dce_transform_get_max_num_of_supported_lines(
@@ -725,8 +695,7 @@ static int dce_transform_get_max_num_of_supported_lines(
break;
default:
- dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
- "%s: Invalid LB pixel depth",
+ DC_LOG_WARNING("%s: Invalid LB pixel depth",
__func__);
BREAK_TO_DEBUGGER();
break;
@@ -823,8 +792,7 @@ static void dce_transform_set_pixel_storage_depth(
if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
/*we should use unsupported capabilities
* unless it is required by w/a*/
- dm_logger_write(xfm->ctx->logger, LOG_WARNING,
- "%s: Capability not supported",
+ DC_LOG_WARNING("%s: Capability not supported",
__func__);
}
}
@@ -879,6 +847,7 @@ static void dce_transform_set_gamut_remap(
const struct xfm_grph_csc_adjustment *adjust)
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int i = 0;
if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
/* Bypass if type is bypass or hw */
@@ -887,20 +856,8 @@ static void dce_transform_set_gamut_remap(
struct fixed31_32 arr_matrix[GAMUT_MATRIX_SIZE];
uint16_t arr_reg_val[GAMUT_MATRIX_SIZE];
- arr_matrix[0] = adjust->temperature_matrix[0];
- arr_matrix[1] = adjust->temperature_matrix[1];
- arr_matrix[2] = adjust->temperature_matrix[2];
- arr_matrix[3] = dal_fixed31_32_zero;
-
- arr_matrix[4] = adjust->temperature_matrix[3];
- arr_matrix[5] = adjust->temperature_matrix[4];
- arr_matrix[6] = adjust->temperature_matrix[5];
- arr_matrix[7] = dal_fixed31_32_zero;
-
- arr_matrix[8] = adjust->temperature_matrix[6];
- arr_matrix[9] = adjust->temperature_matrix[7];
- arr_matrix[10] = adjust->temperature_matrix[8];
- arr_matrix[11] = dal_fixed31_32_zero;
+ for (i = 0; i < GAMUT_MATRIX_SIZE; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
convert_float_matrix(
arr_reg_val, arr_matrix, GAMUT_MATRIX_SIZE);
@@ -1126,7 +1083,7 @@ void dce110_opp_set_csc_adjustment(
CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
program_color_matrix(
- xfm_dce, tbl_entry, GRAPHICS_CSC_ADJUST_TYPE_SW);
+ xfm_dce, tbl_entry, GRPH_COLOR_MATRIX_SW);
/* We did everything ,now program DxOUTPUT_CSC_CONTROL */
configure_graphics_mode(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
@@ -1177,207 +1134,159 @@ void dce110_opp_set_csc_default(
default_adjust->out_color_space);
}
-static void program_pwl(
- struct dce_transform *xfm_dce,
- const struct pwl_params *params)
+static void program_pwl(struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
{
- uint32_t value;
int retval;
+ uint8_t max_tries = 10;
+ uint8_t counter = 0;
+ uint32_t i = 0;
+ const struct pwl_result_data *rgb = params->rgb_resulted;
- {
- uint8_t max_tries = 10;
- uint8_t counter = 0;
+ /* Power on LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, 1);
+ else
+ REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, 1);
- /* Power on LUT memory */
- if (REG(DCFE_MEM_PWR_CTRL))
- REG_UPDATE(DCFE_MEM_PWR_CTRL,
- DCP_REGAMMA_MEM_PWR_DIS, 1);
- else
- REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL,
- REGAMMA_LUT_LIGHT_SLEEP_DIS, 1);
-
- while (counter < max_tries) {
- if (REG(DCFE_MEM_PWR_STATUS)) {
- value = REG_READ(DCFE_MEM_PWR_STATUS);
- REG_GET(DCFE_MEM_PWR_STATUS,
- DCP_REGAMMA_MEM_PWR_STATE,
- &retval);
-
- if (retval == 0)
- break;
- ++counter;
- } else {
- value = REG_READ(DCFE_MEM_LIGHT_SLEEP_CNTL);
- REG_GET(DCFE_MEM_LIGHT_SLEEP_CNTL,
- REGAMMA_LUT_MEM_PWR_STATE,
- &retval);
-
- if (retval == 0)
- break;
- ++counter;
- }
+ while (counter < max_tries) {
+ if (REG(DCFE_MEM_PWR_STATUS)) {
+ REG_GET(DCFE_MEM_PWR_STATUS,
+ DCP_REGAMMA_MEM_PWR_STATE,
+ &retval);
+
+ if (retval == 0)
+ break;
+ ++counter;
+ } else {
+ REG_GET(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_MEM_PWR_STATE,
+ &retval);
+
+ if (retval == 0)
+ break;
+ ++counter;
}
+ }
- if (counter == max_tries) {
- dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
- "%s: regamma lut was not powered on "
+ if (counter == max_tries) {
+ DC_LOG_WARNING("%s: regamma lut was not powered on "
"in a timely manner,"
" programming still proceeds\n",
__func__);
- }
}
REG_UPDATE(REGAMMA_LUT_WRITE_EN_MASK,
- REGAMMA_LUT_WRITE_EN_MASK, 7);
+ REGAMMA_LUT_WRITE_EN_MASK, 7);
REG_WRITE(REGAMMA_LUT_INDEX, 0);
/* Program REGAMMA_LUT_DATA */
- {
- uint32_t i = 0;
- const struct pwl_result_data *rgb = params->rgb_resulted;
+ while (i != params->hw_points_num) {
- while (i != params->hw_points_num) {
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->red_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->green_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->blue_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_red_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_green_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_blue_reg);
- REG_WRITE(REGAMMA_LUT_DATA, rgb->red_reg);
- REG_WRITE(REGAMMA_LUT_DATA, rgb->green_reg);
- REG_WRITE(REGAMMA_LUT_DATA, rgb->blue_reg);
- REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_red_reg);
- REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_green_reg);
- REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_blue_reg);
-
- ++rgb;
- ++i;
- }
+ ++rgb;
+ ++i;
}
/* we are done with DCP LUT memory; re-enable low power mode */
if (REG(DCFE_MEM_PWR_CTRL))
REG_UPDATE(DCFE_MEM_PWR_CTRL,
- DCP_REGAMMA_MEM_PWR_DIS, 0);
+ DCP_REGAMMA_MEM_PWR_DIS, 0);
else
REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL,
- REGAMMA_LUT_LIGHT_SLEEP_DIS, 0);
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, 0);
}
-static void regamma_config_regions_and_segments(
- struct dce_transform *xfm_dce,
- const struct pwl_params *params)
+static void regamma_config_regions_and_segments(struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
{
const struct gamma_curve *curve;
- {
- REG_SET_2(REGAMMA_CNTLA_START_CNTL, 0,
- REGAMMA_CNTLA_EXP_REGION_START, params->arr_points[0].custom_float_x,
- REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, 0);
- }
- {
- REG_SET(REGAMMA_CNTLA_SLOPE_CNTL, 0,
- REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, params->arr_points[0].custom_float_slope);
+ REG_SET_2(REGAMMA_CNTLA_START_CNTL, 0,
+ REGAMMA_CNTLA_EXP_REGION_START, params->arr_points[0].custom_float_x,
+ REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, 0);
- }
- {
- REG_SET(REGAMMA_CNTLA_END_CNTL1, 0,
- REGAMMA_CNTLA_EXP_REGION_END, params->arr_points[1].custom_float_x);
- }
- {
- REG_SET_2(REGAMMA_CNTLA_END_CNTL2, 0,
- REGAMMA_CNTLA_EXP_REGION_END_BASE, params->arr_points[1].custom_float_y,
- REGAMMA_CNTLA_EXP_REGION_END_SLOPE, params->arr_points[2].custom_float_slope);
- }
+ REG_SET(REGAMMA_CNTLA_SLOPE_CNTL, 0,
+ REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, params->arr_points[0].custom_float_slope);
- curve = params->arr_curve_points;
+ REG_SET(REGAMMA_CNTLA_END_CNTL1, 0,
+ REGAMMA_CNTLA_EXP_REGION_END, params->arr_points[1].custom_float_x);
- {
- REG_SET_4(REGAMMA_CNTLA_REGION_0_1, 0,
- REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- }
-
- curve += 2;
+ REG_SET_2(REGAMMA_CNTLA_END_CNTL2, 0,
+ REGAMMA_CNTLA_EXP_REGION_END_BASE, params->arr_points[1].custom_float_y,
+ REGAMMA_CNTLA_EXP_REGION_END_SLOPE, params->arr_points[1].custom_float_slope);
- {
- REG_SET_4(REGAMMA_CNTLA_REGION_2_3, 0,
- REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- }
+ curve = params->arr_curve_points;
+ REG_SET_4(REGAMMA_CNTLA_REGION_0_1, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
- {
- REG_SET_4(REGAMMA_CNTLA_REGION_4_5, 0,
- REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- }
-
+ REG_SET_4(REGAMMA_CNTLA_REGION_2_3, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
- {
- REG_SET_4(REGAMMA_CNTLA_REGION_6_7, 0,
- REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- }
-
+ REG_SET_4(REGAMMA_CNTLA_REGION_4_5, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
- {
- REG_SET_4(REGAMMA_CNTLA_REGION_8_9, 0,
- REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- }
-
+ REG_SET_4(REGAMMA_CNTLA_REGION_6_7, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
- {
- REG_SET_4(REGAMMA_CNTLA_REGION_10_11, 0,
- REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- }
-
+ REG_SET_4(REGAMMA_CNTLA_REGION_8_9, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
- {
- REG_SET_4(REGAMMA_CNTLA_REGION_12_13, 0,
- REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- }
+ REG_SET_4(REGAMMA_CNTLA_REGION_10_11, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ curve += 2;
+ REG_SET_4(REGAMMA_CNTLA_REGION_12_13, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
curve += 2;
- {
- REG_SET_4(REGAMMA_CNTLA_REGION_14_15, 0,
- REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- }
+ REG_SET_4(REGAMMA_CNTLA_REGION_14_15, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
}
-void dce110_opp_program_regamma_pwl(
- struct transform *xfm,
- const struct pwl_params *params)
+void dce110_opp_program_regamma_pwl(struct transform *xfm,
+ const struct pwl_params *params)
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
@@ -1388,47 +1297,42 @@ void dce110_opp_program_regamma_pwl(
program_pwl(xfm_dce, params);
}
-void dce110_opp_power_on_regamma_lut(
- struct transform *xfm,
- bool power_on)
+void dce110_opp_power_on_regamma_lut(struct transform *xfm,
+ bool power_on)
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
if (REG(DCFE_MEM_PWR_CTRL))
REG_UPDATE_2(DCFE_MEM_PWR_CTRL,
- DCP_REGAMMA_MEM_PWR_DIS, power_on,
- DCP_LUT_MEM_PWR_DIS, power_on);
+ DCP_REGAMMA_MEM_PWR_DIS, power_on,
+ DCP_LUT_MEM_PWR_DIS, power_on);
else
REG_UPDATE_2(DCFE_MEM_LIGHT_SLEEP_CNTL,
- REGAMMA_LUT_LIGHT_SLEEP_DIS, power_on,
- DCP_LUT_LIGHT_SLEEP_DIS, power_on);
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, power_on,
+ DCP_LUT_LIGHT_SLEEP_DIS, power_on);
}
void dce110_opp_set_regamma_mode(struct transform *xfm,
- enum opp_regamma mode)
+ enum opp_regamma mode)
{
struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
REG_SET(REGAMMA_CONTROL, 0,
- GRPH_REGAMMA_MODE, mode);
+ GRPH_REGAMMA_MODE, mode);
}
static const struct transform_funcs dce_transform_funcs = {
.transform_reset = dce_transform_reset,
- .transform_set_scaler =
- dce_transform_set_scaler,
- .transform_set_gamut_remap =
- dce_transform_set_gamut_remap,
+ .transform_set_scaler = dce_transform_set_scaler,
+ .transform_set_gamut_remap = dce_transform_set_gamut_remap,
.opp_set_csc_adjustment = dce110_opp_set_csc_adjustment,
.opp_set_csc_default = dce110_opp_set_csc_default,
.opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut,
.opp_program_regamma_pwl = dce110_opp_program_regamma_pwl,
.opp_set_regamma_mode = dce110_opp_set_regamma_mode,
- .transform_set_pixel_storage_depth =
- dce_transform_set_pixel_storage_depth,
- .transform_get_optimal_number_of_taps =
- dce_transform_get_optimal_number_of_taps
+ .transform_set_pixel_storage_depth = dce_transform_set_pixel_storage_depth,
+ .transform_get_optimal_number_of_taps = dce_transform_get_optimal_number_of_taps
};
/*****************************************/
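program_bit_depth_reduction() earlier in this file's diff collapses the old four-way depth-reduction switch into three locals: a dither enable, a truncate/round mode, and a truncate depth derived from the TRUNCATE_DEPTH flag. The depth mapping in isolation, as a stand-alone sketch (enum names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

enum trunc_round_depth { DEPTH_8BIT, DEPTH_10BIT, DEPTH_12BIT };

/*
 * Flag depths 0 and 1 select 8 bit, 2 selects 10 bit, anything else
 * falls back to the 12 bit bypass value (the driver also breaks to the
 * debugger in that case).
 */
static enum trunc_round_depth map_truncate_depth(unsigned int flag_depth,
						 bool *valid)
{
	*valid = true;

	if (flag_depth == 0 || flag_depth == 1)
		return DEPTH_8BIT;
	if (flag_depth == 2)
		return DEPTH_10BIT;

	*valid = false;
	return DEPTH_12BIT;
}

int main(void)
{
	bool ok;

	printf("%d %d\n", map_truncate_depth(1, &ok), map_truncate_depth(2, &ok));
	return 0;
}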
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
index bfc94b4..948281d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -248,6 +248,7 @@
XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
XFM_SF(DCP0_REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, mask_sh),\
XFM_SF(SCL0_SCL_MODE, SCL_MODE, mask_sh), \
XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index e7a6948..41f83ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -69,7 +69,7 @@ static const struct dce100_hw_seq_reg_offsets reg_offsets[] = {
******************************************************************************/
/***************************PIPE_CONTROL***********************************/
-static bool dce100_enable_display_power_gating(
+bool dce100_enable_display_power_gating(
struct dc *dc,
uint8_t controller_id,
struct dc_bios *dcb,
@@ -148,5 +148,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.set_bandwidth = dce100_set_bandwidth;
+ dc->hwss.pplib_apply_display_requirements =
+ dce100_pplib_apply_display_requirements;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index cb5384e..c6ec0ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -38,5 +38,9 @@ void dce100_set_bandwidth(
struct dc_state *context,
bool decrease_allowed);
+bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating);
+
#endif /* __DC_HWSS_DCE100_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3ea43e2..3092f76 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -51,6 +51,9 @@
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
+#include "dce/dce_dmcu.h"
+#include "dce/dce_abm.h"
+
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"
@@ -320,7 +323,29 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
@@ -622,6 +647,12 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.display_clock != NULL)
dce_disp_clk_destroy(&pool->base.display_clock);
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
if (pool->base.irqs != NULL)
dal_irq_service_destroy(&pool->base.irqs);
}
@@ -829,6 +860,25 @@ static bool construct(
goto res_create_fail;
}
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
/* get static clock information for PPLIB or firmware, save
* max_clock_state
@@ -849,9 +899,11 @@ static bool construct(
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = res_cap.num_timing_generator;
+ pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
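The dce100_resource.c changes add DMCU and ABM objects to the pool with the usual construct/destruct pairing: each create is checked, a failure jumps to the common error path, and destruct() releases whichever objects exist. The same shape in a stand-alone sketch (plain calloc/free stand in for the real create/destroy helpers):

#include <stdbool.h>
#include <stdlib.h>

struct dmcu { int placeholder; };
struct abm { int placeholder; };

struct pool {
	struct dmcu *dmcu;
	struct abm *abm;
};

static void pool_destruct(struct pool *p)
{
	free(p->abm);
	free(p->dmcu);
	p->abm = NULL;
	p->dmcu = NULL;
}

/* each create is checked; any failure unwinds whatever was built so far */
static bool pool_construct(struct pool *p)
{
	p->dmcu = calloc(1, sizeof(*p->dmcu));
	if (!p->dmcu)
		goto res_create_fail;

	p->abm = calloc(1, sizeof(*p->abm));
	if (!p->abm)
		goto res_create_fail;

	return true;

res_create_fail:
	pool_destruct(p);
	return false;
}

int main(void)
{
	struct pool p = { 0 };
	bool ok = pool_construct(&p);

	pool_destruct(&p);
	return ok ? 0 : 1;
}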
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 6923662..775d3bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -34,6 +34,8 @@
#include "dce110_compressor.h"
+#define DC_LOGGER \
+ cp110->base.ctx->logger
#define DCP_REG(reg)\
(reg + cp110->offsets.dcp_offset)
#define DMIF_REG(reg)\
@@ -120,14 +122,10 @@ static void wait_for_fbc_state_changed(
}
if (counter == 10) {
- dm_logger_write(
- cp110->base.ctx->logger, LOG_WARNING,
- "%s: wait counter exceeded, changes to HW not applied",
+ DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
__func__);
} else {
- dm_logger_write(
- cp110->base.ctx->logger, LOG_SYNC,
- "FBC status changed to %d", enabled);
+ DC_LOG_SYNC("FBC status changed to %d", enabled);
}
@@ -310,9 +308,7 @@ void dce110_compressor_program_compressed_surface_address_and_pitch(
if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
fbc_pitch = fbc_pitch / 8;
else
- dm_logger_write(
- compressor->ctx->logger, LOG_WARNING,
- "%s: Unexpected DCE11 compression ratio",
+ DC_LOG_WARNING("%s: Unexpected DCE11 compression ratio",
__func__);
/* Clear content first. */
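Several files in this patch convert dm_logger_write(ctx->logger, LOG_*, ...) calls into DC_LOG_* macros and add a per-file DC_LOGGER definition naming the logger those macros should use. The sketch below shows how such a scheme can hang together, with stand-in names (logger_write, LOG_WARNING); the real DC_LOG_* definitions are not part of this patch, so treat this purely as an illustration of the pattern:

#include <stdarg.h>
#include <stdio.h>

struct logger { const char *tag; };

static struct logger local_logger = { "dce110_compressor" };

static void logger_write(struct logger *log, const char *level,
			 const char *fmt, ...)
{
	va_list ap;

	printf("[%s:%s] ", log->tag, level);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/* each file points DC_LOGGER at the logger it wants the macros to use */
#define DC_LOGGER (&local_logger)
#define LOG_WARNING(...) logger_write(DC_LOGGER, "warn", __VA_ARGS__)

int main(void)
{
	LOG_WARNING("%s: wait counter exceeded, changes not applied\n",
		    __func__);
	return 0;
}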
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index d844fad..30dd62f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -57,6 +57,8 @@
#include "dce/dce_11_0_sh_mask.h"
#include "custom_float.h"
+#include "atomfirmware.h"
+
/*
* All values are in milliseconds;
* For eDP, after power-up/power/down,
@@ -68,6 +70,8 @@
#define CTX \
hws->ctx
+#define DC_LOGGER \
+ ctx->logger
#define REG(reg)\
hws->regs->reg
@@ -257,9 +261,9 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
}
}
-static bool dce110_set_input_transfer_func(
- struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
+static bool
+dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
const struct dc_transfer_func *tf = NULL;
@@ -275,30 +279,24 @@ static bool dce110_set_input_transfer_func(
build_prescale_params(&prescale_params, plane_state);
ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
- if (plane_state->gamma_correction && dce_use_lut(plane_state))
+ if (plane_state->gamma_correction && dce_use_lut(plane_state->format))
ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
if (tf == NULL) {
/* Default case if no input transfer function specified */
- ipp->funcs->ipp_set_degamma(ipp,
- IPP_DEGAMMA_MODE_HW_sRGB);
+ ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_HW_sRGB);
} else if (tf->type == TF_TYPE_PREDEFINED) {
switch (tf->tf) {
case TRANSFER_FUNCTION_SRGB:
- ipp->funcs->ipp_set_degamma(ipp,
- IPP_DEGAMMA_MODE_HW_sRGB);
+ ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_HW_sRGB);
break;
case TRANSFER_FUNCTION_BT709:
- ipp->funcs->ipp_set_degamma(ipp,
- IPP_DEGAMMA_MODE_HW_xvYCC);
+ ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_HW_xvYCC);
break;
case TRANSFER_FUNCTION_LINEAR:
- ipp->funcs->ipp_set_degamma(ipp,
- IPP_DEGAMMA_MODE_BYPASS);
+ ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
- result = false;
- break;
default:
result = false;
break;
@@ -313,10 +311,9 @@ static bool dce110_set_input_transfer_func(
return result;
}
-static bool convert_to_custom_float(
- struct pwl_result_data *rgb_resulted,
- struct curve_points *arr_points,
- uint32_t hw_points_num)
+static bool convert_to_custom_float(struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num)
{
struct custom_float_format fmt;
@@ -328,26 +325,20 @@ static bool convert_to_custom_float(
fmt.mantissa_bits = 12;
fmt.sign = true;
- if (!convert_to_custom_float_format(
- arr_points[0].x,
- &fmt,
- &arr_points[0].custom_float_x)) {
+ if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
+ &arr_points[0].custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(
- arr_points[0].offset,
- &fmt,
- &arr_points[0].custom_float_offset)) {
+ if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
+ &arr_points[0].custom_float_offset)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(
- arr_points[0].slope,
- &fmt,
- &arr_points[0].custom_float_slope)) {
+ if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
+ &arr_points[0].custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -355,26 +346,20 @@ static bool convert_to_custom_float(
fmt.mantissa_bits = 10;
fmt.sign = false;
- if (!convert_to_custom_float_format(
- arr_points[1].x,
- &fmt,
- &arr_points[1].custom_float_x)) {
+ if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
+ &arr_points[1].custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(
- arr_points[1].y,
- &fmt,
- &arr_points[1].custom_float_y)) {
+ if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
+ &arr_points[1].custom_float_y)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(
- arr_points[2].slope,
- &fmt,
- &arr_points[2].custom_float_slope)) {
+ if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
+ &arr_points[1].custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -383,50 +368,38 @@ static bool convert_to_custom_float(
fmt.sign = true;
while (i != hw_points_num) {
- if (!convert_to_custom_float_format(
- rgb->red,
- &fmt,
- &rgb->red_reg)) {
+ if (!convert_to_custom_float_format(rgb->red, &fmt,
+ &rgb->red_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(
- rgb->green,
- &fmt,
- &rgb->green_reg)) {
+ if (!convert_to_custom_float_format(rgb->green, &fmt,
+ &rgb->green_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(
- rgb->blue,
- &fmt,
- &rgb->blue_reg)) {
+ if (!convert_to_custom_float_format(rgb->blue, &fmt,
+ &rgb->blue_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(
- rgb->delta_red,
- &fmt,
- &rgb->delta_red_reg)) {
+ if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
+ &rgb->delta_red_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(
- rgb->delta_green,
- &fmt,
- &rgb->delta_green_reg)) {
+ if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
+ &rgb->delta_green_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(
- rgb->delta_blue,
- &fmt,
- &rgb->delta_blue_reg)) {
+ if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
+ &rgb->delta_blue_reg)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -438,8 +411,13 @@ static bool convert_to_custom_float(
return true;
}
-static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
- *output_tf, struct pwl_params *regamma_params)
+#define MAX_LOW_POINT 25
+#define NUMBER_REGIONS 16
+#define NUMBER_SW_SEGMENTS 16
+
+static bool
+dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
+ struct pwl_params *regamma_params)
{
struct curve_points *arr_points;
struct pwl_result_data *rgb_resulted;
@@ -451,11 +429,10 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
struct fixed31_32 y1_min;
struct fixed31_32 y3_max;
- int32_t segment_start, segment_end;
- uint32_t i, j, k, seg_distr[16], increment, start_index, hw_points;
+ int32_t region_start, region_end;
+ uint32_t i, j, k, seg_distr[NUMBER_REGIONS], increment, start_index, hw_points;
- if (output_tf == NULL || regamma_params == NULL ||
- output_tf->type == TF_TYPE_BYPASS)
+ if (output_tf == NULL || regamma_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
arr_points = regamma_params->arr_points;
@@ -468,34 +445,20 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
/* 16 segments
* segments are from 2^-11 to 2^5
*/
- segment_start = -11;
- segment_end = 5;
-
- seg_distr[0] = 2;
- seg_distr[1] = 2;
- seg_distr[2] = 2;
- seg_distr[3] = 2;
- seg_distr[4] = 2;
- seg_distr[5] = 2;
- seg_distr[6] = 3;
- seg_distr[7] = 4;
- seg_distr[8] = 4;
- seg_distr[9] = 4;
- seg_distr[10] = 4;
- seg_distr[11] = 5;
- seg_distr[12] = 5;
- seg_distr[13] = 5;
- seg_distr[14] = 5;
- seg_distr[15] = 5;
+ region_start = -11;
+ region_end = region_start + NUMBER_REGIONS;
+
+ for (i = 0; i < NUMBER_REGIONS; i++)
+ seg_distr[i] = 4;
} else {
/* 10 segments
* segment is from 2^-10 to 2^0
*/
- segment_start = -10;
- segment_end = 0;
+ region_start = -10;
+ region_end = 0;
- seg_distr[0] = 3;
+ seg_distr[0] = 4;
seg_distr[1] = 4;
seg_distr[2] = 4;
seg_distr[3] = 4;
@@ -503,8 +466,8 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
seg_distr[5] = 4;
seg_distr[6] = 4;
seg_distr[7] = 4;
- seg_distr[8] = 5;
- seg_distr[9] = 5;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
seg_distr[10] = -1;
seg_distr[11] = -1;
seg_distr[12] = -1;
@@ -519,10 +482,12 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
}
j = 0;
- for (k = 0; k < (segment_end - segment_start); k++) {
- increment = 32 / (1 << seg_distr[k]);
- start_index = (segment_start + k + 25) * 32;
- for (i = start_index; i < start_index + 32; i += increment) {
+ for (k = 0; k < (region_end - region_start); k++) {
+ increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (region_start + k + MAX_LOW_POINT) *
+ NUMBER_SW_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
+ i += increment) {
if (j == hw_points - 1)
break;
rgb_resulted[j].red = output_tf->tf_pts.red[i];
@@ -533,20 +498,15 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
}
/* last point */
- start_index = (segment_end + 25) * 32;
- rgb_resulted[hw_points - 1].red =
- output_tf->tf_pts.red[start_index];
- rgb_resulted[hw_points - 1].green =
- output_tf->tf_pts.green[start_index];
- rgb_resulted[hw_points - 1].blue =
- output_tf->tf_pts.blue[start_index];
+ start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
+ rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_start));
+ dal_fixed31_32_from_int(region_start));
arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_end));
- arr_points[2].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_end));
+ dal_fixed31_32_from_int(region_end));
y_r = rgb_resulted[0].red;
y_g = rgb_resulted[0].green;
@@ -555,9 +515,8 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
arr_points[0].y = y1_min;
- arr_points[0].slope = dal_fixed31_32_div(
- arr_points[0].y,
- arr_points[0].x);
+ arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y,
+ arr_points[0].x);
y_r = rgb_resulted[hw_points - 1].red;
y_g = rgb_resulted[hw_points - 1].green;
@@ -569,24 +528,18 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
arr_points[1].y = y3_max;
- arr_points[2].y = y3_max;
arr_points[1].slope = dal_fixed31_32_zero;
- arr_points[2].slope = dal_fixed31_32_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
* and the slope to be such that we hit 1.0 at 10000 nits.
*/
- const struct fixed31_32 end_value =
- dal_fixed31_32_from_int(125);
+ const struct fixed31_32 end_value = dal_fixed31_32_from_int(125);
arr_points[1].slope = dal_fixed31_32_div(
- dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
- dal_fixed31_32_sub(end_value, arr_points[1].x));
- arr_points[2].slope = dal_fixed31_32_div(
- dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
- dal_fixed31_32_sub(end_value, arr_points[1].x));
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
}
regamma_params->hw_points_num = hw_points;
@@ -594,18 +547,15 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
i = 1;
for (k = 0; k < 16 && i < 16; k++) {
if (seg_distr[k] != -1) {
- regamma_params->arr_curve_points[k].segments_num =
- seg_distr[k];
+ regamma_params->arr_curve_points[k].segments_num = seg_distr[k];
regamma_params->arr_curve_points[i].offset =
- regamma_params->arr_curve_points[k].
- offset + (1 << seg_distr[k]);
+ regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
i++;
}
if (seg_distr[k] != -1)
- regamma_params->arr_curve_points[k].segments_num =
- seg_distr[k];
+ regamma_params->arr_curve_points[k].segments_num = seg_distr[k];
rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;
@@ -620,15 +570,9 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
rgb_plus_1->blue = rgb->blue;
- rgb->delta_red = dal_fixed31_32_sub(
- rgb_plus_1->red,
- rgb->red);
- rgb->delta_green = dal_fixed31_32_sub(
- rgb_plus_1->green,
- rgb->green);
- rgb->delta_blue = dal_fixed31_32_sub(
- rgb_plus_1->blue,
- rgb->blue);
+ rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
++rgb_plus_1;
++rgb;
@@ -640,9 +584,9 @@ static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
return true;
}
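The translation loop above maps the software transfer-function table onto hardware PWL points: the table is assumed to hold NUMBER_SW_SEGMENTS (16) samples per power-of-two region, with the lowest region starting at 2^-MAX_LOW_POINT, and region k contributes 2^seg_distr[k] evenly spaced samples (for PQ, the end slope is chosen so a straight line from the last point reaches 1.0 at 125, which presumably corresponds to 10000 nits in units where 1.0 equals 80 nits). The following self-contained sketch reproduces only the index arithmetic; the helper name, table layout, and the assumption that seg_distr[k] never exceeds 4 are illustrative, not the driver's API.

#define MAX_LOW_POINT      25
#define NUMBER_SW_SEGMENTS 16

/* Pick hardware PWL sample indices from a software gamma table that
 * stores NUMBER_SW_SEGMENTS samples per power-of-two region, the
 * lowest region starting at 2^-MAX_LOW_POINT.  Region k keeps
 * 2^seg_distr[k] of those samples, evenly spaced.
 * Assumes seg_distr[k] <= 4 (so increment >= 1), region_start >=
 * -MAX_LOW_POINT and max_points >= 1.  Returns the point count.
 */
static unsigned int pick_hw_points(int region_start, int region_end,
				   const int *seg_distr,
				   unsigned int *out_idx,
				   unsigned int max_points)
{
	unsigned int j = 0;
	int k;

	for (k = 0; k < region_end - region_start; k++) {
		unsigned int increment =
			NUMBER_SW_SEGMENTS / (1u << seg_distr[k]);
		unsigned int start = (region_start + k + MAX_LOW_POINT) *
				     NUMBER_SW_SEGMENTS;
		unsigned int i;

		for (i = start; i < start + NUMBER_SW_SEGMENTS; i += increment) {
			if (j == max_points - 1)
				break;
			out_idx[j++] = i;
		}
	}

	/* the last HW slot always holds the first sample of the region
	 * just past region_end, matching the "last point" handling above
	 */
	out_idx[max_points - 1] =
		(region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
	return max_points;
}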
-static bool dce110_set_output_transfer_func(
- struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream)
+static bool
+dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
{
struct transform *xfm = pipe_ctx->plane_res.xfm;
@@ -650,13 +594,11 @@ static bool dce110_set_output_transfer_func(
xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
if (stream->out_transfer_func &&
- stream->out_transfer_func->type ==
- TF_TYPE_PREDEFINED &&
- stream->out_transfer_func->tf ==
- TRANSFER_FUNCTION_SRGB) {
+ stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) {
xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB);
- } else if (dce110_translate_regamma_to_hw_format(
- stream->out_transfer_func, &xfm->regamma_params)) {
+ } else if (dce110_translate_regamma_to_hw_format(stream->out_transfer_func,
+ &xfm->regamma_params)) {
xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params);
xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER);
} else {
@@ -679,7 +621,7 @@ static enum dc_status bios_parser_crtc_source_select(
const struct dc_sink *sink = pipe_ctx->stream->sink;
crtc_source_select.engine_id = pipe_ctx->stream_res.stream_enc->id;
- crtc_source_select.controller_id = pipe_ctx->pipe_idx + 1;
+ crtc_source_select.controller_id = pipe_ctx->stream_res.tg->inst + 1;
/*TODO: Need to un-hardcode color depth, dp_audio and account for
* the case where signal and sink signal is different (translator
* encoder)*/
@@ -742,15 +684,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct dc_link *link = pipe_ctx->stream->sink->link;
- /* 1. update AVI info frame (HDMI, DP)
- * we always need to update info frame
- */
+
uint32_t active_total_with_borders;
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- /* TODOFPGA may change to hwss.update_info_frame */
+ /* For MST, multiple streams can go to a single link.
+ * Connect the DIG back end to the front end in enable_stream and
+ * disconnect them in disable_stream.
+ * This keeps the stream/link separation logically clean. */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+
+ /* update AVI info frame (HDMI, DP)*/
+ /* TODO: FPGA may change to hwss.update_info_frame */
dce110_update_info_frame(pipe_ctx);
+
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
timing->h_addressable
@@ -771,12 +720,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
}
- /* For MST, there are multiply stream go to only one link.
- * connect DIG back_end to front_end while enable_stream and
- * disconnect them during disable_stream
- * BY this, it is logic clean to separate stream and link */
- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
+
+
}
@@ -792,10 +737,14 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
static bool is_panel_powered_on(struct dce_hwseq *hws)
{
- uint32_t value;
+ uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
- REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value);
- return value == 1;
+
+ REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
+
+ REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
+
+ return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}
static enum bp_result link_transmitter_control(
@@ -814,11 +763,11 @@ static enum bp_result link_transmitter_control(
* eDP only.
*/
void hwss_edp_wait_for_hpd_ready(
- struct link_encoder *enc,
- bool power_up)
+ struct dc_link *link,
+ bool power_up)
{
- struct dc_context *ctx = enc->ctx;
- struct graphics_object_id connector = enc->connector;
+ struct dc_context *ctx = link->ctx;
+ struct graphics_object_id connector = link->link_enc->connector;
struct gpio *hpd;
bool edp_hpd_high = false;
uint32_t time_elapsed = 0;
@@ -876,22 +825,22 @@ void hwss_edp_wait_for_hpd_ready(
dal_gpio_destroy_irq(&hpd);
if (false == edp_hpd_high) {
- dm_logger_write(ctx->logger, LOG_ERROR,
+ DC_LOG_ERROR(
"%s: wait timed out!\n", __func__);
}
}
void hwss_edp_power_control(
- struct link_encoder *enc,
- bool power_up)
+ struct dc_link *link,
+ bool power_up)
{
- struct dc_context *ctx = enc->ctx;
+ struct dc_context *ctx = link->ctx;
struct dce_hwseq *hwseq = ctx->dc->hwseq;
struct bp_transmitter_control cntl = { 0 };
enum bp_result bp_result;
- if (dal_graphics_object_id_get_connector_id(enc->connector)
+ if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
BREAK_TO_DEBUGGER();
return;
@@ -900,32 +849,30 @@ void hwss_edp_power_control(
if (power_up != is_panel_powered_on(hwseq)) {
/* Send VBIOS command to prompt eDP panel power */
- dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ DC_LOG_HW_RESUME_S3(
"%s: Panel Power action: %s\n",
__func__, (power_up ? "On":"Off"));
cntl.action = power_up ?
TRANSMITTER_CONTROL_POWER_ON :
TRANSMITTER_CONTROL_POWER_OFF;
- cntl.transmitter = enc->transmitter;
- cntl.connector_obj_id = enc->connector;
+ cntl.transmitter = link->link_enc->transmitter;
+ cntl.connector_obj_id = link->link_enc->connector;
cntl.coherent = false;
cntl.lanes_number = LANE_COUNT_FOUR;
- cntl.hpd_sel = enc->hpd_source;
+ cntl.hpd_sel = link->link_enc->hpd_source;
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
if (bp_result != BP_RESULT_OK)
- dm_logger_write(ctx->logger, LOG_ERROR,
+ DC_LOG_ERROR(
"%s: Panel Power bp_result: %d\n",
__func__, bp_result);
} else {
- dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ DC_LOG_HW_RESUME_S3(
"%s: Skipping Panel Power action: %s\n",
__func__, (power_up ? "On":"Off"));
}
-
- hwss_edp_wait_for_hpd_ready(enc, true);
}
/*todo: cloned in stream enc, fix*/
@@ -934,21 +881,21 @@ void hwss_edp_power_control(
* eDP only. Control the backlight of the eDP panel
*/
void hwss_edp_backlight_control(
- struct dc_link *link,
- bool enable)
+ struct dc_link *link,
+ bool enable)
{
- struct dce_hwseq *hws = link->dc->hwseq;
- struct dc_context *ctx = link->dc->ctx;
+ struct dc_context *ctx = link->ctx;
+ struct dce_hwseq *hws = ctx->dc->hwseq;
struct bp_transmitter_control cntl = { 0 };
- if (dal_graphics_object_id_get_connector_id(link->link_id)
+ if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
BREAK_TO_DEBUGGER();
return;
}
if (enable && is_panel_backlight_on(hws)) {
- dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n",
__func__);
return;
@@ -956,7 +903,7 @@ void hwss_edp_backlight_control(
/* Send VBIOS command to control eDP panel backlight */
- dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ DC_LOG_HW_RESUME_S3(
"%s: backlight action: %s\n",
__func__, (enable ? "On":"Off"));
@@ -970,6 +917,7 @@ void hwss_edp_backlight_control(
/*todo: unhardcode*/
cntl.lanes_number = LANE_COUNT_FOUR;
cntl.hpd_sel = link->link_enc->hpd_source;
+ cntl.signal = SIGNAL_TYPE_EDP;
/* For eDP, the following delays might need to be considered
* after link training completed:
@@ -982,7 +930,13 @@ void hwss_edp_backlight_control(
* Enable it in the future if necessary.
*/
/* dc_service_sleep_in_milliseconds(50); */
- link_transmitter_control(link->dc->ctx->dc_bios, &cntl);
+ /*edp 1.2*/
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
+ edp_receiver_ready_T7(link);
+ link_transmitter_control(ctx->dc_bios, &cntl);
+ /*edp 1.2*/
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
+ edp_receiver_ready_T9(link);
}
void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
@@ -1002,7 +956,11 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
- pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+ if (option != KEEP_ACQUIRED_RESOURCE ||
+ !dc->debug.az_endpoint_mute_only) {
+ /* only disable az_endpoint on power down or free */
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+ }
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
@@ -1025,12 +983,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
*/
}
- /* blank at encoder level */
- if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
- hwss_edp_backlight_control(link, false);
- pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
- }
+
link->link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,
pipe_ctx->stream_res.stream_enc->id,
@@ -1042,15 +995,32 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
struct encoder_unblank_param params = { { 0 } };
- struct dc_link *link = pipe_ctx->stream->sink->link;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
/* only 3 items below are used by unblank */
params.pixel_clk_khz =
pipe_ctx->stream->timing.pix_clk_khz;
params.link_settings.link_rate = link_settings->link_rate;
- pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
- if (link->connector_signal == SIGNAL_TYPE_EDP)
- hwss_edp_backlight_control(link, true);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_backlight_control(link, true);
+ stream->bl_pwm_level = 0;
+ }
+}
+void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
+ link->dc->hwss.edp_backlight_control(link, false);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
}
@@ -1152,7 +1122,7 @@ static void build_audio_output(
audio_output->pll_info.dto_source =
translate_to_dto_source(
- pipe_ctx->pipe_idx + 1);
+ pipe_ctx->stream_res.tg->inst + 1);
/* TODO hard code to enable for now. Need get from stream */
audio_output->pll_info.ss_enabled = true;
@@ -1164,7 +1134,7 @@ static void build_audio_output(
static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
- uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4;
+ uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;
switch (pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB8888:
@@ -1361,10 +1331,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(
resource_build_info_frame(pipe_ctx);
dce110_update_info_frame(pipe_ctx);
- if (!pipe_ctx_old->stream) {
- if (!pipe_ctx->stream->dpms_off)
- core_link_enable_stream(context, pipe_ctx);
- }
+ if (!pipe_ctx_old->stream)
+ core_link_enable_stream(context, pipe_ctx);
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
@@ -1401,7 +1369,7 @@ static void power_down_encoders(struct dc *dc)
}
dc->links[i]->link_enc->funcs->disable_output(
- dc->links[i]->link_enc, signal, dc->links[i]);
+ dc->links[i]->link_enc, signal);
}
}
@@ -1462,10 +1430,37 @@ static void disable_vga_and_power_gate_all_controllers(
enable_display_pipe_clock_gating(ctx,
true);
- dc->hwss.power_down_front_end(dc, i);
+ dc->current_state->res_ctx.pipe_ctx[i].pipe_idx = i;
+ dc->hwss.disable_plane(dc,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
}
}
+static struct dc_link *get_link_for_edp_not_in_use(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dc_link *link = NULL;
+
+ /* if the eDP panel is about to have a mode set, no need to disable it */
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->signal == SIGNAL_TYPE_EDP)
+ return NULL;
+ }
+
+ /* check if there is an eDP panel not in use */
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->local_sink &&
+ dc->links[i]->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link = dc->links[i];
+ break;
+ }
+ }
+
+ return link;
+}
+
/**
* When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
* 1. Power down all DC HW blocks
@@ -1473,11 +1468,37 @@ static void disable_vga_and_power_gate_all_controllers(
* 3. Enable power gating for controller
* 4. Set acc_mode_change bit (VBIOS will clear this bit when going to FSDOS)
*/
-void dce110_enable_accelerated_mode(struct dc *dc)
+void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
{
- power_down_all_hw_blocks(dc);
+ struct dc_bios *dcb = dc->ctx->dc_bios;
- disable_vga_and_power_gate_all_controllers(dc);
+ /* vbios has already lit up eDP, so we can leverage vbios and skip eDP
+ * programming
+ */
+ bool can_eDP_fast_boot_optimize =
+ (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
+
+ /* if the OS does not light up eDP and an eDP link is available, we want to disable it */
+ struct dc_link *edp_link_to_turnoff = NULL;
+
+ if (can_eDP_fast_boot_optimize) {
+ edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
+
+ if (!edp_link_to_turnoff)
+ dc->apply_edp_fast_boot_optimization = true;
+ }
+
+ if (!dc->apply_edp_fast_boot_optimization) {
+ if (edp_link_to_turnoff) {
+ /* turn off the backlight before DP blank and powering down the encoder */
+ dc->hwss.edp_backlight_control(edp_link_to_turnoff, false);
+ }
+ /*resume from S3, no vbios posting, no need to power down again*/
+ power_down_all_hw_blocks(dc);
+ disable_vga_and_power_gate_all_controllers(dc);
+ if (edp_link_to_turnoff)
+ dc->hwss.edp_power_control(edp_link_to_turnoff, false);
+ }
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios);
}
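The fast-boot path above hinges on two conditions: VBIOS reports that LCD1 (the eDP panel) is the active VGA display, and the new context actually keeps a stream on that eDP link. A compact restatement of the decision as a sketch; the helper and parameter names are illustrative, not the driver's API.

/* Illustrative restatement of the eDP fast-boot decision above.
 * vbios_lit_edp : VBIOS left the eDP panel lit (LCD1 active).
 * idle_edp_link : an eDP link with a local sink but no stream in the
 *                 new context (NULL when eDP is about to be driven).
 * Hardware power-down is skipped only when the panel VBIOS lit is the
 * one the driver is about to keep driving.
 */
static bool can_skip_power_down(bool vbios_lit_edp,
				struct dc_link *idle_edp_link)
{
	return vbios_lit_edp && idle_edp_link == NULL;
}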
@@ -1498,7 +1519,7 @@ static uint32_t compute_pstate_blackout_duration(
return total_dest_line_time_ns;
}
-void dce110_set_displaymarks(
+static void dce110_set_displaymarks(
const struct dc *dc,
struct dc_state *context)
{
@@ -1612,6 +1633,8 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
value |= 0x80;
if (events->cursor_update)
value |= 0x2;
+ if (events->force_trigger)
+ value |= 0x1;
#if defined(CONFIG_DRM_AMD_DC_FBC)
value |= 0x84;
@@ -1748,60 +1771,69 @@ static void apply_min_clocks(
/*
* Check if FBC can be enabled
*/
-static enum dc_status validate_fbc(struct dc *dc,
- struct dc_state *context)
+static bool should_enable_fbc(struct dc *dc,
+ struct dc_state *context,
+ uint32_t *pipe_idx)
{
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[0];
+ uint32_t i;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &context->res_ctx;
+
ASSERT(dc->fbc_compressor);
/* FBC memory should be allocated */
if (!dc->ctx->fbc_gpu_addr)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Only supports single display */
if (context->stream_count != 1)
- return DC_ERROR_UNEXPECTED;
+ return false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (res_ctx->pipe_ctx[i].stream) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ *pipe_idx = i;
+ break;
+ }
+ }
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* PSR should not be enabled */
if (pipe_ctx->stream->sink->link->psr_enabled)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Nothing to compress */
if (!pipe_ctx->plane_state)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Only for non-linear tiling */
if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
- return DC_ERROR_UNEXPECTED;
+ return false;
- return DC_OK;
+ return true;
}
/*
* Enable FBC
*/
-static enum dc_status enable_fbc(struct dc *dc,
- struct dc_state *context)
+static void enable_fbc(struct dc *dc,
+ struct dc_state *context)
{
- enum dc_status status = validate_fbc(dc, context);
+ uint32_t pipe_idx = 0;
- if (status == DC_OK) {
+ if (should_enable_fbc(dc, context, &pipe_idx)) {
/* Program GRPH COMPRESSED ADDRESS and PITCH */
struct compr_addr_and_pitch_params params = {0, 0, 0};
struct compressor *compr = dc->fbc_compressor;
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[0];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
- params.source_view_width =
- pipe_ctx->stream->timing.h_addressable;
- params.source_view_height =
- pipe_ctx->stream->timing.v_addressable;
+ params.source_view_width = pipe_ctx->stream->timing.h_addressable;
+ params.source_view_height = pipe_ctx->stream->timing.v_addressable;
compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
@@ -1810,40 +1842,9 @@ static enum dc_status enable_fbc(struct dc *dc,
compr->funcs->enable_fbc(compr, &params);
}
- return status;
}
#endif
-static enum dc_status apply_ctx_to_hw_fpga(
- struct dc *dc,
- struct dc_state *context)
-{
- enum dc_status status = DC_ERROR_UNEXPECTED;
- int i;
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *pipe_ctx_old =
- &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == NULL)
- continue;
-
- if (pipe_ctx->stream == pipe_ctx_old->stream)
- continue;
-
- status = apply_single_controller_ctx_to_hw(
- pipe_ctx,
- context,
- dc);
-
- if (status != DC_OK)
- return status;
- }
-
- return DC_OK;
-}
-
static void dce110_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
@@ -1888,7 +1889,7 @@ static void dce110_reset_hw_ctx_wrap(
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
- dc->hwss.power_down_front_end(dc, pipe_ctx_old->pipe_idx);
+ dc->hwss.disable_plane(dc, pipe_ctx_old);
pipe_ctx_old->stream = NULL;
}
@@ -1913,11 +1914,6 @@ enum dc_status dce110_apply_ctx_to_hw(
if (context->stream_count <= 0)
return DC_OK;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- apply_ctx_to_hw_fpga(dc, context);
- return DC_OK;
- }
-
/* Apply new context */
dcb->funcs->set_scratch_critical_state(dcb, true);
@@ -2076,8 +2072,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->stream == pipe_ctx_old->stream)
continue;
- if (pipe_ctx->stream && pipe_ctx_old->stream
- && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
continue;
if (pipe_ctx->top_pipe)
@@ -2113,16 +2108,10 @@ enum dc_status dce110_apply_ctx_to_hw(
context,
dc);
- if (dc->hwss.power_on_front_end)
- dc->hwss.power_on_front_end(dc, pipe_ctx, context);
-
if (DC_OK != status)
return status;
}
- /* pplib is notified if disp_num changed */
- dc->hwss.set_bandwidth(dc, context, true);
-
/* to save power */
apply_min_clocks(dc, context, &clocks_state, false);
@@ -2145,16 +2134,8 @@ static void set_default_colors(struct pipe_ctx *pipe_ctx)
struct default_adjustment default_adjust = { 0 };
default_adjust.force_hw_default = false;
- if (pipe_ctx->plane_state == NULL)
- default_adjust.in_color_space = COLOR_SPACE_SRGB;
- else
- default_adjust.in_color_space =
- pipe_ctx->plane_state->color_space;
- if (pipe_ctx->stream == NULL)
- default_adjust.out_color_space = COLOR_SPACE_SRGB;
- else
- default_adjust.out_color_space =
- pipe_ctx->stream->output_color_space;
+ default_adjust.in_color_space = pipe_ctx->plane_state->color_space;
+ default_adjust.out_color_space = pipe_ctx->stream->output_color_space;
default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format;
@@ -2212,13 +2193,14 @@ static void program_surface_visibility(const struct dc *dc,
} else if (!pipe_ctx->plane_state->visible)
blank_target = true;
- dce_set_blender_mode(dc->hwseq, pipe_ctx->pipe_idx, blender_mode);
+ dce_set_blender_mode(dc->hwseq, pipe_ctx->stream_res.tg->inst, blender_mode);
pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target);
}
static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
+ int i = 0;
struct xfm_grph_csc_adjustment adjust;
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
@@ -2226,33 +2208,10 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
- adjust.temperature_matrix[0] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[0];
- adjust.temperature_matrix[1] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[1];
- adjust.temperature_matrix[2] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[2];
- adjust.temperature_matrix[3] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[4];
- adjust.temperature_matrix[4] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[5];
- adjust.temperature_matrix[5] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[6];
- adjust.temperature_matrix[6] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[8];
- adjust.temperature_matrix[7] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[9];
- adjust.temperature_matrix[8] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[10];
+
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->stream->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
@@ -2276,11 +2235,10 @@ static void set_plane_config(
memset(&tbl_entry, 0, sizeof(tbl_entry));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
- dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
+ dce_enable_fe_clock(dc->hwseq, mi->inst, true);
set_default_colors(pipe_ctx);
- if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
- == true) {
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
tbl_entry.color_space =
pipe_ctx->stream->output_color_space;
@@ -2294,33 +2252,10 @@ static void set_plane_config(
if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
- adjust.temperature_matrix[0] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[0];
- adjust.temperature_matrix[1] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[1];
- adjust.temperature_matrix[2] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[2];
- adjust.temperature_matrix[3] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[4];
- adjust.temperature_matrix[4] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[5];
- adjust.temperature_matrix[5] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[6];
- adjust.temperature_matrix[6] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[8];
- adjust.temperature_matrix[7] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[9];
- adjust.temperature_matrix[8] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[10];
+
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->stream->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
@@ -2365,7 +2300,7 @@ static void update_plane_addr(const struct dc *dc,
plane_state->status.requested_address = plane_state->address;
}
-void dce110_update_pending_status(struct pipe_ctx *pipe_ctx)
+static void dce110_update_pending_status(struct pipe_ctx *pipe_ctx)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -2458,20 +2393,16 @@ static void dce110_enable_timing_synchronization(
for (i = 1 /* skip the master */; i < group_size; i++)
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
- grouped_pipes[i]->stream_res.tg, gsl_params.gsl_group);
-
-
+ grouped_pipes[i]->stream_res.tg,
+ gsl_params.gsl_group);
for (i = 1 /* skip the master */; i < group_size; i++) {
DC_SYNC_INFO("GSL: waiting for reset to occur.\n");
wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
- /* Regardless of success of the wait above, remove the reset or
- * the driver will start timing out on Display requests. */
- DC_SYNC_INFO("GSL: disabling trigger-reset.\n");
- grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(grouped_pipes[i]->stream_res.tg);
+ grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg);
}
-
/* GSL Vblank synchronization is a one time sync mechanism, assumption
* is that the sync'ed displays will not drift out of sync over time*/
DC_SYNC_INFO("GSL: Restoring register states.\n");
@@ -2481,6 +2412,39 @@ static void dce110_enable_timing_synchronization(
DC_SYNC_INFO("GSL: Set-up complete.\n");
}
+static void dce110_enable_per_frame_crtc_position_reset(
+ struct dc *dc,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[])
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dcp_gsl_params gsl_params = { 0 };
+ int i;
+
+ gsl_params.gsl_group = 0;
+ gsl_params.gsl_master = grouped_pipes[0]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst;
+
+ for (i = 0; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock(
+ grouped_pipes[i]->stream_res.tg, &gsl_params);
+
+ DC_SYNC_INFO("GSL: enabling trigger-reset\n");
+
+ for (i = 1; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
+ grouped_pipes[i]->stream_res.tg,
+ gsl_params.gsl_master,
+ &grouped_pipes[i]->stream->triggered_crtc_reset);
+
+ DC_SYNC_INFO("GSL: waiting for reset to occur.\n");
+ for (i = 1; i < group_size; i++)
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
+
+ for (i = 0; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->tear_down_global_swap_lock(grouped_pipes[i]->stream_res.tg);
+
+}
+
static void init_hw(struct dc *dc)
{
int i;
@@ -2513,6 +2477,10 @@ static void init_hw(struct dc *dc)
* required signal (which may be different from the
* default signal on connector). */
struct dc_link *link = dc->links[i];
+
+ if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
+ dc->hwss.edp_power_control(link, true);
+
link->link_enc->funcs->hw_init(link->link_enc);
}
@@ -2567,9 +2535,13 @@ void dce110_fill_display_configs(
ASSERT(pipe_ctx != NULL);
+ /* only notify active streams */
+ if (stream->dpms_off)
+ continue;
+
num_cfgs++;
cfg->signal = pipe_ctx->stream->signal;
- cfg->pipe_idx = pipe_ctx->pipe_idx;
+ cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
cfg->src_height = stream->src.height;
cfg->src_width = stream->src.width;
cfg->ddi_channel_mapping =
@@ -2722,10 +2694,8 @@ static void dce110_program_front_end_for_pipe(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
- struct pipe_ctx *cur_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
unsigned int i;
-
+ struct dc_context *ctx = dc->ctx;
memset(&tbl_entry, 0, sizeof(tbl_entry));
if (dc->current_state)
@@ -2734,7 +2704,7 @@ static void dce110_program_front_end_for_pipe(
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
- dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
+ dce_enable_fe_clock(dc->hwseq, mi->inst, true);
set_default_colors(pipe_ctx);
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
@@ -2752,33 +2722,10 @@ static void dce110_program_front_end_for_pipe(
if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
- adjust.temperature_matrix[0] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[0];
- adjust.temperature_matrix[1] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[1];
- adjust.temperature_matrix[2] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[2];
- adjust.temperature_matrix[3] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[4];
- adjust.temperature_matrix[4] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[5];
- adjust.temperature_matrix[5] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[6];
- adjust.temperature_matrix[6] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[8];
- adjust.temperature_matrix[7] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[9];
- adjust.temperature_matrix[8] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[10];
+
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->stream->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
@@ -2815,14 +2762,15 @@ static void dce110_program_front_end_for_pipe(
plane_state->rotation);
/* Moved programming gamma from dc to hwss */
- if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) {
- dc->hwss.set_input_transfer_func(
- pipe_ctx, pipe_ctx->plane_state);
- dc->hwss.set_output_transfer_func(
- pipe_ctx, pipe_ctx->stream);
- }
+ if (pipe_ctx->plane_state->update_flags.bits.full_update ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
- dm_logger_write(dc->ctx->logger, LOG_SURFACE,
+ DC_LOG_SURFACE(
"Pipe:%d 0x%x: addr hi:0x%x, "
"addr low:0x%x, "
"src: %d, %d, %d,"
@@ -2845,7 +2793,7 @@ static void dce110_program_front_end_for_pipe(
pipe_ctx->plane_state->clip_rect.width,
pipe_ctx->plane_state->clip_rect.height);
- dm_logger_write(dc->ctx->logger, LOG_SURFACE,
+ DC_LOG_SURFACE(
"Pipe %d: width, height, x, y\n"
"viewport:%d, %d, %d, %d\n"
"recout: %d, %d, %d, %d\n",
@@ -2889,13 +2837,12 @@ static void dce110_apply_ctx_for_surface(
continue;
/* Need to allocate mem before program front end for Fiji */
- if (pipe_ctx->plane_res.mi != NULL)
- pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
- pipe_ctx->plane_res.mi,
- pipe_ctx->stream->timing.h_total,
- pipe_ctx->stream->timing.v_total,
- pipe_ctx->stream->timing.pix_clk_khz,
- context->stream_count);
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
+ pipe_ctx->plane_res.mi,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ pipe_ctx->stream->timing.pix_clk_khz,
+ context->stream_count);
dce110_program_front_end_for_pipe(dc, pipe_ctx);
@@ -2916,8 +2863,11 @@ static void dce110_apply_ctx_for_surface(
}
}
-static void dce110_power_down_fe(struct dc *dc, int fe_idx)
+static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ int fe_idx = pipe_ctx->plane_res.mi ?
+ pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx;
+
/* Do not power down fe when stream is active on dce*/
if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
return;
@@ -2959,6 +2909,52 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
}
}
+void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+ struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct dc_cursor_mi_param param = {
+ .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+ .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ };
+
+ if (pipe_ctx->plane_state->address.type
+ == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pos_cpy.enable = false;
+
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ pos_cpy.enable = false;
+
+ if (ipp->funcs->ipp_cursor_set_position)
+ ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
+ if (mi->funcs->set_cursor_position)
+ mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
+}
+
+void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+ if (pipe_ctx->plane_res.ipp &&
+ pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
+ pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
+ pipe_ctx->plane_res.ipp, attributes);
+
+ if (pipe_ctx->plane_res.mi &&
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.mi, attributes);
+
+ if (pipe_ctx->plane_res.xfm &&
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.xfm, attributes);
+}
+
static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
static void optimize_shared_resources(struct dc *dc) {}
@@ -2977,13 +2973,15 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.power_down = dce110_power_down,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dce110_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset,
.update_info_frame = dce110_update_info_frame,
.enable_stream = dce110_enable_stream,
.disable_stream = dce110_disable_stream,
.unblank_stream = dce110_unblank_stream,
+ .blank_stream = dce110_blank_stream,
.enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
.enable_display_power_gating = dce110_enable_display_power_gating,
- .power_down_front_end = dce110_power_down_fe,
+ .disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
.set_bandwidth = dce110_set_bandwidth,
.set_drr = set_drr,
@@ -2996,8 +2994,12 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
.ready_shared_resources = ready_shared_resources,
.optimize_shared_resources = optimize_shared_resources,
+ .pplib_apply_display_requirements = pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
+ .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+ .set_cursor_position = dce110_set_cursor_position,
+ .set_cursor_attribute = dce110_set_cursor_attribute
};
void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 4d72bb9..5d7e9f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -39,11 +39,7 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context);
-void dce110_set_display_clock(struct dc_state *context);
-void dce110_set_displaymarks(
- const struct dc *dc,
- struct dc_state *context);
void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
@@ -52,15 +48,14 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
+void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
-void dce110_enable_accelerated_mode(struct dc *dc);
+void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
void dce110_power_down(struct dc *dc);
-void dce110_update_pending_status(struct pipe_ctx *pipe_ctx);
-
void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
@@ -70,12 +65,16 @@ uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
void hwss_edp_power_control(
- struct link_encoder *enc,
- bool power_up);
+ struct dc_link *link,
+ bool power_up);
void hwss_edp_backlight_control(
struct dc_link *link,
bool enable);
+void hwss_edp_wait_for_hpd_ready(
+ struct dc_link *link,
+ bool power_up);
+
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index a06c602..7bab8c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -237,26 +237,14 @@ static void program_size_and_rotation(
if (rotation == ROTATION_ANGLE_90 ||
rotation == ROTATION_ANGLE_270) {
- uint32_t swap;
- swap = local_size.video.luma_size.x;
- local_size.video.luma_size.x =
- local_size.video.luma_size.y;
- local_size.video.luma_size.y = swap;
-
- swap = local_size.video.luma_size.width;
- local_size.video.luma_size.width =
- local_size.video.luma_size.height;
- local_size.video.luma_size.height = swap;
-
- swap = local_size.video.chroma_size.x;
- local_size.video.chroma_size.x =
- local_size.video.chroma_size.y;
- local_size.video.chroma_size.y = swap;
-
- swap = local_size.video.chroma_size.width;
- local_size.video.chroma_size.width =
- local_size.video.chroma_size.height;
- local_size.video.chroma_size.height = swap;
+ swap(local_size.video.luma_size.x,
+ local_size.video.luma_size.y);
+ swap(local_size.video.luma_size.width,
+ local_size.video.luma_size.height);
+ swap(local_size.video.chroma_size.x,
+ local_size.video.chroma_size.y);
+ swap(local_size.video.chroma_size.width,
+ local_size.video.chroma_size.height);
}
value = 0;
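The open-coded width/height exchanges above are replaced with the kernel's swap() helper. For reference, a sketch of the macro roughly as it was defined in include/linux/kernel.h around this kernel version; it exchanges two lvalues of the same type in place.

/* Kernel swap() helper (approximate form from include/linux/kernel.h). */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/* Usage, as in the rotation path above:
 *   swap(local_size.video.luma_size.width,
 *        local_size.video.luma_size.height);
 */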
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
index feb397b..4245e1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -727,7 +727,7 @@ void dce110_opp_v_set_csc_adjustment(
CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
program_color_matrix_v(
- xfm_dce, tbl_entry, GRAPHICS_CSC_ADJUST_TYPE_SW);
+ xfm_dce, tbl_entry, GRPH_COLOR_MATRIX_SW);
/* We did everything ,now program DxOUTPUT_CSC_CONTROL */
configure_graphics_mode_v(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
index e98ed30..9b65b77 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
@@ -175,7 +175,7 @@ static void regamma_config_regions_and_segments(
value = 0;
set_reg_field_value(
value,
- params->arr_points[2].custom_float_slope,
+ params->arr_points[1].custom_float_slope,
GAMMA_CORR_CNTLA_END_CNTL2,
GAMMA_CORR_CNTLA_EXP_REGION_END_BASE);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 42df17f..b1f14be 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -52,6 +52,8 @@
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#define DC_LOGGER \
+ dc->ctx->logger
#if defined(CONFIG_DRM_AMD_DC_FBC)
#include "dce110/dce110_compressor.h"
#endif
@@ -700,7 +702,7 @@ static void get_pixel_clock_parameters(
pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
pixel_clk_params->signal_type = pipe_ctx->stream->signal;
- pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
+ pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
/* TODO: un-hardcode*/
pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
LINK_RATE_REF_FREQ_IN_KHZ;
@@ -771,8 +773,7 @@ static bool dce110_validate_bandwidth(
{
bool result = false;
- dm_logger_write(
- dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ DC_LOG_BANDWIDTH_CALCS(
"%s: start",
__func__);
@@ -786,8 +787,7 @@ static bool dce110_validate_bandwidth(
result = true;
if (!result)
- dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
- "%s: %dx%d@%d Bandwidth validation failed!\n",
+ DC_LOG_BANDWIDTH_VALIDATION("%s: %dx%d@%d Bandwidth validation failed!\n",
__func__,
context->streams[0]->timing.h_addressable,
context->streams[0]->timing.v_addressable,
@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(
return result;
}
+enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+ struct dc_caps *caps)
+{
+ if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
+ ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
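dce110_validate_plane() rejects any plane that would need more than a 2:1 downscale in either dimension. As a worked example, a 3840-wide source into a 1280-wide destination fails because 1280 * 2 = 2560 < 3840, while a 1920-wide destination passes since 1920 * 2 = 3840, which is not less than the source width.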
static bool dce110_validate_surface_sets(
struct dc_state *context)
{
@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(
plane->src_rect.height > 1080))
return false;
+ /* we don't yet have the logic to support an underlay-only
+ * configuration, so block the case where an NV12 plane is
+ * the top layer
+ */
+ if (j == 0)
+ return false;
+
/* irrespective of plane format,
* stream should be RGB encoded
*/
@@ -973,7 +990,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
dc->hwss.enable_display_power_gating(
dc,
- pipe_ctx->pipe_idx,
+ pipe_ctx->stream_res.tg->inst,
dcb, PIPE_GATING_CONTROL_DISABLE);
/*
@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
.link_enc_create = dce110_link_encoder_create,
.validate_guaranteed = dce110_validate_guaranteed,
.validate_bandwidth = dce110_validate_bandwidth,
+ .validate_plane = dce110_validate_plane,
.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
.add_stream_to_ctx = dce110_add_stream_to_ctx,
.validate_global = dce110_validate_global
@@ -1152,10 +1170,11 @@ static bool construct(
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.underlay_pipe_index = pool->base.pipe_count;
-
+ pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 150;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
+ dc->caps.is_apu = true;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 4befce6..be71539 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -1224,26 +1224,46 @@ void dce110_timing_generator_setup_global_swap_lock(
/* This pipe will belong to GSL Group zero. */
set_reg_field_value(value,
- 1,
- DCP_GSL_CONTROL,
- DCP_GSL0_EN);
+ 1,
+ DCP_GSL_CONTROL,
+ DCP_GSL0_EN);
set_reg_field_value(value,
- gsl_params->gsl_master == tg->inst,
- DCP_GSL_CONTROL,
- DCP_GSL_MASTER_EN);
+ gsl_params->gsl_master == tg->inst,
+ DCP_GSL_CONTROL,
+ DCP_GSL_MASTER_EN);
set_reg_field_value(value,
- HFLIP_READY_DELAY,
- DCP_GSL_CONTROL,
- DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
+ HFLIP_READY_DELAY,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
/* Keep signal low (pending high) during 6 lines.
* Also defines minimum interval before re-checking signal. */
set_reg_field_value(value,
- HFLIP_CHECK_DELAY,
- DCP_GSL_CONTROL,
- DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
+ HFLIP_CHECK_DELAY,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmDCP_GSL_CONTROL), value);
+ value = 0;
+
+ set_reg_field_value(value,
+ gsl_params->gsl_master,
+ DCIO_GSL0_CNTL,
+ DCIO_GSL0_VSYNC_SEL);
+
+ set_reg_field_value(value,
+ 0,
+ DCIO_GSL0_CNTL,
+ DCIO_GSL0_TIMING_SYNC_SEL);
+
+ set_reg_field_value(value,
+ 0,
+ DCIO_GSL0_CNTL,
+ DCIO_GSL0_GLOBAL_UNLOCK_SEL);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmDCIO_GSL0_CNTL), value);
{
@@ -1253,38 +1273,38 @@ void dce110_timing_generator_setup_global_swap_lock(
CRTC_REG(mmCRTC_V_TOTAL));
set_reg_field_value(value,
- 0,/* DCP_GSL_PURPOSE_SURFACE_FLIP */
- DCP_GSL_CONTROL,
- DCP_GSL_SYNC_SOURCE);
+ 0,/* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ DCP_GSL_CONTROL,
+ DCP_GSL_SYNC_SOURCE);
/* Checkpoint relative to end of frame */
check_point = get_reg_field_value(value_crtc_vtotal,
- CRTC_V_TOTAL,
- CRTC_V_TOTAL);
+ CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_GSL_WINDOW), 0);
}
set_reg_field_value(value,
- 1,
- DCP_GSL_CONTROL,
- DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
+ 1,
+ DCP_GSL_CONTROL,
+ DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
dm_write_reg(tg->ctx, address, value);
/********************************************************************/
address = CRTC_REG(mmCRTC_GSL_CONTROL);
- value = 0;
+ value = dm_read_reg(tg->ctx, address);
set_reg_field_value(value,
- check_point - FLIP_READY_BACK_LOOKUP,
- CRTC_GSL_CONTROL,
- CRTC_GSL_CHECK_LINE_NUM);
+ check_point - FLIP_READY_BACK_LOOKUP,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM);
set_reg_field_value(value,
- VFLIP_READY_DELAY,
- CRTC_GSL_CONTROL,
- CRTC_GSL_FORCE_DELAY);
+ VFLIP_READY_DELAY,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_FORCE_DELAY);
dm_write_reg(tg->ctx, address, value);
}
@@ -1555,6 +1575,138 @@ void dce110_timing_generator_enable_reset_trigger(
dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
}
+void dce110_timing_generator_enable_crtc_reset(
+ struct timing_generator *tg,
+ int source_tg_inst,
+ struct crtc_trigger_info *crtc_tp)
+{
+ uint32_t value = 0;
+ uint32_t rising_edge = 0;
+ uint32_t falling_edge = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Setup trigger edge */
+ switch (crtc_tp->event) {
+ case CRTC_EVENT_VSYNC_RISING:
+ rising_edge = 1;
+ break;
+
+ case CRTC_EVENT_VSYNC_FALLING:
+ falling_edge = 1;
+ break;
+ }
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
+
+ set_reg_field_value(value,
+ source_tg_inst,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT);
+
+ set_reg_field_value(value,
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_POLARITY_SELECT);
+
+ set_reg_field_value(value,
+ rising_edge,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_RISING_EDGE_DETECT_CNTL);
+
+ set_reg_field_value(value,
+ falling_edge,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
+
+ /**************************************************************/
+
+ switch (crtc_tp->delay) {
+ case TRIGGER_DELAY_NEXT_LINE:
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ set_reg_field_value(value,
+ 0, /* force H count to H_TOTAL and V count to V_TOTAL */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE);
+
+ set_reg_field_value(value,
+ 0, /* TriggerB - we never use TriggerA */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_TRIG_SEL);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL));
+
+ set_reg_field_value(value,
+ 1,
+ CRTC_VERT_SYNC_CONTROL,
+ CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR);
+
+ set_reg_field_value(value,
+ 2,
+ CRTC_VERT_SYNC_CONTROL,
+ CRTC_AUTO_FORCE_VSYNC_MODE);
+
+ break;
+
+ case TRIGGER_DELAY_NEXT_PIXEL:
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL));
+
+ set_reg_field_value(value,
+ 1,
+ CRTC_VERT_SYNC_CONTROL,
+ CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR);
+
+ set_reg_field_value(value,
+ 0,
+ CRTC_VERT_SYNC_CONTROL,
+ CRTC_AUTO_FORCE_VSYNC_MODE);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL), value);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ set_reg_field_value(value,
+ 2, /* force H count to H_TOTAL and V count to V_TOTAL */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE);
+
+ set_reg_field_value(value,
+ 1, /* TriggerB - we never use TriggerA */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_TRIG_SEL);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
+ break;
+ }
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_MODE));
+
+ set_reg_field_value(value,
+ 2,
+ CRTC_MASTER_UPDATE_MODE,
+ MASTER_UPDATE_MODE);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_MODE), value);
+}
void dce110_timing_generator_disable_reset_trigger(
struct timing_generator *tg)
{
@@ -1564,34 +1716,48 @@ void dce110_timing_generator_disable_reset_trigger(
value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
set_reg_field_value(value,
- 0, /* force counter now mode is disabled */
- CRTC_FORCE_COUNT_NOW_CNTL,
- CRTC_FORCE_COUNT_NOW_MODE);
+ 0, /* force counter now mode is disabled */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE);
set_reg_field_value(value,
- 1, /* clear trigger status */
- CRTC_FORCE_COUNT_NOW_CNTL,
- CRTC_FORCE_COUNT_NOW_CLEAR);
+ 1, /* clear trigger status */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_CLEAR);
dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL));
+
+ set_reg_field_value(value,
+ 1,
+ CRTC_VERT_SYNC_CONTROL,
+ CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR);
+
+ set_reg_field_value(value,
+ 0,
+ CRTC_VERT_SYNC_CONTROL,
+ CRTC_AUTO_FORCE_VSYNC_MODE);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_VERT_SYNC_CONTROL), value);
+
/********************************************************************/
value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
set_reg_field_value(value,
- TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
- CRTC_TRIGB_CNTL,
- CRTC_TRIGB_SOURCE_SELECT);
+ TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT);
set_reg_field_value(value,
- TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
- CRTC_TRIGB_CNTL,
- CRTC_TRIGB_POLARITY_SELECT);
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_POLARITY_SELECT);
set_reg_field_value(value,
- 1, /* clear trigger status */
- CRTC_TRIGB_CNTL,
- CRTC_TRIGB_CLEAR);
+ 1, /* clear trigger status */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_CLEAR);
dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
}
@@ -1611,10 +1777,16 @@ bool dce110_timing_generator_did_triggered_reset_occur(
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t value = dm_read_reg(tg->ctx,
CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
-
- return get_reg_field_value(value,
- CRTC_FORCE_COUNT_NOW_CNTL,
- CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
+ uint32_t value1 = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_VERT_SYNC_CONTROL));
+ bool force = get_reg_field_value(value,
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
+ bool vert_sync = get_reg_field_value(value1,
+ CRTC_VERT_SYNC_CONTROL,
+ CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED) != 0;
+
+ return (force || vert_sync);
}
/**
@@ -1905,6 +2077,125 @@ bool dce110_arm_vert_intr(struct timing_generator *tg, uint8_t width)
return true;
}
+static bool dce110_is_tg_enabled(struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+ uint32_t field = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ addr = CRTC_REG(mmCRTC_CONTROL);
+ value = dm_read_reg(tg->ctx, addr);
+ field = get_reg_field_value(value, CRTC_CONTROL,
+ CRTC_CURRENT_MASTER_EN_STATE);
+ return field == 1;
+}
+
+bool dce110_configure_crc(struct timing_generator *tg,
+ const struct crc_params *params)
+{
+ uint32_t cntl_addr = 0;
+ uint32_t addr = 0;
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Cannot configure CRC on a CRTC that is disabled */
+ if (!dce110_is_tg_enabled(tg))
+ return false;
+
+ cntl_addr = CRTC_REG(mmCRTC_CRC_CNTL);
+
+ /* First, disable CRC before we configure it. */
+ dm_write_reg(tg->ctx, cntl_addr, 0);
+
+ if (!params->enable)
+ return true;
+
+ /* Program frame boundaries */
+ /* Window A x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
+ set_reg_field_value(value, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START);
+ set_reg_field_value(value, params->windowa_x_end,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window A y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
+ set_reg_field_value(value, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START);
+ set_reg_field_value(value, params->windowa_y_end,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
+ set_reg_field_value(value, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START);
+ set_reg_field_value(value, params->windowb_x_end,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
+ set_reg_field_value(value, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START);
+ set_reg_field_value(value, params->windowb_y_end,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Set CRC mode and selection, and enable. Only using CRC0 */
+ value = 0;
+ set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+ CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+ set_reg_field_value(value, params->selection,
+ CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
+ set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+ dm_write_reg(tg->ctx, cntl_addr, value);
+
+ return true;
+}
+
+bool dce110_get_crc(struct timing_generator *tg,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+ uint32_t field = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ addr = CRTC_REG(mmCRTC_CRC_CNTL);
+ value = dm_read_reg(tg->ctx, addr);
+ field = get_reg_field_value(value, CRTC_CRC_CNTL, CRTC_CRC_EN);
+
+ /* Early return if CRC is not enabled for this CRTC */
+ if (!field)
+ return false;
+
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
+ value = dm_read_reg(tg->ctx, addr);
+ *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
+
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
+ value = dm_read_reg(tg->ctx, addr);
+ *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
+
+ return true;
+}
+
static const struct timing_generator_funcs dce110_tg_funcs = {
.validate_timing = dce110_tg_validate_timing,
.program_timing = dce110_tg_program_timing,
@@ -1928,6 +2219,7 @@ static const struct timing_generator_funcs dce110_tg_funcs = {
.setup_global_swap_lock =
dce110_timing_generator_setup_global_swap_lock,
.enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
+ .enable_crtc_reset = dce110_timing_generator_enable_crtc_reset,
.disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
.tear_down_global_swap_lock =
dce110_timing_generator_tear_down_global_swap_lock,
@@ -1939,6 +2231,9 @@ static const struct timing_generator_funcs dce110_tg_funcs = {
dce110_timing_generator_set_static_screen_control,
.set_test_pattern = dce110_timing_generator_set_test_pattern,
.arm_vert_intr = dce110_arm_vert_intr,
+ .is_tg_enabled = dce110_is_tg_enabled,
+ .configure_crc = dce110_configure_crc,
+ .get_crc = dce110_get_crc,
};
void dce110_timing_generator_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index 82737de..734d496 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -174,6 +174,12 @@ void dce110_timing_generator_setup_global_swap_lock(
void dce110_timing_generator_tear_down_global_swap_lock(
struct timing_generator *tg);
+/* Reset crtc position on master VSync */
+void dce110_timing_generator_enable_crtc_reset(
+ struct timing_generator *tg,
+ int source,
+ struct crtc_trigger_info *crtc_tp);
+
/* Reset slave controllers on master VSync */
void dce110_timing_generator_enable_reset_trigger(
struct timing_generator *tg,
@@ -270,4 +276,10 @@ void dce110_tg_set_colors(struct timing_generator *tg,
bool dce110_arm_vert_intr(
struct timing_generator *tg, uint8_t width);
+bool dce110_configure_crc(struct timing_generator *tg,
+ const struct crc_params *params);
+
+bool dce110_get_crc(struct timing_generator *tg,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+
#endif /* __DC_TIMING_GENERATOR_DCE110_H__ */
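For reference, the new CRC hooks are reached through the timing_generator funcs table wired up above. A minimal caller-side sketch follows; the helper name and the window/selection values are hypothetical examples, and only struct crc_params, its fields, and the configure_crc/get_crc entries come from this change:

	/* Illustrative only: single-shot CRC capture over a full-frame window A.
	 * compute_crc_once() is a made-up helper; the numeric values are examples. */
	static bool compute_crc_once(struct timing_generator *tg,
			uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
	{
		struct crc_params params = {0};

		params.windowa_x_start = 0;
		params.windowa_x_end = 1920;	/* example: active width */
		params.windowa_y_start = 0;
		params.windowa_y_end = 1080;	/* example: active height */
		params.selection = 0;		/* example CRC0 source select */
		params.continuous_mode = false;	/* one-shot capture */
		params.enable = true;

		/* configure_crc() itself refuses a disabled CRTC. */
		if (!tg->funcs->configure_crc(tg, &params))
			return false;

		/* get_crc() returns false while CRC_EN is not set for this CRTC. */
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	}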
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index 59b4cd3..8ad0481 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -38,6 +38,8 @@
#include "timing_generator.h"
+#define DC_LOGGER \
+ tg->ctx->logger
/** ********************************************************************************
*
* DCE11 Timing Generator Implementation
@@ -606,8 +608,7 @@ static uint32_t dce110_timing_generator_v_get_vblank_counter(struct timing_gener
static bool dce110_timing_generator_v_did_triggered_reset_occur(
struct timing_generator *tg)
{
- dm_logger_write(tg->ctx->logger, LOG_ERROR,
- "Timing Sync not supported on underlay pipe\n");
+ DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return false;
}
@@ -615,8 +616,7 @@ static void dce110_timing_generator_v_setup_global_swap_lock(
struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params)
{
- dm_logger_write(tg->ctx->logger, LOG_ERROR,
- "Timing Sync not supported on underlay pipe\n");
+ DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}
@@ -624,24 +624,21 @@ static void dce110_timing_generator_v_enable_reset_trigger(
struct timing_generator *tg,
int source_tg_inst)
{
- dm_logger_write(tg->ctx->logger, LOG_ERROR,
- "Timing Sync not supported on underlay pipe\n");
+ DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}
static void dce110_timing_generator_v_disable_reset_trigger(
struct timing_generator *tg)
{
- dm_logger_write(tg->ctx->logger, LOG_ERROR,
- "Timing Sync not supported on underlay pipe\n");
+ DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}
static void dce110_timing_generator_v_tear_down_global_swap_lock(
struct timing_generator *tg)
{
- dm_logger_write(tg->ctx->logger, LOG_ERROR,
- "Timing Sync not supported on underlay pipe\n");
+ DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n");
return;
}
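The dm_logger_write() to DC_LOG_* conversions in this and the following files rely on one convention: each .c file points a file-local DC_LOGGER at its own context logger, and the shared DC_LOG_* wrappers expand against it. A rough sketch of the pattern; the real wrapper definitions live in the common logger headers and are only paraphrased here:

	/* Per-file: name the logger the wrappers should use. */
	#define DC_LOGGER \
		tg->ctx->logger

	/* Shared header, paraphrased: wrappers forward to dm_logger_write()
	 * with the matching log level. */
	#define DC_LOG_ERROR(...) \
		dm_logger_write(DC_LOGGER, LOG_ERROR, __VA_ARGS__)
	#define DC_LOG_WARNING(...) \
		dm_logger_write(DC_LOGGER, LOG_WARNING, __VA_ARGS__)

	/* Call sites then shrink from
	 *	dm_logger_write(tg->ctx->logger, LOG_ERROR, "...");
	 * to
	 *	DC_LOG_ERROR("...");
	 * without changing the emitted message. */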
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index 47390dc..8ba3c12 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -30,6 +30,8 @@
#include "dce/dce_11_0_sh_mask.h"
#define SCLV_PHASES 64
+#define DC_LOGGER \
+ xfm->ctx->logger
struct sclv_ratios_inits {
uint32_t h_int_scale_ratio_luma;
@@ -670,8 +672,7 @@ static void dce110_xfmv_set_pixel_storage_depth(
if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
/*we should use unsupported capabilities
* unless it is required by w/a*/
- dm_logger_write(xfm->ctx->logger, LOG_WARNING,
- "%s: Capability not supported",
+ DC_LOG_WARNING("%s: Capability not supported",
__func__);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
index 6964992..faae12c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
@@ -33,7 +33,8 @@
#include "include/logger_interface.h"
#include "dce112_compressor.h"
-
+#define DC_LOGGER \
+ cp110->base.ctx->logger
#define DCP_REG(reg)\
(reg + cp110->offsets.dcp_offset)
#define DMIF_REG(reg)\
@@ -129,8 +130,7 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
LOW_POWER_TILING_NUM_PIPES);
break;
default:
- dm_logger_write(
- cp110->base.ctx->logger, LOG_WARNING,
+ DC_LOG_WARNING(
"%s: Invalid LPT NUM_PIPES!!!",
__func__);
break;
@@ -175,8 +175,7 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
LOW_POWER_TILING_NUM_BANKS);
break;
default:
- dm_logger_write(
- cp110->base.ctx->logger, LOG_WARNING,
+ DC_LOG_WARNING(
"%s: Invalid LPT NUM_BANKS!!!",
__func__);
break;
@@ -209,8 +208,7 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
break;
default:
- dm_logger_write(
- cp110->base.ctx->logger, LOG_WARNING,
+ DC_LOG_WARNING(
"%s: Invalid LPT INTERLEAVE_SIZE!!!",
__func__);
break;
@@ -253,15 +251,13 @@ static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
LOW_POWER_TILING_ROW_SIZE);
break;
default:
- dm_logger_write(
- cp110->base.ctx->logger, LOG_WARNING,
+ DC_LOG_WARNING(
"%s: Invalid LPT ROW_SIZE!!!",
__func__);
break;
}
} else {
- dm_logger_write(
- cp110->base.ctx->logger, LOG_WARNING,
+ DC_LOG_WARNING(
"%s: LPT MC Configuration is not provided",
__func__);
}
@@ -311,8 +307,7 @@ static void wait_for_fbc_state_changed(
}
if (counter == 10) {
- dm_logger_write(
- cp110->base.ctx->logger, LOG_WARNING,
+ DC_LOG_WARNING(
"%s: wait counter exceeded, changes to HW not applied",
__func__);
}
@@ -525,8 +520,7 @@ void dce112_compressor_program_compressed_surface_address_and_pitch(
if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
fbc_pitch = fbc_pitch / 8;
else
- dm_logger_write(
- compressor->ctx->logger, LOG_WARNING,
+ DC_LOG_WARNING(
"%s: Unexpected DCE11 compression ratio",
__func__);
@@ -690,8 +684,7 @@ void dce112_compressor_program_lpt_control(
LOW_POWER_TILING_MODE);
break;
default:
- dm_logger_write(
- compressor->ctx->logger, LOG_WARNING,
+ DC_LOG_WARNING(
"%s: Invalid selected DRAM channels for LPT!!!",
__func__);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 663e0a0..cd1e3f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -56,6 +56,8 @@
#include "dce/dce_11_2_sh_mask.h"
#include "dce100/dce100_resource.h"
+#define DC_LOGGER \
+ dc->ctx->logger
#ifndef mmDP_DPHY_INTERNAL_CTRL
#define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
@@ -722,8 +724,7 @@ bool dce112_validate_bandwidth(
{
bool result = false;
- dm_logger_write(
- dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ DC_LOG_BANDWIDTH_CALCS(
"%s: start",
__func__);
@@ -737,7 +738,7 @@ bool dce112_validate_bandwidth(
result = true;
if (!result)
- dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
+ DC_LOG_BANDWIDTH_VALIDATION(
"%s: Bandwidth validation failed!",
__func__);
@@ -1100,9 +1101,12 @@ static bool construct(
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
+
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 1a0b54d..e96ff86 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -31,9 +31,10 @@
#include "dce110/dce110_hw_sequencer.h"
-#include "vega10/DC/dce_12_0_offset.h"
-#include "vega10/DC/dce_12_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
#include "reg_helper.h"
#define CTX \
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 5c48c22..4659a4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -54,10 +54,11 @@
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
-#include "vega10/DC/dce_12_0_offset.h"
-#include "vega10/DC/dce_12_0_sh_mask.h"
-#include "vega10/soc15ip.h"
-#include "vega10/NBIO/nbio_6_1_offset.h"
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+#include "nbio/nbio_6_1_offset.h"
#include "reg_helper.h"
#include "dce100/dce100_resource.h"
@@ -830,11 +831,14 @@ static bool construct(
/* TODO: Fill more data from GreenlandAsicCapability.cpp */
pool->base.pipe_count = res_cap.num_timing_generator;
+ pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
+
dc->debug = debug_defaults;
/*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 2502182..7bee781 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -25,9 +25,10 @@
#include "dm_services.h"
-#include "vega10/DC/dce_12_0_offset.h"
-#include "vega10/DC/dce_12_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
#include "dc_types.h"
#include "dc_bios_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index bc388aa..666fcb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-DCE80 = dce80_timing_generator.o dce80_compressor.o dce80_hw_sequencer.o \
+DCE80 = dce80_timing_generator.o dce80_hw_sequencer.o \
dce80_resource.o
AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
deleted file mode 100644
index 951f2ca..0000000
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
+++ /dev/null
@@ -1,834 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dm_services.h"
-
-#include "dce/dce_8_0_d.h"
-#include "dce/dce_8_0_sh_mask.h"
-#include "gmc/gmc_7_1_sh_mask.h"
-#include "gmc/gmc_7_1_d.h"
-
-#include "include/logger_interface.h"
-#include "dce80_compressor.h"
-
-#define DCP_REG(reg)\
- (reg + cp80->offsets.dcp_offset)
-#define DMIF_REG(reg)\
- (reg + cp80->offsets.dmif_offset)
-
-static const struct dce80_compressor_reg_offsets reg_offsets[] = {
-{
- .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
- .dmif_offset = (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
- - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
-},
-{
- .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
- .dmif_offset = (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
- - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
-},
-{
- .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
- .dmif_offset = (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
- - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
-},
-{
- .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
- .dmif_offset = (mmDMIF_PG3_DPG_PIPE_DPM_CONTROL
- - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
-},
-{
- .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
- .dmif_offset = (mmDMIF_PG4_DPG_PIPE_DPM_CONTROL
- - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
-},
-{
- .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
- .dmif_offset = (mmDMIF_PG5_DPG_PIPE_DPM_CONTROL
- - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
-}
-};
-
-static const uint32_t dce8_one_lpt_channel_max_resolution = 2048 * 1200;
-
-enum fbc_idle_force {
- /* Bit 0 - Display registers updated */
- FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
-
- /* Bit 2 - FBC_GRPH_COMP_EN register updated */
- FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
- /* Bit 3 - FBC_SRC_SEL register updated */
- FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
- /* Bit 4 - FBC_MIN_COMPRESSION register updated */
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
- /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
- FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
- /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
- /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
-
- /* Bit 24 - Memory write to region 0 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
- /* Bit 25 - Memory write to region 1 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
- /* Bit 26 - Memory write to region 2 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
- /* Bit 27 - Memory write to region 3 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
-
- /* Bit 28 - Memory write from any client other than MCIF */
- FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
- /* Bit 29 - CG statics screen signal is inactive */
- FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
-};
-
-static uint32_t lpt_size_alignment(struct dce80_compressor *cp80)
-{
- /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
- return cp80->base.raw_size * cp80->base.banks_num *
- cp80->base.dram_channels_num;
-}
-
-static uint32_t lpt_memory_control_config(struct dce80_compressor *cp80,
- uint32_t lpt_control)
-{
- /*LPT MC Config */
- if (cp80->base.options.bits.LPT_MC_CONFIG == 1) {
- /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
- * 00 - 1 CHANNEL
- * 01 - 2 CHANNELS
- * 02 - 4 OR 6 CHANNELS
- * (Only for discrete GPU, N/A for CZ)
- * 03 - 8 OR 12 CHANNELS
- * (Only for discrete GPU, N/A for CZ) */
- switch (cp80->base.dram_channels_num) {
- case 2:
- set_reg_field_value(
- lpt_control,
- 1,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_NUM_PIPES);
- break;
- case 1:
- set_reg_field_value(
- lpt_control,
- 0,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_NUM_PIPES);
- break;
- default:
- dm_logger_write(
- cp80->base.ctx->logger, LOG_WARNING,
- "%s: Invalid LPT NUM_PIPES!!!",
- __func__);
- break;
- }
-
- /* The mapping for LPT NUM_BANKS is in
- * GRPH_CONTROL.GRPH_NUM_BANKS register field
- * Specifies the number of memory banks for tiling
- * purposes. Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES:
- * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
- * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
- * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
- * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
- switch (cp80->base.banks_num) {
- case 16:
- set_reg_field_value(
- lpt_control,
- 3,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_NUM_BANKS);
- break;
- case 8:
- set_reg_field_value(
- lpt_control,
- 2,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_NUM_BANKS);
- break;
- case 4:
- set_reg_field_value(
- lpt_control,
- 1,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_NUM_BANKS);
- break;
- case 2:
- set_reg_field_value(
- lpt_control,
- 0,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_NUM_BANKS);
- break;
- default:
- dm_logger_write(
- cp80->base.ctx->logger, LOG_WARNING,
- "%s: Invalid LPT NUM_BANKS!!!",
- __func__);
- break;
- }
-
- /* The mapping is in DMIF_ADDR_CALC.
- * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
- * Carrizo specifies the memory interleave per pipe.
- * It effectively specifies the location of pipe bits in
- * the memory address.
- * POSSIBLE VALUES:
- * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
- * interleave
- * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
- * interleave
- */
- switch (cp80->base.channel_interleave_size) {
- case 256: /*256B */
- set_reg_field_value(
- lpt_control,
- 0,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
- break;
- case 512: /*512B */
- set_reg_field_value(
- lpt_control,
- 1,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
- break;
- default:
- dm_logger_write(
- cp80->base.ctx->logger, LOG_WARNING,
- "%s: Invalid LPT INTERLEAVE_SIZE!!!",
- __func__);
- break;
- }
-
- /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
- * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
- * for Carrizo. Specifies the size of dram row in bytes.
- * This should match up with NOOFCOLS field in
- * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
- * This register DMIF_ADDR_CALC is not used by the
- * hardware as it is only used for addrlib assertions.
- * POSSIBLE VALUES:
- * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
- * boundary
- * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
- * boundary
- * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
- * boundary */
- switch (cp80->base.raw_size) {
- case 4096: /*4 KB */
- set_reg_field_value(
- lpt_control,
- 2,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_ROW_SIZE);
- break;
- case 2048:
- set_reg_field_value(
- lpt_control,
- 1,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_ROW_SIZE);
- break;
- case 1024:
- set_reg_field_value(
- lpt_control,
- 0,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_ROW_SIZE);
- break;
- default:
- dm_logger_write(
- cp80->base.ctx->logger, LOG_WARNING,
- "%s: Invalid LPT ROW_SIZE!!!",
- __func__);
- break;
- }
- } else {
- dm_logger_write(
- cp80->base.ctx->logger, LOG_WARNING,
- "%s: LPT MC Configuration is not provided",
- __func__);
- }
-
- return lpt_control;
-}
-
-static bool is_source_bigger_than_epanel_size(
- struct dce80_compressor *cp80,
- uint32_t source_view_width,
- uint32_t source_view_height)
-{
- if (cp80->base.embedded_panel_h_size != 0 &&
- cp80->base.embedded_panel_v_size != 0 &&
- ((source_view_width * source_view_height) >
- (cp80->base.embedded_panel_h_size *
- cp80->base.embedded_panel_v_size)))
- return true;
-
- return false;
-}
-
-static uint32_t align_to_chunks_number_per_line(
- struct dce80_compressor *cp80,
- uint32_t pixels)
-{
- return 256 * ((pixels + 255) / 256);
-}
-
-static void wait_for_fbc_state_changed(
- struct dce80_compressor *cp80,
- bool enabled)
-{
- uint8_t counter = 0;
- uint32_t addr = mmFBC_STATUS;
- uint32_t value;
-
- while (counter < 10) {
- value = dm_read_reg(cp80->base.ctx, addr);
- if (get_reg_field_value(
- value,
- FBC_STATUS,
- FBC_ENABLE_STATUS) == enabled)
- break;
- udelay(10);
- counter++;
- }
-
- if (counter == 10) {
- dm_logger_write(
- cp80->base.ctx->logger, LOG_WARNING,
- "%s: wait counter exceeded, changes to HW not applied",
- __func__);
- }
-}
-
-void dce80_compressor_power_up_fbc(struct compressor *compressor)
-{
- uint32_t value;
- uint32_t addr;
-
- addr = mmFBC_CNTL;
- value = dm_read_reg(compressor->ctx, addr);
- set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
- set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
- set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
- dm_write_reg(compressor->ctx, addr, value);
-
- addr = mmFBC_COMP_MODE;
- value = dm_read_reg(compressor->ctx, addr);
- set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
- set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
- set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
- dm_write_reg(compressor->ctx, addr, value);
-
- addr = mmFBC_COMP_CNTL;
- value = dm_read_reg(compressor->ctx, addr);
- set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
- dm_write_reg(compressor->ctx, addr, value);
- /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
- /* 1 ==> 4:1 */
- /* 2 ==> 8:1 */
- /* 0xF ==> 1:1 */
- set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
- dm_write_reg(compressor->ctx, addr, value);
- compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
-
- value = 0;
- dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
-
- value = 0xFFFFFF;
- dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
-}
-
-void dce80_compressor_enable_fbc(
- struct compressor *compressor,
- uint32_t paths_num,
- struct compr_addr_and_pitch_params *params)
-{
- struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
-
- if (compressor->options.bits.FBC_SUPPORT &&
- (compressor->options.bits.DUMMY_BACKEND == 0) &&
- (!dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
- (!is_source_bigger_than_epanel_size(
- cp80,
- params->source_view_width,
- params->source_view_height))) {
-
- uint32_t addr;
- uint32_t value;
-
- /* Before enabling FBC first need to enable LPT if applicable
- * LPT state should always be changed (enable/disable) while FBC
- * is disabled */
- if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
- (params->source_view_width *
- params->source_view_height <=
- dce8_one_lpt_channel_max_resolution)) {
- dce80_compressor_enable_lpt(compressor);
- }
-
- addr = mmFBC_CNTL;
- value = dm_read_reg(compressor->ctx, addr);
- set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
- set_reg_field_value(
- value,
- params->inst,
- FBC_CNTL, FBC_SRC_SEL);
- dm_write_reg(compressor->ctx, addr, value);
-
- /* Keep track of enum controller_id FBC is attached to */
- compressor->is_enabled = true;
- compressor->attached_inst = params->inst;
- cp80->offsets = reg_offsets[params->inst];
-
- /*Toggle it as there is bug in HW */
- set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
- dm_write_reg(compressor->ctx, addr, value);
- set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
- dm_write_reg(compressor->ctx, addr, value);
-
- wait_for_fbc_state_changed(cp80, true);
- }
-}
-
-void dce80_compressor_disable_fbc(struct compressor *compressor)
-{
- struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
-
- if (compressor->options.bits.FBC_SUPPORT &&
- dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
- uint32_t reg_data;
- /* Turn off compression */
- reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
- set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
- dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
-
- /* Reset enum controller_id to undefined */
- compressor->attached_inst = 0;
- compressor->is_enabled = false;
-
- /* Whenever disabling FBC make sure LPT is disabled if LPT
- * supported */
- if (compressor->options.bits.LPT_SUPPORT)
- dce80_compressor_disable_lpt(compressor);
-
- wait_for_fbc_state_changed(cp80, false);
- }
-}
-
-bool dce80_compressor_is_fbc_enabled_in_hw(
- struct compressor *compressor,
- uint32_t *inst)
-{
- /* Check the hardware register */
- uint32_t value;
-
- value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
- if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
- if (inst != NULL)
- *inst = compressor->attached_inst;
- return true;
- }
-
- value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
- if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
- if (inst != NULL)
- *inst = compressor->attached_inst;
- return true;
- }
-
- return false;
-}
-
-bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
-{
- /* Check the hardware register */
- uint32_t value = dm_read_reg(compressor->ctx,
- mmLOW_POWER_TILING_CONTROL);
-
- return get_reg_field_value(
- value,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_ENABLE);
-}
-
-void dce80_compressor_program_compressed_surface_address_and_pitch(
- struct compressor *compressor,
- struct compr_addr_and_pitch_params *params)
-{
- struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
- uint32_t value = 0;
- uint32_t fbc_pitch = 0;
- uint32_t compressed_surf_address_low_part =
- compressor->compr_surface_address.addr.low_part;
-
- /* Clear content first. */
- dm_write_reg(
- compressor->ctx,
- DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
- 0);
- dm_write_reg(compressor->ctx,
- DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
-
- if (compressor->options.bits.LPT_SUPPORT) {
- uint32_t lpt_alignment = lpt_size_alignment(cp80);
-
- if (lpt_alignment != 0) {
- compressed_surf_address_low_part =
- ((compressed_surf_address_low_part
- + (lpt_alignment - 1)) / lpt_alignment)
- * lpt_alignment;
- }
- }
-
- /* Write address, HIGH has to be first. */
- dm_write_reg(compressor->ctx,
- DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
- compressor->compr_surface_address.addr.high_part);
- dm_write_reg(compressor->ctx,
- DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
- compressed_surf_address_low_part);
-
- fbc_pitch = align_to_chunks_number_per_line(
- cp80,
- params->source_view_width);
-
- if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
- fbc_pitch = fbc_pitch / 8;
- else
- dm_logger_write(
- compressor->ctx->logger, LOG_WARNING,
- "%s: Unexpected DCE8 compression ratio",
- __func__);
-
- /* Clear content first. */
- dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
-
- /* Write FBC Pitch. */
- set_reg_field_value(
- value,
- fbc_pitch,
- GRPH_COMPRESS_PITCH,
- GRPH_COMPRESS_PITCH);
- dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
-
-}
-
-void dce80_compressor_disable_lpt(struct compressor *compressor)
-{
- struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
- uint32_t value;
- uint32_t addr;
- uint32_t inx;
-
- /* Disable all pipes LPT Stutter */
- for (inx = 0; inx < 3; inx++) {
- value =
- dm_read_reg(
- compressor->ctx,
- DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
- set_reg_field_value(
- value,
- 0,
- DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
- STUTTER_ENABLE_NONLPTCH);
- dm_write_reg(
- compressor->ctx,
- DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
- value);
- }
-
- /* Disable LPT */
- addr = mmLOW_POWER_TILING_CONTROL;
- value = dm_read_reg(compressor->ctx, addr);
- set_reg_field_value(
- value,
- 0,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_ENABLE);
- dm_write_reg(compressor->ctx, addr, value);
-
- /* Clear selection of Channel(s) containing Compressed Surface */
- addr = mmGMCON_LPT_TARGET;
- value = dm_read_reg(compressor->ctx, addr);
- set_reg_field_value(
- value,
- 0xFFFFFFFF,
- GMCON_LPT_TARGET,
- STCTRL_LPT_TARGET);
- dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
-}
-
-void dce80_compressor_enable_lpt(struct compressor *compressor)
-{
- struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
- uint32_t value;
- uint32_t addr;
- uint32_t value_control;
- uint32_t channels;
-
- /* Enable LPT Stutter from Display pipe */
- value = dm_read_reg(compressor->ctx,
- DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
- set_reg_field_value(
- value,
- 1,
- DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
- STUTTER_ENABLE_NONLPTCH);
- dm_write_reg(compressor->ctx,
- DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
-
- /* Selection of Channel(s) containing Compressed Surface: 0xfffffff
- * will disable LPT.
- * STCTRL_LPT_TARGETn corresponds to channel n. */
- addr = mmLOW_POWER_TILING_CONTROL;
- value_control = dm_read_reg(compressor->ctx, addr);
- channels = get_reg_field_value(value_control,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_MODE);
-
- addr = mmGMCON_LPT_TARGET;
- value = dm_read_reg(compressor->ctx, addr);
- set_reg_field_value(
- value,
- channels + 1, /* not mentioned in programming guide,
- but follow DCE8.1 */
- GMCON_LPT_TARGET,
- STCTRL_LPT_TARGET);
- dm_write_reg(compressor->ctx, addr, value);
-
- /* Enable LPT */
- addr = mmLOW_POWER_TILING_CONTROL;
- value = dm_read_reg(compressor->ctx, addr);
- set_reg_field_value(
- value,
- 1,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_ENABLE);
- dm_write_reg(compressor->ctx, addr, value);
-}
-
-void dce80_compressor_program_lpt_control(
- struct compressor *compressor,
- struct compr_addr_and_pitch_params *params)
-{
- struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
- uint32_t rows_per_channel;
- uint32_t lpt_alignment;
- uint32_t source_view_width;
- uint32_t source_view_height;
- uint32_t lpt_control = 0;
-
- if (!compressor->options.bits.LPT_SUPPORT)
- return;
-
- lpt_control = dm_read_reg(compressor->ctx,
- mmLOW_POWER_TILING_CONTROL);
-
- /* POSSIBLE VALUES for Low Power Tiling Mode:
- * 00 - Use channel 0
- * 01 - Use Channel 0 and 1
- * 02 - Use Channel 0,1,2,3
- * 03 - reserved */
- switch (compressor->lpt_channels_num) {
- /* case 2:
- * Use Channel 0 & 1 / Not used for DCE 11 */
- case 1:
- /*Use Channel 0 for LPT for DCE 11 */
- set_reg_field_value(
- lpt_control,
- 0,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_MODE);
- break;
- default:
- dm_logger_write(
- compressor->ctx->logger, LOG_WARNING,
- "%s: Invalid selected DRAM channels for LPT!!!",
- __func__);
- break;
- }
-
- lpt_control = lpt_memory_control_config(cp80, lpt_control);
-
- /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
- * FBC compressed surface pitch.
- * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
- * Surface Pitch) / (Row Size * Number of Channels *
- * Number of Banks)). */
- rows_per_channel = 0;
- lpt_alignment = lpt_size_alignment(cp80);
- source_view_width =
- align_to_chunks_number_per_line(
- cp80,
- params->source_view_width);
- source_view_height = (params->source_view_height + 1) & (~0x1);
-
- if (lpt_alignment != 0) {
- rows_per_channel = source_view_width * source_view_height * 4;
- rows_per_channel =
- (rows_per_channel % lpt_alignment) ?
- (rows_per_channel / lpt_alignment + 1) :
- rows_per_channel / lpt_alignment;
- }
-
- set_reg_field_value(
- lpt_control,
- rows_per_channel,
- LOW_POWER_TILING_CONTROL,
- LOW_POWER_TILING_ROWS_PER_CHAN);
-
- dm_write_reg(compressor->ctx,
- mmLOW_POWER_TILING_CONTROL, lpt_control);
-}
-
-/*
- * DCE 11 Frame Buffer Compression Implementation
- */
-
-void dce80_compressor_set_fbc_invalidation_triggers(
- struct compressor *compressor,
- uint32_t fbc_trigger)
-{
- /* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19)
- * for DCE 11 regions cannot be used - does not work with S/G
- */
- uint32_t addr = mmFBC_CLIENT_REGION_MASK;
- uint32_t value = dm_read_reg(compressor->ctx, addr);
-
- set_reg_field_value(
- value,
- 0,
- FBC_CLIENT_REGION_MASK,
- FBC_MEMORY_REGION_MASK);
- dm_write_reg(compressor->ctx, addr, value);
-
- /* Setup events when to clear all CSM entries (effectively marking
- * current compressed data invalid)
- * For DCE 11 CSM metadata 11111 means - "Not Compressed"
- * Used as the initial value of the metadata sent to the compressor
- * after invalidation, to indicate that the compressor should attempt
- * to compress all chunks on the current pass. Also used when the chunk
- * is not successfully written to memory.
- * When this CSM value is detected, FBC reads from the uncompressed
- * buffer. Set events according to passed in value, these events are
- * valid for DCE8:
- * - bit 0 - display register updated
- * - bit 28 - memory write from any client except from MCIF
- * - bit 29 - CG static screen signal is inactive
- * In addition, DCE8.1 also needs to set new DCE8.1 specific events
- * that are used to trigger invalidation on certain register changes,
- * for example enabling of Alpha Compression may trigger invalidation of
- * FBC once bit is set. These events are as follows:
- * - Bit 2 - FBC_GRPH_COMP_EN register updated
- * - Bit 3 - FBC_SRC_SEL register updated
- * - Bit 4 - FBC_MIN_COMPRESSION register updated
- * - Bit 5 - FBC_ALPHA_COMP_EN register updated
- * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
- * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
- */
- addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
- value = dm_read_reg(compressor->ctx, addr);
- set_reg_field_value(
- value,
- fbc_trigger |
- FBC_IDLE_FORCE_GRPH_COMP_EN |
- FBC_IDLE_FORCE_SRC_SEL_CHANGE |
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
- FBC_IDLE_FORCE_ALPHA_COMP_EN |
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
- FBC_IDLE_FORCE_CLEAR_MASK,
- FBC_IDLE_FORCE_CLEAR_MASK);
- dm_write_reg(compressor->ctx, addr, value);
-}
-
-void dce80_compressor_construct(struct dce80_compressor *compressor,
- struct dc_context *ctx)
-{
- struct dc_bios *bp = ctx->dc_bios;
- struct embedded_panel_info panel_info;
-
- compressor->base.options.raw = 0;
- compressor->base.options.bits.FBC_SUPPORT = true;
- compressor->base.options.bits.LPT_SUPPORT = true;
- /* For DCE 11 always use one DRAM channel for LPT */
- compressor->base.lpt_channels_num = 1;
- compressor->base.options.bits.DUMMY_BACKEND = false;
-
- /* Check if this system has more than 1 DRAM channel; if only 1 then LPT
- * should not be supported */
- if (compressor->base.memory_bus_width == 64)
- compressor->base.options.bits.LPT_SUPPORT = false;
-
- compressor->base.options.bits.CLK_GATING_DISABLED = false;
-
- compressor->base.ctx = ctx;
- compressor->base.embedded_panel_h_size = 0;
- compressor->base.embedded_panel_v_size = 0;
- compressor->base.memory_bus_width = ctx->asic_id.vram_width;
- compressor->base.allocated_size = 0;
- compressor->base.preferred_requested_size = 0;
- compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
- compressor->base.banks_num = 0;
- compressor->base.raw_size = 0;
- compressor->base.channel_interleave_size = 0;
- compressor->base.dram_channels_num = 0;
- compressor->base.lpt_channels_num = 0;
- compressor->base.attached_inst = 0;
- compressor->base.is_enabled = false;
-
- if (BP_RESULT_OK ==
- bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
- compressor->base.embedded_panel_h_size =
- panel_info.lcd_timing.horizontal_addressable;
- compressor->base.embedded_panel_v_size =
- panel_info.lcd_timing.vertical_addressable;
- }
-}
-
-struct compressor *dce80_compressor_create(struct dc_context *ctx)
-{
- struct dce80_compressor *cp80 =
- kzalloc(sizeof(struct dce80_compressor), GFP_KERNEL);
-
- if (!cp80)
- return NULL;
-
- dce80_compressor_construct(cp80, ctx);
- return &cp80->base;
-}
-
-void dce80_compressor_destroy(struct compressor **compressor)
-{
- kfree(TO_DCE80_COMPRESSOR(*compressor));
- *compressor = NULL;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
deleted file mode 100644
index cca58b0..0000000
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DC_COMPRESSOR_DCE80_H__
-#define __DC_COMPRESSOR_DCE80_H__
-
-#include "../inc/compressor.h"
-
-#define TO_DCE80_COMPRESSOR(compressor)\
- container_of(compressor, struct dce80_compressor, base)
-
-struct dce80_compressor_reg_offsets {
- uint32_t dcp_offset;
- uint32_t dmif_offset;
-};
-
-struct dce80_compressor {
- struct compressor base;
- struct dce80_compressor_reg_offsets offsets;
-};
-
-struct compressor *dce80_compressor_create(struct dc_context *ctx);
-
-void dce80_compressor_construct(struct dce80_compressor *cp80,
- struct dc_context *ctx);
-
-void dce80_compressor_destroy(struct compressor **cp);
-
-/* FBC RELATED */
-void dce80_compressor_power_up_fbc(struct compressor *cp);
-
-void dce80_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
- struct compr_addr_and_pitch_params *params);
-
-void dce80_compressor_disable_fbc(struct compressor *cp);
-
-void dce80_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
- uint32_t fbc_trigger);
-
-void dce80_compressor_program_compressed_surface_address_and_pitch(
- struct compressor *cp,
- struct compr_addr_and_pitch_params *params);
-
-bool dce80_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
- uint32_t *fbc_mapped_crtc_id);
-
-/* LPT RELATED */
-void dce80_compressor_enable_lpt(struct compressor *cp);
-
-void dce80_compressor_disable_lpt(struct compressor *cp);
-
-void dce80_compressor_program_lpt_control(struct compressor *cp,
- struct compr_addr_and_pitch_params *params);
-
-bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index ccfcf1c..6c6a1a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -70,47 +70,11 @@ static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
/***************************PIPE_CONTROL***********************************/
-static bool dce80_enable_display_power_gating(
- struct dc *dc,
- uint8_t controller_id,
- struct dc_bios *dcb,
- enum pipe_gating_control power_gating)
-{
- enum bp_result bp_result = BP_RESULT_OK;
- enum bp_pipe_control_action cntl;
- struct dc_context *ctx = dc->ctx;
-
- if (power_gating == PIPE_GATING_CONTROL_INIT)
- cntl = ASIC_PIPE_INIT;
- else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
- cntl = ASIC_PIPE_ENABLE;
- else
- cntl = ASIC_PIPE_DISABLE;
-
- if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)){
-
- bp_result = dcb->funcs->enable_disp_power_gating(
- dcb, controller_id + 1, cntl);
-
- /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
- * by default when command table is called
- */
- dm_write_reg(ctx,
- HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
- 0);
- }
-
- if (bp_result == BP_RESULT_OK)
- return true;
- else
- return false;
-}
-
void dce80_hw_sequencer_construct(struct dc *dc)
{
dce110_hw_sequencer_construct(dc);
- dc->hwss.enable_display_power_gating = dce80_enable_display_power_gating;
+ dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
dc->hwss.set_bandwidth = dce100_set_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 9c18efd..5d854a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -53,6 +53,8 @@
#include "reg_helper.h"
+#include "dce/dce_dmcu.h"
+#include "dce/dce_abm.h"
/* TODO remove this include */
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -364,6 +366,29 @@ static const struct resource_caps res_cap_83 = {
.num_pll = 2,
};
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE80_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE80(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE80(_MASK)
+};
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -643,6 +668,12 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
if (pool->base.dp_clock_source != NULL)
dce80_clock_source_destroy(&pool->base.dp_clock_source);
@@ -790,9 +821,11 @@ static bool dce80_construct(
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = res_cap.num_timing_generator;
+ pool->base.timing_generator_count = res_cap.num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
/*************************************************
* Create resources *
@@ -848,7 +881,25 @@ static bool dce80_construct(
goto res_create_fail;
}
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
pool->base.display_clock->max_clks_state =
static_clk_info.max_clocks_state;
@@ -954,9 +1005,11 @@ static bool dce81_construct(
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = res_cap_81.num_timing_generator;
+ pool->base.timing_generator_count = res_cap_81.num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
+ dc->caps.is_apu = true;
/*************************************************
* Create resources *
@@ -1012,6 +1065,25 @@ static bool dce81_construct(
goto res_create_fail;
}
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
pool->base.display_clock->max_clks_state =
@@ -1118,9 +1190,11 @@ static bool dce83_construct(
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = res_cap_83.num_timing_generator;
+ pool->base.timing_generator_count = res_cap_83.num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
+ dc->caps.is_apu = true;
/*************************************************
* Create resources *
@@ -1172,6 +1246,25 @@ static bool dce83_construct(
goto res_create_fail;
}
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
pool->base.display_clock->max_clks_state =
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 2658948..3ba4712 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -84,7 +84,7 @@ static const struct dce110_timing_generator_offsets reg_offsets[] = {
#define DCP_REG(reg) (reg + tg110->offsets.dcp)
#define DMIF_REG(reg) (reg + tg110->offsets.dmif)
-void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_khz)
+static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_khz)
{
uint64_t pix_dur;
uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1
@@ -115,6 +115,68 @@ static void program_timing(struct timing_generator *tg,
dce110_tg_program_timing(tg, timing, use_vbios);
}
+static void dce80_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (enable) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PROGRESSIVE_START_LINE_EARLY);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_INTERLACE_START_LINE_EARLY);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
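Editor's note: the start-line programming above reduces to a single threshold on the vertical blank budget. A minimal sketch of that decision, using only the dc_crtc_timing fields visible above (illustrative helper, not part of the patch):

	/* Illustrative only: mirrors the threshold used above. With three or fewer
	 * lines of vsync + front porch, start on line 3 without prefetch;
	 * otherwise start on line 4 with prefetch enabled.
	 */
	static inline uint32_t dce80_pick_advanced_start_line(
		const struct dc_crtc_timing *timing, bool *prefetch_en)
	{
		if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
			*prefetch_en = false;
			return 3;
		}
		*prefetch_en = true;
		return 4;
	}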
+
static const struct timing_generator_funcs dce80_tg_funcs = {
.validate_timing = dce110_tg_validate_timing,
.program_timing = program_timing,
@@ -150,6 +212,8 @@ static const struct timing_generator_funcs dce80_tg_funcs = {
/* DCE8.0 overrides */
.enable_advanced_request =
dce80_timing_generator_enable_advanced_request,
+ .configure_crc = dce110_configure_crc,
+ .get_crc = dce110_get_crc,
};
void dce80_timing_generator_construct(
@@ -176,64 +240,3 @@ void dce80_timing_generator_construct(
tg110->min_h_back_porch = 4;
}
-void dce80_timing_generator_enable_advanced_request(
- struct timing_generator *tg,
- bool enable,
- const struct dc_crtc_timing *timing)
-{
- struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
- uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
- uint32_t value = dm_read_reg(tg->ctx, addr);
-
- if (enable) {
- set_reg_field_value(
- value,
- 0,
- CRTC_START_LINE_CONTROL,
- CRTC_LEGACY_REQUESTOR_EN);
- } else {
- set_reg_field_value(
- value,
- 1,
- CRTC_START_LINE_CONTROL,
- CRTC_LEGACY_REQUESTOR_EN);
- }
-
- if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
- set_reg_field_value(
- value,
- 3,
- CRTC_START_LINE_CONTROL,
- CRTC_ADVANCED_START_LINE_POSITION);
- set_reg_field_value(
- value,
- 0,
- CRTC_START_LINE_CONTROL,
- CRTC_PREFETCH_EN);
- } else {
- set_reg_field_value(
- value,
- 4,
- CRTC_START_LINE_CONTROL,
- CRTC_ADVANCED_START_LINE_POSITION);
- set_reg_field_value(
- value,
- 1,
- CRTC_START_LINE_CONTROL,
- CRTC_PREFETCH_EN);
- }
-
- set_reg_field_value(
- value,
- 1,
- CRTC_START_LINE_CONTROL,
- CRTC_PROGRESSIVE_START_LINE_EARLY);
-
- set_reg_field_value(
- value,
- 1,
- CRTC_START_LINE_CONTROL,
- CRTC_INTERLACE_START_LINE_EARLY);
-
- dm_write_reg(tg->ctx, addr, value);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
index 9cebb24..8ff1b06 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
@@ -36,10 +36,4 @@ void dce80_timing_generator_construct(
uint32_t instance,
const struct dce110_timing_generator_offsets *offsets);
-/******** HW programming ************/
-void dce80_timing_generator_enable_advanced_request(
- struct timing_generator *tg,
- bool enable,
- const struct dc_crtc_timing *timing);
-
#endif /* __DC_TIMING_GENERATOR_DCE80_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index f565a60..5469bdf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -23,9 +23,10 @@
# Makefile for DCN.
DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
- dcn10_dpp.o dcn10_opp.o dcn10_timing_generator.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
dcn10_hubp.o dcn10_mpc.o \
- dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o
+ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+ dcn10_hubbub.o
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 7f579cb..881a1bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -22,11 +22,12 @@
* Authors: AMD
*
*/
-
+#include "dc.h"
#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "dcn10_cm_common.h"
+#include "custom_float.h"
#define REG(reg) reg
@@ -121,3 +122,450 @@ void cm_helper_program_xfer_func(
}
}
+
+
+
+bool cm_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num,
+ bool fixpoint)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
+ &arr_points[0].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
+ &arr_points[0].custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
+ &arr_points[0].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
+ &arr_points[1].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (fixpoint == true)
+ arr_points[1].custom_float_y = dal_fixed31_32_clamp_u0d14(arr_points[1].y);
+ else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
+ &arr_points[1].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (hw_points_num == 0 || rgb_resulted == NULL || fixpoint == true)
+ return true;
+
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(rgb->red, &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->green, &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->blue, &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+
+/* driver uses 32 regions or less, but DCN HW has 34, extra 2 are set to 0 */
+#define MAX_REGIONS_NUMBER 34
+#define MAX_LOW_POINT 25
+#define NUMBER_REGIONS 32
+#define NUMBER_SW_SEGMENTS 16
+
+bool cm_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t region_start, region_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ PERF_TRACE();
+
+ arr_points = lut_params->arr_points;
+ rgb_resulted = lut_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(lut_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* 32 segments
+ * segments are from 2^-25 to 2^7
+ */
+ for (i = 0; i < NUMBER_REGIONS ; i++)
+ seg_distr[i] = 3;
+
+ region_start = -MAX_LOW_POINT;
+ region_end = NUMBER_REGIONS - MAX_LOW_POINT;
+ } else {
+ /* 10 segments
+ * segments are from 2^-10 to 2^0
+ * There are fewer than 256 points, for optimization
+ */
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
+
+ region_start = -10;
+ region_end = 0;
+ }
+
+ for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (region_end - region_start); k++) {
+ increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (region_start + k + MAX_LOW_POINT) *
+ NUMBER_SW_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
+ rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(region_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(region_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ lut_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1) {
+ lut_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ lut_params->arr_curve_points[i].offset =
+ lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ lut_params->arr_curve_points[k].segments_num = seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
+
+ if (fixpoint == true) {
+ rgb->delta_red_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_red);
+ rgb->delta_green_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_green);
+ rgb->delta_blue_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_blue);
+ rgb->red_reg = dal_fixed31_32_clamp_u0d14(rgb->red);
+ rgb->green_reg = dal_fixed31_32_clamp_u0d14(rgb->green);
+ rgb->blue_reg = dal_fixed31_32_clamp_u0d14(rgb->blue);
+ }
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+ cm_helper_convert_to_custom_float(rgb_resulted,
+ lut_params->arr_points,
+ hw_points, fixpoint);
+
+ return true;
+}
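Editor's note: seg_distr[] holds log2 of the point count per region, so the PQ path above produces 32 regions x 2^3 = 256 HW points, while the non-PQ path produces 2^3 + 9 x 2^4 = 152 points (hence the "fewer than 256 points" comment). A minimal standalone sketch of that counting step (not part of the patch, mirrors the loop above):

	/* Sketch only: total HW LUT points implied by a segment distribution,
	 * where each entry is log2(points) for its region and (uint32_t)-1
	 * marks an unused region.
	 */
	static uint32_t count_hw_points(const uint32_t *seg_distr, uint32_t num_regions)
	{
		uint32_t points = 0;
		uint32_t k;

		for (k = 0; k < num_regions; k++)
			if (seg_distr[k] != (uint32_t)-1)
				points += 1 << seg_distr[k];

		return points;
	}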
+
+#define NUM_DEGAMMA_REGIONS 12
+
+
+bool cm_helper_translate_curve_to_degamma_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t region_start, region_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ PERF_TRACE();
+
+ arr_points = lut_params->arr_points;
+ rgb_resulted = lut_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(lut_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ region_start = -NUM_DEGAMMA_REGIONS;
+ region_end = 0;
+
+
+ for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+ /* 12 segments
+ * segments are from 2^-12 to 0
+ */
+ for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++)
+ seg_distr[i] = 4;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (region_end - region_start); k++) {
+ increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (region_start + k + MAX_LOW_POINT) *
+ NUMBER_SW_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
+ rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(region_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(region_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ lut_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1) {
+ lut_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ lut_params->arr_curve_points[i].offset =
+ lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ lut_params->arr_curve_points[k].segments_num = seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+ cm_helper_convert_to_custom_float(rgb_resulted,
+ lut_params->arr_points,
+ hw_points, false);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 64836dc..7a531b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -96,4 +96,19 @@ void cm_helper_program_xfer_func(
const struct pwl_params *params,
const struct xfer_func_reg *reg);
+bool cm_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num,
+ bool fixpoint);
+
+bool cm_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint);
+
+bool cm_helper_translate_curve_to_degamma_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params);
+
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index a9d55d0..e305c28 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -177,37 +177,17 @@ void dpp_reset(struct dpp *dpp_base)
dpp->filter_h = NULL;
dpp->filter_v = NULL;
- /* set boundary mode to 0 */
- REG_SET(DSCL_CONTROL, 0, SCL_BOUNDARY_MODE, 0);
+ memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
+ memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
}
static void dpp1_cm_set_regamma_pwl(
- struct dpp *dpp_base, const struct pwl_params *params)
-{
- struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
-
- dpp1_cm_power_on_regamma_lut(dpp_base, true);
- dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe);
-
- if (dpp->is_write_to_ram_a_safe)
- dpp1_cm_program_regamma_luta_settings(dpp_base, params);
- else
- dpp1_cm_program_regamma_lutb_settings(dpp_base, params);
-
- dpp1_cm_program_regamma_lut(
- dpp_base, params->rgb_resulted, params->hw_points_num);
-}
-
-static void dpp1_cm_set_regamma_mode(
- struct dpp *dpp_base,
- enum opp_regamma mode)
+ struct dpp *dpp_base, const struct pwl_params *params, enum opp_regamma mode)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
uint32_t re_mode = 0;
- uint32_t obuf_bypass = 0; /* need for pipe split */
- uint32_t obuf_hupscale = 0;
switch (mode) {
case OPP_REGAMMA_BYPASS:
@@ -216,21 +196,33 @@ static void dpp1_cm_set_regamma_mode(
case OPP_REGAMMA_SRGB:
re_mode = 1;
break;
- case OPP_REGAMMA_3_6:
+ case OPP_REGAMMA_XVYCC:
re_mode = 2;
break;
case OPP_REGAMMA_USER:
+ re_mode = dpp->is_write_to_ram_a_safe ? 4 : 3;
+ if (memcmp(&dpp->pwl_data, params, sizeof(*params)) == 0)
+ break;
+
+ dpp1_cm_power_on_regamma_lut(dpp_base, true);
+ dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe);
+
+ if (dpp->is_write_to_ram_a_safe)
+ dpp1_cm_program_regamma_luta_settings(dpp_base, params);
+ else
+ dpp1_cm_program_regamma_lutb_settings(dpp_base, params);
+
+ dpp1_cm_program_regamma_lut(dpp_base, params->rgb_resulted,
+ params->hw_points_num);
+ dpp->pwl_data = *params;
+
re_mode = dpp->is_write_to_ram_a_safe ? 3 : 4;
dpp->is_write_to_ram_a_safe = !dpp->is_write_to_ram_a_safe;
break;
default:
break;
}
-
REG_SET(CM_RGAM_CONTROL, 0, CM_RGAM_LUT_MODE, re_mode);
- REG_UPDATE_2(OBUF_CONTROL,
- OBUF_BYPASS, obuf_bypass,
- OBUF_H_2X_UPSCALE_EN, obuf_hupscale);
}
static void dpp1_setup_format_flags(enum surface_pixel_format input_format,\
@@ -263,8 +255,10 @@ static void dpp1_set_degamma_format_float(
void dpp1_cnv_setup (
struct dpp *dpp_base,
- enum surface_pixel_format input_format,
- enum expansion_mode mode)
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space)
{
uint32_t pixel_format;
uint32_t alpha_en;
@@ -274,8 +268,10 @@ void dpp1_cnv_setup (
bool is_float;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
bool force_disable_cursor = false;
+ struct out_csc_color_matrix tbl_entry;
+ int i = 0;
- dpp1_setup_format_flags(input_format, &fmt);
+ dpp1_setup_format_flags(format, &fmt);
alpha_en = 1;
pixel_format = 0;
color_space = COLOR_SPACE_SRGB;
@@ -305,7 +301,7 @@ void dpp1_cnv_setup (
dpp1_set_degamma_format_float(dpp_base, is_float);
- switch (input_format) {
+ switch (format) {
case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
pixel_format = 1;
break;
@@ -361,7 +357,23 @@ void dpp1_cnv_setup (
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
- dpp1_program_input_csc(dpp_base, color_space, select);
+ // if input adjustments exist, program icsc with those values
+
+ if (input_csc_color_matrix.enable_adjustment
+ == true) {
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = input_color_space;
+
+ if (color_space >= COLOR_SPACE_YCBCR601)
+ select = INPUT_CSC_SELECT_ICSC;
+ else
+ select = INPUT_CSC_SELECT_BYPASS;
+
+ dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry);
+ } else
+ dpp1_program_input_csc(dpp_base, color_space, select, NULL);
if (force_disable_cursor) {
REG_UPDATE(CURSOR_CONTROL,
@@ -373,10 +385,9 @@ void dpp1_cnv_setup (
void dpp1_set_cursor_attributes(
struct dpp *dpp_base,
- const struct dc_cursor_attributes *attr)
+ enum dc_cursor_color_format color_format)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- enum dc_cursor_color_format color_format = attr->color_format;
REG_UPDATE_2(CURSOR0_CONTROL,
CUR0_MODE, color_format,
@@ -389,13 +400,6 @@ void dpp1_set_cursor_attributes(
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
-
- /* TODO: Fixed vs float */
-
- REG_UPDATE_3(FORMAT_CONTROL,
- CNVC_BYPASS, 0,
- FORMAT_CONTROL__ALPHA_EN, 1,
- FORMAT_EXPANSION_MODE, 0);
}
@@ -420,27 +424,47 @@ void dpp1_set_cursor_position(
}
+void dpp1_dppclk_control(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+ bool enable)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (enable) {
+ if (dpp->tf_mask->DPPCLK_RATE_CONTROL)
+ REG_UPDATE_2(DPP_CONTROL,
+ DPPCLK_RATE_CONTROL, dppclk_div,
+ DPP_CLOCK_ENABLE, 1);
+ else
+ REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1);
+ } else
+ REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
+}
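Editor's note: a hypothetical caller sketch (not from this patch), assuming struct dpp exposes its function table as dpp->funcs as elsewhere in DC; the hook name matches the .dpp_dppclk_control entry added to dcn10_dpp_funcs just below:

	/* Hypothetical usage, not part of the patch: gate the DPP clock on at
	 * full rate through the dpp_funcs table populated below.
	 */
	static void example_enable_dpp_clock(struct dpp *dpp_base)
	{
		/* dppclk_div = false: full-rate DPPCLK; enable = true: clock on */
		dpp_base->funcs->dpp_dppclk_control(dpp_base, false, true);
	}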
+
static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
- .opp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
- .opp_set_csc_default = dpp1_cm_set_output_csc_default,
- .opp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut,
- .opp_program_regamma_lut = dpp1_cm_program_regamma_lut,
- .opp_configure_regamma_lut = dpp1_cm_configure_regamma_lut,
- .opp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings,
- .opp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings,
- .opp_program_regamma_pwl = dpp1_cm_set_regamma_pwl,
- .opp_set_regamma_mode = dpp1_cm_set_regamma_mode,
- .ipp_set_degamma = dpp1_set_degamma,
- .ipp_program_input_lut = dpp1_program_input_lut,
- .ipp_program_degamma_pwl = dpp1_set_degamma_pwl,
- .ipp_setup = dpp1_cnv_setup,
- .ipp_full_bypass = dpp1_full_bypass,
+ .dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
+ .dpp_set_csc_default = dpp1_cm_set_output_csc_default,
+ .dpp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut,
+ .dpp_program_regamma_lut = dpp1_cm_program_regamma_lut,
+ .dpp_configure_regamma_lut = dpp1_cm_configure_regamma_lut,
+ .dpp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings,
+ .dpp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings,
+ .dpp_program_regamma_pwl = dpp1_cm_set_regamma_pwl,
+ .dpp_program_bias_and_scale = dpp1_program_bias_and_scale,
+ .dpp_set_degamma = dpp1_set_degamma,
+ .dpp_program_input_lut = dpp1_program_input_lut,
+ .dpp_program_degamma_pwl = dpp1_set_degamma_pwl,
+ .dpp_setup = dpp1_cnv_setup,
+ .dpp_full_bypass = dpp1_full_bypass,
.set_cursor_attributes = dpp1_set_cursor_attributes,
.set_cursor_position = dpp1_set_cursor_position,
+ .dpp_dppclk_control = dpp1_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
};
static struct dpp_caps dcn10_dpp_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 34daf89..17b062a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -54,7 +54,6 @@
SRI(LB_MEMORY_CTRL, DSCL, id), \
SRI(DSCL_AUTOCAL, DSCL, id), \
SRI(SCL_BLACK_OFFSET, DSCL, id), \
- SRI(DSCL_CONTROL, DSCL, id), \
SRI(SCL_TAP_CONTROL, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
@@ -72,7 +71,6 @@
SRI(SCL_VERT_FILTER_INIT_BOT_C, DSCL, id), \
SRI(RECOUT_START, DSCL, id), \
SRI(RECOUT_SIZE, DSCL, id), \
- SRI(OBUF_CONTROL, DSCL, id), \
SRI(CM_ICSC_CONTROL, CM, id), \
SRI(CM_ICSC_C11_C12, CM, id), \
SRI(CM_ICSC_C33_C34, CM, id), \
@@ -114,7 +112,9 @@
SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
- SRI(CURSOR0_COLOR1, CNVC_CUR, id)
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
+ SRI(DPP_CONTROL, DPP_TOP, id), \
+ SRI(CM_HDR_MULT_COEF, CM, id)
@@ -127,6 +127,9 @@
SRI(CM_OCSC_CONTROL, CM, id), \
SRI(CM_OCSC_C11_C12, CM, id), \
SRI(CM_OCSC_C33_C34, CM, id), \
+ SRI(CM_BNS_VALUES_R, CM, id), \
+ SRI(CM_BNS_VALUES_G, CM, id), \
+ SRI(CM_BNS_VALUES_B, CM, id), \
SRI(CM_MEM_PWR_CTRL, CM, id), \
SRI(CM_RGAM_LUT_DATA, CM, id), \
SRI(CM_RGAM_LUT_WRITE_EN_MASK, CM, id),\
@@ -191,7 +194,6 @@
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\
TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\
- TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\
@@ -235,7 +237,6 @@
TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_INT_BOT_C, mask_sh),\
TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\
TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \
- TF_SF(DSCL0_OBUF_CONTROL, OBUF_BYPASS, mask_sh), \
TF_SF(CM0_CM_ICSC_CONTROL, CM_ICSC_MODE, mask_sh), \
TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C11, mask_sh), \
TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C12, mask_sh), \
@@ -307,7 +308,9 @@
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
- TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh)
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
+ TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\
TF_REG_LIST_SH_MASK_DCN(mask_sh),\
@@ -329,6 +332,12 @@
TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C12, mask_sh), \
TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C33, mask_sh), \
TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C34, mask_sh), \
+ TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_BIAS_R, mask_sh), \
+ TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_BIAS_G, mask_sh), \
+ TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_BIAS_B, mask_sh), \
+ TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_SCALE_R, mask_sh), \
+ TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_SCALE_G, mask_sh), \
+ TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_SCALE_B, mask_sh), \
TF_SF(CM0_CM_MEM_PWR_CTRL, RGAM_MEM_PWR_FORCE, mask_sh), \
TF_SF(CM0_CM_RGAM_LUT_DATA, CM_RGAM_LUT_DATA, mask_sh), \
TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_EN_MASK, mask_sh), \
@@ -387,7 +396,6 @@
TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_RGAM_CONTROL, CM_RGAM_LUT_MODE, mask_sh), \
- TF_SF(DSCL0_OBUF_CONTROL, OBUF_H_2X_UPSCALE_EN, mask_sh), \
TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, mask_sh), \
TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_R, mask_sh), \
TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_G, mask_sh), \
@@ -406,7 +414,8 @@
TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
- TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh)
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh)
#define TF_REG_FIELD_LIST(type) \
type EXT_OVERSCAN_LEFT; \
@@ -431,7 +440,6 @@
type AUTOCAL_PIPE_ID; \
type SCL_BLACK_OFFSET_RGB_Y; \
type SCL_BLACK_OFFSET_CBCR; \
- type SCL_BOUNDARY_MODE; \
type SCL_V_NUM_TAPS; \
type SCL_H_NUM_TAPS; \
type SCL_V_NUM_TAPS_C; \
@@ -552,8 +560,6 @@
type CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
type CM_RGAM_LUT_MODE; \
type CM_CMOUT_ROUND_TRUNC_MODE; \
- type OBUF_BYPASS; \
- type OBUF_H_2X_UPSCALE_EN; \
type CM_BLNDGAM_LUT_MODE; \
type CM_BLNDGAM_RAMB_EXP_REGION_START_B; \
type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
@@ -729,8 +735,9 @@
type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
type CM_BLNDGAM_LUT_WRITE_EN_MASK; \
type CM_BLNDGAM_LUT_WRITE_SEL; \
+ type CM_BLNDGAM_CONFIG_STATUS; \
type CM_BLNDGAM_LUT_INDEX; \
- type CM_BLNDGAM_LUT_DATA; \
+ type BLNDGAM_MEM_PWR_FORCE; \
type CM_3DLUT_MODE; \
type CM_3DLUT_SIZE; \
type CM_3DLUT_INDEX; \
@@ -904,6 +911,7 @@
type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \
type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \
type CM_SHAPER_LUT_WRITE_EN_MASK; \
+ type CM_SHAPER_CONFIG_STATUS; \
type CM_SHAPER_LUT_WRITE_SEL; \
type CM_SHAPER_LUT_INDEX; \
type CM_SHAPER_LUT_DATA; \
@@ -913,6 +921,12 @@
type CM_ICSC_C12; \
type CM_ICSC_C33; \
type CM_ICSC_C34; \
+ type CM_BNS_BIAS_R; \
+ type CM_BNS_BIAS_G; \
+ type CM_BNS_BIAS_B; \
+ type CM_BNS_SCALE_R; \
+ type CM_BNS_SCALE_G; \
+ type CM_BNS_SCALE_B; \
type CM_DGAM_RAMB_EXP_REGION_START_B; \
type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
type CM_DGAM_RAMB_EXP_REGION_START_G; \
@@ -998,257 +1012,260 @@
type CM_BYPASS; \
type FORMAT_CONTROL__ALPHA_EN; \
type CUR0_COLOR0; \
- type CUR0_COLOR1
-
-
+ type CUR0_COLOR1; \
+ type DPPCLK_RATE_CONTROL; \
+ type DPP_CLOCK_ENABLE; \
+ type CM_HDR_MULT_COEF;
struct dcn_dpp_shift {
- TF_REG_FIELD_LIST(uint8_t);
+ TF_REG_FIELD_LIST(uint8_t)
};
struct dcn_dpp_mask {
- TF_REG_FIELD_LIST(uint32_t);
+ TF_REG_FIELD_LIST(uint32_t)
};
-
-
+#define DPP_COMMON_REG_VARIABLE_LIST \
+ uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT; \
+ uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM; \
+ uint32_t OTG_H_BLANK; \
+ uint32_t OTG_V_BLANK; \
+ uint32_t SCL_MODE; \
+ uint32_t LB_DATA_FORMAT; \
+ uint32_t LB_MEMORY_CTRL; \
+ uint32_t DSCL_AUTOCAL; \
+ uint32_t SCL_BLACK_OFFSET; \
+ uint32_t SCL_TAP_CONTROL; \
+ uint32_t SCL_COEF_RAM_TAP_SELECT; \
+ uint32_t SCL_COEF_RAM_TAP_DATA; \
+ uint32_t DSCL_2TAP_CONTROL; \
+ uint32_t MPC_SIZE; \
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO; \
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO; \
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C; \
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO_C; \
+ uint32_t SCL_HORZ_FILTER_INIT; \
+ uint32_t SCL_HORZ_FILTER_INIT_C; \
+ uint32_t SCL_VERT_FILTER_INIT; \
+ uint32_t SCL_VERT_FILTER_INIT_BOT; \
+ uint32_t SCL_VERT_FILTER_INIT_C; \
+ uint32_t SCL_VERT_FILTER_INIT_BOT_C; \
+ uint32_t RECOUT_START; \
+ uint32_t RECOUT_SIZE; \
+ uint32_t CM_GAMUT_REMAP_CONTROL; \
+ uint32_t CM_GAMUT_REMAP_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_C33_C34; \
+ uint32_t CM_COMA_C11_C12; \
+ uint32_t CM_COMA_C33_C34; \
+ uint32_t CM_COMB_C11_C12; \
+ uint32_t CM_COMB_C33_C34; \
+ uint32_t CM_OCSC_CONTROL; \
+ uint32_t CM_OCSC_C11_C12; \
+ uint32_t CM_OCSC_C33_C34; \
+ uint32_t CM_MEM_PWR_CTRL; \
+ uint32_t CM_RGAM_LUT_DATA; \
+ uint32_t CM_RGAM_LUT_WRITE_EN_MASK; \
+ uint32_t CM_RGAM_LUT_INDEX; \
+ uint32_t CM_RGAM_RAMB_START_CNTL_B; \
+ uint32_t CM_RGAM_RAMB_START_CNTL_G; \
+ uint32_t CM_RGAM_RAMB_START_CNTL_R; \
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B; \
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G; \
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R; \
+ uint32_t CM_RGAM_RAMB_END_CNTL1_B; \
+ uint32_t CM_RGAM_RAMB_END_CNTL2_B; \
+ uint32_t CM_RGAM_RAMB_END_CNTL1_G; \
+ uint32_t CM_RGAM_RAMB_END_CNTL2_G; \
+ uint32_t CM_RGAM_RAMB_END_CNTL1_R; \
+ uint32_t CM_RGAM_RAMB_END_CNTL2_R; \
+ uint32_t CM_RGAM_RAMB_REGION_0_1; \
+ uint32_t CM_RGAM_RAMB_REGION_32_33; \
+ uint32_t CM_RGAM_RAMA_START_CNTL_B; \
+ uint32_t CM_RGAM_RAMA_START_CNTL_G; \
+ uint32_t CM_RGAM_RAMA_START_CNTL_R; \
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B; \
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G; \
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R; \
+ uint32_t CM_RGAM_RAMA_END_CNTL1_B; \
+ uint32_t CM_RGAM_RAMA_END_CNTL2_B; \
+ uint32_t CM_RGAM_RAMA_END_CNTL1_G; \
+ uint32_t CM_RGAM_RAMA_END_CNTL2_G; \
+ uint32_t CM_RGAM_RAMA_END_CNTL1_R; \
+ uint32_t CM_RGAM_RAMA_END_CNTL2_R; \
+ uint32_t CM_RGAM_RAMA_REGION_0_1; \
+ uint32_t CM_RGAM_RAMA_REGION_32_33; \
+ uint32_t CM_RGAM_CONTROL; \
+ uint32_t CM_CMOUT_CONTROL; \
+ uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK; \
+ uint32_t CM_BLNDGAM_CONTROL; \
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_0_1; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_2_3; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_4_5; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_6_7; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_8_9; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_10_11; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_12_13; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_14_15; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_16_17; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_18_19; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_20_21; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_22_23; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_24_25; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_26_27; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_28_29; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_30_31; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_32_33; \
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_0_1; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_2_3; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_4_5; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_6_7; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_8_9; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_10_11; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_12_13; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_14_15; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_16_17; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_18_19; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_20_21; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_22_23; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_24_25; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_26_27; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_28_29; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_30_31; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_32_33; \
+ uint32_t CM_BLNDGAM_LUT_INDEX; \
+ uint32_t CM_3DLUT_MODE; \
+ uint32_t CM_3DLUT_INDEX; \
+ uint32_t CM_3DLUT_DATA; \
+ uint32_t CM_3DLUT_DATA_30BIT; \
+ uint32_t CM_3DLUT_READ_WRITE_CONTROL; \
+ uint32_t CM_SHAPER_LUT_WRITE_EN_MASK; \
+ uint32_t CM_SHAPER_CONTROL; \
+ uint32_t CM_SHAPER_RAMB_START_CNTL_B; \
+ uint32_t CM_SHAPER_RAMB_START_CNTL_G; \
+ uint32_t CM_SHAPER_RAMB_START_CNTL_R; \
+ uint32_t CM_SHAPER_RAMB_END_CNTL_B; \
+ uint32_t CM_SHAPER_RAMB_END_CNTL_G; \
+ uint32_t CM_SHAPER_RAMB_END_CNTL_R; \
+ uint32_t CM_SHAPER_RAMB_REGION_0_1; \
+ uint32_t CM_SHAPER_RAMB_REGION_2_3; \
+ uint32_t CM_SHAPER_RAMB_REGION_4_5; \
+ uint32_t CM_SHAPER_RAMB_REGION_6_7; \
+ uint32_t CM_SHAPER_RAMB_REGION_8_9; \
+ uint32_t CM_SHAPER_RAMB_REGION_10_11; \
+ uint32_t CM_SHAPER_RAMB_REGION_12_13; \
+ uint32_t CM_SHAPER_RAMB_REGION_14_15; \
+ uint32_t CM_SHAPER_RAMB_REGION_16_17; \
+ uint32_t CM_SHAPER_RAMB_REGION_18_19; \
+ uint32_t CM_SHAPER_RAMB_REGION_20_21; \
+ uint32_t CM_SHAPER_RAMB_REGION_22_23; \
+ uint32_t CM_SHAPER_RAMB_REGION_24_25; \
+ uint32_t CM_SHAPER_RAMB_REGION_26_27; \
+ uint32_t CM_SHAPER_RAMB_REGION_28_29; \
+ uint32_t CM_SHAPER_RAMB_REGION_30_31; \
+ uint32_t CM_SHAPER_RAMB_REGION_32_33; \
+ uint32_t CM_SHAPER_RAMA_START_CNTL_B; \
+ uint32_t CM_SHAPER_RAMA_START_CNTL_G; \
+ uint32_t CM_SHAPER_RAMA_START_CNTL_R; \
+ uint32_t CM_SHAPER_RAMA_END_CNTL_B; \
+ uint32_t CM_SHAPER_RAMA_END_CNTL_G; \
+ uint32_t CM_SHAPER_RAMA_END_CNTL_R; \
+ uint32_t CM_SHAPER_RAMA_REGION_0_1; \
+ uint32_t CM_SHAPER_RAMA_REGION_2_3; \
+ uint32_t CM_SHAPER_RAMA_REGION_4_5; \
+ uint32_t CM_SHAPER_RAMA_REGION_6_7; \
+ uint32_t CM_SHAPER_RAMA_REGION_8_9; \
+ uint32_t CM_SHAPER_RAMA_REGION_10_11; \
+ uint32_t CM_SHAPER_RAMA_REGION_12_13; \
+ uint32_t CM_SHAPER_RAMA_REGION_14_15; \
+ uint32_t CM_SHAPER_RAMA_REGION_16_17; \
+ uint32_t CM_SHAPER_RAMA_REGION_18_19; \
+ uint32_t CM_SHAPER_RAMA_REGION_20_21; \
+ uint32_t CM_SHAPER_RAMA_REGION_22_23; \
+ uint32_t CM_SHAPER_RAMA_REGION_24_25; \
+ uint32_t CM_SHAPER_RAMA_REGION_26_27; \
+ uint32_t CM_SHAPER_RAMA_REGION_28_29; \
+ uint32_t CM_SHAPER_RAMA_REGION_30_31; \
+ uint32_t CM_SHAPER_RAMA_REGION_32_33; \
+ uint32_t CM_SHAPER_LUT_INDEX; \
+ uint32_t CM_SHAPER_LUT_DATA; \
+ uint32_t CM_ICSC_CONTROL; \
+ uint32_t CM_ICSC_C11_C12; \
+ uint32_t CM_ICSC_C33_C34; \
+ uint32_t CM_BNS_VALUES_R; \
+ uint32_t CM_BNS_VALUES_G; \
+ uint32_t CM_BNS_VALUES_B; \
+ uint32_t CM_DGAM_RAMB_START_CNTL_B; \
+ uint32_t CM_DGAM_RAMB_START_CNTL_G; \
+ uint32_t CM_DGAM_RAMB_START_CNTL_R; \
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B; \
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G; \
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R; \
+ uint32_t CM_DGAM_RAMB_END_CNTL1_B; \
+ uint32_t CM_DGAM_RAMB_END_CNTL2_B; \
+ uint32_t CM_DGAM_RAMB_END_CNTL1_G; \
+ uint32_t CM_DGAM_RAMB_END_CNTL2_G; \
+ uint32_t CM_DGAM_RAMB_END_CNTL1_R; \
+ uint32_t CM_DGAM_RAMB_END_CNTL2_R; \
+ uint32_t CM_DGAM_RAMB_REGION_0_1; \
+ uint32_t CM_DGAM_RAMB_REGION_14_15; \
+ uint32_t CM_DGAM_RAMA_START_CNTL_B; \
+ uint32_t CM_DGAM_RAMA_START_CNTL_G; \
+ uint32_t CM_DGAM_RAMA_START_CNTL_R; \
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B; \
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G; \
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R; \
+ uint32_t CM_DGAM_RAMA_END_CNTL1_B; \
+ uint32_t CM_DGAM_RAMA_END_CNTL2_B; \
+ uint32_t CM_DGAM_RAMA_END_CNTL1_G; \
+ uint32_t CM_DGAM_RAMA_END_CNTL2_G; \
+ uint32_t CM_DGAM_RAMA_END_CNTL1_R; \
+ uint32_t CM_DGAM_RAMA_END_CNTL2_R; \
+ uint32_t CM_DGAM_RAMA_REGION_0_1; \
+ uint32_t CM_DGAM_RAMA_REGION_14_15; \
+ uint32_t CM_DGAM_LUT_WRITE_EN_MASK; \
+ uint32_t CM_DGAM_LUT_INDEX; \
+ uint32_t CM_DGAM_LUT_DATA; \
+ uint32_t CM_CONTROL; \
+ uint32_t CM_DGAM_CONTROL; \
+ uint32_t CM_IGAM_CONTROL; \
+ uint32_t CM_IGAM_LUT_RW_CONTROL; \
+ uint32_t CM_IGAM_LUT_RW_INDEX; \
+ uint32_t CM_IGAM_LUT_SEQ_COLOR; \
+ uint32_t FORMAT_CONTROL; \
+ uint32_t CNVC_SURFACE_PIXEL_FORMAT; \
+ uint32_t CURSOR_CONTROL; \
+ uint32_t CURSOR0_CONTROL; \
+ uint32_t CURSOR0_COLOR0; \
+ uint32_t CURSOR0_COLOR1; \
+ uint32_t DPP_CONTROL; \
+ uint32_t CM_HDR_MULT_COEF;
struct dcn_dpp_registers {
- uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT;
- uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM;
- uint32_t OTG_H_BLANK;
- uint32_t OTG_V_BLANK;
- uint32_t SCL_MODE;
- uint32_t LB_DATA_FORMAT;
- uint32_t LB_MEMORY_CTRL;
- uint32_t DSCL_AUTOCAL;
- uint32_t SCL_BLACK_OFFSET;
- uint32_t DSCL_CONTROL;
- uint32_t SCL_TAP_CONTROL;
- uint32_t SCL_COEF_RAM_TAP_SELECT;
- uint32_t SCL_COEF_RAM_TAP_DATA;
- uint32_t DSCL_2TAP_CONTROL;
- uint32_t MPC_SIZE;
- uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
- uint32_t SCL_VERT_FILTER_SCALE_RATIO;
- uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C;
- uint32_t SCL_VERT_FILTER_SCALE_RATIO_C;
- uint32_t SCL_HORZ_FILTER_INIT;
- uint32_t SCL_HORZ_FILTER_INIT_C;
- uint32_t SCL_VERT_FILTER_INIT;
- uint32_t SCL_VERT_FILTER_INIT_BOT;
- uint32_t SCL_VERT_FILTER_INIT_C;
- uint32_t SCL_VERT_FILTER_INIT_BOT_C;
- uint32_t RECOUT_START;
- uint32_t RECOUT_SIZE;
- uint32_t CM_GAMUT_REMAP_CONTROL;
- uint32_t CM_GAMUT_REMAP_C11_C12;
- uint32_t CM_GAMUT_REMAP_C33_C34;
- uint32_t CM_COMA_C11_C12;
- uint32_t CM_COMA_C33_C34;
- uint32_t CM_COMB_C11_C12;
- uint32_t CM_COMB_C33_C34;
- uint32_t CM_OCSC_CONTROL;
- uint32_t CM_OCSC_C11_C12;
- uint32_t CM_OCSC_C33_C34;
- uint32_t CM_MEM_PWR_CTRL;
- uint32_t CM_RGAM_LUT_DATA;
- uint32_t CM_RGAM_LUT_WRITE_EN_MASK;
- uint32_t CM_RGAM_LUT_INDEX;
- uint32_t CM_RGAM_RAMB_START_CNTL_B;
- uint32_t CM_RGAM_RAMB_START_CNTL_G;
- uint32_t CM_RGAM_RAMB_START_CNTL_R;
- uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B;
- uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G;
- uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R;
- uint32_t CM_RGAM_RAMB_END_CNTL1_B;
- uint32_t CM_RGAM_RAMB_END_CNTL2_B;
- uint32_t CM_RGAM_RAMB_END_CNTL1_G;
- uint32_t CM_RGAM_RAMB_END_CNTL2_G;
- uint32_t CM_RGAM_RAMB_END_CNTL1_R;
- uint32_t CM_RGAM_RAMB_END_CNTL2_R;
- uint32_t CM_RGAM_RAMB_REGION_0_1;
- uint32_t CM_RGAM_RAMB_REGION_32_33;
- uint32_t CM_RGAM_RAMA_START_CNTL_B;
- uint32_t CM_RGAM_RAMA_START_CNTL_G;
- uint32_t CM_RGAM_RAMA_START_CNTL_R;
- uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B;
- uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G;
- uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R;
- uint32_t CM_RGAM_RAMA_END_CNTL1_B;
- uint32_t CM_RGAM_RAMA_END_CNTL2_B;
- uint32_t CM_RGAM_RAMA_END_CNTL1_G;
- uint32_t CM_RGAM_RAMA_END_CNTL2_G;
- uint32_t CM_RGAM_RAMA_END_CNTL1_R;
- uint32_t CM_RGAM_RAMA_END_CNTL2_R;
- uint32_t CM_RGAM_RAMA_REGION_0_1;
- uint32_t CM_RGAM_RAMA_REGION_32_33;
- uint32_t CM_RGAM_CONTROL;
- uint32_t CM_CMOUT_CONTROL;
- uint32_t OBUF_CONTROL;
- uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK;
- uint32_t CM_BLNDGAM_CONTROL;
- uint32_t CM_BLNDGAM_RAMB_START_CNTL_B;
- uint32_t CM_BLNDGAM_RAMB_START_CNTL_G;
- uint32_t CM_BLNDGAM_RAMB_START_CNTL_R;
- uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B;
- uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G;
- uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R;
- uint32_t CM_BLNDGAM_RAMB_REGION_0_1;
- uint32_t CM_BLNDGAM_RAMB_REGION_2_3;
- uint32_t CM_BLNDGAM_RAMB_REGION_4_5;
- uint32_t CM_BLNDGAM_RAMB_REGION_6_7;
- uint32_t CM_BLNDGAM_RAMB_REGION_8_9;
- uint32_t CM_BLNDGAM_RAMB_REGION_10_11;
- uint32_t CM_BLNDGAM_RAMB_REGION_12_13;
- uint32_t CM_BLNDGAM_RAMB_REGION_14_15;
- uint32_t CM_BLNDGAM_RAMB_REGION_16_17;
- uint32_t CM_BLNDGAM_RAMB_REGION_18_19;
- uint32_t CM_BLNDGAM_RAMB_REGION_20_21;
- uint32_t CM_BLNDGAM_RAMB_REGION_22_23;
- uint32_t CM_BLNDGAM_RAMB_REGION_24_25;
- uint32_t CM_BLNDGAM_RAMB_REGION_26_27;
- uint32_t CM_BLNDGAM_RAMB_REGION_28_29;
- uint32_t CM_BLNDGAM_RAMB_REGION_30_31;
- uint32_t CM_BLNDGAM_RAMB_REGION_32_33;
- uint32_t CM_BLNDGAM_RAMA_START_CNTL_B;
- uint32_t CM_BLNDGAM_RAMA_START_CNTL_G;
- uint32_t CM_BLNDGAM_RAMA_START_CNTL_R;
- uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B;
- uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G;
- uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R;
- uint32_t CM_BLNDGAM_RAMA_REGION_0_1;
- uint32_t CM_BLNDGAM_RAMA_REGION_2_3;
- uint32_t CM_BLNDGAM_RAMA_REGION_4_5;
- uint32_t CM_BLNDGAM_RAMA_REGION_6_7;
- uint32_t CM_BLNDGAM_RAMA_REGION_8_9;
- uint32_t CM_BLNDGAM_RAMA_REGION_10_11;
- uint32_t CM_BLNDGAM_RAMA_REGION_12_13;
- uint32_t CM_BLNDGAM_RAMA_REGION_14_15;
- uint32_t CM_BLNDGAM_RAMA_REGION_16_17;
- uint32_t CM_BLNDGAM_RAMA_REGION_18_19;
- uint32_t CM_BLNDGAM_RAMA_REGION_20_21;
- uint32_t CM_BLNDGAM_RAMA_REGION_22_23;
- uint32_t CM_BLNDGAM_RAMA_REGION_24_25;
- uint32_t CM_BLNDGAM_RAMA_REGION_26_27;
- uint32_t CM_BLNDGAM_RAMA_REGION_28_29;
- uint32_t CM_BLNDGAM_RAMA_REGION_30_31;
- uint32_t CM_BLNDGAM_RAMA_REGION_32_33;
- uint32_t CM_BLNDGAM_LUT_INDEX;
- uint32_t CM_BLNDGAM_LUT_DATA;
- uint32_t CM_3DLUT_MODE;
- uint32_t CM_3DLUT_INDEX;
- uint32_t CM_3DLUT_DATA;
- uint32_t CM_3DLUT_DATA_30BIT;
- uint32_t CM_3DLUT_READ_WRITE_CONTROL;
- uint32_t CM_SHAPER_LUT_WRITE_EN_MASK;
- uint32_t CM_SHAPER_CONTROL;
- uint32_t CM_SHAPER_RAMB_START_CNTL_B;
- uint32_t CM_SHAPER_RAMB_START_CNTL_G;
- uint32_t CM_SHAPER_RAMB_START_CNTL_R;
- uint32_t CM_SHAPER_RAMB_END_CNTL_B;
- uint32_t CM_SHAPER_RAMB_END_CNTL_G;
- uint32_t CM_SHAPER_RAMB_END_CNTL_R;
- uint32_t CM_SHAPER_RAMB_REGION_0_1;
- uint32_t CM_SHAPER_RAMB_REGION_2_3;
- uint32_t CM_SHAPER_RAMB_REGION_4_5;
- uint32_t CM_SHAPER_RAMB_REGION_6_7;
- uint32_t CM_SHAPER_RAMB_REGION_8_9;
- uint32_t CM_SHAPER_RAMB_REGION_10_11;
- uint32_t CM_SHAPER_RAMB_REGION_12_13;
- uint32_t CM_SHAPER_RAMB_REGION_14_15;
- uint32_t CM_SHAPER_RAMB_REGION_16_17;
- uint32_t CM_SHAPER_RAMB_REGION_18_19;
- uint32_t CM_SHAPER_RAMB_REGION_20_21;
- uint32_t CM_SHAPER_RAMB_REGION_22_23;
- uint32_t CM_SHAPER_RAMB_REGION_24_25;
- uint32_t CM_SHAPER_RAMB_REGION_26_27;
- uint32_t CM_SHAPER_RAMB_REGION_28_29;
- uint32_t CM_SHAPER_RAMB_REGION_30_31;
- uint32_t CM_SHAPER_RAMB_REGION_32_33;
- uint32_t CM_SHAPER_RAMA_START_CNTL_B;
- uint32_t CM_SHAPER_RAMA_START_CNTL_G;
- uint32_t CM_SHAPER_RAMA_START_CNTL_R;
- uint32_t CM_SHAPER_RAMA_END_CNTL_B;
- uint32_t CM_SHAPER_RAMA_END_CNTL_G;
- uint32_t CM_SHAPER_RAMA_END_CNTL_R;
- uint32_t CM_SHAPER_RAMA_REGION_0_1;
- uint32_t CM_SHAPER_RAMA_REGION_2_3;
- uint32_t CM_SHAPER_RAMA_REGION_4_5;
- uint32_t CM_SHAPER_RAMA_REGION_6_7;
- uint32_t CM_SHAPER_RAMA_REGION_8_9;
- uint32_t CM_SHAPER_RAMA_REGION_10_11;
- uint32_t CM_SHAPER_RAMA_REGION_12_13;
- uint32_t CM_SHAPER_RAMA_REGION_14_15;
- uint32_t CM_SHAPER_RAMA_REGION_16_17;
- uint32_t CM_SHAPER_RAMA_REGION_18_19;
- uint32_t CM_SHAPER_RAMA_REGION_20_21;
- uint32_t CM_SHAPER_RAMA_REGION_22_23;
- uint32_t CM_SHAPER_RAMA_REGION_24_25;
- uint32_t CM_SHAPER_RAMA_REGION_26_27;
- uint32_t CM_SHAPER_RAMA_REGION_28_29;
- uint32_t CM_SHAPER_RAMA_REGION_30_31;
- uint32_t CM_SHAPER_RAMA_REGION_32_33;
- uint32_t CM_SHAPER_LUT_INDEX;
- uint32_t CM_SHAPER_LUT_DATA;
- uint32_t CM_ICSC_CONTROL;
- uint32_t CM_ICSC_C11_C12;
- uint32_t CM_ICSC_C33_C34;
- uint32_t CM_DGAM_RAMB_START_CNTL_B;
- uint32_t CM_DGAM_RAMB_START_CNTL_G;
- uint32_t CM_DGAM_RAMB_START_CNTL_R;
- uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B;
- uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G;
- uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R;
- uint32_t CM_DGAM_RAMB_END_CNTL1_B;
- uint32_t CM_DGAM_RAMB_END_CNTL2_B;
- uint32_t CM_DGAM_RAMB_END_CNTL1_G;
- uint32_t CM_DGAM_RAMB_END_CNTL2_G;
- uint32_t CM_DGAM_RAMB_END_CNTL1_R;
- uint32_t CM_DGAM_RAMB_END_CNTL2_R;
- uint32_t CM_DGAM_RAMB_REGION_0_1;
- uint32_t CM_DGAM_RAMB_REGION_14_15;
- uint32_t CM_DGAM_RAMA_START_CNTL_B;
- uint32_t CM_DGAM_RAMA_START_CNTL_G;
- uint32_t CM_DGAM_RAMA_START_CNTL_R;
- uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B;
- uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G;
- uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R;
- uint32_t CM_DGAM_RAMA_END_CNTL1_B;
- uint32_t CM_DGAM_RAMA_END_CNTL2_B;
- uint32_t CM_DGAM_RAMA_END_CNTL1_G;
- uint32_t CM_DGAM_RAMA_END_CNTL2_G;
- uint32_t CM_DGAM_RAMA_END_CNTL1_R;
- uint32_t CM_DGAM_RAMA_END_CNTL2_R;
- uint32_t CM_DGAM_RAMA_REGION_0_1;
- uint32_t CM_DGAM_RAMA_REGION_14_15;
- uint32_t CM_DGAM_LUT_WRITE_EN_MASK;
- uint32_t CM_DGAM_LUT_INDEX;
- uint32_t CM_DGAM_LUT_DATA;
- uint32_t CM_CONTROL;
- uint32_t CM_DGAM_CONTROL;
- uint32_t CM_IGAM_CONTROL;
- uint32_t CM_IGAM_LUT_RW_CONTROL;
- uint32_t CM_IGAM_LUT_RW_INDEX;
- uint32_t CM_IGAM_LUT_SEQ_COLOR;
- uint32_t FORMAT_CONTROL;
- uint32_t CNVC_SURFACE_PIXEL_FORMAT;
- uint32_t CURSOR_CONTROL;
- uint32_t CURSOR0_CONTROL;
- uint32_t CURSOR0_COLOR0;
- uint32_t CURSOR0_COLOR1;
+ DPP_COMMON_REG_VARIABLE_LIST
};
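Editor's note: hoisting the register fields into DPP_COMMON_REG_VARIABLE_LIST lets other register structs reuse the same list instead of repeating it. A hedged sketch of how a hypothetical variant could build on it (struct and register names are illustrative, not from the patch):

	/* Illustrative only: a variant register struct reusing the shared list
	 * and appending its own register. The macro already terminates each
	 * member with a semicolon, so extra members can simply follow it.
	 */
	struct hypothetical_dppN_registers {
		DPP_COMMON_REG_VARIABLE_LIST
		uint32_t VARIANT_SPECIFIC_REG; /* made-up name for illustration */
	};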
struct dcn10_dpp {
@@ -1266,6 +1283,8 @@ struct dcn10_dpp {
int lb_memory_size;
int lb_bits_per_entry;
bool is_write_to_ram_a_safe;
+ struct scaler_data scl_data;
+ struct pwl_params pwl_data;
};
enum dcn10_input_csc_select {
@@ -1274,6 +1293,16 @@ enum dcn10_input_csc_select {
INPUT_CSC_SELECT_COMA
};
+void dpp1_set_cursor_attributes(
+ struct dpp *dpp_base,
+ enum dc_cursor_color_format color_format);
+
+void dpp1_set_cursor_position(
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+ uint32_t width);
+
bool dpp1_dscl_is_lb_conf_valid(
int ceil_vratio,
int num_partitions,
@@ -1310,7 +1339,12 @@ void dpp1_power_on_degamma_lut(
void dpp1_program_input_csc(
struct dpp *dpp_base,
enum dc_color_space color_space,
- enum dcn10_input_csc_select select);
+ enum dcn10_input_csc_select select,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dpp1_program_bias_and_scale(
+ struct dpp *dpp_base,
+ struct dc_bias_and_scale *params);
void dpp1_program_input_lut(
struct dpp *dpp_base,
@@ -1356,7 +1390,7 @@ void dpp1_cm_program_regamma_lutb_settings(
const struct pwl_params *params);
void dpp1_cm_set_output_csc_adjustment(
struct dpp *dpp_base,
- const struct out_csc_color_matrix *tbl_entry);
+ const uint16_t *regval);
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
@@ -1372,11 +1406,22 @@ void dpp1_dscl_set_scaler_manual_scale(
void dpp1_cnv_setup (
struct dpp *dpp_base,
- enum surface_pixel_format input_format,
- enum expansion_mode mode);
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space);
void dpp1_full_bypass(struct dpp *dpp_base);
+void dpp1_dppclk_control(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+ bool enable);
+
+void dpp1_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier);
+
void dpp1_construct(struct dcn10_dpp *dpp1,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index ed1216b..fb32975e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -49,6 +49,8 @@
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
struct dcn10_input_csc_matrix {
enum dc_color_space color_space;
uint16_t regval[12];
@@ -117,8 +119,6 @@ static const struct dcn10_input_csc_matrix dcn10_input_csc_matrix[] = {
0x2568, 0x43ee, 0xdbb2} }
};
-
-
static void program_gamut_remap(
struct dcn10_dpp *dpp,
const uint16_t *regval,
@@ -193,6 +193,7 @@ void dpp1_cm_set_gamut_remap(
const struct dpp_grph_csc_adjustment *adjust)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ int i = 0;
if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
/* Bypass if type is bypass or hw */
@@ -201,20 +202,8 @@ void dpp1_cm_set_gamut_remap(
struct fixed31_32 arr_matrix[12];
uint16_t arr_reg_val[12];
- arr_matrix[0] = adjust->temperature_matrix[0];
- arr_matrix[1] = adjust->temperature_matrix[1];
- arr_matrix[2] = adjust->temperature_matrix[2];
- arr_matrix[3] = dal_fixed31_32_zero;
-
- arr_matrix[4] = adjust->temperature_matrix[3];
- arr_matrix[5] = adjust->temperature_matrix[4];
- arr_matrix[6] = adjust->temperature_matrix[5];
- arr_matrix[7] = dal_fixed31_32_zero;
-
- arr_matrix[8] = adjust->temperature_matrix[6];
- arr_matrix[9] = adjust->temperature_matrix[7];
- arr_matrix[10] = adjust->temperature_matrix[8];
- arr_matrix[11] = dal_fixed31_32_zero;
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
convert_float_matrix(
arr_reg_val, arr_matrix, 12);
@@ -223,39 +212,63 @@ void dpp1_cm_set_gamut_remap(
}
}
+static void dpp1_cm_program_color_matrix(
+ struct dcn10_dpp *dpp,
+ const uint16_t *regval)
+{
+ uint32_t mode;
+ struct color_matrices_reg gam_regs;
+
+ REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ mode = 4;
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
+
+ if (mode == 4) {
+
+ gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
enum dc_color_space colorspace)
{
-
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- uint32_t ocsc_mode = 0;
+ const uint16_t *regval = NULL;
+ int arr_size;
+ uint32_t ocsc_mode = 4;
- switch (colorspace) {
- case COLOR_SPACE_SRGB:
- case COLOR_SPACE_2020_RGB_FULLRANGE:
- ocsc_mode = 0;
- break;
- case COLOR_SPACE_SRGB_LIMITED:
- case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
- ocsc_mode = 1;
- break;
- case COLOR_SPACE_YCBCR601:
- case COLOR_SPACE_YCBCR601_LIMITED:
- ocsc_mode = 2;
- break;
- case COLOR_SPACE_YCBCR709:
- case COLOR_SPACE_YCBCR709_LIMITED:
- case COLOR_SPACE_2020_YCBCR:
- ocsc_mode = 3;
- break;
- case COLOR_SPACE_UNKNOWN:
- default:
- break;
+ regval = find_color_matrix(colorspace, &arr_size);
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
}
-
+ dpp1_cm_program_color_matrix(dpp, regval);
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
-
}
static void dpp1_cm_get_reg_field(
@@ -285,114 +298,67 @@ static void dpp1_cm_get_reg_field(
reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}
-static void dpp1_cm_program_color_matrix(
+static void dpp1_cm_get_degamma_reg_field(
struct dcn10_dpp *dpp,
- const struct out_csc_color_matrix *tbl_entry)
+ struct xfer_func_reg *reg)
{
- uint32_t mode;
- struct color_matrices_reg gam_regs;
-
- REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
-
- if (tbl_entry == NULL) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
- gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
- gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
- gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
-
- if (mode == 4) {
-
- gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
- gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
-
- cm_helper_program_color_matrices(
- dpp->base.ctx,
- tbl_entry->regval,
- &gam_regs);
-
- } else {
-
- gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
- gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
-
- cm_helper_program_color_matrices(
- dpp->base.ctx,
- tbl_entry->regval,
- &gam_regs);
- }
+ reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B;
+ reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
+ reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
+ reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}
-
void dpp1_cm_set_output_csc_adjustment(
struct dpp *dpp_base,
- const struct out_csc_color_matrix *tbl_entry)
+ const uint16_t *regval)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- //enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
uint32_t ocsc_mode = 4;
-
- /**
- *if (tbl_entry != NULL) {
- * switch (tbl_entry->color_space) {
- * case COLOR_SPACE_SRGB:
- * case COLOR_SPACE_2020_RGB_FULLRANGE:
- * ocsc_mode = 0;
- * break;
- * case COLOR_SPACE_SRGB_LIMITED:
- * case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
- * ocsc_mode = 1;
- * break;
- * case COLOR_SPACE_YCBCR601:
- * case COLOR_SPACE_YCBCR601_LIMITED:
- * ocsc_mode = 2;
- * break;
- * case COLOR_SPACE_YCBCR709:
- * case COLOR_SPACE_YCBCR709_LIMITED:
- * case COLOR_SPACE_2020_YCBCR:
- * ocsc_mode = 3;
- * break;
- * case COLOR_SPACE_UNKNOWN:
- * default:
- * break;
- * }
- *}
- */
-
+ dpp1_cm_program_color_matrix(dpp, regval);
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
- dpp1_cm_program_color_matrix(dpp, tbl_entry);
}
-void dpp1_cm_power_on_regamma_lut(
- struct dpp *dpp_base,
- bool power_on)
+void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
+ bool power_on)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
REG_SET(CM_MEM_PWR_CTRL, 0,
- RGAM_MEM_PWR_FORCE, power_on == true ? 0:1);
+ RGAM_MEM_PWR_FORCE, power_on == true ? 0:1);
}
-void dpp1_cm_program_regamma_lut(
- struct dpp *dpp_base,
- const struct pwl_result_data *rgb,
- uint32_t num)
+void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
{
uint32_t i;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
for (i = 0 ; i < num; i++) {
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);
- REG_SET(CM_RGAM_LUT_DATA, 0,
- CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
- REG_SET(CM_RGAM_LUT_DATA, 0,
- CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
- REG_SET(CM_RGAM_LUT_DATA, 0,
- CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);
}
@@ -471,7 +437,8 @@ void dpp1_cm_program_regamma_lutb_settings(
void dpp1_program_input_csc(
struct dpp *dpp_base,
enum dc_color_space color_space,
- enum dcn10_input_csc_select select)
+ enum dcn10_input_csc_select select,
+ const struct out_csc_color_matrix *tbl_entry)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
int i;
@@ -485,15 +452,19 @@ void dpp1_program_input_csc(
return;
}
- for (i = 0; i < arr_size; i++)
- if (dcn10_input_csc_matrix[i].color_space == color_space) {
- regval = dcn10_input_csc_matrix[i].regval;
- break;
+ if (tbl_entry == NULL) {
+ for (i = 0; i < arr_size; i++)
+ if (dcn10_input_csc_matrix[i].color_space == color_space) {
+ regval = dcn10_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
}
-
- if (regval == NULL) {
- BREAK_TO_DEBUGGER();
- return;
+ } else {
+ regval = tbl_entry->regval;
}
if (select == INPUT_CSC_SELECT_COMA)
@@ -528,6 +499,27 @@ void dpp1_program_input_csc(
}
}
+// keep here for now; decide on multi-DCE support later
+void dpp1_program_bias_and_scale(
+ struct dpp *dpp_base,
+ struct dc_bias_and_scale *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_SET_2(CM_BNS_VALUES_R, 0,
+ CM_BNS_SCALE_R, params->scale_red,
+ CM_BNS_BIAS_R, params->bias_red);
+
+ REG_SET_2(CM_BNS_VALUES_G, 0,
+ CM_BNS_SCALE_G, params->scale_green,
+ CM_BNS_BIAS_G, params->bias_green);
+
+ REG_SET_2(CM_BNS_VALUES_B, 0,
+ CM_BNS_SCALE_B, params->scale_blue,
+ CM_BNS_BIAS_B, params->bias_blue);
+
+}
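+
+dpp1_program_bias_and_scale() simply writes the per-channel scale/bias pairs into the CM_BNS_VALUES_{R,G,B} registers. A minimal usage sketch, assuming the dc_bias_and_scale fields are already in the register's fixed-point encoding (the 0x2000-as-unity value is an assumption, not defined by this patch):
+
+	struct dc_bias_and_scale bns = {
+		.scale_red   = 0x2000,	/* assumed unity-scale encoding */
+		.bias_red    = 0,
+		.scale_green = 0x2000,
+		.bias_green  = 0,
+		.scale_blue  = 0x2000,
+		.bias_blue   = 0,
+	};
+
+	dpp1_program_bias_and_scale(dpp_base, &bns);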
+
/*program de gamma RAM B*/
void dpp1_program_degamma_lutb_settings(
struct dpp *dpp_base,
@@ -536,7 +528,7 @@ void dpp1_program_degamma_lutb_settings(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
struct xfer_func_reg gam_regs;
- dpp1_cm_get_reg_field(dpp, &gam_regs);
+ dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);
gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
@@ -565,7 +557,7 @@ void dpp1_program_degamma_luta_settings(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
struct xfer_func_reg gam_regs;
- dpp1_cm_get_reg_field(dpp, &gam_regs);
+ dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);
gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
@@ -812,3 +804,12 @@ void dpp1_program_input_lut(
REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
}
+
+void dpp1_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index cbad3641..3eb824d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -648,6 +648,13 @@ void dpp1_dscl_set_scaler_manual_scale(
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+ if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
+ return;
+
+ PERF_TRACE();
+
+ dpp->scl_data = *scl_data;
+
/* Recout */
dpp1_dscl_set_recout(dpp, &scl_data->recout);
@@ -699,4 +706,5 @@ void dpp1_dscl_set_scaler_manual_scale(
SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+ PERF_TRACE();
}
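
The hunk above caches the last-programmed scaler data in dpp->scl_data and returns early when the incoming scl_data is byte-for-byte identical, so the register writes between the two PERF_TRACE() markers only run on real changes. A generic sketch of the same skip-if-unchanged pattern, with hypothetical names:

	struct scaler_cfg {
		int width;
		int height;
		int taps;
	};

	static struct scaler_cfg last_cfg;

	static void program_scaler(const struct scaler_cfg *cfg)
	{
		/* nothing changed since the last call: skip the expensive writes */
		if (memcmp(&last_cfg, cfg, sizeof(*cfg)) == 0)
			return;

		last_cfg = *cfg;
		/* ... program the scaler registers here ... */
	}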
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
new file mode 100644
index 0000000..738f67f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn10_hubp.h"
+#include "dcn10_hubbub.h"
+#include "reg_helper.h"
+
+#define CTX \
+ hubbub->ctx
+#define DC_LOGGER \
+ hubbub->ctx->logger
+#define REG(reg)\
+ hubbub->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubbub->shifts->field_name, hubbub->masks->field_name
+
+void hubbub1_wm_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_wm *wm)
+{
+ struct dcn_hubbub_wm_set *s;
+
+ memset(wm, 0, sizeof(struct dcn_hubbub_wm));
+
+ s = &wm->sets[0];
+ s->wm_set = 0;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
+ }
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+
+ s = &wm->sets[1];
+ s->wm_set = 1;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
+ }
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+
+ s = &wm->sets[2];
+ s->wm_set = 2;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
+ }
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+
+ s = &wm->sets[3];
+ s->wm_set = 3;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
+ }
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+}
+
+bool hubbub1_verify_allow_pstate_change_high(
+ struct hubbub *hubbub)
+{
+ /* pstate latency is ~20us, so if we wait over 40us and pstate allow is
+ * still not asserted, we are probably stuck and going to hang.
+ *
+ * TODO: figure out why the pstate switch takes ~100us on Linux;
+ * the cause of the longer latency is currently unknown.
+ */
+ static unsigned int pstate_wait_timeout_us = 200;
+ static unsigned int pstate_wait_expected_timeout_us = 40;
+ static unsigned int max_sampled_pstate_wait_us; /* data collection */
+ static bool forced_pstate_allow; /* helps with reverting the workaround */
+
+ unsigned int debug_data;
+ unsigned int i;
+
+ if (forced_pstate_allow) {
+ /* pstate allow was forced on during the previous call to
+ * verify_allow_pstate_change_high() to prevent a hang; disable
+ * the force here so the real status can be checked.
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
+ forced_pstate_allow = false;
+ }
+
+ /* RV1:
+ * dchubbubdebugind, at: 0x7
+ * description "3-0: Pipe0 cursor0 QOS
+ * 7-4: Pipe1 cursor0 QOS
+ * 11-8: Pipe2 cursor0 QOS
+ * 15-12: Pipe3 cursor0 QOS
+ * 16: Pipe0 Plane0 Allow Pstate Change
+ * 17: Pipe1 Plane0 Allow Pstate Change
+ * 18: Pipe2 Plane0 Allow Pstate Change
+ * 19: Pipe3 Plane0 Allow Pstate Change
+ * 20: Pipe0 Plane1 Allow Pstate Change
+ * 21: Pipe1 Plane1 Allow Pstate Change
+ * 22: Pipe2 Plane1 Allow Pstate Change
+ * 23: Pipe3 Plane1 Allow Pstate Change
+ * 24: Pipe0 cursor0 Allow Pstate Change
+ * 25: Pipe1 cursor0 Allow Pstate Change
+ * 26: Pipe2 cursor0 Allow Pstate Change
+ * 27: Pipe3 cursor0 Allow Pstate Change
+ * 28: WB0 Allow Pstate Change
+ * 29: WB1 Allow Pstate Change
+ * 30: Arbiter's allow_pstate_change
+ * 31: SOC pstate change request
+ */
+
+
+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate);
+
+ for (i = 0; i < pstate_wait_timeout_us; i++) {
+ debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
+
+ if (debug_data & (1 << 30)) {
+
+ if (i > pstate_wait_expected_timeout_us)
+ DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
+ i);
+
+ return true;
+ }
+ if (max_sampled_pstate_wait_us < i)
+ max_sampled_pstate_wait_us = i;
+
+ udelay(1);
+ }
+
+ /* force pstate allow to prevent system hang
+ * and break to debugger to investigate
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
+ forced_pstate_allow = true;
+
+ DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
+ debug_data);
+
+ return false;
+}
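+
+Callers treat a false return as a hard error during bring-up. A minimal sketch of how a hw sequencer might assert on it (dc->debug.sanity_checks is an assumed debug flag, not part of this patch):
+
+	if (dc->debug.sanity_checks) {		/* assumed debug gate */
+		if (!hubbub1_verify_allow_pstate_change_high(hubbub))
+			BREAK_TO_DEBUGGER();
+	}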
+
+static uint32_t convert_and_clamp(
+ uint32_t wm_ns,
+ uint32_t refclk_mhz,
+ uint32_t clamp_value)
+{
+ uint32_t ret_val = 0;
+ ret_val = wm_ns * refclk_mhz;
+ ret_val /= 1000;
+
+ if (ret_val > clamp_value)
+ ret_val = clamp_value;
+
+ return ret_val;
+}
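+
+convert_and_clamp() turns a watermark expressed in nanoseconds into refclk cycles (ns * MHz / 1000) and clamps the result to the register width. A quick worked example, assuming a 600 MHz refclk:
+
+	/* 800 ns at 600 MHz: 800 * 600 / 1000 = 480 refclk cycles,
+	 * well below the 21-bit clamp of 0x1fffff
+	 */
+	uint32_t cycles = convert_and_clamp(800, 600, 0x1fffff);	/* 480 */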
+
+
+void hubbub1_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz)
+{
+ uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0;
+ /*
+ * Need to clamp to the register maximum (i.e. no wrap);
+ * for DCN1, all watermark registers are 21 bits wide.
+ */
+ uint32_t prog_wm_value;
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
+
+ /* Repeat for watermark sets A, B, C and D. */
+ /* clock state A */
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.urgent_ns, prog_wm_value);
+
+ prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+
+ /* clock state B */
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.urgent_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ /* clock state C */
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.urgent_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ /* clock state D */
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.urgent_ns, prog_wm_value);
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+
+ REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
+
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, force_en);
+
+#if 0
+ REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+#endif
+}
+
+void hubbub1_update_dchub(
+ struct hubbub *hubbub,
+ struct dchub_init_data *dh_data)
+{
+ /* TODO: port code from dal2 */
+ switch (dh_data->fb_mode) {
+ case FRAME_BUFFER_MODE_ZFB_ONLY:
+ /* For the ZFB case, DCHUB FB BASE and TOP must be programmed upside down (base > top) to indicate ZFB mode */
+ REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
+ SDPIF_FB_TOP, 0);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
+ SDPIF_FB_BASE, 0x0FFFF);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_LOCAL_ONLY:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, 0);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, 0X03FFFF);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, 0);
+ break;
+ default:
+ break;
+ }
+
+ dh_data->dchub_initialzied = true;
+ dh_data->dchub_info_valid = false;
+}
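+
+The SDPIF AGP registers take addresses in 4 MB units, hence the >> 22 shifts above. A worked example for the ZFB aperture bounds, using an assumed 4 GB MC base address and a 256 MB ZFB region (illustrative values only):
+
+	uint64_t zfb_mc_base = 0x100000000ULL;		/* assumed MC base */
+	uint64_t zfb_size    = 256ULL * 1024 * 1024;	/* assumed region size */
+
+	uint32_t agp_bot = zfb_mc_base >> 22;			/* 0x400 */
+	uint32_t agp_top = (zfb_mc_base + zfb_size - 1) >> 22;	/* 0x43f */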
+
+void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
+{
+ uint32_t watermark_change_req;
+
+ REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);
+
+ if (watermark_change_req)
+ watermark_change_req = 0;
+ else
+ watermark_change_req = 1;
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
+}
+
+static const struct hubbub_funcs hubbub1_funcs = {
+ .update_dchub = hubbub1_update_dchub
+};
+
+void hubbub1_construct(struct hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask)
+{
+ hubbub->ctx = ctx;
+
+ hubbub->funcs = &hubbub1_funcs;
+
+ hubbub->regs = hubbub_regs;
+ hubbub->shifts = hubbub_shift;
+ hubbub->masks = hubbub_mask;
+
+ hubbub->debug_test_index_pstate = 0x7;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
new file mode 100644
index 0000000..a16e908
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HUBBUB_DCN10_H__
+#define __DC_HUBBUB_DCN10_H__
+
+#include "core_types.h"
+
+#define HUBHUB_REG_LIST_DCN()\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
+ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
+ SR(DCHUBBUB_ARB_SAT_LEVEL),\
+ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
+ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_TEST_DEBUG_INDEX), \
+ SR(DCHUBBUB_TEST_DEBUG_DATA)
+
+#define HUBBUB_SR_WATERMARK_REG_LIST()\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
+
+#define HUBBUB_REG_LIST_DCN10(id)\
+ HUBHUB_REG_LIST_DCN(), \
+ HUBBUB_SR_WATERMARK_REG_LIST(), \
+ SR(DCHUBBUB_SDPIF_FB_TOP),\
+ SR(DCHUBBUB_SDPIF_FB_BASE),\
+ SR(DCHUBBUB_SDPIF_FB_OFFSET),\
+ SR(DCHUBBUB_SDPIF_AGP_BASE),\
+ SR(DCHUBBUB_SDPIF_AGP_BOT),\
+ SR(DCHUBBUB_SDPIF_AGP_TOP)
+
+struct dcn_hubbub_registers {
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL;
+ uint32_t DCHUBBUB_ARB_SAT_LEVEL;
+ uint32_t DCHUBBUB_ARB_DF_REQ_OUTSTAND;
+ uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL;
+ uint32_t DCHUBBUB_ARB_DRAM_STATE_CNTL;
+ uint32_t DCHUBBUB_TEST_DEBUG_INDEX;
+ uint32_t DCHUBBUB_TEST_DEBUG_DATA;
+ uint32_t DCHUBBUB_SDPIF_FB_TOP;
+ uint32_t DCHUBBUB_SDPIF_FB_BASE;
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
+ uint32_t DCHUBBUB_SDPIF_AGP_BASE;
+ uint32_t DCHUBBUB_SDPIF_AGP_BOT;
+ uint32_t DCHUBBUB_SDPIF_AGP_TOP;
+ uint32_t DCHUBBUB_CRC_CTRL;
+};
+
+/* set field name */
+#define HUBBUB_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+
+#define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\
+ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh)
+
+#define HUBBUB_MASK_SH_LIST_DCN10(mask_sh)\
+ HUBBUB_MASK_SH_LIST_DCN(mask_sh), \
+ HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh)
+
+#define DCN_HUBBUB_REG_FIELD_LIST(type) \
+ type DCHUBBUB_GLOBAL_TIMER_ENABLE; \
+ type DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST;\
+ type DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE;\
+ type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE;\
+ type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE;\
+ type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE;\
+ type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE;\
+ type DCHUBBUB_ARB_SAT_LEVEL;\
+ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\
+ type DCHUBBUB_GLOBAL_TIMER_REFDIV;\
+ type SDPIF_FB_TOP;\
+ type SDPIF_FB_BASE;\
+ type SDPIF_FB_OFFSET;\
+ type SDPIF_AGP_BASE;\
+ type SDPIF_AGP_BOT;\
+ type SDPIF_AGP_TOP
+
+
+struct dcn_hubbub_shift {
+ DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn_hubbub_mask {
+ DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
+};
+
+struct dc;
+
+struct dcn_hubbub_wm_set {
+ uint32_t wm_set;
+ uint32_t data_urgent;
+ uint32_t pte_meta_urgent;
+ uint32_t sr_enter;
+ uint32_t sr_exit;
+ uint32_t dram_clk_chanage;
+};
+
+struct dcn_hubbub_wm {
+ struct dcn_hubbub_wm_set sets[4];
+};
+
+struct hubbub_funcs {
+ void (*update_dchub)(
+ struct hubbub *hubbub,
+ struct dchub_init_data *dh_data);
+};
+
+struct hubbub {
+ const struct hubbub_funcs *funcs;
+ struct dc_context *ctx;
+ const struct dcn_hubbub_registers *regs;
+ const struct dcn_hubbub_shift *shifts;
+ const struct dcn_hubbub_mask *masks;
+ unsigned int debug_test_index_pstate;
+};
+
+void hubbub1_update_dchub(
+ struct hubbub *hubbub,
+ struct dchub_init_data *dh_data);
+
+bool hubbub1_verify_allow_pstate_change_high(
+ struct hubbub *hubbub);
+
+void hubbub1_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz);
+
+void hubbub1_toggle_watermark_change_req(
+ struct hubbub *hubbub);
+
+void hubbub1_wm_read_state(struct hubbub *hubbub,
+ struct dcn_hubbub_wm *wm);
+
+void hubbub1_construct(struct hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index b13dee6..39b72f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -29,14 +29,14 @@
#include "dcn10_hubp.h"
#define REG(reg)\
- hubp1->mi_regs->reg
+ hubp1->hubp_regs->reg
#define CTX \
hubp1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
- hubp1->mi_shift->field_name, hubp1->mi_mask->field_name
+ hubp1->hubp_shift->field_name, hubp1->hubp_mask->field_name
void hubp1_set_blank(struct hubp *hubp, bool blank)
{
@@ -48,14 +48,36 @@ void hubp1_set_blank(struct hubp *hubp, bool blank)
HUBP_TTU_DISABLE, blank_en);
if (blank) {
- REG_WAIT(DCHUBP_CNTL,
- HUBP_NO_OUTSTANDING_REQ, 1,
- 1, 200);
+ uint32_t reg_val = REG_READ(DCHUBP_CNTL);
+
+ if (reg_val) {
+ /* init sequence workaround: if HUBP is power
+ * gated, this wait would time out.
+ *
+ * DCHUBP_CNTL was just written to a non-zero value;
+ * if it still reads back as 0, HUBP is gated.
+ */
+ REG_WAIT(DCHUBP_CNTL,
+ HUBP_NO_OUTSTANDING_REQ, 1,
+ 1, 200);
+ }
+
hubp->mpcc_id = 0xf;
hubp->opp_id = 0xf;
}
}
+static void hubp1_disconnect(struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL,
+ HUBP_TTU_DISABLE, 1);
+
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, 0);
+}
+
static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -88,10 +110,12 @@ static void hubp1_vready_workaround(struct hubp *hubp,
}
void hubp1_program_tiling(
- struct dcn10_hubp *hubp1,
+ struct hubp *hubp,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
REG_UPDATE_6(DCSURF_ADDR_CONFIG,
NUM_PIPES, log_2(info->gfx9.num_pipes),
NUM_BANKS, log_2(info->gfx9.num_banks),
@@ -108,13 +132,14 @@ void hubp1_program_tiling(
}
void hubp1_program_size_and_rotation(
- struct dcn10_hubp *hubp1,
+ struct hubp *hubp,
enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
/* Program data and meta surface pitch (calculation from addrlib)
@@ -170,9 +195,10 @@ void hubp1_program_size_and_rotation(
}
void hubp1_program_pixel_format(
- struct dcn10_hubp *hubp1,
+ struct hubp *hubp,
enum surface_pixel_format format)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t red_bar = 3;
uint32_t blue_bar = 2;
@@ -273,8 +299,9 @@ bool hubp1_program_surface_flip_and_addr(
if (address->grph.addr.quad_part == 0)
break;
- REG_UPDATE(DCSURF_SURFACE_CONTROL,
- PRIMARY_SURFACE_TMZ, address->tmz_surface);
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface);
if (address->grph.meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
@@ -299,8 +326,11 @@ bool hubp1_program_surface_flip_and_addr(
|| address->video_progressive.chroma_addr.quad_part == 0)
break;
- REG_UPDATE(DCSURF_SURFACE_CONTROL,
- PRIMARY_SURFACE_TMZ, address->tmz_surface);
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
if (address->video_progressive.luma_meta_addr.quad_part != 0) {
REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
@@ -342,8 +372,11 @@ bool hubp1_program_surface_flip_and_addr(
if (address->grph_stereo.right_addr.quad_part == 0)
break;
- REG_UPDATE(DCSURF_SURFACE_CONTROL,
- PRIMARY_SURFACE_TMZ, address->tmz_surface);
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
if (address->grph_stereo.right_meta_addr.quad_part != 0) {
@@ -416,13 +449,11 @@ void hubp1_program_surface_config(
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
{
- struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
-
hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
- hubp1_program_tiling(hubp1, tiling_info, format);
+ hubp1_program_tiling(hubp, tiling_info, format);
hubp1_program_size_and_rotation(
- hubp1, rotation, format, plane_size, dcc, horizontal_mirror);
- hubp1_program_pixel_format(hubp1, format);
+ hubp, rotation, format, plane_size, dcc, horizontal_mirror);
+ hubp1_program_pixel_format(hubp, format);
}
void hubp1_program_requestor(
@@ -757,42 +788,7 @@ void hubp1_read_state(struct dcn10_hubp *hubp1,
QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
}
-enum cursor_pitch {
- CURSOR_PITCH_64_PIXELS = 0,
- CURSOR_PITCH_128_PIXELS,
- CURSOR_PITCH_256_PIXELS
-};
-
-enum cursor_lines_per_chunk {
- CURSOR_LINE_PER_CHUNK_2 = 1,
- CURSOR_LINE_PER_CHUNK_4,
- CURSOR_LINE_PER_CHUNK_8,
- CURSOR_LINE_PER_CHUNK_16
-};
-
-static bool ippn10_cursor_program_control(
- struct dcn10_hubp *hubp1,
- bool pixel_data_invert,
- enum dc_cursor_color_format color_format)
-{
- if (REG(CURSOR_SETTINS))
- REG_SET_2(CURSOR_SETTINS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
- else
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
-
- return true;
-}
-
-static enum cursor_pitch ippn10_get_cursor_pitch(
- unsigned int pitch)
+enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch)
{
enum cursor_pitch hw_pitch;
@@ -815,7 +811,7 @@ static enum cursor_pitch ippn10_get_cursor_pitch(
return hw_pitch;
}
-static enum cursor_lines_per_chunk ippn10_get_lines_per_chunk(
+static enum cursor_lines_per_chunk hubp1_get_lines_per_chunk(
unsigned int cur_width,
enum dc_cursor_color_format format)
{
@@ -841,8 +837,8 @@ void hubp1_cursor_set_attributes(
const struct dc_cursor_attributes *attr)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- enum cursor_pitch hw_pitch = ippn10_get_cursor_pitch(attr->pitch);
- enum cursor_lines_per_chunk lpc = ippn10_get_lines_per_chunk(
+ enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
+ enum cursor_lines_per_chunk lpc = hubp1_get_lines_per_chunk(
attr->width, attr->color_format);
hubp->curs_attr = *attr;
@@ -855,13 +851,17 @@ void hubp1_cursor_set_attributes(
REG_UPDATE_2(CURSOR_SIZE,
CURSOR_WIDTH, attr->width,
CURSOR_HEIGHT, attr->height);
+
REG_UPDATE_3(CURSOR_CONTROL,
CURSOR_MODE, attr->color_format,
CURSOR_PITCH, hw_pitch,
CURSOR_LINES_PER_CHUNK, lpc);
- ippn10_cursor_program_control(hubp1,
- attr->attribute_flags.bits.INVERT_PIXEL_DATA,
- attr->color_format);
+
+ REG_SET_2(CURSOR_SETTINS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
}
void hubp1_cursor_set_position(
@@ -901,7 +901,8 @@ void hubp1_cursor_set_position(
cur_en = 0; /* not visible beyond left edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp1_cursor_set_attributes(hubp, &hubp->curs_attr);
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
@@ -918,6 +919,21 @@ void hubp1_cursor_set_position(
/* TODO Handle surface pixel formats other than 4:4:4 */
}
+void hubp1_clk_cntl(struct hubp *hubp, bool enable)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t clk_enable = enable ? 1 : 0;
+
+ REG_UPDATE(HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, clk_enable);
+}
+
+void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
+}
+
static struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
@@ -933,6 +949,9 @@ static struct hubp_funcs dcn10_hubp_funcs = {
.set_hubp_blank_en = hubp1_set_hubp_blank_en,
.set_cursor_attributes = hubp1_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
+ .hubp_disconnect = hubp1_disconnect,
+ .hubp_clk_cntl = hubp1_clk_cntl,
+ .hubp_vtg_sel = hubp1_vtg_sel,
};
/*****************************************/
@@ -943,15 +962,15 @@ void dcn10_hubp_construct(
struct dcn10_hubp *hubp1,
struct dc_context *ctx,
uint32_t inst,
- const struct dcn_mi_registers *mi_regs,
- const struct dcn_mi_shift *mi_shift,
- const struct dcn_mi_mask *mi_mask)
+ const struct dcn_mi_registers *hubp_regs,
+ const struct dcn_mi_shift *hubp_shift,
+ const struct dcn_mi_mask *hubp_mask)
{
hubp1->base.funcs = &dcn10_hubp_funcs;
hubp1->base.ctx = ctx;
- hubp1->mi_regs = mi_regs;
- hubp1->mi_shift = mi_shift;
- hubp1->mi_mask = mi_mask;
+ hubp1->hubp_regs = hubp_regs;
+ hubp1->hubp_shift = hubp_shift;
+ hubp1->hubp_mask = hubp_mask;
hubp1->base.inst = inst;
hubp1->base.opp_id = 0xf;
hubp1->base.mpcc_id = 0xf;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 66db453..4a3703e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -30,7 +30,7 @@
#define TO_DCN10_HUBP(hubp)\
container_of(hubp, struct dcn10_hubp, base)
-#define MI_REG_LIST_DCN(id)\
+#define HUBP_REG_LIST_DCN(id)\
SRI(DCHUBP_CNTL, HUBP, id),\
SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
SRI(DCSURF_ADDR_CONFIG, HUBP, id),\
@@ -96,10 +96,11 @@
SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
- SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id)
+ SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id),\
+ SRI(HUBP_CLK_CNTL, HUBP, id)
-#define MI_REG_LIST_DCN10(id)\
- MI_REG_LIST_DCN(id),\
+#define HUBP_REG_LIST_DCN10(id)\
+ HUBP_REG_LIST_DCN(id),\
SRI(PREFETCH_SETTINS, HUBPREQ, id),\
SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\
SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\
@@ -127,280 +128,284 @@
SRI(CURSOR_HOT_SPOT, CURSOR, id), \
SRI(CURSOR_DST_OFFSET, CURSOR, id)
-
-
-struct dcn_mi_registers {
- uint32_t DCHUBP_CNTL;
- uint32_t HUBPREQ_DEBUG_DB;
- uint32_t DCSURF_ADDR_CONFIG;
- uint32_t DCSURF_TILING_CONFIG;
- uint32_t DCSURF_SURFACE_PITCH;
- uint32_t DCSURF_SURFACE_PITCH_C;
- uint32_t DCSURF_SURFACE_CONFIG;
- uint32_t DCSURF_FLIP_CONTROL;
- uint32_t DCSURF_PRI_VIEWPORT_DIMENSION;
- uint32_t DCSURF_PRI_VIEWPORT_START;
- uint32_t DCSURF_SEC_VIEWPORT_DIMENSION;
- uint32_t DCSURF_SEC_VIEWPORT_START;
- uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C;
- uint32_t DCSURF_PRI_VIEWPORT_START_C;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
- uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
- uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
- uint32_t DCSURF_SURFACE_INUSE;
- uint32_t DCSURF_SURFACE_INUSE_HIGH;
- uint32_t DCSURF_SURFACE_INUSE_C;
- uint32_t DCSURF_SURFACE_INUSE_HIGH_C;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C;
- uint32_t DCSURF_SURFACE_CONTROL;
- uint32_t HUBPRET_CONTROL;
- uint32_t DCN_EXPANSION_MODE;
- uint32_t DCHUBP_REQ_SIZE_CONFIG;
- uint32_t DCHUBP_REQ_SIZE_CONFIG_C;
- uint32_t BLANK_OFFSET_0;
- uint32_t BLANK_OFFSET_1;
- uint32_t DST_DIMENSIONS;
- uint32_t DST_AFTER_SCALER;
- uint32_t PREFETCH_SETTINS;
- uint32_t PREFETCH_SETTINGS;
- uint32_t VBLANK_PARAMETERS_0;
- uint32_t REF_FREQ_TO_PIX_FREQ;
- uint32_t VBLANK_PARAMETERS_1;
- uint32_t VBLANK_PARAMETERS_3;
- uint32_t NOM_PARAMETERS_0;
- uint32_t NOM_PARAMETERS_1;
- uint32_t NOM_PARAMETERS_4;
- uint32_t NOM_PARAMETERS_5;
- uint32_t PER_LINE_DELIVERY_PRE;
- uint32_t PER_LINE_DELIVERY;
- uint32_t PREFETCH_SETTINS_C;
- uint32_t PREFETCH_SETTINGS_C;
- uint32_t VBLANK_PARAMETERS_2;
- uint32_t VBLANK_PARAMETERS_4;
- uint32_t NOM_PARAMETERS_2;
- uint32_t NOM_PARAMETERS_3;
- uint32_t NOM_PARAMETERS_6;
- uint32_t NOM_PARAMETERS_7;
- uint32_t DCN_TTU_QOS_WM;
- uint32_t DCN_GLOBAL_TTU_CNTL;
- uint32_t DCN_SURF0_TTU_CNTL0;
- uint32_t DCN_SURF0_TTU_CNTL1;
- uint32_t DCN_SURF1_TTU_CNTL0;
- uint32_t DCN_SURF1_TTU_CNTL1;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB;
- uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
- uint32_t DCN_VM_MX_L1_TLB_CNTL;
- uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR;
- uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR;
- uint32_t DCHUBBUB_SDPIF_FB_BASE;
- uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
- uint32_t DCN_VM_FB_LOCATION_TOP;
- uint32_t DCN_VM_FB_LOCATION_BASE;
- uint32_t DCN_VM_FB_OFFSET;
- uint32_t DCN_VM_AGP_BASE;
- uint32_t DCN_VM_AGP_BOT;
- uint32_t DCN_VM_AGP_TOP;
- uint32_t CURSOR_SETTINS;
- uint32_t CURSOR_SETTINGS;
- uint32_t CURSOR_SURFACE_ADDRESS_HIGH;
- uint32_t CURSOR_SURFACE_ADDRESS;
- uint32_t CURSOR_SIZE;
- uint32_t CURSOR_CONTROL;
- uint32_t CURSOR_POSITION;
- uint32_t CURSOR_HOT_SPOT;
- uint32_t CURSOR_DST_OFFSET;
-};
-
-#define MI_SF(reg_name, field_name, post_fix)\
+#define HUBP_COMMON_REG_VARIABLE_LIST \
+ uint32_t DCHUBP_CNTL; \
+ uint32_t HUBPREQ_DEBUG_DB; \
+ uint32_t DCSURF_ADDR_CONFIG; \
+ uint32_t DCSURF_TILING_CONFIG; \
+ uint32_t DCSURF_SURFACE_PITCH; \
+ uint32_t DCSURF_SURFACE_PITCH_C; \
+ uint32_t DCSURF_SURFACE_CONFIG; \
+ uint32_t DCSURF_FLIP_CONTROL; \
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION; \
+ uint32_t DCSURF_PRI_VIEWPORT_START; \
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION; \
+ uint32_t DCSURF_SEC_VIEWPORT_START; \
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \
+ uint32_t DCSURF_PRI_VIEWPORT_START_C; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SURFACE_INUSE; \
+ uint32_t DCSURF_SURFACE_INUSE_HIGH; \
+ uint32_t DCSURF_SURFACE_INUSE_C; \
+ uint32_t DCSURF_SURFACE_INUSE_HIGH_C; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \
+ uint32_t DCSURF_SURFACE_CONTROL; \
+ uint32_t HUBPRET_CONTROL; \
+ uint32_t DCN_EXPANSION_MODE; \
+ uint32_t DCHUBP_REQ_SIZE_CONFIG; \
+ uint32_t DCHUBP_REQ_SIZE_CONFIG_C; \
+ uint32_t BLANK_OFFSET_0; \
+ uint32_t BLANK_OFFSET_1; \
+ uint32_t DST_DIMENSIONS; \
+ uint32_t DST_AFTER_SCALER; \
+ uint32_t PREFETCH_SETTINS; \
+ uint32_t PREFETCH_SETTINGS; \
+ uint32_t VBLANK_PARAMETERS_0; \
+ uint32_t REF_FREQ_TO_PIX_FREQ; \
+ uint32_t VBLANK_PARAMETERS_1; \
+ uint32_t VBLANK_PARAMETERS_3; \
+ uint32_t NOM_PARAMETERS_0; \
+ uint32_t NOM_PARAMETERS_1; \
+ uint32_t NOM_PARAMETERS_4; \
+ uint32_t NOM_PARAMETERS_5; \
+ uint32_t PER_LINE_DELIVERY_PRE; \
+ uint32_t PER_LINE_DELIVERY; \
+ uint32_t PREFETCH_SETTINS_C; \
+ uint32_t PREFETCH_SETTINGS_C; \
+ uint32_t VBLANK_PARAMETERS_2; \
+ uint32_t VBLANK_PARAMETERS_4; \
+ uint32_t NOM_PARAMETERS_2; \
+ uint32_t NOM_PARAMETERS_3; \
+ uint32_t NOM_PARAMETERS_6; \
+ uint32_t NOM_PARAMETERS_7; \
+ uint32_t DCN_TTU_QOS_WM; \
+ uint32_t DCN_GLOBAL_TTU_CNTL; \
+ uint32_t DCN_SURF0_TTU_CNTL0; \
+ uint32_t DCN_SURF0_TTU_CNTL1; \
+ uint32_t DCN_SURF1_TTU_CNTL0; \
+ uint32_t DCN_SURF1_TTU_CNTL1; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB; \
+ uint32_t DCN_VM_MX_L1_TLB_CNTL; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR; \
+ uint32_t DCHUBBUB_SDPIF_FB_BASE; \
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET; \
+ uint32_t DCN_VM_FB_LOCATION_TOP; \
+ uint32_t DCN_VM_FB_LOCATION_BASE; \
+ uint32_t DCN_VM_FB_OFFSET; \
+ uint32_t DCN_VM_AGP_BASE; \
+ uint32_t DCN_VM_AGP_BOT; \
+ uint32_t DCN_VM_AGP_TOP; \
+ uint32_t CURSOR_SETTINS; \
+ uint32_t CURSOR_SETTINGS; \
+ uint32_t CURSOR_SURFACE_ADDRESS_HIGH; \
+ uint32_t CURSOR_SURFACE_ADDRESS; \
+ uint32_t CURSOR_SIZE; \
+ uint32_t CURSOR_CONTROL; \
+ uint32_t CURSOR_POSITION; \
+ uint32_t CURSOR_HOT_SPOT; \
+ uint32_t CURSOR_DST_OFFSET; \
+ uint32_t HUBP_CLK_CNTL
+
+#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
-#define MI_MASK_SH_LIST_DCN(mask_sh)\
- MI_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
- MI_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
- MI_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
- MI_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
- MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
- MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
- MI_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
- MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_SE, mask_sh),\
- MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_RB_PER_SE, mask_sh),\
- MI_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
- MI_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
- MI_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\
- MI_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
- MI_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
- MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
- MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
- MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
- MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
- MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT, mask_sh),\
- MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_X_START, mask_sh),\
- MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_Y_START, mask_sh),\
- MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_WIDTH, mask_sh),\
- MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_HEIGHT, mask_sh),\
- MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_X_START, mask_sh),\
- MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_Y_START, mask_sh),\
- MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_WIDTH_C, mask_sh),\
- MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
- MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
- MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS, SECONDARY_SURFACE_ADDRESS, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, PRIMARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS, PRIMARY_META_SURFACE_ADDRESS, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, SECONDARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C, SURFACE_INUSE_ADDRESS_HIGH_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE, SURFACE_EARLIEST_INUSE_ADDRESS, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
- MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
- MI_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
- MI_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
- MI_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
- MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
- MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
- MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
- MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, CRQ_EXPANSION_MODE, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, CHUNK_SIZE, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_CHUNK_SIZE, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_CHUNK_SIZE_C, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
- MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
- MI_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
- MI_SF(HUBPREQ0_BLANK_OFFSET_0, DLG_V_BLANK_END, mask_sh),\
- MI_SF(HUBPREQ0_BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, mask_sh),\
- MI_SF(HUBPREQ0_DST_DIMENSIONS, REFCYC_PER_HTOTAL, mask_sh),\
- MI_SF(HUBPREQ0_DST_AFTER_SCALER, REFCYC_X_AFTER_SCALER, mask_sh),\
- MI_SF(HUBPREQ0_DST_AFTER_SCALER, DST_Y_AFTER_SCALER, mask_sh),\
- MI_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_VM_VBLANK, mask_sh),\
- MI_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_ROW_VBLANK, mask_sh),\
- MI_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
- MI_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
- MI_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
- MI_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
- MI_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
- MI_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
- MI_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
- MI_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
- MI_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_C, mask_sh),\
- MI_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_L, mask_sh),\
- MI_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
- MI_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
- MI_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
- MI_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
- MI_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
- MI_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
- MI_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
- MI_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
- MI_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_HIGH_WM, mask_sh),\
- MI_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, mask_sh),\
- MI_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, QoS_LEVEL_FLIP, mask_sh),\
- MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
- MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
- MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
- MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh)
-
-#define MI_MASK_SH_LIST_DCN10(mask_sh)\
- MI_MASK_SH_LIST_DCN(mask_sh),\
- MI_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
- MI_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
- MI_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mask_sh),\
- MI_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh),\
- MI_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\
- MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh),\
- MI_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_DST_Y_OFFSET, mask_sh), \
- MI_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
- MI_SF(CURSOR0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
- MI_SF(CURSOR0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
- MI_SF(CURSOR0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
- MI_SF(CURSOR0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
- MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
- MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
- MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
- MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
- MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
- MI_SF(CURSOR0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
- MI_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
- MI_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
- MI_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
- MI_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
-
-#define DCN_MI_REG_FIELD_LIST(type) \
+#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_SE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_RB_PER_SE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_X_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_Y_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_WIDTH, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_X_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_Y_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS, SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, PRIMARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS, PRIMARY_META_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, SECONDARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C, SURFACE_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE, SURFACE_EARLIEST_INUSE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, CRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, DLG_V_BLANK_END, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_DIMENSIONS, REFCYC_PER_HTOTAL, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, REFCYC_X_AFTER_SCALER, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, DST_Y_AFTER_SCALER, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_VM_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_ROW_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_HIGH_WM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, QoS_LEVEL_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\
+ HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
+
+#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN(mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mask_sh),\
+ HUBP_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh),\
+ HUBP_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh),\
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
+
+#define DCN_HUBP_REG_FIELD_LIST(type) \
type HUBP_BLANK_EN;\
type HUBP_TTU_DISABLE;\
type HUBP_NO_OUTSTANDING_REQ;\
+ type HUBP_VTG_SEL;\
type HUBP_UNDERFLOW_STATUS;\
type NUM_PIPES;\
type NUM_BANKS;\
@@ -455,6 +460,13 @@ struct dcn_mi_registers {
type SURFACE_EARLIEST_INUSE_ADDRESS_C;\
type SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C;\
type PRIMARY_SURFACE_TMZ;\
+ type PRIMARY_SURFACE_TMZ_C;\
+ type SECONDARY_SURFACE_TMZ;\
+ type SECONDARY_SURFACE_TMZ_C;\
+ type PRIMARY_META_SURFACE_TMZ;\
+ type PRIMARY_META_SURFACE_TMZ_C;\
+ type SECONDARY_META_SURFACE_TMZ;\
+ type SECONDARY_META_SURFACE_TMZ_C;\
type PRIMARY_SURFACE_DCC_EN;\
type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
type DET_BUF_PLANE1_BASE_ADDRESS;\
@@ -527,6 +539,7 @@ struct dcn_mi_registers {
type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\
type ENABLE_L1_TLB;\
type SYSTEM_ACCESS_MODE;\
+ type HUBP_CLOCK_ENABLE;\
type MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
@@ -576,19 +589,23 @@ struct dcn_mi_registers {
type CURSOR_DST_X_OFFSET; \
type OUTPUT_FP
+struct dcn_mi_registers {
+ HUBP_COMMON_REG_VARIABLE_LIST;
+};
+
struct dcn_mi_shift {
- DCN_MI_REG_FIELD_LIST(uint8_t);
+ DCN_HUBP_REG_FIELD_LIST(uint8_t);
};
struct dcn_mi_mask {
- DCN_MI_REG_FIELD_LIST(uint32_t);
+ DCN_HUBP_REG_FIELD_LIST(uint32_t);
};
struct dcn10_hubp {
struct hubp base;
- const struct dcn_mi_registers *mi_regs;
- const struct dcn_mi_shift *mi_shift;
- const struct dcn_mi_mask *mi_mask;
+ const struct dcn_mi_registers *hubp_regs;
+ const struct dcn_mi_shift *hubp_shift;
+ const struct dcn_mi_mask *hubp_mask;
};
void hubp1_program_surface_config(
@@ -610,11 +627,11 @@ void hubp1_program_requestor(
struct _vcs_dpi_display_rq_regs_st *rq_regs);
void hubp1_program_pixel_format(
- struct dcn10_hubp *hubp,
+ struct hubp *hubp,
enum surface_pixel_format format);
void hubp1_program_size_and_rotation(
- struct dcn10_hubp *hubp,
+ struct hubp *hubp,
enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
@@ -622,7 +639,7 @@ void hubp1_program_size_and_rotation(
bool horizontal_mirror);
void hubp1_program_tiling(
- struct dcn10_hubp *hubp,
+ struct hubp *hubp,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
@@ -652,13 +669,16 @@ void min_set_viewport(struct hubp *hubp,
const struct rect *viewport,
const struct rect *viewport_c);
+void hubp1_clk_cntl(struct hubp *hubp, bool enable);
+void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst);
+
void dcn10_hubp_construct(
struct dcn10_hubp *hubp1,
struct dc_context *ctx,
uint32_t inst,
- const struct dcn_mi_registers *mi_regs,
- const struct dcn_mi_shift *mi_shift,
- const struct dcn_mi_mask *mi_mask);
+ const struct dcn_mi_registers *hubp_regs,
+ const struct dcn_mi_shift *hubp_shift,
+ const struct dcn_mi_mask *hubp_mask);
struct dcn_hubp_state {
@@ -680,4 +700,6 @@ struct dcn_hubp_state {
void hubp1_read_state(struct dcn10_hubp *hubp1,
struct dcn_hubp_state *s);
+enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 05dc01e..8b0f6b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -31,7 +31,8 @@
#include "dce110/dce110_hw_sequencer.h"
#include "dce/dce_hwseq.h"
#include "abm.h"
-#include "dcn10/dcn10_timing_generator.h"
+#include "dmcu.h"
+#include "dcn10_optc.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10/dcn10_mpc.h"
#include "timing_generator.h"
@@ -41,7 +42,11 @@
#include "reg_helper.h"
#include "custom_float.h"
#include "dcn10_hubp.h"
+#include "dcn10_hubbub.h"
+#include "dcn10_cm_common.h"
+#define DC_LOGGER \
+ ctx->logger
#define CTX \
hws->ctx
#define REG(reg)\
@@ -51,18 +56,8 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void log_mpc_crc(struct dc *dc)
-{
- struct dc_context *dc_ctx = dc->ctx;
- struct dce_hwseq *hws = dc->hwseq;
-
- if (REG(MPC_CRC_RESULT_GB))
- DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
- REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
- if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
- DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
- REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
-}
+#define DTN_INFO_MICRO_SEC(ref_cycle) \
+ print_microsec(dc_ctx, ref_cycle)
void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
{
@@ -75,67 +70,27 @@ void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
us_x10 % frac);
}
-#define DTN_INFO_MICRO_SEC(ref_cycle) \
- print_microsec(dc_ctx, ref_cycle)
-struct dcn_hubbub_wm_set {
- uint32_t wm_set;
- uint32_t data_urgent;
- uint32_t pte_meta_urgent;
- uint32_t sr_enter;
- uint32_t sr_exit;
- uint32_t dram_clk_chanage;
-};
+static void log_mpc_crc(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dce_hwseq *hws = dc->hwseq;
-struct dcn_hubbub_wm {
- struct dcn_hubbub_wm_set sets[4];
-};
+ if (REG(MPC_CRC_RESULT_GB))
+ DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
+ REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
+ if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
+ DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
+ REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
+}
-static void dcn10_hubbub_wm_read_state(struct dce_hwseq *hws,
- struct dcn_hubbub_wm *wm)
-{
- struct dcn_hubbub_wm_set *s;
-
- s = &wm->sets[0];
- s->wm_set = 0;
- s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
- s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
- s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
- s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
-
- s = &wm->sets[1];
- s->wm_set = 1;
- s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
- s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
- s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
- s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
-
- s = &wm->sets[2];
- s->wm_set = 2;
- s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
- s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
- s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
- s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
-
- s = &wm->sets[3];
- s->wm_set = 3;
- s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
- s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
- s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
- s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
- s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
-}
-
-static void dcn10_log_hubbub_state(struct dc *dc)
+void dcn10_log_hubbub_state(struct dc *dc)
{
struct dc_context *dc_ctx = dc->ctx;
struct dcn_hubbub_wm wm;
int i;
- dcn10_hubbub_wm_read_state(dc->hwseq, &wm);
+ hubbub1_wm_read_state(dc->res_pool->hubbub, &wm);
DTN_INFO("HUBBUB WM: \t data_urgent \t pte_meta_urgent \t "
"sr_enter \t sr_exit \t dram_clk_change \n");
@@ -156,7 +111,7 @@ static void dcn10_log_hubbub_state(struct dc *dc)
DTN_INFO("\n");
}
-static void dcn10_log_hw_state(struct dc *dc)
+void dcn10_log_hw_state(struct dc *dc)
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
@@ -180,7 +135,7 @@ static void dcn10_log_hw_state(struct dc *dc)
DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t "
"%xh \t %xh \t %xh \t "
"%d \t %d \t %d \t %xh \t",
- i,
+ hubp->inst,
s.pixel_format,
s.inuse_addr_hi,
s.viewport_width,
@@ -202,11 +157,11 @@ static void dcn10_log_hw_state(struct dc *dc)
DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t "
"h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n");
- for (i = 0; i < pool->res_cap->num_timing_generator; i++) {
+ for (i = 0; i < pool->timing_generator_count; i++) {
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
- tgn10_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
//only print if OTG master is enabled
if ((s.otg_enabled & 1) == 0)
@@ -215,7 +170,7 @@ static void dcn10_log_hw_state(struct dc *dc)
DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t "
"%d \t %d \t %d \t %d \t %d \t %d \t "
"%d \t %d \t %d \t %d \t %d \t ",
- i,
+ tg->inst,
s.v_blank_start,
s.v_blank_end,
s.v_sync_a_start,
@@ -240,117 +195,6 @@ static void dcn10_log_hw_state(struct dc *dc)
DTN_INFO_END();
}
-static void verify_allow_pstate_change_high(
- struct dce_hwseq *hws)
-{
- /* pstate latency is ~20us so if we wait over 40us and pstate allow
- * still not asserted, we are probably stuck and going to hang
- *
- * TODO: Figure out why it takes ~100us on linux
- * pstate takes around ~100us on linux. Unknown currently as to
- * why it takes that long on linux
- */
- static unsigned int pstate_wait_timeout_us = 200;
- static unsigned int pstate_wait_expected_timeout_us = 40;
- static unsigned int max_sampled_pstate_wait_us; /* data collection */
- static bool forced_pstate_allow; /* help with revert wa */
- static bool should_log_hw_state; /* prevent hw state log by default */
-
- unsigned int debug_index = 0x7;
- unsigned int debug_data;
- unsigned int i;
-
- if (forced_pstate_allow) {
- /* we hacked to force pstate allow to prevent hang last time
- * we verify_allow_pstate_change_high. so disable force
- * here so we can check status
- */
- REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
- DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
- DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
- forced_pstate_allow = false;
- }
-
- /* description "3-0: Pipe0 cursor0 QOS
- * 7-4: Pipe1 cursor0 QOS
- * 11-8: Pipe2 cursor0 QOS
- * 15-12: Pipe3 cursor0 QOS
- * 16: Pipe0 Plane0 Allow Pstate Change
- * 17: Pipe1 Plane0 Allow Pstate Change
- * 18: Pipe2 Plane0 Allow Pstate Change
- * 19: Pipe3 Plane0 Allow Pstate Change
- * 20: Pipe0 Plane1 Allow Pstate Change
- * 21: Pipe1 Plane1 Allow Pstate Change
- * 22: Pipe2 Plane1 Allow Pstate Change
- * 23: Pipe3 Plane1 Allow Pstate Change
- * 24: Pipe0 cursor0 Allow Pstate Change
- * 25: Pipe1 cursor0 Allow Pstate Change
- * 26: Pipe2 cursor0 Allow Pstate Change
- * 27: Pipe3 cursor0 Allow Pstate Change
- * 28: WB0 Allow Pstate Change
- * 29: WB1 Allow Pstate Change
- * 30: Arbiter's allow_pstate_change
- * 31: SOC pstate change request
- */
-
- REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, debug_index);
-
- for (i = 0; i < pstate_wait_timeout_us; i++) {
- debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
-
- if (debug_data & (1 << 30)) {
-
- if (i > pstate_wait_expected_timeout_us)
- dm_logger_write(hws->ctx->logger, LOG_WARNING,
- "pstate took longer than expected ~%dus\n",
- i);
-
- return;
- }
- if (max_sampled_pstate_wait_us < i)
- max_sampled_pstate_wait_us = i;
-
- udelay(1);
- }
-
- /* force pstate allow to prevent system hang
- * and break to debugger to investigate
- */
- REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
- DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
- DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
- forced_pstate_allow = true;
-
- if (should_log_hw_state) {
- dcn10_log_hw_state(hws->ctx->dc);
- }
-
- dm_logger_write(hws->ctx->logger, LOG_WARNING,
- "pstate TEST_DEBUG_DATA: 0x%X\n",
- debug_data);
- BREAK_TO_DEBUGGER();
-}
-
-static void enable_dppclk(
- struct dce_hwseq *hws,
- uint8_t plane_id,
- uint32_t requested_pix_clk,
- bool dppclk_div)
-{
- dm_logger_write(hws->ctx->logger, LOG_SURFACE,
- "dppclk_rate_control for pipe %d programed to %d\n",
- plane_id,
- dppclk_div);
-
- if (hws->shifts->DPPCLK_RATE_CONTROL)
- REG_UPDATE_2(DPP_CONTROL[plane_id],
- DPPCLK_RATE_CONTROL, dppclk_div,
- DPP_CLOCK_ENABLE, 1);
- else
- REG_UPDATE(DPP_CONTROL[plane_id],
- DPP_CLOCK_ENABLE, 1);
-}
-
static void enable_power_gating_plane(
struct dce_hwseq *hws,
bool enable)
@@ -376,10 +220,34 @@ static void enable_power_gating_plane(
static void disable_vga(
struct dce_hwseq *hws)
{
+ unsigned int in_vga1_mode = 0;
+ unsigned int in_vga2_mode = 0;
+ unsigned int in_vga3_mode = 0;
+ unsigned int in_vga4_mode = 0;
+
+ REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
+ REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
+ REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
+ REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
+
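+	/* Nothing to disable if none of the four VGA controllers is currently enabled. */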
+ if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
+ in_vga3_mode == 0 && in_vga4_mode == 0)
+ return;
+
REG_WRITE(D1VGA_CONTROL, 0);
REG_WRITE(D2VGA_CONTROL, 0);
REG_WRITE(D3VGA_CONTROL, 0);
REG_WRITE(D4VGA_CONTROL, 0);
+
+ /* HW Engineer's Notes:
+ * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
+ * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
+ *
+ * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
+ * VGA_TEST_ENABLE, to leave it in the same state as before.
+ */
+ REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
+ REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
static void dpp_pg_control(
@@ -432,312 +300,6 @@ static void dpp_pg_control(
}
}
-static uint32_t convert_and_clamp(
- uint32_t wm_ns,
- uint32_t refclk_mhz,
- uint32_t clamp_value)
-{
- uint32_t ret_val = 0;
- ret_val = wm_ns * refclk_mhz;
- ret_val /= 1000;
-
- if (ret_val > clamp_value)
- ret_val = clamp_value;
-
- return ret_val;
-}
-
-static void program_watermarks(
- struct dce_hwseq *hws,
- struct dcn_watermark_set *watermarks,
- unsigned int refclk_mhz)
-{
- uint32_t force_en = hws->ctx->dc->debug.disable_stutter ? 1 : 0;
- /*
- * Need to clamp to max of the register values (i.e. no wrap)
- * for dcn1, all wm registers are 21-bit wide
- */
- uint32_t prog_wm_value;
-
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
-
- /* Repeat for water mark set A, B, C and D. */
- /* clock state A */
- prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
-
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "URGENCY_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.urgent_ns, prog_wm_value);
-
- prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.pte_meta_urgent_ns, prog_wm_value);
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "SR_EXIT_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
-
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
-
-
- /* clock state B */
- prog_wm_value = convert_and_clamp(
- watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "URGENCY_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.urgent_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->b.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.pte_meta_urgent_ns, prog_wm_value);
-
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "SR_ENTER_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "SR_EXIT_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
-
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
-
- /* clock state C */
- prog_wm_value = convert_and_clamp(
- watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "URGENCY_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.urgent_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->c.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.pte_meta_urgent_ns, prog_wm_value);
-
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "SR_ENTER_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "SR_EXIT_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
-
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
-
- /* clock state D */
- prog_wm_value = convert_and_clamp(
- watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "URGENCY_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.urgent_ns, prog_wm_value);
-
- prog_wm_value = convert_and_clamp(
- watermarks->d.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.pte_meta_urgent_ns, prog_wm_value);
-
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "SR_ENTER_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "SR_EXIT_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
-
-
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
- dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
- "DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
-
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
-
- REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
- DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
- REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
- DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
-
- REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
- DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
- DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, force_en);
-
-#if 0
- REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
-#endif
-}
-
-
-static void dcn10_update_dchub(
- struct dce_hwseq *hws,
- struct dchub_init_data *dh_data)
-{
- /* TODO: port code from dal2 */
- switch (dh_data->fb_mode) {
- case FRAME_BUFFER_MODE_ZFB_ONLY:
- /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/
- REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
- SDPIF_FB_TOP, 0);
-
- REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
- SDPIF_FB_BASE, 0x0FFFF);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
- SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
- SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
- SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 22);
- break;
- case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
- /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
- SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
- SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
- SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
- dh_data->zfb_size_in_byte - 1) >> 22);
- break;
- case FRAME_BUFFER_MODE_LOCAL_ONLY:
- /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
- SDPIF_AGP_BASE, 0);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
- SDPIF_AGP_BOT, 0X03FFFF);
-
- REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
- SDPIF_AGP_TOP, 0);
- break;
- default:
- break;
- }
-
- dh_data->dchub_initialzied = true;
- dh_data->dchub_info_valid = false;
-}
-
static void hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
@@ -792,6 +354,7 @@ static void power_on_plane(
struct dce_hwseq *hws,
int plane_id)
{
+ struct dc_context *ctx = hws->ctx;
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
@@ -799,7 +362,7 @@ static void power_on_plane(
hubp_pg_control(hws, plane_id, true);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
- dm_logger_write(hws->ctx->logger, LOG_DEBUG,
+ DC_LOG_DEBUG(
"Un-gated front end for pipe %d\n", plane_id);
}
}
@@ -808,11 +371,8 @@ static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = dc->res_pool->hubps[0];
- int pwr_status = 0;
- REG_GET(DOMAIN0_PG_STATUS, DOMAIN0_PGFSM_PWR_STATUS, &pwr_status);
- /* Don't need to blank if hubp is power gated*/
- if (pwr_status == 2)
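+	/* Nothing to undo if the workaround was never applied. */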
+ if (!hws->wa_state.DEGVIDCN10_253_applied)
return;
hubp->funcs->set_blank(hubp, true);
@@ -823,16 +383,29 @@ static void undo_DEGVIDCN10_253_wa(struct dc *dc)
hubp_pg_control(hws, 0, false);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
+
+ hws->wa_state.DEGVIDCN10_253_applied = false;
}
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = dc->res_pool->hubps[0];
+ int i;
if (dc->debug.disable_stutter)
return;
+ if (!hws->wa.DEGVIDCN10_253)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!dc->res_pool->hubps[i]->power_gated)
+ return;
+ }
+
+	/* all pipes are power gated, apply the workaround to enable stutter. */
+
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
@@ -841,6 +414,7 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc)
IP_REQUEST_EN, 0);
hubp->funcs->set_hubp_blank_en(hubp, false);
+ hws->wa_state.DEGVIDCN10_253_applied = true;
}
static void bios_golden_init(struct dc *dc)
@@ -859,85 +433,32 @@ static void bios_golden_init(struct dc *dc)
}
}
-static void dcn10_init_hw(struct dc *dc)
+static void false_optc_underflow_wa(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct timing_generator *tg)
{
int i;
- struct abm *abm = dc->res_pool->abm;
- struct dce_hwseq *hws = dc->hwseq;
+ bool underflow;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- REG_WRITE(REFCLK_CNTL, 0);
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(DIO_MEM_PWR_CTRL, 0);
-
- if (!dc->debug.disable_clock_gate) {
- /* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
- }
-
- enable_power_gating_plane(dc->hwseq, true);
+ if (!dc->hwseq->wa.false_optc_underflow)
return;
- }
- /* end of FPGA. Below if real ASIC */
-
- bios_golden_init(dc);
- disable_vga(dc->hwseq);
-
- for (i = 0; i < dc->link_count; i++) {
- /* Power up AND update implementation according to the
- * required signal (which may be different from the
- * default signal on connector).
- */
- struct dc_link *link = dc->links[i];
-
- link->link_enc->funcs->hw_init(link->link_enc);
- }
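+	/* Remember whether OPTC already reported an underflow before the MPCC disconnect waits below. */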
+ underflow = tg->funcs->is_optc_underflow_occurred(tg);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct dpp *dpp = dc->res_pool->dpps[i];
- struct timing_generator *tg = dc->res_pool->timing_generators[i];
-
- dpp->funcs->dpp_reset(dpp);
- dc->res_pool->mpc->funcs->remove(
- dc->res_pool->mpc, &(dc->res_pool->opps[i]->mpc_tree),
- dc->res_pool->opps[i]->inst, i);
-
- /* Blank controller using driver code instead of
- * command table.
- */
- tg->funcs->set_blank(tg, true);
- hwss_wait_for_blank_complete(tg);
- }
-
- for (i = 0; i < dc->res_pool->audio_count; i++) {
- struct audio *audio = dc->res_pool->audios[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- audio->funcs->hw_init(audio);
- }
+ if (old_pipe_ctx->stream != stream)
+ continue;
- if (abm != NULL) {
- abm->funcs->init_backlight(abm);
- abm->funcs->abm_init(abm);
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
}
- /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
- REG_WRITE(DIO_MEM_PWR_CTRL, 0);
-
- if (!dc->debug.disable_clock_gate) {
- /* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+ tg->funcs->set_blank_data_double_buffer(tg, true);
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
- }
-
- enable_power_gating_plane(dc->hwseq, true);
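+	/* Clear the underflow status only if it was newly raised by the blanking above. */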
+ if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
+ tg->funcs->clear_optc_underflow(tg);
}
static enum dc_status dcn10_prog_pixclk_crtc_otg(
@@ -948,10 +469,6 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space;
struct tg_color black_color = {0};
- bool enableStereo = stream->timing.timing_3d_format == TIMING_3D_FORMAT_NONE ?
- false:true;
- bool rightEyePolarity = stream->timing.flags.RIGHT_EYE_3D_POLARITY;
-
/* by upper caller loop, pipe0 is parent pipe and be called first.
* back end is set up by for pipe0. Other children pipe share back end
@@ -986,11 +503,6 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
&stream->timing,
true);
- pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
- pipe_ctx->stream_res.opp,
- enableStereo,
- rightEyePolarity);
-
#if 0 /* move to after enable_crtc */
/* TODO: OPP FMT, ABM. etc. should be done here. */
/* or FPGA now. instance 0 only. TODO: move to opp.c */
@@ -1005,12 +517,18 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
/* program otg blank color */
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
- pipe_ctx->stream_res.tg->funcs->set_blank_color(
- pipe_ctx->stream_res.tg,
- &black_color);
- pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
- hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
+ if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
+ !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
+ hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
+ false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
+ }
/* VTG is within DCHUB command block. DCFCLK is always on */
if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
@@ -1035,7 +553,7 @@ static void reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
-
+ struct dc_context *ctx = dc->ctx;
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
@@ -1045,6 +563,22 @@ static void reset_back_end_for_pipe(
/* DPMS may already disable */
if (!pipe_ctx->stream->dpms_off)
core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
+ else if (pipe_ctx->stream_res.audio) {
+ /*
+	 * If the stream was already disabled outside of the commit streams path,
+	 * the audio disable was skipped; do it here.
+ */
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ if (dc->caps.dynamic_audio == true) {
+			/* we have to dynamically arbitrate the audio endpoints */
+ pipe_ctx->stream_res.audio = NULL;
+			/* we freed the resource, so is_audio_acquired needs to be reset */
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
+ }
+
+ }
+
}
/* by upper caller loop, parent pipe: pipe0, will be reset last.
@@ -1065,222 +599,268 @@ static void reset_back_end_for_pipe(
return;
pipe_ctx->stream = NULL;
- dm_logger_write(dc->ctx->logger, LOG_DEBUG,
- "Reset back end for pipe %d, tg:%d\n",
+ DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
-/* trigger HW to start disconnect plane from stream on the next vsync */
-static void plane_atomic_disconnect(struct dc *dc,
- int fe_idx)
+static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
- struct hubp *hubp = dc->res_pool->hubps[fe_idx];
- struct mpc *mpc = dc->res_pool->mpc;
- int opp_id, z_idx;
- int mpcc_id = -1;
-
- /* look at tree rather than mi here to know if we already reset */
- for (opp_id = 0; opp_id < dc->res_pool->pipe_count; opp_id++) {
- struct output_pixel_processor *opp = dc->res_pool->opps[opp_id];
+ static bool should_log_hw_state; /* prevent hw state log by default */
- for (z_idx = 0; z_idx < opp->mpc_tree.num_pipes; z_idx++) {
- if (opp->mpc_tree.dpp[z_idx] == fe_idx) {
- mpcc_id = opp->mpc_tree.mpcc[z_idx];
- break;
- }
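+	/* If HUBBUB never asserted allow_pstate_change, log the HW state (when enabled) and break to the debugger. */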
+ if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
+ if (should_log_hw_state) {
+ dcn10_log_hw_state(dc);
}
- if (mpcc_id != -1)
- break;
- }
- /*Already reset*/
- if (opp_id == dc->res_pool->pipe_count)
- return;
-
- if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
- hubp->funcs->dcc_control(hubp, false, false);
- if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
- mpc->funcs->remove(mpc, &(dc->res_pool->opps[opp_id]->mpc_tree),
- dc->res_pool->opps[opp_id]->inst, fe_idx);
+ BREAK_TO_DEBUGGER();
+ }
}
-/* disable HW used by plane.
- * note: cannot disable until disconnect is complete */
-static void plane_atomic_disable(struct dc *dc,
- int fe_idx)
+/* trigger HW to start disconnect plane from stream on the next vsync */
+static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
- struct dce_hwseq *hws = dc->hwseq;
- struct hubp *hubp = dc->res_pool->hubps[fe_idx];
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ int dpp_id = pipe_ctx->plane_res.dpp->inst;
struct mpc *mpc = dc->res_pool->mpc;
- int opp_id = hubp->opp_id;
-
- if (opp_id == 0xf)
- return;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
- mpc->funcs->wait_for_idle(mpc, hubp->mpcc_id);
- dc->res_pool->opps[hubp->opp_id]->mpcc_disconnect_pending[hubp->mpcc_id] = false;
- /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
- "[debug_mpo: atomic disable finished on mpcc %d]\n",
- fe_idx);*/
+ mpc_tree_params = &(opp->mpc_tree_params);
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
- hubp->funcs->set_blank(hubp, true);
+ /*Already reset*/
+ if (mpcc_to_remove == NULL)
+ return;
- if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+ opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
- REG_UPDATE(HUBP_CLK_CNTL[fe_idx],
- HUBP_CLOCK_ENABLE, 0);
- REG_UPDATE(DPP_CONTROL[fe_idx],
- DPP_CLOCK_ENABLE, 0);
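+	/* Mark that the DC state needs a later optimization pass now that the MPC tree has changed. */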
+ dc->optimized_required = true;
- if (dc->res_pool->opps[opp_id]->mpc_tree.num_pipes == 0)
- REG_UPDATE(OPP_PIPE_CONTROL[opp_id],
- OPP_PIPE_CLOCK_EN, 0);
+ if (hubp->funcs->hubp_disconnect)
+ hubp->funcs->hubp_disconnect(hubp);
if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
+ dcn10_verify_allow_pstate_change_high(dc);
}
-/*
- * kill power to plane hw
- * note: cannot power down until plane is disable
- */
-static void plane_atomic_power_down(struct dc *dc, int fe_idx)
+static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
- struct dpp *dpp = dc->res_pool->dpps[fe_idx];
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_context *ctx = dc->ctx;
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
- dpp_pg_control(hws, fe_idx, false);
- hubp_pg_control(hws, fe_idx, false);
+ dpp_pg_control(hws, dpp->inst, false);
+ hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
- dm_logger_write(dc->ctx->logger, LOG_DEBUG,
- "Power gated front end %d\n", fe_idx);
-
- if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
+ DC_LOG_DEBUG(
+ "Power gated front end %d\n", pipe_ctx->pipe_idx);
}
}
-
-static void reset_front_end(
- struct dc *dc,
- int fe_idx)
+/* disable HW used by plane.
+ * note: cannot disable until disconnect is complete
+ */
+static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
- struct dce_hwseq *hws = dc->hwseq;
- struct timing_generator *tg;
- int opp_id = dc->res_pool->hubps[fe_idx]->opp_id;
-
- /*Already reset*/
- if (opp_id == 0xf)
- return;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ int opp_id = hubp->opp_id;
- tg = dc->res_pool->timing_generators[opp_id];
- tg->funcs->lock(tg);
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
- plane_atomic_disconnect(dc, fe_idx);
+ hubp->funcs->hubp_clk_cntl(hubp, false);
- REG_UPDATE(OTG_GLOBAL_SYNC_STATUS[tg->inst], VUPDATE_NO_LOCK_EVENT_CLEAR, 1);
- tg->funcs->unlock(tg);
+ dpp->funcs->dpp_dppclk_control(dpp, false, false);
- if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(hws);
+ if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
+ pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
+ pipe_ctx->stream_res.opp,
+ false);
- if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_GLOBAL_SYNC_STATUS[tg->inst],
- VUPDATE_NO_LOCK_EVENT_OCCURRED, 1,
- 1, 100000);
+ hubp->power_gated = true;
+ dc->optimized_required = false; /* We're powering off, no need to optimize */
- plane_atomic_disable(dc, fe_idx);
+ plane_atomic_power_down(dc, pipe_ctx);
- dm_logger_write(dc->ctx->logger, LOG_DC,
- "Reset front end %d\n",
- fe_idx);
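+	/* Detach the pipe context so its stream and plane resources can be reacquired. */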
+ pipe_ctx->stream = NULL;
+ memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
+ memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->plane_state = NULL;
}
-static void dcn10_power_down_fe(struct dc *dc, int fe_idx)
+static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
- struct dce_hwseq *hws = dc->hwseq;
- struct dpp *dpp = dc->res_pool->dpps[fe_idx];
+ struct dc_context *ctx = dc->ctx;
- reset_front_end(dc, fe_idx);
+ if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
+ return;
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
- dpp_pg_control(hws, fe_idx, false);
- hubp_pg_control(hws, fe_idx, false);
- dpp->funcs->dpp_reset(dpp);
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 0);
- dm_logger_write(dc->ctx->logger, LOG_DEBUG,
- "Power gated front end %d\n", fe_idx);
+ plane_atomic_disable(dc, pipe_ctx);
- if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
+ apply_DEGVIDCN10_253_wa(dc);
+
+ DC_LOG_DC("Power down front end %d\n",
+ pipe_ctx->pipe_idx);
}
-static void reset_hw_ctx_wrap(
- struct dc *dc,
- struct dc_state *context)
+static void dcn10_init_hw(struct dc *dc)
{
int i;
+ struct abm *abm = dc->res_pool->abm;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ struct dc_state *context = dc->current_state;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ REG_WRITE(REFCLK_CNTL, 0);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ enable_power_gating_plane(dc->hwseq, true);
+ } else {
+
+ if (!dcb->funcs->is_accelerated_mode(dcb)) {
+ bios_golden_init(dc);
+ disable_vga(dc->hwseq);
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
+ dc->hwss.edp_power_control(link, true);
+
+ link->link_enc->funcs->hw_init(link->link_enc);
+ }
+ }
- /* Reset Front End*/
- /* Lock*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
- if (cur_pipe_ctx->stream)
+ if (tg->funcs->is_tg_enabled(tg))
tg->funcs->lock(tg);
}
- /* Disconnect*/
- for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
- struct pipe_ctx *pipe_ctx_old =
- &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->stream ||
- !pipe_ctx->plane_state ||
- pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ /* Blank controller using driver code instead of
+ * command table.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
- plane_atomic_disconnect(dc, i);
+ if (tg->funcs->is_tg_enabled(tg)) {
+ tg->funcs->set_blank(tg, true);
+ hwss_wait_for_blank_complete(tg);
}
}
- /* Unlock*/
- for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
- struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
- if (cur_pipe_ctx->stream)
+ /* Reset all MPCC muxes */
+ dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
+
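+ /* Seed each pipe_ctx with its fixed hw resources (TG, HUBP, DPP, OPP) and
+ * mark the MPCC instance as pending disconnect, so that the
+ * plane_atomic_disconnect() call below tears down any blending state left
+ * over from before driver init.
+ */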
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+
+ pipe_ctx->stream_res.tg = tg;
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->plane_res.hubp = hubp;
+ pipe_ctx->plane_res.dpp = dpp;
+ pipe_ctx->plane_res.mpcc_inst = dpp->inst;
+ hubp->mpcc_id = dpp->inst;
+ hubp->opp_id = 0xf;
+ hubp->power_gated = false;
+
+ dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
+
+ plane_atomic_disconnect(dc, pipe_ctx);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
}
- /* Disable and Powerdown*/
- for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
- struct pipe_ctx *pipe_ctx_old =
- &dc->current_state->res_ctx.pipe_ctx[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- /*if (!pipe_ctx_old->stream)
- continue;*/
+ dcn10_disable_plane(dc, pipe_ctx);
- if (pipe_ctx->stream && pipe_ctx->plane_state
- && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
- continue;
+ pipe_ctx->stream_res.tg = NULL;
+ pipe_ctx->plane_res.hubp = NULL;
+
+ tg->funcs->tg_init(tg);
+ }
- plane_atomic_disable(dc, i);
+ /* end of FPGA. Below if real ASIC */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
+ for (i = 0; i < dc->res_pool->audio_count; i++) {
+ struct audio *audio = dc->res_pool->audios[i];
+
+ audio->funcs->hw_init(audio);
+ }
+
+ if (abm != NULL) {
+ abm->funcs->init_backlight(abm);
+ abm->funcs->abm_init(abm);
+ }
+
+ if (dmcu != NULL)
+ dmcu->funcs->dmcu_init(dmcu);
- if (!pipe_ctx->stream || !pipe_ctx->plane_state)
- plane_atomic_power_down(dc, i);
+ /* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power. */
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
+ enable_power_gating_plane(dc->hwseq, true);
+}
+
+static void reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
struct pipe_ctx *pipe_ctx_old =
@@ -1298,7 +878,6 @@ static void reset_hw_ctx_wrap(
struct clock_source *old_clk = pipe_ctx_old->clock_source;
reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
-
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -1332,21 +911,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}
-static void toggle_watermark_change_req(struct dce_hwseq *hws)
-{
- uint32_t watermark_change_req;
-
- REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);
-
- if (watermark_change_req)
- watermark_change_req = 0;
- else
- watermark_change_req = 1;
- REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
-}
static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
@@ -1366,8 +931,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
-static bool dcn10_set_input_transfer_func(
- struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
+static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
const struct dc_transfer_func *tf = NULL;
@@ -1379,35 +944,32 @@ static bool dcn10_set_input_transfer_func(
if (plane_state->in_transfer_func)
tf = plane_state->in_transfer_func;
- if (plane_state->gamma_correction && dce_use_lut(plane_state))
- dpp_base->funcs->ipp_program_input_lut(dpp_base,
- plane_state->gamma_correction);
+ if (plane_state->gamma_correction &&
+ plane_state->gamma_correction->is_identity)
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ else if (plane_state->gamma_correction && dce_use_lut(plane_state->format))
+ dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
if (tf == NULL)
- dpp_base->funcs->ipp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
else if (tf->type == TF_TYPE_PREDEFINED) {
switch (tf->tf) {
case TRANSFER_FUNCTION_SRGB:
- dpp_base->funcs->ipp_set_degamma(dpp_base,
- IPP_DEGAMMA_MODE_HW_sRGB);
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
break;
case TRANSFER_FUNCTION_BT709:
- dpp_base->funcs->ipp_set_degamma(dpp_base,
- IPP_DEGAMMA_MODE_HW_xvYCC);
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
break;
case TRANSFER_FUNCTION_LINEAR:
- dpp_base->funcs->ipp_set_degamma(dpp_base,
- IPP_DEGAMMA_MODE_BYPASS);
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
- result = false;
- break;
default:
result = false;
break;
}
} else if (tf->type == TF_TYPE_BYPASS) {
- dpp_base->funcs->ipp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
} else {
/*TF_TYPE_DISTRIBUTED_POINTS*/
result = false;
@@ -1415,324 +977,14 @@ static bool dcn10_set_input_transfer_func(
return result;
}
-/*modify the method to handle rgb for arr_points*/
-static bool convert_to_custom_float(
- struct pwl_result_data *rgb_resulted,
- struct curve_points *arr_points,
- uint32_t hw_points_num)
-{
- struct custom_float_format fmt;
-
- struct pwl_result_data *rgb = rgb_resulted;
-
- uint32_t i = 0;
-
- fmt.exponenta_bits = 6;
- fmt.mantissa_bits = 12;
- fmt.sign = false;
-
- if (!convert_to_custom_float_format(
- arr_points[0].x,
- &fmt,
- &arr_points[0].custom_float_x)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(
- arr_points[0].offset,
- &fmt,
- &arr_points[0].custom_float_offset)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(
- arr_points[0].slope,
- &fmt,
- &arr_points[0].custom_float_slope)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- fmt.mantissa_bits = 10;
- fmt.sign = false;
-
- if (!convert_to_custom_float_format(
- arr_points[1].x,
- &fmt,
- &arr_points[1].custom_float_x)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(
- arr_points[1].y,
- &fmt,
- &arr_points[1].custom_float_y)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(
- arr_points[1].slope,
- &fmt,
- &arr_points[1].custom_float_slope)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- fmt.mantissa_bits = 12;
- fmt.sign = true;
-
- while (i != hw_points_num) {
- if (!convert_to_custom_float_format(
- rgb->red,
- &fmt,
- &rgb->red_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(
- rgb->green,
- &fmt,
- &rgb->green_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(
- rgb->blue,
- &fmt,
- &rgb->blue_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(
- rgb->delta_red,
- &fmt,
- &rgb->delta_red_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(
- rgb->delta_green,
- &fmt,
- &rgb->delta_green_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(
- rgb->delta_blue,
- &fmt,
- &rgb->delta_blue_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- ++rgb;
- ++i;
- }
-
- return true;
-}
-#define MAX_REGIONS_NUMBER 34
-#define MAX_LOW_POINT 25
-#define NUMBER_SEGMENTS 32
-
-static bool dcn10_translate_regamma_to_hw_format(const struct dc_transfer_func
- *output_tf, struct pwl_params *regamma_params)
-{
- struct curve_points *arr_points;
- struct pwl_result_data *rgb_resulted;
- struct pwl_result_data *rgb;
- struct pwl_result_data *rgb_plus_1;
- struct fixed31_32 y_r;
- struct fixed31_32 y_g;
- struct fixed31_32 y_b;
- struct fixed31_32 y1_min;
- struct fixed31_32 y3_max;
-
- int32_t segment_start, segment_end;
- int32_t i;
- uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
-
- if (output_tf == NULL || regamma_params == NULL ||
- output_tf->type == TF_TYPE_BYPASS)
- return false;
-
- arr_points = regamma_params->arr_points;
- rgb_resulted = regamma_params->rgb_resulted;
- hw_points = 0;
-
- memset(regamma_params, 0, sizeof(struct pwl_params));
- memset(seg_distr, 0, sizeof(seg_distr));
-
- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
- /* 32 segments
- * segments are from 2^-25 to 2^7
- */
- for (i = 0; i < 32 ; i++)
- seg_distr[i] = 3;
-
- segment_start = -25;
- segment_end = 7;
- } else {
- /* 10 segments
- * segment is from 2^-10 to 2^0
- * There are less than 256 points, for optimization
- */
- seg_distr[0] = 3;
- seg_distr[1] = 4;
- seg_distr[2] = 4;
- seg_distr[3] = 4;
- seg_distr[4] = 4;
- seg_distr[5] = 4;
- seg_distr[6] = 4;
- seg_distr[7] = 4;
- seg_distr[8] = 5;
- seg_distr[9] = 5;
-
- segment_start = -10;
- segment_end = 0;
- }
-
- for (i = segment_end - segment_start; i < MAX_REGIONS_NUMBER ; i++)
- seg_distr[i] = -1;
-
- for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1)
- hw_points += (1 << seg_distr[k]);
- }
-
- j = 0;
- for (k = 0; k < (segment_end - segment_start); k++) {
- increment = NUMBER_SEGMENTS / (1 << seg_distr[k]);
- start_index = (segment_start + k + MAX_LOW_POINT) * NUMBER_SEGMENTS;
- for (i = start_index; i < start_index + NUMBER_SEGMENTS; i += increment) {
- if (j == hw_points - 1)
- break;
- rgb_resulted[j].red = output_tf->tf_pts.red[i];
- rgb_resulted[j].green = output_tf->tf_pts.green[i];
- rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
- j++;
- }
- }
- /* last point */
- start_index = (segment_end + MAX_LOW_POINT) * NUMBER_SEGMENTS;
- rgb_resulted[hw_points - 1].red =
- output_tf->tf_pts.red[start_index];
- rgb_resulted[hw_points - 1].green =
- output_tf->tf_pts.green[start_index];
- rgb_resulted[hw_points - 1].blue =
- output_tf->tf_pts.blue[start_index];
-
- arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_start));
- arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_end));
- arr_points[2].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_end));
-
- y_r = rgb_resulted[0].red;
- y_g = rgb_resulted[0].green;
- y_b = rgb_resulted[0].blue;
-
- y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
-
- arr_points[0].y = y1_min;
- arr_points[0].slope = dal_fixed31_32_div(
- arr_points[0].y,
- arr_points[0].x);
- y_r = rgb_resulted[hw_points - 1].red;
- y_g = rgb_resulted[hw_points - 1].green;
- y_b = rgb_resulted[hw_points - 1].blue;
-
- /* see comment above, m_arrPoints[1].y should be the Y value for the
- * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
- */
- y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
-
- arr_points[1].y = y3_max;
- arr_points[2].y = y3_max;
-
- arr_points[1].slope = dal_fixed31_32_zero;
- arr_points[2].slope = dal_fixed31_32_zero;
-
- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
- /* for PQ, we want to have a straight line from last HW X point,
- * and the slope to be such that we hit 1.0 at 10000 nits.
- */
- const struct fixed31_32 end_value =
- dal_fixed31_32_from_int(125);
-
- arr_points[1].slope = dal_fixed31_32_div(
- dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
- dal_fixed31_32_sub(end_value, arr_points[1].x));
- arr_points[2].slope = dal_fixed31_32_div(
- dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
- dal_fixed31_32_sub(end_value, arr_points[1].x));
- }
-
- regamma_params->hw_points_num = hw_points;
-
- i = 1;
- for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1) {
- regamma_params->arr_curve_points[k].segments_num =
- seg_distr[k];
- regamma_params->arr_curve_points[i].offset =
- regamma_params->arr_curve_points[k].
- offset + (1 << seg_distr[k]);
- }
- i++;
- }
- if (seg_distr[k] != -1)
- regamma_params->arr_curve_points[k].segments_num =
- seg_distr[k];
-
- rgb = rgb_resulted;
- rgb_plus_1 = rgb_resulted + 1;
-
- i = 1;
-
- while (i != hw_points + 1) {
- if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
- rgb->delta_red = dal_fixed31_32_sub(
- rgb_plus_1->red,
- rgb->red);
- rgb->delta_green = dal_fixed31_32_sub(
- rgb_plus_1->green,
- rgb->green);
- rgb->delta_blue = dal_fixed31_32_sub(
- rgb_plus_1->blue,
- rgb->blue);
-
- ++rgb_plus_1;
- ++rgb;
- ++i;
- }
- convert_to_custom_float(rgb_resulted, arr_points, hw_points);
- return true;
-}
-static bool dcn10_set_output_transfer_func(
- struct pipe_ctx *pipe_ctx,
- const struct dc_stream_state *stream)
+static bool
+dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
@@ -1742,18 +994,21 @@ static bool dcn10_set_output_transfer_func(
dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
if (stream->out_transfer_func &&
- stream->out_transfer_func->type ==
- TF_TYPE_PREDEFINED &&
- stream->out_transfer_func->tf ==
- TRANSFER_FUNCTION_SRGB) {
- dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_SRGB);
- } else if (dcn10_translate_regamma_to_hw_format(
- stream->out_transfer_func, &dpp->regamma_params)) {
- dpp->funcs->opp_program_regamma_pwl(dpp, &dpp->regamma_params);
- dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_USER);
- } else {
- dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_BYPASS);
- }
+ stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
+ dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750 us, so only do it on a
+ * full update.
+ */
+ else if (cm_helper_translate_curve_to_hw_format(
+ stream->out_transfer_func,
+ &dpp->regamma_params, false)) {
+ dpp->funcs->dpp_program_regamma_pwl(
+ dpp,
+ &dpp->regamma_params, OPP_REGAMMA_USER);
+ } else
+ dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
return true;
}
@@ -1763,8 +1018,6 @@ static void dcn10_pipe_control_lock(
struct pipe_ctx *pipe,
bool lock)
{
- struct hubp *hubp = NULL;
- hubp = dc->res_pool->hubps[pipe->pipe_idx];
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
@@ -1772,7 +1025,7 @@ static void dcn10_pipe_control_lock(
return;
if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
+ dcn10_verify_allow_pstate_change_high(dc);
if (lock)
pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
@@ -1780,7 +1033,7 @@ static void dcn10_pipe_control_lock(
pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
+ dcn10_verify_allow_pstate_change_high(dc);
}
static bool wait_for_reset_trigger_to_occur(
@@ -1833,14 +1086,15 @@ static void dcn10_enable_timing_synchronization(
for (i = 1; i < group_size; i++)
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
- grouped_pipes[i]->stream_res.tg, grouped_pipes[0]->stream_res.tg->inst);
-
+ grouped_pipes[i]->stream_res.tg,
+ grouped_pipes[0]->stream_res.tg->inst);
DC_SYNC_INFO("Waiting for trigger\n");
/* Only one pipe needs to be checked for the reset having occurred, as all the
* others are synchronized to it. Look at the last pipe programmed to reset.
*/
+
wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
for (i = 1; i < group_size; i++)
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
@@ -1849,11 +1103,34 @@ static void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Sync complete\n");
}
-static void print_rq_dlg_ttu(
+static void dcn10_enable_per_frame_crtc_position_reset(
+ struct dc *dc,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[])
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ int i;
+
+ DC_SYNC_INFO("Setting up\n");
+ for (i = 0; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
+ grouped_pipes[i]->stream_res.tg,
+ grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+ &grouped_pipes[i]->stream->triggered_crtc_reset);
+
+ DC_SYNC_INFO("Waiting for trigger\n");
+
+ for (i = 0; i < group_size; i++)
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
+
+ DC_SYNC_INFO("Multi-display sync is complete\n");
+}
+
+/*static void print_rq_dlg_ttu(
struct dc *core_dc,
struct pipe_ctx *pipe_ctx)
{
- dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
"\n============== DML TTU Output parameters [%d] ==============\n"
"qos_level_low_wm: %d, \n"
"qos_level_high_wm: %d, \n"
@@ -1883,7 +1160,7 @@ static void print_rq_dlg_ttu(
pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
);
- dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
"\n============== DML DLG Output parameters [%d] ==============\n"
"refcyc_h_blank_end: %d, \n"
"dlg_vblank_end: %d, \n"
@@ -1918,7 +1195,7 @@ static void print_rq_dlg_ttu(
pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
);
- dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
"\ndst_y_per_meta_row_nom_l: %d, \n"
"refcyc_per_meta_chunk_nom_l: %d, \n"
"refcyc_per_line_delivery_pre_l: %d, \n"
@@ -1948,7 +1225,7 @@ static void print_rq_dlg_ttu(
pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
);
- dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
"\n============== DML RQ Output parameters [%d] ==============\n"
"chunk_size: %d \n"
"min_chunk_size: %d \n"
@@ -1970,33 +1247,118 @@ static void print_rq_dlg_ttu(
pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
);
}
+*/
-static void dcn10_power_on_fe(
+static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
+ struct vm_system_aperture_param *apt,
+ struct dce_hwseq *hws)
+{
+ PHYSICAL_ADDRESS_LOC physical_page_number;
+ uint32_t logical_addr_low;
+ uint32_t logical_addr_high;
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
+ REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ LOGICAL_ADDR, &logical_addr_low);
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ LOGICAL_ADDR, &logical_addr_high);
+
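+ /* Convert the register fields to byte addresses: the default page number is
+ * in 4 KB pages (shifted by 12), and the aperture low/high logical addresses
+ * appear to be stored in 256 KB units (shifted by 18).
+ */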
+ apt->sys_default.quad_part = physical_page_number.quad_part << 12;
+ apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
+ apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
+}
+
+/* Temporarily read the settings from registers; in the future the values will come from the KMD directly. */
+static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
+ struct vm_context0_param *vm0,
+ struct dce_hwseq *hws)
+{
+ PHYSICAL_ADDRESS_LOC fb_base;
+ PHYSICAL_ADDRESS_LOC fb_offset;
+ uint32_t fb_base_value;
+ uint32_t fb_offset_value;
+
+ REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
+ REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
+
+ REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
+ REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
+
+ /*
+ * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
+ * Therefore we need to do
+ * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
+ * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
+ */
+ fb_base.quad_part = (uint64_t)fb_base_value << 24;
+ fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
+ vm0->pte_base.quad_part += fb_base.quad_part;
+ vm0->pte_base.quad_part -= fb_offset.quad_part;
+}
+
+
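+/* Read the VM system aperture and context-0 page-table settings from the
+ * MMHUB registers and program them into this HUBP's DCN VM registers.
+ */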
+static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct vm_system_aperture_param apt = { {{ 0 } } };
+ struct vm_context0_param vm0 = { { { 0 } } };
+
+ mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
+ mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
+
+ hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
+ hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
+}
+
+static void dcn10_enable_plane(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
- struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dce_hwseq *hws = dc->hwseq;
if (dc->debug.sanity_checks) {
- verify_allow_pstate_change_high(dc->hwseq);
+ dcn10_verify_allow_pstate_change_high(dc);
}
+ undo_DEGVIDCN10_253_wa(dc);
+
power_on_plane(dc->hwseq,
- pipe_ctx->pipe_idx);
+ pipe_ctx->plane_res.hubp->inst);
/* enable DCFCLK current DCHUB */
- REG_UPDATE(HUBP_CLK_CNTL[pipe_ctx->pipe_idx],
- HUBP_CLOCK_ENABLE, 1);
+ pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
- REG_UPDATE(OPP_PIPE_CONTROL[pipe_ctx->stream_res.tg->inst],
- OPP_PIPE_CLOCK_EN, 1);
- /*TODO: REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, 0x1f);*/
+ pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
+ pipe_ctx->stream_res.opp,
+ true);
+/* TODO: enable/disable in dm as per update type.
if (plane_state) {
- dm_logger_write(dc->ctx->logger, LOG_DC,
+ DC_LOG_DC(dc->ctx->logger,
"Pipe:%d 0x%x: addr hi:0x%x, "
"addr low:0x%x, "
"src: %d, %d, %d,"
@@ -2014,7 +1376,7 @@ static void dcn10_power_on_fe(
plane_state->dst_rect.width,
plane_state->dst_rect.height);
- dm_logger_write(dc->ctx->logger, LOG_DC,
+ DC_LOG_DC(dc->ctx->logger,
"Pipe %d: width, height, x, y format:%d\n"
"viewport:%d, %d, %d, %d\n"
"recout: %d, %d, %d, %d\n",
@@ -2030,14 +1392,18 @@ static void dcn10_power_on_fe(
pipe_ctx->plane_res.scl_data.recout.y);
print_rq_dlg_ttu(dc, pipe_ctx);
}
+*/
+ if (dc->config.gpu_vm_support)
+ dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
if (dc->debug.sanity_checks) {
- verify_allow_pstate_change_high(dc->hwseq);
+ dcn10_verify_allow_pstate_change_high(dc);
}
}
static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
+ int i = 0;
struct dpp_grph_csc_adjustment adjust;
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
@@ -2045,33 +1411,9 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
- adjust.temperature_matrix[0] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[0];
- adjust.temperature_matrix[1] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[1];
- adjust.temperature_matrix[2] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[2];
- adjust.temperature_matrix[3] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[4];
- adjust.temperature_matrix[4] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[5];
- adjust.temperature_matrix[5] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[6];
- adjust.temperature_matrix[6] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[8];
- adjust.temperature_matrix[7] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[9];
- adjust.temperature_matrix[8] =
- pipe_ctx->stream->
- gamut_remap_matrix.matrix[10];
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->stream->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
@@ -2082,25 +1424,27 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix)
{
- int i;
- struct out_csc_color_matrix tbl_entry;
-
- if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
- == true) {
- enum dc_color_space color_space =
- pipe_ctx->stream->output_color_space;
-
- //uint16_t matrix[12];
- for (i = 0; i < 12; i++)
- tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
-
- tbl_entry.color_space = color_space;
- //tbl_entry.regval = matrix;
- pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
+ if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
} else {
- pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
+ if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
+
+static void program_output_csc(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id)
+{
+ if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
+ program_csc_matrix(pipe_ctx,
+ colorspace,
+ matrix);
+}
+
static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state->visible)
@@ -2130,7 +1474,7 @@ static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool is_rgb_cspace(enum dc_color_space output_color_space)
+bool is_rgb_cspace(enum dc_color_space output_color_space)
{
switch (output_color_space) {
case COLOR_SPACE_SRGB:
@@ -2188,91 +1532,170 @@ static void dcn10_get_surface_visual_confirm_color(
}
}
-static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
- struct vm_system_aperture_param *apt,
- struct dce_hwseq *hws)
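+/* Convert a fixed31_32 value into an unsigned integer_bits.fractional_bits
+ * fixed-point register value; negative inputs additionally get the bit just
+ * above those fields set as a sign flag.
+ */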
+static uint16_t fixed_point_to_int_frac(
+ struct fixed31_32 arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits)
{
- PHYSICAL_ADDRESS_LOC physical_page_number;
- uint32_t logical_addr_low;
- uint32_t logical_addr_high;
+ int32_t numerator;
+ int32_t divisor = 1 << fractional_bits;
- REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
- REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
+ uint16_t result;
- REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
- LOGICAL_ADDR, &logical_addr_low);
+ uint16_t d = (uint16_t)dal_fixed31_32_floor(
+ dal_fixed31_32_abs(
+ arg));
- REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- LOGICAL_ADDR, &logical_addr_high);
+ if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
+ numerator = (uint16_t)dal_fixed31_32_floor(
+ dal_fixed31_32_mul_int(
+ arg,
+ divisor));
+ else {
+ numerator = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(
+ dal_fixed31_32_from_int(
+ 1LL << integer_bits),
+ dal_fixed31_32_recip(
+ dal_fixed31_32_from_int(
+ divisor))));
+ }
- apt->sys_default.quad_part = physical_page_number.quad_part << 12;
- apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
- apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
+ if (numerator >= 0)
+ result = (uint16_t)numerator;
+ else
+ result = (uint16_t)(
+ (1 << (integer_bits + fractional_bits + 1)) + numerator);
+
+ if ((result != 0) && dal_fixed31_32_lt(
+ arg, dal_fixed31_32_zero))
+ result |= 1 << (integer_bits + fractional_bits);
+
+ return result;
}
-/* Temporary read settings, future will get values from kmd directly */
-static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
- struct vm_context0_param *vm0,
- struct dce_hwseq *hws)
+void build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
+ const struct dc_plane_state *plane_state)
+{
+ if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+ && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
+ && plane_state->input_csc_color_matrix.enable_adjustment
+ && plane_state->coeff_reduction_factor.value != 0) {
+ bias_and_scale->scale_blue = fixed_point_to_int_frac(
+ dal_fixed31_32_mul(plane_state->coeff_reduction_factor,
+ dal_fixed31_32_from_fraction(256, 255)),
+ 2,
+ 13);
+ bias_and_scale->scale_red = bias_and_scale->scale_blue;
+ bias_and_scale->scale_green = bias_and_scale->scale_blue;
+ } else {
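+ /* 0x2000 is 1.0 in the 2.13 fixed-point format produced by
+ * fixed_point_to_int_frac(..., 2, 13) above.
+ */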
+ bias_and_scale->scale_blue = 0x2000;
+ bias_and_scale->scale_red = 0x2000;
+ bias_and_scale->scale_green = 0x2000;
+ }
+}
+
+static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
- PHYSICAL_ADDRESS_LOC fb_base;
- PHYSICAL_ADDRESS_LOC fb_offset;
- uint32_t fb_base_value;
- uint32_t fb_offset_value;
+ struct dc_bias_and_scale bns_params = {0};
- REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
- REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
+ // program the input csc
+ dpp->funcs->dpp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO,
+ plane_state->input_csc_color_matrix,
+ COLOR_SPACE_YCBCR601_LIMITED);
- REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
- REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
+ //set scale and bias registers
+ build_prescale_params(&bns_params, plane_state);
+ if (dpp->funcs->dpp_program_bias_and_scale)
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+}
- REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
- REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
- REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
- REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
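+/* Rebuild the MPCC blending configuration for this pipe: look up the MPCC for
+ * this HUBP instance, remove it from the OPP's MPC tree if it is already in
+ * use, then insert it again with the new blend settings.
+ */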
+static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg;
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
- REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
+ /* TODO: proper fix once fpga works */
+
+ if (dc->debug.surface_visual_confirm)
+ dcn10_get_surface_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ else
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space,
+ &blnd_cfg.black_color);
+
+ if (per_pixel_alpha)
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ else
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+
+ blnd_cfg.overlap_only = false;
+ blnd_cfg.global_alpha = 0xff;
+ blnd_cfg.global_gain = 0xff;
+
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha.
+ */
+ blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
+ pipe_ctx->stream->output_color_space)
+ && per_pixel_alpha;
/*
- * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
- * Therefore we need to do
- * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
- * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
+ * TODO: remove hack
+ * Note: currently there is a bug in init_hw such that
+ * on resume from hibernate, BIOS sets up MPCC0, and
+ * we do mpcc_remove but the mpcc cannot go to idle
+ * after remove. This causes us to pick mpcc1 here,
+ * which causes a pstate hang for an as yet unknown reason.
*/
- fb_base.quad_part = (uint64_t)fb_base_value << 24;
- fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
- vm0->pte_base.quad_part += fb_base.quad_part;
- vm0->pte_base.quad_part -= fb_offset.quad_part;
+ mpcc_id = hubp->inst;
+
+ /* check if this MPCC is already being used */
+ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
+ /* remove MPCC if being used */
+ if (new_mpcc != NULL)
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
+ else
+ if (dc->debug.sanity_checks)
+ mpc->funcs->assert_mpcc_idle_before_connect(
+ dc->res_pool->mpc, mpcc_id);
+
+ /* Call MPC to insert new plane */
+ new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
+ mpc_tree_params,
+ &blnd_cfg,
+ NULL,
+ NULL,
+ hubp->inst,
+ mpcc_id);
+
+ ASSERT(new_mpcc != NULL);
+
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
}
-static void dcn10_program_pte_vm(struct hubp *hubp,
- enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
- enum dc_rotation_angle rotation,
- struct dce_hwseq *hws)
+static void update_scaler(struct pipe_ctx *pipe_ctx)
{
- struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- struct vm_system_aperture_param apt = { {{ 0 } } };
- struct vm_context0_param vm0 = { { { 0 } } };
+ bool per_pixel_alpha =
+ pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ /* TODO: proper fix once fpga works */
- mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
- mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
-
- hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
- hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
static void update_dchubp_dpp(
@@ -2280,100 +1703,106 @@ static void update_dchubp_dpp(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
- struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
union plane_size size = plane_state->plane_size;
- struct mpcc_cfg mpcc_cfg = {0};
- struct pipe_ctx *top_pipe;
- bool per_pixel_alpha = plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
- /* TODO: proper fix once fpga works */
/* depends on DML calculation, DPP clock value may change dynamically */
- enable_dppclk(
- dc->hwseq,
- pipe_ctx->pipe_idx,
- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk,
- context->bw.dcn.calc_clk.dppclk_div);
- dc->current_state->bw.dcn.cur_clk.dppclk_div =
- context->bw.dcn.calc_clk.dppclk_div;
- context->bw.dcn.cur_clk.dppclk_div = context->bw.dcn.calc_clk.dppclk_div;
+ /* If the requested max DPP clock is lower than the current dispclk, there
+ * is no need to divide by 2.
+ */
+ if (plane_state->update_flags.bits.full_update) {
+ bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
+ context->bw.dcn.cur_clk.dispclk_khz / 2;
+
+ dpp->funcs->dpp_dppclk_control(
+ dpp,
+ should_divided_by_2,
+ true);
+
+ dc->current_state->bw.dcn.cur_clk.dppclk_khz =
+ should_divided_by_2 ?
+ context->bw.dcn.cur_clk.dispclk_khz / 2 :
+ context->bw.dcn.cur_clk.dispclk_khz;
+ }
/* TODO: need an input parameter to tell which OTG the current DCHUB pipe is
* tied to. VTG is within DCHUBBUB, which is a common block shared by each
* pipe's HUBP. VTG is a 1:1 mapping with OTG; each pipe HUBP selects a VTG.
*/
- REG_UPDATE(DCHUBP_CNTL[pipe_ctx->pipe_idx], HUBP_VTG_SEL, pipe_ctx->stream_res.tg->inst);
+ if (plane_state->update_flags.bits.full_update) {
+ hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
- hubp->funcs->hubp_setup(
- hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs,
- &pipe_ctx->rq_regs,
- &pipe_ctx->pipe_dlg_param);
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
- if (dc->config.gpu_vm_support)
- dcn10_program_pte_vm(
- pipe_ctx->plane_res.hubp,
- plane_state->format,
- &plane_state->tiling_info,
- plane_state->rotation,
- hws
- );
+ if (plane_state->update_flags.bits.full_update ||
+ plane_state->update_flags.bits.bpp_change)
+ update_dpp(dpp, plane_state);
- dpp->funcs->ipp_setup(dpp,
- plane_state->format,
- EXPANSION_MODE_ZERO);
+ if (plane_state->update_flags.bits.full_update ||
+ plane_state->update_flags.bits.per_pixel_alpha_change)
+ update_mpcc(dc, pipe_ctx);
- mpcc_cfg.dpp_id = hubp->inst;
- mpcc_cfg.opp_id = pipe_ctx->stream_res.opp->inst;
- mpcc_cfg.tree_cfg = &(pipe_ctx->stream_res.opp->mpc_tree);
- for (top_pipe = pipe_ctx->top_pipe; top_pipe; top_pipe = top_pipe->top_pipe)
- mpcc_cfg.z_index++;
- if (dc->debug.surface_visual_confirm)
- dcn10_get_surface_visual_confirm_color(
- pipe_ctx, &mpcc_cfg.black_color);
- else
- color_space_to_black_color(
- dc, pipe_ctx->stream->output_color_space,
- &mpcc_cfg.black_color);
- mpcc_cfg.per_pixel_alpha = per_pixel_alpha;
- /* DCN1.0 has output CM before MPC which seems to screw with
- * pre-multiplied alpha.
- */
- mpcc_cfg.pre_multiplied_alpha = is_rgb_cspace(
- pipe_ctx->stream->output_color_space)
- && per_pixel_alpha;
- hubp->mpcc_id = dc->res_pool->mpc->funcs->add(dc->res_pool->mpc, &mpcc_cfg);
- hubp->opp_id = mpcc_cfg.opp_id;
+ if (plane_state->update_flags.bits.full_update ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change) {
+ update_scaler(pipe_ctx);
+ }
- pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
- pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
- /* scaler configuration */
- pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
- pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ if (plane_state->update_flags.bits.full_update ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change) {
+ hubp->funcs->mem_program_viewport(
+ hubp,
+ &pipe_ctx->plane_res.scl_data.viewport,
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+ }
- hubp->funcs->mem_program_viewport(hubp,
- &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
+ if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+ }
- /*gamut remap*/
- program_gamut_remap(pipe_ctx);
+ if (plane_state->update_flags.bits.full_update) {
+ /*gamut remap*/
+ program_gamut_remap(pipe_ctx);
- program_csc_matrix(pipe_ctx,
- pipe_ctx->stream->output_color_space,
- pipe_ctx->stream->csc_color_matrix.matrix);
+ program_output_csc(dc,
+ pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix,
+ hubp->opp_id);
+ }
+
+ if (plane_state->update_flags.bits.full_update ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change) {
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror);
+ }
- hubp->funcs->hubp_program_surface_config(
- hubp,
- plane_state->format,
- &plane_state->tiling_info,
- &size,
- plane_state->rotation,
- &plane_state->dcc,
- plane_state->horizontal_mirror);
+ hubp->power_gated = false;
dc->hwss.update_plane_addr(dc, pipe_ctx);
@@ -2381,28 +1810,62 @@ static void update_dchubp_dpp(
hubp->funcs->set_blank(hubp, false);
}
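+/* Program the OTG blank color and blank/unblank the timing generator. ABM is
+ * disabled before blanking and restored to the stream's level after
+ * unblanking.
+ */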
+static void dcn10_otg_blank(
+ struct dc *dc,
+ struct stream_resource stream_res,
+ struct dc_stream_state *stream,
+ bool blank)
+{
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+
+ /* program otg blank color */
+ color_space = stream->output_color_space;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ if (stream_res.tg->funcs->set_blank_color)
+ stream_res.tg->funcs->set_blank_color(
+ stream_res.tg,
+ &black_color);
+
+ if (!blank) {
+ if (stream_res.tg->funcs->set_blank)
+ stream_res.tg->funcs->set_blank(stream_res.tg, blank);
+ if (stream_res.abm)
+ stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level);
+ } else if (blank) {
+ if (stream_res.abm)
+ stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm);
+ if (stream_res.tg->funcs->set_blank)
+ stream_res.tg->funcs->set_blank(stream_res.tg, blank);
+ }
+}
+
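+/* Program the DPP HDR multiplier. sdr_white_level is presumably in nits, with
+ * 80 nits taken as the SDR reference white, so the multiplier is
+ * sdr_white_level / 80 converted to the DPP custom float format (0x1f000
+ * encodes 1.0).
+ */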
+static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+{
+ struct fixed31_32 multiplier = dal_fixed31_32_from_fraction(
+ pipe_ctx->plane_state->sdr_white_level, 80);
+ uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
+ struct custom_float_format fmt;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ if (pipe_ctx->plane_state->sdr_white_level > 80)
+ convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
+ pipe_ctx->plane_res.dpp, hw_mult);
+}
static void program_all_pipe_in_tree(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
- unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
-
if (pipe_ctx->top_pipe == NULL) {
-
- /* lock otg_master_update to process all pipes associated with
- * this OTG. this is done only one time.
- */
- /* watermark is for all pipes */
- program_watermarks(dc->hwseq, &context->bw.dcn.watermarks, ref_clk_mhz);
-
- if (dc->debug.sanity_checks) {
- /* pstate stuck check after watermark update */
- verify_allow_pstate_change_high(dc->hwseq);
- }
-
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+ bool blank = !is_pipe_tree_visible(pipe_ctx);
pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
@@ -2412,50 +1875,37 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);
- pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, !is_pipe_tree_visible(pipe_ctx));
+
+ dcn10_otg_blank(dc, pipe_ctx->stream_res,
+ pipe_ctx->stream, blank);
}
if (pipe_ctx->plane_state != NULL) {
- struct dc_cursor_position position = { 0 };
- struct pipe_ctx *cur_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
-
- dcn10_power_on_fe(dc, pipe_ctx, context);
-
- /* temporary dcn1 wa:
- * watermark update requires toggle after a/b/c/d sets are programmed
- * if hubp is pg then wm value doesn't get properaged to hubp
- * need to toggle after ungate to ensure wm gets to hubp.
- *
- * final solution: we need to get SMU to do the toggle as
- * DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST is owned by SMU we should have
- * both driver and fw accessing same register
- */
- toggle_watermark_change_req(dc->hwseq);
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ dcn10_enable_plane(dc, pipe_ctx, context);
update_dchubp_dpp(dc, pipe_ctx, context);
- /* TODO: this is a hack w/a for switching from mpo to pipe split */
- dc_stream_set_cursor_position(pipe_ctx->stream, &position);
-
- dc_stream_set_cursor_attributes(pipe_ctx->stream,
- &pipe_ctx->stream->cursor_attributes);
+ set_hdr_multiplier(pipe_ctx);
- if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) {
- dc->hwss.set_input_transfer_func(
- pipe_ctx, pipe_ctx->plane_state);
- dc->hwss.set_output_transfer_func(
- pipe_ctx, pipe_ctx->stream);
- }
- }
+ if (pipe_ctx->plane_state->update_flags.bits.full_update ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
- if (dc->debug.sanity_checks) {
- /* pstate stuck check after each pipe is programmed */
- verify_allow_pstate_change_high(dc->hwseq);
+ /* dcn10_translate_regamma_to_hw_format takes 750 us to finish,
+ * so only do gamma programming on a full update.
+ * TODO: this can be further optimized/cleaned up.
+ * Always call this for now since it does a memcmp inside before
+ * doing the heavy calculation and programming.
+ */
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
}
- if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
+ if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
+ }
}
static void dcn10_pplib_apply_display_requirements(
@@ -2464,16 +1914,10 @@ static void dcn10_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->all_displays_in_sync = false;/*todo*/
- pp_display_cfg->nb_pstate_switch_disable = false;
pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
- pp_display_cfg->avail_mclk_switch_time_us =
- context->bw.dcn.cur_clk.dram_ccm_us > 0 ? context->bw.dcn.cur_clk.dram_ccm_us : 0;
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us =
- context->bw.dcn.cur_clk.min_active_dram_ccm_us > 0 ? context->bw.dcn.cur_clk.min_active_dram_ccm_us : 0;
pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
@@ -2488,7 +1932,6 @@ static void dcn10_pplib_apply_display_requirements(
static void optimize_shared_resources(struct dc *dc)
{
if (dc->current_state->stream_count == 0) {
- apply_DEGVIDCN10_253_wa(dc);
/* S0i2 message */
dcn10_pplib_apply_display_requirements(dc, dc->current_state);
}
@@ -2499,67 +1942,79 @@ static void optimize_shared_resources(struct dc *dc)
static void ready_shared_resources(struct dc *dc, struct dc_state *context)
{
- if (dc->current_state->stream_count == 0 &&
- !dc->debug.disable_stutter)
- undo_DEGVIDCN10_253_wa(dc);
-
/* S0i2 message */
if (dc->current_state->stream_count == 0 &&
context->stream_count != 0)
dcn10_pplib_apply_display_requirements(dc, context);
}
+static struct pipe_ctx *find_top_pipe_for_stream(
+ struct dc *dc,
+ struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
+ continue;
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ if (!pipe_ctx->top_pipe)
+ return pipe_ctx;
+ }
+ return NULL;
+}
+
static void dcn10_apply_ctx_for_surface(
struct dc *dc,
const struct dc_stream_state *stream,
int num_planes,
struct dc_state *context)
{
- int i, be_idx;
+ int i;
+ struct timing_generator *tg;
+ bool removed_pipe[4] = { false };
+ unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+ bool program_water_mark = false;
+ struct dc_context *ctx = dc->ctx;
+ struct pipe_ctx *top_pipe_to_program =
+ find_top_pipe_for_stream(dc, context, stream);
- if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
+ if (!top_pipe_to_program)
+ return;
- be_idx = -1;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (stream == context->res_ctx.pipe_ctx[i].stream) {
- be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
- break;
- }
- }
+ tg = top_pipe_to_program->stream_res.tg;
- ASSERT(be_idx != -1);
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (num_planes == 0) {
- for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (old_pipe_ctx->stream_res.tg && old_pipe_ctx->stream_res.tg->inst == be_idx) {
- old_pipe_ctx->stream_res.tg->funcs->set_blank(old_pipe_ctx->stream_res.tg, true);
- dcn10_power_down_fe(dc, old_pipe_ctx->pipe_idx);
- }
- }
- return;
+ /* OTG blank before remove all front end */
+ dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
}
- /* reset unused mpcc */
+ /* Disconnect unused mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
- continue;
-
/*
* Powergate reused pipes that are not powergated
* fairly hacky right now, using opp_id as indicator
+ * TODO: after moving dc_post to dc_update, this will
+ * be removed.
*/
-
if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (pipe_ctx->plane_res.hubp->opp_id != 0xf && pipe_ctx->stream_res.tg->inst == be_idx) {
- dcn10_power_down_fe(dc, pipe_ctx->pipe_idx);
+ if (old_pipe_ctx->stream_res.tg == tg &&
+ old_pipe_ctx->plane_res.hubp &&
+ old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
+ dcn10_disable_plane(dc, old_pipe_ctx);
/*
* power down fe will unlock when calling reset, need
* to lock it back here. Messy, need rework.
@@ -2568,55 +2023,57 @@ static void dcn10_apply_ctx_for_surface(
}
}
+ if (!pipe_ctx->plane_state &&
+ old_pipe_ctx->plane_state &&
+ old_pipe_ctx->stream_res.tg == tg) {
- if ((!pipe_ctx->plane_state && old_pipe_ctx->plane_state)
- || (!pipe_ctx->stream && old_pipe_ctx->stream)) {
- if (old_pipe_ctx->stream_res.tg->inst != be_idx)
- continue;
-
- if (!old_pipe_ctx->top_pipe) {
- ASSERT(0);
- continue;
- }
-
- /* reset mpc */
- dc->res_pool->mpc->funcs->remove(
- dc->res_pool->mpc,
- &(old_pipe_ctx->stream_res.opp->mpc_tree),
- old_pipe_ctx->stream_res.opp->inst,
- old_pipe_ctx->pipe_idx);
- old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[old_pipe_ctx->plane_res.hubp->mpcc_id] = true;
-
- /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
- "[debug_mpo: apply_ctx disconnect pending on mpcc %d]\n",
- old_pipe_ctx->mpcc->inst);*/
-
- if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
-
- old_pipe_ctx->top_pipe = NULL;
- old_pipe_ctx->bottom_pipe = NULL;
- old_pipe_ctx->plane_state = NULL;
- old_pipe_ctx->stream = NULL;
+ plane_atomic_disconnect(dc, old_pipe_ctx);
+ removed_pipe[i] = true;
- dm_logger_write(dc->ctx->logger, LOG_DC,
+ DC_LOG_DC(
"Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
}
}
+ if (num_planes > 0)
+ program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+
+ if (num_planes == 0)
+ false_optc_underflow_wa(dc, stream, tg);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream != stream)
- continue;
+ if (pipe_ctx->stream == stream &&
+ pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->update_flags.bits.full_update)
+ program_water_mark = true;
- /* looking for top pipe to program */
- if (!pipe_ctx->top_pipe)
- program_all_pipe_in_tree(dc, pipe_ctx, context);
+ if (removed_pipe[i])
+ dcn10_disable_plane(dc, old_pipe_ctx);
}
- dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ if (program_water_mark) {
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after watermark update */
+ dcn10_verify_allow_pstate_change_high(dc);
+ }
+
+ /* watermark is for all pipes */
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+ &context->bw.dcn.watermarks, ref_clk_mhz);
+
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after watermark update */
+ dcn10_verify_allow_pstate_change_high(dc);
+ }
+ }
+/* DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\n============== Watermark parameters ==============\n"
"a.urgent_ns: %d \n"
"a.cstate_enter_plus_exit: %d \n"
@@ -2639,7 +2096,7 @@ static void dcn10_apply_ctx_for_surface(
context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
context->bw.dcn.watermarks.b.pte_meta_urgent_ns
);
- dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
"\nc.urgent_ns: %d \n"
"c.cstate_enter_plus_exit: %d \n"
"c.cstate_exit: %d \n"
@@ -2662,9 +2119,102 @@ static void dcn10_apply_ctx_for_surface(
context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
context->bw.dcn.watermarks.d.pte_meta_urgent_ns
);
+*/
+}
- if (dc->debug.sanity_checks)
- verify_allow_pstate_change_high(dc->hwseq);
+static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
+{
+ return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
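+/*
+ * Illustrative example (hypothetical values): with decrease_allowed == false,
+ * calc_clk == 650000 against cur_clk == 600000 returns true (the clock must
+ * rise), while calc_clk == 550000 returns false, keeping the higher clock
+ * until a decrease is explicitly allowed.
+ */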
+
+static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
+{
+ bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
+ context->bw.dcn.calc_clk.dppclk_khz;
+ bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
+ context->bw.dcn.cur_clk.dispclk_khz;
+ int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+ bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
+ context->bw.dcn.cur_clk.dppclk_khz;
+
+ /* increase clock: the current dpp divider is off, the requested divider is on */
+ if (dispclk_increase) {
+ /* already divided by 2, no need to reach target clk with 2 steps*/
+ if (cur_dpp_div)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ /* requested disp clk is lower than the maximum supported dpp clk,
+ * no need to reach the target clk in two steps.
+ */
+ if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ /* requested dpp clk is not divided by 2, still within the threshold */
+ if (!request_dpp_div)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ } else {
+ /* decrease clock: the current dppclk is divided by 2 and the
+ * requested dppclk is not divided by 2.
+ */
+
+ /* current dpp clk not divided by 2, no need to ramp*/
+ if (!cur_dpp_div)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ /* current disp clk is lower than current maximum dpp clk,
+ * no need to ramp
+ */
+ if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+
+ /* request dpp clk need to be divided by 2 */
+ if (request_dpp_div)
+ return context->bw.dcn.calc_clk.dispclk_khz;
+ }
+
+ return disp_clk_threshold;
+}
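+
+/*
+ * Worked example (hypothetical numbers): current dispclk 400 MHz with the dpp
+ * divider off, calculated dispclk 900 MHz with the divider requested, and a
+ * max supported dppclk of 600 MHz.  The threshold returned is 600 MHz, so the
+ * ramp first raises dispclk to 600 MHz, switches the DPPs to the divided
+ * clock, and only then raises dispclk to the final 900 MHz.
+ */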
+
+static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
+ context->bw.dcn.calc_clk.dppclk_khz;
+
+ int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
+
+ /* set disp clk to dpp clk threshold */
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ dispclk_to_dpp_threshold);
+
+ /* update request dpp clk division option */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+ pipe_ctx->plane_res.dpp,
+ request_dpp_div,
+ true);
+ }
+
+ /* If target clk not same as dppclk threshold, set to target clock */
+ if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ }
+
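+ /* record the newly applied values as the context's current clocks so
+ * the committed state reflects what was actually programmed
+ */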
+ context->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ context->bw.dcn.cur_clk.dppclk_khz =
+ context->bw.dcn.calc_clk.dppclk_khz;
+ context->bw.dcn.cur_clk.max_supported_dppclk_khz =
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz;
}
static void dcn10_set_bandwidth(
@@ -2678,37 +2228,39 @@ static void dcn10_set_bandwidth(
struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
if (dc->debug.sanity_checks) {
- verify_allow_pstate_change_high(dc->hwseq);
+ dcn10_verify_allow_pstate_change_high(dc);
}
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
return;
- if (decrease_allowed || context->bw.dcn.calc_clk.dispclk_khz
- > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
- dc->res_pool->display_clock->funcs->set_clock(
- dc->res_pool->display_clock,
- context->bw.dcn.calc_clk.dispclk_khz);
- dc->current_state->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- }
- if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
- > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+ dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
+ context->bw.dcn.cur_clk.dcfclk_khz =
+ context->bw.dcn.calc_clk.dcfclk_khz;
smu_req.hard_min_dcefclk_khz =
context->bw.dcn.calc_clk.dcfclk_khz;
}
- if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz
- > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
- smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
- }
- if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz
- > dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
- dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz =
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
}
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.fclk_khz,
+ dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
+ context->bw.dcn.cur_clk.fclk_khz =
+ context->bw.dcn.calc_clk.fclk_khz;
+ smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
+ }
+
smu_req.display_count = context->stream_count;
if (pp_smu->set_display_requirement)
@@ -2716,25 +2268,21 @@ static void dcn10_set_bandwidth(
*smu_req_cur = smu_req;
- /* Decrease in freq is increase in period so opposite comparison for dram_ccm */
- if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us
- < dc->current_state->bw.dcn.cur_clk.dram_ccm_us) {
- dc->current_state->bw.dcn.calc_clk.dram_ccm_us =
- context->bw.dcn.calc_clk.dram_ccm_us;
- context->bw.dcn.cur_clk.dram_ccm_us =
- context->bw.dcn.calc_clk.dram_ccm_us;
- }
- if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us
- < dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) {
- dc->current_state->bw.dcn.calc_clk.min_active_dram_ccm_us =
- context->bw.dcn.calc_clk.min_active_dram_ccm_us;
- context->bw.dcn.cur_clk.min_active_dram_ccm_us =
- context->bw.dcn.calc_clk.min_active_dram_ccm_us;
+ /* make sure dcf clk is programmed before dpp clk so that
+ * there is enough voltage to run the dpp clk
+ */
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.dispclk_khz,
+ dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
+
+ ramp_up_dispclk_with_dpp(dc, context);
}
+
dcn10_pplib_apply_display_requirements(dc, context);
if (dc->debug.sanity_checks) {
- verify_allow_pstate_change_high(dc->hwseq);
+ dcn10_verify_allow_pstate_change_high(dc);
}
/* need to fix this function. not doing the right thing here */
@@ -2779,6 +2327,8 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
value |= 0x80;
if (events->cursor_update)
value |= 0x2;
+ if (events->force_trigger)
+ value |= 0x1;
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
@@ -2838,10 +2388,10 @@ static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
dcn10_config_stereo_parameters(stream, &flags);
- pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+ pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
pipe_ctx->stream_res.opp,
flags.PROGRAM_STEREO == 1 ? true:false,
- stream->timing.flags.RIGHT_EYE_3D_POLARITY == 1 ? true:false);
+ &stream->timing);
pipe_ctx->stream_res.tg->funcs->program_stereo(
pipe_ctx->stream_res.tg,
@@ -2851,33 +2401,47 @@ static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
return;
}
+static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+{
+ int i;
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ if (res_pool->hubps[i]->inst == mpcc_inst)
+ return res_pool->hubps[i];
+ }
+ ASSERT(false);
+ return NULL;
+}
+
static void dcn10_wait_for_mpcc_disconnect(
struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx)
{
- int i;
+ int mpcc_inst;
if (dc->debug.sanity_checks) {
- verify_allow_pstate_change_high(dc->hwseq);
+ dcn10_verify_allow_pstate_change_high(dc);
}
if (!pipe_ctx->stream_res.opp)
return;
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i]) {
- res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, i);
- pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i] = false;
- res_pool->hubps[i]->funcs->set_blank(res_pool->hubps[i], true);
- /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ hubp->funcs->set_blank(hubp, true);
+ /*DC_LOG_ERROR(dc->ctx->logger,
"[debug_mpo: wait_for_mpcc finished waiting on mpcc %d]\n",
i);*/
}
}
if (dc->debug.sanity_checks) {
- verify_allow_pstate_change_high(dc->hwseq);
+ dcn10_verify_allow_pstate_change_high(dc);
}
}
@@ -2891,7 +2455,7 @@ static bool dcn10_dummy_display_power_gating(
return true;
}
-void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2911,7 +2475,45 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
}
}
+static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
+{
+ if (hws->ctx->dc->res_pool->hubbub != NULL)
+ hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+}
+
+static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_cursor_mi_param param = {
+ .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+ .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ };
+
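+ /* pos_cpy is a local copy: the cursor can be force-disabled for video
+ * planes and for bottom pipes of an MPO stack without modifying the
+ * stream's stored cursor_position
+ */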
+ if (pipe_ctx->plane_state->address.type
+ == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pos_cpy.enable = false;
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ pos_cpy.enable = false;
+
+ hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+ dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
+}
+
+static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.hubp, attributes);
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.dpp, attributes->color_format);
+}
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
@@ -2928,13 +2530,14 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.power_down = dce110_power_down,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
.update_info_frame = dce110_update_info_frame,
.enable_stream = dce110_enable_stream,
.disable_stream = dce110_disable_stream,
.unblank_stream = dce110_unblank_stream,
+ .blank_stream = dce110_blank_stream,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
- .power_down_front_end = dcn10_power_down_fe,
- .power_on_front_end = dcn10_power_on_fe,
+ .disable_plane = dcn10_disable_plane,
.pipe_control_lock = dcn10_pipe_control_lock,
.set_bandwidth = dcn10_set_bandwidth,
.reset_hw_ctx_wrap = reset_hw_ctx_wrap,
@@ -2948,8 +2551,13 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
.ready_shared_resources = ready_shared_resources,
.optimize_shared_resources = optimize_shared_resources,
+ .pplib_apply_display_requirements =
+ dcn10_pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
- .edp_power_control = hwss_edp_power_control
+ .edp_power_control = hwss_edp_power_control,
+ .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index ca53dc1..6c526b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -35,4 +35,6 @@ extern void fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
+bool is_rgb_cspace(enum dc_color_space output_color_space);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
index d7b5bd2..819b749 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -33,7 +33,6 @@
#define IPP_REG_LIST_DCN(id) \
SRI(FORMAT_CONTROL, CNVC_CFG, id), \
- SRI(DPP_CONTROL, DPP_TOP, id), \
SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
@@ -130,7 +129,6 @@ struct dcn10_ipp_mask {
};
struct dcn10_ipp_registers {
- uint32_t DPP_CONTROL;
uint32_t CURSOR_SETTINS;
uint32_t CURSOR_SETTINGS;
uint32_t CNVC_SURFACE_PIXEL_FORMAT;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 76573e1..179890b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -25,8 +25,6 @@
#include "reg_helper.h"
#include "dcn10_mpc.h"
-#include "dc.h"
-#include "mem_input.h"
#define REG(reg)\
mpc10->mpc_regs->reg
@@ -38,17 +36,13 @@
#define FN(reg_name, field_name) \
mpc10->mpc_shift->field_name, mpc10->mpc_mask->field_name
-#define MODE_TOP_ONLY 1
-#define MODE_BLEND 3
-#define BLND_PP_ALPHA 0
-#define BLND_GLOBAL_ALPHA 2
-
-static void mpc10_set_bg_color(
- struct dcn10_mpc *mpc10,
+void mpc1_set_bg_color(struct mpc *mpc,
struct tg_color *bg_color,
- int id)
+ int mpcc_id)
{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
/* mpc color is 12 bit. tg_color is 10 bit */
/* todo: might want to use 16 bit to represent color and have each
* hw block translate to correct color depth.
@@ -57,15 +51,47 @@ static void mpc10_set_bg_color(
uint32_t bg_g_y = bg_color->color_g_y << 2;
uint32_t bg_b_cb = bg_color->color_b_cb << 2;
- REG_SET(MPCC_BG_R_CR[id], 0,
+ REG_SET(MPCC_BG_R_CR[mpcc_id], 0,
MPCC_BG_R_CR, bg_r_cr);
- REG_SET(MPCC_BG_G_Y[id], 0,
+ REG_SET(MPCC_BG_G_Y[mpcc_id], 0,
MPCC_BG_G_Y, bg_g_y);
- REG_SET(MPCC_BG_B_CB[id], 0,
+ REG_SET(MPCC_BG_B_CB[mpcc_id], 0,
MPCC_BG_B_CB, bg_b_cb);
}
-void mpc10_assert_idle_mpcc(struct mpc *mpc, int id)
+static void mpc1_update_blending(
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ REG_UPDATE_5(MPCC_CONTROL[mpcc_id],
+ MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only,
+ MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha,
+ MPCC_GLOBAL_GAIN, blnd_cfg->global_gain);
+
+ mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
+}
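+
+/*
+ * Note: blnd_cfg is supplied by the caller (see mpc1_insert_plane); until a
+ * plane provides its own configuration, mpc1_init_mpcc leaves global_alpha
+ * and global_gain at 0xff, i.e. full alpha.
+ */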
+
+void mpc1_update_stereo_mix(
+ struct mpc *mpc,
+ struct mpcc_sm_cfg *sm_cfg,
+ int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ REG_UPDATE_6(MPCC_SM_CONTROL[mpcc_id],
+ MPCC_SM_EN, sm_cfg->enable,
+ MPCC_SM_MODE, sm_cfg->sm_mode,
+ MPCC_SM_FRAME_ALT, sm_cfg->frame_alt,
+ MPCC_SM_FIELD_ALT, sm_cfg->field_alt,
+ MPCC_SM_FORCE_NEXT_FRAME_POL, sm_cfg->force_next_frame_porlarity,
+ MPCC_SM_FORCE_NEXT_TOP_POL, sm_cfg->force_next_field_polarity);
+}
+void mpc1_assert_idle_mpcc(struct mpc *mpc, int id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
@@ -75,39 +101,52 @@ void mpc10_assert_idle_mpcc(struct mpc *mpc, int id)
1, 100000);
}
-static int mpc10_get_idle_mpcc_id(struct dcn10_mpc *mpc10)
+struct mpcc *mpc1_get_mpcc(struct mpc *mpc, int mpcc_id)
{
- int i;
- int last_free_mpcc_id = -1;
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- for (i = 0; i < mpc10->num_mpcc; i++) {
- uint32_t is_idle = 0;
+ ASSERT(mpcc_id < mpc10->num_mpcc);
+ return &(mpc->mpcc_array[mpcc_id]);
+}
- if (mpc10->mpcc_in_use_mask & 1 << i)
- continue;
+struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
+{
+ struct mpcc *tmp_mpcc = tree->opp_list;
- last_free_mpcc_id = i;
- REG_GET(MPCC_STATUS[i], MPCC_IDLE, &is_idle);
- if (is_idle)
- return i;
+ while (tmp_mpcc != NULL) {
+ if (tmp_mpcc->dpp_id == dpp_id)
+ return tmp_mpcc;
+ tmp_mpcc = tmp_mpcc->mpcc_bot;
}
+ return NULL;
+}
- /* This assert should never trigger, we have mpcc leak if it does */
- ASSERT(last_free_mpcc_id != -1);
-
- mpc10_assert_idle_mpcc(&mpc10->base, last_free_mpcc_id);
- return last_free_mpcc_id;
+bool mpc1_is_mpcc_idle(struct mpc *mpc, int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ unsigned int top_sel;
+ unsigned int opp_id;
+ unsigned int idle;
+
+ REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
+ REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
+ REG_GET(MPCC_STATUS[mpcc_id], MPCC_IDLE, &idle);
+ return (top_sel == 0xf && opp_id == 0xf && idle);
}
-static void mpc10_assert_mpcc_idle_before_connect(struct dcn10_mpc *mpc10, int id)
+void mpc1_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
unsigned int top_sel, mpc_busy, mpc_idle;
- REG_GET(MPCC_TOP_SEL[id],
+ REG_GET(MPCC_TOP_SEL[mpcc_id],
MPCC_TOP_SEL, &top_sel);
if (top_sel == 0xf) {
- REG_GET_2(MPCC_STATUS[id],
+ REG_GET_2(MPCC_STATUS[mpcc_id],
MPCC_BUSY, &mpc_busy,
MPCC_IDLE, &mpc_idle);
@@ -116,230 +155,269 @@ static void mpc10_assert_mpcc_idle_before_connect(struct dcn10_mpc *mpc10, int i
}
}
-void mpc10_mpcc_remove(
- struct mpc *mpc,
- struct mpc_tree_cfg *tree_cfg,
- int opp_id,
- int dpp_id)
-{
- struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_id, z_idx;
-
- /* find z_idx for the dpp to be removed */
- for (z_idx = 0; z_idx < tree_cfg->num_pipes; z_idx++)
- if (tree_cfg->dpp[z_idx] == dpp_id)
- break;
-
- if (z_idx == tree_cfg->num_pipes) {
- /* In case of resume from S3/S4, remove mpcc from bios left over */
- REG_SET(MPCC_OPP_ID[dpp_id], 0,
- MPCC_OPP_ID, 0xf);
- REG_SET(MPCC_TOP_SEL[dpp_id], 0,
- MPCC_TOP_SEL, 0xf);
- REG_SET(MPCC_BOT_SEL[dpp_id], 0,
- MPCC_BOT_SEL, 0xf);
- return;
- }
-
- mpcc_id = tree_cfg->mpcc[z_idx];
-
- REG_SET(MPCC_OPP_ID[mpcc_id], 0,
- MPCC_OPP_ID, 0xf);
- REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
- MPCC_TOP_SEL, 0xf);
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, 0xf);
-
- if (z_idx > 0) {
- int top_mpcc_id = tree_cfg->mpcc[z_idx - 1];
-
- if (z_idx + 1 < tree_cfg->num_pipes)
- /* mpcc to be removed is in the middle of the tree */
- REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
- MPCC_BOT_SEL, tree_cfg->mpcc[z_idx + 1]);
- else {
- /* mpcc to be removed is at the bottom of the tree */
- REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
- MPCC_BOT_SEL, 0xf);
- REG_UPDATE(MPCC_CONTROL[top_mpcc_id],
- MPCC_MODE, MODE_TOP_ONLY);
- }
- } else if (tree_cfg->num_pipes > 1)
- /* mpcc to be removed is at the top of the tree */
- REG_SET(MUX[opp_id], 0,
- MPC_OUT_MUX, tree_cfg->mpcc[z_idx + 1]);
- else
- /* mpcc to be removed is the only one in the tree */
- REG_SET(MUX[opp_id], 0, MPC_OUT_MUX, 0xf);
-
- /* mark this mpcc as not in use */
- mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
- tree_cfg->num_pipes--;
- for (; z_idx < tree_cfg->num_pipes; z_idx++) {
- tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx + 1];
- tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx + 1];
- }
- tree_cfg->dpp[tree_cfg->num_pipes] = 0xdeadbeef;
- tree_cfg->mpcc[tree_cfg->num_pipes] = 0xdeadbeef;
-}
-
-static void mpc10_add_to_tree_cfg(
+/*
+ * Insert DPP into MPC tree based on specified blending position.
+ * Only used for planes that are part of blending chain for OPP output
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be added to.
+ * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ * stereo mix must be disabled for the very bottom layer of the tree config.
+ * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
+ * [in] dpp_id - DPP instance for the plane to be added.
+ * [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return: struct mpcc* - MPCC that was added.
+ */
+struct mpcc *mpc1_insert_plane(
struct mpc *mpc,
- struct mpcc_cfg *cfg,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_mode = MODE_TOP_ONLY;
- int position = cfg->z_index;
- struct mpc_tree_cfg *tree_cfg = cfg->tree_cfg;
- int alpha_blnd_mode = cfg->per_pixel_alpha ?
- BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
- int z_idx;
+ struct mpcc *new_mpcc = NULL;
- REG_SET(MPCC_OPP_ID[mpcc_id], 0,
- MPCC_OPP_ID, cfg->opp_id);
+ /* sanity check parameters */
+ ASSERT(mpcc_id < mpc10->num_mpcc);
+ ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
- REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
- MPCC_TOP_SEL, cfg->dpp_id);
+ if (insert_above_mpcc) {
+ /* check that insert_above_mpcc exists in tree->opp_list */
+ struct mpcc *temp_mpcc = tree->opp_list;
- if (position == 0) {
- /* idle dpp/mpcc is added to the top layer of tree */
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc == NULL)
+ return NULL;
+ }
- if (tree_cfg->num_pipes > 0) {
- /* get instance of previous top mpcc */
- int prev_top_mpcc_id = tree_cfg->mpcc[0];
+ /* Get and update MPCC struct parameters */
+ new_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+ new_mpcc->dpp_id = dpp_id;
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, prev_top_mpcc_id);
- mpcc_mode = MODE_BLEND;
+ /* program mux and MPCC_MODE */
+ if (insert_above_mpcc) {
+ new_mpcc->mpcc_bot = insert_above_mpcc;
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, insert_above_mpcc->mpcc_id);
+ REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING);
+ } else {
+ new_mpcc->mpcc_bot = NULL;
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH);
+ }
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
+
+ /* update mpc tree mux setting */
+ if (tree->opp_list == insert_above_mpcc) {
+ /* insert as the topmost mpcc */
+ tree->opp_list = new_mpcc;
+ REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, mpcc_id);
+ } else {
+ /* find insert position */
+ struct mpcc *temp_mpcc = tree->opp_list;
+
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc && temp_mpcc->mpcc_bot == insert_above_mpcc) {
+ REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, MPCC_BOT_SEL, mpcc_id);
+ temp_mpcc->mpcc_bot = new_mpcc;
+ if (!insert_above_mpcc)
+ REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id],
+ MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING);
}
+ }
- /* opp will get new output. from new added mpcc */
- REG_SET(MUX[cfg->opp_id], 0, MPC_OUT_MUX, mpcc_id);
-
- } else if (position == tree_cfg->num_pipes) {
- /* idle dpp/mpcc is added to the bottom layer of tree */
-
- /* get instance of previous bottom mpcc, set to middle layer */
- int prev_bot_mpcc_id = tree_cfg->mpcc[tree_cfg->num_pipes - 1];
-
- REG_SET(MPCC_BOT_SEL[prev_bot_mpcc_id], 0,
- MPCC_BOT_SEL, mpcc_id);
- REG_UPDATE(MPCC_CONTROL[prev_bot_mpcc_id],
- MPCC_MODE, MODE_BLEND);
-
- /* mpcc_id become new bottom mpcc*/
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, 0xf);
+ /* update the blending configuration */
+ new_mpcc->blnd_cfg = *blnd_cfg;
+ mpc->funcs->update_blending(mpc, &new_mpcc->blnd_cfg, mpcc_id);
- } else {
- /* idle dpp/mpcc is added to middle of tree */
- int above_mpcc_id = tree_cfg->mpcc[position - 1];
- int below_mpcc_id = tree_cfg->mpcc[position];
-
- /* mpcc above new mpcc_id has new bottom mux*/
- REG_SET(MPCC_BOT_SEL[above_mpcc_id], 0,
- MPCC_BOT_SEL, mpcc_id);
- REG_UPDATE(MPCC_CONTROL[above_mpcc_id],
- MPCC_MODE, MODE_BLEND);
-
- /* mpcc_id bottom mux is from below mpcc*/
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, below_mpcc_id);
- mpcc_mode = MODE_BLEND;
+ /* update the stereo mix settings, if provided */
+ if (sm_cfg != NULL) {
+ new_mpcc->sm_cfg = *sm_cfg;
+ mpc1_update_stereo_mix(mpc, sm_cfg, mpcc_id);
}
- REG_SET_4(MPCC_CONTROL[mpcc_id], 0xffffffff,
- MPCC_MODE, mpcc_mode,
- MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
- MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha,
- MPCC_BLND_ACTIVE_OVERLAP_ONLY, false);
+ /* mark this mpcc as in use */
+ mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
- /* update mpc_tree_cfg with new mpcc */
- for (z_idx = tree_cfg->num_pipes; z_idx > position; z_idx--) {
- tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx - 1];
- tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx - 1];
- }
- tree_cfg->dpp[position] = cfg->dpp_id;
- tree_cfg->mpcc[position] = mpcc_id;
- tree_cfg->num_pipes++;
+ return new_mpcc;
}
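+
+/*
+ * Typical use (sketch only, local variable names are illustrative): to stack
+ * a new plane on top of an existing tree, pass the current top MPCC as
+ * insert_above_mpcc:
+ *
+ *	struct mpcc *top = tree->opp_list;
+ *	struct mpcc *new_top = mpc->funcs->insert_plane(mpc, tree, &blnd_cfg,
+ *				NULL, top, dpp_id, mpcc_id);
+ *
+ * Passing insert_above_mpcc == NULL appends the plane as the bottom-most
+ * layer of the blend chain instead.
+ */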
-int mpc10_mpcc_add(struct mpc *mpc, struct mpcc_cfg *cfg)
+/*
+ * Remove a specified MPCC from the MPC tree.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be removed from.
+ * [in/out] mpcc - MPCC to be removed from tree.
+ *
+ * Return: void
+ */
+void mpc1_remove_mpcc(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc_to_remove)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_id, z_idx;
-
- ASSERT(cfg->z_index < mpc10->num_mpcc);
-
- /* check in dpp already exists in mpc tree */
- for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
- if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
- break;
- if (z_idx == cfg->tree_cfg->num_pipes) {
- ASSERT(cfg->z_index <= cfg->tree_cfg->num_pipes);
- mpcc_id = mpc10_get_idle_mpcc_id(mpc10);
-
- /*
- * TODO: remove hack
- * Note: currently there is a bug in init_hw such that
- * on resume from hibernate, BIOS sets up MPCC0, and
- * we do mpcc_remove but the mpcc cannot go to idle
- * after remove. This cause us to pick mpcc1 here,
- * which causes a pstate hang for yet unknown reason.
- */
- mpcc_id = cfg->dpp_id;
- /* end hack*/
-
- ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
-
- if (mpc->ctx->dc->debug.sanity_checks)
- mpc10_assert_mpcc_idle_before_connect(mpc10, mpcc_id);
+ bool found = false;
+ int mpcc_id = mpcc_to_remove->mpcc_id;
+
+ if (tree->opp_list == mpcc_to_remove) {
+ found = true;
+ /* remove MPCC from top of tree */
+ if (mpcc_to_remove->mpcc_bot) {
+ /* set the next MPCC in list to be the top MPCC */
+ tree->opp_list = mpcc_to_remove->mpcc_bot;
+ REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, tree->opp_list->mpcc_id);
+ } else {
+ /* there are no other MPCCs in the list */
+ tree->opp_list = NULL;
+ REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, 0xf);
+ }
} else {
- ASSERT(cfg->z_index < cfg->tree_cfg->num_pipes);
- mpcc_id = cfg->tree_cfg->mpcc[z_idx];
- mpc10_mpcc_remove(mpc, cfg->tree_cfg, cfg->opp_id, cfg->dpp_id);
+ /* find the MPCC above mpcc_to_remove in the list */
+ struct mpcc *temp_mpcc = tree->opp_list;
+
+ while (temp_mpcc && temp_mpcc->mpcc_bot != mpcc_to_remove)
+ temp_mpcc = temp_mpcc->mpcc_bot;
+
+ if (temp_mpcc && temp_mpcc->mpcc_bot == mpcc_to_remove) {
+ found = true;
+ temp_mpcc->mpcc_bot = mpcc_to_remove->mpcc_bot;
+ if (mpcc_to_remove->mpcc_bot) {
+ /* remove MPCC in middle of list */
+ REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0,
+ MPCC_BOT_SEL, mpcc_to_remove->mpcc_bot->mpcc_id);
+ } else {
+ /* remove MPCC from bottom of list */
+ REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+ REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id],
+ MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH);
+ }
+ }
}
- /* add dpp/mpcc pair to mpc_tree_cfg and update mpcc registers */
- mpc10_add_to_tree_cfg(mpc, cfg, mpcc_id);
-
- /* set background color */
- mpc10_set_bg_color(mpc10, &cfg->black_color, mpcc_id);
+ if (found) {
+ /* turn off MPCC mux registers */
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
- /* mark this mpcc as in use */
- mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
+ /* mark this mpcc as not in use */
+ mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
+ mpcc_to_remove->dpp_id = 0xf;
+ mpcc_to_remove->mpcc_bot = NULL;
+ } else {
+ /* In case of resume from S3/S4, disconnect the mpcc left over by the BIOS */
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ }
+}
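+
+/*
+ * Typical teardown (sketch only): look up the MPCC currently blending a given
+ * DPP and detach it from the tree:
+ *
+ *	struct mpcc *mpcc = mpc->funcs->get_mpcc_for_dpp(tree, dpp_id);
+ *
+ *	if (mpcc)
+ *		mpc->funcs->remove_mpcc(mpc, tree, mpcc);
+ */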
- return mpcc_id;
+static void mpc1_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+{
+ mpcc->mpcc_id = mpcc_inst;
+ mpcc->dpp_id = 0xf;
+ mpcc->mpcc_bot = NULL;
+ mpcc->blnd_cfg.overlap_only = false;
+ mpcc->blnd_cfg.global_alpha = 0xff;
+ mpcc->blnd_cfg.global_gain = 0xff;
+ mpcc->sm_cfg.enable = false;
}
-void mpc10_update_blend_mode(
- struct mpc *mpc,
- struct mpcc_cfg *cfg)
+/*
+ * Reset the MPCC HW status by disconnecting all muxes.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ *
+ * Return: void
+ */
+void mpc1_mpc_init(struct mpc *mpc)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_id, z_idx;
- int alpha_blnd_mode = cfg->per_pixel_alpha ?
- BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
+ int mpcc_id;
+ int opp_id;
+
+ mpc10->mpcc_in_use_mask = 0;
+ for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) {
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
- /* find z_idx for the dpp that requires blending mode update*/
- for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
- if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
- break;
+ mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
+ }
- ASSERT(z_idx < cfg->tree_cfg->num_pipes);
- mpcc_id = cfg->tree_cfg->mpcc[z_idx];
+ for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+ if (REG(MUX[opp_id]))
+ REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf);
+ }
+}
- REG_UPDATE_2(MPCC_CONTROL[mpcc_id],
- MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
- MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha);
+void mpc1_init_mpcc_list_from_hw(
+ struct mpc *mpc,
+ struct mpc_tree *tree)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ unsigned int opp_id;
+ unsigned int top_sel;
+ unsigned int bot_sel;
+ unsigned int out_mux;
+ struct mpcc *mpcc;
+ int mpcc_id;
+ int bot_mpcc_id;
+
+ REG_GET(MUX[tree->opp_id], MPC_OUT_MUX, &out_mux);
+
+ if (out_mux != 0xf) {
+ for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) {
+ REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
+ REG_GET(MPCC_BOT_SEL[mpcc_id], MPCC_BOT_SEL, &bot_sel);
+
+ if (bot_sel == mpcc_id)
+ bot_sel = 0xf;
+
+ if ((opp_id == tree->opp_id) && (top_sel != 0xf)) {
+ mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+ mpcc->dpp_id = top_sel;
+ mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
+
+ if (out_mux == mpcc_id)
+ tree->opp_list = mpcc;
+ if (bot_sel != 0xf && bot_sel < mpc10->num_mpcc) {
+ bot_mpcc_id = bot_sel;
+ REG_GET(MPCC_OPP_ID[bot_mpcc_id], MPCC_OPP_ID, &opp_id);
+ REG_GET(MPCC_TOP_SEL[bot_mpcc_id], MPCC_TOP_SEL, &top_sel);
+ if ((opp_id == tree->opp_id) && (top_sel != 0xf)) {
+ struct mpcc *mpcc_bottom = mpc1_get_mpcc(mpc, bot_mpcc_id);
+
+ mpcc->mpcc_bot = mpcc_bottom;
+ }
+ }
+ }
+ }
+ }
}
const struct mpc_funcs dcn10_mpc_funcs = {
- .add = mpc10_mpcc_add,
- .remove = mpc10_mpcc_remove,
- .wait_for_idle = mpc10_assert_idle_mpcc,
- .update_blend_mode = mpc10_update_blend_mode,
+ .insert_plane = mpc1_insert_plane,
+ .remove_mpcc = mpc1_remove_mpcc,
+ .mpc_init = mpc1_mpc_init,
+ .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
+ .wait_for_idle = mpc1_assert_idle_mpcc,
+ .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
+ .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
+ .update_blending = mpc1_update_blending,
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
@@ -349,6 +427,8 @@ void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
const struct dcn_mpc_mask *mpc_mask,
int num_mpcc)
{
+ int i;
+
mpc10->base.ctx = ctx;
mpc10->base.funcs = &dcn10_mpc_funcs;
@@ -359,5 +439,8 @@ void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
mpc10->mpcc_in_use_mask = 0;
mpc10->num_mpcc = num_mpcc;
+
+ for (i = 0; i < MAX_MPCC; i++)
+ mpc1_init_mpcc(&mpc10->base.mpcc_array[i], i);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index 683ce4a..267a299 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -30,9 +30,6 @@
#define TO_DCN10_MPC(mpc_base) \
container_of(mpc_base, struct dcn10_mpc, base)
-#define MAX_MPCC 6
-#define MAX_OPP 6
-
#define MPC_COMMON_REG_LIST_DCN1_0(inst) \
SRII(MPCC_TOP_SEL, MPCC, inst),\
SRII(MPCC_BOT_SEL, MPCC, inst),\
@@ -42,7 +39,8 @@
SRII(MPCC_BG_G_Y, MPCC, inst),\
SRII(MPCC_BG_R_CR, MPCC, inst),\
SRII(MPCC_BG_B_CB, MPCC, inst),\
- SRII(MPCC_BG_B_CB, MPCC, inst)
+ SRII(MPCC_BG_B_CB, MPCC, inst),\
+ SRII(MPCC_SM_CONTROL, MPCC, inst)
#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
SRII(MUX, MPC_OUT, inst)
@@ -56,6 +54,7 @@
uint32_t MPCC_BG_G_Y[MAX_MPCC]; \
uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
+ uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
uint32_t MUX[MAX_OPP];
#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
@@ -65,12 +64,20 @@
SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_BLND_MODE, mask_sh),\
SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_MULTIPLIED_MODE, mask_sh),\
SF(MPCC0_MPCC_CONTROL, MPCC_BLND_ACTIVE_OVERLAP_ONLY, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_ALPHA, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_GAIN, mask_sh),\
SF(MPCC0_MPCC_STATUS, MPCC_IDLE, mask_sh),\
SF(MPCC0_MPCC_STATUS, MPCC_BUSY, mask_sh),\
SF(MPCC0_MPCC_OPP_ID, MPCC_OPP_ID, mask_sh),\
SF(MPCC0_MPCC_BG_G_Y, MPCC_BG_G_Y, mask_sh),\
SF(MPCC0_MPCC_BG_R_CR, MPCC_BG_R_CR, mask_sh),\
SF(MPCC0_MPCC_BG_B_CB, MPCC_BG_B_CB, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_EN, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_MODE, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FRAME_ALT, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
#define MPC_REG_FIELD_LIST(type) \
@@ -80,12 +87,20 @@
type MPCC_ALPHA_BLND_MODE;\
type MPCC_ALPHA_MULTIPLIED_MODE;\
type MPCC_BLND_ACTIVE_OVERLAP_ONLY;\
+ type MPCC_GLOBAL_ALPHA;\
+ type MPCC_GLOBAL_GAIN;\
type MPCC_IDLE;\
type MPCC_BUSY;\
type MPCC_OPP_ID;\
type MPCC_BG_G_Y;\
type MPCC_BG_R_CR;\
type MPCC_BG_B_CB;\
+ type MPCC_SM_EN;\
+ type MPCC_SM_MODE;\
+ type MPCC_SM_FRAME_ALT;\
+ type MPCC_SM_FIELD_ALT;\
+ type MPCC_SM_FORCE_NEXT_FRAME_POL;\
+ type MPCC_SM_FORCE_NEXT_TOP_POL;\
type MPC_OUT_MUX;
struct dcn_mpc_registers {
@@ -117,22 +132,55 @@ void dcn10_mpc_construct(struct dcn10_mpc *mpcc10,
const struct dcn_mpc_mask *mpc_mask,
int num_mpcc);
-int mpc10_mpcc_add(
- struct mpc *mpc,
- struct mpcc_cfg *cfg);
-
-void mpc10_mpcc_remove(
- struct mpc *mpc,
- struct mpc_tree_cfg *tree_cfg,
- int opp_id,
- int dpp_id);
-
-void mpc10_assert_idle_mpcc(
- struct mpc *mpc,
- int id);
-
-void mpc10_update_blend_mode(
- struct mpc *mpc,
- struct mpcc_cfg *cfg);
+struct mpcc *mpc1_insert_plane(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+void mpc1_remove_mpcc(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc);
+
+void mpc1_mpc_init(
+ struct mpc *mpc);
+
+void mpc1_assert_idle_mpcc(
+ struct mpc *mpc,
+ int id);
+
+void mpc1_set_bg_color(
+ struct mpc *mpc,
+ struct tg_color *bg_color,
+ int id);
+
+void mpc1_update_stereo_mix(
+ struct mpc *mpc,
+ struct mpcc_sm_cfg *sm_cfg,
+ int mpcc_id);
+
+bool mpc1_is_mpcc_idle(
+ struct mpc *mpc,
+ int mpcc_id);
+
+void mpc1_assert_mpcc_idle_before_connect(
+ struct mpc *mpc,
+ int mpcc_id);
+
+void mpc1_init_mpcc_list_from_hw(
+ struct mpc *mpc,
+ struct mpc_tree *tree);
+
+struct mpcc *mpc1_get_mpcc(
+ struct mpc *mpc,
+ int mpcc_id);
+
+struct mpcc *mpc1_get_mpcc_for_dpp(
+ struct mpc_tree *tree,
+ int dpp_id);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index a136f70..77a1a9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -38,7 +38,6 @@
oppn10->base.ctx
-
/************* FORMATTER ************/
/**
@@ -47,7 +46,7 @@
* 2) enable truncation
* 3) HW remove 12bit FMT support for DCE11 power saving reason.
*/
-static void set_truncation(
+static void opp1_set_truncation(
struct dcn10_opp *oppn10,
const struct bit_depth_reduction_params *params)
{
@@ -57,7 +56,7 @@ static void set_truncation(
FMT_TRUNCATE_MODE, params->flags.TRUNCATE_MODE);
}
-static void set_spatial_dither(
+static void opp1_set_spatial_dither(
struct dcn10_opp *oppn10,
const struct bit_depth_reduction_params *params)
{
@@ -136,14 +135,14 @@ static void set_spatial_dither(
FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM);
}
-static void oppn10_program_bit_depth_reduction(
+void opp1_program_bit_depth_reduction(
struct output_pixel_processor *opp,
const struct bit_depth_reduction_params *params)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
- set_truncation(oppn10, params);
- set_spatial_dither(oppn10, params);
+ opp1_set_truncation(oppn10, params);
+ opp1_set_spatial_dither(oppn10, params);
/* TODO
* set_temporal_dither(oppn10, params);
*/
@@ -156,7 +155,7 @@ static void oppn10_program_bit_depth_reduction(
* 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
* 1: YCbCr 4:2:2
*/
-static void set_pixel_encoding(
+static void opp1_set_pixel_encoding(
struct dcn10_opp *oppn10,
const struct clamping_and_pixel_encoding_params *params)
{
@@ -186,7 +185,7 @@ static void set_pixel_encoding(
* 7 for programable
* 2) Enable clamp if Limited range requested
*/
-static void opp_set_clamping(
+static void opp1_set_clamping(
struct dcn10_opp *oppn10,
const struct clamping_and_pixel_encoding_params *params)
{
@@ -224,7 +223,7 @@ static void opp_set_clamping(
}
-static void oppn10_set_dyn_expansion(
+void opp1_set_dyn_expansion(
struct output_pixel_processor *opp,
enum dc_color_space color_sp,
enum dc_color_depth color_dpth,
@@ -264,17 +263,17 @@ static void oppn10_set_dyn_expansion(
}
}
-static void opp_program_clamping_and_pixel_encoding(
+static void opp1_program_clamping_and_pixel_encoding(
struct output_pixel_processor *opp,
const struct clamping_and_pixel_encoding_params *params)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
- opp_set_clamping(oppn10, params);
- set_pixel_encoding(oppn10, params);
+ opp1_set_clamping(oppn10, params);
+ opp1_set_pixel_encoding(oppn10, params);
}
-static void oppn10_program_fmt(
+void opp1_program_fmt(
struct output_pixel_processor *opp,
struct bit_depth_reduction_params *fmt_bit_depth,
struct clamping_and_pixel_encoding_params *clamping)
@@ -286,44 +285,113 @@ static void oppn10_program_fmt(
/* dithering is affected by <CrtcSourceSelect>, hence should be
* programmed afterwards */
- oppn10_program_bit_depth_reduction(
+ opp1_program_bit_depth_reduction(
opp,
fmt_bit_depth);
- opp_program_clamping_and_pixel_encoding(
+ opp1_program_clamping_and_pixel_encoding(
opp,
clamping);
return;
}
+void opp1_program_stereo(
+ struct output_pixel_processor *opp,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ uint32_t active_width = timing->h_addressable - timing->h_border_left - timing->h_border_right;
+ uint32_t space1_size = timing->v_total - timing->v_addressable;
+ /* TODO: confirm computation of space2_size */
+ uint32_t space2_size = timing->v_total - timing->v_addressable;
+
+ if (!enable) {
+ active_width = 0;
+ space1_size = 0;
+ space2_size = 0;
+ }
+
+ /* TODO: for which cases should FMT_STEREOSYNC_OVERRIDE be set? */
+ REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, 0);
+
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, active_width);
+
+ /* Program the OPPBUF_3D_VACT_SPACE1_SIZE and OPPBUF_3D_VACT_SPACE2_SIZE registers.
+ * In 3D progressive frames, vertical active space occurs only between the two
+ * frames, so only OPPBUF_3D_VACT_SPACE1_SIZE needs to be programmed.
+ * In 3D frame-alternate modes (left and right frames, or top and bottom fields),
+ * OPPBUF_3D_VACT_SPACE2_SIZE is programmed instead.
+ */
+ if (timing->timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE)
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, space2_size);
+ else
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
+
+ /* TODO: Is programming of OPPBUF_DUMMY_DATA_R/G/B needed? */
+ /*
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+ OPPBUF_DUMMY_DATA_R, data_r);
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
+ OPPBUF_DUMMY_DATA_G, data_g);
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
+ OPPBUF_DUMMY_DATA_B, _data_b);
+ */
+}
+
+void opp1_program_oppbuf(
+ struct output_pixel_processor *opp,
+ struct oppbuf_params *oppbuf)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+ /* Program the oppbuf active width to be the frame width from mpc */
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, oppbuf->active_width);
-static void oppn10_set_stereo_polarity(
- struct output_pixel_processor *opp,
- bool enable, bool rightEyePolarity)
+ /* Specifies the number of segments in multi-segment mode (DP-MSO operation)
+ * description "In 1/2/4 segment mode, specifies the horizontal active width in pixels of the display panel.
+ * In 4 segment split left/right mode, specifies the horizontal 1/2 active width in pixels of the display panel.
+ * Used to determine segment boundaries in multi-segment mode. Used to determine the width of the vertical active space in 3D frame packed modes.
+ * OPPBUF_ACTIVE_WIDTH must be integer divisible by the total number of segments."
+ */
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, oppbuf->mso_segmentation);
+
+ /* description "Specifies the number of overlap pixels (1-8 overlapping pixels supported), used in multi-segment mode (DP-MSO operation)" */
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, oppbuf->mso_overlap_pixel_num);
+
+ /* description "Specifies the number of times a pixel is replicated (0-15 pixel replications supported).
+ * A value of 0 disables replication. The total number of times a pixel is output is OPPBUF_PIXEL_REPETITION + 1."
+ */
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition);
+
+}
+
+void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+ uint32_t regval = enable ? 1 : 0;
- REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, enable);
+ REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval);
}
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
-static void dcn10_opp_destroy(struct output_pixel_processor **opp)
+void opp1_destroy(struct output_pixel_processor **opp)
{
kfree(TO_DCN10_OPP(*opp));
*opp = NULL;
}
static struct opp_funcs dcn10_opp_funcs = {
- .opp_set_dyn_expansion = oppn10_set_dyn_expansion,
- .opp_program_fmt = oppn10_program_fmt,
- .opp_program_bit_depth_reduction = oppn10_program_bit_depth_reduction,
- .opp_set_stereo_polarity = oppn10_set_stereo_polarity,
- .opp_destroy = dcn10_opp_destroy
+ .opp_set_dyn_expansion = opp1_set_dyn_expansion,
+ .opp_program_fmt = opp1_program_fmt,
+ .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
+ .opp_program_stereo = opp1_program_stereo,
+ .opp_pipe_clock_control = opp1_pipe_clock_control,
+ .opp_destroy = opp1_destroy
};
void dcn10_opp_construct(struct dcn10_opp *oppn10,
@@ -333,19 +401,12 @@ void dcn10_opp_construct(struct dcn10_opp *oppn10,
const struct dcn10_opp_shift *opp_shift,
const struct dcn10_opp_mask *opp_mask)
{
- int i;
+
oppn10->base.ctx = ctx;
oppn10->base.inst = inst;
oppn10->base.funcs = &dcn10_opp_funcs;
- oppn10->base.mpc_tree.dpp[0] = inst;
- oppn10->base.mpc_tree.mpcc[0] = inst;
- oppn10->base.mpc_tree.num_pipes = 1;
- for (i = 0; i < MAX_PIPES; i++)
- oppn10->base.mpcc_disconnect_pending[i] = false;
-
oppn10->regs = regs;
oppn10->opp_shift = opp_shift;
oppn10->opp_mask = opp_mask;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index 790ce60..0f10ade 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -41,11 +41,30 @@
SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
SRI(FMT_CLAMP_CNTL, FMT, id), \
SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
- SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id)
+ SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id), \
+ SRI(OPPBUF_CONTROL, OPPBUF, id),\
+ SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \
+ SRI(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \
+ SRI(OPP_PIPE_CONTROL, OPP_PIPE, id)
#define OPP_REG_LIST_DCN10(id) \
OPP_REG_LIST_DCN(id)
+#define OPP_COMMON_REG_VARIABLE_LIST \
+ uint32_t FMT_BIT_DEPTH_CONTROL; \
+ uint32_t FMT_CONTROL; \
+ uint32_t FMT_DITHER_RAND_R_SEED; \
+ uint32_t FMT_DITHER_RAND_G_SEED; \
+ uint32_t FMT_DITHER_RAND_B_SEED; \
+ uint32_t FMT_CLAMP_CNTL; \
+ uint32_t FMT_DYNAMIC_EXP_CNTL; \
+ uint32_t FMT_MAP420_MEMORY_CONTROL; \
+ uint32_t OPPBUF_CONTROL; \
+ uint32_t OPPBUF_CONTROL1; \
+ uint32_t OPPBUF_3D_PARAMETERS_0; \
+ uint32_t OPPBUF_3D_PARAMETERS_1; \
+ uint32_t OPP_PIPE_CONTROL
+
#define OPP_MASK_SH_LIST_DCN(mask_sh) \
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh), \
@@ -68,46 +87,19 @@
OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh), \
OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh), \
OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh), \
- OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh)
+ OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, mask_sh), \
+ OPP_SF(OPP_PIPE0_OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh)
#define OPP_MASK_SH_LIST_DCN10(mask_sh) \
- OPP_MASK_SH_LIST_DCN(mask_sh)
+ OPP_MASK_SH_LIST_DCN(mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, mask_sh)
#define OPP_DCN10_REG_FIELD_LIST(type) \
- type DPG_EN; \
- type DPG_MODE; \
- type DPG_VRES; \
- type DPG_HRES; \
- type DPG_COLOUR0_R_CR; \
- type DPG_COLOUR1_R_CR; \
- type DPG_COLOUR0_B_CB; \
- type DPG_COLOUR1_B_CB; \
- type DPG_COLOUR0_G_Y; \
- type DPG_COLOUR1_G_Y; \
- type CM_OCSC_C11; \
- type CM_OCSC_C12; \
- type CM_OCSC_C13; \
- type CM_OCSC_C14; \
- type CM_OCSC_C21; \
- type CM_OCSC_C22; \
- type CM_OCSC_C23; \
- type CM_OCSC_C24; \
- type CM_OCSC_C31; \
- type CM_OCSC_C32; \
- type CM_OCSC_C33; \
- type CM_OCSC_C34; \
- type CM_COMB_C11; \
- type CM_COMB_C12; \
- type CM_COMB_C13; \
- type CM_COMB_C14; \
- type CM_COMB_C21; \
- type CM_COMB_C22; \
- type CM_COMB_C23; \
- type CM_COMB_C24; \
- type CM_COMB_C31; \
- type CM_COMB_C32; \
- type CM_COMB_C33; \
- type CM_COMB_C34; \
type FMT_TRUNCATE_EN; \
type FMT_TRUNCATE_DEPTH; \
type FMT_TRUNCATE_MODE; \
@@ -129,7 +121,19 @@
type FMT_DYNAMIC_EXP_EN; \
type FMT_DYNAMIC_EXP_MODE; \
type FMT_MAP420MEM_PWR_FORCE; \
- type FMT_STEREOSYNC_OVERRIDE
+ type FMT_STEREOSYNC_OVERRIDE; \
+ type OPPBUF_ACTIVE_WIDTH;\
+ type OPPBUF_PIXEL_REPETITION;\
+ type OPPBUF_DISPLAY_SEGMENTATION;\
+ type OPPBUF_OVERLAP_PIXEL_NUM;\
+ type OPPBUF_NUM_SEGMENT_PADDED_PIXELS; \
+ type OPPBUF_3D_VACT_SPACE1_SIZE; \
+ type OPPBUF_3D_VACT_SPACE2_SIZE; \
+ type OPP_PIPE_CLOCK_EN
+
+struct dcn10_opp_registers {
+ OPP_COMMON_REG_VARIABLE_LIST;
+};
struct dcn10_opp_shift {
OPP_DCN10_REG_FIELD_LIST(uint8_t);
@@ -139,33 +143,6 @@ struct dcn10_opp_mask {
OPP_DCN10_REG_FIELD_LIST(uint32_t);
};
-struct dcn10_opp_registers {
- uint32_t DPG_CONTROL;
- uint32_t DPG_COLOUR_B_CB;
- uint32_t DPG_COLOUR_G_Y;
- uint32_t DPG_COLOUR_R_CR;
- uint32_t CM_OCSC_C11_C12;
- uint32_t CM_OCSC_C13_C14;
- uint32_t CM_OCSC_C21_C22;
- uint32_t CM_OCSC_C23_C24;
- uint32_t CM_OCSC_C31_C32;
- uint32_t CM_OCSC_C33_C34;
- uint32_t CM_COMB_C11_C12;
- uint32_t CM_COMB_C13_C14;
- uint32_t CM_COMB_C21_C22;
- uint32_t CM_COMB_C23_C24;
- uint32_t CM_COMB_C31_C32;
- uint32_t CM_COMB_C33_C34;
- uint32_t FMT_BIT_DEPTH_CONTROL;
- uint32_t FMT_CONTROL;
- uint32_t FMT_DITHER_RAND_R_SEED;
- uint32_t FMT_DITHER_RAND_G_SEED;
- uint32_t FMT_DITHER_RAND_B_SEED;
- uint32_t FMT_CLAMP_CNTL;
- uint32_t FMT_DYNAMIC_EXP_CNTL;
- uint32_t FMT_MAP420_MEMORY_CONTROL;
-};
-
struct dcn10_opp {
struct output_pixel_processor base;
@@ -183,4 +160,28 @@ void dcn10_opp_construct(struct dcn10_opp *oppn10,
const struct dcn10_opp_shift *opp_shift,
const struct dcn10_opp_mask *opp_mask);
+void opp1_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal);
+
+void opp1_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+void opp1_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params);
+
+void opp1_program_stereo(
+ struct output_pixel_processor *opp,
+ bool enable,
+ const struct dc_crtc_timing *timing);
+
+void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable);
+
+void opp1_destroy(struct output_pixel_processor **opp);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index fced178..4bf64d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -23,19 +23,20 @@
*
*/
+
#include "reg_helper.h"
-#include "dcn10_timing_generator.h"
+#include "dcn10_optc.h"
#include "dc.h"
#define REG(reg)\
- tgn10->tg_regs->reg
+ optc1->tg_regs->reg
#define CTX \
- tgn10->base.ctx
+ optc1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
- tgn10->tg_shift->field_name, tgn10->tg_mask->field_name
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100
@@ -45,8 +46,8 @@
* This is a workaround for a bug that has existed since R5xx and has not been
* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
*/
-static void tgn10_apply_front_porch_workaround(
- struct timing_generator *tg,
+static void optc1_apply_front_porch_workaround(
+ struct timing_generator *optc,
struct dc_crtc_timing *timing)
{
if (timing->flags.INTERLACE == 1) {
@@ -58,30 +59,30 @@ static void tgn10_apply_front_porch_workaround(
}
}
-static void tgn10_program_global_sync(
- struct timing_generator *tg)
+void optc1_program_global_sync(
+ struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
- if (tg->dlg_otg_param.vstartup_start == 0) {
+ if (optc->dlg_otg_param.vstartup_start == 0) {
BREAK_TO_DEBUGGER();
return;
}
REG_SET(OTG_VSTARTUP_PARAM, 0,
- VSTARTUP_START, tg->dlg_otg_param.vstartup_start);
+ VSTARTUP_START, optc->dlg_otg_param.vstartup_start);
REG_SET_2(OTG_VUPDATE_PARAM, 0,
- VUPDATE_OFFSET, tg->dlg_otg_param.vupdate_offset,
- VUPDATE_WIDTH, tg->dlg_otg_param.vupdate_width);
+ VUPDATE_OFFSET, optc->dlg_otg_param.vupdate_offset,
+ VUPDATE_WIDTH, optc->dlg_otg_param.vupdate_width);
REG_SET(OTG_VREADY_PARAM, 0,
- VREADY_OFFSET, tg->dlg_otg_param.vready_offset);
+ VREADY_OFFSET, optc->dlg_otg_param.vready_offset);
}
-static void tgn10_disable_stereo(struct timing_generator *tg)
+static void optc1_disable_stereo(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_STEREO_CONTROL, 0,
OTG_STEREO_EN, 0);
@@ -90,11 +91,6 @@ static void tgn10_disable_stereo(struct timing_generator *tg)
OTG_3D_STRUCTURE_EN, 0,
OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
-
- REG_UPDATE(OPPBUF_CONTROL,
- OPPBUF_ACTIVE_WIDTH, 0);
- REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
- OPPBUF_3D_VACT_SPACE1_SIZE, 0);
}
/**
@@ -102,8 +98,8 @@ static void tgn10_disable_stereo(struct timing_generator *tg)
* Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
* Including SYNC. Call BIOS command table to program Timings.
*/
-static void tgn10_program_timing(
- struct timing_generator *tg,
+void optc1_program_timing(
+ struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing,
bool use_vbios)
{
@@ -121,10 +117,10 @@ static void tgn10_program_timing(
uint32_t h_div_2;
int32_t vertical_line_start;
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
patched_crtc_timing = *dc_crtc_timing;
- tgn10_apply_front_porch_workaround(tg, &patched_crtc_timing);
+ optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
/* Load horizontal timing */
@@ -217,7 +213,7 @@ static void tgn10_program_timing(
/* Use OTG_VERTICAL_INTERRUPT2 replace VUPDATE interrupt,
* program the reg for interrupt postition.
*/
- vertical_line_start = asic_blank_end - tg->dlg_otg_param.vstartup_start + 1;
+ vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
if (vertical_line_start < 0) {
ASSERT(0);
vertical_line_start = 0;
@@ -233,26 +229,25 @@ static void tgn10_program_timing(
OTG_V_SYNC_A_POL, v_sync_polarity);
v_init = asic_blank_start;
- if (tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
- tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
- tg->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
+ if (optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ optc->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
start_point = 1;
if (patched_crtc_timing.flags.INTERLACE == 1)
field_num = 1;
}
v_fp2 = 0;
- if (tg->dlg_otg_param.vstartup_start > asic_blank_end)
- v_fp2 = tg->dlg_otg_param.vstartup_start > asic_blank_end;
+ if (optc->dlg_otg_param.vstartup_start > asic_blank_end)
+ v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end;
/* Interlace */
if (patched_crtc_timing.flags.INTERLACE == 1) {
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 1);
v_init = v_init / 2;
- if ((tg->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
+ if ((optc->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
v_fp2 = v_fp2 / 2;
- }
- else
+ } else
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 0);
@@ -270,13 +265,13 @@ static void tgn10_program_timing(
OTG_START_POINT_CNTL, start_point,
OTG_FIELD_NUMBER_CNTL, field_num);
- tgn10_program_global_sync(tg);
+ optc1_program_global_sync(optc);
/* TODO
* patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
* program_horz_count_by_2
* for DVI 30bpp mode, 0 otherwise
- * program_horz_count_by_2(tg, &patched_crtc_timing);
+ * program_horz_count_by_2(optc, &patched_crtc_timing);
*/
/* Enable stereo - only when we need to pack 3D frame. Other types
@@ -290,13 +285,23 @@ static void tgn10_program_timing(
}
+static void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ uint32_t blank_data_double_buffer_enable = enable ? 1 : 0;
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_BLANK_DATA_DOUBLE_BUFFER_EN, blank_data_double_buffer_enable);
+}
+
/**
* unblank_crtc
* Call ASIC Control Object to UnBlank CRTC.
*/
-static void tgn10_unblank_crtc(struct timing_generator *tg)
+static void optc1_unblank_crtc(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t vertical_interrupt_enable = 0;
REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
@@ -306,8 +311,7 @@ static void tgn10_unblank_crtc(struct timing_generator *tg)
* this check will be removed.
*/
if (vertical_interrupt_enable)
- REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
- OTG_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
+ optc1_set_blank_data_double_buffer(optc, true);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 0,
@@ -319,37 +323,29 @@ static void tgn10_unblank_crtc(struct timing_generator *tg)
* Call ASIC Control Object to Blank CRTC.
*/
-static void tgn10_blank_crtc(struct timing_generator *tg)
+static void optc1_blank_crtc(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 1,
OTG_BLANK_DE_MODE, 0);
- /* todo: why are we waiting for BLANK_DATA_EN? shouldn't we be waiting
- * for status?
- */
- REG_WAIT(OTG_BLANK_CONTROL,
- OTG_BLANK_DATA_EN, 1,
- 1, 100000);
-
- REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
- OTG_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+ optc1_set_blank_data_double_buffer(optc, false);
}
-static void tgn10_set_blank(struct timing_generator *tg,
+void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking)
{
if (enable_blanking)
- tgn10_blank_crtc(tg);
+ optc1_blank_crtc(optc);
else
- tgn10_unblank_crtc(tg);
+ optc1_unblank_crtc(optc);
}
-static bool tgn10_is_blanked(struct timing_generator *tg)
+bool optc1_is_blanked(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t blank_en;
uint32_t blank_state;
@@ -360,9 +356,9 @@ static bool tgn10_is_blanked(struct timing_generator *tg)
return blank_en && blank_state;
}
-static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
+void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (enable) {
REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
@@ -385,19 +381,9 @@ static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);
- if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_CLOCK_CONTROL,
- OTG_CLOCK_ON, 0,
- 1, 1000);
-
REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
OPTC_INPUT_CLK_GATE_DIS, 0,
OPTC_INPUT_CLK_EN, 0);
-
- if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OPTC_INPUT_CLOCK_CONTROL,
- OPTC_INPUT_CLK_ON, 0,
- 1, 1000);
}
}
@@ -405,19 +391,19 @@ static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
* Enable CRTC
* Enable CRTC - call ASIC Control Object to enable Timing generator.
*/
-static bool tgn10_enable_crtc(struct timing_generator *tg)
+static bool optc1_enable_crtc(struct timing_generator *optc)
{
/* TODO FPGA wait for answer
* OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
* OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
*/
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* opp instance for OTG. For DCN1.0, ODM is removed.
* OPP and OPTC should be 1:1 mapped
*/
REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
- OPTC_SRC_SEL, tg->inst);
+ OPTC_SRC_SEL, optc->inst);
/* VTG enable first is for HW workaround */
REG_UPDATE(CONTROL,
@@ -432,9 +418,9 @@ static bool tgn10_enable_crtc(struct timing_generator *tg)
}
/* disable_crtc - call ASIC Control Object to disable Timing generator. */
-static bool tgn10_disable_crtc(struct timing_generator *tg)
+bool optc1_disable_crtc(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* disable otg request until end of the first line
* in the vertical blank region
@@ -455,11 +441,11 @@ static bool tgn10_disable_crtc(struct timing_generator *tg)
}
-static void tgn10_program_blank_color(
- struct timing_generator *tg,
+void optc1_program_blank_color(
+ struct timing_generator *optc,
const struct tg_color *black_color)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET_3(OTG_BLACK_COLOR, 0,
OTG_BLACK_COLOR_B_CB, black_color->color_b_cb,
@@ -467,15 +453,15 @@ static void tgn10_program_blank_color(
OTG_BLACK_COLOR_R_CR, black_color->color_r_cr);
}
-static bool tgn10_validate_timing(
- struct timing_generator *tg,
+bool optc1_validate_timing(
+ struct timing_generator *optc,
const struct dc_crtc_timing *timing)
{
uint32_t interlace_factor;
uint32_t v_blank;
uint32_t h_blank;
uint32_t min_v_blank;
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
ASSERT(timing != NULL);
@@ -505,19 +491,19 @@ static bool tgn10_validate_timing(
* needs more than 8192 horizontal and
* more than 8192 vertical total pixels)
*/
- if (timing->h_total > tgn10->max_h_total ||
- timing->v_total > tgn10->max_v_total)
+ if (timing->h_total > optc1->max_h_total ||
+ timing->v_total > optc1->max_v_total)
return false;
- if (h_blank < tgn10->min_h_blank)
+ if (h_blank < optc1->min_h_blank)
return false;
- if (timing->h_sync_width < tgn10->min_h_sync_width ||
- timing->v_sync_width < tgn10->min_v_sync_width)
+ if (timing->h_sync_width < optc1->min_h_sync_width ||
+ timing->v_sync_width < optc1->min_v_sync_width)
return false;
- min_v_blank = timing->flags.INTERLACE?tgn10->min_v_blank_interlace:tgn10->min_v_blank;
+ min_v_blank = timing->flags.INTERLACE?optc1->min_v_blank_interlace:optc1->min_v_blank;
if (v_blank < min_v_blank)
return false;
@@ -534,15 +520,15 @@ static bool tgn10_validate_timing(
* holds the counter of frames.
*
* @param
- * struct timing_generator *tg - [in] timing generator which controls the
+ * struct timing_generator *optc - [in] timing generator which controls the
* desired CRTC
*
* @return
* Counter of frames, which should equal the number of vblanks.
*/
-static uint32_t tgn10_get_vblank_counter(struct timing_generator *tg)
+uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t frame_count;
REG_GET(OTG_STATUS_FRAME_COUNT,
@@ -551,38 +537,34 @@ static uint32_t tgn10_get_vblank_counter(struct timing_generator *tg)
return frame_count;
}
-static void tgn10_lock(struct timing_generator *tg)
+void optc1_lock(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_GLOBAL_CONTROL0, 0,
- OTG_MASTER_UPDATE_LOCK_SEL, tg->inst);
+ OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
- if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ /* Should be fast, status does not update on maximus */
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
- 1, 100);
+ 1, 10);
}
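
optc1_lock() asserts the master update lock and then, except in the FPGA (Maximus) environment where status never updates, polls the readback field for a bounded number of attempts. A rough userspace sketch of that bounded-poll idiom, with a fake status register standing in for the REG_WAIT() machinery:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake status standing in for the OTG_MASTER_UPDATE_LOCK readback. */
static uint32_t fake_lock_status;

static uint32_t read_lock_status(void)
{
	/* Pretend the hardware latches the lock after a few reads. */
	static int reads;
	if (++reads >= 3)
		fake_lock_status = 1;
	return fake_lock_status;
}

/*
 * Bounded poll, similar in spirit to REG_WAIT(reg, field, value, delay, count):
 * re-read the field up to 'attempts' times and report whether it matched.
 */
static bool wait_for_field(uint32_t (*read)(void), uint32_t expected,
			   unsigned int attempts)
{
	unsigned int i;

	for (i = 0; i < attempts; i++) {
		if (read() == expected)
			return true;
		/* A real driver would delay between reads (e.g. udelay(1)). */
	}
	return false;
}

int main(void)
{
	if (wait_for_field(read_lock_status, 1, 10))
		printf("update lock confirmed\n");
	else
		printf("timed out waiting for update lock\n");
	return 0;
}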
-static void tgn10_unlock(struct timing_generator *tg)
+void optc1_unlock(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 0);
-
- /* why are we waiting here? */
- REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL,
- OTG_UPDATE_PENDING, 0,
- 1, 100000);
}
-static void tgn10_get_position(struct timing_generator *tg,
+void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET_2(OTG_STATUS_POSITION,
OTG_HORZ_COUNT, &position->horizontal_count,
@@ -592,12 +574,12 @@ static void tgn10_get_position(struct timing_generator *tg,
OTG_VERT_COUNT_NOM, &position->nominal_vcount);
}
-static bool tgn10_is_counter_moving(struct timing_generator *tg)
+bool optc1_is_counter_moving(struct timing_generator *optc)
{
struct crtc_position position1, position2;
- tg->funcs->get_position(tg, &position1);
- tg->funcs->get_position(tg, &position2);
+ optc->funcs->get_position(optc, &position1);
+ optc->funcs->get_position(optc, &position2);
if (position1.horizontal_count == position2.horizontal_count &&
position1.vertical_count == position2.vertical_count)
@@ -606,21 +588,37 @@ static bool tgn10_is_counter_moving(struct timing_generator *tg)
return true;
}
-static bool tgn10_did_triggered_reset_occur(
- struct timing_generator *tg)
+bool optc1_did_triggered_reset_occur(
+ struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
- uint32_t occurred;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t occurred_force, occurred_vsync;
REG_GET(OTG_FORCE_COUNT_NOW_CNTL,
- OTG_FORCE_COUNT_NOW_OCCURRED, &occurred);
+ OTG_FORCE_COUNT_NOW_OCCURRED, &occurred_force);
+
+ REG_GET(OTG_VERT_SYNC_CONTROL,
+ OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, &occurred_vsync);
+
+ return occurred_vsync != 0 || occurred_force != 0;
+}
+
+void optc1_disable_reset_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_WRITE(OTG_TRIGA_CNTL, 0);
+
+ REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
+ OTG_FORCE_COUNT_NOW_CLEAR, 1);
- return occurred != 0;
+ REG_SET(OTG_VERT_SYNC_CONTROL, 0,
+ OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, 1);
}
-static void tgn10_enable_reset_trigger(struct timing_generator *tg, int source_tg_inst)
+void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t falling_edge;
REG_GET(OTG_V_SYNC_A_CNTL,
@@ -652,20 +650,55 @@ static void tgn10_enable_reset_trigger(struct timing_generator *tg, int source_t
OTG_FORCE_COUNT_NOW_MODE, 2);
}
-static void tgn10_disable_reset_trigger(struct timing_generator *tg)
+void optc1_enable_crtc_reset(
+ struct timing_generator *optc,
+ int source_tg_inst,
+ struct crtc_trigger_info *crtc_tp)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t falling_edge = 0;
+ uint32_t rising_edge = 0;
- REG_WRITE(OTG_TRIGA_CNTL, 0);
+ switch (crtc_tp->event) {
- REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
- OTG_FORCE_COUNT_NOW_CLEAR, 1);
+ case CRTC_EVENT_VSYNC_RISING:
+ rising_edge = 1;
+ break;
+
+ case CRTC_EVENT_VSYNC_FALLING:
+ falling_edge = 1;
+ break;
+ }
+
+ REG_SET_4(OTG_TRIGA_CNTL, 0,
+ /* vsync signal from selected OTG pipe based
+ * on OTG_TRIG_SOURCE_PIPE_SELECT setting
+ */
+ OTG_TRIGA_SOURCE_SELECT, 20,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
+ /* detect rising or falling edge per the requested trigger event */
+ OTG_TRIGA_RISING_EDGE_DETECT_CNTL, rising_edge,
+ OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, falling_edge);
+
+ switch (crtc_tp->delay) {
+ case TRIGGER_DELAY_NEXT_LINE:
+ REG_SET(OTG_VERT_SYNC_CONTROL, 0,
+ OTG_AUTO_FORCE_VSYNC_MODE, 1);
+ break;
+ case TRIGGER_DELAY_NEXT_PIXEL:
+ REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
+ /* force H count to H_TOTAL and V count to V_TOTAL in
+ * progressive mode and V_TOTAL-1 in interlaced mode
+ */
+ OTG_FORCE_COUNT_NOW_MODE, 2);
+ break;
+ }
}
-static void tgn10_wait_for_state(struct timing_generator *tg,
+void optc1_wait_for_state(struct timing_generator *optc,
enum crtc_state state)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
switch (state) {
case CRTC_STATE_VBLANK:
@@ -685,8 +718,8 @@ static void tgn10_wait_for_state(struct timing_generator *tg,
}
}
-static void tgn10_set_early_control(
- struct timing_generator *tg,
+void optc1_set_early_control(
+ struct timing_generator *optc,
uint32_t early_cntl)
{
/* asic design change, do not need this control
@@ -695,11 +728,11 @@ static void tgn10_set_early_control(
}
-static void tgn10_set_static_screen_control(
- struct timing_generator *tg,
+void optc1_set_static_screen_control(
+ struct timing_generator *optc,
uint32_t value)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* Bit 8 is no longer applicable in RV for PSR case,
* set bit 8 to 0 if given
@@ -724,11 +757,11 @@ static void tgn10_set_static_screen_control(
*
*****************************************************************************
*/
-static void tgn10_set_drr(
- struct timing_generator *tg,
+void optc1_set_drr(
+ struct timing_generator *optc,
const struct drr_params *params)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (params != NULL &&
params->vertical_total_max > 0 &&
@@ -761,15 +794,15 @@ static void tgn10_set_drr(
}
}
-static void tgn10_set_test_pattern(
- struct timing_generator *tg,
+static void optc1_set_test_pattern(
+ struct timing_generator *optc,
/* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
* because this is not DP-specific (which is probably somewhere in DP
* encoder) */
enum controller_dp_test_pattern test_pattern,
enum dc_color_depth color_depth)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
enum test_pattern_color_format bit_depth;
enum test_pattern_dyn_range dyn_range;
enum test_pattern_mode mode;
@@ -1020,35 +1053,30 @@ static void tgn10_set_test_pattern(
}
}
-static void tgn10_get_crtc_scanoutpos(
- struct timing_generator *tg,
+void optc1_get_crtc_scanoutpos(
+ struct timing_generator *optc,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
uint32_t *h_position,
uint32_t *v_position)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
struct crtc_position position;
REG_GET_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, v_blank_start,
OTG_V_BLANK_END, v_blank_end);
- tgn10_get_position(tg, &position);
+ optc1_get_position(optc, &position);
*h_position = position.horizontal_count;
*v_position = position.vertical_count;
}
-
-
-static void tgn10_enable_stereo(struct timing_generator *tg,
+static void optc1_enable_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
-
- uint32_t active_width = timing->h_addressable;
- uint32_t space1_size = timing->v_total - timing->v_addressable;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (flags) {
uint32_t stereo_en;
@@ -1076,29 +1104,23 @@ static void tgn10_enable_stereo(struct timing_generator *tg,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
}
-
- REG_UPDATE(OPPBUF_CONTROL,
- OPPBUF_ACTIVE_WIDTH, active_width);
-
- REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
- OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
}
-static void tgn10_program_stereo(struct timing_generator *tg,
+void optc1_program_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
{
if (flags->PROGRAM_STEREO)
- tgn10_enable_stereo(tg, timing, flags);
+ optc1_enable_stereo(optc, timing, flags);
else
- tgn10_disable_stereo(tg);
+ optc1_disable_stereo(optc);
}
-static bool tgn10_is_stereo_left_eye(struct timing_generator *tg)
+bool optc1_is_stereo_left_eye(struct timing_generator *optc)
{
bool ret = false;
uint32_t left_eye = 0;
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET(OTG_STEREO_STATUS,
OTG_STEREO_CURRENT_EYE, &left_eye);
@@ -1110,7 +1132,7 @@ static bool tgn10_is_stereo_left_eye(struct timing_generator *tg)
return ret;
}
-void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s)
{
REG_GET(OTG_CONTROL,
@@ -1154,47 +1176,88 @@ void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
}
+static void optc1_clear_optc_underflow(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
+}
+
+static void optc1_tg_init(struct timing_generator *optc)
+{
+ optc1_set_blank_data_double_buffer(optc, true);
+ optc1_clear_optc_underflow(optc);
+}
+
+static bool optc1_is_tg_enabled(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t otg_enabled = 0;
+
+ REG_GET(OTG_CONTROL, OTG_MASTER_EN, &otg_enabled);
+
+ return (otg_enabled != 0);
+
+}
+
+static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t underflow_occurred = 0;
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS,
+ &underflow_occurred);
+
+ return (underflow_occurred == 1);
+}
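
The two underflow helpers pair a latched occurred-status field with a separate clear field in OPTC_INPUT_GLOBAL_CONTROL. A small sketch of that status/clear pattern, with bit positions assumed purely for illustration (the real ones come from the generated sh_mask headers):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Assumed layout: bit 0 = latched underflow status, bit 1 = write-1-to-clear. */
#define UNDERFLOW_STATUS   (1u << 0)
#define UNDERFLOW_CLEAR    (1u << 1)

static uint32_t optc_input_global_control = UNDERFLOW_STATUS; /* pretend it latched */

static bool underflow_occurred(void)
{
	return (optc_input_global_control & UNDERFLOW_STATUS) != 0;
}

/* Model write-one-to-clear: writing the clear bit drops the latched status. */
static void write_control(uint32_t value)
{
	if (value & UNDERFLOW_CLEAR)
		optc_input_global_control &= ~UNDERFLOW_STATUS;
}

int main(void)
{
	printf("underflow before clear: %d\n", underflow_occurred());
	write_control(UNDERFLOW_CLEAR);
	printf("underflow after clear:  %d\n", underflow_occurred());
	return 0;
}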
static const struct timing_generator_funcs dcn10_tg_funcs = {
- .validate_timing = tgn10_validate_timing,
- .program_timing = tgn10_program_timing,
- .program_global_sync = tgn10_program_global_sync,
- .enable_crtc = tgn10_enable_crtc,
- .disable_crtc = tgn10_disable_crtc,
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc1_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
/* used by enable_timing_synchronization. Not needed for FPGA */
- .is_counter_moving = tgn10_is_counter_moving,
- .get_position = tgn10_get_position,
- .get_frame_count = tgn10_get_vblank_counter,
- .get_scanoutpos = tgn10_get_crtc_scanoutpos,
- .set_early_control = tgn10_set_early_control,
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not needed for FPGA */
- .wait_for_state = tgn10_wait_for_state,
- .set_blank = tgn10_set_blank,
- .is_blanked = tgn10_is_blanked,
- .set_blank_color = tgn10_program_blank_color,
- .did_triggered_reset_occur = tgn10_did_triggered_reset_occur,
- .enable_reset_trigger = tgn10_enable_reset_trigger,
- .disable_reset_trigger = tgn10_disable_reset_trigger,
- .lock = tgn10_lock,
- .unlock = tgn10_unlock,
- .enable_optc_clock = tgn10_enable_optc_clock,
- .set_drr = tgn10_set_drr,
- .set_static_screen_control = tgn10_set_static_screen_control,
- .set_test_pattern = tgn10_set_test_pattern,
- .program_stereo = tgn10_program_stereo,
- .is_stereo_left_eye = tgn10_is_stereo_left_eye
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank = optc1_set_blank,
+ .is_blanked = optc1_is_blanked,
+ .set_blank_color = optc1_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .lock = optc1_lock,
+ .unlock = optc1_unlock,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc1_set_drr,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .set_test_pattern = optc1_set_test_pattern,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer,
+ .tg_init = optc1_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
};
-void dcn10_timing_generator_init(struct dcn10_timing_generator *tgn10)
+void dcn10_timing_generator_init(struct optc *optc1)
{
- tgn10->base.funcs = &dcn10_tg_funcs;
+ optc1->base.funcs = &dcn10_tg_funcs;
- tgn10->max_h_total = tgn10->tg_mask->OTG_H_TOTAL + 1;
- tgn10->max_v_total = tgn10->tg_mask->OTG_V_TOTAL + 1;
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
- tgn10->min_h_blank = 32;
- tgn10->min_v_blank = 3;
- tgn10->min_v_blank_interlace = 5;
- tgn10->min_h_sync_width = 8;
- tgn10->min_v_sync_width = 1;
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 8;
+ optc1->min_v_sync_width = 1;
}
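
The init function derives the timing limits from the register field masks: with a zero shift, the largest programmable field value is the mask itself, and assuming the usual total-minus-one encoding the largest supported total is that value plus one. A short sketch of that derivation, using an assumed 15-bit mask for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Assumed example values; real ones come from the generated _MASK/__SHIFT headers. */
#define OTG_H_TOTAL_MASK   0x00007fffu
#define OTG_H_TOTAL_SHIFT  0

/* Largest value the field can hold once shifted down. */
static uint32_t field_max(uint32_t mask, uint32_t shift)
{
	return mask >> shift;
}

int main(void)
{
	/* Mirrors max_h_total = tg_mask->OTG_H_TOTAL + 1 when the shift is 0:
	 * if the field holds (total - 1), the largest total is field_max + 1.
	 */
	uint32_t max_h_total = field_max(OTG_H_TOTAL_MASK, OTG_H_TOTAL_SHIFT) + 1;

	printf("max horizontal total: %u pixels\n", (unsigned int)max_h_total);
	return 0;
}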
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 7d4818d..d25e7bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -29,7 +29,7 @@
#include "timing_generator.h"
#define DCN10TG_FROM_TG(tg)\
- container_of(tg, struct dcn10_timing_generator, base)
+ container_of(tg, struct optc, base)
#define TG_COMMON_REG_LIST_DCN(inst) \
SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
@@ -70,9 +70,10 @@
SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
- SRI(OPPBUF_CONTROL, OPPBUF, inst),\
- SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, inst),\
- SRI(CONTROL, VTG, inst)
+ SRI(CONTROL, VTG, inst),\
+ SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
+ SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
+ SRI(OTG_GSL_CONTROL, OTG, inst)
#define TG_COMMON_REG_LIST_DCN1_0(inst) \
TG_COMMON_REG_LIST_DCN(inst),\
@@ -81,7 +82,12 @@
SRI(OTG_TEST_PATTERN_COLOR, OTG, inst)
-struct dcn_tg_registers {
+struct dcn_optc_registers {
+ uint32_t OTG_GLOBAL_CONTROL1;
+ uint32_t OTG_GLOBAL_CONTROL2;
+ uint32_t OTG_VERT_SYNC_CONTROL;
+ uint32_t OTG_MASTER_UPDATE_MODE;
+ uint32_t OTG_GSL_CONTROL;
uint32_t OTG_VSTARTUP_PARAM;
uint32_t OTG_VUPDATE_PARAM;
uint32_t OTG_VREADY_PARAM;
@@ -122,10 +128,12 @@ struct dcn_tg_registers {
uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
uint32_t OPTC_INPUT_CLOCK_CONTROL;
uint32_t OPTC_DATA_SOURCE_SELECT;
+ uint32_t OPTC_MEMORY_CONFIG;
uint32_t OPTC_INPUT_GLOBAL_CONTROL;
- uint32_t OPPBUF_CONTROL;
- uint32_t OPPBUF_3D_PARAMETERS_0;
uint32_t CONTROL;
+ uint32_t OTG_GSL_WINDOW_X;
+ uint32_t OTG_GSL_WINDOW_Y;
+ uint32_t OTG_VUPDATE_KEEPOUT;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -204,11 +212,21 @@ struct dcn_tg_registers {
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
- SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
- SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
- SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh)
+ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
+ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\
+ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\
+ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_MODE, MASTER_UPDATE_INTERLACED_MODE, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh)
+
#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
@@ -225,7 +243,7 @@ struct dcn_tg_registers {
SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh)
-#define TG_REG_FIELD_LIST(type) \
+#define TG_REG_FIELD_LIST_DCN1_0(type) \
type VSTARTUP_START;\
type VUPDATE_OFFSET;\
type VUPDATE_WIDTH;\
@@ -310,29 +328,51 @@ struct dcn_tg_registers {
type OPTC_INPUT_CLK_EN;\
type OPTC_INPUT_CLK_ON;\
type OPTC_INPUT_CLK_GATE_DIS;\
- type OPTC_SRC_SEL;\
- type OPTC_SEG0_SRC_SEL;\
type OPTC_UNDERFLOW_OCCURRED_STATUS;\
- type OPPBUF_ACTIVE_WIDTH;\
- type OPPBUF_3D_VACT_SPACE1_SIZE;\
+ type OPTC_UNDERFLOW_CLEAR;\
+ type OPTC_SRC_SEL;\
type VTG0_ENABLE;\
type VTG0_FP2;\
- type VTG0_VCOUNT_INIT;
+ type VTG0_VCOUNT_INIT;\
+ type OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED;\
+ type OTG_FORCE_VSYNC_NEXT_LINE_CLEAR;\
+ type OTG_AUTO_FORCE_VSYNC_MODE;\
+ type MASTER_UPDATE_INTERLACED_MODE;\
+ type OTG_GSL0_EN;\
+ type OTG_GSL1_EN;\
+ type OTG_GSL2_EN;\
+ type OTG_GSL_MASTER_EN;\
+ type OTG_GSL_FORCE_DELAY;\
+ type OTG_GSL_CHECK_ALL_FIELDS;\
+ type OTG_GSL_WINDOW_START_X;\
+ type OTG_GSL_WINDOW_END_X;\
+ type OTG_GSL_WINDOW_START_Y;\
+ type OTG_GSL_WINDOW_END_Y;\
+ type OTG_RANGE_TIMING_DBUF_UPDATE_MODE;\
+ type OTG_GSL_MASTER_MODE;\
+ type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
+ type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
+ type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
+ type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
-struct dcn_tg_shift {
+#define TG_REG_FIELD_LIST(type) \
+ TG_REG_FIELD_LIST_DCN1_0(type)
+
+
+struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
};
-struct dcn_tg_mask {
+struct dcn_optc_mask {
TG_REG_FIELD_LIST(uint32_t)
};
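
dcn_optc_shift and dcn_optc_mask are both generated from the same TG_REG_FIELD_LIST(type) macro, expanded once with uint8_t for bit positions and once with uint32_t for masks. A compact sketch of that X-macro technique, together with the mask-and-shift extraction that REG_GET() ultimately performs (the field values here are illustrative, not taken from the real headers):

#include <stdint.h>
#include <stdio.h>

/* One field list, expanded twice with different storage types. */
#define FIELD_LIST(type) \
	type OTG_MASTER_EN; \
	type OTG_H_TOTAL;

struct shift_table { FIELD_LIST(uint8_t) };   /* bit positions */
struct mask_table  { FIELD_LIST(uint32_t) };  /* bit masks     */

/* Illustrative values; a real driver takes them from generated headers. */
static const struct shift_table shifts = { .OTG_MASTER_EN = 0, .OTG_H_TOTAL = 0 };
static const struct mask_table  masks  = { .OTG_MASTER_EN = 0x1, .OTG_H_TOTAL = 0x7fff };

/* Roughly what REG_GET() boils down to: mask, then shift down. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint8_t shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t otg_control = 0x1; /* pretend OTG_MASTER_EN is set */

	printf("OTG_MASTER_EN = %u\n",
	       (unsigned int)get_field(otg_control, masks.OTG_MASTER_EN,
				       shifts.OTG_MASTER_EN));
	return 0;
}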
-struct dcn10_timing_generator {
+struct optc {
struct timing_generator base;
- const struct dcn_tg_registers *tg_regs;
- const struct dcn_tg_shift *tg_shift;
- const struct dcn_tg_mask *tg_mask;
+ const struct dcn_optc_registers *tg_regs;
+ const struct dcn_optc_shift *tg_shift;
+ const struct dcn_optc_mask *tg_mask;
enum controller_id controller_id;
@@ -347,7 +387,7 @@ struct dcn10_timing_generator {
uint32_t min_v_blank_interlace;
};
-void dcn10_timing_generator_init(struct dcn10_timing_generator *tg);
+void dcn10_timing_generator_init(struct optc *optc);
struct dcn_otg_state {
uint32_t v_blank_start;
@@ -368,7 +408,77 @@ struct dcn_otg_state {
uint32_t otg_enabled;
};
-void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s);
+bool optc1_validate_timing(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *timing);
+
+void optc1_program_timing(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing,
+ bool use_vbios);
+
+void optc1_program_global_sync(
+ struct timing_generator *optc);
+
+bool optc1_disable_crtc(struct timing_generator *optc);
+
+bool optc1_is_counter_moving(struct timing_generator *optc);
+
+void optc1_get_position(struct timing_generator *optc,
+ struct crtc_position *position);
+
+uint32_t optc1_get_vblank_counter(struct timing_generator *optc);
+
+void optc1_get_crtc_scanoutpos(
+ struct timing_generator *optc,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+void optc1_set_early_control(
+ struct timing_generator *optc,
+ uint32_t early_cntl);
+
+void optc1_wait_for_state(struct timing_generator *optc,
+ enum crtc_state state);
+
+void optc1_set_blank(struct timing_generator *optc,
+ bool enable_blanking);
+
+bool optc1_is_blanked(struct timing_generator *optc);
+
+void optc1_program_blank_color(
+ struct timing_generator *optc,
+ const struct tg_color *black_color);
+
+bool optc1_did_triggered_reset_occur(
+ struct timing_generator *optc);
+
+void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst);
+
+void optc1_disable_reset_trigger(struct timing_generator *optc);
+
+void optc1_lock(struct timing_generator *optc);
+
+void optc1_unlock(struct timing_generator *optc);
+
+void optc1_enable_optc_clock(struct timing_generator *optc, bool enable);
+
+void optc1_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params);
+
+void optc1_set_static_screen_control(
+ struct timing_generator *optc,
+ uint32_t value);
+
+void optc1_program_stereo(struct timing_generator *optc,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
+
+bool optc1_is_stereo_left_eye(struct timing_generator *optc);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 9fc8f82..02bd664 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -34,7 +34,7 @@
#include "dcn10/dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
#include "dcn10/dcn10_dpp.h"
-#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10_optc.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_opp.h"
@@ -48,16 +48,18 @@
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
+#include "dcn10_hubbub.h"
-#include "vega10/soc15ip.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
-#include "raven1/DCN/dcn_1_0_offset.h"
-#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
-#include "raven1/NBIO/nbio_7_0_offset.h"
+#include "nbio/nbio_7_0_offset.h"
-#include "raven1/MMHUB/mmhub_9_1_offset.h"
-#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
+#include "mmhub/mmhub_9_1_offset.h"
+#include "mmhub/mmhub_9_1_sh_mask.h"
#include "reg_helper.h"
#include "dce/dce_abm.h"
@@ -347,45 +349,59 @@ static const struct dcn_mpc_mask mpc_mask = {
#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
-static const struct dcn_tg_registers tg_regs[] = {
+static const struct dcn_optc_registers tg_regs[] = {
tg_regs(0),
tg_regs(1),
tg_regs(2),
tg_regs(3),
};
-static const struct dcn_tg_shift tg_shift = {
+static const struct dcn_optc_shift tg_shift = {
TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};
-static const struct dcn_tg_mask tg_mask = {
+static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
-#define mi_regs(id)\
+#define hubp_regs(id)\
[id] = {\
- MI_REG_LIST_DCN10(id)\
+ HUBP_REG_LIST_DCN10(id)\
}
-static const struct dcn_mi_registers mi_regs[] = {
- mi_regs(0),
- mi_regs(1),
- mi_regs(2),
- mi_regs(3),
+static const struct dcn_mi_registers hubp_regs[] = {
+ hubp_regs(0),
+ hubp_regs(1),
+ hubp_regs(2),
+ hubp_regs(3),
};
-static const struct dcn_mi_shift mi_shift = {
- MI_MASK_SH_LIST_DCN10(__SHIFT)
+static const struct dcn_mi_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN10(__SHIFT)
};
-static const struct dcn_mi_mask mi_mask = {
- MI_MASK_SH_LIST_DCN10(_MASK)
+static const struct dcn_mi_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN10(_MASK)
+};
+
+
+static const struct dcn_hubbub_registers hubbub_reg = {
+ HUBBUB_REG_LIST_DCN10(0)
+};
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN10(_MASK)
};
#define clk_src_regs(index, pllid)\
@@ -424,7 +440,11 @@ static const struct dc_debug debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
- .min_disp_clk_khz = 300000,
+ /* raven smu doesn't allow 0 disp clk,
+ * smu min disp clk limit is 50 MHz
+ * keep min disp clk at 100 MHz to avoid smu hang
+ */
+ .min_disp_clk_khz = 100000,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = false,
@@ -436,6 +456,7 @@ static const struct dc_debug debug_defaults_drv = {
.disable_stereo_support = true,
.vsr_support = true,
.performance_trace = false,
+ .az_endpoint_mute_only = true,
};
static const struct dc_debug debug_defaults_diags = {
@@ -519,12 +540,28 @@ static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
return &mpc10->base;
}
+static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx)
+{
+ struct hubbub *hubbub = kzalloc(sizeof(struct hubbub),
+ GFP_KERNEL);
+
+ if (!hubbub)
+ return NULL;
+
+ hubbub1_construct(hubbub, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask);
+
+ return hubbub;
+}
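
dcn10_hubbub_create() follows the usual DC object pattern: zeroed allocation, NULL check, then a construct helper that wires in the register, shift and mask tables; destruct() later frees the object. A userspace sketch of the same shape, with calloc()/free() standing in for kzalloc()/kfree() and invented stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct hubbub_regs { unsigned int base; };

struct hubbub {
	const struct hubbub_regs *regs;
	int initialized;
};

/* Stand-in for hubbub1_construct(): wire up the register table. */
static void hubbub_construct(struct hubbub *hubbub, const struct hubbub_regs *regs)
{
	hubbub->regs = regs;
	hubbub->initialized = 1;
}

/* Same shape as dcn10_hubbub_create(): zeroed allocation, NULL check, construct. */
static struct hubbub *hubbub_create(const struct hubbub_regs *regs)
{
	struct hubbub *hubbub = calloc(1, sizeof(*hubbub));

	if (!hubbub)
		return NULL;

	hubbub_construct(hubbub, regs);
	return hubbub;
}

int main(void)
{
	static const struct hubbub_regs regs = { .base = 0x1000 };
	struct hubbub *hubbub = hubbub_create(&regs);

	if (!hubbub)
		return 1;
	printf("hubbub constructed: %d\n", hubbub->initialized);
	free(hubbub); /* destruct() in the driver does the matching kfree() */
	return 0;
}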
+
static struct timing_generator *dcn10_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
- struct dcn10_timing_generator *tgn10 =
- kzalloc(sizeof(struct dcn10_timing_generator), GFP_KERNEL);
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -647,6 +684,8 @@ static struct dce_hwseq *dcn10_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
+ hws->wa.DEGVIDCN10_253 = true;
+ hws->wa.false_optc_underflow = true;
}
return hws;
}
@@ -700,6 +739,12 @@ static void destruct(struct dcn10_resource_pool *pool)
kfree(TO_DCN10_MPC(pool->base.mpc));
pool->base.mpc = NULL;
}
+
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.opps[i] != NULL)
pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
@@ -768,7 +813,7 @@ static struct hubp *dcn10_hubp_create(
return NULL;
dcn10_hubp_construct(hubp1, ctx, inst,
- &mi_regs[inst], &mi_shift, &mi_mask);
+ &hubp_regs[inst], &hubp_shift, &hubp_mask);
return &hubp1->base;
}
@@ -780,7 +825,7 @@ static void get_pixel_clock_parameters(
pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
pixel_clk_params->signal_type = pipe_ctx->stream->signal;
- pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
+ pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
/* TODO: un-hardcode*/
pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
LINK_RATE_REF_FREQ_IN_KHZ;
@@ -922,11 +967,13 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
idle_pipe->stream = head_pipe->stream;
idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.abm = head_pipe->stream_res.abm;
idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
return idle_pipe;
}
@@ -1233,8 +1280,8 @@ static bool construct(
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 256;
-
dc->caps.max_slave_planes = 1;
+ dc->caps.is_apu = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1274,17 +1321,15 @@ static bool construct(
if (pool->base.clock_sources[i] == NULL) {
dm_error("DC: failed to create clock sources!\n");
BREAK_TO_DEBUGGER();
- goto clock_source_create_fail;
+ goto fail;
}
}
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- pool->base.display_clock = dce120_disp_clk_create(ctx);
- if (pool->base.display_clock == NULL) {
- dm_error("DC: failed to create display clock!\n");
- BREAK_TO_DEBUGGER();
- goto disp_clk_create_fail;
- }
+ pool->base.display_clock = dce120_disp_clk_create(ctx);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto fail;
}
pool->base.dmcu = dcn10_dmcu_create(ctx,
@@ -1294,7 +1339,7 @@ static bool construct(
if (pool->base.dmcu == NULL) {
dm_error("DC: failed to create dmcu!\n");
BREAK_TO_DEBUGGER();
- goto res_create_fail;
+ goto fail;
}
pool->base.abm = dce_abm_create(ctx,
@@ -1304,7 +1349,7 @@ static bool construct(
if (pool->base.abm == NULL) {
dm_error("DC: failed to create abm!\n");
BREAK_TO_DEBUGGER();
- goto res_create_fail;
+ goto fail;
}
dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
@@ -1344,13 +1389,11 @@ static bool construct(
}
{
- #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
if (!pool->base.irqs)
- goto irqs_create_fail;
- #endif
+ goto fail;
}
/* index to valid pipe resource */
@@ -1368,7 +1411,7 @@ static bool construct(
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create memory input!\n");
- goto mi_create_fail;
+ goto fail;
}
pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
@@ -1376,7 +1419,7 @@ static bool construct(
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create input pixel processor!\n");
- goto ipp_create_fail;
+ goto fail;
}
pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
@@ -1384,7 +1427,7 @@ static bool construct(
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create dpp!\n");
- goto dpp_create_fail;
+ goto fail;
}
pool->base.opps[j] = dcn10_opp_create(ctx, i);
@@ -1392,7 +1435,7 @@ static bool construct(
BREAK_TO_DEBUGGER();
dm_error(
"DC: failed to create output pixel processor!\n");
- goto opp_create_fail;
+ goto fail;
}
pool->base.timing_generators[j] = dcn10_timing_generator_create(
@@ -1400,14 +1443,16 @@ static bool construct(
if (pool->base.timing_generators[j] == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create tg!\n");
- goto otg_create_fail;
+ goto fail;
}
+
/* check next valid pipe */
j++;
}
/* valid pipe num */
pool->base.pipe_count = j;
+ pool->base.timing_generator_count = j;
/* within dml lib, it is hard-coded to 4. If an ASIC pipe is fused,
* the value may be changed
@@ -1419,13 +1464,20 @@ static bool construct(
if (pool->base.mpc == NULL) {
BREAK_TO_DEBUGGER();
dm_error("DC: failed to create mpc!\n");
- goto mpc_create_fail;
+ goto fail;
+ }
+
+ pool->base.hubbub = dcn10_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create hubbub!\n");
+ goto fail;
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
&res_create_funcs : &res_create_maximus_funcs)))
- goto res_create_fail;
+ goto fail;
dcn10_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
@@ -1434,16 +1486,7 @@ static bool construct(
return true;
-disp_clk_create_fail:
-mpc_create_fail:
-otg_create_fail:
-opp_create_fail:
-dpp_create_fail:
-ipp_create_fail:
-mi_create_fail:
-irqs_create_fail:
-res_create_fail:
-clock_source_create_fail:
+fail:
destruct(pool);
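
The per-stage error labels above are collapsed into a single "fail:" label because destruct() checks every pool member before freeing it, so it is safe to call on a partially constructed pool. A self-contained sketch of that single-cleanup-label idiom:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct pool {
	int *hubbub;
	int *mpc;
};

/* Frees whatever was successfully created; safe on a half-built pool. */
static void destruct(struct pool *p)
{
	free(p->mpc);
	p->mpc = NULL;
	free(p->hubbub);
	p->hubbub = NULL;
}

static bool construct(struct pool *p, bool inject_failure)
{
	p->hubbub = calloc(1, sizeof(*p->hubbub));
	if (!p->hubbub)
		goto fail;

	p->mpc = inject_failure ? NULL : calloc(1, sizeof(*p->mpc));
	if (!p->mpc)
		goto fail;

	return true;

fail:
	/* One label replaces the per-stage labels; cleanup is centralized. */
	destruct(p);
	return false;
}

int main(void)
{
	struct pool p = { 0 };

	printf("construct ok: %d\n", construct(&p, false));
	destruct(&p);
	printf("construct with injected failure: %d\n", construct(&p, true));
	return 0;
}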
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index ab88f07..034369f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -50,6 +50,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
bool enable);
/*
+ * Clear the payload allocation table before enabling the MST DP link.
+ */
+void dm_helpers_dp_mst_clear_payload_allocation_table(
+ struct dc_context *ctx,
+ const struct dc_link *link);
+
+/*
* Polls for ACT (allocation change trigger) handled and
*/
bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -101,5 +108,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_link *link,
struct dc_sink *sink);
+void dm_set_dcn_clocks(
+ struct dc_context *ctx,
+ struct dc_clocks *clks);
#endif /* __DM_HELPERS__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index bbfa832..eac4bfe 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -91,7 +91,8 @@ struct pp_smu_funcs_rv {
/* which SMU message? are reader and writer WM separate SMU msg? */
void (*set_wm_ranges)(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges);
-
+ /* PME w/a */
+ void (*set_pme_wa_enable)(struct pp_smu *pp);
};
#if 0
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index d491703..22e7ee7 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -192,37 +192,6 @@ unsigned int generic_reg_wait(const struct dc_context *ctx,
* Power Play (PP) interfaces
**************************************/
-/* DAL calls this function to notify PP about clocks it needs for the Mode Set.
- * This is done *before* it changes DCE clock.
- *
- * If required clock is higher than current, then PP will increase the voltage.
- *
- * If required clock is lower than current, then PP will defer reduction of
- * voltage until the call to dc_service_pp_post_dce_clock_change().
- *
- * \input - Contains clocks needed for Mode Set.
- *
- * \output - Contains clocks adjusted by PP which DAL should use for Mode Set.
- * Valid only if function returns zero.
- *
- * \returns true - call is successful
- * false - call failed
- */
-bool dm_pp_pre_dce_clock_change(
- struct dc_context *ctx,
- struct dm_pp_gpu_clock_range *requested_state,
- struct dm_pp_gpu_clock_range *actual_state);
-
-/* The returned clocks range are 'static' system clocks which will be used for
- * mode validation purposes.
- *
- * \returns true - call is successful
- * false - call failed
- */
-bool dc_service_get_system_clocks_range(
- const struct dc_context *ctx,
- struct dm_pp_gpu_clock_range *sys_clks);
-
/* Gets valid clocks levels from pplib
*
* input: clk_type - display clk / sclk / mem clk
@@ -373,6 +342,13 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
unsigned long long dm_get_timestamp(struct dc_context *ctx);
/*
+ * performance tracing
+ */
+void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
+#define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__)
+
+
+/*
* Debug and verification hooks
*/
bool dm_helpers_dc_conn_log(
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index fa26cf4..ab8c77d 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -29,7 +29,7 @@
#include "os_types.h"
#include "dc_types.h"
-#include "dm_pp_smu.h"
+struct pp_smu_funcs_rv;
struct dm_pp_clock_range {
int min_khz;
@@ -239,25 +239,8 @@ enum dm_acpi_display_type {
AcpiDisplayType_DFP6 = 12
};
-enum dm_pp_power_level {
- DM_PP_POWER_LEVEL_INVALID,
- DM_PP_POWER_LEVEL_ULTRA_LOW,
- DM_PP_POWER_LEVEL_LOW,
- DM_PP_POWER_LEVEL_NOMINAL,
- DM_PP_POWER_LEVEL_PERFORMANCE,
-
- DM_PP_POWER_LEVEL_0 = DM_PP_POWER_LEVEL_ULTRA_LOW,
- DM_PP_POWER_LEVEL_1 = DM_PP_POWER_LEVEL_LOW,
- DM_PP_POWER_LEVEL_2 = DM_PP_POWER_LEVEL_NOMINAL,
- DM_PP_POWER_LEVEL_3 = DM_PP_POWER_LEVEL_PERFORMANCE,
- DM_PP_POWER_LEVEL_4 = DM_PP_CLOCKS_DPM_STATE_LEVEL_3 + 1,
- DM_PP_POWER_LEVEL_5 = DM_PP_CLOCKS_DPM_STATE_LEVEL_4 + 1,
- DM_PP_POWER_LEVEL_6 = DM_PP_CLOCKS_DPM_STATE_LEVEL_5 + 1,
- DM_PP_POWER_LEVEL_7 = DM_PP_CLOCKS_DPM_STATE_LEVEL_6 + 1,
-};
-
struct dm_pp_power_level_change_request {
- enum dm_pp_power_level power_level;
+ enum dm_pp_clocks_state power_level;
};
struct dm_pp_clock_for_voltage_req {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 3488af2..f83a608 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -24,19 +24,23 @@
# It provides the general basic services required by other DAL
# subcomponents.
-CFLAGS_display_mode_vba.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_display_mode_lib.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_display_pipe_clocks.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_dml1_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_display_rq_dlg_helpers.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_soc_bounding_box.o := -mhard-float -msse -mpreferred-stack-boundary=4
-CFLAGS_dml_common_defs.o := -mhard-float -msse -mpreferred-stack-boundary=4
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+ cc_stack_align := -mpreferred-stack-boundary=4
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+ cc_stack_align := -mstack-alignment=16
+endif
+dml_ccflags := -mhard-float -msse $(cc_stack_align)
-DML = display_mode_lib.o display_rq_dlg_calc.o \
- display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
- soc_bounding_box.o dml_common_defs.o display_mode_vba.o
+CFLAGS_display_mode_lib.o := $(dml_ccflags)
+CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
+CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
+CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags)
+CFLAGS_soc_bounding_box.o := $(dml_ccflags)
+CFLAGS_dml_common_defs.o := $(dml_ccflags)
+
+DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
+ soc_bounding_box.o dml_common_defs.o
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 4c31fa5..c109b2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -35,35 +35,6 @@ static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum
soc->writeback_latency_us = 12.0;
soc->ideal_dram_bw_after_urgent_percent = 80.0;
soc->max_request_size_bytes = 256;
-
- soc->vmin.dcfclk_mhz = 300.0;
- soc->vmin.dispclk_mhz = 608.0;
- soc->vmin.dppclk_mhz = 435.0;
- soc->vmin.dram_bw_per_chan_gbps = 12.8;
- soc->vmin.phyclk_mhz = 540.0;
- soc->vmin.socclk_mhz = 208.0;
-
- soc->vmid.dcfclk_mhz = 600.0;
- soc->vmid.dispclk_mhz = 661.0;
- soc->vmid.dppclk_mhz = 661.0;
- soc->vmid.dram_bw_per_chan_gbps = 12.8;
- soc->vmid.phyclk_mhz = 540.0;
- soc->vmid.socclk_mhz = 208.0;
-
- soc->vnom.dcfclk_mhz = 600.0;
- soc->vnom.dispclk_mhz = 661.0;
- soc->vnom.dppclk_mhz = 661.0;
- soc->vnom.dram_bw_per_chan_gbps = 38.4;
- soc->vnom.phyclk_mhz = 810;
- soc->vnom.socclk_mhz = 208.0;
-
- soc->vmax.dcfclk_mhz = 600.0;
- soc->vmax.dispclk_mhz = 1086.0;
- soc->vmax.dppclk_mhz = 661.0;
- soc->vmax.dram_bw_per_chan_gbps = 38.4;
- soc->vmax.phyclk_mhz = 810.0;
- soc->vmax.socclk_mhz = 208.0;
-
soc->downspread_percent = 0.5;
soc->dram_page_open_time_ns = 50.0;
soc->dram_rw_turnaround_time_ns = 17.5;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 26f4f2a..3c2abcb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -28,8 +28,6 @@
#include "dml_common_defs.h"
#include "soc_bounding_box.h"
-#include "display_mode_vba.h"
-#include "display_rq_dlg_calc.h"
#include "dml1_display_rq_dlg_calc.h"
enum dml_project {
@@ -41,7 +39,6 @@ struct display_mode_lib {
struct _vcs_dpi_ip_params_st ip;
struct _vcs_dpi_soc_bounding_box_st soc;
enum dml_project project;
- struct vba_vars_st vba;
struct dal_logger *logger;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index baf1821..09affa1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -79,10 +79,6 @@ struct _vcs_dpi_soc_bounding_box_st {
double writeback_latency_us;
double ideal_dram_bw_after_urgent_percent;
unsigned int max_request_size_bytes;
- struct _vcs_dpi_voltage_scaling_st vmin;
- struct _vcs_dpi_voltage_scaling_st vmid;
- struct _vcs_dpi_voltage_scaling_st vnom;
- struct _vcs_dpi_voltage_scaling_st vmax;
double downspread_percent;
double dram_page_open_time_ns;
double dram_rw_turnaround_time_ns;
@@ -144,7 +140,6 @@ struct _vcs_dpi_ip_params_st {
unsigned int max_hscl_taps;
unsigned int max_vscl_taps;
unsigned int xfc_supported;
- unsigned int ptoi_supported;
unsigned int xfc_fill_constant_bytes;
double dispclk_ramp_margin_percent;
double xfc_fill_bw_overhead_percent;
@@ -229,7 +224,7 @@ struct _vcs_dpi_display_output_params_st {
int output_bpp;
int dsc_enable;
int wb_enable;
- int output_bpc;
+ int opp_input_bpc;
int output_type;
int output_format;
int output_standard;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
deleted file mode 100644
index ea661ee..0000000
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ /dev/null
@@ -1,6124 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "display_mode_lib.h"
-#include "display_mode_vba.h"
-
-#include "dml_inline_defs.h"
-
-static const unsigned int NumberOfStates = DC__VOLTAGE_STATES;
-
-static void fetch_socbb_params(struct display_mode_lib *mode_lib);
-static void fetch_ip_params(struct display_mode_lib *mode_lib);
-static void fetch_pipe_params(struct display_mode_lib *mode_lib);
-static void recalculate_params(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes);
-static void recalculate(struct display_mode_lib *mode_lib);
-static double adjust_ReturnBW(
- struct display_mode_lib *mode_lib,
- double ReturnBW,
- bool DCCEnabledAnyPlane,
- double ReturnBandwidthToDCN);
-static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);
-static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
-static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
- struct display_mode_lib *mode_lib);
-static unsigned int dscceComputeDelay(
- unsigned int bpc,
- double bpp,
- unsigned int sliceWidth,
- unsigned int numSlices,
- enum output_format_class pixelFormat);
-static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
-// Super monster function with some 45 argument
-static bool CalculatePrefetchSchedule(
- struct display_mode_lib *mode_lib,
- double DPPCLK,
- double DISPCLK,
- double PixelClock,
- double DCFClkDeepSleep,
- unsigned int DSCDelay,
- unsigned int DPPPerPlane,
- bool ScalerEnabled,
- unsigned int NumberOfCursors,
- double DPPCLKDelaySubtotal,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCFormater,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
- unsigned int ScalerRecoutWidth,
- enum output_format_class OutputFormat,
- unsigned int VBlank,
- unsigned int HTotal,
- unsigned int MaxInterDCNTileRepeaters,
- unsigned int VStartup,
- unsigned int PageTableLevels,
- bool VirtualMemoryEnable,
- bool DynamicMetadataEnable,
- unsigned int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
- bool DCCEnable,
- double UrgentLatency,
- double UrgentExtraLatency,
- double TCalc,
- unsigned int PDEAndMetaPTEBytesFrame,
- unsigned int MetaRowByte,
- unsigned int PixelPTEBytesPerRow,
- double PrefetchSourceLinesY,
- unsigned int SwathWidthY,
- double BytePerPixelDETY,
- double VInitPreFillY,
- unsigned int MaxNumSwathY,
- double PrefetchSourceLinesC,
- double BytePerPixelDETC,
- double VInitPreFillC,
- unsigned int MaxNumSwathC,
- unsigned int SwathHeightY,
- unsigned int SwathHeightC,
- double TWait,
- bool XFCEnabled,
- double XFCRemoteSurfaceFlipDelay,
- bool InterlaceEnable,
- bool ProgressiveToInterlaceUnitInOPP,
- double *DSTXAfterScaler,
- double *DSTYAfterScaler,
- double *DestinationLinesForPrefetch,
- double *PrefetchBandwidth,
- double *DestinationLinesToRequestVMInVBlank,
- double *DestinationLinesToRequestRowInVBlank,
- double *VRatioPrefetchY,
- double *VRatioPrefetchC,
- double *RequiredPrefetchPixDataBW,
- unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
- double *Tno_bw,
- unsigned int *VUpdateOffsetPix,
- unsigned int *VUpdateWidthPix,
- unsigned int *VReadyOffsetPix);
-static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
-static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
-static double CalculatePrefetchSourceLines(
- struct display_mode_lib *mode_lib,
- double VRatio,
- double vtaps,
- bool Interlace,
- bool ProgressiveToInterlaceUnitInOPP,
- unsigned int SwathHeight,
- unsigned int ViewportYStart,
- double *VInitPreFill,
- unsigned int *MaxNumSwath);
-static unsigned int CalculateVMAndRowBytes(
- struct display_mode_lib *mode_lib,
- bool DCCEnable,
- unsigned int BlockHeight256Bytes,
- unsigned int BlockWidth256Bytes,
- enum source_format_class SourcePixelFormat,
- unsigned int SurfaceTiling,
- unsigned int BytePerPixel,
- enum scan_direction_class ScanDirection,
- unsigned int ViewportWidth,
- unsigned int ViewportHeight,
- unsigned int SwathWidthY,
- bool VirtualMemoryEnable,
- unsigned int VMMPageSize,
- unsigned int PTEBufferSizeInRequests,
- unsigned int PDEProcessingBufIn64KBReqs,
- unsigned int Pitch,
- unsigned int DCCMetaPitch,
- unsigned int *MacroTileWidth,
- unsigned int *MetaRowByte,
- unsigned int *PixelPTEBytesPerRow,
- bool *PTEBufferSizeNotExceeded,
- unsigned int *dpte_row_height,
- unsigned int *meta_row_height);
-static double CalculateTWait(
- unsigned int PrefetchMode,
- double DRAMClockChangeLatency,
- double UrgentLatency,
- double SREnterPlusExitTime);
-static double CalculateRemoteSurfaceFlipDelay(
- struct display_mode_lib *mode_lib,
- double VRatio,
- double SwathWidth,
- double Bpp,
- double LineTime,
- double XFCTSlvVupdateOffset,
- double XFCTSlvVupdateWidth,
- double XFCTSlvVreadyOffset,
- double XFCXBUFLatencyTolerance,
- double XFCFillBWOverhead,
- double XFCSlvChunkSize,
- double XFCBusTransportTime,
- double TCalc,
- double TWait,
- double *SrcActiveDrainRate,
- double *TInitXFill,
- double *TslvChk);
-static double CalculateWriteBackDISPCLK(
- enum source_format_class WritebackPixelFormat,
- double PixelClock,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackLumaHTaps,
- unsigned int WritebackLumaVTaps,
- unsigned int WritebackChromaHTaps,
- unsigned int WritebackChromaVTaps,
- double WritebackDestinationWidth,
- unsigned int HTotal,
- unsigned int WritebackChromaLineBufferWidth);
-static void CalculateActiveRowBandwidth(
- bool VirtualMemoryEnable,
- enum source_format_class SourcePixelFormat,
- double VRatio,
- bool DCCEnable,
- double LineTime,
- unsigned int MetaRowByteLuma,
- unsigned int MetaRowByteChroma,
- unsigned int meta_row_height_luma,
- unsigned int meta_row_height_chroma,
- unsigned int PixelPTEBytesPerRowLuma,
- unsigned int PixelPTEBytesPerRowChroma,
- unsigned int dpte_row_height_luma,
- unsigned int dpte_row_height_chroma,
- double *meta_row_bw,
- double *dpte_row_bw,
- double *qual_row_bw);
-static void CalculateFlipSchedule(
- struct display_mode_lib *mode_lib,
- double UrgentExtraLatency,
- double UrgentLatency,
- unsigned int MaxPageTableLevels,
- bool VirtualMemoryEnable,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- unsigned int ImmediateFlipBytes,
- double LineTime,
- double Tno_bw,
- double VRatio,
- double PDEAndMetaPTEBytesFrame,
- unsigned int MetaRowByte,
- unsigned int PixelPTEBytesPerRow,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- double qual_row_bw,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
-static double CalculateWriteBackDelay(
- enum source_format_class WritebackPixelFormat,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackLumaHTaps,
- unsigned int WritebackLumaVTaps,
- unsigned int WritebackChromaHTaps,
- unsigned int WritebackChromaVTaps,
- unsigned int WritebackDestinationWidth);
-static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib);
-static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp);
-static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
-
-void set_prefetch_mode(
- struct display_mode_lib *mode_lib,
- bool cstate_en,
- bool pstate_en,
- bool ignore_viewport_pos,
- bool immediate_flip_support)
-{
- unsigned int prefetch_mode;
-
- if (cstate_en && pstate_en)
- prefetch_mode = 0;
- else if (cstate_en)
- prefetch_mode = 1;
- else
- prefetch_mode = 2;
- if (prefetch_mode != mode_lib->vba.PrefetchMode
- || ignore_viewport_pos != mode_lib->vba.IgnoreViewportPositioning
- || immediate_flip_support != mode_lib->vba.ImmediateFlipSupport) {
- DTRACE(
- " Prefetch mode has changed from %i to %i. Recalculating.",
- prefetch_mode,
- mode_lib->vba.PrefetchMode);
- mode_lib->vba.PrefetchMode = prefetch_mode;
- mode_lib->vba.IgnoreViewportPositioning = ignore_viewport_pos;
- mode_lib->vba.ImmediateFlipSupport = immediate_flip_support;
- recalculate(mode_lib);
- }
-}
-
-unsigned int dml_get_voltage_level(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes)
-{
- bool need_recalculate = memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
- || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
- || num_pipes != mode_lib->vba.cache_num_pipes
- || memcmp(pipes, mode_lib->vba.cache_pipes,
- sizeof(display_e2e_pipe_params_st) * num_pipes) != 0;
-
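-	// cache the current soc/ip/pipe parameters so later calls can detect changes cheaply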
- mode_lib->vba.soc = mode_lib->soc;
- mode_lib->vba.ip = mode_lib->ip;
- memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
- mode_lib->vba.cache_num_pipes = num_pipes;
-
- if (need_recalculate && pipes[0].clks_cfg.dppclk_mhz != 0)
- recalculate(mode_lib);
- else {
- fetch_socbb_params(mode_lib);
- fetch_ip_params(mode_lib);
- fetch_pipe_params(mode_lib);
- }
- ModeSupportAndSystemConfigurationFull(mode_lib);
-
- return mode_lib->vba.VoltageLevel;
-}
-
-#define dml_get_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes) \
-{ \
- recalculate_params(mode_lib, pipes, num_pipes); \
- return var; \
-}
-
-dml_get_attr_func(clk_dcf_deepsleep, mode_lib->vba.DCFClkDeepSleep);
-dml_get_attr_func(wm_urgent, mode_lib->vba.UrgentWatermark);
-dml_get_attr_func(wm_memory_trip, mode_lib->vba.MemoryTripWatermark);
-dml_get_attr_func(wm_writeback_urgent, mode_lib->vba.WritebackUrgentWatermark);
-dml_get_attr_func(wm_stutter_exit, mode_lib->vba.StutterExitWatermark);
-dml_get_attr_func(wm_stutter_enter_exit, mode_lib->vba.StutterEnterPlusExitWatermark);
-dml_get_attr_func(wm_dram_clock_change, mode_lib->vba.DRAMClockChangeWatermark);
-dml_get_attr_func(wm_writeback_dram_clock_change, mode_lib->vba.WritebackDRAMClockChangeWatermark);
-dml_get_attr_func(wm_xfc_underflow, mode_lib->vba.UrgentWatermark); // xfc_underflow maps to urgent
-dml_get_attr_func(stutter_efficiency, mode_lib->vba.StutterEfficiency);
-dml_get_attr_func(stutter_efficiency_no_vblank, mode_lib->vba.StutterEfficiencyNotIncludingVBlank);
-dml_get_attr_func(urgent_latency, mode_lib->vba.MinUrgentLatencySupportUs);
-dml_get_attr_func(urgent_extra_latency, mode_lib->vba.UrgentExtraLatency);
-dml_get_attr_func(nonurgent_latency, mode_lib->vba.NonUrgentLatencyTolerance);
-dml_get_attr_func(
- dram_clock_change_latency,
- mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
-dml_get_attr_func(dispclk_calculated, mode_lib->vba.DISPCLK_calculated);
-dml_get_attr_func(total_data_read_bw, mode_lib->vba.TotalDataReadBandwidth);
-dml_get_attr_func(return_bw, mode_lib->vba.ReturnBW);
-dml_get_attr_func(tcalc, mode_lib->vba.TCalc);
-
-#define dml_get_pipe_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe) \
-{\
- unsigned int which_plane; \
- recalculate_params(mode_lib, pipes, num_pipes); \
- which_plane = mode_lib->vba.pipe_plane[which_pipe]; \
- return var[which_plane]; \
-}
-
-dml_get_pipe_attr_func(dsc_delay, mode_lib->vba.DSCDelay);
-dml_get_pipe_attr_func(dppclk_calculated, mode_lib->vba.DPPCLK_calculated);
-dml_get_pipe_attr_func(dscclk_calculated, mode_lib->vba.DSCCLK_calculated);
-dml_get_pipe_attr_func(min_ttu_vblank, mode_lib->vba.MinTTUVBlank);
-dml_get_pipe_attr_func(vratio_prefetch_l, mode_lib->vba.VRatioPrefetchY);
-dml_get_pipe_attr_func(vratio_prefetch_c, mode_lib->vba.VRatioPrefetchC);
-dml_get_pipe_attr_func(dst_x_after_scaler, mode_lib->vba.DSTXAfterScaler);
-dml_get_pipe_attr_func(dst_y_after_scaler, mode_lib->vba.DSTYAfterScaler);
-dml_get_pipe_attr_func(dst_y_per_vm_vblank, mode_lib->vba.DestinationLinesToRequestVMInVBlank);
-dml_get_pipe_attr_func(dst_y_per_row_vblank, mode_lib->vba.DestinationLinesToRequestRowInVBlank);
-dml_get_pipe_attr_func(dst_y_prefetch, mode_lib->vba.DestinationLinesForPrefetch);
-dml_get_pipe_attr_func(dst_y_per_vm_flip, mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip);
-dml_get_pipe_attr_func(
- dst_y_per_row_flip,
- mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip);
-
-dml_get_pipe_attr_func(xfc_transfer_delay, mode_lib->vba.XFCTransferDelay);
-dml_get_pipe_attr_func(xfc_precharge_delay, mode_lib->vba.XFCPrechargeDelay);
-dml_get_pipe_attr_func(xfc_remote_surface_flip_latency, mode_lib->vba.XFCRemoteSurfaceFlipLatency);
-dml_get_pipe_attr_func(xfc_prefetch_margin, mode_lib->vba.XFCPrefetchMargin);
-
-unsigned int get_vstartup_calculated(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes,
- unsigned int which_pipe)
-{
- unsigned int which_plane;
-
- recalculate_params(mode_lib, pipes, num_pipes);
- which_plane = mode_lib->vba.pipe_plane[which_pipe];
- return mode_lib->vba.VStartup[which_plane];
-}
-
-double get_total_immediate_flip_bytes(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes)
-{
- recalculate_params(mode_lib, pipes, num_pipes);
- return mode_lib->vba.TotImmediateFlipBytes;
-}
-
-double get_total_immediate_flip_bw(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes)
-{
- recalculate_params(mode_lib, pipes, num_pipes);
- return mode_lib->vba.ImmediateFlipBW;
-}
-
-double get_total_prefetch_bw(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes)
-{
- unsigned int k;
- double total_prefetch_bw = 0.0;
-
- recalculate_params(mode_lib, pipes, num_pipes);
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
- total_prefetch_bw += mode_lib->vba.PrefetchBandwidth[k];
- return total_prefetch_bw;
-}
-
-static void fetch_socbb_params(struct display_mode_lib *mode_lib)
-{
- soc_bounding_box_st *soc = &mode_lib->vba.soc;
- unsigned int i;
-
- // SOC Bounding Box Parameters
- mode_lib->vba.ReturnBusWidth = soc->return_bus_width_bytes;
- mode_lib->vba.NumberOfChannels = soc->num_chans;
- mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency =
- soc->ideal_dram_bw_after_urgent_percent; // there's always that one bastard variable that's so long it throws everything out of alignment!
- mode_lib->vba.UrgentLatency = soc->urgent_latency_us;
- mode_lib->vba.RoundTripPingLatencyCycles = soc->round_trip_ping_latency_dcfclk_cycles;
- mode_lib->vba.UrgentOutOfOrderReturnPerChannel =
- soc->urgent_out_of_order_return_per_channel_bytes;
- mode_lib->vba.WritebackLatency = soc->writeback_latency_us;
- mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
- mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
- mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
- mode_lib->vba.Downspreading = soc->downspread_percent;
- mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
- mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!
- mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new
- mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new
- mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
- // Set the voltage scaling clocks as the defaults. Most of these will
- // be set to different values by the test
- for (i = 0; i < DC__VOLTAGE_STATES; i++)
- if (soc->clock_limits[i].state == mode_lib->vba.VoltageLevel)
- break;
-
- mode_lib->vba.DCFCLK = soc->clock_limits[i].dcfclk_mhz;
- mode_lib->vba.SOCCLK = soc->clock_limits[i].socclk_mhz;
- mode_lib->vba.DRAMSpeed = soc->clock_limits[i].dram_speed_mhz;
- mode_lib->vba.FabricClock = soc->clock_limits[i].fabricclk_mhz;
-
- mode_lib->vba.XFCBusTransportTime = soc->xfc_bus_transport_time_us;
- mode_lib->vba.XFCXBUFLatencyTolerance = soc->xfc_xbuf_latency_tolerance_us;
-
- mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp = false;
- mode_lib->vba.MaxHSCLRatio = 4;
- mode_lib->vba.MaxVSCLRatio = 4;
- mode_lib->vba.MaxNumWriteback = 0; /*TODO*/
- mode_lib->vba.WritebackLumaAndChromaScalingSupported = true;
- mode_lib->vba.Cursor64BppSupport = true;
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.DCFCLKPerState[i] = soc->clock_limits[i].dcfclk_mhz;
- mode_lib->vba.FabricClockPerState[i] = soc->clock_limits[i].fabricclk_mhz;
- mode_lib->vba.SOCCLKPerState[i] = soc->clock_limits[i].socclk_mhz;
- mode_lib->vba.PHYCLKPerState[i] = soc->clock_limits[i].phyclk_mhz;
- mode_lib->vba.MaxDppclk[i] = soc->clock_limits[i].dppclk_mhz;
- mode_lib->vba.MaxDSCCLK[i] = soc->clock_limits[i].dscclk_mhz;
- mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
- mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
- }
-}
-
-static void fetch_ip_params(struct display_mode_lib *mode_lib)
-{
- ip_params_st *ip = &mode_lib->vba.ip;
-
- // IP Parameters
- mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
- mode_lib->vba.MaxNumOTG = ip->max_num_otg;
- mode_lib->vba.CursorChunkSize = ip->cursor_chunk_size;
- mode_lib->vba.CursorBufferSize = ip->cursor_buffer_size;
-
- mode_lib->vba.MaxDCHUBToPSCLThroughput = ip->max_dchub_pscl_bw_pix_per_clk;
- mode_lib->vba.MaxPSCLToLBThroughput = ip->max_pscl_lb_bw_pix_per_clk;
- mode_lib->vba.ROBBufferSizeInKByte = ip->rob_buffer_size_kbytes;
- mode_lib->vba.DETBufferSizeInKByte = ip->det_buffer_size_kbytes;
- mode_lib->vba.PixelChunkSizeInKByte = ip->pixel_chunk_size_kbytes;
- mode_lib->vba.MetaChunkSize = ip->meta_chunk_size_kbytes;
- mode_lib->vba.PTEChunkSize = ip->pte_chunk_size_kbytes;
- mode_lib->vba.WritebackChunkSize = ip->writeback_chunk_size_kbytes;
- mode_lib->vba.LineBufferSize = ip->line_buffer_size_bits;
- mode_lib->vba.MaxLineBufferLines = ip->max_line_buffer_lines;
- mode_lib->vba.PTEBufferSizeInRequests = ip->dpte_buffer_size_in_pte_reqs;
- mode_lib->vba.DPPOutputBufferPixels = ip->dpp_output_buffer_pixels;
- mode_lib->vba.OPPOutputBufferLines = ip->opp_output_buffer_lines;
- mode_lib->vba.WritebackInterfaceLumaBufferSize = ip->writeback_luma_buffer_size_kbytes;
- mode_lib->vba.WritebackInterfaceChromaBufferSize = ip->writeback_chroma_buffer_size_kbytes;
- mode_lib->vba.WritebackChromaLineBufferWidth =
- ip->writeback_chroma_line_buffer_width_pixels;
- mode_lib->vba.MaxPageTableLevels = ip->max_page_table_levels;
- mode_lib->vba.MaxInterDCNTileRepeaters = ip->max_inter_dcn_tile_repeaters;
- mode_lib->vba.NumberOfDSC = ip->num_dsc;
- mode_lib->vba.ODMCapability = ip->odm_capable;
- mode_lib->vba.DISPCLKRampingMargin = ip->dispclk_ramp_margin_percent;
-
- mode_lib->vba.XFCSupported = ip->xfc_supported;
- mode_lib->vba.XFCFillBWOverhead = ip->xfc_fill_bw_overhead_percent;
- mode_lib->vba.XFCFillConstant = ip->xfc_fill_constant_bytes;
- mode_lib->vba.DPPCLKDelaySubtotal = ip->dppclk_delay_subtotal;
- mode_lib->vba.DPPCLKDelaySCL = ip->dppclk_delay_scl;
- mode_lib->vba.DPPCLKDelaySCLLBOnly = ip->dppclk_delay_scl_lb_only;
- mode_lib->vba.DPPCLKDelayCNVCFormater = ip->dppclk_delay_cnvc_formatter;
- mode_lib->vba.DPPCLKDelayCNVCCursor = ip->dppclk_delay_cnvc_cursor;
- mode_lib->vba.DISPCLKDelaySubtotal = ip->dispclk_delay_subtotal;
-
- mode_lib->vba.ProgressiveToInterlaceUnitInOPP = ip->ptoi_supported;
-
- mode_lib->vba.PDEProcessingBufIn64KBReqs = ip->pde_proc_buffer_size_64k_reqs;
-}
-
-static void fetch_pipe_params(struct display_mode_lib *mode_lib)
-{
- display_e2e_pipe_params_st *pipes = mode_lib->vba.cache_pipes;
- ip_params_st *ip = &mode_lib->vba.ip;
-
- unsigned int OTGInstPlane[DC__NUM_DPP__MAX];
- unsigned int j, k;
- bool PlaneVisited[DC__NUM_DPP__MAX];
- bool visited[DC__NUM_DPP__MAX];
-
- // Convert Pipes to Planes
- for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k)
- visited[k] = false;
-
- mode_lib->vba.NumberOfActivePlanes = 0;
- for (j = 0; j < mode_lib->vba.cache_num_pipes; ++j) {
- display_pipe_source_params_st *src = &pipes[j].pipe.src;
- display_pipe_dest_params_st *dst = &pipes[j].pipe.dest;
- scaler_ratio_depth_st *scl = &pipes[j].pipe.scale_ratio_depth;
- scaler_taps_st *taps = &pipes[j].pipe.scale_taps;
- display_output_params_st *dout = &pipes[j].dout;
- display_clocks_and_cfg_st *clks = &pipes[j].clks_cfg;
-
- if (visited[j])
- continue;
- visited[j] = true;
-
- mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
-
- mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
- mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
- (enum scan_direction_class) (src->source_scan);
- mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] =
- src->viewport_width;
- mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] =
- src->viewport_height;
- mode_lib->vba.ViewportYStartY[mode_lib->vba.NumberOfActivePlanes] =
- src->viewport_y_y;
- mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
- src->viewport_y_c;
- mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
- mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
- mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
- mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
- mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] = scl->vscl_ratio;
- mode_lib->vba.ScalerEnabled[mode_lib->vba.NumberOfActivePlanes] = scl->scl_enable;
- mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes] = dst->interlaced;
- if (mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes])
- mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] *= 2.0;
- mode_lib->vba.htaps[mode_lib->vba.NumberOfActivePlanes] = taps->htaps;
- mode_lib->vba.vtaps[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps;
- mode_lib->vba.HTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->htaps_c;
- mode_lib->vba.VTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps_c;
- mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
- mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
- mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
- src->dcc_use_global ?
- ip->dcc_supported : src->dcc && ip->dcc_supported;
- mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
- mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
- (enum source_format_class) (src->source_format);
- mode_lib->vba.HActive[mode_lib->vba.NumberOfActivePlanes] = dst->hactive;
- mode_lib->vba.VActive[mode_lib->vba.NumberOfActivePlanes] = dst->vactive;
- mode_lib->vba.SurfaceTiling[mode_lib->vba.NumberOfActivePlanes] =
- (enum dm_swizzle_mode) (src->sw_mode);
- mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] =
- dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
- mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
- dst->odm_combine;
- mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
- (enum output_format_class) (dout->output_format);
- mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
- (enum output_encoder_class) (dout->output_type);
- mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;
- mode_lib->vba.OutputLinkDPLanes[mode_lib->vba.NumberOfActivePlanes] =
- dout->dp_lanes;
- mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
- mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
- dout->dsc_slices;
- mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
- dout->output_bpc == 0 ? 12 : dout->output_bpc;
- mode_lib->vba.WritebackEnable[mode_lib->vba.NumberOfActivePlanes] = dout->wb_enable;
- mode_lib->vba.WritebackSourceHeight[mode_lib->vba.NumberOfActivePlanes] =
- dout->wb.wb_src_height;
- mode_lib->vba.WritebackDestinationWidth[mode_lib->vba.NumberOfActivePlanes] =
- dout->wb.wb_dst_width;
- mode_lib->vba.WritebackDestinationHeight[mode_lib->vba.NumberOfActivePlanes] =
- dout->wb.wb_dst_height;
- mode_lib->vba.WritebackPixelFormat[mode_lib->vba.NumberOfActivePlanes] =
- (enum source_format_class) (dout->wb.wb_pixel_format);
- mode_lib->vba.WritebackLumaHTaps[mode_lib->vba.NumberOfActivePlanes] =
- dout->wb.wb_htaps_luma;
- mode_lib->vba.WritebackLumaVTaps[mode_lib->vba.NumberOfActivePlanes] =
- dout->wb.wb_vtaps_luma;
- mode_lib->vba.WritebackChromaHTaps[mode_lib->vba.NumberOfActivePlanes] =
- dout->wb.wb_htaps_chroma;
- mode_lib->vba.WritebackChromaVTaps[mode_lib->vba.NumberOfActivePlanes] =
- dout->wb.wb_vtaps_chroma;
- mode_lib->vba.WritebackHRatio[mode_lib->vba.NumberOfActivePlanes] =
- dout->wb.wb_hratio;
- mode_lib->vba.WritebackVRatio[mode_lib->vba.NumberOfActivePlanes] =
- dout->wb.wb_vratio;
-
- mode_lib->vba.DynamicMetadataEnable[mode_lib->vba.NumberOfActivePlanes] =
- src->dynamic_metadata_enable;
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[mode_lib->vba.NumberOfActivePlanes] =
- src->dynamic_metadata_lines_before_active;
- mode_lib->vba.DynamicMetadataTransmittedBytes[mode_lib->vba.NumberOfActivePlanes] =
- src->dynamic_metadata_xmit_bytes;
-
- mode_lib->vba.XFCEnabled[mode_lib->vba.NumberOfActivePlanes] = src->xfc_enable
- && ip->xfc_supported;
- mode_lib->vba.XFCSlvChunkSize = src->xfc_params.xfc_slv_chunk_size_bytes;
- mode_lib->vba.XFCTSlvVupdateOffset = src->xfc_params.xfc_tslv_vupdate_offset_us;
- mode_lib->vba.XFCTSlvVupdateWidth = src->xfc_params.xfc_tslv_vupdate_width_us;
- mode_lib->vba.XFCTSlvVreadyOffset = src->xfc_params.xfc_tslv_vready_offset_us;
- mode_lib->vba.PixelClock[mode_lib->vba.NumberOfActivePlanes] = dst->pixel_rate_mhz;
- mode_lib->vba.DPPCLK[mode_lib->vba.NumberOfActivePlanes] = clks->dppclk_mhz;
- if (ip->is_line_buffer_bpp_fixed)
- mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] =
- ip->line_buffer_fixed_bpp;
- else {
- unsigned int lb_depth;
-
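-			// line-buffer depth in bits per pixel: three components times bits per component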
- switch (scl->lb_depth) {
- case dm_lb_6:
- lb_depth = 18;
- break;
- case dm_lb_8:
- lb_depth = 24;
- break;
- case dm_lb_10:
- lb_depth = 30;
- break;
- case dm_lb_12:
- lb_depth = 36;
- break;
- case dm_lb_16:
- lb_depth = 48;
- break;
- default:
- lb_depth = 36;
- }
- mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] = lb_depth;
- }
- mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes] = 0;
- // The DML spreadsheet assumes that the two cursors utilize the same amount of bandwidth. We'll
- // calculate things a little more accurately
- for (k = 0; k < DC__NUM_CURSOR__MAX; ++k) {
- switch (k) {
- case 0:
- mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][0] =
- CursorBppEnumToBits(
- (enum cursor_bpp) (src->cur0_bpp));
- mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][0] =
- src->cur0_src_width;
- if (src->cur0_src_width > 0)
- mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
- break;
- case 1:
- mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][1] =
- CursorBppEnumToBits(
- (enum cursor_bpp) (src->cur1_bpp));
- mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][1] =
- src->cur1_src_width;
- if (src->cur1_src_width > 0)
- mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
- break;
- default:
-				dml_print(
-						"ERROR: Number of cursors specified exceeds supported maximum\n");
- }
- }
-
- OTGInstPlane[mode_lib->vba.NumberOfActivePlanes] = dst->otg_inst;
-
- if (dst->odm_combine && !src->is_hsplit)
- dml_print(
-				"ERROR: ODM Combine is specified but is_hsplit has not been specified for pipe %i\n",
- j);
-
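-		// fold the other pipes of the same hsplit group into this plane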
- if (src->is_hsplit) {
- for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
- display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
- display_output_params_st *dout_k = &pipes[k].dout;
-
- if (src_k->is_hsplit && !visited[k]
- && src->hsplit_grp == src_k->hsplit_grp) {
- mode_lib->vba.pipe_plane[k] =
- mode_lib->vba.NumberOfActivePlanes;
- mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
- if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
- == dm_horz)
- mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
- src_k->viewport_width;
- else
- mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
- src_k->viewport_height;
-
- mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
- dout_k->dsc_slices;
- visited[k] = true;
- }
- }
- }
-
- mode_lib->vba.NumberOfActivePlanes++;
- }
-
-	// handle overlays through mode_lib->vba.BlendingAndTiming
-	// mode_lib->vba.BlendingAndTiming tells you which instance to look at to get timing, the so-called 'master'
-
- for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
- PlaneVisited[j] = false;
-
- for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
- for (k = j + 1; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (!PlaneVisited[k] && OTGInstPlane[j] == OTGInstPlane[k]) {
- // doesn't matter, so choose the smaller one
- mode_lib->vba.BlendingAndTiming[j] = j;
- PlaneVisited[j] = true;
- mode_lib->vba.BlendingAndTiming[k] = j;
- PlaneVisited[k] = true;
- }
- }
-
- if (!PlaneVisited[j]) {
- mode_lib->vba.BlendingAndTiming[j] = j;
- PlaneVisited[j] = true;
- }
- }
-
-	// TODO: mode_lib->vba.ODMCombineEnabled => 2 * mode_lib->vba.DPPPerPlane...actually maybe not since all pipes are specified
- // Do we want the dscclk to automatically be halved? Guess not since the value is specified
-
- mode_lib->vba.SynchronizedVBlank = pipes[0].pipe.dest.synchronized_vblank_all_planes;
- for (k = 1; k < mode_lib->vba.cache_num_pipes; ++k)
- ASSERT(mode_lib->vba.SynchronizedVBlank == pipes[k].pipe.dest.synchronized_vblank_all_planes);
-
- mode_lib->vba.VirtualMemoryEnable = false;
- mode_lib->vba.OverridePageTableLevels = 0;
-
- for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k) {
- mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable
- || !!pipes[k].pipe.src.vm;
- mode_lib->vba.OverridePageTableLevels =
- (pipes[k].pipe.src.vm_levels_force_en
- && mode_lib->vba.OverridePageTableLevels
- < pipes[k].pipe.src.vm_levels_force) ?
- pipes[k].pipe.src.vm_levels_force :
- mode_lib->vba.OverridePageTableLevels;
- }
-
- if (mode_lib->vba.OverridePageTableLevels)
- mode_lib->vba.MaxPageTableLevels = mode_lib->vba.OverridePageTableLevels;
-
- mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable && !!ip->pte_enable;
-
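-	// effective memory bandwidth is limited by whichever of the DRAM and fabric return paths is smaller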
- mode_lib->vba.FabricAndDRAMBandwidth = dml_min(
- mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels
- * mode_lib->vba.DRAMChannelWidth,
- mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn)
- / 1000.0;
-
- // TODO: Must be consistent across all pipes
- // DCCProgrammingAssumesScanDirectionUnknown = src.dcc_scan_dir_unknown;
-}
-
-static void recalculate(struct display_mode_lib *mode_lib)
-{
- ModeSupportAndSystemConfiguration(mode_lib);
- PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
- DisplayPipeConfiguration(mode_lib);
- DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
-}
-
-// in wm mode we pull the parameters needed from the display_e2e_pipe_params_st structs
-// rather than working them out as in recalculate_ms
-static void recalculate_params(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes)
-{
-	// This is only safe to use memcmp because there are no non-POD types in struct display_mode_lib
- if (memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
- || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
- || num_pipes != mode_lib->vba.cache_num_pipes
- || memcmp(
- pipes,
- mode_lib->vba.cache_pipes,
- sizeof(display_e2e_pipe_params_st) * num_pipes) != 0) {
- mode_lib->vba.soc = mode_lib->soc;
- mode_lib->vba.ip = mode_lib->ip;
- memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
- mode_lib->vba.cache_num_pipes = num_pipes;
- recalculate(mode_lib);
- }
-}
-
-static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
-{
- soc_bounding_box_st *soc = &mode_lib->vba.soc;
- unsigned int i, k;
- unsigned int total_pipes = 0;
-
- mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
- for (i = 1; i < mode_lib->vba.cache_num_pipes; ++i)
- ASSERT(mode_lib->vba.VoltageLevel == -1 || mode_lib->vba.VoltageLevel == mode_lib->vba.cache_pipes[i].clks_cfg.voltage);
-
- mode_lib->vba.DCFCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dcfclk_mhz;
- mode_lib->vba.SOCCLK = mode_lib->vba.cache_pipes[0].clks_cfg.socclk_mhz;
-
- if (mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz > 0.0)
- mode_lib->vba.DISPCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz;
- else
- mode_lib->vba.DISPCLK = soc->clock_limits[mode_lib->vba.VoltageLevel].dispclk_mhz;
-
- fetch_socbb_params(mode_lib);
- fetch_ip_params(mode_lib);
- fetch_pipe_params(mode_lib);
-
- // Total Available Pipes Support Check
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
- total_pipes += mode_lib->vba.DPPPerPlane[k];
- ASSERT(total_pipes <= DC__NUM_DPP__MAX);
-}
-
-static double adjust_ReturnBW(
- struct display_mode_lib *mode_lib,
- double ReturnBW,
- bool DCCEnabledAnyPlane,
- double ReturnBandwidthToDCN)
-{
- double CriticalCompression;
-
- if (DCCEnabledAnyPlane
- && ReturnBandwidthToDCN
- > mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0)
- ReturnBW =
- dml_min(
- ReturnBW,
- ReturnBandwidthToDCN * 4
- * (1.0
- - mode_lib->vba.UrgentLatency
- / ((mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024
- / ReturnBandwidthToDCN
- - mode_lib->vba.DCFCLK
- * mode_lib->vba.ReturnBusWidth
- / 4)
- + mode_lib->vba.UrgentLatency));
-
- CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK
- * mode_lib->vba.UrgentLatency
- / (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatency
- + (mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024);
-
- if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0)
- ReturnBW =
- dml_min(
- ReturnBW,
- 4.0 * ReturnBandwidthToDCN
- * (mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024
- * mode_lib->vba.ReturnBusWidth
- * mode_lib->vba.DCFCLK
- * mode_lib->vba.UrgentLatency
- / dml_pow(
- (ReturnBandwidthToDCN
- * mode_lib->vba.UrgentLatency
- + (mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024),
- 2));
-
- return ReturnBW;
-}
-
-static unsigned int dscceComputeDelay(
- unsigned int bpc,
- double bpp,
- unsigned int sliceWidth,
- unsigned int numSlices,
- enum output_format_class pixelFormat)
-{
- // valid bpc = source bits per component in the set of {8, 10, 12}
- // valid bpp = increments of 1/16 of a bit
- // min = 6/7/8 in N420/N422/444, respectively
- // max = such that compression is 1:1
- //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
-	//valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
-	//valid pixelFormat = pixel/color format in the set of {N444_RGB, S422, N422, N420}
-
- // fixed value
- unsigned int rcModelSize = 8192;
-
- // N422/N420 operate at 2 pixels per clock
- unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
- Delay, pixels;
-
- if (pixelFormat == dm_n422 || pixelFormat == dm_420)
- pixelsPerClock = 2;
-	// all other modes operate at 1 pixel per clock
- else
- pixelsPerClock = 1;
-
- //initial transmit delay as per PPS
- initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
-
- //compute ssm delay
- if (bpc == 8)
- D = 81;
- else if (bpc == 10)
- D = 89;
- else
- D = 113;
-
- //divide by pixel per cycle to compute slice width as seen by DSC
- w = sliceWidth / pixelsPerClock;
-
- //422 mode has an additional cycle of delay
- if (pixelFormat == dm_s422)
- s = 1;
- else
- s = 0;
-
- //main calculation for the dscce
- ix = initalXmitDelay + 45;
- wx = (w + 2) / 3;
- p = 3 * wx - w;
- l0 = ix / w;
- a = ix + p * l0;
- ax = (a + 2) / 3 + D + 6 + 1;
- l = (ax + wx - 1) / wx;
- if ((ix % w) == 0 && p != 0)
- lstall = 1;
- else
- lstall = 0;
- Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22;
-
- //dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
- pixels = Delay * 3 * pixelsPerClock;
- return pixels;
-}
-
-static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
-{
- unsigned int Delay = 0;
-
- if (pixelFormat == dm_420) {
- // sfr
- Delay = Delay + 2;
- // dsccif
- Delay = Delay + 0;
- // dscc - input deserializer
- Delay = Delay + 3;
- // dscc gets pixels every other cycle
- Delay = Delay + 2;
- // dscc - input cdc fifo
- Delay = Delay + 12;
- // dscc gets pixels every other cycle
- Delay = Delay + 13;
- // dscc - cdc uncertainty
- Delay = Delay + 2;
- // dscc - output cdc fifo
- Delay = Delay + 7;
- // dscc gets pixels every other cycle
- Delay = Delay + 3;
- // dscc - cdc uncertainty
- Delay = Delay + 2;
- // dscc - output serializer
- Delay = Delay + 1;
- // sft
- Delay = Delay + 1;
- } else if (pixelFormat == dm_n422) {
- // sfr
- Delay = Delay + 2;
- // dsccif
- Delay = Delay + 1;
- // dscc - input deserializer
- Delay = Delay + 5;
- // dscc - input cdc fifo
- Delay = Delay + 25;
- // dscc - cdc uncertainty
- Delay = Delay + 2;
- // dscc - output cdc fifo
- Delay = Delay + 10;
- // dscc - cdc uncertainty
- Delay = Delay + 2;
- // dscc - output serializer
- Delay = Delay + 1;
- // sft
- Delay = Delay + 1;
- } else {
- // sfr
- Delay = Delay + 2;
- // dsccif
- Delay = Delay + 0;
- // dscc - input deserializer
- Delay = Delay + 3;
- // dscc - input cdc fifo
- Delay = Delay + 12;
- // dscc - cdc uncertainty
- Delay = Delay + 2;
- // dscc - output cdc fifo
- Delay = Delay + 7;
- // dscc - output serializer
- Delay = Delay + 1;
- // dscc - cdc uncertainty
- Delay = Delay + 2;
- // sft
- Delay = Delay + 1;
- }
-
- return Delay;
-}
-
-static bool CalculatePrefetchSchedule(
- struct display_mode_lib *mode_lib,
- double DPPCLK,
- double DISPCLK,
- double PixelClock,
- double DCFClkDeepSleep,
- unsigned int DSCDelay,
- unsigned int DPPPerPlane,
- bool ScalerEnabled,
- unsigned int NumberOfCursors,
- double DPPCLKDelaySubtotal,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCFormater,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
- unsigned int ScalerRecoutWidth,
- enum output_format_class OutputFormat,
- unsigned int VBlank,
- unsigned int HTotal,
- unsigned int MaxInterDCNTileRepeaters,
- unsigned int VStartup,
- unsigned int PageTableLevels,
- bool VirtualMemoryEnable,
- bool DynamicMetadataEnable,
- unsigned int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
- bool DCCEnable,
- double UrgentLatency,
- double UrgentExtraLatency,
- double TCalc,
- unsigned int PDEAndMetaPTEBytesFrame,
- unsigned int MetaRowByte,
- unsigned int PixelPTEBytesPerRow,
- double PrefetchSourceLinesY,
- unsigned int SwathWidthY,
- double BytePerPixelDETY,
- double VInitPreFillY,
- unsigned int MaxNumSwathY,
- double PrefetchSourceLinesC,
- double BytePerPixelDETC,
- double VInitPreFillC,
- unsigned int MaxNumSwathC,
- unsigned int SwathHeightY,
- unsigned int SwathHeightC,
- double TWait,
- bool XFCEnabled,
- double XFCRemoteSurfaceFlipDelay,
- bool InterlaceEnable,
- bool ProgressiveToInterlaceUnitInOPP,
- double *DSTXAfterScaler,
- double *DSTYAfterScaler,
- double *DestinationLinesForPrefetch,
- double *PrefetchBandwidth,
- double *DestinationLinesToRequestVMInVBlank,
- double *DestinationLinesToRequestRowInVBlank,
- double *VRatioPrefetchY,
- double *VRatioPrefetchC,
- double *RequiredPrefetchPixDataBW,
- unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
- double *Tno_bw,
- unsigned int *VUpdateOffsetPix,
- unsigned int *VUpdateWidthPix,
- unsigned int *VReadyOffsetPix)
-{
- bool MyError = false;
- unsigned int DPPCycles, DISPCLKCycles;
- double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
- double Tdm, LineTime, Tsetup;
- double dst_y_prefetch_equ;
- double Tsw_oto;
- double prefetch_bw_oto;
- double Tvm_oto;
- double Tr0_oto;
- double Tpre_oto;
- double dst_y_prefetch_oto;
- double TimeForFetchingMetaPTE = 0;
- double TimeForFetchingRowInVBlank = 0;
- double LinesToRequestPrefetchPixelData = 0;
-
- if (ScalerEnabled)
- DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
- else
- DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
-
- DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor;
-
- DISPCLKCycles = DISPCLKDelaySubtotal;
-
- if (DPPCLK == 0.0 || DISPCLK == 0.0)
- return true;
-
- *DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK
- + DSCDelay;
-
- if (DPPPerPlane > 1)
- *DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
-
- if (OutputFormat == dm_420 || (InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
- *DSTYAfterScaler = 1;
- else
- *DSTYAfterScaler = 0;
-
- DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler;
- *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1);
- *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal));
-
- *VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
- TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / DPPCLK + 3.0 / DISPCLK);
- *VUpdateWidthPix = (14.0 / DCFClkDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime)
- * PixelClock;
-
- *VReadyOffsetPix = dml_max(
- 150.0 / DPPCLK,
- TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / DPPCLK)
- * PixelClock;
-
- Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
-
- LineTime = (double) HTotal / PixelClock;
-
- if (DynamicMetadataEnable) {
- double Tdmbf, Tdmec, Tdmsks;
-
- Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
- Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
- Tdmec = LineTime;
- if (DynamicMetadataLinesBeforeActiveRequired == 0)
- Tdmsks = VBlank * LineTime / 2.0;
- else
- Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
- if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
- Tdmsks = Tdmsks / 2;
- if (VStartup * LineTime
- < Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
- MyError = true;
- *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = (Tsetup + TWait
- + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) / LineTime;
- } else
- *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = 0.0;
- } else
- Tdm = 0;
-
- if (VirtualMemoryEnable) {
- if (PageTableLevels == 4)
- *Tno_bw = UrgentExtraLatency + UrgentLatency;
- else if (PageTableLevels == 3)
- *Tno_bw = UrgentExtraLatency;
- else
- *Tno_bw = 0;
- } else if (DCCEnable)
- *Tno_bw = LineTime;
- else
- *Tno_bw = LineTime / 4;
-
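-	// lines available for prefetch: VStartup less the calc/wait time, the setup plus dynamic-metadata time, and the post-scaler start offset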
- dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
- - (Tsetup + Tdm) / LineTime
- - (*DSTYAfterScaler + *DSTXAfterScaler / HTotal);
-
- Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
-
- prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow
- + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
- + PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2))
- / Tsw_oto;
-
- if (VirtualMemoryEnable == true) {
- Tvm_oto =
- dml_max(
- *Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto,
- dml_max(
- UrgentExtraLatency
- + UrgentLatency
- * (PageTableLevels
- - 1),
- LineTime / 4.0));
- } else
- Tvm_oto = LineTime / 4.0;
-
- if ((VirtualMemoryEnable == true || DCCEnable == true)) {
- Tr0_oto = dml_max(
- (MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto,
- dml_max(UrgentLatency, dml_max(LineTime - Tvm_oto, LineTime / 4)));
- } else
- Tr0_oto = LineTime - Tvm_oto;
-
- Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto;
-
- dst_y_prefetch_oto = Tpre_oto / LineTime;
-
- if (dst_y_prefetch_oto < dst_y_prefetch_equ)
- *DestinationLinesForPrefetch = dst_y_prefetch_oto;
- else
- *DestinationLinesForPrefetch = dst_y_prefetch_equ;
-
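-	// quantize the prefetch line count to quarter-line granularity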
- *DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1)
- / 4;
-
- dml_print("DML: VStartup: %d\n", VStartup);
- dml_print("DML: TCalc: %f\n", TCalc);
- dml_print("DML: TWait: %f\n", TWait);
- dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
- dml_print("DML: LineTime: %f\n", LineTime);
- dml_print("DML: Tsetup: %f\n", Tsetup);
- dml_print("DML: Tdm: %f\n", Tdm);
- dml_print("DML: DSTYAfterScaler: %f\n", *DSTYAfterScaler);
- dml_print("DML: DSTXAfterScaler: %f\n", *DSTXAfterScaler);
- dml_print("DML: HTotal: %d\n", HTotal);
-
- *PrefetchBandwidth = 0;
- *DestinationLinesToRequestVMInVBlank = 0;
- *DestinationLinesToRequestRowInVBlank = 0;
- *VRatioPrefetchY = 0;
- *VRatioPrefetchC = 0;
- *RequiredPrefetchPixDataBW = 0;
- if (*DestinationLinesForPrefetch > 1) {
- *PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte
- + 2 * PixelPTEBytesPerRow
- + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
- + PrefetchSourceLinesC * SwathWidthY / 2
- * dml_ceil(BytePerPixelDETC, 2))
- / (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
- if (VirtualMemoryEnable) {
- TimeForFetchingMetaPTE =
- dml_max(
- *Tno_bw
- + (double) PDEAndMetaPTEBytesFrame
- / *PrefetchBandwidth,
- dml_max(
- UrgentExtraLatency
- + UrgentLatency
- * (PageTableLevels
- - 1),
- LineTime / 4));
- } else {
- if (NumberOfCursors > 0 || XFCEnabled)
- TimeForFetchingMetaPTE = LineTime / 4;
- else
- TimeForFetchingMetaPTE = 0.0;
- }
-
- if ((VirtualMemoryEnable == true || DCCEnable == true)) {
- TimeForFetchingRowInVBlank =
- dml_max(
- (MetaRowByte + PixelPTEBytesPerRow)
- / *PrefetchBandwidth,
- dml_max(
- UrgentLatency,
- dml_max(
- LineTime
- - TimeForFetchingMetaPTE,
- LineTime
- / 4.0)));
- } else {
- if (NumberOfCursors > 0 || XFCEnabled)
- TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE;
- else
- TimeForFetchingRowInVBlank = 0.0;
- }
-
- *DestinationLinesToRequestVMInVBlank = dml_floor(
- 4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125),
- 1) / 4.0;
-
- *DestinationLinesToRequestRowInVBlank = dml_floor(
- 4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125),
- 1) / 4.0;
-
- LinesToRequestPrefetchPixelData =
- *DestinationLinesForPrefetch
- - ((NumberOfCursors > 0 || VirtualMemoryEnable
- || DCCEnable) ?
- (*DestinationLinesToRequestVMInVBlank
- + *DestinationLinesToRequestRowInVBlank) :
- 0.0);
-
- if (LinesToRequestPrefetchPixelData > 0) {
-
- *VRatioPrefetchY = (double) PrefetchSourceLinesY
- / LinesToRequestPrefetchPixelData;
- *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
- if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
- if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
- *VRatioPrefetchY =
- dml_max(
- (double) PrefetchSourceLinesY
- / LinesToRequestPrefetchPixelData,
- (double) MaxNumSwathY
- * SwathHeightY
- / (LinesToRequestPrefetchPixelData
- - (VInitPreFillY
- - 3.0)
- / 2.0));
- *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
- } else {
- MyError = true;
- *VRatioPrefetchY = 0;
- }
- }
-
- *VRatioPrefetchC = (double) PrefetchSourceLinesC
- / LinesToRequestPrefetchPixelData;
- *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
-
- if ((SwathHeightC > 4)) {
- if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
- *VRatioPrefetchC =
- dml_max(
- *VRatioPrefetchC,
- (double) MaxNumSwathC
- * SwathHeightC
- / (LinesToRequestPrefetchPixelData
- - (VInitPreFillC
- - 3.0)
- / 2.0));
- *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
- } else {
- MyError = true;
- *VRatioPrefetchC = 0;
- }
- }
-
- *RequiredPrefetchPixDataBW =
- DPPPerPlane
- * ((double) PrefetchSourceLinesY
- / LinesToRequestPrefetchPixelData
- * dml_ceil(
- BytePerPixelDETY,
- 1)
- + (double) PrefetchSourceLinesC
- / LinesToRequestPrefetchPixelData
- * dml_ceil(
- BytePerPixelDETC,
- 2)
- / 2)
- * SwathWidthY / LineTime;
- } else {
- MyError = true;
- *VRatioPrefetchY = 0;
- *VRatioPrefetchC = 0;
- *RequiredPrefetchPixDataBW = 0;
- }
-
- } else {
- MyError = true;
- }
-
- if (MyError) {
- *PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
- *DestinationLinesToRequestVMInVBlank = 0;
- *DestinationLinesToRequestRowInVBlank = 0;
- *DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
- *VRatioPrefetchY = 0;
- *VRatioPrefetchC = 0;
- *RequiredPrefetchPixDataBW = 0;
- }
-
- return MyError;
-}
-
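-// The DFS can only produce clocks of the form VCOSpeed * 4 / N for integer N;
-// these helpers round a requested clock up or down to the nearest achievable value.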
-static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
-{
- return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
-}
-
-static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
-{
- return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
-}
-
-static double CalculatePrefetchSourceLines(
- struct display_mode_lib *mode_lib,
- double VRatio,
- double vtaps,
- bool Interlace,
- bool ProgressiveToInterlaceUnitInOPP,
- unsigned int SwathHeight,
- unsigned int ViewportYStart,
- double *VInitPreFill,
- unsigned int *MaxNumSwath)
-{
- unsigned int MaxPartialSwath;
-
- if (ProgressiveToInterlaceUnitInOPP)
- *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
- else
- *VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
-
- if (!mode_lib->vba.IgnoreViewportPositioning) {
-
- *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
-
- if (*VInitPreFill > 1.0)
- MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
- else
- MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
- % SwathHeight;
- MaxPartialSwath = dml_max(1U, MaxPartialSwath);
-
- } else {
-
- if (ViewportYStart != 0)
- dml_print(
- "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
-
- *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
-
- if (*VInitPreFill > 1.0)
- MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
- else
- MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
- % SwathHeight;
- }
-
- return *MaxNumSwath * SwathHeight + MaxPartialSwath;
-}
-
-static unsigned int CalculateVMAndRowBytes(
- struct display_mode_lib *mode_lib,
- bool DCCEnable,
- unsigned int BlockHeight256Bytes,
- unsigned int BlockWidth256Bytes,
- enum source_format_class SourcePixelFormat,
- unsigned int SurfaceTiling,
- unsigned int BytePerPixel,
- enum scan_direction_class ScanDirection,
- unsigned int ViewportWidth,
- unsigned int ViewportHeight,
- unsigned int SwathWidth,
- bool VirtualMemoryEnable,
- unsigned int VMMPageSize,
- unsigned int PTEBufferSizeInRequests,
- unsigned int PDEProcessingBufIn64KBReqs,
- unsigned int Pitch,
- unsigned int DCCMetaPitch,
- unsigned int *MacroTileWidth,
- unsigned int *MetaRowByte,
- unsigned int *PixelPTEBytesPerRow,
- bool *PTEBufferSizeNotExceeded,
- unsigned int *dpte_row_height,
- unsigned int *meta_row_height)
-{
- unsigned int MetaRequestHeight;
- unsigned int MetaRequestWidth;
- unsigned int MetaSurfWidth;
- unsigned int MetaSurfHeight;
- unsigned int MPDEBytesFrame;
- unsigned int MetaPTEBytesFrame;
- unsigned int DCCMetaSurfaceBytes;
-
- unsigned int MacroTileSizeBytes;
- unsigned int MacroTileHeight;
- unsigned int DPDE0BytesFrame;
- unsigned int ExtraDPDEBytesFrame;
- unsigned int PDEAndMetaPTEBytesFrame;
-
- if (DCCEnable == true) {
- MetaRequestHeight = 8 * BlockHeight256Bytes;
- MetaRequestWidth = 8 * BlockWidth256Bytes;
- if (ScanDirection == dm_horz) {
- *meta_row_height = MetaRequestHeight;
- MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth)
- + MetaRequestWidth;
- *MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0;
- } else {
- *meta_row_height = MetaRequestWidth;
- MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight)
- + MetaRequestHeight;
- *MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0;
- }
- if (ScanDirection == dm_horz) {
- DCCMetaSurfaceBytes = DCCMetaPitch
- * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
- + 64 * BlockHeight256Bytes) * BytePerPixel
- / 256;
- } else {
- DCCMetaSurfaceBytes = DCCMetaPitch
- * (dml_ceil(
- (double) ViewportHeight - 1,
- 64 * BlockHeight256Bytes)
- + 64 * BlockHeight256Bytes) * BytePerPixel
- / 256;
- }
- if (VirtualMemoryEnable == true) {
- MetaPTEBytesFrame = (dml_ceil(
- (double) (DCCMetaSurfaceBytes - VMMPageSize)
- / (8 * VMMPageSize),
- 1) + 1) * 64;
- MPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 1);
- } else {
- MetaPTEBytesFrame = 0;
- MPDEBytesFrame = 0;
- }
- } else {
- MetaPTEBytesFrame = 0;
- MPDEBytesFrame = 0;
- *MetaRowByte = 0;
- }
-
- if (SurfaceTiling == dm_sw_linear) {
- MacroTileSizeBytes = 256;
- MacroTileHeight = 1;
- } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
- || SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
- MacroTileSizeBytes = 4096;
- MacroTileHeight = 4 * BlockHeight256Bytes;
- } else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
- || SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
- || SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
- || SurfaceTiling == dm_sw_64kb_r_x) {
- MacroTileSizeBytes = 65536;
- MacroTileHeight = 16 * BlockHeight256Bytes;
- } else {
- MacroTileSizeBytes = 262144;
- MacroTileHeight = 32 * BlockHeight256Bytes;
- }
- *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
-
- if (VirtualMemoryEnable == true && mode_lib->vba.MaxPageTableLevels > 1) {
- if (ScanDirection == dm_horz) {
- DPDE0BytesFrame =
- 64
- * (dml_ceil(
- ((Pitch
- * (dml_ceil(
- ViewportHeight
- - 1,
- MacroTileHeight)
- + MacroTileHeight)
- * BytePerPixel)
- - MacroTileSizeBytes)
- / (8
- * 2097152),
- 1) + 1);
- } else {
- DPDE0BytesFrame =
- 64
- * (dml_ceil(
- ((Pitch
- * (dml_ceil(
- (double) SwathWidth
- - 1,
- MacroTileHeight)
- + MacroTileHeight)
- * BytePerPixel)
- - MacroTileSizeBytes)
- / (8
- * 2097152),
- 1) + 1);
- }
- ExtraDPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 2);
- } else {
- DPDE0BytesFrame = 0;
- ExtraDPDEBytesFrame = 0;
- }
-
- PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame
- + ExtraDPDEBytesFrame;
-
- if (VirtualMemoryEnable == true) {
- unsigned int PTERequestSize;
- unsigned int PixelPTEReqHeight;
- unsigned int PixelPTEReqWidth;
- double FractionOfPTEReturnDrop;
- unsigned int EffectivePDEProcessingBufIn64KBReqs;
-
- if (SurfaceTiling == dm_sw_linear) {
- PixelPTEReqHeight = 1;
- PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
- PTERequestSize = 64;
- FractionOfPTEReturnDrop = 0;
- } else if (MacroTileSizeBytes == 4096) {
- PixelPTEReqHeight = MacroTileHeight;
- PixelPTEReqWidth = 8 * *MacroTileWidth;
- PTERequestSize = 64;
- if (ScanDirection == dm_horz)
- FractionOfPTEReturnDrop = 0;
- else
- FractionOfPTEReturnDrop = 7 / 8;
- } else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
- PixelPTEReqHeight = 16 * BlockHeight256Bytes;
- PixelPTEReqWidth = 16 * BlockWidth256Bytes;
- PTERequestSize = 128;
- FractionOfPTEReturnDrop = 0;
- } else {
- PixelPTEReqHeight = MacroTileHeight;
- PixelPTEReqWidth = 8 * *MacroTileWidth;
- PTERequestSize = 64;
- FractionOfPTEReturnDrop = 0;
- }
-
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)
- EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2;
- else
- EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs;
-
- if (SurfaceTiling == dm_sw_linear) {
- *dpte_row_height =
- dml_min(
- 128,
- 1
- << (unsigned int) dml_floor(
- dml_log2(
- dml_min(
- (double) PTEBufferSizeInRequests
- * PixelPTEReqWidth,
- EffectivePDEProcessingBufIn64KBReqs
- * 65536.0
- / BytePerPixel)
- / Pitch),
- 1));
- *PixelPTEBytesPerRow = PTERequestSize
- * (dml_ceil(
- (double) (Pitch * *dpte_row_height - 1)
- / PixelPTEReqWidth,
- 1) + 1);
- } else if (ScanDirection == dm_horz) {
- *dpte_row_height = PixelPTEReqHeight;
- *PixelPTEBytesPerRow = PTERequestSize
- * (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1)
- + 1);
- } else {
- *dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth);
- *PixelPTEBytesPerRow = PTERequestSize
- * (dml_ceil(
- ((double) SwathWidth - 1)
- / PixelPTEReqHeight,
- 1) + 1);
- }
- if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
- <= 64 * PTEBufferSizeInRequests) {
- *PTEBufferSizeNotExceeded = true;
- } else {
- *PTEBufferSizeNotExceeded = false;
- }
- } else {
- *PixelPTEBytesPerRow = 0;
- *PTEBufferSizeNotExceeded = true;
- }
-
- return PDEAndMetaPTEBytesFrame;
-}
-
-static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
- struct display_mode_lib *mode_lib)
-{
- unsigned int j, k;
-
- mode_lib->vba.WritebackDISPCLK = 0.0;
- mode_lib->vba.DISPCLKWithRamping = 0;
- mode_lib->vba.DISPCLKWithoutRamping = 0;
- mode_lib->vba.GlobalDPPCLK = 0.0;
-
-	// mode_lib->vba.DISPCLK and mode_lib->vba.DPPCLK Calculation
- //
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.WritebackEnable[k]) {
- mode_lib->vba.WritebackDISPCLK =
- dml_max(
- mode_lib->vba.WritebackDISPCLK,
- CalculateWriteBackDISPCLK(
- mode_lib->vba.WritebackPixelFormat[k],
- mode_lib->vba.PixelClock[k],
- mode_lib->vba.WritebackHRatio[k],
- mode_lib->vba.WritebackVRatio[k],
- mode_lib->vba.WritebackLumaHTaps[k],
- mode_lib->vba.WritebackLumaVTaps[k],
- mode_lib->vba.WritebackChromaHTaps[k],
- mode_lib->vba.WritebackChromaVTaps[k],
- mode_lib->vba.WritebackDestinationWidth[k],
- mode_lib->vba.HTotal[k],
- mode_lib->vba.WritebackChromaLineBufferWidth));
- }
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.HRatio[k] > 1) {
- mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
- mode_lib->vba.MaxDCHUBToPSCLThroughput,
- mode_lib->vba.MaxPSCLToLBThroughput
- * mode_lib->vba.HRatio[k]
- / dml_ceil(
- mode_lib->vba.htaps[k]
- / 6.0,
- 1));
- } else {
- mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
- mode_lib->vba.MaxDCHUBToPSCLThroughput,
- mode_lib->vba.MaxPSCLToLBThroughput);
- }
-
- mode_lib->vba.DPPCLKUsingSingleDPPLuma =
- mode_lib->vba.PixelClock[k]
- * dml_max(
- mode_lib->vba.vtaps[k] / 6.0
- * dml_min(
- 1.0,
- mode_lib->vba.HRatio[k]),
- dml_max(
- mode_lib->vba.HRatio[k]
- * mode_lib->vba.VRatio[k]
- / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k],
- 1.0));
-
- if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
- && mode_lib->vba.DPPCLKUsingSingleDPPLuma
- < 2 * mode_lib->vba.PixelClock[k]) {
- mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
- }
-
- if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
- && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
- mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0;
- mode_lib->vba.DPPCLKUsingSingleDPP[k] =
- mode_lib->vba.DPPCLKUsingSingleDPPLuma;
- } else {
- if (mode_lib->vba.HRatio[k] > 1) {
- mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] =
- dml_min(
- mode_lib->vba.MaxDCHUBToPSCLThroughput,
- mode_lib->vba.MaxPSCLToLBThroughput
- * mode_lib->vba.HRatio[k]
- / 2
- / dml_ceil(
- mode_lib->vba.HTAPsChroma[k]
- / 6.0,
- 1.0));
- } else {
- mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min(
- mode_lib->vba.MaxDCHUBToPSCLThroughput,
- mode_lib->vba.MaxPSCLToLBThroughput);
- }
- mode_lib->vba.DPPCLKUsingSingleDPPChroma =
- mode_lib->vba.PixelClock[k]
- * dml_max(
- mode_lib->vba.VTAPsChroma[k]
- / 6.0
- * dml_min(
- 1.0,
- mode_lib->vba.HRatio[k]
- / 2),
- dml_max(
- mode_lib->vba.HRatio[k]
- * mode_lib->vba.VRatio[k]
- / 4
- / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k],
- 1.0));
-
- if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
- && mode_lib->vba.DPPCLKUsingSingleDPPChroma
- < 2 * mode_lib->vba.PixelClock[k]) {
- mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
- * mode_lib->vba.PixelClock[k];
- }
-
- mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max(
- mode_lib->vba.DPPCLKUsingSingleDPPLuma,
- mode_lib->vba.DPPCLKUsingSingleDPPChroma);
- }
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.BlendingAndTiming[k] != k)
- continue;
- if (mode_lib->vba.ODMCombineEnabled[k]) {
- mode_lib->vba.DISPCLKWithRamping =
- dml_max(
- mode_lib->vba.DISPCLKWithRamping,
- mode_lib->vba.PixelClock[k] / 2
- * (1
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100)
- * (1
- + mode_lib->vba.DISPCLKRampingMargin
- / 100));
- mode_lib->vba.DISPCLKWithoutRamping =
- dml_max(
- mode_lib->vba.DISPCLKWithoutRamping,
- mode_lib->vba.PixelClock[k] / 2
- * (1
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100));
- } else if (!mode_lib->vba.ODMCombineEnabled[k]) {
- mode_lib->vba.DISPCLKWithRamping =
- dml_max(
- mode_lib->vba.DISPCLKWithRamping,
- mode_lib->vba.PixelClock[k]
- * (1
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100)
- * (1
- + mode_lib->vba.DISPCLKRampingMargin
- / 100));
- mode_lib->vba.DISPCLKWithoutRamping =
- dml_max(
- mode_lib->vba.DISPCLKWithoutRamping,
- mode_lib->vba.PixelClock[k]
- * (1
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100));
- }
- }
-
- mode_lib->vba.DISPCLKWithRamping = dml_max(
- mode_lib->vba.DISPCLKWithRamping,
- mode_lib->vba.WritebackDISPCLK);
- mode_lib->vba.DISPCLKWithoutRamping = dml_max(
- mode_lib->vba.DISPCLKWithoutRamping,
- mode_lib->vba.WritebackDISPCLK);
-
- ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
- mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
- mode_lib->vba.DISPCLKWithRamping,
- mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
- mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
- mode_lib->vba.DISPCLKWithoutRamping,
- mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
- mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
- mode_lib->vba.soc.clock_limits[NumberOfStates - 1].dispclk_mhz,
- mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
- if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
- > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
- mode_lib->vba.DISPCLK_calculated =
- mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
- } else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
- > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
- mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
- } else {
- mode_lib->vba.DISPCLK_calculated =
- mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
- }
- DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k]
- / mode_lib->vba.DPPPerPlane[k]
- * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
- mode_lib->vba.GlobalDPPCLK = dml_max(
- mode_lib->vba.GlobalDPPCLK,
- mode_lib->vba.DPPCLK_calculated[k]);
- }
- mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
- mode_lib->vba.GlobalDPPCLK,
- mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
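-	// express each pipe's DPPCLK as an integer fraction (n/255) of the global DPPCLK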
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
- * dml_ceil(
- mode_lib->vba.DPPCLK_calculated[k] * 255
- / mode_lib->vba.GlobalDPPCLK,
- 1);
- DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
- }
-
- // Urgent Watermark
- mode_lib->vba.DCCEnabledAnyPlane = false;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
- if (mode_lib->vba.DCCEnable[k])
- mode_lib->vba.DCCEnabledAnyPlane = true;
-
- mode_lib->vba.ReturnBandwidthToDCN = dml_min(
- mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
- mode_lib->vba.FabricAndDRAMBandwidth * 1000)
- * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency / 100;
-
- mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN;
- mode_lib->vba.ReturnBW = adjust_ReturnBW(
- mode_lib,
- mode_lib->vba.ReturnBW,
- mode_lib->vba.DCCEnabledAnyPlane,
- mode_lib->vba.ReturnBandwidthToDCN);
-
- // Let's do this calculation again??
- mode_lib->vba.ReturnBandwidthToDCN = dml_min(
- mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
- mode_lib->vba.FabricAndDRAMBandwidth * 1000);
- mode_lib->vba.ReturnBW = adjust_ReturnBW(
- mode_lib,
- mode_lib->vba.ReturnBW,
- mode_lib->vba.DCCEnabledAnyPlane,
- mode_lib->vba.ReturnBandwidthToDCN);
-
- DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
- DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
- DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- bool MainPlaneDoesODMCombine = false;
-
- if (mode_lib->vba.SourceScan[k] == dm_horz)
- mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
- else
- mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
-
- if (mode_lib->vba.ODMCombineEnabled[k] == true)
- MainPlaneDoesODMCombine = true;
- for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
- if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true)
- MainPlaneDoesODMCombine = true;
-
- if (MainPlaneDoesODMCombine == true)
- mode_lib->vba.SwathWidthY[k] = dml_min(
- (double) mode_lib->vba.SwathWidthSingleDPPY[k],
- dml_round(
- mode_lib->vba.HActive[k] / 2.0
- * mode_lib->vba.HRatio[k]));
- else
- mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
- / mode_lib->vba.DPPPerPlane[k];
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
- mode_lib->vba.BytePerPixelDETY[k] = 8;
- mode_lib->vba.BytePerPixelDETC[k] = 0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
- mode_lib->vba.BytePerPixelDETY[k] = 4;
- mode_lib->vba.BytePerPixelDETC[k] = 0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
- mode_lib->vba.BytePerPixelDETY[k] = 2;
- mode_lib->vba.BytePerPixelDETC[k] = 0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
- mode_lib->vba.BytePerPixelDETY[k] = 1;
- mode_lib->vba.BytePerPixelDETC[k] = 0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
- mode_lib->vba.BytePerPixelDETY[k] = 1;
- mode_lib->vba.BytePerPixelDETC[k] = 2;
- } else { // dm_420_10
- mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0;
- mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0;
- }
- }
-
- mode_lib->vba.TotalDataReadBandwidth = 0.0;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
- * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
- / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
- * mode_lib->vba.VRatio[k];
- mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
- / 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
- / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
- * mode_lib->vba.VRatio[k] / 2;
- DTRACE(
- " read_bw[%i] = %fBps",
- k,
- mode_lib->vba.ReadBandwidthPlaneLuma[k]
- + mode_lib->vba.ReadBandwidthPlaneChroma[k]);
- mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k]
- + mode_lib->vba.ReadBandwidthPlaneChroma[k];
- }
-
- mode_lib->vba.TotalDCCActiveDPP = 0;
- mode_lib->vba.TotalActiveDPP = 0;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
- + mode_lib->vba.DPPPerPlane[k];
- if (mode_lib->vba.DCCEnable[k])
- mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
- + mode_lib->vba.DPPPerPlane[k];
- }
-
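-	// Round-trip part of the urgent latency: ping latency (plus 32 cycles) at
-	// DCFCLK, plus the out-of-order return allowance of all channels drained at
-	// the adjusted return bandwidth.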
- mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
- (mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
- + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
- * mode_lib->vba.NumberOfChannels
- / mode_lib->vba.ReturnBW;
-
- mode_lib->vba.LastPixelOfLineExtraWatermark = 0;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- double DataFabricLineDeliveryTimeLuma, DataFabricLineDeliveryTimeChroma;
-
- if (mode_lib->vba.VRatio[k] <= 1.0)
- mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
- (double) mode_lib->vba.SwathWidthY[k]
- * mode_lib->vba.DPPPerPlane[k]
- / mode_lib->vba.HRatio[k]
- / mode_lib->vba.PixelClock[k];
- else
- mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
- (double) mode_lib->vba.SwathWidthY[k]
- / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
- / mode_lib->vba.DPPCLK[k];
-
- DataFabricLineDeliveryTimeLuma = mode_lib->vba.SwathWidthSingleDPPY[k]
- * mode_lib->vba.SwathHeightY[k]
- * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
- / (mode_lib->vba.ReturnBW * mode_lib->vba.ReadBandwidthPlaneLuma[k]
- / mode_lib->vba.TotalDataReadBandwidth);
- mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(
- mode_lib->vba.LastPixelOfLineExtraWatermark,
- DataFabricLineDeliveryTimeLuma
- - mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k]);
-
- if (mode_lib->vba.BytePerPixelDETC[k] == 0)
- mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = 0.0;
- else if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0)
- mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
- mode_lib->vba.SwathWidthY[k] / 2.0
- * mode_lib->vba.DPPPerPlane[k]
- / (mode_lib->vba.HRatio[k] / 2.0)
- / mode_lib->vba.PixelClock[k];
- else
- mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
- mode_lib->vba.SwathWidthY[k] / 2.0
- / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
- / mode_lib->vba.DPPCLK[k];
-
- DataFabricLineDeliveryTimeChroma = mode_lib->vba.SwathWidthSingleDPPY[k] / 2.0
- * mode_lib->vba.SwathHeightC[k]
- * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
- / (mode_lib->vba.ReturnBW
- * mode_lib->vba.ReadBandwidthPlaneChroma[k]
- / mode_lib->vba.TotalDataReadBandwidth);
- mode_lib->vba.LastPixelOfLineExtraWatermark =
- dml_max(
- mode_lib->vba.LastPixelOfLineExtraWatermark,
- DataFabricLineDeliveryTimeChroma
- - mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
- }
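-	// Extra urgent latency: the round-trip/out-of-order term above plus the time
-	// to return one pixel chunk per DPP (one meta chunk per DCC-enabled DPP, and
-	// one PTE chunk per DPP when virtual memory is enabled) at the return bandwidth.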
-
- mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency
- + (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte
- + mode_lib->vba.TotalDCCActiveDPP
- * mode_lib->vba.MetaChunkSize) * 1024.0
- / mode_lib->vba.ReturnBW;
-
- if (mode_lib->vba.VirtualMemoryEnable)
- mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP
- * mode_lib->vba.PTEChunkSize * 1024.0 / mode_lib->vba.ReturnBW;
-
- mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatency
- + mode_lib->vba.LastPixelOfLineExtraWatermark
- + mode_lib->vba.UrgentExtraLatency;
-
- DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency);
- DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark);
-
- mode_lib->vba.MemoryTripWatermark = mode_lib->vba.UrgentLatency;
-
- mode_lib->vba.TotalActiveWriteback = 0;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.WritebackEnable[k])
- mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
- }
-
- if (mode_lib->vba.TotalActiveWriteback <= 1)
- mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency;
- else
- mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency
- + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
- / mode_lib->vba.SOCCLK;
-
- DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark);
-
- // NB P-State/DRAM Clock Change Watermark
- mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency
- + mode_lib->vba.UrgentWatermark;
-
- DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark);
-
- DTRACE(" calculating wb pstate watermark");
- DTRACE(" total wb outputs %d", mode_lib->vba.TotalActiveWriteback);
- DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK);
-
- if (mode_lib->vba.TotalActiveWriteback <= 1)
- mode_lib->vba.WritebackDRAMClockChangeWatermark =
- mode_lib->vba.DRAMClockChangeLatency
- + mode_lib->vba.WritebackLatency;
- else
- mode_lib->vba.WritebackDRAMClockChangeWatermark =
- mode_lib->vba.DRAMClockChangeLatency
- + mode_lib->vba.WritebackLatency
- + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
- / mode_lib->vba.SOCCLK;
-
- DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark);
-
- // Stutter Efficiency
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k]
- / mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k];
- mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor(
- mode_lib->vba.LinesInDETY[k],
- mode_lib->vba.SwathHeightY[k]);
- mode_lib->vba.FullDETBufferingTimeY[k] =
- mode_lib->vba.LinesInDETYRoundedDownToSwath[k]
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k])
- / mode_lib->vba.VRatio[k];
- if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
- mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k]
- / mode_lib->vba.BytePerPixelDETC[k]
- / (mode_lib->vba.SwathWidthY[k] / 2);
- mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor(
- mode_lib->vba.LinesInDETC[k],
- mode_lib->vba.SwathHeightC[k]);
- mode_lib->vba.FullDETBufferingTimeC[k] =
- mode_lib->vba.LinesInDETCRoundedDownToSwath[k]
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k])
- / (mode_lib->vba.VRatio[k] / 2);
- } else {
- mode_lib->vba.LinesInDETC[k] = 0;
- mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0;
- mode_lib->vba.FullDETBufferingTimeC[k] = 999999;
- }
- }
-
- mode_lib->vba.MinFullDETBufferingTime = 999999.0;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.FullDETBufferingTimeY[k]
- < mode_lib->vba.MinFullDETBufferingTime) {
- mode_lib->vba.MinFullDETBufferingTime =
- mode_lib->vba.FullDETBufferingTimeY[k];
- mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
- (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k];
- }
- if (mode_lib->vba.FullDETBufferingTimeC[k]
- < mode_lib->vba.MinFullDETBufferingTime) {
- mode_lib->vba.MinFullDETBufferingTime =
- mode_lib->vba.FullDETBufferingTimeC[k];
- mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
- (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k];
- }
- }
-
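-	// Average read bandwidth: plane traffic reduced by the DCC compression rate,
-	// plus 1/256 of the uncompressed traffic for DCC metadata and 1/512 for page
-	// table fetches when virtual memory is enabled.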
- mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.DCCEnable[k]) {
- mode_lib->vba.AverageReadBandwidthGBytePerSecond =
- mode_lib->vba.AverageReadBandwidthGBytePerSecond
- + mode_lib->vba.ReadBandwidthPlaneLuma[k]
- / mode_lib->vba.DCCRate[k]
- / 1000
- + mode_lib->vba.ReadBandwidthPlaneChroma[k]
- / mode_lib->vba.DCCRate[k]
- / 1000;
- } else {
- mode_lib->vba.AverageReadBandwidthGBytePerSecond =
- mode_lib->vba.AverageReadBandwidthGBytePerSecond
- + mode_lib->vba.ReadBandwidthPlaneLuma[k]
- / 1000
- + mode_lib->vba.ReadBandwidthPlaneChroma[k]
- / 1000;
- }
- if (mode_lib->vba.DCCEnable[k]) {
- mode_lib->vba.AverageReadBandwidthGBytePerSecond =
- mode_lib->vba.AverageReadBandwidthGBytePerSecond
- + mode_lib->vba.ReadBandwidthPlaneLuma[k]
- / 1000 / 256
- + mode_lib->vba.ReadBandwidthPlaneChroma[k]
- / 1000 / 256;
- }
- if (mode_lib->vba.VirtualMemoryEnable) {
- mode_lib->vba.AverageReadBandwidthGBytePerSecond =
- mode_lib->vba.AverageReadBandwidthGBytePerSecond
- + mode_lib->vba.ReadBandwidthPlaneLuma[k]
- / 1000 / 512
- + mode_lib->vba.ReadBandwidthPlaneChroma[k]
- / 1000 / 512;
- }
- }
-
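-	// Stutter burst time: the slice of the DET refill that fits in the ROB returns
-	// at ReturnBW (scaled by the average/total bandwidth ratio, i.e. as
-	// DCC-compressed traffic); the remainder is limited to 64 bytes per DCFCLK cycle.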
- mode_lib->vba.PartOfBurstThatFitsInROB =
- dml_min(
- mode_lib->vba.MinFullDETBufferingTime
- * mode_lib->vba.TotalDataReadBandwidth,
- mode_lib->vba.ROBBufferSizeInKByte * 1024
- * mode_lib->vba.TotalDataReadBandwidth
- / (mode_lib->vba.AverageReadBandwidthGBytePerSecond
- * 1000));
- mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
- * (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000)
- / mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW
- + (mode_lib->vba.MinFullDETBufferingTime
- * mode_lib->vba.TotalDataReadBandwidth
- - mode_lib->vba.PartOfBurstThatFitsInROB)
- / (mode_lib->vba.DCFCLK * 64);
- if (mode_lib->vba.TotalActiveWriteback == 0) {
- mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
- - (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
- / mode_lib->vba.MinFullDETBufferingTime) * 100;
- } else {
- mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
- }
-
- mode_lib->vba.SmallestVBlank = 999999;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
- mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
- - mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k];
- } else {
- mode_lib->vba.VBlankTime = 0;
- }
- mode_lib->vba.SmallestVBlank = dml_min(
- mode_lib->vba.SmallestVBlank,
- mode_lib->vba.VBlankTime);
- }
-
- mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
- * (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
- - mode_lib->vba.SmallestVBlank)
- + mode_lib->vba.SmallestVBlank)
- / mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
-
-	// DCFCLK Deep Sleep
- mode_lib->vba.DCFClkDeepSleep = 8.0;
-
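-	// Per-plane deep-sleep DCFCLK: enough to keep up with the swath delivery rate
-	// (with a 10% margin) and never below PixelClock / 16; the global value is the
-	// maximum over all planes, floored at the 8 MHz set just above.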
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) {
- if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
- mode_lib->vba.DCFCLKDeepSleepPerPlane =
- dml_max(
- 1.1 * mode_lib->vba.SwathWidthY[k]
- * dml_ceil(
- mode_lib->vba.BytePerPixelDETY[k],
- 1) / 32
- / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k],
- 1.1 * mode_lib->vba.SwathWidthY[k] / 2.0
- * dml_ceil(
- mode_lib->vba.BytePerPixelDETC[k],
- 2) / 32
- / mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
- } else
- mode_lib->vba.DCFCLKDeepSleepPerPlane = 1.1 * mode_lib->vba.SwathWidthY[k]
- * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0
- / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k];
- mode_lib->vba.DCFCLKDeepSleepPerPlane = dml_max(
- mode_lib->vba.DCFCLKDeepSleepPerPlane,
- mode_lib->vba.PixelClock[k] / 16.0);
- mode_lib->vba.DCFClkDeepSleep = dml_max(
- mode_lib->vba.DCFClkDeepSleep,
- mode_lib->vba.DCFCLKDeepSleepPerPlane);
-
- DTRACE(
- " dcfclk_deepsleep_per_plane[%i] = %fMHz",
- k,
- mode_lib->vba.DCFCLKDeepSleepPerPlane);
- }
-
- DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFClkDeepSleep);
-
- // Stutter Watermark
- mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime
- + mode_lib->vba.LastPixelOfLineExtraWatermark
- + mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFClkDeepSleep;
- mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime
- + mode_lib->vba.LastPixelOfLineExtraWatermark
- + mode_lib->vba.UrgentExtraLatency;
-
- DTRACE(" wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark);
- DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark);
-
- // Urgent Latency Supported
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.EffectiveDETPlusLBLinesLuma =
- dml_floor(
- mode_lib->vba.LinesInDETY[k]
- + dml_min(
- mode_lib->vba.LinesInDETY[k]
- * mode_lib->vba.DPPCLK[k]
- * mode_lib->vba.BytePerPixelDETY[k]
- * mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
- / (mode_lib->vba.ReturnBW
- / mode_lib->vba.DPPPerPlane[k]),
- (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
- mode_lib->vba.SwathHeightY[k]);
-
- mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma
- * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
- / mode_lib->vba.VRatio[k]
- - mode_lib->vba.EffectiveDETPlusLBLinesLuma
- * mode_lib->vba.SwathWidthY[k]
- * mode_lib->vba.BytePerPixelDETY[k]
- / (mode_lib->vba.ReturnBW
- / mode_lib->vba.DPPPerPlane[k]);
-
- if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
- mode_lib->vba.EffectiveDETPlusLBLinesChroma =
- dml_floor(
- mode_lib->vba.LinesInDETC[k]
- + dml_min(
- mode_lib->vba.LinesInDETC[k]
- * mode_lib->vba.DPPCLK[k]
- * mode_lib->vba.BytePerPixelDETC[k]
- * mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
- / (mode_lib->vba.ReturnBW
- / mode_lib->vba.DPPPerPlane[k]),
- (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
- mode_lib->vba.SwathHeightC[k]);
- mode_lib->vba.UrgentLatencySupportUsChroma =
- mode_lib->vba.EffectiveDETPlusLBLinesChroma
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k])
- / (mode_lib->vba.VRatio[k] / 2)
- - mode_lib->vba.EffectiveDETPlusLBLinesChroma
- * (mode_lib->vba.SwathWidthY[k]
- / 2)
- * mode_lib->vba.BytePerPixelDETC[k]
- / (mode_lib->vba.ReturnBW
- / mode_lib->vba.DPPPerPlane[k]);
- mode_lib->vba.UrgentLatencySupportUs[k] = dml_min(
- mode_lib->vba.UrgentLatencySupportUsLuma,
- mode_lib->vba.UrgentLatencySupportUsChroma);
- } else {
- mode_lib->vba.UrgentLatencySupportUs[k] =
- mode_lib->vba.UrgentLatencySupportUsLuma;
- }
- }
-
- mode_lib->vba.MinUrgentLatencySupportUs = 999999;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.MinUrgentLatencySupportUs = dml_min(
- mode_lib->vba.MinUrgentLatencySupportUs,
- mode_lib->vba.UrgentLatencySupportUs[k]);
- }
-
- // Non-Urgent Latency Tolerance
- mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs
- - mode_lib->vba.UrgentWatermark;
-
- // DSCCLK
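-	// DSCCLK is one third of the back-end pixel rate (one sixth with ODM combine,
-	// which is assumed to split the stream across two DSC engines), halved again
-	// for 4:2:0 and native 4:2:2 outputs, with margin for clock down-spreading.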
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
- mode_lib->vba.DSCCLK_calculated[k] = 0.0;
- } else {
- if (mode_lib->vba.OutputFormat[k] == dm_420
- || mode_lib->vba.OutputFormat[k] == dm_n422)
- mode_lib->vba.DSCFormatFactor = 2;
- else
- mode_lib->vba.DSCFormatFactor = 1;
- if (mode_lib->vba.ODMCombineEnabled[k])
- mode_lib->vba.DSCCLK_calculated[k] =
- mode_lib->vba.PixelClockBackEnd[k] / 6
- / mode_lib->vba.DSCFormatFactor
- / (1
- - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100);
- else
- mode_lib->vba.DSCCLK_calculated[k] =
- mode_lib->vba.PixelClockBackEnd[k] / 3
- / mode_lib->vba.DSCFormatFactor
- / (1
- - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100);
- }
- }
-
- // DSC Delay
- // TODO
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- double bpp = mode_lib->vba.OutputBpp[k];
- unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
-
- if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
- if (!mode_lib->vba.ODMCombineEnabled[k]) {
- mode_lib->vba.DSCDelay[k] =
- dscceComputeDelay(
- mode_lib->vba.DSCInputBitPerComponent[k],
- bpp,
- dml_ceil(
- (double) mode_lib->vba.HActive[k]
- / mode_lib->vba.NumberOfDSCSlices[k],
- 1),
- slices,
- mode_lib->vba.OutputFormat[k])
- + dscComputeDelay(
- mode_lib->vba.OutputFormat[k]);
- } else {
- mode_lib->vba.DSCDelay[k] =
- 2
- * (dscceComputeDelay(
- mode_lib->vba.DSCInputBitPerComponent[k],
- bpp,
- dml_ceil(
- (double) mode_lib->vba.HActive[k]
- / mode_lib->vba.NumberOfDSCSlices[k],
- 1),
- slices / 2.0,
- mode_lib->vba.OutputFormat[k])
- + dscComputeDelay(
- mode_lib->vba.OutputFormat[k]));
- }
- mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k]
- * mode_lib->vba.PixelClock[k]
- / mode_lib->vba.PixelClockBackEnd[k];
- } else {
- mode_lib->vba.DSCDelay[k] = 0;
- }
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
- for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
- if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.DSCEnabled[j])
- mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j];
-
- // Prefetch
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- unsigned int PDEAndMetaPTEBytesFrameY;
- unsigned int PixelPTEBytesPerRowY;
- unsigned int MetaRowByteY;
- unsigned int MetaRowByteC;
- unsigned int PDEAndMetaPTEBytesFrameC;
- unsigned int PixelPTEBytesPerRowC;
-
- Calculate256BBlockSizes(
- mode_lib->vba.SourcePixelFormat[k],
- mode_lib->vba.SurfaceTiling[k],
- dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
- dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2),
- &mode_lib->vba.BlockHeight256BytesY[k],
- &mode_lib->vba.BlockHeight256BytesC[k],
- &mode_lib->vba.BlockWidth256BytesY[k],
- &mode_lib->vba.BlockWidth256BytesC[k]);
- PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
- mode_lib,
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.BlockHeight256BytesY[k],
- mode_lib->vba.BlockWidth256BytesY[k],
- mode_lib->vba.SourcePixelFormat[k],
- mode_lib->vba.SurfaceTiling[k],
- dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
- mode_lib->vba.SourceScan[k],
- mode_lib->vba.ViewportWidth[k],
- mode_lib->vba.ViewportHeight[k],
- mode_lib->vba.SwathWidthY[k],
- mode_lib->vba.VirtualMemoryEnable,
- mode_lib->vba.VMMPageSize,
- mode_lib->vba.PTEBufferSizeInRequests,
- mode_lib->vba.PDEProcessingBufIn64KBReqs,
- mode_lib->vba.PitchY[k],
- mode_lib->vba.DCCMetaPitchY[k],
- &mode_lib->vba.MacroTileWidthY[k],
- &MetaRowByteY,
- &PixelPTEBytesPerRowY,
- &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
- &mode_lib->vba.dpte_row_height[k],
- &mode_lib->vba.meta_row_height[k]);
- mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
- mode_lib,
- mode_lib->vba.VRatio[k],
- mode_lib->vba.vtaps[k],
- mode_lib->vba.Interlace[k],
- mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
- mode_lib->vba.SwathHeightY[k],
- mode_lib->vba.ViewportYStartY[k],
- &mode_lib->vba.VInitPreFillY[k],
- &mode_lib->vba.MaxNumSwathY[k]);
-
- if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
- PDEAndMetaPTEBytesFrameC =
- CalculateVMAndRowBytes(
- mode_lib,
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.BlockHeight256BytesC[k],
- mode_lib->vba.BlockWidth256BytesC[k],
- mode_lib->vba.SourcePixelFormat[k],
- mode_lib->vba.SurfaceTiling[k],
- dml_ceil(
- mode_lib->vba.BytePerPixelDETC[k],
- 2),
- mode_lib->vba.SourceScan[k],
- mode_lib->vba.ViewportWidth[k] / 2,
- mode_lib->vba.ViewportHeight[k] / 2,
- mode_lib->vba.SwathWidthY[k] / 2,
- mode_lib->vba.VirtualMemoryEnable,
- mode_lib->vba.VMMPageSize,
- mode_lib->vba.PTEBufferSizeInRequests,
- mode_lib->vba.PDEProcessingBufIn64KBReqs,
- mode_lib->vba.PitchC[k],
- 0,
- &mode_lib->vba.MacroTileWidthC[k],
- &MetaRowByteC,
- &PixelPTEBytesPerRowC,
- &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
- &mode_lib->vba.dpte_row_height_chroma[k],
- &mode_lib->vba.meta_row_height_chroma[k]);
- mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
- mode_lib,
- mode_lib->vba.VRatio[k] / 2,
- mode_lib->vba.VTAPsChroma[k],
- mode_lib->vba.Interlace[k],
- mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
- mode_lib->vba.SwathHeightC[k],
- mode_lib->vba.ViewportYStartC[k],
- &mode_lib->vba.VInitPreFillC[k],
- &mode_lib->vba.MaxNumSwathC[k]);
- } else {
- PixelPTEBytesPerRowC = 0;
- PDEAndMetaPTEBytesFrameC = 0;
- MetaRowByteC = 0;
- mode_lib->vba.MaxNumSwathC[k] = 0;
- mode_lib->vba.PrefetchSourceLinesC[k] = 0;
- }
-
- mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
- mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
- + PDEAndMetaPTEBytesFrameC;
- mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
-
- CalculateActiveRowBandwidth(
- mode_lib->vba.VirtualMemoryEnable,
- mode_lib->vba.SourcePixelFormat[k],
- mode_lib->vba.VRatio[k],
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
- MetaRowByteY,
- MetaRowByteC,
- mode_lib->vba.meta_row_height[k],
- mode_lib->vba.meta_row_height_chroma[k],
- PixelPTEBytesPerRowY,
- PixelPTEBytesPerRowC,
- mode_lib->vba.dpte_row_height[k],
- mode_lib->vba.dpte_row_height_chroma[k],
- &mode_lib->vba.meta_row_bw[k],
- &mode_lib->vba.dpte_row_bw[k],
- &mode_lib->vba.qual_row_bw[k]);
- }
-
- mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFClkDeepSleep;
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.BlendingAndTiming[k] == k) {
- if (mode_lib->vba.WritebackEnable[k] == true) {
- mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
- mode_lib->vba.WritebackLatency
- + CalculateWriteBackDelay(
- mode_lib->vba.WritebackPixelFormat[k],
- mode_lib->vba.WritebackHRatio[k],
- mode_lib->vba.WritebackVRatio[k],
- mode_lib->vba.WritebackLumaHTaps[k],
- mode_lib->vba.WritebackLumaVTaps[k],
- mode_lib->vba.WritebackChromaHTaps[k],
- mode_lib->vba.WritebackChromaVTaps[k],
- mode_lib->vba.WritebackDestinationWidth[k])
- / mode_lib->vba.DISPCLK;
- } else
- mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
- for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
- if (mode_lib->vba.BlendingAndTiming[j] == k
- && mode_lib->vba.WritebackEnable[j] == true) {
- mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
- dml_max(
- mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k],
- mode_lib->vba.WritebackLatency
- + CalculateWriteBackDelay(
- mode_lib->vba.WritebackPixelFormat[j],
- mode_lib->vba.WritebackHRatio[j],
- mode_lib->vba.WritebackVRatio[j],
- mode_lib->vba.WritebackLumaHTaps[j],
- mode_lib->vba.WritebackLumaVTaps[j],
- mode_lib->vba.WritebackChromaHTaps[j],
- mode_lib->vba.WritebackChromaVTaps[j],
- mode_lib->vba.WritebackDestinationWidth[j])
- / mode_lib->vba.DISPCLK);
- }
- }
- }
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
- for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
- if (mode_lib->vba.BlendingAndTiming[k] == j)
- mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
- mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j];
-
- mode_lib->vba.VStartupLines = 13;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.MaxVStartupLines[k] =
- mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- - dml_max(
- 1.0,
- dml_ceil(
- mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k]
- / (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]),
- 1));
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
- mode_lib->vba.MaximumMaxVStartupLines = dml_max(
- mode_lib->vba.MaximumMaxVStartupLines,
- mode_lib->vba.MaxVStartupLines[k]);
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.cursor_bw[k] = 0.0;
- for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j)
- mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j]
- * mode_lib->vba.CursorBPP[k][j] / 8.0
- / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
- * mode_lib->vba.VRatio[k];
- }
-
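-	// Grow VSTARTUP from its initial 13 lines until the prefetch schedule becomes
-	// supportable (bandwidth, prefetch VRatio and line-time limits all met, and
-	// immediate flip supported when requested), or every plane's maximum VSTARTUP
-	// is exceeded.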
- do {
- double MaxTotalRDBandwidth = 0;
- bool DestinationLineTimesForPrefetchLessThan2 = false;
- bool VRatioPrefetchMoreThan4 = false;
- bool prefetch_vm_bw_valid = true;
- bool prefetch_row_bw_valid = true;
- double TWait = CalculateTWait(
- mode_lib->vba.PrefetchMode,
- mode_lib->vba.DRAMClockChangeLatency,
- mode_lib->vba.UrgentLatency,
- mode_lib->vba.SREnterPlusExitTime);
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.XFCEnabled[k] == true) {
- mode_lib->vba.XFCRemoteSurfaceFlipDelay =
- CalculateRemoteSurfaceFlipDelay(
- mode_lib,
- mode_lib->vba.VRatio[k],
- mode_lib->vba.SwathWidthY[k],
- dml_ceil(
- mode_lib->vba.BytePerPixelDETY[k],
- 1),
- mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k],
- mode_lib->vba.XFCTSlvVupdateOffset,
- mode_lib->vba.XFCTSlvVupdateWidth,
- mode_lib->vba.XFCTSlvVreadyOffset,
- mode_lib->vba.XFCXBUFLatencyTolerance,
- mode_lib->vba.XFCFillBWOverhead,
- mode_lib->vba.XFCSlvChunkSize,
- mode_lib->vba.XFCBusTransportTime,
- mode_lib->vba.TCalc,
- TWait,
- &mode_lib->vba.SrcActiveDrainRate,
- &mode_lib->vba.TInitXFill,
- &mode_lib->vba.TslvChk);
- } else {
- mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
- }
- mode_lib->vba.ErrorResult[k] =
- CalculatePrefetchSchedule(
- mode_lib,
- mode_lib->vba.DPPCLK[k],
- mode_lib->vba.DISPCLK,
- mode_lib->vba.PixelClock[k],
- mode_lib->vba.DCFClkDeepSleep,
- mode_lib->vba.DSCDelay[k],
- mode_lib->vba.DPPPerPlane[k],
- mode_lib->vba.ScalerEnabled[k],
- mode_lib->vba.NumberOfCursors[k],
- mode_lib->vba.DPPCLKDelaySubtotal,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- (unsigned int) (mode_lib->vba.SwathWidthY[k]
- / mode_lib->vba.HRatio[k]),
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.VTotal[k]
- - mode_lib->vba.VActive[k],
- mode_lib->vba.HTotal[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(
- mode_lib->vba.VStartupLines,
- mode_lib->vba.MaxVStartupLines[k]),
- mode_lib->vba.MaxPageTableLevels,
- mode_lib->vba.VirtualMemoryEnable,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.UrgentLatency,
- mode_lib->vba.UrgentExtraLatency,
- mode_lib->vba.TCalc,
- mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
- mode_lib->vba.MetaRowByte[k],
- mode_lib->vba.PixelPTEBytesPerRow[k],
- mode_lib->vba.PrefetchSourceLinesY[k],
- mode_lib->vba.SwathWidthY[k],
- mode_lib->vba.BytePerPixelDETY[k],
- mode_lib->vba.VInitPreFillY[k],
- mode_lib->vba.MaxNumSwathY[k],
- mode_lib->vba.PrefetchSourceLinesC[k],
- mode_lib->vba.BytePerPixelDETC[k],
- mode_lib->vba.VInitPreFillC[k],
- mode_lib->vba.MaxNumSwathC[k],
- mode_lib->vba.SwathHeightY[k],
- mode_lib->vba.SwathHeightC[k],
- TWait,
- mode_lib->vba.XFCEnabled[k],
- mode_lib->vba.XFCRemoteSurfaceFlipDelay,
- mode_lib->vba.Interlace[k],
- mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
- &mode_lib->vba.DSTXAfterScaler[k],
- &mode_lib->vba.DSTYAfterScaler[k],
- &mode_lib->vba.DestinationLinesForPrefetch[k],
- &mode_lib->vba.PrefetchBandwidth[k],
- &mode_lib->vba.DestinationLinesToRequestVMInVBlank[k],
- &mode_lib->vba.DestinationLinesToRequestRowInVBlank[k],
- &mode_lib->vba.VRatioPrefetchY[k],
- &mode_lib->vba.VRatioPrefetchC[k],
- &mode_lib->vba.RequiredPrefetchPixDataBW[k],
- &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
- &mode_lib->vba.Tno_bw[k],
- &mode_lib->vba.VUpdateOffsetPix[k],
- &mode_lib->vba.VUpdateWidthPix[k],
- &mode_lib->vba.VReadyOffsetPix[k]);
- if (mode_lib->vba.BlendingAndTiming[k] == k) {
- mode_lib->vba.VStartup[k] = dml_min(
- mode_lib->vba.VStartupLines,
- mode_lib->vba.MaxVStartupLines[k]);
- if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
- != 0) {
- mode_lib->vba.VStartup[k] =
- mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
- }
- } else {
- mode_lib->vba.VStartup[k] =
- dml_min(
- mode_lib->vba.VStartupLines,
- mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
- }
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
-
- if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0)
- mode_lib->vba.prefetch_vm_bw[k] = 0;
- else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) {
- mode_lib->vba.prefetch_vm_bw[k] =
- (double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
- / (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]);
- } else {
- mode_lib->vba.prefetch_vm_bw[k] = 0;
- prefetch_vm_bw_valid = false;
- }
- if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]
- == 0)
- mode_lib->vba.prefetch_row_bw[k] = 0;
- else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) {
- mode_lib->vba.prefetch_row_bw[k] =
- (double) (mode_lib->vba.MetaRowByte[k]
- + mode_lib->vba.PixelPTEBytesPerRow[k])
- / (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]);
- } else {
- mode_lib->vba.prefetch_row_bw[k] = 0;
- prefetch_row_bw_valid = false;
- }
-
- MaxTotalRDBandwidth =
- MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k]
- + dml_max(
- mode_lib->vba.prefetch_vm_bw[k],
- dml_max(
- mode_lib->vba.prefetch_row_bw[k],
- dml_max(
- mode_lib->vba.ReadBandwidthPlaneLuma[k]
- + mode_lib->vba.ReadBandwidthPlaneChroma[k],
- mode_lib->vba.RequiredPrefetchPixDataBW[k])
- + mode_lib->vba.meta_row_bw[k]
- + mode_lib->vba.dpte_row_bw[k]));
-
- if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2)
- DestinationLineTimesForPrefetchLessThan2 = true;
- if (mode_lib->vba.VRatioPrefetchY[k] > 4
- || mode_lib->vba.VRatioPrefetchC[k] > 4)
- VRatioPrefetchMoreThan4 = true;
- }
-
- if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid
- && prefetch_row_bw_valid && !VRatioPrefetchMoreThan4
- && !DestinationLineTimesForPrefetchLessThan2)
- mode_lib->vba.PrefetchModeSupported = true;
- else {
- mode_lib->vba.PrefetchModeSupported = false;
- dml_print(
- "DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
- }
-
- if (mode_lib->vba.PrefetchModeSupported == true) {
- double final_flip_bw[DC__NUM_DPP__MAX];
- unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
- double total_dcn_read_bw_with_flip = 0;
-
- mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.BandwidthAvailableForImmediateFlip =
- mode_lib->vba.BandwidthAvailableForImmediateFlip
- - mode_lib->vba.cursor_bw[k]
- - dml_max(
- mode_lib->vba.ReadBandwidthPlaneLuma[k]
- + mode_lib->vba.ReadBandwidthPlaneChroma[k]
- + mode_lib->vba.qual_row_bw[k],
- mode_lib->vba.PrefetchBandwidth[k]);
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- ImmediateFlipBytes[k] = 0;
- if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
- && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
- ImmediateFlipBytes[k] =
- mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
- + mode_lib->vba.MetaRowByte[k]
- + mode_lib->vba.PixelPTEBytesPerRow[k];
- }
- }
- mode_lib->vba.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
- && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
- mode_lib->vba.TotImmediateFlipBytes =
- mode_lib->vba.TotImmediateFlipBytes
- + ImmediateFlipBytes[k];
- }
- }
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- CalculateFlipSchedule(
- mode_lib,
- mode_lib->vba.UrgentExtraLatency,
- mode_lib->vba.UrgentLatency,
- mode_lib->vba.MaxPageTableLevels,
- mode_lib->vba.VirtualMemoryEnable,
- mode_lib->vba.BandwidthAvailableForImmediateFlip,
- mode_lib->vba.TotImmediateFlipBytes,
- mode_lib->vba.SourcePixelFormat[k],
- ImmediateFlipBytes[k],
- mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k],
- mode_lib->vba.VRatio[k],
- mode_lib->vba.Tno_bw[k],
- mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
- mode_lib->vba.MetaRowByte[k],
- mode_lib->vba.PixelPTEBytesPerRow[k],
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.dpte_row_height[k],
- mode_lib->vba.meta_row_height[k],
- mode_lib->vba.qual_row_bw[k],
- &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
- &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
- &final_flip_bw[k],
- &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
- }
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- total_dcn_read_bw_with_flip =
- total_dcn_read_bw_with_flip
- + mode_lib->vba.cursor_bw[k]
- + dml_max(
- mode_lib->vba.prefetch_vm_bw[k],
- dml_max(
- mode_lib->vba.prefetch_row_bw[k],
- final_flip_bw[k]
- + dml_max(
- mode_lib->vba.ReadBandwidthPlaneLuma[k]
- + mode_lib->vba.ReadBandwidthPlaneChroma[k],
- mode_lib->vba.RequiredPrefetchPixDataBW[k])));
- }
- mode_lib->vba.ImmediateFlipSupported = true;
- if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
- mode_lib->vba.ImmediateFlipSupported = false;
- }
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
- mode_lib->vba.ImmediateFlipSupported = false;
- }
- }
- } else {
- mode_lib->vba.ImmediateFlipSupported = false;
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.ErrorResult[k]) {
- mode_lib->vba.PrefetchModeSupported = false;
- dml_print(
- "DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
- }
- }
-
- mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
- } while (!((mode_lib->vba.PrefetchModeSupported
- && (!mode_lib->vba.ImmediateFlipSupport
- || mode_lib->vba.ImmediateFlipSupported))
- || mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
-
- //Display Pipeline Delivery Time in Prefetch
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.VRatioPrefetchY[k] <= 1) {
- mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
- mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k]
- / mode_lib->vba.HRatio[k]
- / mode_lib->vba.PixelClock[k];
- } else {
- mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
- mode_lib->vba.SwathWidthY[k]
- / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
- / mode_lib->vba.DPPCLK[k];
- }
- if (mode_lib->vba.BytePerPixelDETC[k] == 0) {
- mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
- } else {
- if (mode_lib->vba.VRatioPrefetchC[k] <= 1) {
- mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
- mode_lib->vba.SwathWidthY[k]
- * mode_lib->vba.DPPPerPlane[k]
- / mode_lib->vba.HRatio[k]
- / mode_lib->vba.PixelClock[k];
- } else {
- mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
- mode_lib->vba.SwathWidthY[k]
- / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
- / mode_lib->vba.DPPCLK[k];
- }
- }
- }
-
- // Min TTUVBlank
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.PrefetchMode == 0) {
- mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true;
- mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
- mode_lib->vba.MinTTUVBlank[k] = dml_max(
- mode_lib->vba.DRAMClockChangeWatermark,
- dml_max(
- mode_lib->vba.StutterEnterPlusExitWatermark,
- mode_lib->vba.UrgentWatermark));
- } else if (mode_lib->vba.PrefetchMode == 1) {
- mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
- mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
- mode_lib->vba.MinTTUVBlank[k] = dml_max(
- mode_lib->vba.StutterEnterPlusExitWatermark,
- mode_lib->vba.UrgentWatermark);
- } else {
- mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
- mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false;
- mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
- }
- if (!mode_lib->vba.DynamicMetadataEnable[k])
- mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc
- + mode_lib->vba.MinTTUVBlank[k];
- }
-
- // DCC Configuration
- mode_lib->vba.ActiveDPPs = 0;
- // NB P-State/DRAM Clock Change Support
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.ActiveDPPs = mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k];
- }
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- double EffectiveLBLatencyHidingY;
- double EffectiveLBLatencyHidingC;
- double DPPOutputBufferLinesY;
- double DPPOutputBufferLinesC;
- double DPPOPPBufferingY;
- double MaxDETBufferingTimeY;
- double ActiveDRAMClockChangeLatencyMarginY;
-
- mode_lib->vba.LBLatencyHidingSourceLinesY =
- dml_min(
- mode_lib->vba.MaxLineBufferLines,
- (unsigned int) dml_floor(
- (double) mode_lib->vba.LineBufferSize
- / mode_lib->vba.LBBitPerPixel[k]
- / (mode_lib->vba.SwathWidthY[k]
- / dml_max(
- mode_lib->vba.HRatio[k],
- 1.0)),
- 1)) - (mode_lib->vba.vtaps[k] - 1);
-
- mode_lib->vba.LBLatencyHidingSourceLinesC =
- dml_min(
- mode_lib->vba.MaxLineBufferLines,
- (unsigned int) dml_floor(
- (double) mode_lib->vba.LineBufferSize
- / mode_lib->vba.LBBitPerPixel[k]
- / (mode_lib->vba.SwathWidthY[k]
- / 2.0
- / dml_max(
- mode_lib->vba.HRatio[k]
- / 2,
- 1.0)),
- 1))
- - (mode_lib->vba.VTAPsChroma[k] - 1);
-
- EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY
- / mode_lib->vba.VRatio[k]
- * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
-
- EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
- / (mode_lib->vba.VRatio[k] / 2)
- * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
-
- if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) {
- DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels
- / mode_lib->vba.SwathWidthY[k];
- } else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) {
- DPPOutputBufferLinesY = 0.5;
- } else {
- DPPOutputBufferLinesY = 1;
- }
-
- if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) {
- DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels
- / (mode_lib->vba.SwathWidthY[k] / 2);
- } else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) {
- DPPOutputBufferLinesC = 0.5;
- } else {
- DPPOutputBufferLinesC = 1;
- }
-
- DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
- * (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines);
- MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k]
- + (mode_lib->vba.LinesInDETY[k]
- - mode_lib->vba.LinesInDETYRoundedDownToSwath[k])
- / mode_lib->vba.SwathHeightY[k]
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]);
-
- ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY
- + MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark;
-
- if (mode_lib->vba.ActiveDPPs > 1) {
- ActiveDRAMClockChangeLatencyMarginY =
- ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1 / (mode_lib->vba.ActiveDPPs - 1))
- * mode_lib->vba.SwathHeightY[k]
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]);
- }
-
- if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
- double DPPOPPBufferingC = (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k])
- * (DPPOutputBufferLinesC
- + mode_lib->vba.OPPOutputBufferLines);
- double MaxDETBufferingTimeC =
- mode_lib->vba.FullDETBufferingTimeC[k]
- + (mode_lib->vba.LinesInDETC[k]
- - mode_lib->vba.LinesInDETCRoundedDownToSwath[k])
- / mode_lib->vba.SwathHeightC[k]
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]);
- double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC
- + EffectiveLBLatencyHidingC + MaxDETBufferingTimeC
- - mode_lib->vba.DRAMClockChangeWatermark;
-
- if (mode_lib->vba.ActiveDPPs > 1) {
- ActiveDRAMClockChangeLatencyMarginC =
- ActiveDRAMClockChangeLatencyMarginC
- - (1
- - 1
- / (mode_lib->vba.ActiveDPPs
- - 1))
- * mode_lib->vba.SwathHeightC[k]
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]);
- }
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
- ActiveDRAMClockChangeLatencyMarginY,
- ActiveDRAMClockChangeLatencyMarginC);
- } else {
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] =
- ActiveDRAMClockChangeLatencyMarginY;
- }
-
- if (mode_lib->vba.WritebackEnable[k]) {
- double WritebackDRAMClockChangeLatencyMargin;
-
- if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
- WritebackDRAMClockChangeLatencyMargin =
- (double) (mode_lib->vba.WritebackInterfaceLumaBufferSize
- + mode_lib->vba.WritebackInterfaceChromaBufferSize)
- / (mode_lib->vba.WritebackDestinationWidth[k]
- * mode_lib->vba.WritebackDestinationHeight[k]
- / (mode_lib->vba.WritebackSourceHeight[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k])
- * 4)
- - mode_lib->vba.WritebackDRAMClockChangeWatermark;
- } else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
- WritebackDRAMClockChangeLatencyMargin =
- dml_min(
- (double) mode_lib->vba.WritebackInterfaceLumaBufferSize
- * 8.0 / 10,
- 2.0
- * mode_lib->vba.WritebackInterfaceChromaBufferSize
- * 8 / 10)
- / (mode_lib->vba.WritebackDestinationWidth[k]
- * mode_lib->vba.WritebackDestinationHeight[k]
- / (mode_lib->vba.WritebackSourceHeight[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]))
- - mode_lib->vba.WritebackDRAMClockChangeWatermark;
- } else {
- WritebackDRAMClockChangeLatencyMargin =
- dml_min(
- (double) mode_lib->vba.WritebackInterfaceLumaBufferSize,
- 2.0
- * mode_lib->vba.WritebackInterfaceChromaBufferSize)
- / (mode_lib->vba.WritebackDestinationWidth[k]
- * mode_lib->vba.WritebackDestinationHeight[k]
- / (mode_lib->vba.WritebackSourceHeight[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]))
- - mode_lib->vba.WritebackDRAMClockChangeWatermark;
- }
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
- WritebackDRAMClockChangeLatencyMargin);
- }
- }
-
- mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
- < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
- mode_lib->vba.MinActiveDRAMClockChangeMargin =
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
- }
- }
-
- mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
- mode_lib->vba.MinActiveDRAMClockChangeMargin
- + mode_lib->vba.DRAMClockChangeLatency;
-
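-	// P-state change support: a positive margin on every plane allows the change
-	// during active display; otherwise fall back to VBLANK, which requires
-	// synchronized blanking (or a single plane) and a prefetch mode that permits it.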
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
- mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else {
- if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
- mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vblank;
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
- mode_lib->vba.DRAMClockChangeSupport =
- dm_dram_clock_change_unsupported;
- }
- }
- } else {
- mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
- }
- }
-
- //XFC Parameters:
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.XFCEnabled[k] == true) {
- double TWait;
-
- mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
- mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
- mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
- TWait = CalculateTWait(
- mode_lib->vba.PrefetchMode,
- mode_lib->vba.DRAMClockChangeLatency,
- mode_lib->vba.UrgentLatency,
- mode_lib->vba.SREnterPlusExitTime);
- mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
- mode_lib,
- mode_lib->vba.VRatio[k],
- mode_lib->vba.SwathWidthY[k],
- dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
- mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
- mode_lib->vba.XFCTSlvVupdateOffset,
- mode_lib->vba.XFCTSlvVupdateWidth,
- mode_lib->vba.XFCTSlvVreadyOffset,
- mode_lib->vba.XFCXBUFLatencyTolerance,
- mode_lib->vba.XFCFillBWOverhead,
- mode_lib->vba.XFCSlvChunkSize,
- mode_lib->vba.XFCBusTransportTime,
- mode_lib->vba.TCalc,
- TWait,
- &mode_lib->vba.SrcActiveDrainRate,
- &mode_lib->vba.TInitXFill,
- &mode_lib->vba.TslvChk);
- mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] =
- dml_floor(
- mode_lib->vba.XFCRemoteSurfaceFlipDelay
- / (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]),
- 1);
- mode_lib->vba.XFCTransferDelay[k] =
- dml_ceil(
- mode_lib->vba.XFCBusTransportTime
- / (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]),
- 1);
- mode_lib->vba.XFCPrechargeDelay[k] =
- dml_ceil(
- (mode_lib->vba.XFCBusTransportTime
- + mode_lib->vba.TInitXFill
- + mode_lib->vba.TslvChk)
- / (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]),
- 1);
- mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
- * mode_lib->vba.SrcActiveDrainRate;
- mode_lib->vba.FinalFillMargin =
- (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
- + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]
- * mode_lib->vba.SrcActiveDrainRate
- + mode_lib->vba.XFCFillConstant;
- mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
- * mode_lib->vba.SrcActiveDrainRate
- + mode_lib->vba.FinalFillMargin;
- mode_lib->vba.RemainingFillLevel = dml_max(
- 0.0,
- mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
- mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
- / (mode_lib->vba.SrcActiveDrainRate
- * mode_lib->vba.XFCFillBWOverhead / 100);
- mode_lib->vba.XFCPrefetchMargin[k] =
- mode_lib->vba.XFCRemoteSurfaceFlipDelay
- + mode_lib->vba.TFinalxFill
- + (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
- + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k];
- } else {
- mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0;
- mode_lib->vba.XFCSlaveVupdateWidth[k] = 0;
- mode_lib->vba.XFCSlaveVReadyOffset[k] = 0;
- mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0;
- mode_lib->vba.XFCPrechargeDelay[k] = 0;
- mode_lib->vba.XFCTransferDelay[k] = 0;
- mode_lib->vba.XFCPrefetchMargin[k] = 0;
- }
- }
-}
-
-static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
-{
- double BytePerPixDETY;
- double BytePerPixDETC;
- double Read256BytesBlockHeightY;
- double Read256BytesBlockHeightC;
- double Read256BytesBlockWidthY;
- double Read256BytesBlockWidthC;
- double MaximumSwathHeightY;
- double MaximumSwathHeightC;
- double MinimumSwathHeightY;
- double MinimumSwathHeightC;
- double SwathWidth;
- double SwathWidthGranularityY;
- double SwathWidthGranularityC;
- double RoundedUpMaxSwathSizeBytesY;
- double RoundedUpMaxSwathSizeBytesC;
- unsigned int j, k;
-
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- bool MainPlaneDoesODMCombine = false;
-
- if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
- BytePerPixDETY = 8;
- BytePerPixDETC = 0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
- BytePerPixDETY = 4;
- BytePerPixDETC = 0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
- BytePerPixDETY = 2;
- BytePerPixDETC = 0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
- BytePerPixDETY = 1;
- BytePerPixDETC = 0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
- BytePerPixDETY = 1;
- BytePerPixDETC = 2;
- } else {
- BytePerPixDETY = 4.0 / 3.0;
- BytePerPixDETC = 8.0 / 3.0;
- }
-
- if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
- if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
- Read256BytesBlockHeightY = 1;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
- Read256BytesBlockHeightY = 4;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
- Read256BytesBlockHeightY = 8;
- } else {
- Read256BytesBlockHeightY = 16;
- }
- Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
- / Read256BytesBlockHeightY;
- Read256BytesBlockHeightC = 0;
- Read256BytesBlockWidthC = 0;
- } else {
- if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
- Read256BytesBlockHeightY = 1;
- Read256BytesBlockHeightC = 1;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
- Read256BytesBlockHeightY = 16;
- Read256BytesBlockHeightC = 8;
- } else {
- Read256BytesBlockHeightY = 8;
- Read256BytesBlockHeightC = 8;
- }
- Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
- / Read256BytesBlockHeightY;
- Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
- / Read256BytesBlockHeightC;
- }
-
- if (mode_lib->vba.SourceScan[k] == dm_horz) {
- MaximumSwathHeightY = Read256BytesBlockHeightY;
- MaximumSwathHeightC = Read256BytesBlockHeightC;
- } else {
- MaximumSwathHeightY = Read256BytesBlockWidthY;
- MaximumSwathHeightC = Read256BytesBlockWidthC;
- }
-
- if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
- if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
- || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
- && (mode_lib->vba.SurfaceTiling[k]
- == dm_sw_4kb_s
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_4kb_s_x
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_64kb_s
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_64kb_s_t
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_64kb_s_x
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_var_s
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_var_s_x)
- && mode_lib->vba.SourceScan[k] == dm_horz)) {
- MinimumSwathHeightY = MaximumSwathHeightY;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
- && mode_lib->vba.SourceScan[k] != dm_horz) {
- MinimumSwathHeightY = MaximumSwathHeightY;
- } else {
- MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
- }
- MinimumSwathHeightC = MaximumSwathHeightC;
- } else {
- if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
- MinimumSwathHeightY = MaximumSwathHeightY;
- MinimumSwathHeightC = MaximumSwathHeightC;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
- && mode_lib->vba.SourceScan[k] == dm_horz) {
- MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
- MinimumSwathHeightC = MaximumSwathHeightC;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
- && mode_lib->vba.SourceScan[k] == dm_horz) {
- MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
- MinimumSwathHeightY = MaximumSwathHeightY;
- } else {
- MinimumSwathHeightY = MaximumSwathHeightY;
- MinimumSwathHeightC = MaximumSwathHeightC;
- }
- }
-
- if (mode_lib->vba.SourceScan[k] == dm_horz) {
- SwathWidth = mode_lib->vba.ViewportWidth[k];
- } else {
- SwathWidth = mode_lib->vba.ViewportHeight[k];
- }
-
- if (mode_lib->vba.ODMCombineEnabled[k] == true) {
- MainPlaneDoesODMCombine = true;
- }
- for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
- if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.ODMCombineEnabled[j] == true) {
- MainPlaneDoesODMCombine = true;
- }
- }
-
- if (MainPlaneDoesODMCombine == true) {
- SwathWidth = dml_min(
- SwathWidth,
- mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
- } else {
- SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
- }
-
- SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
- RoundedUpMaxSwathSizeBytesY = (dml_ceil(
- (double) (SwathWidth - 1),
- SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
- * MaximumSwathHeightY;
- if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
- RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
- + 256;
- }
- if (MaximumSwathHeightC > 0) {
- SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
- / MaximumSwathHeightC;
- RoundedUpMaxSwathSizeBytesC = (dml_ceil(
- (double) (SwathWidth / 2.0 - 1),
- SwathWidthGranularityC) + SwathWidthGranularityC)
- * BytePerPixDETC * MaximumSwathHeightC;
- if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
- RoundedUpMaxSwathSizeBytesC = dml_ceil(
- RoundedUpMaxSwathSizeBytesC,
- 256) + 256;
- }
- } else
- RoundedUpMaxSwathSizeBytesC = 0.0;
-
- if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
- <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
- mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
- mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
- } else {
- mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
- mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
- }
-
- if (mode_lib->vba.SwathHeightC[k] == 0) {
- mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte * 1024;
- mode_lib->vba.DETBufferSizeC[k] = 0;
- } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) {
- mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
- * 1024.0 / 2;
- mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
- * 1024.0 / 2;
- } else {
- mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
- * 1024.0 * 2 / 3;
- mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
- * 1024.0 / 3;
- }
- }
-}
-
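-// Report, in pixels, the dimensions of one 256-byte swizzle block for the luma and
-// chroma planes of the given format and tiling; linear surfaces use single-row blocks.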
-bool Calculate256BBlockSizes(
- enum source_format_class SourcePixelFormat,
- enum dm_swizzle_mode SurfaceTiling,
- unsigned int BytePerPixelY,
- unsigned int BytePerPixelC,
- unsigned int *BlockHeight256BytesY,
- unsigned int *BlockHeight256BytesC,
- unsigned int *BlockWidth256BytesY,
- unsigned int *BlockWidth256BytesC)
-{
- if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
- || SourcePixelFormat == dm_444_16
- || SourcePixelFormat == dm_444_8)) {
- if (SurfaceTiling == dm_sw_linear) {
- *BlockHeight256BytesY = 1;
- } else if (SourcePixelFormat == dm_444_64) {
- *BlockHeight256BytesY = 4;
- } else if (SourcePixelFormat == dm_444_8) {
- *BlockHeight256BytesY = 16;
- } else {
- *BlockHeight256BytesY = 8;
- }
- *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
- *BlockHeight256BytesC = 0;
- *BlockWidth256BytesC = 0;
- } else {
- if (SurfaceTiling == dm_sw_linear) {
- *BlockHeight256BytesY = 1;
- *BlockHeight256BytesC = 1;
- } else if (SourcePixelFormat == dm_420_8) {
- *BlockHeight256BytesY = 16;
- *BlockHeight256BytesC = 8;
- } else {
- *BlockHeight256BytesY = 8;
- *BlockHeight256BytesC = 8;
- }
- *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
- *BlockWidth256BytesC = 256 / BytePerPixelC / *BlockHeight256BytesC;
- }
- return true;
-}
-
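-// Worst-case wait time used by the prefetch schedule and XFC flip-delay
-// calculations: prefetch mode 0 covers a DRAM clock change, self-refresh
-// enter/exit and urgent latency; mode 1 drops the DRAM clock change term;
-// any other mode covers only the urgent latency.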
-static double CalculateTWait(
- unsigned int PrefetchMode,
- double DRAMClockChangeLatency,
- double UrgentLatency,
- double SREnterPlusExitTime)
-{
- if (PrefetchMode == 0) {
- return dml_max(
- DRAMClockChangeLatency + UrgentLatency,
- dml_max(SREnterPlusExitTime, UrgentLatency));
- } else if (PrefetchMode == 1) {
- return dml_max(SREnterPlusExitTime, UrgentLatency);
- } else {
- return UrgentLatency;
- }
-}
-
-static double CalculateRemoteSurfaceFlipDelay(
- struct display_mode_lib *mode_lib,
- double VRatio,
- double SwathWidth,
- double Bpp,
- double LineTime,
- double XFCTSlvVupdateOffset,
- double XFCTSlvVupdateWidth,
- double XFCTSlvVreadyOffset,
- double XFCXBUFLatencyTolerance,
- double XFCFillBWOverhead,
- double XFCSlvChunkSize,
- double XFCBusTransportTime,
- double TCalc,
- double TWait,
- double *SrcActiveDrainRate,
- double *TInitXFill,
- double *TslvChk)
-{
- double TSlvSetup, AvgfillRate, result;
-
- *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
- TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
- *TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
- AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
- *TslvChk = XFCSlvChunkSize / AvgfillRate;
- dml_print(
- "DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
- *SrcActiveDrainRate);
- dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
- dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
- dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
- dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
- result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide
- dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
- return result;
-}
-
-static double CalculateWriteBackDISPCLK(
- enum source_format_class WritebackPixelFormat,
- double PixelClock,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackLumaHTaps,
- unsigned int WritebackLumaVTaps,
- unsigned int WritebackChromaHTaps,
- unsigned int WritebackChromaVTaps,
- double WritebackDestinationWidth,
- unsigned int HTotal,
- unsigned int WritebackChromaLineBufferWidth)
-{
- double CalculateWriteBackDISPCLK =
- 1.01 * PixelClock
- * dml_max(
- dml_ceil(WritebackLumaHTaps / 4.0, 1)
- / WritebackHRatio,
- dml_max(
- (WritebackLumaVTaps
- * dml_ceil(
- 1.0
- / WritebackVRatio,
- 1)
- * dml_ceil(
- WritebackDestinationWidth
- / 4.0,
- 1)
- + dml_ceil(
- WritebackDestinationWidth
- / 4.0,
- 1))
- / (double) HTotal
- + dml_ceil(
- 1.0
- / WritebackVRatio,
- 1)
- * (dml_ceil(
- WritebackLumaVTaps
- / 4.0,
- 1)
- + 4.0)
- / (double) HTotal,
- dml_ceil(
- 1.0
- / WritebackVRatio,
- 1)
- * WritebackDestinationWidth
- / (double) HTotal));
- if (WritebackPixelFormat != dm_444_32) {
- CalculateWriteBackDISPCLK =
- dml_max(
- CalculateWriteBackDISPCLK,
- 1.01 * PixelClock
- * dml_max(
- dml_ceil(
- WritebackChromaHTaps
- / 2.0,
- 1)
- / (2
- * WritebackHRatio),
- dml_max(
- (WritebackChromaVTaps
- * dml_ceil(
- 1
- / (2
- * WritebackVRatio),
- 1)
- * dml_ceil(
- WritebackDestinationWidth
- / 2.0
- / 2.0,
- 1)
- + dml_ceil(
- WritebackDestinationWidth
- / 2.0
- / WritebackChromaLineBufferWidth,
- 1))
- / HTotal
- + dml_ceil(
- 1
- / (2
- * WritebackVRatio),
- 1)
- * (dml_ceil(
- WritebackChromaVTaps
- / 4.0,
- 1)
- + 4)
- / HTotal,
- dml_ceil(
- 1.0
- / (2
- * WritebackVRatio),
- 1)
- * WritebackDestinationWidth
- / 2.0
- / HTotal)));
- }
- return CalculateWriteBackDISPCLK;
-}
-
-static double CalculateWriteBackDelay(
- enum source_format_class WritebackPixelFormat,
- double WritebackHRatio,
- double WritebackVRatio,
- unsigned int WritebackLumaHTaps,
- unsigned int WritebackLumaVTaps,
- unsigned int WritebackChromaHTaps,
- unsigned int WritebackChromaVTaps,
- unsigned int WritebackDestinationWidth)
-{
- double CalculateWriteBackDelay =
- dml_max(
- dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
- WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
- * dml_ceil(
- WritebackDestinationWidth
- / 4.0,
- 1)
- + dml_ceil(1.0 / WritebackVRatio, 1)
- * (dml_ceil(
- WritebackLumaVTaps
- / 4.0,
- 1) + 4));
-
- if (WritebackPixelFormat != dm_444_32) {
- CalculateWriteBackDelay =
- dml_max(
- CalculateWriteBackDelay,
- dml_max(
- dml_ceil(
- WritebackChromaHTaps
- / 2.0,
- 1)
- / (2
- * WritebackHRatio),
- WritebackChromaVTaps
- * dml_ceil(
- 1
- / (2
- * WritebackVRatio),
- 1)
- * dml_ceil(
- WritebackDestinationWidth
- / 2.0
- / 2.0,
- 1)
- + dml_ceil(
- 1
- / (2
- * WritebackVRatio),
- 1)
- * (dml_ceil(
- WritebackChromaVTaps
- / 4.0,
- 1)
- + 4)));
- }
- return CalculateWriteBackDelay;
-}
-
-static void CalculateActiveRowBandwidth(
- bool VirtualMemoryEnable,
- enum source_format_class SourcePixelFormat,
- double VRatio,
- bool DCCEnable,
- double LineTime,
- unsigned int MetaRowByteLuma,
- unsigned int MetaRowByteChroma,
- unsigned int meta_row_height_luma,
- unsigned int meta_row_height_chroma,
- unsigned int PixelPTEBytesPerRowLuma,
- unsigned int PixelPTEBytesPerRowChroma,
- unsigned int dpte_row_height_luma,
- unsigned int dpte_row_height_chroma,
- double *meta_row_bw,
- double *dpte_row_bw,
- double *qual_row_bw)
-{
- if (DCCEnable != true) {
- *meta_row_bw = 0;
- } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
- *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
- + VRatio / 2 * MetaRowByteChroma
- / (meta_row_height_chroma * LineTime);
- } else {
- *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
- }
-
- if (VirtualMemoryEnable != true) {
- *dpte_row_bw = 0;
- } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
- *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
- + VRatio / 2 * PixelPTEBytesPerRowChroma
- / (dpte_row_height_chroma * LineTime);
- } else {
- *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
- }
-
- if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) {
- *qual_row_bw = *meta_row_bw + *dpte_row_bw;
- } else {
- *qual_row_bw = 0;
- }
-}
-
-static void CalculateFlipSchedule(
- struct display_mode_lib *mode_lib,
- double UrgentExtraLatency,
- double UrgentLatency,
- unsigned int MaxPageTableLevels,
- bool VirtualMemoryEnable,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- unsigned int ImmediateFlipBytes,
- double LineTime,
- double Tno_bw,
- double VRatio,
- double PDEAndMetaPTEBytesFrame,
- unsigned int MetaRowByte,
- unsigned int PixelPTEBytesPerRow,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- double qual_row_bw,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
-{
- double min_row_time = 0.0;
-
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
- *DestinationLinesToRequestVMInImmediateFlip = 0.0;
- *DestinationLinesToRequestRowInImmediateFlip = 0.0;
- *final_flip_bw = qual_row_bw;
- *ImmediateFlipSupportedForPipe = true;
- } else {
- double TimeForFetchingMetaPTEImmediateFlip;
- double TimeForFetchingRowInVBlankImmediateFlip;
-
- if (VirtualMemoryEnable == true) {
- mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
- * ImmediateFlipBytes / TotImmediateFlipBytes;
- TimeForFetchingMetaPTEImmediateFlip =
- dml_max(
- Tno_bw
- + PDEAndMetaPTEBytesFrame
- / mode_lib->vba.ImmediateFlipBW,
- dml_max(
- UrgentExtraLatency
- + UrgentLatency
- * (MaxPageTableLevels
- - 1),
- LineTime / 4.0));
- } else {
- TimeForFetchingMetaPTEImmediateFlip = 0;
- }
-
- *DestinationLinesToRequestVMInImmediateFlip = dml_floor(
- 4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
- 1) / 4.0;
-
- if ((VirtualMemoryEnable == true || DCCEnable == true)) {
- mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
- * ImmediateFlipBytes / TotImmediateFlipBytes;
- TimeForFetchingRowInVBlankImmediateFlip = dml_max(
- (MetaRowByte + PixelPTEBytesPerRow)
- / mode_lib->vba.ImmediateFlipBW,
- dml_max(UrgentLatency, LineTime / 4.0));
- } else {
- TimeForFetchingRowInVBlankImmediateFlip = 0;
- }
-
- *DestinationLinesToRequestRowInImmediateFlip = dml_floor(
- 4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125),
- 1) / 4.0;
-
- if (VirtualMemoryEnable == true) {
- *final_flip_bw =
- dml_max(
- PDEAndMetaPTEBytesFrame
- / (*DestinationLinesToRequestVMInImmediateFlip
- * LineTime),
- (MetaRowByte + PixelPTEBytesPerRow)
- / (TimeForFetchingRowInVBlankImmediateFlip
- * LineTime));
- } else if (MetaRowByte + PixelPTEBytesPerRow > 0) {
- *final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow)
- / (TimeForFetchingRowInVBlankImmediateFlip * LineTime);
- } else {
- *final_flip_bw = 0;
- }
-
- if (VirtualMemoryEnable && !DCCEnable)
- min_row_time = dpte_row_height * LineTime / VRatio;
- else if (!VirtualMemoryEnable && DCCEnable)
- min_row_time = meta_row_height * LineTime / VRatio;
- else
- min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime
- / VRatio;
-
- if (*DestinationLinesToRequestVMInImmediateFlip >= 8
- || *DestinationLinesToRequestRowInImmediateFlip >= 16
- || TimeForFetchingMetaPTEImmediateFlip
- + 2 * TimeForFetchingRowInVBlankImmediateFlip
- > min_row_time)
- *ImmediateFlipSupportedForPipe = false;
- else
- *ImmediateFlipSupportedForPipe = true;
- }
-}
-
-static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib)
-{
- unsigned int k;
-
- //Progressive To Interlace Unit Effect
- for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- mode_lib->vba.PixelClockBackEnd[k] = mode_lib->vba.PixelClock[k];
- if (mode_lib->vba.Interlace[k] == 1
- && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true) {
- mode_lib->vba.PixelClock[k] = 2 * mode_lib->vba.PixelClock[k];
- }
- }
-}
-
-static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp)
-{
- switch (ebpp) {
- case dm_cur_2bit:
- return 2;
- case dm_cur_32bit:
- return 32;
- case dm_cur_64bit:
- return 64;
- default:
- return 0;
- }
-}
-
-static unsigned int TruncToValidBPP(
- double DecimalBPP,
- bool DSCEnabled,
- enum output_encoder_class Output,
- enum output_format_class Format,
- unsigned int DSCInputBitPerComponent)
-{
- if (Output == dm_hdmi) {
- if (Format == dm_420) {
- if (DecimalBPP >= 18)
- return 18;
- else if (DecimalBPP >= 15)
- return 15;
- else if (DecimalBPP >= 12)
- return 12;
- else
- return 0;
- } else if (Format == dm_444) {
- if (DecimalBPP >= 36)
- return 36;
- else if (DecimalBPP >= 30)
- return 30;
- else if (DecimalBPP >= 24)
- return 24;
- else
- return 0;
- } else {
- if (DecimalBPP / 1.5 >= 24)
- return 24;
- else if (DecimalBPP / 1.5 >= 20)
- return 20;
- else if (DecimalBPP / 1.5 >= 16)
- return 16;
- else
- return 0;
- }
- } else {
- if (DSCEnabled) {
- if (Format == dm_420) {
- if (DecimalBPP < 6)
- return 0;
- else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16)
- return 1.5 * DSCInputBitPerComponent - 1.0 / 16;
- else
- return dml_floor(16 * DecimalBPP, 1) / 16;
- } else if (Format == dm_n422) {
- if (DecimalBPP < 7)
- return 0;
- else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16)
- return 2 * DSCInputBitPerComponent - 1.0 / 16;
- else
- return dml_floor(16 * DecimalBPP, 1) / 16;
- } else {
- if (DecimalBPP < 8)
- return 0;
- else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16)
- return 3 * DSCInputBitPerComponent - 1.0 / 16;
- else
- return dml_floor(16 * DecimalBPP, 1) / 16;
- }
- } else if (Format == dm_420) {
- if (DecimalBPP >= 18)
- return 18;
- else if (DecimalBPP >= 15)
- return 15;
- else if (DecimalBPP >= 12)
- return 12;
- else
- return 0;
- } else if (Format == dm_s422 || Format == dm_n422) {
- if (DecimalBPP >= 24)
- return 24;
- else if (DecimalBPP >= 20)
- return 20;
- else if (DecimalBPP >= 16)
- return 16;
- else
- return 0;
- } else {
- if (DecimalBPP >= 36)
- return 36;
- else if (DecimalBPP >= 30)
- return 30;
- else if (DecimalBPP >= 24)
- return 24;
- else
- return 0;
- }
- }
-}
-
-static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
-{
- int i;
- unsigned int j, k;
- /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
-
- /*Scale Ratio, taps Support Check*/
-
- mode_lib->vba.ScaleRatioAndTapsSupport = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.ScalerEnabled[k] == false
- && ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
- || mode_lib->vba.HRatio[k] != 1.0
- || mode_lib->vba.htaps[k] != 1.0
- || mode_lib->vba.VRatio[k] != 1.0
- || mode_lib->vba.vtaps[k] != 1.0)) {
- mode_lib->vba.ScaleRatioAndTapsSupport = false;
- } else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
- || mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
- || (mode_lib->vba.htaps[k] > 1.0
- && (mode_lib->vba.htaps[k] % 2) == 1)
- || mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
- || mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
- || mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
- || mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
- || (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
- && (mode_lib->vba.HRatio[k] / 2.0
- > mode_lib->vba.HTAPsChroma[k]
- || mode_lib->vba.VRatio[k] / 2.0
- > mode_lib->vba.VTAPsChroma[k]))) {
- mode_lib->vba.ScaleRatioAndTapsSupport = false;
- }
- }
- /*Source Format, Pixel Format and Scan Support Check*/
-
- mode_lib->vba.SourceFormatPixelAndScanSupport = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
- && mode_lib->vba.SourceScan[k] != dm_horz)
- || ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
- || (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
- && (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
- || mode_lib->vba.SourcePixelFormat[k]
- == dm_420_8
- || mode_lib->vba.SourcePixelFormat[k]
- == dm_420_10))
- || (((mode_lib->vba.SurfaceTiling[k]
- == dm_sw_gfx7_2d_thin_gl
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_gfx7_2d_thin_lvp)
- && !((mode_lib->vba.SourcePixelFormat[k]
- == dm_444_64
- || mode_lib->vba.SourcePixelFormat[k]
- == dm_444_32)
- && mode_lib->vba.SourceScan[k]
- == dm_horz
- && mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
- == true
- && mode_lib->vba.DCCEnable[k]
- == false))
- || (mode_lib->vba.DCCEnable[k] == true
- && (mode_lib->vba.SurfaceTiling[k]
- == dm_sw_linear
- || mode_lib->vba.SourcePixelFormat[k]
- == dm_420_8
- || mode_lib->vba.SourcePixelFormat[k]
- == dm_420_10)))) {
- mode_lib->vba.SourceFormatPixelAndScanSupport = false;
- }
- }
- /*Bandwidth Support Check*/
-
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.SourceScan[k] == dm_horz) {
- mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
- } else {
- mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
- }
- if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
- mode_lib->vba.BytePerPixelInDETY[k] = 8.0;
- mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
- mode_lib->vba.BytePerPixelInDETY[k] = 4.0;
- mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
- || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
- mode_lib->vba.BytePerPixelInDETY[k] = 2.0;
- mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
- mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
- mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
- mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
- mode_lib->vba.BytePerPixelInDETC[k] = 2.0;
- } else {
- mode_lib->vba.BytePerPixelInDETY[k] = 4.0 / 3;
- mode_lib->vba.BytePerPixelInDETC[k] = 8.0 / 3;
- }
- }
- mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.SwathWidthYSingleDPP[k]
- * (dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
- * mode_lib->vba.VRatio[k]
- + dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
- / 2.0 * mode_lib->vba.VRatio[k] / 2)
- / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
- if (mode_lib->vba.DCCEnable[k] == true) {
- mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
- * (1 + 1.0 / 256);
- }
- if (mode_lib->vba.VirtualMemoryEnable == true
- && mode_lib->vba.SourceScan[k] != dm_horz
- && (mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s_x
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x)) {
- mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
- * (1 + 1.0 / 64);
- } else if (mode_lib->vba.VirtualMemoryEnable == true
- && mode_lib->vba.SourceScan[k] == dm_horz
- && (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_32)
- && (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_t
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_x
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
- || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x)) {
- mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
- * (1 + 1.0 / 256);
- } else if (mode_lib->vba.VirtualMemoryEnable == true) {
- mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
- * (1 + 1.0 / 512);
- }
- mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond =
- mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
- + mode_lib->vba.ReadBandwidth[k] / 1000.0;
- }
- mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.WritebackEnable[k] == true
- && mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
- mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
- * mode_lib->vba.WritebackDestinationHeight[k]
- / (mode_lib->vba.WritebackSourceHeight[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]) * 4.0;
- } else if (mode_lib->vba.WritebackEnable[k] == true
- && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
- mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
- * mode_lib->vba.WritebackDestinationHeight[k]
- / (mode_lib->vba.WritebackSourceHeight[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]) * 3.0;
- } else if (mode_lib->vba.WritebackEnable[k] == true) {
- mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
- * mode_lib->vba.WritebackDestinationHeight[k]
- / (mode_lib->vba.WritebackSourceHeight[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]) * 1.5;
- } else {
- mode_lib->vba.WriteBandwidth[k] = 0.0;
- }
- mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond =
- mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond
- + mode_lib->vba.WriteBandwidth[k] / 1000.0;
- }
- mode_lib->vba.TotalBandwidthConsumedGBytePerSecond =
- mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
- + mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond;
- mode_lib->vba.DCCEnabledInAnyPlane = false;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.DCCEnable[k] == true) {
- mode_lib->vba.DCCEnabledInAnyPlane = true;
- }
- }
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.FabricAndDRAMBandwidthPerState[i] = dml_min(
- mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
- * mode_lib->vba.DRAMChannelWidth,
- mode_lib->vba.FabricClockPerState[i]
- * mode_lib->vba.FabricDatapathToDCNDataReturn)
- / 1000;
- mode_lib->vba.ReturnBWToDCNPerState = dml_min(
- mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
- mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0)
- * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
- / 100;
- mode_lib->vba.ReturnBWPerState[i] = mode_lib->vba.ReturnBWToDCNPerState;
- if (mode_lib->vba.DCCEnabledInAnyPlane == true
- && mode_lib->vba.ReturnBWToDCNPerState
- > mode_lib->vba.DCFCLKPerState[i]
- * mode_lib->vba.ReturnBusWidth
- / 4.0) {
- mode_lib->vba.ReturnBWPerState[i] =
- dml_min(
- mode_lib->vba.ReturnBWPerState[i],
- mode_lib->vba.ReturnBWToDCNPerState * 4.0
- * (1.0
- - mode_lib->vba.UrgentLatency
- / ((mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024.0
- / (mode_lib->vba.ReturnBWToDCNPerState
- - mode_lib->vba.DCFCLKPerState[i]
- * mode_lib->vba.ReturnBusWidth
- / 4.0)
- + mode_lib->vba.UrgentLatency)));
- }
- mode_lib->vba.CriticalPoint =
- 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
- * mode_lib->vba.UrgentLatency
- / (mode_lib->vba.ReturnBWToDCNPerState
- * mode_lib->vba.UrgentLatency
- + (mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024.0);
- if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
- && mode_lib->vba.CriticalPoint < 4.0) {
- mode_lib->vba.ReturnBWPerState[i] =
- dml_min(
- mode_lib->vba.ReturnBWPerState[i],
- dml_pow(
- 4.0
- * mode_lib->vba.ReturnBWToDCNPerState
- * (mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024.0
- * mode_lib->vba.ReturnBusWidth
- * mode_lib->vba.DCFCLKPerState[i]
- * mode_lib->vba.UrgentLatency
- / (mode_lib->vba.ReturnBWToDCNPerState
- * mode_lib->vba.UrgentLatency
- + (mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024.0),
- 2));
- }
- mode_lib->vba.ReturnBWToDCNPerState = dml_min(
- mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
- mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0);
- if (mode_lib->vba.DCCEnabledInAnyPlane == true
- && mode_lib->vba.ReturnBWToDCNPerState
- > mode_lib->vba.DCFCLKPerState[i]
- * mode_lib->vba.ReturnBusWidth
- / 4.0) {
- mode_lib->vba.ReturnBWPerState[i] =
- dml_min(
- mode_lib->vba.ReturnBWPerState[i],
- mode_lib->vba.ReturnBWToDCNPerState * 4.0
- * (1.0
- - mode_lib->vba.UrgentLatency
- / ((mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024.0
- / (mode_lib->vba.ReturnBWToDCNPerState
- - mode_lib->vba.DCFCLKPerState[i]
- * mode_lib->vba.ReturnBusWidth
- / 4.0)
- + mode_lib->vba.UrgentLatency)));
- }
- mode_lib->vba.CriticalPoint =
- 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
- * mode_lib->vba.UrgentLatency
- / (mode_lib->vba.ReturnBWToDCNPerState
- * mode_lib->vba.UrgentLatency
- + (mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024.0);
- if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
- && mode_lib->vba.CriticalPoint < 4.0) {
- mode_lib->vba.ReturnBWPerState[i] =
- dml_min(
- mode_lib->vba.ReturnBWPerState[i],
- dml_pow(
- 4.0
- * mode_lib->vba.ReturnBWToDCNPerState
- * (mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024.0
- * mode_lib->vba.ReturnBusWidth
- * mode_lib->vba.DCFCLKPerState[i]
- * mode_lib->vba.UrgentLatency
- / (mode_lib->vba.ReturnBWToDCNPerState
- * mode_lib->vba.UrgentLatency
- + (mode_lib->vba.ROBBufferSizeInKByte
- - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024.0),
- 2));
- }
- }
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- if ((mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond * 1000.0
- <= mode_lib->vba.ReturnBWPerState[i])
- && (mode_lib->vba.TotalBandwidthConsumedGBytePerSecond * 1000.0
- <= mode_lib->vba.FabricAndDRAMBandwidthPerState[i]
- * 1000.0
- * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
- / 100.0)) {
- mode_lib->vba.BandwidthSupport[i] = true;
- } else {
- mode_lib->vba.BandwidthSupport[i] = false;
- }
- }
- /*Writeback Latency support check*/
-
- mode_lib->vba.WritebackLatencySupport = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.WritebackEnable[k] == true) {
- if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
- if (mode_lib->vba.WriteBandwidth[k]
- > (mode_lib->vba.WritebackInterfaceLumaBufferSize
- + mode_lib->vba.WritebackInterfaceChromaBufferSize)
- / mode_lib->vba.WritebackLatency) {
- mode_lib->vba.WritebackLatencySupport = false;
- }
- } else {
- if (mode_lib->vba.WriteBandwidth[k]
- > 1.5
- * dml_min(
- mode_lib->vba.WritebackInterfaceLumaBufferSize,
- 2.0
- * mode_lib->vba.WritebackInterfaceChromaBufferSize)
- / mode_lib->vba.WritebackLatency) {
- mode_lib->vba.WritebackLatencySupport = false;
- }
- }
- }
- }
- /*Re-ordering Buffer Support Check*/
-
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
- (mode_lib->vba.RoundTripPingLatencyCycles + 32.0)
- / mode_lib->vba.DCFCLKPerState[i]
- + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
- * mode_lib->vba.NumberOfChannels
- / mode_lib->vba.ReturnBWPerState[i];
- if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte)
- * 1024.0 / mode_lib->vba.ReturnBWPerState[i]
- > mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
- mode_lib->vba.ROBSupport[i] = true;
- } else {
- mode_lib->vba.ROBSupport[i] = false;
- }
- }
- /*Writeback Mode Support Check*/
-
- mode_lib->vba.TotalNumberOfActiveWriteback = 0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.WritebackEnable[k] == true) {
- mode_lib->vba.TotalNumberOfActiveWriteback =
- mode_lib->vba.TotalNumberOfActiveWriteback + 1;
- }
- }
- mode_lib->vba.WritebackModeSupport = true;
- if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
- mode_lib->vba.WritebackModeSupport = false;
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.WritebackEnable[k] == true
- && mode_lib->vba.Writeback10bpc420Supported != true
- && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
- mode_lib->vba.WritebackModeSupport = false;
- }
- }
- /*Writeback Scale Ratio and Taps Support Check*/
-
- mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.WritebackEnable[k] == true) {
- if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
- && (mode_lib->vba.WritebackHRatio[k] != 1.0
- || mode_lib->vba.WritebackVRatio[k] != 1.0)) {
- mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
- }
- if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
- || mode_lib->vba.WritebackVRatio[k]
- > mode_lib->vba.WritebackMaxVSCLRatio
- || mode_lib->vba.WritebackHRatio[k]
- < mode_lib->vba.WritebackMinHSCLRatio
- || mode_lib->vba.WritebackVRatio[k]
- < mode_lib->vba.WritebackMinVSCLRatio
- || mode_lib->vba.WritebackLumaHTaps[k]
- > mode_lib->vba.WritebackMaxHSCLTaps
- || mode_lib->vba.WritebackLumaVTaps[k]
- > mode_lib->vba.WritebackMaxVSCLTaps
- || mode_lib->vba.WritebackHRatio[k]
- > mode_lib->vba.WritebackLumaHTaps[k]
- || mode_lib->vba.WritebackVRatio[k]
- > mode_lib->vba.WritebackLumaVTaps[k]
- || (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
- && ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
- == 1))
- || (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
- && (mode_lib->vba.WritebackChromaHTaps[k]
- > mode_lib->vba.WritebackMaxHSCLTaps
- || mode_lib->vba.WritebackChromaVTaps[k]
- > mode_lib->vba.WritebackMaxVSCLTaps
- || 2.0
- * mode_lib->vba.WritebackHRatio[k]
- > mode_lib->vba.WritebackChromaHTaps[k]
- || 2.0
- * mode_lib->vba.WritebackVRatio[k]
- > mode_lib->vba.WritebackChromaVTaps[k]
- || (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
- && ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
- mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
- }
- if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
- mode_lib->vba.WritebackLumaVExtra =
- dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
- } else {
- mode_lib->vba.WritebackLumaVExtra = -1;
- }
- if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
- && mode_lib->vba.WritebackLumaVTaps[k]
- > (mode_lib->vba.WritebackLineBufferLumaBufferSize
- + mode_lib->vba.WritebackLineBufferChromaBufferSize)
- / 3.0
- / mode_lib->vba.WritebackDestinationWidth[k]
- - mode_lib->vba.WritebackLumaVExtra)
- || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
- && mode_lib->vba.WritebackLumaVTaps[k]
- > mode_lib->vba.WritebackLineBufferLumaBufferSize
- / mode_lib->vba.WritebackDestinationWidth[k]
- - mode_lib->vba.WritebackLumaVExtra)
- || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
- && mode_lib->vba.WritebackLumaVTaps[k]
- > mode_lib->vba.WritebackLineBufferLumaBufferSize
- * 8.0 / 10.0
- / mode_lib->vba.WritebackDestinationWidth[k]
- - mode_lib->vba.WritebackLumaVExtra)) {
- mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
- }
- if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
- mode_lib->vba.WritebackChromaVExtra = 0.0;
- } else {
- mode_lib->vba.WritebackChromaVExtra = -1;
- }
- if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
- && mode_lib->vba.WritebackChromaVTaps[k]
- > mode_lib->vba.WritebackLineBufferChromaBufferSize
- / mode_lib->vba.WritebackDestinationWidth[k]
- - mode_lib->vba.WritebackChromaVExtra)
- || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
- && mode_lib->vba.WritebackChromaVTaps[k]
- > mode_lib->vba.WritebackLineBufferChromaBufferSize
- * 8.0 / 10.0
- / mode_lib->vba.WritebackDestinationWidth[k]
- - mode_lib->vba.WritebackChromaVExtra)) {
- mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
- }
- }
- }
- /*Maximum DISPCLK/DPPCLK Support check*/
-
- mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.WritebackEnable[k] == true) {
- mode_lib->vba.WritebackRequiredDISPCLK =
- dml_max(
- mode_lib->vba.WritebackRequiredDISPCLK,
- CalculateWriteBackDISPCLK(
- mode_lib->vba.WritebackPixelFormat[k],
- mode_lib->vba.PixelClock[k],
- mode_lib->vba.WritebackHRatio[k],
- mode_lib->vba.WritebackVRatio[k],
- mode_lib->vba.WritebackLumaHTaps[k],
- mode_lib->vba.WritebackLumaVTaps[k],
- mode_lib->vba.WritebackChromaHTaps[k],
- mode_lib->vba.WritebackChromaVTaps[k],
- mode_lib->vba.WritebackDestinationWidth[k],
- mode_lib->vba.HTotal[k],
- mode_lib->vba.WritebackChromaLineBufferWidth));
- }
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.HRatio[k] > 1.0) {
- mode_lib->vba.PSCL_FACTOR[k] = dml_min(
- mode_lib->vba.MaxDCHUBToPSCLThroughput,
- mode_lib->vba.MaxPSCLToLBThroughput
- * mode_lib->vba.HRatio[k]
- / dml_ceil(
- mode_lib->vba.htaps[k]
- / 6.0,
- 1.0));
- } else {
- mode_lib->vba.PSCL_FACTOR[k] = dml_min(
- mode_lib->vba.MaxDCHUBToPSCLThroughput,
- mode_lib->vba.MaxPSCLToLBThroughput);
- }
- if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
- mode_lib->vba.PSCL_FACTOR_CHROMA[k] = 0.0;
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
- mode_lib->vba.PixelClock[k]
- * dml_max3(
- mode_lib->vba.vtaps[k] / 6.0
- * dml_min(
- 1.0,
- mode_lib->vba.HRatio[k]),
- mode_lib->vba.HRatio[k]
- * mode_lib->vba.VRatio[k]
- / mode_lib->vba.PSCL_FACTOR[k],
- 1.0);
- if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
- && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- < 2.0 * mode_lib->vba.PixelClock[k]) {
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
- * mode_lib->vba.PixelClock[k];
- }
- } else {
- if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
- mode_lib->vba.PSCL_FACTOR_CHROMA[k] =
- dml_min(
- mode_lib->vba.MaxDCHUBToPSCLThroughput,
- mode_lib->vba.MaxPSCLToLBThroughput
- * mode_lib->vba.HRatio[k]
- / 2.0
- / dml_ceil(
- mode_lib->vba.HTAPsChroma[k]
- / 6.0,
- 1.0));
- } else {
- mode_lib->vba.PSCL_FACTOR_CHROMA[k] = dml_min(
- mode_lib->vba.MaxDCHUBToPSCLThroughput,
- mode_lib->vba.MaxPSCLToLBThroughput);
- }
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
- mode_lib->vba.PixelClock[k]
- * dml_max5(
- mode_lib->vba.vtaps[k] / 6.0
- * dml_min(
- 1.0,
- mode_lib->vba.HRatio[k]),
- mode_lib->vba.HRatio[k]
- * mode_lib->vba.VRatio[k]
- / mode_lib->vba.PSCL_FACTOR[k],
- mode_lib->vba.VTAPsChroma[k]
- / 6.0
- * dml_min(
- 1.0,
- mode_lib->vba.HRatio[k]
- / 2.0),
- mode_lib->vba.HRatio[k]
- * mode_lib->vba.VRatio[k]
- / 4.0
- / mode_lib->vba.PSCL_FACTOR_CHROMA[k],
- 1.0);
- if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
- || mode_lib->vba.HTAPsChroma[k] > 6.0
- || mode_lib->vba.VTAPsChroma[k] > 6.0)
- && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- < 2.0 * mode_lib->vba.PixelClock[k]) {
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
- * mode_lib->vba.PixelClock[k];
- }
- }
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- Calculate256BBlockSizes(
- mode_lib->vba.SourcePixelFormat[k],
- mode_lib->vba.SurfaceTiling[k],
- dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
- dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
- &mode_lib->vba.Read256BlockHeightY[k],
- &mode_lib->vba.Read256BlockHeightC[k],
- &mode_lib->vba.Read256BlockWidthY[k],
- &mode_lib->vba.Read256BlockWidthC[k]);
- if (mode_lib->vba.SourceScan[k] == dm_horz) {
- mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockHeightY[k];
- mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockHeightC[k];
- } else {
- mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockWidthY[k];
- mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockWidthC[k];
- }
- if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
- || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
- || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
- || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
- if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
- || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
- && (mode_lib->vba.SurfaceTiling[k]
- == dm_sw_4kb_s
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_4kb_s_x
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_64kb_s
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_64kb_s_t
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_64kb_s_x
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_var_s
- || mode_lib->vba.SurfaceTiling[k]
- == dm_sw_var_s_x)
- && mode_lib->vba.SourceScan[k] == dm_horz)) {
- mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
- } else {
- mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
- / 2.0;
- }
- mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
- } else {
- if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
- mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
- mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
- && mode_lib->vba.SourceScan[k] == dm_horz) {
- mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
- / 2.0;
- mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
- } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
- && mode_lib->vba.SourceScan[k] == dm_horz) {
- mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k]
- / 2.0;
- mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
- } else {
- mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
- mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
- }
- }
- if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
- mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
- } else {
- mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
- }
- mode_lib->vba.MaximumSwathWidthInDETBuffer =
- dml_min(
- mode_lib->vba.MaximumSwathWidthSupport,
- mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0
- / (mode_lib->vba.BytePerPixelInDETY[k]
- * mode_lib->vba.MinSwathHeightY[k]
- + mode_lib->vba.BytePerPixelInDETC[k]
- / 2.0
- * mode_lib->vba.MinSwathHeightC[k]));
- if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
- mode_lib->vba.MaximumSwathWidthInLineBuffer =
- mode_lib->vba.LineBufferSize
- * dml_max(mode_lib->vba.HRatio[k], 1.0)
- / mode_lib->vba.LBBitPerPixel[k]
- / (mode_lib->vba.vtaps[k]
- + dml_max(
- dml_ceil(
- mode_lib->vba.VRatio[k],
- 1.0)
- - 2,
- 0.0));
- } else {
- mode_lib->vba.MaximumSwathWidthInLineBuffer =
- dml_min(
- mode_lib->vba.LineBufferSize
- * dml_max(
- mode_lib->vba.HRatio[k],
- 1.0)
- / mode_lib->vba.LBBitPerPixel[k]
- / (mode_lib->vba.vtaps[k]
- + dml_max(
- dml_ceil(
- mode_lib->vba.VRatio[k],
- 1.0)
- - 2,
- 0.0)),
- 2.0 * mode_lib->vba.LineBufferSize
- * dml_max(
- mode_lib->vba.HRatio[k]
- / 2.0,
- 1.0)
- / mode_lib->vba.LBBitPerPixel[k]
- / (mode_lib->vba.VTAPsChroma[k]
- + dml_max(
- dml_ceil(
- mode_lib->vba.VRatio[k]
- / 2.0,
- 1.0)
- - 2,
- 0.0)));
- }
- mode_lib->vba.MaximumSwathWidth[k] = dml_min(
- mode_lib->vba.MaximumSwathWidthInDETBuffer,
- mode_lib->vba.MaximumSwathWidthInLineBuffer);
- }
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
- mode_lib->vba.MaxDispclk[i],
- mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
- mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
- mode_lib->vba.MaxDppclk[i],
- mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
- mode_lib->vba.RequiredDISPCLK[i] = 0.0;
- mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
- mode_lib->vba.PixelClock[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- * (1.0
- + mode_lib->vba.DISPCLKRampingMargin
- / 100.0);
- if (mode_lib->vba.ODMCapability == true
- && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
- > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
- mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
- mode_lib->vba.PlaneRequiredDISPCLK =
- mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
- / 2.0;
- } else {
- mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
- mode_lib->vba.PlaneRequiredDISPCLK =
- mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
- }
- if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
- && mode_lib->vba.SwathWidthYSingleDPP[k]
- <= mode_lib->vba.MaximumSwathWidth[k]
- && mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
- mode_lib->vba.NoOfDPP[i][k] = 1;
- mode_lib->vba.RequiredDPPCLK[i][k] =
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0);
- } else {
- mode_lib->vba.NoOfDPP[i][k] = 2;
- mode_lib->vba.RequiredDPPCLK[i][k] =
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- / 2.0;
- }
- mode_lib->vba.RequiredDISPCLK[i] = dml_max(
- mode_lib->vba.RequiredDISPCLK[i],
- mode_lib->vba.PlaneRequiredDISPCLK);
- if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k] / mode_lib->vba.NoOfDPP[i][k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
- || (mode_lib->vba.PlaneRequiredDISPCLK
- > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
- mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
- }
- }
- mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.TotalNumberOfActiveDPP[i] =
- mode_lib->vba.TotalNumberOfActiveDPP[i]
- + mode_lib->vba.NoOfDPP[i][k];
- }
- if ((mode_lib->vba.MaxDispclk[i] == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
- && mode_lib->vba.MaxDppclk[i]
- == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])
- && (mode_lib->vba.TotalNumberOfActiveDPP[i]
- > mode_lib->vba.MaxNumDPP
- || mode_lib->vba.DISPCLK_DPPCLK_Support[i] == false)) {
- mode_lib->vba.RequiredDISPCLK[i] = 0.0;
- mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
- mode_lib->vba.PixelClock[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0);
- if (mode_lib->vba.ODMCapability == true
- && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
- > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
- mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
- mode_lib->vba.PlaneRequiredDISPCLK =
- mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
- / 2.0;
- } else {
- mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
- mode_lib->vba.PlaneRequiredDISPCLK =
- mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
- }
- if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
- && mode_lib->vba.SwathWidthYSingleDPP[k]
- <= mode_lib->vba.MaximumSwathWidth[k]
- && mode_lib->vba.ODMCombineEnablePerState[i][k]
- == false) {
- mode_lib->vba.NoOfDPP[i][k] = 1;
- mode_lib->vba.RequiredDPPCLK[i][k] =
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0);
- } else {
- mode_lib->vba.NoOfDPP[i][k] = 2;
- mode_lib->vba.RequiredDPPCLK[i][k] =
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- / 2.0;
- }
- mode_lib->vba.RequiredDISPCLK[i] = dml_max(
- mode_lib->vba.RequiredDISPCLK[i],
- mode_lib->vba.PlaneRequiredDISPCLK);
- if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- / mode_lib->vba.NoOfDPP[i][k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
- || (mode_lib->vba.PlaneRequiredDISPCLK
- > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
- mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
- }
- }
- mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.TotalNumberOfActiveDPP[i] =
- mode_lib->vba.TotalNumberOfActiveDPP[i]
- + mode_lib->vba.NoOfDPP[i][k];
- }
- }
- if (mode_lib->vba.TotalNumberOfActiveDPP[i] > mode_lib->vba.MaxNumDPP) {
- mode_lib->vba.RequiredDISPCLK[i] = 0.0;
- mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
- if (mode_lib->vba.SwathWidthYSingleDPP[k]
- <= mode_lib->vba.MaximumSwathWidth[k]) {
- mode_lib->vba.NoOfDPP[i][k] = 1;
- mode_lib->vba.RequiredDPPCLK[i][k] =
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0);
- } else {
- mode_lib->vba.NoOfDPP[i][k] = 2;
- mode_lib->vba.RequiredDPPCLK[i][k] =
- mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- / 2.0;
- }
- if (!(mode_lib->vba.MaxDispclk[i]
- == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
- && mode_lib->vba.MaxDppclk[i]
- == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])) {
- mode_lib->vba.PlaneRequiredDISPCLK =
- mode_lib->vba.PixelClock[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- * (1.0
- + mode_lib->vba.DISPCLKRampingMargin
- / 100.0);
- } else {
- mode_lib->vba.PlaneRequiredDISPCLK =
- mode_lib->vba.PixelClock[k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0);
- }
- mode_lib->vba.RequiredDISPCLK[i] = dml_max(
- mode_lib->vba.RequiredDISPCLK[i],
- mode_lib->vba.PlaneRequiredDISPCLK);
- if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
- / mode_lib->vba.NoOfDPP[i][k]
- * (1.0
- + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
- || (mode_lib->vba.PlaneRequiredDISPCLK
- > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
- mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
- }
- }
- mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.TotalNumberOfActiveDPP[i] =
- mode_lib->vba.TotalNumberOfActiveDPP[i]
- + mode_lib->vba.NoOfDPP[i][k];
- }
- }
- mode_lib->vba.RequiredDISPCLK[i] = dml_max(
- mode_lib->vba.RequiredDISPCLK[i],
- mode_lib->vba.WritebackRequiredDISPCLK);
- if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
- < mode_lib->vba.WritebackRequiredDISPCLK) {
- mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
- }
- }
- /*Viewport Size Check*/
-
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.ViewportSizeSupport[i] = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
- if (dml_min(
- mode_lib->vba.SwathWidthYSingleDPP[k],
- dml_round(
- mode_lib->vba.HActive[k] / 2.0
- * mode_lib->vba.HRatio[k]))
- > mode_lib->vba.MaximumSwathWidth[k]) {
- mode_lib->vba.ViewportSizeSupport[i] = false;
- }
- } else {
- if (mode_lib->vba.SwathWidthYSingleDPP[k] / 2.0
- > mode_lib->vba.MaximumSwathWidth[k]) {
- mode_lib->vba.ViewportSizeSupport[i] = false;
- }
- }
- }
- }
- /*Total Available Pipes Support Check*/
-
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- if (mode_lib->vba.TotalNumberOfActiveDPP[i] <= mode_lib->vba.MaxNumDPP) {
- mode_lib->vba.TotalAvailablePipesSupport[i] = true;
- } else {
- mode_lib->vba.TotalAvailablePipesSupport[i] = false;
- }
- }
- /*Total Available OTG Support Check*/
-
- mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.BlendingAndTiming[k] == k) {
- mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
- + 1.0;
- }
- }
- if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) {
- mode_lib->vba.NumberOfOTGSupport = true;
- } else {
- mode_lib->vba.NumberOfOTGSupport = false;
- }
- /*Display IO and DSC Support Check*/
-
- mode_lib->vba.NonsupportedDSCInputBPC = false;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
- || mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
- || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
- mode_lib->vba.NonsupportedDSCInputBPC = true;
- }
- }
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.RequiresDSC[i][k] = 0;
- mode_lib->vba.RequiresFEC[i][k] = 0;
- if (mode_lib->vba.BlendingAndTiming[k] == k) {
- if (mode_lib->vba.Output[k] == dm_hdmi) {
- mode_lib->vba.RequiresDSC[i][k] = 0;
- mode_lib->vba.RequiresFEC[i][k] = 0;
- mode_lib->vba.OutputBppPerState[i][k] =
- TruncToValidBPP(
- dml_min(
- 600.0,
- mode_lib->vba.PHYCLKPerState[i])
- / mode_lib->vba.PixelClockBackEnd[k]
- * 24,
- false,
- mode_lib->vba.Output[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.DSCInputBitPerComponent[k]);
- } else if (mode_lib->vba.Output[k] == dm_dp
- || mode_lib->vba.Output[k] == dm_edp) {
- if (mode_lib->vba.Output[k] == dm_edp) {
- mode_lib->vba.EffectiveFECOverhead = 0.0;
- } else {
- mode_lib->vba.EffectiveFECOverhead =
- mode_lib->vba.FECOverhead;
- }
- if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
- mode_lib->vba.Outbpp =
- TruncToValidBPP(
- (1.0
- - mode_lib->vba.Downspreading
- / 100.0)
- * 270.0
- * mode_lib->vba.OutputLinkDPLanes[k]
- / mode_lib->vba.PixelClockBackEnd[k]
- * 8.0,
- false,
- mode_lib->vba.Output[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.DSCInputBitPerComponent[k]);
- mode_lib->vba.OutbppDSC =
- TruncToValidBPP(
- (1.0
- - mode_lib->vba.Downspreading
- / 100.0)
- * (1.0
- - mode_lib->vba.EffectiveFECOverhead
- / 100.0)
- * 270.0
- * mode_lib->vba.OutputLinkDPLanes[k]
- / mode_lib->vba.PixelClockBackEnd[k]
- * 8.0,
- true,
- mode_lib->vba.Output[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.DSCInputBitPerComponent[k]);
- if (mode_lib->vba.DSCEnabled[k] == true) {
- mode_lib->vba.RequiresDSC[i][k] = true;
- if (mode_lib->vba.Output[k] == dm_dp) {
- mode_lib->vba.RequiresFEC[i][k] =
- true;
- } else {
- mode_lib->vba.RequiresFEC[i][k] =
- false;
- }
- mode_lib->vba.Outbpp =
- mode_lib->vba.OutbppDSC;
- } else {
- mode_lib->vba.RequiresDSC[i][k] = false;
- mode_lib->vba.RequiresFEC[i][k] = false;
- }
- mode_lib->vba.OutputBppPerState[i][k] =
- mode_lib->vba.Outbpp;
- }
- if (mode_lib->vba.Outbpp == 0) {
- mode_lib->vba.Outbpp =
- TruncToValidBPP(
- (1.0
- - mode_lib->vba.Downspreading
- / 100.0)
- * 540.0
- * mode_lib->vba.OutputLinkDPLanes[k]
- / mode_lib->vba.PixelClockBackEnd[k]
- * 8.0,
- false,
- mode_lib->vba.Output[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.DSCInputBitPerComponent[k]);
- mode_lib->vba.OutbppDSC =
- TruncToValidBPP(
- (1.0
- - mode_lib->vba.Downspreading
- / 100.0)
- * (1.0
- - mode_lib->vba.EffectiveFECOverhead
- / 100.0)
- * 540.0
- * mode_lib->vba.OutputLinkDPLanes[k]
- / mode_lib->vba.PixelClockBackEnd[k]
- * 8.0,
- true,
- mode_lib->vba.Output[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.DSCInputBitPerComponent[k]);
- if (mode_lib->vba.DSCEnabled[k] == true) {
- mode_lib->vba.RequiresDSC[i][k] = true;
- if (mode_lib->vba.Output[k] == dm_dp) {
- mode_lib->vba.RequiresFEC[i][k] =
- true;
- } else {
- mode_lib->vba.RequiresFEC[i][k] =
- false;
- }
- mode_lib->vba.Outbpp =
- mode_lib->vba.OutbppDSC;
- } else {
- mode_lib->vba.RequiresDSC[i][k] = false;
- mode_lib->vba.RequiresFEC[i][k] = false;
- }
- mode_lib->vba.OutputBppPerState[i][k] =
- mode_lib->vba.Outbpp;
- }
- if (mode_lib->vba.Outbpp == 0
- && mode_lib->vba.PHYCLKPerState[i]
- >= 810.0) {
- mode_lib->vba.Outbpp =
- TruncToValidBPP(
- (1.0
- - mode_lib->vba.Downspreading
- / 100.0)
- * 810.0
- * mode_lib->vba.OutputLinkDPLanes[k]
- / mode_lib->vba.PixelClockBackEnd[k]
- * 8.0,
- false,
- mode_lib->vba.Output[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.DSCInputBitPerComponent[k]);
- mode_lib->vba.OutbppDSC =
- TruncToValidBPP(
- (1.0
- - mode_lib->vba.Downspreading
- / 100.0)
- * (1.0
- - mode_lib->vba.EffectiveFECOverhead
- / 100.0)
- * 810.0
- * mode_lib->vba.OutputLinkDPLanes[k]
- / mode_lib->vba.PixelClockBackEnd[k]
- * 8.0,
- true,
- mode_lib->vba.Output[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.DSCInputBitPerComponent[k]);
- if (mode_lib->vba.DSCEnabled[k] == true
- || mode_lib->vba.Outbpp == 0) {
- mode_lib->vba.RequiresDSC[i][k] = true;
- if (mode_lib->vba.Output[k] == dm_dp) {
- mode_lib->vba.RequiresFEC[i][k] =
- true;
- } else {
- mode_lib->vba.RequiresFEC[i][k] =
- false;
- }
- mode_lib->vba.Outbpp =
- mode_lib->vba.OutbppDSC;
- } else {
- mode_lib->vba.RequiresDSC[i][k] = false;
- mode_lib->vba.RequiresFEC[i][k] = false;
- }
- mode_lib->vba.OutputBppPerState[i][k] =
- mode_lib->vba.Outbpp;
- }
- }
- } else {
- mode_lib->vba.OutputBppPerState[i][k] = 0;
- }
- }
- }
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.DIOSupport[i] = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.OutputBppPerState[i][k] == 0
- || (mode_lib->vba.OutputFormat[k] == dm_420
- && mode_lib->vba.ProgressiveToInterlaceUnitInOPP
- == true)) {
- mode_lib->vba.DIOSupport[i] = false;
- }
- }
- }
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.BlendingAndTiming[k] == k) {
- if ((mode_lib->vba.Output[k] == dm_dp
- || mode_lib->vba.Output[k] == dm_edp)) {
- if (mode_lib->vba.OutputFormat[k] == dm_420
- || mode_lib->vba.OutputFormat[k]
- == dm_n422) {
- mode_lib->vba.DSCFormatFactor = 2;
- } else {
- mode_lib->vba.DSCFormatFactor = 1;
- }
- if (mode_lib->vba.RequiresDSC[i][k] == true) {
- if (mode_lib->vba.ODMCombineEnablePerState[i][k]
- == true) {
- if (mode_lib->vba.PixelClockBackEnd[k] / 6.0
- / mode_lib->vba.DSCFormatFactor
- > (1.0
- - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- * mode_lib->vba.MaxDSCCLK[i]) {
- mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
- true;
- }
- } else {
- if (mode_lib->vba.PixelClockBackEnd[k] / 3.0
- / mode_lib->vba.DSCFormatFactor
- > (1.0
- - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
- / 100.0)
- * mode_lib->vba.MaxDSCCLK[i]) {
- mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
- true;
- }
- }
- }
- }
- }
- }
- }
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.NotEnoughDSCUnits[i] = false;
- mode_lib->vba.TotalDSCUnitsRequired = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.RequiresDSC[i][k] == true) {
- if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
- mode_lib->vba.TotalDSCUnitsRequired =
- mode_lib->vba.TotalDSCUnitsRequired + 2.0;
- } else {
- mode_lib->vba.TotalDSCUnitsRequired =
- mode_lib->vba.TotalDSCUnitsRequired + 1.0;
- }
- }
- }
- if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
- mode_lib->vba.NotEnoughDSCUnits[i] = true;
- }
- }
- /*DSC Delay per state*/
-
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.BlendingAndTiming[k] != k) {
- mode_lib->vba.slices = 0;
- } else if (mode_lib->vba.RequiresDSC[i][k] == false) {
- mode_lib->vba.slices = 0;
- } else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
- mode_lib->vba.slices = dml_ceil(
- mode_lib->vba.PixelClockBackEnd[k] / 400.0,
- 4.0);
- } else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
- mode_lib->vba.slices = 8.0;
- } else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
- mode_lib->vba.slices = 4.0;
- } else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
- mode_lib->vba.slices = 2.0;
- } else {
- mode_lib->vba.slices = 1.0;
- }
- if (mode_lib->vba.OutputBppPerState[i][k] == 0) {
- mode_lib->vba.bpp = 0.0;
- } else {
- mode_lib->vba.bpp = mode_lib->vba.OutputBppPerState[i][k];
- }
- if (mode_lib->vba.RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
- if (mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
- mode_lib->vba.DSCDelayPerState[i][k] =
- dscceComputeDelay(
- mode_lib->vba.DSCInputBitPerComponent[k],
- mode_lib->vba.bpp,
- dml_ceil(
- mode_lib->vba.HActive[k]
- / mode_lib->vba.slices,
- 1.0),
- mode_lib->vba.slices,
- mode_lib->vba.OutputFormat[k])
- + dscComputeDelay(
- mode_lib->vba.OutputFormat[k]);
- } else {
- mode_lib->vba.DSCDelayPerState[i][k] =
- 2.0
- * (dscceComputeDelay(
- mode_lib->vba.DSCInputBitPerComponent[k],
- mode_lib->vba.bpp,
- dml_ceil(
- mode_lib->vba.HActive[k]
- / mode_lib->vba.slices,
- 1.0),
- mode_lib->vba.slices
- / 2,
- mode_lib->vba.OutputFormat[k])
- + dscComputeDelay(
- mode_lib->vba.OutputFormat[k]));
- }
- mode_lib->vba.DSCDelayPerState[i][k] =
- mode_lib->vba.DSCDelayPerState[i][k]
- * mode_lib->vba.PixelClock[k]
- / mode_lib->vba.PixelClockBackEnd[k];
- } else {
- mode_lib->vba.DSCDelayPerState[i][k] = 0.0;
- }
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
- if (mode_lib->vba.BlendingAndTiming[k] == j
- && mode_lib->vba.RequiresDSC[i][j] == true) {
- mode_lib->vba.DSCDelayPerState[i][k] =
- mode_lib->vba.DSCDelayPerState[i][j];
- }
- }
- }
- }
- /*Urgent Latency Support Check*/
-
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
- mode_lib->vba.SwathWidthYPerState[i][k] =
- dml_min(
- mode_lib->vba.SwathWidthYSingleDPP[k],
- dml_round(
- mode_lib->vba.HActive[k]
- / 2.0
- * mode_lib->vba.HRatio[k]));
- } else {
- mode_lib->vba.SwathWidthYPerState[i][k] =
- mode_lib->vba.SwathWidthYSingleDPP[k]
- / mode_lib->vba.NoOfDPP[i][k];
- }
- mode_lib->vba.SwathWidthGranularityY = 256.0
- / dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
- / mode_lib->vba.MaxSwathHeightY[k];
- mode_lib->vba.RoundedUpMaxSwathSizeBytesY = (dml_ceil(
- mode_lib->vba.SwathWidthYPerState[i][k] - 1.0,
- mode_lib->vba.SwathWidthGranularityY)
- + mode_lib->vba.SwathWidthGranularityY)
- * mode_lib->vba.BytePerPixelInDETY[k]
- * mode_lib->vba.MaxSwathHeightY[k];
- if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
- mode_lib->vba.RoundedUpMaxSwathSizeBytesY = dml_ceil(
- mode_lib->vba.RoundedUpMaxSwathSizeBytesY,
- 256.0) + 256;
- }
- if (mode_lib->vba.MaxSwathHeightC[k] > 0.0) {
- mode_lib->vba.SwathWidthGranularityC = 256.0
- / dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
- / mode_lib->vba.MaxSwathHeightC[k];
- mode_lib->vba.RoundedUpMaxSwathSizeBytesC = (dml_ceil(
- mode_lib->vba.SwathWidthYPerState[i][k] / 2.0 - 1.0,
- mode_lib->vba.SwathWidthGranularityC)
- + mode_lib->vba.SwathWidthGranularityC)
- * mode_lib->vba.BytePerPixelInDETC[k]
- * mode_lib->vba.MaxSwathHeightC[k];
- if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
- mode_lib->vba.RoundedUpMaxSwathSizeBytesC = dml_ceil(
- mode_lib->vba.RoundedUpMaxSwathSizeBytesC,
- 256.0) + 256;
- }
- } else {
- mode_lib->vba.RoundedUpMaxSwathSizeBytesC = 0.0;
- }
- if (mode_lib->vba.RoundedUpMaxSwathSizeBytesY
- + mode_lib->vba.RoundedUpMaxSwathSizeBytesC
- <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
- mode_lib->vba.SwathHeightYPerState[i][k] =
- mode_lib->vba.MaxSwathHeightY[k];
- mode_lib->vba.SwathHeightCPerState[i][k] =
- mode_lib->vba.MaxSwathHeightC[k];
- } else {
- mode_lib->vba.SwathHeightYPerState[i][k] =
- mode_lib->vba.MinSwathHeightY[k];
- mode_lib->vba.SwathHeightCPerState[i][k] =
- mode_lib->vba.MinSwathHeightC[k];
- }
- if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
- mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
- * 1024.0 / mode_lib->vba.BytePerPixelInDETY[k]
- / mode_lib->vba.SwathWidthYPerState[i][k];
- mode_lib->vba.LinesInDETChroma = 0.0;
- } else if (mode_lib->vba.SwathHeightYPerState[i][k]
- <= mode_lib->vba.SwathHeightCPerState[i][k]) {
- mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
- * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETY[k]
- / mode_lib->vba.SwathWidthYPerState[i][k];
- mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
- * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETC[k]
- / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
- } else {
- mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
- * 1024.0 * 2.0 / 3.0
- / mode_lib->vba.BytePerPixelInDETY[k]
- / mode_lib->vba.SwathWidthYPerState[i][k];
- mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
- * 1024.0 / 3.0 / mode_lib->vba.BytePerPixelInDETY[k]
- / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
- }
- mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma =
- dml_min(
- mode_lib->vba.MaxLineBufferLines,
- dml_floor(
- mode_lib->vba.LineBufferSize
- / mode_lib->vba.LBBitPerPixel[k]
- / (mode_lib->vba.SwathWidthYPerState[i][k]
- / dml_max(
- mode_lib->vba.HRatio[k],
- 1.0)),
- 1.0))
- - (mode_lib->vba.vtaps[k] - 1.0);
- mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma =
- dml_min(
- mode_lib->vba.MaxLineBufferLines,
- dml_floor(
- mode_lib->vba.LineBufferSize
- / mode_lib->vba.LBBitPerPixel[k]
- / (mode_lib->vba.SwathWidthYPerState[i][k]
- / 2.0
- / dml_max(
- mode_lib->vba.HRatio[k]
- / 2.0,
- 1.0)),
- 1.0))
- - (mode_lib->vba.VTAPsChroma[k] - 1.0);
- mode_lib->vba.EffectiveDETLBLinesLuma =
- dml_floor(
- mode_lib->vba.LinesInDETLuma
- + dml_min(
- mode_lib->vba.LinesInDETLuma
- * mode_lib->vba.RequiredDISPCLK[i]
- * mode_lib->vba.BytePerPixelInDETY[k]
- * mode_lib->vba.PSCL_FACTOR[k]
- / mode_lib->vba.ReturnBWPerState[i],
- mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
- mode_lib->vba.SwathHeightYPerState[i][k]);
- mode_lib->vba.EffectiveDETLBLinesChroma =
- dml_floor(
- mode_lib->vba.LinesInDETChroma
- + dml_min(
- mode_lib->vba.LinesInDETChroma
- * mode_lib->vba.RequiredDISPCLK[i]
- * mode_lib->vba.BytePerPixelInDETC[k]
- * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
- / mode_lib->vba.ReturnBWPerState[i],
- mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
- mode_lib->vba.SwathHeightCPerState[i][k]);
- if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
- mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
- mode_lib->vba.EffectiveDETLBLinesLuma
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k])
- / mode_lib->vba.VRatio[k]
- - mode_lib->vba.EffectiveDETLBLinesLuma
- * mode_lib->vba.SwathWidthYPerState[i][k]
- * dml_ceil(
- mode_lib->vba.BytePerPixelInDETY[k],
- 1.0)
- / (mode_lib->vba.ReturnBWPerState[i]
- / mode_lib->vba.NoOfDPP[i][k]);
- } else {
- mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
- dml_min(
- mode_lib->vba.EffectiveDETLBLinesLuma
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k])
- / mode_lib->vba.VRatio[k]
- - mode_lib->vba.EffectiveDETLBLinesLuma
- * mode_lib->vba.SwathWidthYPerState[i][k]
- * dml_ceil(
- mode_lib->vba.BytePerPixelInDETY[k],
- 1.0)
- / (mode_lib->vba.ReturnBWPerState[i]
- / mode_lib->vba.NoOfDPP[i][k]),
- mode_lib->vba.EffectiveDETLBLinesChroma
- * (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k])
- / (mode_lib->vba.VRatio[k]
- / 2.0)
- - mode_lib->vba.EffectiveDETLBLinesChroma
- * mode_lib->vba.SwathWidthYPerState[i][k]
- / 2.0
- * dml_ceil(
- mode_lib->vba.BytePerPixelInDETC[k],
- 2.0)
- / (mode_lib->vba.ReturnBWPerState[i]
- / mode_lib->vba.NoOfDPP[i][k]));
- }
- }
- }
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.UrgentLatencySupport[i] = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.UrgentLatencySupportUsPerState[i][k]
- < mode_lib->vba.UrgentLatency / 1.0) {
- mode_lib->vba.UrgentLatencySupport[i] = false;
- }
- }
- }
- /*Prefetch Check*/
-
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.TotalNumberOfDCCActiveDPP[i] = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.DCCEnable[k] == true) {
- mode_lib->vba.TotalNumberOfDCCActiveDPP[i] =
- mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
- + mode_lib->vba.NoOfDPP[i][k];
- }
- }
- }
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep = 8.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
- mode_lib->vba.PixelClock[k] / 16.0);
- if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
- if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
- dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
- 1.1
- * dml_ceil(
- mode_lib->vba.BytePerPixelInDETY[k],
- 1.0)
- / 64.0
- * mode_lib->vba.HRatio[k]
- * mode_lib->vba.PixelClock[k]
- / mode_lib->vba.NoOfDPP[i][k]);
- } else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
- dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
- 1.1
- * dml_ceil(
- mode_lib->vba.BytePerPixelInDETY[k],
- 1.0)
- / 64.0
- * mode_lib->vba.PSCL_FACTOR[k]
- * mode_lib->vba.RequiredDPPCLK[i][k]);
- }
- } else {
- if (mode_lib->vba.VRatio[k] <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
- dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
- 1.1
- * dml_ceil(
- mode_lib->vba.BytePerPixelInDETY[k],
- 1.0)
- / 32.0
- * mode_lib->vba.HRatio[k]
- * mode_lib->vba.PixelClock[k]
- / mode_lib->vba.NoOfDPP[i][k]);
- } else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
- dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
- 1.1
- * dml_ceil(
- mode_lib->vba.BytePerPixelInDETY[k],
- 1.0)
- / 32.0
- * mode_lib->vba.PSCL_FACTOR[k]
- * mode_lib->vba.RequiredDPPCLK[i][k]);
- }
- if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
- dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
- 1.1
- * dml_ceil(
- mode_lib->vba.BytePerPixelInDETC[k],
- 2.0)
- / 32.0
- * mode_lib->vba.HRatio[k]
- / 2.0
- * mode_lib->vba.PixelClock[k]
- / mode_lib->vba.NoOfDPP[i][k]);
- } else {
- mode_lib->vba.ProjectedDCFCLKDeepSleep =
- dml_max(
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
- 1.1
- * dml_ceil(
- mode_lib->vba.BytePerPixelInDETC[k],
- 2.0)
- / 32.0
- * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
- * mode_lib->vba.RequiredDPPCLK[i][k]);
- }
- }
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
- mode_lib,
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.Read256BlockHeightY[k],
- mode_lib->vba.Read256BlockWidthY[k],
- mode_lib->vba.SourcePixelFormat[k],
- mode_lib->vba.SurfaceTiling[k],
- dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
- mode_lib->vba.SourceScan[k],
- mode_lib->vba.ViewportWidth[k],
- mode_lib->vba.ViewportHeight[k],
- mode_lib->vba.SwathWidthYPerState[i][k],
- mode_lib->vba.VirtualMemoryEnable,
- mode_lib->vba.VMMPageSize,
- mode_lib->vba.PTEBufferSizeInRequests,
- mode_lib->vba.PDEProcessingBufIn64KBReqs,
- mode_lib->vba.PitchY[k],
- mode_lib->vba.DCCMetaPitchY[k],
- &mode_lib->vba.MacroTileWidthY[k],
- &mode_lib->vba.MetaRowBytesY,
- &mode_lib->vba.DPTEBytesPerRowY,
- &mode_lib->vba.PTEBufferSizeNotExceededY[i][k],
- &mode_lib->vba.dpte_row_height[k],
- &mode_lib->vba.meta_row_height[k]);
- mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
- mode_lib,
- mode_lib->vba.VRatio[k],
- mode_lib->vba.vtaps[k],
- mode_lib->vba.Interlace[k],
- mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
- mode_lib->vba.SwathHeightYPerState[i][k],
- mode_lib->vba.ViewportYStartY[k],
- &mode_lib->vba.PrefillY[k],
- &mode_lib->vba.MaxNumSwY[k]);
- if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
- mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
- mode_lib,
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.Read256BlockHeightY[k],
- mode_lib->vba.Read256BlockWidthY[k],
- mode_lib->vba.SourcePixelFormat[k],
- mode_lib->vba.SurfaceTiling[k],
- dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
- mode_lib->vba.SourceScan[k],
- mode_lib->vba.ViewportWidth[k] / 2.0,
- mode_lib->vba.ViewportHeight[k] / 2.0,
- mode_lib->vba.SwathWidthYPerState[i][k] / 2.0,
- mode_lib->vba.VirtualMemoryEnable,
- mode_lib->vba.VMMPageSize,
- mode_lib->vba.PTEBufferSizeInRequests,
- mode_lib->vba.PDEProcessingBufIn64KBReqs,
- mode_lib->vba.PitchC[k],
- 0.0,
- &mode_lib->vba.MacroTileWidthC[k],
- &mode_lib->vba.MetaRowBytesC,
- &mode_lib->vba.DPTEBytesPerRowC,
- &mode_lib->vba.PTEBufferSizeNotExceededC[i][k],
- &mode_lib->vba.dpte_row_height_chroma[k],
- &mode_lib->vba.meta_row_height_chroma[k]);
- mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
- mode_lib,
- mode_lib->vba.VRatio[k] / 2.0,
- mode_lib->vba.VTAPsChroma[k],
- mode_lib->vba.Interlace[k],
- mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
- mode_lib->vba.SwathHeightCPerState[i][k],
- mode_lib->vba.ViewportYStartC[k],
- &mode_lib->vba.PrefillC[k],
- &mode_lib->vba.MaxNumSwC[k]);
- } else {
- mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
- mode_lib->vba.MetaRowBytesC = 0.0;
- mode_lib->vba.DPTEBytesPerRowC = 0.0;
- mode_lib->vba.PrefetchLinesC[k] = 0.0;
- mode_lib->vba.PTEBufferSizeNotExceededC[i][k] = true;
- }
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] =
- mode_lib->vba.PDEAndMetaPTEBytesPerFrameY
- + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
- mode_lib->vba.MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY
- + mode_lib->vba.MetaRowBytesC;
- mode_lib->vba.DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY
- + mode_lib->vba.DPTEBytesPerRowC;
- }
- mode_lib->vba.ExtraLatency =
- mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]
- + (mode_lib->vba.TotalNumberOfActiveDPP[i]
- * mode_lib->vba.PixelChunkSizeInKByte
- + mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
- * mode_lib->vba.MetaChunkSize)
- * 1024.0
- / mode_lib->vba.ReturnBWPerState[i];
- if (mode_lib->vba.VirtualMemoryEnable == true) {
- mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
- + mode_lib->vba.TotalNumberOfActiveDPP[i]
- * mode_lib->vba.PTEChunkSize * 1024.0
- / mode_lib->vba.ReturnBWPerState[i];
- }
- mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.BlendingAndTiming[k] == k) {
- if (mode_lib->vba.WritebackEnable[k] == true) {
- mode_lib->vba.WritebackDelay[i][k] =
- mode_lib->vba.WritebackLatency
- + CalculateWriteBackDelay(
- mode_lib->vba.WritebackPixelFormat[k],
- mode_lib->vba.WritebackHRatio[k],
- mode_lib->vba.WritebackVRatio[k],
- mode_lib->vba.WritebackLumaHTaps[k],
- mode_lib->vba.WritebackLumaVTaps[k],
- mode_lib->vba.WritebackChromaHTaps[k],
- mode_lib->vba.WritebackChromaVTaps[k],
- mode_lib->vba.WritebackDestinationWidth[k])
- / mode_lib->vba.RequiredDISPCLK[i];
- } else {
- mode_lib->vba.WritebackDelay[i][k] = 0.0;
- }
- for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
- if (mode_lib->vba.BlendingAndTiming[j] == k
- && mode_lib->vba.WritebackEnable[j]
- == true) {
- mode_lib->vba.WritebackDelay[i][k] =
- dml_max(
- mode_lib->vba.WritebackDelay[i][k],
- mode_lib->vba.WritebackLatency
- + CalculateWriteBackDelay(
- mode_lib->vba.WritebackPixelFormat[j],
- mode_lib->vba.WritebackHRatio[j],
- mode_lib->vba.WritebackVRatio[j],
- mode_lib->vba.WritebackLumaHTaps[j],
- mode_lib->vba.WritebackLumaVTaps[j],
- mode_lib->vba.WritebackChromaHTaps[j],
- mode_lib->vba.WritebackChromaVTaps[j],
- mode_lib->vba.WritebackDestinationWidth[j])
- / mode_lib->vba.RequiredDISPCLK[i]);
- }
- }
- }
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
- if (mode_lib->vba.BlendingAndTiming[k] == j) {
- mode_lib->vba.WritebackDelay[i][k] =
- mode_lib->vba.WritebackDelay[i][j];
- }
- }
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.MaximumVStartup[k] =
- mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
- - dml_max(
- 1.0,
- dml_ceil(
- mode_lib->vba.WritebackDelay[i][k]
- / (mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]),
- 1.0));
- }
- mode_lib->vba.TWait = CalculateTWait(
- mode_lib->vba.PrefetchMode,
- mode_lib->vba.DRAMClockChangeLatency,
- mode_lib->vba.UrgentLatency,
- mode_lib->vba.SREnterPlusExitTime);
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.XFCEnabled[k] == true) {
- mode_lib->vba.XFCRemoteSurfaceFlipDelay =
- CalculateRemoteSurfaceFlipDelay(
- mode_lib,
- mode_lib->vba.VRatio[k],
- mode_lib->vba.SwathWidthYPerState[i][k],
- dml_ceil(
- mode_lib->vba.BytePerPixelInDETY[k],
- 1.0),
- mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k],
- mode_lib->vba.XFCTSlvVupdateOffset,
- mode_lib->vba.XFCTSlvVupdateWidth,
- mode_lib->vba.XFCTSlvVreadyOffset,
- mode_lib->vba.XFCXBUFLatencyTolerance,
- mode_lib->vba.XFCFillBWOverhead,
- mode_lib->vba.XFCSlvChunkSize,
- mode_lib->vba.XFCBusTransportTime,
- mode_lib->vba.TimeCalc,
- mode_lib->vba.TWait,
- &mode_lib->vba.SrcActiveDrainRate,
- &mode_lib->vba.TInitXFill,
- &mode_lib->vba.TslvChk);
- } else {
- mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
- }
- mode_lib->vba.IsErrorResult[i][k] =
- CalculatePrefetchSchedule(
- mode_lib,
- mode_lib->vba.RequiredDPPCLK[i][k],
- mode_lib->vba.RequiredDISPCLK[i],
- mode_lib->vba.PixelClock[k],
- mode_lib->vba.ProjectedDCFCLKDeepSleep,
- mode_lib->vba.DSCDelayPerState[i][k],
- mode_lib->vba.NoOfDPP[i][k],
- mode_lib->vba.ScalerEnabled[k],
- mode_lib->vba.NumberOfCursors[k],
- mode_lib->vba.DPPCLKDelaySubtotal,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- mode_lib->vba.SwathWidthYPerState[i][k]
- / mode_lib->vba.HRatio[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.VTotal[k]
- - mode_lib->vba.VActive[k],
- mode_lib->vba.HTotal[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
- mode_lib->vba.MaximumVStartup[k],
- mode_lib->vba.MaxPageTableLevels,
- mode_lib->vba.VirtualMemoryEnable,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.UrgentLatency,
- mode_lib->vba.ExtraLatency,
- mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
- mode_lib->vba.PrefetchLinesY[k],
- mode_lib->vba.SwathWidthYPerState[i][k],
- mode_lib->vba.BytePerPixelInDETY[k],
- mode_lib->vba.PrefillY[k],
- mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[k],
- mode_lib->vba.BytePerPixelInDETC[k],
- mode_lib->vba.PrefillC[k],
- mode_lib->vba.MaxNumSwC[k],
- mode_lib->vba.SwathHeightYPerState[i][k],
- mode_lib->vba.SwathHeightCPerState[i][k],
- mode_lib->vba.TWait,
- mode_lib->vba.XFCEnabled[k],
- mode_lib->vba.XFCRemoteSurfaceFlipDelay,
- mode_lib->vba.Interlace[k],
- mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
- mode_lib->vba.DSTXAfterScaler,
- mode_lib->vba.DSTYAfterScaler,
- &mode_lib->vba.LineTimesForPrefetch[k],
- &mode_lib->vba.PrefetchBW[k],
- &mode_lib->vba.LinesForMetaPTE[k],
- &mode_lib->vba.LinesForMetaAndDPTERow[k],
- &mode_lib->vba.VRatioPreY[i][k],
- &mode_lib->vba.VRatioPreC[i][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBW[i][k],
- &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
- &mode_lib->vba.Tno_bw[k],
- &mode_lib->vba.VUpdateOffsetPix[k],
- &mode_lib->vba.VUpdateWidthPix[k],
- &mode_lib->vba.VReadyOffsetPix[k]);
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.cursor_bw[k] = mode_lib->vba.NumberOfCursors[k]
- * mode_lib->vba.CursorWidth[k][0]
- * mode_lib->vba.CursorBPP[k][0] / 8.0
- / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
- * mode_lib->vba.VRatio[k];
- }
- mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
- mode_lib->vba.prefetch_vm_bw_valid = true;
- mode_lib->vba.prefetch_row_bw_valid = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] == 0.0) {
- mode_lib->vba.prefetch_vm_bw[k] = 0.0;
- } else if (mode_lib->vba.LinesForMetaPTE[k] > 0.0) {
- mode_lib->vba.prefetch_vm_bw[k] =
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
- / (mode_lib->vba.LinesForMetaPTE[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]);
- } else {
- mode_lib->vba.prefetch_vm_bw[k] = 0.0;
- mode_lib->vba.prefetch_vm_bw_valid = false;
- }
- if (mode_lib->vba.MetaRowBytes[k] + mode_lib->vba.DPTEBytesPerRow[k]
- == 0.0) {
- mode_lib->vba.prefetch_row_bw[k] = 0.0;
- } else if (mode_lib->vba.LinesForMetaAndDPTERow[k] > 0.0) {
- mode_lib->vba.prefetch_row_bw[k] = (mode_lib->vba.MetaRowBytes[k]
- + mode_lib->vba.DPTEBytesPerRow[k])
- / (mode_lib->vba.LinesForMetaAndDPTERow[k]
- * mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k]);
- } else {
- mode_lib->vba.prefetch_row_bw[k] = 0.0;
- mode_lib->vba.prefetch_row_bw_valid = false;
- }
- mode_lib->vba.MaximumReadBandwidthWithPrefetch =
- mode_lib->vba.MaximumReadBandwidthWithPrefetch
- + mode_lib->vba.cursor_bw[k]
- + dml_max4(
- mode_lib->vba.prefetch_vm_bw[k],
- mode_lib->vba.prefetch_row_bw[k],
- mode_lib->vba.ReadBandwidth[k],
- mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]);
- }
- mode_lib->vba.PrefetchSupported[i] = true;
- if (mode_lib->vba.MaximumReadBandwidthWithPrefetch
- > mode_lib->vba.ReturnBWPerState[i]
- || mode_lib->vba.prefetch_vm_bw_valid == false
- || mode_lib->vba.prefetch_row_bw_valid == false) {
- mode_lib->vba.PrefetchSupported[i] = false;
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.LineTimesForPrefetch[k] < 2.0
- || mode_lib->vba.LinesForMetaPTE[k] >= 8.0
- || mode_lib->vba.LinesForMetaAndDPTERow[k] >= 16.0
- || mode_lib->vba.IsErrorResult[i][k] == true) {
- mode_lib->vba.PrefetchSupported[i] = false;
- }
- }
- mode_lib->vba.VRatioInPrefetchSupported[i] = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.VRatioPreY[i][k] > 4.0
- || mode_lib->vba.VRatioPreC[i][k] > 4.0
- || mode_lib->vba.IsErrorResult[i][k] == true) {
- mode_lib->vba.VRatioInPrefetchSupported[i] = false;
- }
- }
- if (mode_lib->vba.PrefetchSupported[i] == true
- && mode_lib->vba.VRatioInPrefetchSupported[i] == true) {
- mode_lib->vba.BandwidthAvailableForImmediateFlip =
- mode_lib->vba.ReturnBWPerState[i];
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.BandwidthAvailableForImmediateFlip =
- mode_lib->vba.BandwidthAvailableForImmediateFlip
- - mode_lib->vba.cursor_bw[k]
- - dml_max(
- mode_lib->vba.ReadBandwidth[k],
- mode_lib->vba.PrefetchBW[k]);
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.ImmediateFlipBytes[k] = 0.0;
- if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
- && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
- mode_lib->vba.ImmediateFlipBytes[k] =
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
- + mode_lib->vba.MetaRowBytes[k]
- + mode_lib->vba.DPTEBytesPerRow[k];
- }
- }
- mode_lib->vba.TotImmediateFlipBytes = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
- && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
- mode_lib->vba.TotImmediateFlipBytes =
- mode_lib->vba.TotImmediateFlipBytes
- + mode_lib->vba.ImmediateFlipBytes[k];
- }
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- CalculateFlipSchedule(
- mode_lib,
- mode_lib->vba.ExtraLatency,
- mode_lib->vba.UrgentLatency,
- mode_lib->vba.MaxPageTableLevels,
- mode_lib->vba.VirtualMemoryEnable,
- mode_lib->vba.BandwidthAvailableForImmediateFlip,
- mode_lib->vba.TotImmediateFlipBytes,
- mode_lib->vba.SourcePixelFormat[k],
- mode_lib->vba.ImmediateFlipBytes[k],
- mode_lib->vba.HTotal[k]
- / mode_lib->vba.PixelClock[k],
- mode_lib->vba.VRatio[k],
- mode_lib->vba.Tno_bw[k],
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
- mode_lib->vba.MetaRowBytes[k],
- mode_lib->vba.DPTEBytesPerRow[k],
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.dpte_row_height[k],
- mode_lib->vba.meta_row_height[k],
- mode_lib->vba.qual_row_bw[k],
- &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
- &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
- &mode_lib->vba.final_flip_bw[k],
- &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
- }
- mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.total_dcn_read_bw_with_flip =
- mode_lib->vba.total_dcn_read_bw_with_flip
- + mode_lib->vba.cursor_bw[k]
- + dml_max3(
- mode_lib->vba.prefetch_vm_bw[k],
- mode_lib->vba.prefetch_row_bw[k],
- mode_lib->vba.final_flip_bw[k]
- + dml_max(
- mode_lib->vba.ReadBandwidth[k],
- mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]));
- }
- mode_lib->vba.ImmediateFlipSupportedForState[i] = true;
- if (mode_lib->vba.total_dcn_read_bw_with_flip
- > mode_lib->vba.ReturnBWPerState[i]) {
- mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
- mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
- }
- }
- } else {
- mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
- }
- }
- /*PTE Buffer Size Check*/
-
- for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
- mode_lib->vba.PTEBufferSizeNotExceeded[i] = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.PTEBufferSizeNotExceededY[i][k] == false
- || mode_lib->vba.PTEBufferSizeNotExceededC[i][k] == false) {
- mode_lib->vba.PTEBufferSizeNotExceeded[i] = false;
- }
- }
- }
- /*Cursor Support Check*/
-
- mode_lib->vba.CursorSupport = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.CursorWidth[k][0] > 0.0) {
- if (dml_floor(
- dml_floor(
- mode_lib->vba.CursorBufferSize
- - mode_lib->vba.CursorChunkSize,
- mode_lib->vba.CursorChunkSize) * 1024.0
- / (mode_lib->vba.CursorWidth[k][0]
- * mode_lib->vba.CursorBPP[k][0]
- / 8.0),
- 1.0)
- * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
- / mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatency
- || (mode_lib->vba.CursorBPP[k][0] == 64.0
- && mode_lib->vba.Cursor64BppSupport == false)) {
- mode_lib->vba.CursorSupport = false;
- }
- }
- }
- /*Valid Pitch Check*/
-
- mode_lib->vba.PitchSupport = true;
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.AlignedYPitch[k] = dml_ceil(
- dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
- mode_lib->vba.MacroTileWidthY[k]);
- if (mode_lib->vba.AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
- mode_lib->vba.PitchSupport = false;
- }
- if (mode_lib->vba.DCCEnable[k] == true) {
- mode_lib->vba.AlignedDCCMetaPitch[k] = dml_ceil(
- dml_max(
- mode_lib->vba.DCCMetaPitchY[k],
- mode_lib->vba.ViewportWidth[k]),
- 64.0 * mode_lib->vba.Read256BlockWidthY[k]);
- } else {
- mode_lib->vba.AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
- }
- if (mode_lib->vba.AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
- mode_lib->vba.PitchSupport = false;
- }
- if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
- mode_lib->vba.AlignedCPitch[k] = dml_ceil(
- dml_max(
- mode_lib->vba.PitchC[k],
- mode_lib->vba.ViewportWidth[k] / 2.0),
- mode_lib->vba.MacroTileWidthC[k]);
- } else {
- mode_lib->vba.AlignedCPitch[k] = mode_lib->vba.PitchC[k];
- }
- if (mode_lib->vba.AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
- mode_lib->vba.PitchSupport = false;
- }
- }
- /*Mode Support, Voltage State and SOC Configuration*/
-
- for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
- if (mode_lib->vba.ScaleRatioAndTapsSupport == true
- && mode_lib->vba.SourceFormatPixelAndScanSupport == true
- && mode_lib->vba.ViewportSizeSupport[i] == true
- && mode_lib->vba.BandwidthSupport[i] == true
- && mode_lib->vba.DIOSupport[i] == true
- && mode_lib->vba.NotEnoughDSCUnits[i] == false
- && mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] == false
- && mode_lib->vba.UrgentLatencySupport[i] == true
- && mode_lib->vba.ROBSupport[i] == true
- && mode_lib->vba.DISPCLK_DPPCLK_Support[i] == true
- && mode_lib->vba.TotalAvailablePipesSupport[i] == true
- && mode_lib->vba.NumberOfOTGSupport == true
- && mode_lib->vba.WritebackModeSupport == true
- && mode_lib->vba.WritebackLatencySupport == true
- && mode_lib->vba.WritebackScaleRatioAndTapsSupport == true
- && mode_lib->vba.CursorSupport == true
- && mode_lib->vba.PitchSupport == true
- && mode_lib->vba.PrefetchSupported[i] == true
- && mode_lib->vba.VRatioInPrefetchSupported[i] == true
- && mode_lib->vba.PTEBufferSizeNotExceeded[i] == true
- && mode_lib->vba.NonsupportedDSCInputBPC == false) {
- mode_lib->vba.ModeSupport[i] = true;
- } else {
- mode_lib->vba.ModeSupport[i] = false;
- }
- }
- for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
- if (i == DC__VOLTAGE_STATES || mode_lib->vba.ModeSupport[i] == true) {
- mode_lib->vba.VoltageLevel = i;
- }
- }
- mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.FabricAndDRAMBandwidth =
- mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
- mode_lib->vba.ImmediateFlipSupport =
- mode_lib->vba.ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel];
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- mode_lib->vba.DPPPerPlane[k] = mode_lib->vba.NoOfDPP[mode_lib->vba.VoltageLevel][k];
- }
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- if (mode_lib->vba.BlendingAndTiming[k] == k) {
- mode_lib->vba.ODMCombineEnabled[k] =
- mode_lib->vba.ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
- } else {
- mode_lib->vba.ODMCombineEnabled[k] = 0;
- }
- mode_lib->vba.DSCEnabled[k] =
- mode_lib->vba.RequiresDSC[mode_lib->vba.VoltageLevel][k];
- mode_lib->vba.OutputBpp[k] =
- mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k];
- }
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
deleted file mode 100644
index 4112409..0000000
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DML2_DISPLAY_MODE_VBA_H__
-#define __DML2_DISPLAY_MODE_VBA_H__
-
-#include "dml_common_defs.h"
-
-struct display_mode_lib;
-
-void set_prefetch_mode(struct display_mode_lib *mode_lib,
- bool cstate_en,
- bool pstate_en,
- bool ignore_viewport_pos,
- bool immediate_flip_support);
-
-#define dml_get_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes)
-
-dml_get_attr_decl(clk_dcf_deepsleep);
-dml_get_attr_decl(wm_urgent);
-dml_get_attr_decl(wm_memory_trip);
-dml_get_attr_decl(wm_writeback_urgent);
-dml_get_attr_decl(wm_stutter_exit);
-dml_get_attr_decl(wm_stutter_enter_exit);
-dml_get_attr_decl(wm_dram_clock_change);
-dml_get_attr_decl(wm_writeback_dram_clock_change);
-dml_get_attr_decl(wm_xfc_underflow);
-dml_get_attr_decl(stutter_efficiency_no_vblank);
-dml_get_attr_decl(stutter_efficiency);
-dml_get_attr_decl(urgent_latency);
-dml_get_attr_decl(urgent_extra_latency);
-dml_get_attr_decl(nonurgent_latency);
-dml_get_attr_decl(dram_clock_change_latency);
-dml_get_attr_decl(dispclk_calculated);
-dml_get_attr_decl(total_data_read_bw);
-dml_get_attr_decl(return_bw);
-dml_get_attr_decl(tcalc);
-
-#define dml_get_pipe_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe)
-
-dml_get_pipe_attr_decl(dsc_delay);
-dml_get_pipe_attr_decl(dppclk_calculated);
-dml_get_pipe_attr_decl(dscclk_calculated);
-dml_get_pipe_attr_decl(min_ttu_vblank);
-dml_get_pipe_attr_decl(vratio_prefetch_l);
-dml_get_pipe_attr_decl(vratio_prefetch_c);
-dml_get_pipe_attr_decl(dst_x_after_scaler);
-dml_get_pipe_attr_decl(dst_y_after_scaler);
-dml_get_pipe_attr_decl(dst_y_per_vm_vblank);
-dml_get_pipe_attr_decl(dst_y_per_row_vblank);
-dml_get_pipe_attr_decl(dst_y_prefetch);
-dml_get_pipe_attr_decl(dst_y_per_vm_flip);
-dml_get_pipe_attr_decl(dst_y_per_row_flip);
-dml_get_pipe_attr_decl(xfc_transfer_delay);
-dml_get_pipe_attr_decl(xfc_precharge_delay);
-dml_get_pipe_attr_decl(xfc_remote_surface_flip_latency);
-dml_get_pipe_attr_decl(xfc_prefetch_margin);
-
-unsigned int get_vstartup_calculated(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes,
- unsigned int which_pipe);
-
-double get_total_immediate_flip_bytes(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes);
-double get_total_immediate_flip_bw(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes);
-double get_total_prefetch_bw(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes);
-
-unsigned int dml_get_voltage_level(
- struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *pipes,
- unsigned int num_pipes);
-
-bool Calculate256BBlockSizes(
- enum source_format_class SourcePixelFormat,
- enum dm_swizzle_mode SurfaceTiling,
- unsigned int BytePerPixelY,
- unsigned int BytePerPixelC,
- unsigned int *BlockHeight256BytesY,
- unsigned int *BlockHeight256BytesC,
- unsigned int *BlockWidth256BytesY,
- unsigned int *BlockWidth256BytesC);
-
-
-struct vba_vars_st {
- ip_params_st ip;
- soc_bounding_box_st soc;
-
- unsigned int MaximumMaxVStartupLines;
- double cursor_bw[DC__NUM_DPP__MAX];
- double meta_row_bw[DC__NUM_DPP__MAX];
- double dpte_row_bw[DC__NUM_DPP__MAX];
- double qual_row_bw[DC__NUM_DPP__MAX];
- double WritebackDISPCLK;
- double PSCL_THROUGHPUT_LUMA[DC__NUM_DPP__MAX];
- double PSCL_THROUGHPUT_CHROMA[DC__NUM_DPP__MAX];
- double DPPCLKUsingSingleDPPLuma;
- double DPPCLKUsingSingleDPPChroma;
- double DPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
- double DISPCLKWithRamping;
- double DISPCLKWithoutRamping;
- double GlobalDPPCLK;
- double DISPCLKWithRampingRoundedToDFSGranularity;
- double DISPCLKWithoutRampingRoundedToDFSGranularity;
- double MaxDispclkRoundedToDFSGranularity;
- bool DCCEnabledAnyPlane;
- double ReturnBandwidthToDCN;
- unsigned int SwathWidthY[DC__NUM_DPP__MAX];
- unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
- double BytePerPixelDETY[DC__NUM_DPP__MAX];
- double BytePerPixelDETC[DC__NUM_DPP__MAX];
- double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX];
- double ReadBandwidthPlaneChroma[DC__NUM_DPP__MAX];
- unsigned int TotalActiveDPP;
- unsigned int TotalDCCActiveDPP;
- double UrgentRoundTripAndOutOfOrderLatency;
- double DisplayPipeLineDeliveryTimeLuma[DC__NUM_DPP__MAX]; // WM
- double DisplayPipeLineDeliveryTimeChroma[DC__NUM_DPP__MAX]; // WM
- double LinesInDETY[DC__NUM_DPP__MAX]; // WM
- double LinesInDETC[DC__NUM_DPP__MAX]; // WM
- unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
- unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
- double FullDETBufferingTimeY[DC__NUM_DPP__MAX]; // WM
- double FullDETBufferingTimeC[DC__NUM_DPP__MAX]; // WM
- double MinFullDETBufferingTime;
- double FrameTimeForMinFullDETBufferingTime;
- double AverageReadBandwidthGBytePerSecond;
- double PartOfBurstThatFitsInROB;
- double StutterBurstTime;
- //unsigned int NextPrefetchMode;
- double VBlankTime;
- double SmallestVBlank;
- double DCFCLKDeepSleepPerPlane;
- double EffectiveDETPlusLBLinesLuma;
- double EffectiveDETPlusLBLinesChroma;
- double UrgentLatencySupportUsLuma;
- double UrgentLatencySupportUsChroma;
- double UrgentLatencySupportUs[DC__NUM_DPP__MAX];
- unsigned int DSCFormatFactor;
- unsigned int BlockHeight256BytesY[DC__NUM_DPP__MAX];
- unsigned int BlockHeight256BytesC[DC__NUM_DPP__MAX];
- unsigned int BlockWidth256BytesY[DC__NUM_DPP__MAX];
- unsigned int BlockWidth256BytesC[DC__NUM_DPP__MAX];
- double VInitPreFillY[DC__NUM_DPP__MAX];
- double VInitPreFillC[DC__NUM_DPP__MAX];
- unsigned int MaxNumSwathY[DC__NUM_DPP__MAX];
- unsigned int MaxNumSwathC[DC__NUM_DPP__MAX];
- double PrefetchSourceLinesY[DC__NUM_DPP__MAX];
- double PrefetchSourceLinesC[DC__NUM_DPP__MAX];
- double PixelPTEBytesPerRow[DC__NUM_DPP__MAX];
- double MetaRowByte[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height[DC__NUM_DPP__MAX];
- unsigned int dpte_row_height_chroma[DC__NUM_DPP__MAX];
- unsigned int meta_row_height[DC__NUM_DPP__MAX];
- unsigned int meta_row_height_chroma[DC__NUM_DPP__MAX];
-
- unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
- unsigned int MaxVStartupLines[DC__NUM_DPP__MAX];
- double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool PrefetchModeSupported;
- bool AllowDRAMClockChangeDuringVBlank[DC__NUM_DPP__MAX];
- bool AllowDRAMSelfRefreshDuringVBlank[DC__NUM_DPP__MAX];
- double RequiredPrefetchPixDataBW[DC__NUM_DPP__MAX];
- double XFCRemoteSurfaceFlipDelay;
- double TInitXFill;
- double TslvChk;
- double SrcActiveDrainRate;
- double Tno_bw[DC__NUM_DPP__MAX];
- bool ImmediateFlipSupported;
-
- double prefetch_vm_bw[DC__NUM_DPP__MAX];
- double prefetch_row_bw[DC__NUM_DPP__MAX];
- bool ImmediateFlipSupportedForPipe[DC__NUM_DPP__MAX];
- unsigned int VStartupLines;
- double DisplayPipeLineDeliveryTimeLumaPrefetch[DC__NUM_DPP__MAX];
- double DisplayPipeLineDeliveryTimeChromaPrefetch[DC__NUM_DPP__MAX];
- unsigned int ActiveDPPs;
- unsigned int LBLatencyHidingSourceLinesY;
- unsigned int LBLatencyHidingSourceLinesC;
- double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX];
- double MinActiveDRAMClockChangeMargin;
- double XFCSlaveVUpdateOffset[DC__NUM_DPP__MAX];
- double XFCSlaveVupdateWidth[DC__NUM_DPP__MAX];
- double XFCSlaveVReadyOffset[DC__NUM_DPP__MAX];
- double InitFillLevel;
- double FinalFillMargin;
- double FinalFillLevel;
- double RemainingFillLevel;
- double TFinalxFill;
-
-
- //
- // SOC Bounding Box Parameters
- //
- double SRExitTime;
- double SREnterPlusExitTime;
- double UrgentLatency;
- double WritebackLatency;
- double PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency;
- double NumberOfChannels;
- double DRAMChannelWidth;
- double FabricDatapathToDCNDataReturn;
- double ReturnBusWidth;
- double Downspreading;
- double DISPCLKDPPCLKDSCCLKDownSpreading;
- double DISPCLKDPPCLKVCOSpeed;
- double RoundTripPingLatencyCycles;
- double UrgentOutOfOrderReturnPerChannel;
- unsigned int VMMPageSize;
- double DRAMClockChangeLatency;
- double XFCBusTransportTime;
- double XFCXBUFLatencyTolerance;
-
- //
- // IP Parameters
- //
- unsigned int ROBBufferSizeInKByte;
- double DETBufferSizeInKByte;
- unsigned int DPPOutputBufferPixels;
- unsigned int OPPOutputBufferLines;
- unsigned int PixelChunkSizeInKByte;
- double ReturnBW;
- bool VirtualMemoryEnable;
- unsigned int MaxPageTableLevels;
- unsigned int OverridePageTableLevels;
- unsigned int PTEChunkSize;
- unsigned int MetaChunkSize;
- unsigned int WritebackChunkSize;
- bool ODMCapability;
- unsigned int NumberOfDSC;
- unsigned int LineBufferSize;
- unsigned int MaxLineBufferLines;
- unsigned int WritebackInterfaceLumaBufferSize;
- unsigned int WritebackInterfaceChromaBufferSize;
- unsigned int WritebackChromaLineBufferWidth;
- double MaxDCHUBToPSCLThroughput;
- double MaxPSCLToLBThroughput;
- unsigned int PTEBufferSizeInRequests;
- double DISPCLKRampingMargin;
- unsigned int MaxInterDCNTileRepeaters;
- bool XFCSupported;
- double XFCSlvChunkSize;
- double XFCFillBWOverhead;
- double XFCFillConstant;
- double XFCTSlvVupdateOffset;
- double XFCTSlvVupdateWidth;
- double XFCTSlvVreadyOffset;
- double DPPCLKDelaySubtotal;
- double DPPCLKDelaySCL;
- double DPPCLKDelaySCLLBOnly;
- double DPPCLKDelayCNVCFormater;
- double DPPCLKDelayCNVCCursor;
- double DISPCLKDelaySubtotal;
- bool ProgressiveToInterlaceUnitInOPP;
- unsigned int PDEProcessingBufIn64KBReqs;
-
- // Pipe/Plane Parameters
- int VoltageLevel;
- double FabricAndDRAMBandwidth;
- double FabricClock;
- double DRAMSpeed;
- double DISPCLK;
- double SOCCLK;
- double DCFCLK;
-
- unsigned int NumberOfActivePlanes;
- unsigned int ViewportWidth[DC__NUM_DPP__MAX];
- unsigned int ViewportHeight[DC__NUM_DPP__MAX];
- unsigned int ViewportYStartY[DC__NUM_DPP__MAX];
- unsigned int ViewportYStartC[DC__NUM_DPP__MAX];
- unsigned int PitchY[DC__NUM_DPP__MAX];
- unsigned int PitchC[DC__NUM_DPP__MAX];
- double HRatio[DC__NUM_DPP__MAX];
- double VRatio[DC__NUM_DPP__MAX];
- unsigned int htaps[DC__NUM_DPP__MAX];
- unsigned int vtaps[DC__NUM_DPP__MAX];
- unsigned int HTAPsChroma[DC__NUM_DPP__MAX];
- unsigned int VTAPsChroma[DC__NUM_DPP__MAX];
- unsigned int HTotal[DC__NUM_DPP__MAX];
- unsigned int VTotal[DC__NUM_DPP__MAX];
- unsigned int DPPPerPlane[DC__NUM_DPP__MAX];
- double PixelClock[DC__NUM_DPP__MAX];
- double PixelClockBackEnd[DC__NUM_DPP__MAX];
- double DPPCLK[DC__NUM_DPP__MAX];
- bool DCCEnable[DC__NUM_DPP__MAX];
- unsigned int DCCMetaPitchY[DC__NUM_DPP__MAX];
- enum scan_direction_class SourceScan[DC__NUM_DPP__MAX];
- enum source_format_class SourcePixelFormat[DC__NUM_DPP__MAX];
- bool WritebackEnable[DC__NUM_DPP__MAX];
- double WritebackDestinationWidth[DC__NUM_DPP__MAX];
- double WritebackDestinationHeight[DC__NUM_DPP__MAX];
- double WritebackSourceHeight[DC__NUM_DPP__MAX];
- enum source_format_class WritebackPixelFormat[DC__NUM_DPP__MAX];
- unsigned int WritebackLumaHTaps[DC__NUM_DPP__MAX];
- unsigned int WritebackLumaVTaps[DC__NUM_DPP__MAX];
- unsigned int WritebackChromaHTaps[DC__NUM_DPP__MAX];
- unsigned int WritebackChromaVTaps[DC__NUM_DPP__MAX];
- double WritebackHRatio[DC__NUM_DPP__MAX];
- double WritebackVRatio[DC__NUM_DPP__MAX];
- unsigned int HActive[DC__NUM_DPP__MAX];
- unsigned int VActive[DC__NUM_DPP__MAX];
- bool Interlace[DC__NUM_DPP__MAX];
- enum dm_swizzle_mode SurfaceTiling[DC__NUM_DPP__MAX];
- unsigned int ScalerRecoutWidth[DC__NUM_DPP__MAX];
- bool DynamicMetadataEnable[DC__NUM_DPP__MAX];
- unsigned int DynamicMetadataLinesBeforeActiveRequired[DC__NUM_DPP__MAX];
- unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];
- double DCCRate[DC__NUM_DPP__MAX];
- bool ODMCombineEnabled[DC__NUM_DPP__MAX];
- double OutputBpp[DC__NUM_DPP__MAX];
- unsigned int NumberOfDSCSlices[DC__NUM_DPP__MAX];
- bool DSCEnabled[DC__NUM_DPP__MAX];
- unsigned int DSCDelay[DC__NUM_DPP__MAX];
- unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
- enum output_format_class OutputFormat[DC__NUM_DPP__MAX];
- enum output_encoder_class Output[DC__NUM_DPP__MAX];
- unsigned int BlendingAndTiming[DC__NUM_DPP__MAX];
- bool SynchronizedVBlank;
- unsigned int NumberOfCursors[DC__NUM_DPP__MAX];
- unsigned int CursorWidth[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
- unsigned int CursorBPP[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
- bool XFCEnabled[DC__NUM_DPP__MAX];
- bool ScalerEnabled[DC__NUM_DPP__MAX];
-
- // Intermediates/Informational
- bool ImmediateFlipSupport;
- unsigned int SwathHeightY[DC__NUM_DPP__MAX];
- unsigned int SwathHeightC[DC__NUM_DPP__MAX];
- unsigned int DETBufferSizeY[DC__NUM_DPP__MAX];
- unsigned int DETBufferSizeC[DC__NUM_DPP__MAX];
- unsigned int LBBitPerPixel[DC__NUM_DPP__MAX];
- double LastPixelOfLineExtraWatermark;
- double TotalDataReadBandwidth;
- unsigned int TotalActiveWriteback;
- unsigned int EffectiveLBLatencyHidingSourceLinesLuma;
- unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
- double BandwidthAvailableForImmediateFlip;
- unsigned int PrefetchMode;
- bool IgnoreViewportPositioning;
- double PrefetchBandwidth[DC__NUM_DPP__MAX];
- bool ErrorResult[DC__NUM_DPP__MAX];
- double PDEAndMetaPTEBytesFrame[DC__NUM_DPP__MAX];
-
- //
- // Calculated dml_ml->vba.Outputs
- //
- double DCFClkDeepSleep;
- double UrgentWatermark;
- double UrgentExtraLatency;
- double MemoryTripWatermark;
- double WritebackUrgentWatermark;
- double StutterExitWatermark;
- double StutterEnterPlusExitWatermark;
- double DRAMClockChangeWatermark;
- double WritebackDRAMClockChangeWatermark;
- double StutterEfficiency;
- double StutterEfficiencyNotIncludingVBlank;
- double MinUrgentLatencySupportUs;
- double NonUrgentLatencyTolerance;
- double MinActiveDRAMClockChangeLatencySupported;
- enum clock_change_support DRAMClockChangeSupport;
-
- // These are the clocks calculated by the library but they are not actually
- // used explicitly. They are fetched by tests and then possibly used. The
- // ultimate values to use are the ones specified by the parameters to DML
- double DISPCLK_calculated;
- double DSCCLK_calculated[DC__NUM_DPP__MAX];
- double DPPCLK_calculated[DC__NUM_DPP__MAX];
-
- unsigned int VStartup[DC__NUM_DPP__MAX];
- unsigned int VUpdateOffsetPix[DC__NUM_DPP__MAX];
- unsigned int VUpdateWidthPix[DC__NUM_DPP__MAX];
- unsigned int VReadyOffsetPix[DC__NUM_DPP__MAX];
- unsigned int VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
-
- double ImmediateFlipBW;
- unsigned int TotImmediateFlipBytes;
- double TCalc;
- double MinTTUVBlank[DC__NUM_DPP__MAX];
- double VRatioPrefetchY[DC__NUM_DPP__MAX];
- double VRatioPrefetchC[DC__NUM_DPP__MAX];
- double DSTXAfterScaler[DC__NUM_DPP__MAX];
- double DSTYAfterScaler[DC__NUM_DPP__MAX];
-
- double DestinationLinesToRequestVMInVBlank[DC__NUM_DPP__MAX];
- double DestinationLinesToRequestRowInVBlank[DC__NUM_DPP__MAX];
- double DestinationLinesForPrefetch[DC__NUM_DPP__MAX];
- double DestinationLinesToRequestRowInImmediateFlip[DC__NUM_DPP__MAX];
- double DestinationLinesToRequestVMInImmediateFlip[DC__NUM_DPP__MAX];
-
- double XFCTransferDelay[DC__NUM_DPP__MAX];
- double XFCPrechargeDelay[DC__NUM_DPP__MAX];
- double XFCRemoteSurfaceFlipLatency[DC__NUM_DPP__MAX];
- double XFCPrefetchMargin[DC__NUM_DPP__MAX];
-
- display_e2e_pipe_params_st cache_pipes[DC__NUM_DPP__MAX];
- unsigned int cache_num_pipes;
- unsigned int pipe_plane[DC__NUM_DPP__MAX];
-
- /* vba mode support */
- /*inputs*/
- bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
- double MaxHSCLRatio;
- double MaxVSCLRatio;
- unsigned int MaxNumWriteback;
- bool WritebackLumaAndChromaScalingSupported;
- bool Cursor64BppSupport;
- double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
- double FabricClockPerState[DC__VOLTAGE_STATES + 1];
- double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
- double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
- double MaxDppclk[DC__VOLTAGE_STATES + 1];
- double MaxDSCCLK[DC__VOLTAGE_STATES + 1];
- double DRAMSpeedPerState[DC__VOLTAGE_STATES + 1];
- double MaxDispclk[DC__VOLTAGE_STATES + 1];
-
- /*outputs*/
- bool ScaleRatioAndTapsSupport;
- bool SourceFormatPixelAndScanSupport;
- unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
- double BytePerPixelInDETY[DC__NUM_DPP__MAX];
- double BytePerPixelInDETC[DC__NUM_DPP__MAX];
- double TotalReadBandwidthConsumedGBytePerSecond;
- double ReadBandwidth[DC__NUM_DPP__MAX];
- double TotalWriteBandwidthConsumedGBytePerSecond;
- double WriteBandwidth[DC__NUM_DPP__MAX];
- double TotalBandwidthConsumedGBytePerSecond;
- bool DCCEnabledInAnyPlane;
- bool WritebackLatencySupport;
- bool WritebackModeSupport;
- bool Writeback10bpc420Supported;
- bool BandwidthSupport[DC__VOLTAGE_STATES + 1];
- unsigned int TotalNumberOfActiveWriteback;
- double CriticalPoint;
- double ReturnBWToDCNPerState;
- double FabricAndDRAMBandwidthPerState[DC__VOLTAGE_STATES + 1];
- double ReturnBWPerState[DC__VOLTAGE_STATES + 1];
- double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
- bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool PrefetchSupported[DC__VOLTAGE_STATES + 1];
- bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1];
- bool DISPCLK_DPPCLK_Support[DC__VOLTAGE_STATES + 1];
- bool TotalAvailablePipesSupport[DC__VOLTAGE_STATES + 1];
- bool UrgentLatencySupport[DC__VOLTAGE_STATES + 1];
- bool ModeSupport[DC__VOLTAGE_STATES + 1];
- bool DIOSupport[DC__VOLTAGE_STATES + 1];
- bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
- bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
- bool ROBSupport[DC__VOLTAGE_STATES + 1];
- bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1];
- bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool IsErrorResult[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1];
- bool prefetch_vm_bw_valid;
- bool prefetch_row_bw_valid;
- bool NumberOfOTGSupport;
- bool NonsupportedDSCInputBPC;
- bool WritebackScaleRatioAndTapsSupport;
- bool CursorSupport;
- bool PitchSupport;
-
- double WritebackLineBufferLumaBufferSize;
- double WritebackLineBufferChromaBufferSize;
- double WritebackMinHSCLRatio;
- double WritebackMinVSCLRatio;
- double WritebackMaxHSCLRatio;
- double WritebackMaxVSCLRatio;
- double WritebackMaxHSCLTaps;
- double WritebackMaxVSCLTaps;
- unsigned int MaxNumDPP;
- unsigned int MaxNumOTG;
- double CursorBufferSize;
- double CursorChunkSize;
- unsigned int Mode;
- unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double OutputLinkDPLanes[DC__NUM_DPP__MAX];
- double SwathWidthYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double SwathHeightYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double SwathHeightCPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double UrgentLatencySupportUsPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double VRatioPreY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double VRatioPreC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double RequiredPrefetchPixelDataBW[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double RequiredDPPCLK[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double RequiredDISPCLK[DC__VOLTAGE_STATES + 1];
- double TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1];
- double TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1];
- double PrefetchBW[DC__NUM_DPP__MAX];
- double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX];
- double MetaRowBytes[DC__NUM_DPP__MAX];
- double DPTEBytesPerRow[DC__NUM_DPP__MAX];
- double PrefetchLinesY[DC__NUM_DPP__MAX];
- double PrefetchLinesC[DC__NUM_DPP__MAX];
- unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
- unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
- double PrefillY[DC__NUM_DPP__MAX];
- double PrefillC[DC__NUM_DPP__MAX];
- double LineTimesForPrefetch[DC__NUM_DPP__MAX];
- double LinesForMetaPTE[DC__NUM_DPP__MAX];
- double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];
- double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
- double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- unsigned int OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
- unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
- unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
- unsigned int Read256BlockWidthC[DC__NUM_DPP__MAX];
- unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
- double MaxSwathHeightY[DC__NUM_DPP__MAX];
- double MaxSwathHeightC[DC__NUM_DPP__MAX];
- double MinSwathHeightY[DC__NUM_DPP__MAX];
- double MinSwathHeightC[DC__NUM_DPP__MAX];
- double PSCL_FACTOR[DC__NUM_DPP__MAX];
- double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
- double MaximumVStartup[DC__NUM_DPP__MAX];
- double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
- double AlignedYPitch[DC__NUM_DPP__MAX];
- double AlignedCPitch[DC__NUM_DPP__MAX];
- double MaximumSwathWidth[DC__NUM_DPP__MAX];
- double final_flip_bw[DC__NUM_DPP__MAX];
- double ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1];
-
- double WritebackLumaVExtra;
- double WritebackChromaVExtra;
- double WritebackRequiredDISPCLK;
- double MaximumSwathWidthSupport;
- double MaximumSwathWidthInDETBuffer;
- double MaximumSwathWidthInLineBuffer;
- double MaxDispclkRoundedDownToDFSGranularity;
- double MaxDppclkRoundedDownToDFSGranularity;
- double PlaneRequiredDISPCLKWithoutODMCombine;
- double PlaneRequiredDISPCLK;
- double TotalNumberOfActiveOTG;
- double FECOverhead;
- double EffectiveFECOverhead;
- unsigned int Outbpp;
- unsigned int OutbppDSC;
- double TotalDSCUnitsRequired;
- double bpp;
- unsigned int slices;
- double SwathWidthGranularityY;
- double RoundedUpMaxSwathSizeBytesY;
- double SwathWidthGranularityC;
- double RoundedUpMaxSwathSizeBytesC;
- double LinesInDETLuma;
- double LinesInDETChroma;
- double EffectiveDETLBLinesLuma;
- double EffectiveDETLBLinesChroma;
- double ProjectedDCFCLKDeepSleep;
- double PDEAndMetaPTEBytesPerFrameY;
- double PDEAndMetaPTEBytesPerFrameC;
- unsigned int MetaRowBytesY;
- unsigned int MetaRowBytesC;
- unsigned int DPTEBytesPerRowC;
- unsigned int DPTEBytesPerRowY;
- double ExtraLatency;
- double TimeCalc;
- double TWait;
- double MaximumReadBandwidthWithPrefetch;
- double total_dcn_read_bw_with_flip;
-};
-
-#endif /* __DML2_DISPLAY_MODE_VBA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
deleted file mode 100644
index 8ba962d..0000000
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
+++ /dev/null
@@ -1,1763 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "display_mode_lib.h"
-#include "display_mode_vba.h"
-#include "display_rq_dlg_calc.h"
-
-static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
- double *refcyc_per_req_delivery_pre_cur,
- double *refcyc_per_req_delivery_cur,
- double refclk_freq_in_mhz,
- double ref_freq_to_pix_freq,
- double hscale_pixel_rate_l,
- double hscl_ratio,
- double vratio_pre_l,
- double vratio_l,
- unsigned int cur_width,
- enum cursor_bpp cur_bpp);
-
-#include "dml_inline_defs.h"
-
-static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
-{
- unsigned int ret_val = 0;
-
- if (source_format == dm_444_16) {
- if (!is_chroma)
- ret_val = 2;
- } else if (source_format == dm_444_32) {
- if (!is_chroma)
- ret_val = 4;
- } else if (source_format == dm_444_64) {
- if (!is_chroma)
- ret_val = 8;
- } else if (source_format == dm_420_8) {
- if (is_chroma)
- ret_val = 2;
- else
- ret_val = 1;
- } else if (source_format == dm_420_10) {
- if (is_chroma)
- ret_val = 4;
- else
- ret_val = 2;
- } else if (source_format == dm_444_8) {
- ret_val = 1;
- }
- return ret_val;
-}
-
-static bool is_dual_plane(enum source_format_class source_format)
-{
- bool ret_val = 0;
-
- if ((source_format == dm_420_8) || (source_format == dm_420_10))
- ret_val = 1;
-
- return ret_val;
-}
-
-static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
- double refclk_freq_in_mhz,
- double pclk_freq_in_mhz,
- bool odm_combine,
- unsigned int recout_width,
- unsigned int hactive,
- double vratio,
- double hscale_pixel_rate,
- unsigned int delivery_width,
- unsigned int req_per_swath_ub)
-{
- double refcyc_per_delivery = 0.0;
-
- if (vratio <= 1.0) {
- if (odm_combine)
- refcyc_per_delivery = (double) refclk_freq_in_mhz
- * dml_min((double) recout_width, (double) hactive / 2.0)
- / pclk_freq_in_mhz / (double) req_per_swath_ub;
- else
- refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
- / pclk_freq_in_mhz / (double) req_per_swath_ub;
- } else {
- refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
- / (double) hscale_pixel_rate / (double) req_per_swath_ub;
- }
-
- dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
- dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
- dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
- dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
- dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
- dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
-
- return refcyc_per_delivery;
-
-}
-
-static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
-{
- if (tile_size == dm_256k_tile)
- return (256 * 1024);
- else if (tile_size == dm_64k_tile)
- return (64 * 1024);
- else
- return (4 * 1024);
-}
-
-static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
- display_data_rq_regs_st *rq_regs,
- const display_data_rq_sizing_params_st rq_sizing)
-{
- dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
- print__data_rq_sizing_params_st(mode_lib, rq_sizing);
-
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
-
- if (rq_sizing.min_chunk_bytes == 0)
- rq_regs->min_chunk_size = 0;
- else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
-
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
- rq_regs->min_meta_chunk_size = 0;
- else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
-
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
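-
- // Worked example of the encodings above -- just a sketch, assuming the
- // default sizing values programmed in get_surf_rq_param() below:
- //   chunk_bytes = 8192          -> chunk_size          = dml_log2(8192) - 10    = 3
- //   min_chunk_bytes = 1024      -> min_chunk_size      = dml_log2(1024) - 8 + 1 = 3
- //   meta_chunk_bytes = 2048     -> meta_chunk_size     = dml_log2(2048) - 10    = 1
- //   min_meta_chunk_bytes = 256  -> min_meta_chunk_size = dml_log2(256) - 6 + 1  = 3
- //   dpte_group_bytes = 2048     -> dpte_group_size     = dml_log2(2048) - 6     = 5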
-}
-
-static void extract_rq_regs(struct display_mode_lib *mode_lib,
- display_rq_regs_st *rq_regs,
- const display_rq_params_st rq_param)
-{
- unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
- unsigned int detile_buf_plane1_addr = 0;
-
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
-
- rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
- 1) - 3;
-
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
- rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
- 1) - 3;
- }
-
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
-
- // FIXME: take the max between luma, chroma chunk size?
- // okay for now, as we are setting chunk_bytes to 8kb anyway
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
- rq_regs->drq_expansion_mode = 0;
- } else {
- rq_regs->drq_expansion_mode = 2;
- }
- rq_regs->prq_expansion_mode = 1;
- rq_regs->mrq_expansion_mode = 1;
- rq_regs->crq_expansion_mode = 1;
-
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes
- / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
- detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
- } else {
- detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
- 256,
- 0) / 64.0; // 2/3 to chroma
- }
- }
- rq_regs->plane1_base_address = detile_buf_plane1_addr;
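-
- // Example of the chroma split above -- a sketch only, with a hypothetical
- // 128KB detile buffer (the real size comes from ip.det_buffer_size_kbytes):
- //   half split: 131072 / 2 / 64 = 1024 (in 64B units)
- //   2/3 split:  ~(2 * 131072 / 3) rounded to a 256B multiple, / 64 = ~1364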
-}
-
-static void handle_det_buf_split(struct display_mode_lib *mode_lib,
- display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
-{
- unsigned int total_swath_bytes = 0;
- unsigned int swath_bytes_l = 0;
- unsigned int swath_bytes_c = 0;
- unsigned int full_swath_bytes_packed_l = 0;
- unsigned int full_swath_bytes_packed_c = 0;
- bool req128_l = 0;
- bool req128_c = 0;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
- unsigned int log2_swath_height_l = 0;
- unsigned int log2_swath_height_c = 0;
- unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
-
- full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
- full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
-
- if (rq_param->yuv420_10bpc) {
- full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
- 256,
- 1) + 256;
- full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
- 256,
- 1) + 256;
- }
-
- if (rq_param->yuv420) {
- total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
-
- if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
- req128_l = 0;
- req128_c = 0;
- swath_bytes_l = full_swath_bytes_packed_l;
- swath_bytes_c = full_swath_bytes_packed_c;
- } else { //128b request (for luma only for yuv420 8bpc)
- req128_l = 1;
- req128_c = 0;
- swath_bytes_l = full_swath_bytes_packed_l / 2;
- swath_bytes_c = full_swath_bytes_packed_c;
- }
- // Note: the assumption is that the config passed in will fit into
- // the detile buffer.
- } else {
- total_swath_bytes = 2 * full_swath_bytes_packed_l;
-
- if (total_swath_bytes <= detile_buf_size_in_bytes)
- req128_l = 0;
- else
- req128_l = 1;
-
- swath_bytes_l = total_swath_bytes;
- swath_bytes_c = 0;
- }
- rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
- rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
-
- if (surf_linear) {
- log2_swath_height_l = 0;
- log2_swath_height_c = 0;
- } else if (!surf_vert) {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
- } else {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
- }
- rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
- rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
-
- dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
- dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
- dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
- __func__,
- full_swath_bytes_packed_l);
- dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
- __func__,
- full_swath_bytes_packed_c);
-}
-
-static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
- display_data_rq_dlg_params_st *rq_dlg_param,
- display_data_rq_misc_params_st *rq_misc_param,
- display_data_rq_sizing_params_st *rq_sizing_param,
- unsigned int vp_width,
- unsigned int vp_height,
- unsigned int data_pitch,
- unsigned int meta_pitch,
- unsigned int source_format,
- unsigned int tiling,
- unsigned int macro_tile_size,
- unsigned int source_scan,
- unsigned int is_chroma)
-{
- bool surf_linear = (tiling == dm_sw_linear);
- bool surf_vert = (source_scan == dm_vert);
-
- unsigned int bytes_per_element;
- unsigned int bytes_per_element_y = get_bytes_per_element((enum source_format_class)(source_format),
- false);
- unsigned int bytes_per_element_c = get_bytes_per_element((enum source_format_class)(source_format),
- true);
-
- unsigned int blk256_width = 0;
- unsigned int blk256_height = 0;
-
- unsigned int blk256_width_y = 0;
- unsigned int blk256_height_y = 0;
- unsigned int blk256_width_c = 0;
- unsigned int blk256_height_c = 0;
- unsigned int log2_bytes_per_element;
- unsigned int log2_blk256_width;
- unsigned int log2_blk256_height;
- unsigned int blk_bytes;
- unsigned int log2_blk_bytes;
- unsigned int log2_blk_height;
- unsigned int log2_blk_width;
- unsigned int log2_meta_req_bytes;
- unsigned int log2_meta_req_height;
- unsigned int log2_meta_req_width;
- unsigned int meta_req_width;
- unsigned int meta_req_height;
- unsigned int log2_meta_row_height;
- unsigned int meta_row_width_ub;
- unsigned int log2_meta_chunk_bytes;
- unsigned int log2_meta_chunk_height;
-
- //full sized meta chunk width in unit of data elements
- unsigned int log2_meta_chunk_width;
- unsigned int log2_min_meta_chunk_bytes;
- unsigned int min_meta_chunk_width;
- unsigned int meta_chunk_width;
- unsigned int meta_chunk_per_row_int;
- unsigned int meta_row_remainder;
- unsigned int meta_chunk_threshold;
- unsigned int meta_blk_bytes;
- unsigned int meta_blk_height;
- unsigned int meta_blk_width;
- unsigned int meta_surface_bytes;
- unsigned int vmpg_bytes;
- unsigned int meta_pte_req_per_frame_ub;
- unsigned int meta_pte_bytes_per_frame_ub;
- const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
- const unsigned int dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
- const unsigned int pde_proc_buffer_size_64k_reqs =
- mode_lib->ip.pde_proc_buffer_size_64k_reqs;
-
- unsigned int log2_vmpg_height = 0;
- unsigned int log2_vmpg_width = 0;
- unsigned int log2_dpte_req_height_ptes = 0;
- unsigned int log2_dpte_req_height = 0;
- unsigned int log2_dpte_req_width = 0;
- unsigned int log2_dpte_row_height_linear = 0;
- unsigned int log2_dpte_row_height = 0;
- unsigned int log2_dpte_group_width = 0;
- unsigned int dpte_row_width_ub = 0;
- unsigned int dpte_req_height = 0;
- unsigned int dpte_req_width = 0;
- unsigned int dpte_group_width = 0;
- unsigned int log2_dpte_group_bytes = 0;
- unsigned int log2_dpte_group_length = 0;
- unsigned int pde_buf_entries;
- bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
-
- Calculate256BBlockSizes((enum source_format_class)(source_format),
- (enum dm_swizzle_mode)(tiling),
- bytes_per_element_y,
- bytes_per_element_c,
- &blk256_height_y,
- &blk256_height_c,
- &blk256_width_y,
- &blk256_width_c);
-
- if (!is_chroma) {
- blk256_width = blk256_width_y;
- blk256_height = blk256_height_y;
- bytes_per_element = bytes_per_element_y;
- } else {
- blk256_width = blk256_width_c;
- blk256_height = blk256_height_c;
- bytes_per_element = bytes_per_element_c;
- }
-
- log2_bytes_per_element = dml_log2(bytes_per_element);
-
- dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
- dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
- dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
- dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
-
- log2_blk256_width = dml_log2((double) blk256_width);
- log2_blk256_height = dml_log2((double) blk256_height);
- blk_bytes = surf_linear ?
- 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
- log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
-
- // remember log rule
- // "+" in log is multiply
- // "-" in log is divide
- // "/2" is like square root
- // blk is vertically biased
- if (tiling != dm_sw_linear)
- log2_blk_height = log2_blk256_height
- + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
- else
- log2_blk_height = 0; // blk height of 1
-
- log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
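-
- // Example -- a sketch for a 32bpp 4:4:4 surface in a 64KB macro tile,
- // assuming Calculate256BBlockSizes() gives an 8x8 element 256B block:
- //   log2_bytes_per_element = 2, log2_blk256_height = 3, log2_blk_bytes = 16
- //   log2_blk_height = 3 + ceil((16 - 8) / 2) = 7
- //   log2_blk_width  = 16 - 2 - 7             = 7
- //   i.e. a 128x128 element block = 128 * 128 * 4 bytes = 64KB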
-
- if (!surf_vert) {
- rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
- + blk256_width;
- rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
- } else {
- rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_height - 1, blk256_height, 1)
- + blk256_height;
- rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
- }
-
- if (!surf_vert)
- rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
- * bytes_per_element;
- else
- rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
- * bytes_per_element;
-
- rq_misc_param->blk256_height = blk256_height;
- rq_misc_param->blk256_width = blk256_width;
-
- // -------
- // meta
- // -------
- log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
-
- // each 64b meta request for dcn is 8x8 meta elements and
- // a meta element covers one 256b block of the data surface.
- log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represents 1 blk256
- log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
- - log2_meta_req_height;
- meta_req_width = 1 << log2_meta_req_width;
- meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
-
- // the dimensions of a meta row are meta_row_width x meta_row_height in elements.
- // calculate upper bound of the meta_row_width
- if (!surf_vert) {
- log2_meta_row_height = log2_meta_req_height;
- meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
- + meta_req_width;
- rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
- } else {
- log2_meta_row_height = log2_meta_req_width;
- meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
- + meta_req_height;
- rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
- }
- rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
-
- rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
-
- log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
- log2_meta_chunk_height = log2_meta_row_height;
-
- //full sized meta chunk width in unit of data elements
- log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
- - log2_meta_chunk_height;
- log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
- min_meta_chunk_width = 1
- << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
- - log2_meta_chunk_height);
- meta_chunk_width = 1 << log2_meta_chunk_width;
- meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
- meta_row_remainder = meta_row_width_ub % meta_chunk_width;
- meta_chunk_threshold = 0;
- meta_blk_bytes = 4096;
- meta_blk_height = blk256_height * 64;
- meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
- meta_surface_bytes = meta_pitch
- * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
- * bytes_per_element / 256;
- vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
- meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
- 8 * vmpg_bytes,
- 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
- meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
- rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
-
- dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
- dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
- dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
- dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
- __func__,
- meta_pte_req_per_frame_ub);
- dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
- __func__,
- meta_pte_bytes_per_frame_ub);
-
- if (!surf_vert)
- meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
- else
- meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
-
- if (meta_row_remainder <= meta_chunk_threshold)
- rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
- else
- rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
-
- // ------
- // dpte
- // ------
- if (surf_linear) {
- log2_vmpg_height = 0; // one line high
- } else {
- log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
- }
- log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
-
- // only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
- if (surf_linear) { //one 64B PTE request returns 8 PTEs
- log2_dpte_req_height_ptes = 0;
- log2_dpte_req_width = log2_vmpg_width + 3;
- log2_dpte_req_height = 0;
- } else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
- //one 64B req gives 8x1 PTEs for 4KB tile
- log2_dpte_req_height_ptes = 0;
- log2_dpte_req_width = log2_blk_width + 3;
- log2_dpte_req_height = log2_blk_height + 0;
- } else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
- //two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
- log2_dpte_req_height_ptes = 4;
- log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
- log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
- } else { //64KB page size and must be a 64KB tile block
- //one 64B req gives 8x1 PTEs for 64KB tile
- log2_dpte_req_height_ptes = 0;
- log2_dpte_req_width = log2_blk_width + 3;
- log2_dpte_req_height = log2_blk_height + 0;
- }
-
- // The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
- // log2_vmpg_width is how much 1 pte represents; now calculate how much a 64b pte req represents
- // That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
- //log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
- //log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
- dpte_req_height = 1 << log2_dpte_req_height;
- dpte_req_width = 1 << log2_dpte_req_width;
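-
- // Example -- a sketch for a 32bpp linear surface with vmm_page_size_bytes = 4096:
- //   log2_vmpg_height = 0, log2_vmpg_width = 12 - 2 - 0 = 10 (1024 elements/page)
- //   one 64B request returns 8 PTEs, so log2_dpte_req_width = 10 + 3 = 13
- //   -> dpte_req_width = 8192 elements, dpte_req_height = 1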
-
- // calculate pitch dpte row buffer can hold
- // round the result down to a power of two.
- pde_buf_entries = yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
- if (surf_linear) {
- unsigned int dpte_row_height;
-
- log2_dpte_row_height_linear = dml_floor(dml_log2(dml_min(64 * 1024 * pde_buf_entries
- / bytes_per_element,
- dpte_buf_in_pte_reqs
- * dpte_req_width)
- / data_pitch),
- 1);
-
- ASSERT(log2_dpte_row_height_linear >= 3);
-
- if (log2_dpte_row_height_linear > 7)
- log2_dpte_row_height_linear = 7;
-
- log2_dpte_row_height = log2_dpte_row_height_linear;
- // For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
- // the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
- dpte_row_height = 1 << log2_dpte_row_height;
- dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
- dpte_req_width,
- 1) + dpte_req_width;
- rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
- } else {
- // the upper bound of the dpte_row_width without dependency on viewport position follows.
- // for tiled mode, row height is the same as req height and row store up to vp size upper bound
- if (!surf_vert) {
- log2_dpte_row_height = log2_dpte_req_height;
- dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
- + dpte_req_width;
- rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
- } else {
- log2_dpte_row_height =
- (log2_blk_width < log2_dpte_req_width) ?
- log2_blk_width : log2_dpte_req_width;
- dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
- + dpte_req_height;
- rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
- }
- }
- if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
- rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
- else
- rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
-
- rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
-
- // the dpte_group_bytes is reduced for the specific case of vertical
- // access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
- rq_sizing_param->dpte_group_bytes = 512;
- else
- //full size
- rq_sizing_param->dpte_group_bytes = 2048;
-
- //since pte request size is 64byte, the number of data pte requests per full sized group is as follows.
- log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
- log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
-
- // full sized data pte group width in elements
- if (!surf_vert)
- log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
- else
- log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
-
- //But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
- if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
- log2_dpte_group_width = log2_dpte_group_width - 1;
-
- dpte_group_width = 1 << log2_dpte_group_width;
-
- // since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
- // the upper bound for the dpte groups per row is as follows.
- rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width,
- 1);
-}
-
-static void get_surf_rq_param(struct display_mode_lib *mode_lib,
- display_data_rq_sizing_params_st *rq_sizing_param,
- display_data_rq_dlg_params_st *rq_dlg_param,
- display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_source_params_st pipe_src_param,
- bool is_chroma)
-{
- bool mode_422 = 0;
- unsigned int vp_width = 0;
- unsigned int vp_height = 0;
- unsigned int data_pitch = 0;
- unsigned int meta_pitch = 0;
- unsigned int ppe = mode_422 ? 2 : 1;
-
- // FIXME check if ppe applies for both luma and chroma in 422 case
- if (is_chroma) {
- vp_width = pipe_src_param.viewport_width_c / ppe;
- vp_height = pipe_src_param.viewport_height_c;
- data_pitch = pipe_src_param.data_pitch_c;
- meta_pitch = pipe_src_param.meta_pitch_c;
- } else {
- vp_width = pipe_src_param.viewport_width / ppe;
- vp_height = pipe_src_param.viewport_height;
- data_pitch = pipe_src_param.data_pitch;
- meta_pitch = pipe_src_param.meta_pitch;
- }
-
- rq_sizing_param->chunk_bytes = 8192;
-
- if (rq_sizing_param->chunk_bytes == 64 * 1024)
- rq_sizing_param->min_chunk_bytes = 0;
- else
- rq_sizing_param->min_chunk_bytes = 1024;
-
- rq_sizing_param->meta_chunk_bytes = 2048;
- rq_sizing_param->min_meta_chunk_bytes = 256;
-
- rq_sizing_param->mpte_group_bytes = 2048;
-
- get_meta_and_pte_attr(mode_lib,
- rq_dlg_param,
- rq_misc_param,
- rq_sizing_param,
- vp_width,
- vp_height,
- data_pitch,
- meta_pitch,
- pipe_src_param.source_format,
- pipe_src_param.sw_mode,
- pipe_src_param.macro_tile_size,
- pipe_src_param.source_scan,
- is_chroma);
-}
-
-void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
- display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
-{
- // get param for luma surface
- rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
- || pipe_src_param.source_format == dm_420_10;
- rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
-
- get_surf_rq_param(mode_lib,
- &(rq_param->sizing.rq_l),
- &(rq_param->dlg.rq_l),
- &(rq_param->misc.rq_l),
- pipe_src_param,
- 0);
-
- if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
- // get param for chroma surface
- get_surf_rq_param(mode_lib,
- &(rq_param->sizing.rq_c),
- &(rq_param->dlg.rq_c),
- &(rq_param->misc.rq_c),
- pipe_src_param,
- 1);
- }
-
- // calculate how to split the det buffer space between luma and chroma
- handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
- print__rq_params_st(mode_lib, *rq_param);
-}
-
-void dml_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
- display_rq_regs_st *rq_regs,
- const display_pipe_source_params_st pipe_src_param)
-{
- display_rq_params_st rq_param = {0};
-
- memset(rq_regs, 0, sizeof(*rq_regs));
- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_src_param);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
-
- print__rq_regs_st(mode_lib, *rq_regs);
-}
-
-// Note: currently taken in as is.
-// It would be nice to decouple the code from the hw register implementation and extract the code that is repeated for luma and chroma.
-void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *e2e_pipe_param,
- const unsigned int num_pipes,
- const unsigned int pipe_idx,
- display_dlg_regs_st *disp_dlg_regs,
- display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
- const bool cstate_en,
- const bool pstate_en,
- const bool vm_en,
- const bool ignore_viewport_pos,
- const bool immediate_flip_support)
-{
- const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
- const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
- const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
- const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
- const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
- const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
-
- // -------------------------
- // Section 1.15.2.1: OTG dependent Params
- // -------------------------
- // Timing
- unsigned int htotal = dst->htotal;
-// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
- unsigned int hblank_end = dst->hblank_end;
- unsigned int vblank_start = dst->vblank_start;
- unsigned int vblank_end = dst->vblank_end;
- unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
-
- double dppclk_freq_in_mhz = clks->dppclk_mhz;
- double dispclk_freq_in_mhz = clks->dispclk_mhz;
- double refclk_freq_in_mhz = clks->refclk_mhz;
- double pclk_freq_in_mhz = dst->pixel_rate_mhz;
- bool interlaced = dst->interlaced;
-
- double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
-
- double min_dcfclk_mhz;
- double t_calc_us;
- double min_ttu_vblank;
-
- double min_dst_y_ttu_vblank;
- unsigned int dlg_vblank_start;
- bool dual_plane;
- bool mode_422;
- unsigned int access_dir;
- unsigned int vp_height_l;
- unsigned int vp_width_l;
- unsigned int vp_height_c;
- unsigned int vp_width_c;
-
- // Scaling
- unsigned int htaps_l;
- unsigned int htaps_c;
- double hratio_l;
- double hratio_c;
- double vratio_l;
- double vratio_c;
- bool scl_enable;
-
- double line_time_in_us;
- // double vinit_l;
- // double vinit_c;
- // double vinit_bot_l;
- // double vinit_bot_c;
-
- // unsigned int swath_height_l;
- unsigned int swath_width_ub_l;
- // unsigned int dpte_bytes_per_row_ub_l;
- unsigned int dpte_groups_per_row_ub_l;
- // unsigned int meta_pte_bytes_per_frame_ub_l;
- // unsigned int meta_bytes_per_row_ub_l;
-
- // unsigned int swath_height_c;
- unsigned int swath_width_ub_c;
- // unsigned int dpte_bytes_per_row_ub_c;
- unsigned int dpte_groups_per_row_ub_c;
-
- unsigned int meta_chunks_per_row_ub_l;
- unsigned int meta_chunks_per_row_ub_c;
- unsigned int vupdate_offset;
- unsigned int vupdate_width;
- unsigned int vready_offset;
-
- unsigned int dppclk_delay_subtotal;
- unsigned int dispclk_delay_subtotal;
- unsigned int pixel_rate_delay_subtotal;
-
- unsigned int vstartup_start;
- unsigned int dst_x_after_scaler;
- unsigned int dst_y_after_scaler;
- double line_wait;
- double dst_y_prefetch;
- double dst_y_per_vm_vblank;
- double dst_y_per_row_vblank;
- double dst_y_per_vm_flip;
- double dst_y_per_row_flip;
- double min_dst_y_per_vm_vblank;
- double min_dst_y_per_row_vblank;
- double lsw;
- double vratio_pre_l;
- double vratio_pre_c;
- unsigned int req_per_swath_ub_l;
- unsigned int req_per_swath_ub_c;
- unsigned int meta_row_height_l;
- unsigned int meta_row_height_c;
- unsigned int swath_width_pixels_ub_l;
- unsigned int swath_width_pixels_ub_c;
- unsigned int scaler_rec_in_width_l;
- unsigned int scaler_rec_in_width_c;
- unsigned int dpte_row_height_l;
- unsigned int dpte_row_height_c;
- double hscale_pixel_rate_l;
- double hscale_pixel_rate_c;
- double min_hratio_fact_l;
- double min_hratio_fact_c;
- double refcyc_per_line_delivery_pre_l;
- double refcyc_per_line_delivery_pre_c;
- double refcyc_per_line_delivery_l;
- double refcyc_per_line_delivery_c;
-
- double refcyc_per_req_delivery_pre_l;
- double refcyc_per_req_delivery_pre_c;
- double refcyc_per_req_delivery_l;
- double refcyc_per_req_delivery_c;
-
- unsigned int full_recout_width;
- double xfc_transfer_delay;
- double xfc_precharge_delay;
- double xfc_remote_surface_flip_latency;
- double xfc_dst_y_delta_drq_limit;
- double xfc_prefetch_margin;
- double refcyc_per_req_delivery_pre_cur0;
- double refcyc_per_req_delivery_cur0;
- double refcyc_per_req_delivery_pre_cur1;
- double refcyc_per_req_delivery_cur1;
-
- memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
- memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
-
- dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
- dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
- dml_print("DML_DLG: %s: vm_en = %d\n", __func__, vm_en);
- dml_print("DML_DLG: %s: ignore_viewport_pos = %d\n", __func__, ignore_viewport_pos);
- dml_print("DML_DLG: %s: immediate_flip_support = %d\n", __func__, immediate_flip_support);
-
- dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
- dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
- dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
- dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
- dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
- ASSERT(ref_freq_to_pix_freq < 4.0);
-
- disp_dlg_regs->ref_freq_to_pix_freq =
- (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
- disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
- * dml_pow(2, 8));
- disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
- disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
- * (double) ref_freq_to_pix_freq);
- ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
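-
- // Worked example of the fixed-point encodings above, with hypothetical clocks
- // (refclk = 600MHz, pclk = 300MHz, htotal = 2200):
- //   ref_freq_to_pix_freq = 2.0 -> register value 2.0 * 2^19 = 0x100000
- //   refcyc_per_htotal          = 2.0 * 2200 * 2^8           = 0x113000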
-
- min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
- set_prefetch_mode(mode_lib, cstate_en, pstate_en, ignore_viewport_pos, immediate_flip_support);
- t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
- min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
-
- min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
- dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
-
- disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
- + min_dst_y_ttu_vblank) * dml_pow(2, 2));
- ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
-
- dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
- __func__,
- min_dcfclk_mhz);
- dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
- __func__,
- min_ttu_vblank);
- dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
- __func__,
- min_dst_y_ttu_vblank);
- dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
- __func__,
- t_calc_us);
- dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
- __func__,
- disp_dlg_regs->min_dst_y_next_start);
- dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
- __func__,
- ref_freq_to_pix_freq);
-
- // -------------------------
- // Section 1.15.2.2: Prefetch, Active and TTU
- // -------------------------
- // Prefetch Calc
- // Source
-// dcc_en = src.dcc;
- dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
- access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical access
-// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
-// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
- vp_height_l = src->viewport_height;
- vp_width_l = src->viewport_width;
- vp_height_c = src->viewport_height_c;
- vp_width_c = src->viewport_width_c;
-
- // Scaling
- htaps_l = taps->htaps;
- htaps_c = taps->htaps_c;
- hratio_l = scl->hscl_ratio;
- hratio_c = scl->hscl_ratio_c;
- vratio_l = scl->vscl_ratio;
- vratio_c = scl->vscl_ratio_c;
- scl_enable = scl->scl_enable;
-
- line_time_in_us = (htotal / pclk_freq_in_mhz);
-// vinit_l = scl.vinit;
-// vinit_c = scl.vinit_c;
-// vinit_bot_l = scl.vinit_bot;
-// vinit_bot_c = scl.vinit_bot_c;
-
-// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
-// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
-// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
-// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
-
-// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
-
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
- vupdate_offset = dst->vupdate_offset;
- vupdate_width = dst->vupdate_width;
- vready_offset = dst->vready_offset;
-
- dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
- dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
-
- if (scl_enable)
- dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
- else
- dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
-
- dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
- + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
-
- if (dout->dsc_enable) {
- double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
-
- dispclk_delay_subtotal += dsc_delay;
- }
-
- pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
- + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
-
- vstartup_start = dst->vstartup_start;
- if (interlaced) {
- if (vstartup_start / 2.0
- - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
- <= vblank_end / 2.0)
- disp_dlg_regs->vready_after_vcount0 = 1;
- else
- disp_dlg_regs->vready_after_vcount0 = 0;
- } else {
- if (vstartup_start
- - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
- <= vblank_end)
- disp_dlg_regs->vready_after_vcount0 = 1;
- else
- disp_dlg_regs->vready_after_vcount0 = 0;
- }
-
- // TODO: Where is this coming from?
- if (interlaced)
- vstartup_start = vstartup_start / 2;
-
- // TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
- if (vstartup_start >= min_vblank) {
- dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
- __func__,
- vblank_start,
- vblank_end);
- dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
- __func__,
- vstartup_start,
- min_vblank);
- min_vblank = vstartup_start + 1;
- dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
- __func__,
- vstartup_start,
- min_vblank);
- }
-
- dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
- dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
-
- dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
- dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
- __func__,
- pixel_rate_delay_subtotal);
- dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n",
- __func__,
- dst_x_after_scaler);
- dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n",
- __func__,
- dst_y_after_scaler);
-
- // Lwait
- line_wait = mode_lib->soc.urgent_latency_us;
- if (cstate_en)
- line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
- if (pstate_en)
- line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
- + mode_lib->soc.urgent_latency_us,
- line_wait);
- line_wait = line_wait / line_time_in_us;
-
- dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
- dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
-
- dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
- dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
-
- min_dst_y_per_vm_vblank = 8.0;
- min_dst_y_per_row_vblank = 16.0;
-
- // magic!
- if (htotal <= 75) {
- min_vblank = 300;
- min_dst_y_per_vm_vblank = 100.0;
- min_dst_y_per_row_vblank = 100.0;
- }
-
- dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
- dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
-
- ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
- ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
-
- ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
- lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
-
- dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
-
- vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
- vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
-
- dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
- dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
-
- // Active
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
- meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
- swath_width_pixels_ub_l = 0;
- swath_width_pixels_ub_c = 0;
- scaler_rec_in_width_l = 0;
- scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
-
- if (mode_422) {
- swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
- swath_width_pixels_ub_c = swath_width_ub_c * 2;
- } else {
- swath_width_pixels_ub_l = swath_width_ub_l * 1;
- swath_width_pixels_ub_c = swath_width_ub_c * 1;
- }
-
- hscale_pixel_rate_l = 0.;
- hscale_pixel_rate_c = 0.;
- min_hratio_fact_l = 1.0;
- min_hratio_fact_c = 1.0;
-
- if (htaps_l <= 1)
- min_hratio_fact_l = 2.0;
- else if (htaps_l <= 6) {
- if ((hratio_l * 2.0) > 4.0)
- min_hratio_fact_l = 4.0;
- else
- min_hratio_fact_l = hratio_l * 2.0;
- } else {
- if (hratio_l > 4.0)
- min_hratio_fact_l = 4.0;
- else
- min_hratio_fact_l = hratio_l;
- }
-
- hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
-
- if (htaps_c <= 1)
- min_hratio_fact_c = 2.0;
- else if (htaps_c <= 6) {
- if ((hratio_c * 2.0) > 4.0)
- min_hratio_fact_c = 4.0;
- else
- min_hratio_fact_c = hratio_c * 2.0;
- } else {
- if (hratio_c > 4.0)
- min_hratio_fact_c = 4.0;
- else
- min_hratio_fact_c = hratio_c;
- }
-
- hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
-
- refcyc_per_line_delivery_pre_l = 0.;
- refcyc_per_line_delivery_pre_c = 0.;
- refcyc_per_line_delivery_l = 0.;
- refcyc_per_line_delivery_c = 0.;
-
- refcyc_per_req_delivery_pre_l = 0.;
- refcyc_per_req_delivery_pre_c = 0.;
- refcyc_per_req_delivery_l = 0.;
- refcyc_per_req_delivery_c = 0.;
-
- full_recout_width = 0;
- // In ODM
- if (src->is_hsplit) {
- // This "hack" is only allowed (and valid) for MPC combine. In ODM
- // combine, you MUST specify the full_recout_width...according to Oswin
- if (dst->full_recout_width == 0 && !dst->odm_combine) {
- dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
- __func__);
- full_recout_width = dst->recout_width * 2; // assume half split for dcn1
- } else
- full_recout_width = dst->full_recout_width;
- } else
- full_recout_width = dst->recout_width;
-
- // mpc_combine and odm_combine are mutually exclusive
- refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
- refclk_freq_in_mhz,
- pclk_freq_in_mhz,
- dst->odm_combine,
- full_recout_width,
- dst->hactive,
- vratio_pre_l,
- hscale_pixel_rate_l,
- swath_width_pixels_ub_l,
- 1); // per line
-
- refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
- refclk_freq_in_mhz,
- pclk_freq_in_mhz,
- dst->odm_combine,
- full_recout_width,
- dst->hactive,
- vratio_l,
- hscale_pixel_rate_l,
- swath_width_pixels_ub_l,
- 1); // per line
-
- dml_print("DML_DLG: %s: full_recout_width = %d\n",
- __func__,
- full_recout_width);
- dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
- __func__,
- hscale_pixel_rate_l);
- dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
- __func__,
- refcyc_per_line_delivery_pre_l);
- dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
- __func__,
- refcyc_per_line_delivery_l);
-
- if (dual_plane) {
- refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
- refclk_freq_in_mhz,
- pclk_freq_in_mhz,
- dst->odm_combine,
- full_recout_width,
- dst->hactive,
- vratio_pre_c,
- hscale_pixel_rate_c,
- swath_width_pixels_ub_c,
- 1); // per line
-
- refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
- refclk_freq_in_mhz,
- pclk_freq_in_mhz,
- dst->odm_combine,
- full_recout_width,
- dst->hactive,
- vratio_c,
- hscale_pixel_rate_c,
- swath_width_pixels_ub_c,
- 1); // per line
-
- dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
- __func__,
- refcyc_per_line_delivery_pre_c);
- dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
- __func__,
- refcyc_per_line_delivery_c);
- }
-
- // TTU - Luma / Chroma
- if (access_dir) { // vertical access
- scaler_rec_in_width_l = vp_height_l;
- scaler_rec_in_width_c = vp_height_c;
- } else {
- scaler_rec_in_width_l = vp_width_l;
- scaler_rec_in_width_c = vp_width_c;
- }
-
- refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
- refclk_freq_in_mhz,
- pclk_freq_in_mhz,
- dst->odm_combine,
- full_recout_width,
- dst->hactive,
- vratio_pre_l,
- hscale_pixel_rate_l,
- scaler_rec_in_width_l,
- req_per_swath_ub_l); // per req
- refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
- refclk_freq_in_mhz,
- pclk_freq_in_mhz,
- dst->odm_combine,
- full_recout_width,
- dst->hactive,
- vratio_l,
- hscale_pixel_rate_l,
- scaler_rec_in_width_l,
- req_per_swath_ub_l); // per req
-
- dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
- __func__,
- refcyc_per_req_delivery_pre_l);
- dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
- __func__,
- refcyc_per_req_delivery_l);
-
- ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
- ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
-
- if (dual_plane) {
- refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
- refclk_freq_in_mhz,
- pclk_freq_in_mhz,
- dst->odm_combine,
- full_recout_width,
- dst->hactive,
- vratio_pre_c,
- hscale_pixel_rate_c,
- scaler_rec_in_width_c,
- req_per_swath_ub_c); // per req
- refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
- refclk_freq_in_mhz,
- pclk_freq_in_mhz,
- dst->odm_combine,
- full_recout_width,
- dst->hactive,
- vratio_c,
- hscale_pixel_rate_c,
- scaler_rec_in_width_c,
- req_per_swath_ub_c); // per req
-
- dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
- __func__,
- refcyc_per_req_delivery_pre_c);
- dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
- __func__,
- refcyc_per_req_delivery_c);
-
- ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
- ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
- }
-
- // XFC
- xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
- xfc_precharge_delay = get_xfc_precharge_delay(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
- xfc_prefetch_margin = get_xfc_prefetch_margin(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
-
- // TTU - Cursor
- refcyc_per_req_delivery_pre_cur0 = 0.0;
- refcyc_per_req_delivery_cur0 = 0.0;
- if (src->num_cursors > 0) {
- calculate_ttu_cursor(mode_lib,
- &refcyc_per_req_delivery_pre_cur0,
- &refcyc_per_req_delivery_cur0,
- refclk_freq_in_mhz,
- ref_freq_to_pix_freq,
- hscale_pixel_rate_l,
- scl->hscl_ratio,
- vratio_pre_l,
- vratio_l,
- src->cur0_src_width,
- (enum cursor_bpp)(src->cur0_bpp));
- }
-
- refcyc_per_req_delivery_pre_cur1 = 0.0;
- refcyc_per_req_delivery_cur1 = 0.0;
- if (src->num_cursors > 1) {
- calculate_ttu_cursor(mode_lib,
- &refcyc_per_req_delivery_pre_cur1,
- &refcyc_per_req_delivery_cur1,
- refclk_freq_in_mhz,
- ref_freq_to_pix_freq,
- hscale_pixel_rate_l,
- scl->hscl_ratio,
- vratio_pre_l,
- vratio_l,
- src->cur1_src_width,
- (enum cursor_bpp)(src->cur1_bpp));
- }
-
- // TTU - Misc
- // all hard-coded
-
- // Assignment to register structures
- disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
- disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
- ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
- disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
- disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
- disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
- disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
- disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
-
- disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
- disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
-
- disp_dlg_regs->refcyc_per_pte_group_vblank_l =
- (unsigned int) (dst_y_per_row_vblank * (double) htotal
- * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
- ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
-
- if (dual_plane) {
- disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
- * (double) htotal * ref_freq_to_pix_freq
- / (double) dpte_groups_per_row_ub_c);
- ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
- < (unsigned int) dml_pow(2, 13));
- }
-
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
- (unsigned int) (dst_y_per_row_vblank * (double) htotal
- * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
- ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
-
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
-
- disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
- * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
- disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
- * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
-
- if (dual_plane) {
- disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
- * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
- disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
- * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
- }
-
- disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
- / (double) vratio_l * dml_pow(2, 2));
- ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
-
- if (dual_plane) {
- disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
- / (double) vratio_c * dml_pow(2, 2));
- if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
- dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
- __func__,
- disp_dlg_regs->dst_y_per_pte_row_nom_c,
- (unsigned int) dml_pow(2, 17) - 1);
- }
- }
-
- disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
- / (double) vratio_l * dml_pow(2, 2));
- ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
-
- disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
-
- disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
- / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
- / (double) dpte_groups_per_row_ub_l);
- if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
- disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
- disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
- / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
- / (double) meta_chunks_per_row_ub_l);
- if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
- disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
-
- if (dual_plane) {
- disp_dlg_regs->refcyc_per_pte_group_nom_c =
- (unsigned int) ((double) dpte_row_height_c / (double) vratio_c
- * (double) htotal * ref_freq_to_pix_freq
- / (double) dpte_groups_per_row_ub_c);
- if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
- disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
-
- // TODO: Is this the right calculation? Does htotal need to be halved?
- disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
- (unsigned int) ((double) meta_row_height_c / (double) vratio_c
- * (double) htotal * ref_freq_to_pix_freq
- / (double) meta_chunks_per_row_ub_c);
- if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
- disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
- }
-
- disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l,
- 1);
- disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l,
- 1);
- ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
- ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
-
- disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c,
- 1);
- disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c,
- 1);
- ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
- ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
-
- disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
- disp_dlg_regs->dst_y_offset_cur0 = 0;
- disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
- disp_dlg_regs->dst_y_offset_cur1 = 0;
-
- disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
- disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
- disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
- disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(xfc_prefetch_margin * refclk_freq_in_mhz,
- 1);
-
- // slave has to have this value also set to off
- if (src->xfc_enable && !src->xfc_slave)
- disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
- else
- disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
-
- disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
- * dml_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
- * dml_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
- * dml_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
- * dml_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
- (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
- * dml_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
- (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
- disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
- * dml_pow(2, 10));
- disp_ttu_regs->qos_level_low_wm = 0;
- ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
- disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
- * ref_freq_to_pix_freq);
- ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
-
- disp_ttu_regs->qos_level_flip = 14;
- disp_ttu_regs->qos_level_fixed_l = 8;
- disp_ttu_regs->qos_level_fixed_c = 8;
- disp_ttu_regs->qos_level_fixed_cur0 = 8;
- disp_ttu_regs->qos_ramp_disable_l = 0;
- disp_ttu_regs->qos_ramp_disable_c = 0;
- disp_ttu_regs->qos_ramp_disable_cur0 = 0;
-
- disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
- ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
-
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
-}
-
-void dml_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
- display_dlg_regs_st *dlg_regs,
- display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
- const unsigned int num_pipes,
- const unsigned int pipe_idx,
- const bool cstate_en,
- const bool pstate_en,
- const bool vm_en,
- const bool ignore_viewport_pos,
- const bool immediate_flip_support)
-{
- display_rq_params_st rq_param = {0};
- display_dlg_sys_params_st dlg_sys_param = {0};
-
- // Get watermark and Tex.
- dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
- dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
- e2e_pipe_param,
- num_pipes);
- dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
- dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
- dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
- dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
- dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
- e2e_pipe_param,
- num_pipes);
- dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
- e2e_pipe_param,
- num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
-
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
-
- // system parameter calculation done
-
- dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
- dml_rq_dlg_get_dlg_params(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx,
- dlg_regs,
- ttu_regs,
- rq_param.dlg,
- dlg_sys_param,
- cstate_en,
- pstate_en,
- vm_en,
- ignore_viewport_pos,
- immediate_flip_support);
- dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
-}
-
-void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param)
-{
- memset(arb_param, 0, sizeof(*arb_param));
- arb_param->max_req_outstanding = 256;
- arb_param->min_req_outstanding = 68;
- arb_param->sat_level_us = 60;
-}
-
-void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
- double *refcyc_per_req_delivery_pre_cur,
- double *refcyc_per_req_delivery_cur,
- double refclk_freq_in_mhz,
- double ref_freq_to_pix_freq,
- double hscale_pixel_rate_l,
- double hscl_ratio,
- double vratio_pre_l,
- double vratio_l,
- unsigned int cur_width,
- enum cursor_bpp cur_bpp)
-{
- unsigned int cur_src_width = cur_width;
- unsigned int cur_req_size = 0;
- unsigned int cur_req_width = 0;
- double cur_width_ub = 0.0;
- double cur_req_per_width = 0.0;
- double hactive_cur = 0.0;
-
- ASSERT(cur_src_width <= 256);
-
- *refcyc_per_req_delivery_pre_cur = 0.0;
- *refcyc_per_req_delivery_cur = 0.0;
- if (cur_src_width > 0) {
- unsigned int cur_bit_per_pixel = 0;
-
- if (cur_bpp == dm_cur_2bit) {
- cur_req_size = 64; // byte
- cur_bit_per_pixel = 2;
- } else { // 32bit
- cur_bit_per_pixel = 32;
- if (cur_src_width >= 1 && cur_src_width <= 16)
- cur_req_size = 64;
- else if (cur_src_width >= 17 && cur_src_width <= 31)
- cur_req_size = 128;
- else
- cur_req_size = 256;
- }
-
- cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
- cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
- * (double) cur_req_width;
- cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
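-
- // Example -- a sketch for a hypothetical 64 pixel wide 32bpp cursor with
- // hscl_ratio = 1.0:
- //   cur_req_size = 256B, cur_req_width = 256 / (32 / 8) = 64 pixels
- //   cur_width_ub = ceil(64 / 64) * 64 = 64, cur_req_per_width = 1
- //   hactive_cur  = 64 / 1.0 = 64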
-
- if (vratio_pre_l <= 1.0) {
- *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
- / (double) cur_req_per_width;
- } else {
- *refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
- * (double) cur_src_width / hscale_pixel_rate_l
- / (double) cur_req_per_width;
- }
-
- ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
-
- if (vratio_l <= 1.0) {
- *refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
- / (double) cur_req_per_width;
- } else {
- *refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
- * (double) cur_src_width / hscale_pixel_rate_l
- / (double) cur_req_per_width;
- }
-
- dml_print("DML_DLG: %s: cur_req_width = %d\n",
- __func__,
- cur_req_width);
- dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
- __func__,
- cur_width_ub);
- dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
- __func__,
- cur_req_per_width);
- dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
- __func__,
- hactive_cur);
- dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
- __func__,
- *refcyc_per_req_delivery_pre_cur);
- dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
- __func__,
- *refcyc_per_req_delivery_cur);
-
- ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
- }
-}
-
-unsigned int dml_rq_dlg_get_calculated_vstartup(struct display_mode_lib *mode_lib,
- display_e2e_pipe_params_st *e2e_pipe_param,
- const unsigned int num_pipes,
- const unsigned int pipe_idx)
-{
- unsigned int vstartup_pipe[DC__NUM_PIPES__MAX];
- bool visited[DC__NUM_PIPES__MAX];
- unsigned int pipe_inst = 0;
- unsigned int i, j, k;
-
- for (k = 0; k < num_pipes; ++k)
- visited[k] = false;
-
- for (i = 0; i < num_pipes; i++) {
- if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
- unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
-
- for (j = i; j < num_pipes; j++) {
- if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp
- && e2e_pipe_param[j].pipe.src.is_hsplit
- && !visited[j]) {
- vstartup_pipe[j] = get_vstartup_calculated(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_inst);
- visited[j] = true;
- }
- }
-
- pipe_inst++;
- }
-
- if (!visited[i]) {
- vstartup_pipe[i] = get_vstartup_calculated(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_inst);
- visited[i] = true;
- pipe_inst++;
- }
- }
-
- return vstartup_pipe[pipe_idx];
-
-}
-
-void dml_rq_dlg_get_row_heights(struct display_mode_lib *mode_lib,
- unsigned int *o_dpte_row_height,
- unsigned int *o_meta_row_height,
- unsigned int vp_width,
- unsigned int data_pitch,
- int source_format,
- int tiling,
- int macro_tile_size,
- int source_scan,
- int is_chroma)
-{
- display_data_rq_dlg_params_st rq_dlg_param;
- display_data_rq_misc_params_st rq_misc_param;
- display_data_rq_sizing_params_st rq_sizing_param;
-
- get_meta_and_pte_attr(mode_lib,
- &rq_dlg_param,
- &rq_misc_param,
- &rq_sizing_param,
- vp_width,
- 0, // dummy
- data_pitch,
- 0, // dummy
- source_format,
- tiling,
- macro_tile_size,
- source_scan,
- is_chroma);
-
- *o_dpte_row_height = rq_dlg_param.dpte_row_height;
- *o_meta_row_height = rq_dlg_param.meta_row_height;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
deleted file mode 100644
index efdd4c7..0000000
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DML2_DISPLAY_RQ_DLG_CALC_H__
-#define __DML2_DISPLAY_RQ_DLG_CALC_H__
-
-#include "dml_common_defs.h"
-#include "display_rq_dlg_helpers.h"
-
-struct display_mode_lib;
-
-// Function: dml_rq_dlg_get_rq_params
-// Calculate requestor related parameters that are register definition agnostic
-// (i.e. this layer does try to separate real values from register definition)
-// Input:
-// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
-// Output:
-// rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.)
-//
-void dml_rq_dlg_get_rq_params(
- struct display_mode_lib *mode_lib,
- display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param);
-
-// Function: dml_rq_dlg_get_rq_reg
-// Main entry point for test to get the register values out of this DML class.
-// This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
-// and then populate the rq_regs struct
-// Input:
-// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
-// Output:
-// rq_regs - struct that holds all the RQ register field values.
-// See also: <display_rq_regs_st>
-void dml_rq_dlg_get_rq_reg(
- struct display_mode_lib *mode_lib,
- display_rq_regs_st *rq_regs,
- const display_pipe_source_params_st pipe_src_param);
-
-// Function: dml_rq_dlg_get_dlg_params
-// Calculate deadline related parameters
-//
-void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
- const display_e2e_pipe_params_st *e2e_pipe_param,
- const unsigned int num_pipes,
- const unsigned int pipe_idx,
- display_dlg_regs_st *disp_dlg_regs,
- display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
- const bool cstate_en,
- const bool pstate_en,
- const bool vm_en,
- const bool ignore_viewport_pos,
- const bool immediate_flip_support);
-
-// Function: dml_rq_dlg_get_dlg_param_prefetch
-// For flip_bw programming guide change, now dml needs to calculate the flip_bytes and prefetch_bw
-// for ALL pipes and use this info to calculate the prefetch programming.
-// Output: prefetch_param.prefetch_bw and flip_bytes
-void dml_rq_dlg_get_dlg_params_prefetch(
- struct display_mode_lib *mode_lib,
- display_dlg_prefetch_param_st *prefetch_param,
- display_rq_dlg_params_st rq_dlg_param,
- display_dlg_sys_params_st dlg_sys_param,
- display_e2e_pipe_params_st e2e_pipe_param,
- const bool cstate_en,
- const bool pstate_en,
- const bool vm_en);
-
-// Function: dml_rq_dlg_get_dlg_reg
-// Calculate and return DLG and TTU register struct given the system setting
-// Output:
-// dlg_regs - output DLG register struct
-// ttu_regs - output DLG TTU register struct
-// Input:
-// e2e_pipe_param - "compacted" array of e2e pipe param struct
-// num_pipes - num of active "pipe" or "route"
-// pipe_idx - index that identifies the e2e_pipe_param corresponding to this dlg
-// cstate - 0: when calculating min_ttu_vblank, it is assumed cstate is not required. 1: Normal mode, cstate is considered.
-// Added for legacy or unrealistic timing tests.
-void dml_rq_dlg_get_dlg_reg(
- struct display_mode_lib *mode_lib,
- display_dlg_regs_st *dlg_regs,
- display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
- const unsigned int num_pipes,
- const unsigned int pipe_idx,
- const bool cstate_en,
- const bool pstate_en,
- const bool vm_en,
- const bool ignore_viewport_pos,
- const bool immediate_flip_support);
-
-// Function: dml_rq_dlg_get_calculated_vstartup
-// Calculate and return vstartup
-// Output:
-// unsigned int vstartup
-// Input:
-// e2e_pipe_param - "compacted" array of e2e pipe param struct
-// num_pipes - num of active "pipe" or "route"
-// pipe_idx - index that identifies the e2e_pipe_param corresponding to this dlg
-// NOTE: this MUST be called after setting the prefetch mode!
-unsigned int dml_rq_dlg_get_calculated_vstartup(
- struct display_mode_lib *mode_lib,
- display_e2e_pipe_params_st *e2e_pipe_param,
- const unsigned int num_pipes,
- const unsigned int pipe_idx);
-
-// Function: dml_rq_dlg_get_row_heights
-// Calculate dpte and meta row heights
-void dml_rq_dlg_get_row_heights(
- struct display_mode_lib *mode_lib,
- unsigned int *o_dpte_row_height,
- unsigned int *o_meta_row_height,
- unsigned int vp_width,
- unsigned int data_pitch,
- int source_format,
- int tiling,
- int macro_tile_size,
- int source_scan,
- int is_chroma);
-
-// Function: dml_rq_dlg_get_arb_params
-void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 189052e..48400d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -24,6 +24,7 @@
*/
#include "display_rq_dlg_helpers.h"
+#include "dml_logger.h"
void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 1e4b1e3..c2037da 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -28,6 +28,15 @@
#include "dml_inline_defs.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
unsigned int ret_val = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
index 987d767..3041649 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -27,10 +27,11 @@
#define __DISPLAY_RQ_DLG_CALC_H__
#include "dml_common_defs.h"
-#include "display_rq_dlg_helpers.h"
struct display_mode_lib;
+#include "display_rq_dlg_helpers.h"
+
void dml1_extract_rq_regs(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
index b2847bc..f78cbae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
@@ -31,8 +31,6 @@
#include "display_mode_structs.h"
#include "display_mode_enums.h"
-#define dml_print(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
-#define DTRACE(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
double dml_round(double a);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index e68086b..f9cf083 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -28,6 +28,7 @@
#include "dml_common_defs.h"
#include "../calcs/dcn_calc_math.h"
+#include "dml_logger.h"
static inline double dml_min(double a, double b)
{
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
index 7d1eec5..465859b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_logger.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,20 +19,20 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: AMD
+ *
*/
-#ifndef _PP_INSTANCE_H_
-#define _PP_INSTANCE_H_
-#include "hwmgr.h"
-struct pp_instance {
- uint32_t chip_family;
- uint32_t chip_id;
- bool pm_en;
- uint32_t feature_mask;
- void *device;
- struct pp_hwmgr *hwmgr;
- struct mutex pp_lock;
-};
+#ifndef __DML_LOGGER_H_
+#define __DML_LOGGER_H_
+
+#define DC_LOGGER \
+ mode_lib->logger
+
+#define dml_print(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
+#define DTRACE(str, ...) {DC_LOG_DML(str, ##__VA_ARGS__); }
#endif
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
index bc7d8c7..324239c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
@@ -27,6 +27,16 @@
#include "dc_features.h"
#include "dml_inline_defs.h"
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it, the code should
+ * remain as-is, as it provides us with a guarantee from HW that it is correct.
+ */
+
void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
{
to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
index 4ced9a7..ea3f888 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
@@ -34,9 +34,10 @@
#include "hw_factory_dce120.h"
-#include "vega10/DC/dce_12_0_offset.h"
-#include "vega10/DC/dce_12_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
#define block HPD
#define reg_num 0
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
index af3843a..39ef5c7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
@@ -33,9 +33,10 @@
#include "include/gpio_types.h"
#include "../hw_translate.h"
-#include "vega10/DC/dce_12_0_offset.h"
-#include "vega10/DC/dce_12_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
/* begin *********************
 * macros to expand register list macro defined in HW object header file */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
index 409763c..32aa47a 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
@@ -34,9 +34,10 @@
#include "hw_factory_dcn10.h"
-#include "raven1/DCN/dcn_1_0_offset.h"
-#include "raven1/DCN/dcn_1_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
#define block HPD
#define reg_num 0
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
index 64a6915..fecc8688 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
@@ -33,9 +33,10 @@
#include "include/gpio_types.h"
#include "../hw_translate.h"
-#include "raven1/DCN/dcn_1_0_offset.h"
-#include "raven1/DCN/dcn_1_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
/* begin *********************
 * macros to expand register list macro defined in HW object header file */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
index fc7a7d4..bb526ad 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -55,6 +55,8 @@ enum {
#define FROM_ENGINE(ptr) \
container_of((ptr), struct aux_engine, base)
+#define DC_LOGGER \
+ engine->base.ctx->logger
enum i2caux_engine_type dal_aux_engine_get_engine_type(
const struct engine *engine)
@@ -126,20 +128,8 @@ static void process_read_reply(
ctx->status =
I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
ctx->operation_succeeded = false;
- } else if (ctx->returned_byte < ctx->current_read_length) {
- ctx->current_read_length -= ctx->returned_byte;
-
- ctx->offset += ctx->returned_byte;
-
- ++ctx->invalid_reply_retry_aux_on_ack;
-
- if (ctx->invalid_reply_retry_aux_on_ack >
- AUX_INVALID_REPLY_RETRY_COUNTER) {
- ctx->status =
- I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
- ctx->operation_succeeded = false;
- }
} else {
+ ctx->current_read_length = ctx->returned_byte;
ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
ctx->transaction_complete = true;
ctx->operation_succeeded = true;
@@ -284,6 +274,15 @@ static bool read_command(
msleep(engine->delay);
} while (ctx.operation_succeeded && !ctx.transaction_complete);
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
+ request->payload.length = ctx.reply.length;
return ctx.operation_succeeded;
}
@@ -484,6 +483,14 @@ static bool write_command(
msleep(engine->delay);
} while (ctx.operation_succeeded && !ctx.transaction_complete);
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
return ctx.operation_succeeded;
}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
index 81f9f3e..5f47f6c 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -441,10 +441,6 @@ static void construct(
static void destruct(
struct aux_engine_dce110 *engine)
{
- struct aux_engine_dce110 *aux110 = engine;
-/*temp w/a, to do*/
- REG_UPDATE(AUX_ARB_CONTROL, AUX_DMCU_DONE_USING_AUX_REG, 1);
- REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
dal_aux_engine_destruct(&engine->base);
}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
index 56e25b3..abd0095 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -48,6 +48,8 @@
/*
* This unit
*/
+#define DC_LOGGER \
+ hw_engine->base.base.base.ctx->logger
enum dc_i2c_status {
DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
@@ -525,9 +527,7 @@ static void construct(
REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
if (xtal_ref_div == 0) {
- dm_logger_write(
- hw_engine->base.base.base.ctx->logger, LOG_WARNING,
- "Invalid base timer divider\n",
+ DC_LOG_WARNING("%s: Invalid base timer divider\n",
__func__);
xtal_ref_div = 2;
}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
index 668981a..0e7b182 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
@@ -36,9 +36,10 @@
#include "../dce110/aux_engine_dce110.h"
#include "../dce110/i2caux_dce110.h"
-#include "vega10/DC/dce_12_0_offset.h"
-#include "vega10/DC/dce_12_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
/* begin *********************
 * macros to expand register list macro defined in HW object header file */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
index 13b807d..e44a890 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
@@ -36,9 +36,10 @@
#include "../dce110/i2c_hw_engine_dce110.h"
#include "../dce110/i2caux_dce110.h"
-#include "raven1/DCN/dcn_1_0_offset.h"
-#include "raven1/DCN/dcn_1_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
/* begin *********************
 * macros to expand register list macro defined in HW object header file */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index e1593ff..5cbf662 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -253,6 +253,7 @@ bool dal_i2caux_submit_aux_command(
break;
}
+ cmd->payloads->length = request.payload.length;
++index_of_payload;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b69f321..8c51ad7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -119,6 +119,11 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *dc_stream);
+
+ enum dc_status (*remove_stream_from_ctx)(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
};
struct audio_support{
@@ -139,6 +144,7 @@ struct resource_pool {
struct timing_generator *timing_generators[MAX_PIPES];
struct stream_encoder *stream_enc[MAX_PIPES * 2];
+ struct hubbub *hubbub;
struct mpc *mpc;
struct pp_smu_funcs_rv *pp_smu;
struct pp_smu_display_requirement_rv pp_smu_req;
@@ -147,6 +153,7 @@ struct resource_pool {
unsigned int underlay_pipe_index;
unsigned int stream_enc_count;
unsigned int ref_clock_inKhz;
+ unsigned int timing_generator_count;
/*
* reserved clock source for DP
@@ -170,6 +177,15 @@ struct resource_pool {
const struct resource_caps *res_cap;
};
+struct dcn_fe_clocks {
+ int dppclk_khz;
+};
+
+struct dcn_fe_bandwidth {
+ struct dcn_fe_clocks calc;
+ struct dcn_fe_clocks cur;
+};
+
struct stream_resource {
struct output_pixel_processor *opp;
struct timing_generator *tg;
@@ -178,6 +194,8 @@ struct stream_resource {
struct pixel_clk_params pix_clk_params;
struct encoder_info_frame encoder_info_frame;
+
+ struct abm *abm;
};
struct plane_resource {
@@ -187,6 +205,9 @@ struct plane_resource {
struct input_pixel_processor *ipp;
struct transform *xfm;
struct dpp *dpp;
+ uint8_t mpcc_inst;
+
+ struct dcn_fe_bandwidth bw;
};
struct pipe_ctx {
@@ -211,7 +232,6 @@ struct pipe_ctx {
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
#endif
- struct dwbc *dwbc;
};
struct resource_context {
@@ -238,19 +258,9 @@ struct dce_bw_output {
int blackout_recovery_time_us;
};
-struct dcn_bw_clocks {
- int dispclk_khz;
- bool dppclk_div;
- int dcfclk_khz;
- int dcfclk_deep_sleep_khz;
- int fclk_khz;
- int dram_ccm_us;
- int min_active_dram_ccm_us;
-};
-
struct dcn_bw_output {
- struct dcn_bw_clocks cur_clk;
- struct dcn_bw_clocks calc_clk;
+ struct dc_clocks cur_clk;
+ struct dc_clocks calc_clk;
struct dcn_watermark_set watermarks;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 0bf73b7..090b7a8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,7 +102,7 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 616c73e..2f783c6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -53,7 +53,7 @@ bool perform_link_training_with_retries(
bool is_mst_supported(struct dc_link *link);
-void detect_dp_sink_caps(struct dc_link *link);
+bool detect_dp_sink_caps(struct dc_link *link);
void detect_edp_sink_caps(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index ae2399f..a9bfe9f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -130,6 +130,9 @@ enum bw_defines {
struct bw_calcs_dceip {
enum bw_calcs_version version;
+ uint32_t percent_of_ideal_port_bw_received_after_urgent_latency;
+ uint32_t max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation;
+ uint32_t max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation;
bool large_cursor;
uint32_t cursor_max_outstanding_group_num;
bool dmif_pipe_en_fbc_chunk_tracker;
@@ -230,6 +233,7 @@ struct bw_calcs_vbios {
struct bw_calcs_data {
/* data for all displays */
+ bool display_synchronization_enabled;
uint32_t number_of_displays;
enum bw_defines underlay_surface_type;
enum bw_defines panning_and_bezel_adjustment;
@@ -241,6 +245,7 @@ struct bw_calcs_data {
bool d1_display_write_back_dwb_enable;
enum bw_defines d1_underlay_mode;
+ bool increase_voltage_to_support_mclk_switch;
bool cpup_state_change_enable;
bool cpuc_state_change_enable;
bool nbp_state_change_enable;
@@ -449,6 +454,7 @@ struct bw_calcs_data {
struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
struct bw_fixed min_dram_speed_change_margin[3][8];
struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
+ struct bw_fixed dispclk_required_for_dram_speed_change_pipe[3][8];
struct bw_fixed blackout_duration_margin[3][8];
struct bw_fixed dispclk_required_for_blackout_duration[3][8];
struct bw_fixed dispclk_required_for_blackout_recovery[3][8];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 1e231f6..132d18d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -349,10 +349,10 @@ struct dcn_bw_internal_vars {
float dst_x_after_scaler;
float dst_y_after_scaler;
float time_calc;
- float v_update_offset[number_of_planes_minus_one + 1];
+ float v_update_offset[number_of_planes_minus_one + 1][2];
float total_repeater_delay;
- float v_update_width[number_of_planes_minus_one + 1];
- float v_ready_offset[number_of_planes_minus_one + 1];
+ float v_update_width[number_of_planes_minus_one + 1][2];
+ float v_ready_offset[number_of_planes_minus_one + 1][2];
float time_setup;
float extra_latency;
float maximum_vstartup;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index c93b9b9..a83a484 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -27,9 +27,19 @@
#include "dm_services_types.h"
+struct abm_backlight_registers {
+ unsigned int BL_PWM_CNTL;
+ unsigned int BL_PWM_CNTL2;
+ unsigned int BL_PWM_PERIOD_CNTL;
+ unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
+
+ /* register settings need to be saved and restored at InitBacklight */
+ struct abm_backlight_registers stored_backlight_registers;
};
struct abm_funcs {
@@ -40,9 +50,9 @@ struct abm_funcs {
bool (*set_backlight_level)(struct abm *abm,
unsigned int backlight_level,
unsigned int frame_ramp,
- unsigned int controller_id);
+ unsigned int controller_id,
+ bool use_smooth_brightness);
unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
- bool (*is_dmcu_initialized)(struct abm *abm);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 0574c29..de60f94 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -27,12 +27,22 @@
#include "dm_services_types.h"
+enum dmcu_state {
+ DMCU_NOT_INITIALIZED = 0,
+ DMCU_RUNNING = 1
+};
+
struct dmcu {
struct dc_context *ctx;
const struct dmcu_funcs *funcs;
+
+ enum dmcu_state dmcu_state;
+ struct dmcu_version dmcu_version;
+ unsigned int cached_wait_loop_number;
};
struct dmcu_funcs {
+ bool (*dmcu_init)(struct dmcu *dmcu);
bool (*load_iram)(struct dmcu *dmcu,
unsigned int start_offset,
const char *src,
@@ -44,7 +54,9 @@ struct dmcu_funcs {
void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
void (*set_psr_wait_loop)(struct dmcu *dmcu,
unsigned int wait_loop_number);
- void (*get_psr_wait_loop)(unsigned int *psr_wait_loop_number);
+ void (*get_psr_wait_loop)(struct dmcu *dmcu,
+ unsigned int *psr_wait_loop_number);
+ bool (*is_dmcu_initialized)(struct dmcu *dmcu);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 9420dfb..9999560 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -35,6 +35,8 @@ struct dpp {
int inst;
struct dpp_caps *caps;
struct pwl_params regamma_params;
+ struct pwl_params degamma_params;
+
};
struct dpp_grph_csc_adjustment {
@@ -62,63 +64,67 @@ struct dpp_funcs {
struct dpp *dpp,
const struct dpp_grph_csc_adjustment *adjust);
- void (*opp_set_csc_default)(
+ void (*dpp_set_csc_default)(
struct dpp *dpp,
enum dc_color_space colorspace);
- void (*opp_set_csc_adjustment)(
+ void (*dpp_set_csc_adjustment)(
struct dpp *dpp,
- const struct out_csc_color_matrix *tbl_entry);
+ const uint16_t *regval);
- void (*opp_power_on_regamma_lut)(
+ void (*dpp_power_on_regamma_lut)(
struct dpp *dpp,
bool power_on);
- void (*opp_program_regamma_lut)(
+ void (*dpp_program_regamma_lut)(
struct dpp *dpp,
const struct pwl_result_data *rgb,
uint32_t num);
- void (*opp_configure_regamma_lut)(
+ void (*dpp_configure_regamma_lut)(
struct dpp *dpp,
bool is_ram_a);
- void (*opp_program_regamma_lutb_settings)(
+ void (*dpp_program_regamma_lutb_settings)(
struct dpp *dpp,
const struct pwl_params *params);
- void (*opp_program_regamma_luta_settings)(
+ void (*dpp_program_regamma_luta_settings)(
struct dpp *dpp,
const struct pwl_params *params);
- void (*opp_program_regamma_pwl)(
- struct dpp *dpp, const struct pwl_params *params);
+ void (*dpp_program_regamma_pwl)(
+ struct dpp *dpp,
+ const struct pwl_params *params,
+ enum opp_regamma mode);
- void (*opp_set_regamma_mode)(
- struct dpp *dpp_base,
- enum opp_regamma mode);
+ void (*dpp_program_bias_and_scale)(
+ struct dpp *dpp,
+ struct dc_bias_and_scale *params);
- void (*ipp_set_degamma)(
+ void (*dpp_set_degamma)(
struct dpp *dpp_base,
enum ipp_degamma_mode mode);
- void (*ipp_program_input_lut)(
+ void (*dpp_program_input_lut)(
struct dpp *dpp_base,
const struct dc_gamma *gamma);
- void (*ipp_program_degamma_pwl)(struct dpp *dpp_base,
+ void (*dpp_program_degamma_pwl)(struct dpp *dpp_base,
const struct pwl_params *params);
- void (*ipp_setup)(
+ void (*dpp_setup)(
struct dpp *dpp_base,
- enum surface_pixel_format input_format,
- enum expansion_mode mode);
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space);
- void (*ipp_full_bypass)(struct dpp *dpp_base);
+ void (*dpp_full_bypass)(struct dpp *dpp_base);
void (*set_cursor_attributes)(
struct dpp *dpp_base,
- const struct dc_cursor_attributes *attr);
+ enum dc_cursor_color_format color_format);
void (*set_cursor_position)(
struct dpp *dpp_base,
@@ -126,6 +132,14 @@ struct dpp_funcs {
const struct dc_cursor_mi_param *param,
uint32_t width
);
+ void (*dpp_set_hdr_multiplier)(
+ struct dpp *dpp_base,
+ uint32_t multiplier);
+
+ void (*dpp_dppclk_control)(
+ struct dpp *dpp_base,
+ bool dppclk_div,
+ bool enable);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 0d186be..9ced254 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -28,15 +28,32 @@
#include "mem_input.h"
+
+enum cursor_pitch {
+ CURSOR_PITCH_64_PIXELS = 0,
+ CURSOR_PITCH_128_PIXELS,
+ CURSOR_PITCH_256_PIXELS
+};
+
+enum cursor_lines_per_chunk {
+ CURSOR_LINE_PER_CHUNK_2 = 1,
+ CURSOR_LINE_PER_CHUNK_4,
+ CURSOR_LINE_PER_CHUNK_8,
+ CURSOR_LINE_PER_CHUNK_16
+};
+
struct hubp {
struct hubp_funcs *funcs;
struct dc_context *ctx;
struct dc_plane_address request_address;
struct dc_plane_address current_address;
int inst;
+
+ /* run time states */
int opp_id;
int mpcc_id;
struct dc_cursor_attributes curs_attr;
+ bool power_gated;
};
@@ -100,6 +117,11 @@ struct hubp_funcs {
const struct dc_cursor_position *pos,
const struct dc_cursor_mi_param *param);
+ void (*hubp_disconnect)(struct hubp *hubp);
+
+ void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
+ void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
+
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 9602f26..b221581 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -73,7 +73,7 @@ struct pwl_result_data {
struct pwl_params {
struct gamma_curve arr_curve_points[34];
- struct curve_points arr_points[3];
+ struct curve_points arr_points[2];
struct pwl_result_data rgb_resulted[256 + 3];
uint32_t hw_points_num;
};
@@ -126,16 +126,69 @@ struct default_adjustment {
bool force_hw_default;
};
+
struct out_csc_color_matrix {
enum dc_color_space color_space;
uint16_t regval[12];
};
+
enum opp_regamma {
OPP_REGAMMA_BYPASS = 0,
OPP_REGAMMA_SRGB,
- OPP_REGAMMA_3_6,
+ OPP_REGAMMA_XVYCC,
OPP_REGAMMA_USER
};
+struct csc_transform {
+ uint16_t matrix[12];
+ bool enable_adjustment;
+};
+
+struct dc_bias_and_scale {
+ uint16_t scale_red;
+ uint16_t bias_red;
+ uint16_t scale_green;
+ uint16_t bias_green;
+ uint16_t scale_blue;
+ uint16_t bias_blue;
+};
+
+enum test_pattern_dyn_range {
+ TEST_PATTERN_DYN_RANGE_VESA = 0,
+ TEST_PATTERN_DYN_RANGE_CEA
+};
+
+enum test_pattern_mode {
+ TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
+ TEST_PATTERN_MODE_VERTICALBARS,
+ TEST_PATTERN_MODE_HORIZONTALBARS,
+ TEST_PATTERN_MODE_SINGLERAMP_RGB,
+ TEST_PATTERN_MODE_DUALRAMP_RGB
+};
+
+enum test_pattern_color_format {
+ TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
+ TEST_PATTERN_COLOR_FORMAT_BPC_8,
+ TEST_PATTERN_COLOR_FORMAT_BPC_10,
+ TEST_PATTERN_COLOR_FORMAT_BPC_12
+};
+
+enum controller_dp_test_pattern {
+ CONTROLLER_DP_TEST_PATTERN_D102 = 0,
+ CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
+ CONTROLLER_DP_TEST_PATTERN_PRBS7,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
+};
+
#endif /* __DAL_HW_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
index f11aa48..2109eac 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
@@ -85,8 +85,10 @@ struct ipp_funcs {
/* setup ipp to expand/convert input to pixel processor internal format */
void (*ipp_setup)(
struct input_pixel_processor *ipp,
- enum surface_pixel_format input_format,
- enum expansion_mode mode);
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space);
/* DCE function to setup IPP. TODO: see if we can consolidate to setup */
void (*ipp_program_prescale)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 498b7f0..54d8a13 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -123,8 +123,7 @@ struct link_encoder_funcs {
void (*enable_tmds_output)(struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
- bool hdmi,
- bool dual_link,
+ enum signal_type signal,
uint32_t pixel_clock);
void (*enable_dp_output)(struct link_encoder *enc,
const struct dc_link_settings *link_settings,
@@ -133,7 +132,7 @@ struct link_encoder_funcs {
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
void (*disable_output)(struct link_encoder *link_enc,
- enum signal_type signal, struct dc_link *link);
+ enum signal_type signal);
void (*dp_set_lane_settings)(struct link_encoder *enc,
const struct link_training_settings *link_settings);
void (*dp_set_phy_pattern)(struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index d4188b2..23a8d5e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -26,35 +26,162 @@
#define __DC_MPCC_H__
#include "dc_hw_types.h"
-#include "opp.h"
+#include "hw_shared.h"
-struct mpcc_cfg {
- int dpp_id;
- int opp_id;
- struct mpc_tree_cfg *tree_cfg;
- unsigned int z_index;
+#define MAX_MPCC 6
+#define MAX_OPP 6
- struct tg_color black_color;
- bool per_pixel_alpha;
- bool pre_multiplied_alpha;
+enum mpc_output_csc_mode {
+ MPC_OUTPUT_CSC_DISABLE = 0,
+ MPC_OUTPUT_CSC_COEF_A,
+ MPC_OUTPUT_CSC_COEF_B
+};
+
+
+enum mpcc_blend_mode {
+ MPCC_BLEND_MODE_BYPASS,
+ MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH,
+ MPCC_BLEND_MODE_TOP_LAYER_ONLY,
+ MPCC_BLEND_MODE_TOP_BOT_BLENDING
+};
+
+enum mpcc_alpha_blend_mode {
+ MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA,
+ MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN,
+ MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA
+};
+
+/*
+ * MPCC blending configuration
+ */
+struct mpcc_blnd_cfg {
+ struct tg_color black_color; /* background color */
+ enum mpcc_alpha_blend_mode alpha_mode; /* alpha blend mode */
+ bool pre_multiplied_alpha; /* alpha pre-multiplied mode flag */
+ int global_gain;
+ int global_alpha;
+ bool overlap_only;
+
+};
+
+struct mpcc_sm_cfg {
+ bool enable;
+ /* 0-single plane,2-row subsampling,4-column subsampling,6-checkerboard subsampling */
+ int sm_mode;
+ /* 0- disable frame alternate, 1- enable frame alternate */
+ bool frame_alt;
+ /* 0- disable field alternate, 1- enable field alternate */
+ bool field_alt;
+ /* 0-no force,2-force frame polarity from top,3-force frame polarity from bottom */
+ int force_next_frame_porlarity;
+ /* 0-no force,2-force field polarity from top,3-force field polarity from bottom */
+ int force_next_field_polarity;
+};
+
+/*
+ * MPCC connection and blending configuration for a single MPCC instance.
+ * This struct is used as a node in an MPC tree.
+ */
+struct mpcc {
+ int mpcc_id; /* MPCC physical instance */
+ int dpp_id; /* DPP input to this MPCC */
+ struct mpcc *mpcc_bot; /* pointer to bottom layer MPCC. NULL when not connected */
+ struct mpcc_blnd_cfg blnd_cfg; /* The blending configuration for this MPCC */
+ struct mpcc_sm_cfg sm_cfg; /* stereo mix setting for this MPCC */
+};
+
+/*
+ * MPC tree represents all MPCC connections for a pipe.
+ */
+struct mpc_tree {
+ int opp_id; /* The OPP instance that owns this MPC tree */
+ struct mpcc *opp_list; /* The top MPCC layer of the MPC tree that outputs to OPP endpoint */
};
struct mpc {
const struct mpc_funcs *funcs;
struct dc_context *ctx;
+
+ struct mpcc mpcc_array[MAX_MPCC];
};
struct mpc_funcs {
- int (*add)(struct mpc *mpc, struct mpcc_cfg *cfg);
+ /*
+ * Insert DPP into MPC tree based on specified blending position.
+ * Only used for planes that are part of blending chain for OPP output
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be added to.
+ * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ * stereo mix must be disabled for the very bottom layer of the tree config.
+ * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
+ * [in] dpp_id - DPP instance for the plane to be added.
+ * [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return: struct mpcc* - MPCC that was added.
+ */
+ struct mpcc* (*insert_plane)(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+ /*
+ * Remove a specified MPCC from the MPC tree.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be removed from.
+ * [in/out] mpcc - MPCC to be removed from tree.
+ *
+ * Return: void
+ */
+ void (*remove_mpcc)(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc);
+
+ /*
+ * Reset the MPCC HW status by disconnecting all muxes.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ *
+ * Return: void
+ */
+ void (*mpc_init)(struct mpc *mpc);
- void (*remove)(struct mpc *mpc,
- struct mpc_tree_cfg *tree_cfg,
- int opp_id,
- int mpcc_inst);
+ /*
+ * Update the blending configuration for a specified MPCC.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in] blnd_cfg - MPCC blending configuration.
+ * [in] mpcc_id - The MPCC physical instance.
+ *
+ * Return: void
+ */
+ void (*update_blending)(
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ int mpcc_id);
+
+ struct mpcc* (*get_mpcc_for_dpp)(
+ struct mpc_tree *tree,
+ int dpp_id);
void (*wait_for_idle)(struct mpc *mpc, int id);
- void (*update_blend_mode)(struct mpc *mpc, struct mpcc_cfg *cfg);
+ void (*assert_mpcc_idle_before_connect)(struct mpc *mpc, int mpcc_id);
+
+ void (*init_mpcc_list_from_hw)(
+ struct mpc *mpc,
+ struct mpc_tree *tree);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 75adb8f..d974d9e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -29,6 +29,7 @@
#include "hw_shared.h"
#include "dc_hw_types.h"
#include "transform.h"
+#include "mpc.h"
struct fixed31_32;
@@ -204,7 +205,7 @@ struct output_pixel_processor {
struct dc_context *ctx;
uint32_t inst;
struct pwl_params regamma_params;
- struct mpc_tree_cfg mpc_tree;
+ struct mpc_tree mpc_tree_params;
bool mpcc_disconnect_pending[MAX_PIPES];
const struct opp_funcs *funcs;
};
@@ -248,6 +249,21 @@ enum ovl_csc_adjust_item {
OVERLAY_COLOR_TEMPERATURE
};
+enum oppbuf_display_segmentation {
+ OPPBUF_DISPLAY_SEGMENTATION_1_SEGMENT = 0,
+ OPPBUF_DISPLAY_SEGMENTATION_2_SEGMENT = 1,
+ OPPBUF_DISPLAY_SEGMENTATION_4_SEGMENT = 2,
+ OPPBUF_DISPLAY_SEGMENTATION_4_SEGMENT_SPLIT_LEFT = 3,
+ OPPBUF_DISPLAY_SEGMENTATION_4_SEGMENT_SPLIT_RIGHT = 4
+};
+
+struct oppbuf_params {
+ uint32_t active_width;
+ enum oppbuf_display_segmentation mso_segmentation;
+ uint32_t mso_overlap_pixel_num;
+ uint32_t pixel_repetition;
+};
+
struct opp_funcs {
@@ -276,14 +292,15 @@ struct opp_funcs {
void (*opp_destroy)(struct output_pixel_processor **opp);
- void (*opp_set_stereo_polarity)(
- struct output_pixel_processor *opp,
- bool enable,
- bool rightEyePolarity);
+ void (*opp_program_stereo)(
+ struct output_pixel_processor *opp,
+ bool enable,
+ const struct dc_crtc_timing *timing);
- void (*opp_set_test_pattern)(
+ void (*opp_pipe_clock_control)(
struct output_pixel_processor *opp,
bool enable);
+
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index c6ab38c..3217b5b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -26,6 +26,8 @@
#ifndef __DAL_TIMING_GENERATOR_TYPES_H__
#define __DAL_TIMING_GENERATOR_TYPES_H__
+#include "hw_shared.h"
+
struct dc_bios;
/* Contains CRTC vertical/horizontal pixel counters */
@@ -40,6 +42,19 @@ struct dcp_gsl_params {
int gsl_master;
};
+struct gsl_params {
+ int gsl0_en;
+ int gsl1_en;
+ int gsl2_en;
+ int gsl_master_en;
+ int gsl_master_mode;
+ int master_update_lock_gsl_en;
+ int gsl_window_start_x;
+ int gsl_window_end_x;
+ int gsl_window_start_y;
+ int gsl_window_end_y;
+};
+
/* define the structure of Dynamic Refresh Mode */
struct drr_params {
uint32_t vertical_total_min;
@@ -50,43 +65,6 @@ struct drr_params {
#define LEFT_EYE_3D_PRIMARY_SURFACE 1
#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
-enum test_pattern_dyn_range {
- TEST_PATTERN_DYN_RANGE_VESA = 0,
- TEST_PATTERN_DYN_RANGE_CEA
-};
-
-enum test_pattern_mode {
- TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
- TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
- TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
- TEST_PATTERN_MODE_VERTICALBARS,
- TEST_PATTERN_MODE_HORIZONTALBARS,
- TEST_PATTERN_MODE_SINGLERAMP_RGB,
- TEST_PATTERN_MODE_DUALRAMP_RGB
-};
-
-enum test_pattern_color_format {
- TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
- TEST_PATTERN_COLOR_FORMAT_BPC_8,
- TEST_PATTERN_COLOR_FORMAT_BPC_10,
- TEST_PATTERN_COLOR_FORMAT_BPC_12
-};
-
-enum controller_dp_test_pattern {
- CONTROLLER_DP_TEST_PATTERN_D102 = 0,
- CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
- CONTROLLER_DP_TEST_PATTERN_PRBS7,
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
- CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
- CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
- CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
- CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
-};
-
enum crtc_state {
CRTC_STATE_VBLANK = 0,
CRTC_STATE_VACTIVE
@@ -100,6 +78,12 @@ struct _dlg_otg_param {
enum signal_type signal;
};
+struct vupdate_keepout_params {
+ int start_offset;
+ int end_offset;
+ int enable;
+};
+
struct crtc_stereo_flags {
uint8_t PROGRAM_STEREO : 1;
uint8_t PROGRAM_POLARITY : 1;
@@ -108,6 +92,36 @@ struct crtc_stereo_flags {
uint8_t DISABLE_STEREO_DP_SYNC : 1;
};
+enum crc_selection {
+ /* Order must match values expected by hardware */
+ UNION_WINDOW_A_B = 0,
+ UNION_WINDOW_A_NOT_B,
+ UNION_WINDOW_NOT_A_B,
+ UNION_WINDOW_NOT_A_NOT_B,
+ INTERSECT_WINDOW_A_B,
+ INTERSECT_WINDOW_A_NOT_B,
+ INTERSECT_WINDOW_NOT_A_B,
+ INTERSECT_WINDOW_NOT_A_NOT_B,
+};
+
+struct crc_params {
+ /* Regions used to calculate CRC*/
+ uint16_t windowa_x_start;
+ uint16_t windowa_x_end;
+ uint16_t windowa_y_start;
+ uint16_t windowa_y_end;
+
+ uint16_t windowb_x_start;
+ uint16_t windowb_x_end;
+ uint16_t windowb_y_start;
+ uint16_t windowb_y_end;
+
+ enum crc_selection selection;
+
+ bool continuous_mode;
+ bool enable;
+};
+
struct timing_generator {
const struct timing_generator_funcs *funcs;
struct dc_bios *bp;
@@ -158,7 +172,11 @@ struct timing_generator_funcs {
const struct dcp_gsl_params *gsl_params);
void (*unlock)(struct timing_generator *tg);
void (*lock)(struct timing_generator *tg);
- void (*enable_reset_trigger)(struct timing_generator *tg, int source_tg_inst);
+ void (*enable_reset_trigger)(struct timing_generator *tg,
+ int source_tg_inst);
+ void (*enable_crtc_reset)(struct timing_generator *tg,
+ int source_tg_inst,
+ struct crtc_trigger_info *crtc_tp);
void (*disable_reset_trigger)(struct timing_generator *tg);
void (*tear_down_global_swap_lock)(struct timing_generator *tg);
void (*enable_advanced_request)(struct timing_generator *tg,
@@ -178,6 +196,28 @@ struct timing_generator_funcs {
void (*program_stereo)(struct timing_generator *tg,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
bool (*is_stereo_left_eye)(struct timing_generator *tg);
+
+ void (*set_blank_data_double_buffer)(struct timing_generator *tg, bool enable);
+
+ void (*tg_init)(struct timing_generator *tg);
+ bool (*is_tg_enabled)(struct timing_generator *tg);
+ bool (*is_optc_underflow_occurred)(struct timing_generator *tg);
+ void (*clear_optc_underflow)(struct timing_generator *tg);
+
+ /**
+ * Configure CRCs for the given timing generator. Return false if TG is
+ * not on.
+ */
+ bool (*configure_crc)(struct timing_generator *tg,
+ const struct crc_params *params);
+
+ /**
+ * Get CRCs for the given timing generator. Return false if CRCs are
+ * not enabled (via configure_crc).
+ */
+ bool (*get_crc)(struct timing_generator *tg,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index ea88997..c5b3623 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -30,7 +30,7 @@
#include "dc_hw_types.h"
#include "fixed31_32.h"
-#define CSC_TEMPERATURE_MATRIX_SIZE 9
+#define CSC_TEMPERATURE_MATRIX_SIZE 12
struct bit_depth_reduction_params;
@@ -250,8 +250,10 @@ struct transform_funcs {
void (*ipp_setup)(
struct transform *xfm_base,
- enum surface_pixel_format input_format,
- enum expansion_mode mode);
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space);
void (*ipp_full_bypass)(struct transform *xfm_base);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 8734689..e764cba 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -28,6 +28,7 @@
#include "dc_types.h"
#include "clock_source.h"
#include "inc/hw/timing_generator.h"
+#include "inc/hw/opp.h"
#include "inc/hw/link_encoder.h"
#include "core_status.h"
@@ -39,6 +40,12 @@ enum pipe_gating_control {
struct dce_hwseq_wa {
bool blnd_crtc_trigger;
+ bool DEGVIDCN10_253;
+ bool false_optc_underflow;
+};
+
+struct hwseq_wa_state {
+ bool DEGVIDCN10_253_applied;
};
struct dce_hwseq {
@@ -47,6 +54,7 @@ struct dce_hwseq {
const struct dce_hwseq_shift *shifts;
const struct dce_hwseq_mask *masks;
struct dce_hwseq_wa wa;
+ struct hwseq_wa_state wa_state;
};
struct pipe_ctx;
@@ -106,7 +114,7 @@ struct hw_sequencer_funcs {
void (*power_down)(struct dc *dc);
- void (*enable_accelerated_mode)(struct dc *dc);
+ void (*enable_accelerated_mode)(struct dc *dc, struct dc_state *context);
void (*enable_timing_synchronization)(
struct dc *dc,
@@ -114,6 +122,11 @@ struct hw_sequencer_funcs {
int group_size,
struct pipe_ctx *grouped_pipes[]);
+ void (*enable_per_frame_crtc_position_reset)(
+ struct dc *dc,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[]);
+
void (*enable_display_pipe_clock_gating)(
struct dc_context *ctx,
bool clock_gating);
@@ -124,11 +137,7 @@ struct hw_sequencer_funcs {
struct dc_bios *dcb,
enum pipe_gating_control power_gating);
- void (*power_down_front_end)(struct dc *dc, int fe_idx);
-
- void (*power_on_front_end)(struct dc *dc,
- struct pipe_ctx *pipe,
- struct dc_state *context);
+ void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
@@ -140,6 +149,7 @@ struct hw_sequencer_funcs {
void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
+ void (*blank_stream)(struct pipe_ctx *pipe_ctx);
void (*pipe_control_lock)(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -178,12 +188,19 @@ struct hw_sequencer_funcs {
void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
void (*optimize_shared_resources)(struct dc *dc);
+ void (*pplib_apply_display_requirements)(
+ struct dc *dc,
+ struct dc_state *context);
void (*edp_power_control)(
- struct link_encoder *enc,
+ struct dc_link *link,
bool enable);
void (*edp_backlight_control)(
struct dc_link *link,
bool enable);
+ void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
+
+ void (*set_cursor_position)(struct pipe_ctx *pipe);
+ void (*set_cursor_attribute)(struct pipe_ctx *pipe);
};
void color_space_to_black_color(
@@ -194,4 +211,8 @@ void color_space_to_black_color(
bool hwss_wait_for_blank_complete(
struct timing_generator *tg);
+const uint16_t *find_color_matrix(
+ enum dc_color_space color_space,
+ uint32_t *array_size);
+
#endif /* __DC_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index f2b8c9a..30be7bb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -51,6 +51,8 @@ void dp_enable_link_phy(
const struct dc_link_settings *link_settings);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+bool edp_receiver_ready_T9(struct dc_link *link);
+bool edp_receiver_ready_T7(struct dc_link *link);
void dp_disable_link_phy(struct dc_link *link, enum signal_type signal);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index f7e40b2..afe0876 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -36,27 +36,25 @@
#include "dc.h"
#include "core_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
+#define DC_LOGGER \
+ irq_service->ctx->logger
+
+static bool hpd_ack(struct irq_service *irq_service,
+ const struct irq_source_info *info)
{
uint32_t addr = info->status_reg;
uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
+ uint32_t current_status = get_reg_field_value(value,
+ DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
dal_irq_service_ack_generic(irq_service, info);
value = dm_read_reg(irq_service->ctx, info->enable_reg);
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
+ set_reg_field_value(value, current_status ? 0 : 1,
+ DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
dm_write_reg(irq_service->ctx, info->enable_reg, value);
@@ -176,48 +174,41 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
#define dc_underflow_int_entry(reg_num) \
[DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
-bool dal_irq_service_dummy_set(
- struct irq_service *irq_service,
- const struct irq_source_info *info,
- bool enable)
+bool dal_irq_service_dummy_set(struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
{
- dm_logger_write(
- irq_service->ctx->logger, LOG_ERROR,
- "%s: called for non-implemented irq source\n",
- __func__);
+ DC_LOG_ERROR("%s: called for non-implemented irq source\n",
+ __func__);
return false;
}
-bool dal_irq_service_dummy_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
+bool dal_irq_service_dummy_ack(struct irq_service *irq_service,
+ const struct irq_source_info *info)
{
- dm_logger_write(
- irq_service->ctx->logger, LOG_ERROR,
- "%s: called for non-implemented irq source\n",
- __func__);
+ DC_LOG_ERROR("%s: called for non-implemented irq source\n",
+ __func__);
return false;
}
-bool dce110_vblank_set(
- struct irq_service *irq_service,
- const struct irq_source_info *info,
- bool enable)
+bool dce110_vblank_set(struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
{
struct dc_context *dc_ctx = irq_service->ctx;
struct dc *core_dc = irq_service->ctx->dc;
- enum dc_irq_source dal_irq_src = dc_interrupt_to_irq_source(
- irq_service->ctx->dc,
- info->src_id,
- info->ext_id);
+ enum dc_irq_source dal_irq_src =
+ dc_interrupt_to_irq_source(irq_service->ctx->dc,
+ info->src_id,
+ info->ext_id);
uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
struct timing_generator *tg =
core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
if (enable) {
- if (!tg->funcs->arm_vert_intr(tg, 2)) {
+ if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
DC_ERROR("Failed to get VBLANK!\n");
return false;
}
@@ -225,7 +216,6 @@ bool dce110_vblank_set(
dal_irq_service_set_generic(irq_service, info, enable);
return true;
-
}
static const struct irq_source_info_funcs dummy_irq_info_funcs = {
@@ -406,9 +396,8 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = {
.to_dal_irq_source = to_dal_irq_source_dce110
};
-static void construct(
- struct irq_service *irq_service,
- struct irq_service_init_data *init_data)
+static void construct(struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
{
dal_irq_service_construct(irq_service, init_data);
@@ -416,8 +405,8 @@ static void construct(
irq_service->funcs = &irq_service_funcs_dce110;
}
-struct irq_service *dal_irq_service_dce110_create(
- struct irq_service_init_data *init_data)
+struct irq_service *
+dal_irq_service_dce110_create(struct irq_service_init_data *init_data)
{
struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 2ad56b1..1ea7256 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -30,9 +30,10 @@
#include "irq_service_dce120.h"
#include "../dce110/irq_service_dce110.h"
-#include "vega10/DC/dce_12_0_offset.h"
-#include "vega10/DC/dce_12_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
#include "ivsrcid/ivsrcid_vislands30.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 74ad247..e04ae49 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -29,9 +29,10 @@
#include "../dce110/irq_service_dce110.h"
-#include "raven1/DCN/dcn_1_0_offset.h"
-#include "raven1/DCN/dcn_1_0_sh_mask.h"
-#include "vega10/soc15ip.h"
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
#include "irq_service_dcn10.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index b106513..dcdfa0f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -47,6 +47,8 @@
#define CTX \
irq_service->ctx
+#define DC_LOGGER \
+ irq_service->ctx->logger
void dal_irq_service_construct(
struct irq_service *irq_service,
@@ -104,9 +106,7 @@ bool dal_irq_service_set(
find_irq_source_info(irq_service, source);
if (!info) {
- dm_logger_write(
- irq_service->ctx->logger, LOG_ERROR,
- "%s: cannot find irq info table entry for %d\n",
+ DC_LOG_ERROR("%s: cannot find irq info table entry for %d\n",
__func__,
source);
return false;
@@ -142,9 +142,7 @@ bool dal_irq_service_ack(
find_irq_source_info(irq_service, source);
if (!info) {
- dm_logger_write(
- irq_service->ctx->logger, LOG_ERROR,
- "%s: cannot find irq info table entry for %d\n",
+ DC_LOG_ERROR("%s: cannot find irq info table entry for %d\n",
__func__,
source);
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index a87c032..a407892 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -26,15 +26,13 @@
#ifndef _OS_TYPES_H_
#define _OS_TYPES_H_
-#if defined __KERNEL__
-
#include <asm/byteorder.h>
#include <linux/types.h>
#include <drm/drmP.h>
#include <linux/kref.h>
-#include "cgs_linux.h"
+#include "cgs_common.h"
#if defined(__BIG_ENDIAN) && !defined(BIGENDIAN_CPU)
#define BIGENDIAN_CPU
@@ -46,14 +44,12 @@
#undef WRITE
#undef FRAME_SIZE
-#define dm_output_to_console(fmt, ...) DRM_INFO(fmt, ##__VA_ARGS__)
+#define dm_output_to_console(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#define dm_debug(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
-
-#define dm_vlog(fmt, args) vprintk(fmt, args)
-
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include <asm/fpu/api.h>
#endif
/*
@@ -89,8 +85,4 @@
BREAK_TO_DEBUGGER(); \
} while (0)
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include <asm/fpu/api.h>
-#endif
-
#endif /* _OS_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 88c2bde..1c079ba 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -42,8 +42,7 @@ static void virtual_link_encoder_enable_tmds_output(
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
- bool hdmi,
- bool dual_link,
+ enum signal_type signal,
uint32_t pixel_clock) {}
static void virtual_link_encoder_enable_dp_output(
@@ -58,8 +57,7 @@ static void virtual_link_encoder_enable_dp_mst_output(
static void virtual_link_encoder_disable_output(
struct link_encoder *link_enc,
- enum signal_type signal,
- struct dc_link *link) {}
+ enum signal_type signal) {}
static void virtual_link_encoder_dp_set_lane_settings(
struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 7abe663..9831cb5 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -109,6 +109,14 @@
#define ASIC_REV_IS_STONEY(rev) \
((rev >= STONEY_A0) && (rev < CZ_UNKNOWN))
+/* DCE12 */
+
+#define AI_GREENLAND_P_A0 1
+#define AI_GREENLAND_P_A1 2
+
+#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN)
+#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN)
+
/* DCN1_0 */
#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
#define RAVEN_A0 0x01
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 0ff2a89..019e7a0 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -27,12 +27,8 @@
#define DP_BRANCH_DEVICE_ID_1 0x0010FA
#define DP_BRANCH_DEVICE_ID_2 0x0022B9
-#define DP_SINK_DEVICE_ID_1 0x4CE000
#define DP_BRANCH_DEVICE_ID_3 0x00001A
#define DP_BRANCH_DEVICE_ID_4 0x0080e1
-#define DP_BRANCH_DEVICE_ID_5 0x006037
-#define DP_SINK_DEVICE_ID_2 0x001CF8
-
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
@@ -115,40 +111,11 @@ struct av_sync_data {
uint8_t aud_del_ins3;/* DPCD 0002Dh */
};
-/*DP to VGA converter*/
-static const uint8_t DP_VGA_CONVERTER_ID_1[] = "mVGAa";
-/*DP to Dual link DVI converter*/
-static const uint8_t DP_DVI_CONVERTER_ID_1[] = "m2DVIa";
/*Travis*/
static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
/*Nutmeg*/
static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
-/*DP to VGA converter*/
-static const uint8_t DP_VGA_CONVERTER_ID_4[] = "DpVga";
/*DP to Dual link DVI converter*/
static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
-/*DP to Dual link DVI converter 2*/
-static const uint8_t DP_DVI_CONVERTER_ID_42[] = "v2DVIa";
-
-static const uint8_t DP_SINK_DEV_STRING_ID2_REV0[] = "\0\0\0\0\0\0";
-
-/* Identifies second generation PSR TCON from Parade: Device ID string:
- * yy-xx-**-**-**-**
- */
-/* xx - Hw ID high byte */
-static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_HIGH_BYTE =
- 0x06;
-
-/* yy - HW ID low byte, the same silicon has several package/feature flavors */
-static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE1 =
- 0x61;
-static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE2 =
- 0x62;
-static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE3 =
- 0x63;
-static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE4 =
- 0x72;
-static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE5 =
- 0x73;
#endif /* __DAL_DDC_SERVICE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 3248f69..0de2586 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -463,4 +463,14 @@ uint32_t dal_fixed31_32_u2d19(
uint32_t dal_fixed31_32_u0d19(
struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_clamp_u0d14(
+ struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_clamp_u0d10(
+ struct fixed31_32 arg);
+
+int32_t dal_fixed31_32_s4d19(
+ struct fixed31_32 arg);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 7a9b43f..36bbad5 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -419,11 +419,6 @@ struct bios_event_info {
bool backlight_changed;
};
-enum {
- HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
- TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
-};
-
/*
* DFS-bypass flag
*/
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index 5eb2b4d..c419743 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -233,10 +233,6 @@ static inline struct graphics_object_id dal_graphics_object_id_init(
return result;
}
-bool dal_graphics_object_id_is_equal(
- struct graphics_object_id id1,
- struct graphics_object_id id2);
-
/* Based on internal data members memory layout */
static inline uint32_t dal_graphics_object_id_to_uint(
struct graphics_object_id id)
@@ -248,7 +244,7 @@ static inline enum controller_id dal_graphics_object_id_get_controller_id(
struct graphics_object_id id)
{
if (id.type == OBJECT_TYPE_CONTROLLER)
- return id.id;
+ return (enum controller_id) id.id;
return CONTROLLER_ID_UNDEFINED;
}
@@ -256,7 +252,7 @@ static inline enum clock_source_id dal_graphics_object_id_get_clock_source_id(
struct graphics_object_id id)
{
if (id.type == OBJECT_TYPE_CLOCK_SOURCE)
- return id.id;
+ return (enum clock_source_id) id.id;
return CLOCK_SOURCE_ID_UNDEFINED;
}
@@ -264,7 +260,7 @@ static inline enum encoder_id dal_graphics_object_id_get_encoder_id(
struct graphics_object_id id)
{
if (id.type == OBJECT_TYPE_ENCODER)
- return id.id;
+ return (enum encoder_id) id.id;
return ENCODER_ID_UNKNOWN;
}
@@ -272,7 +268,7 @@ static inline enum connector_id dal_graphics_object_id_get_connector_id(
struct graphics_object_id id)
{
if (id.type == OBJECT_TYPE_CONNECTOR)
- return id.id;
+ return (enum connector_id) id.id;
return CONNECTOR_ID_UNKNOWN;
}
@@ -280,7 +276,7 @@ static inline enum audio_id dal_graphics_object_id_get_audio_id(
struct graphics_object_id id)
{
if (id.type == OBJECT_TYPE_AUDIO)
- return id.id;
+ return (enum audio_id) id.id;
return AUDIO_ID_UNKNOWN;
}
@@ -288,7 +284,7 @@ static inline enum engine_id dal_graphics_object_id_get_engine_id(
struct graphics_object_id id)
{
if (id.type == OBJECT_TYPE_ENGINE)
- return id.id;
+ return (enum engine_id) id.id;
return ENGINE_ID_UNKNOWN;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index adea1a5..80f0d93 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -58,11 +58,14 @@ enum {
enum link_training_result {
LINK_TRAINING_SUCCESS,
- LINK_TRAINING_CR_FAIL,
+ LINK_TRAINING_CR_FAIL_LANE0,
+ LINK_TRAINING_CR_FAIL_LANE1,
+ LINK_TRAINING_CR_FAIL_LANE23,
/* CR DONE bit is cleared during EQ step */
LINK_TRAINING_EQ_FAIL_CR,
/* other failure during EQ step */
LINK_TRAINING_EQ_FAIL_EQ,
+ LINK_TRAINING_LQA_FAIL,
};
struct link_training_settings {
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 8e1fe70..28dee96 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -57,6 +57,11 @@ void dm_logger_append(
const char *msg,
...);
+void dm_logger_append_va(
+ struct log_entry *entry,
+ const char *msg,
+ va_list args);
+
void dm_logger_open(
struct dal_logger *logger,
struct log_entry *entry,
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index e2ff8cd..427796b 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -29,6 +29,39 @@
#include "os_types.h"
#define MAX_NAME_LEN 32
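+/* Each file using these macros is expected to #define DC_LOGGER to the
+ * dal_logger pointer of its current context (see e.g. irq_service.c).
+ */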
+#define DC_LOG_ERROR(a, ...) dm_logger_write(DC_LOGGER, LOG_ERROR, a, ## __VA_ARGS__)
+#define DC_LOG_WARNING(a, ...) dm_logger_write(DC_LOGGER, LOG_WARNING, a, ## __VA_ARGS__)
+#define DC_LOG_DEBUG(a, ...) dm_logger_write(DC_LOGGER, LOG_DEBUG, a, ## __VA_ARGS__)
+#define DC_LOG_DC(a, ...) dm_logger_write(DC_LOGGER, LOG_DC, a, ## __VA_ARGS__)
+#define DC_LOG_DTN(a, ...) dm_logger_write(DC_LOGGER, LOG_DTN, a, ## __VA_ARGS__)
+#define DC_LOG_SURFACE(a, ...) dm_logger_write(DC_LOGGER, LOG_SURFACE, a, ## __VA_ARGS__)
+#define DC_LOG_HW_HOTPLUG(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HOTPLUG, a, ## __VA_ARGS__)
+#define DC_LOG_HW_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_LINK_TRAINING, a, ## __VA_ARGS__)
+#define DC_LOG_HW_SET_MODE(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_SET_MODE, a, ## __VA_ARGS__)
+#define DC_LOG_HW_RESUME_S3(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_RESUME_S3, a, ## __VA_ARGS__)
+#define DC_LOG_HW_AUDIO(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_AUDIO, a, ## __VA_ARGS__)
+#define DC_LOG_HW_HPD_IRQ(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HPD_IRQ, a, ## __VA_ARGS__)
+#define DC_LOG_MST(a, ...) dm_logger_write(DC_LOGGER, LOG_MST, a, ## __VA_ARGS__)
+#define DC_LOG_SCALER(a, ...) dm_logger_write(DC_LOGGER, LOG_SCALER, a, ## __VA_ARGS__)
+#define DC_LOG_BIOS(a, ...) dm_logger_write(DC_LOGGER, LOG_BIOS, a, ## __VA_ARGS__)
+#define DC_LOG_BANDWIDTH_CALCS(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_CALCS, a, ## __VA_ARGS__)
+#define DC_LOG_BANDWIDTH_VALIDATION(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_VALIDATION, a, ## __VA_ARGS__)
+#define DC_LOG_I2C_AUX(a, ...) dm_logger_write(DC_LOGGER, LOG_I2C_AUX, a, ## __VA_ARGS__)
+#define DC_LOG_SYNC(a, ...) dm_logger_write(DC_LOGGER, LOG_SYNC, a, ## __VA_ARGS__)
+#define DC_LOG_BACKLIGHT(a, ...) dm_logger_write(DC_LOGGER, LOG_BACKLIGHT, a, ## __VA_ARGS__)
+#define DC_LOG_FEATURE_OVERRIDE(a, ...) dm_logger_write(DC_LOGGER, LOG_FEATURE_OVERRIDE, a, ## __VA_ARGS__)
+#define DC_LOG_DETECTION_EDID_PARSER(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_EDID_PARSER, a, ## __VA_ARGS__)
+#define DC_LOG_DETECTION_DP_CAPS(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_DP_CAPS, a, ## __VA_ARGS__)
+#define DC_LOG_RESOURCE(a, ...) dm_logger_write(DC_LOGGER, LOG_RESOURCE, a, ## __VA_ARGS__)
+#define DC_LOG_DML(a, ...) dm_logger_write(DC_LOGGER, LOG_DML, a, ## __VA_ARGS__)
+#define DC_LOG_EVENT_MODE_SET(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_MODE_SET, a, ## __VA_ARGS__)
+#define DC_LOG_EVENT_DETECTION(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_DETECTION, a, ## __VA_ARGS__)
+#define DC_LOG_EVENT_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_TRAINING, a, ## __VA_ARGS__)
+#define DC_LOG_EVENT_LINK_LOSS(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_LOSS, a, ## __VA_ARGS__)
+#define DC_LOG_EVENT_UNDERFLOW(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_UNDERFLOW, a, ## __VA_ARGS__)
+#define DC_LOG_IF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_IF_TRACE, a, ## __VA_ARGS__)
+#define DC_LOG_PERF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_PERF_TRACE, a, ## __VA_ARGS__)
+
struct dal_logger;
@@ -65,6 +98,7 @@ enum dc_log_type {
LOG_EVENT_UNDERFLOW,
LOG_IF_TRACE,
LOG_PERF_TRACE,
+ LOG_PROFILING,
LOG_SECTION_TOTAL_COUNT
};
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index b5ebde6..199c5db 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -26,6 +26,11 @@
#ifndef __DC_SIGNAL_TYPES_H__
#define __DC_SIGNAL_TYPES_H__
+/* Minimum pixel clock, in kHz: 25.00 MHz for a TMDS signal */
+#define TMDS_MIN_PIXEL_CLOCK 25000
+/* Maximum pixel clock, in kHz: 165.00 MHz for a TMDS signal */
+#define TMDS_MAX_PIXEL_CLOCK 165000
+
enum signal_type {
SIGNAL_TYPE_NONE = 0L, /* no signal */
SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
diff --git a/drivers/gpu/drm/amd/display/modules/color/Makefile b/drivers/gpu/drm/amd/display/modules/color/Makefile
new file mode 100644
index 0000000..65c33a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2018 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the color sub-module of DAL.
+#
+
+MOD_COLOR = color_gamma.o
+
+AMD_DAL_MOD_COLOR = $(addprefix $(AMDDALPATH)/modules/color/,$(MOD_COLOR))
+#$(info ************ DAL COLOR MODULE MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MOD_COLOR)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
new file mode 100644
index 0000000..e7e374f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -0,0 +1,1396 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "opp.h"
+#include "color_gamma.h"
+
+
+#define NUM_PTS_IN_REGION 16
+#define NUM_REGIONS 32
+#define MAX_HW_POINTS (NUM_PTS_IN_REGION*NUM_REGIONS)
+
+static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
+
+static struct fixed31_32 pq_table[MAX_HW_POINTS + 2];
+static struct fixed31_32 de_pq_table[MAX_HW_POINTS + 2];
+
+static bool pq_initialized; /* = false; */
+static bool de_pq_initialized; /* = false; */
+
+/* one-time setup of X points */
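+/* Fills coordinates_x with NUM_REGIONS (32) exponential segments of
+ * NUM_PTS_IN_REGION (16) linearly spaced points each, covering x from
+ * 2^-25 up to 2^7; the two trailing entries are pinned at 2^7.
+ */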
+void setup_x_points_distribution(void)
+{
+ struct fixed31_32 region_size = dal_fixed31_32_from_int(128);
+ int32_t segment;
+ uint32_t seg_offset;
+ uint32_t index;
+ struct fixed31_32 increment;
+
+ coordinates_x[MAX_HW_POINTS].x = region_size;
+ coordinates_x[MAX_HW_POINTS + 1].x = region_size;
+
+ for (segment = 6; segment > (6 - NUM_REGIONS); segment--) {
+ region_size = dal_fixed31_32_div_int(region_size, 2);
+ increment = dal_fixed31_32_div_int(region_size,
+ NUM_PTS_IN_REGION);
+ seg_offset = (segment + (NUM_REGIONS - 7)) * NUM_PTS_IN_REGION;
+ coordinates_x[seg_offset].x = region_size;
+
+ for (index = seg_offset + 1;
+ index < seg_offset + NUM_PTS_IN_REGION;
+ index++) {
+ coordinates_x[index].x = dal_fixed31_32_add
+ (coordinates_x[index-1].x, increment);
+ }
+ }
+}
+
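+/* SMPTE ST 2084 (PQ) encode: out = ((c1 + c2 * L^m1) / (1 + c3 * L^m1))^m2,
+ * where L is linear luminance normalized so that 1.0 = 10000 nits.
+ */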
+static void compute_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
+{
+ /* consts for PQ gamma formula. */
+ const struct fixed31_32 m1 =
+ dal_fixed31_32_from_fraction(159301758, 1000000000);
+ const struct fixed31_32 m2 =
+ dal_fixed31_32_from_fraction(7884375, 100000);
+ const struct fixed31_32 c1 =
+ dal_fixed31_32_from_fraction(8359375, 10000000);
+ const struct fixed31_32 c2 =
+ dal_fixed31_32_from_fraction(188515625, 10000000);
+ const struct fixed31_32 c3 =
+ dal_fixed31_32_from_fraction(186875, 10000);
+
+ struct fixed31_32 l_pow_m1;
+ struct fixed31_32 base;
+
+ if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero))
+ in_x = dal_fixed31_32_zero;
+
+ l_pow_m1 = dal_fixed31_32_pow(in_x, m1);
+ base = dal_fixed31_32_div(
+ dal_fixed31_32_add(c1,
+ (dal_fixed31_32_mul(c2, l_pow_m1))),
+ dal_fixed31_32_add(dal_fixed31_32_one,
+ (dal_fixed31_32_mul(c3, l_pow_m1))));
+ *out_y = dal_fixed31_32_pow(base, m2);
+}
+
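+/* Inverse of compute_pq(): recover normalized linear luminance from a
+ * PQ-encoded value.
+ */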
+static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
+{
+ /* consts for dePQ gamma formula. */
+ const struct fixed31_32 m1 =
+ dal_fixed31_32_from_fraction(159301758, 1000000000);
+ const struct fixed31_32 m2 =
+ dal_fixed31_32_from_fraction(7884375, 100000);
+ const struct fixed31_32 c1 =
+ dal_fixed31_32_from_fraction(8359375, 10000000);
+ const struct fixed31_32 c2 =
+ dal_fixed31_32_from_fraction(188515625, 10000000);
+ const struct fixed31_32 c3 =
+ dal_fixed31_32_from_fraction(186875, 10000);
+
+ struct fixed31_32 l_pow_m1;
+ struct fixed31_32 base, div;
+
+
+ if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero))
+ in_x = dal_fixed31_32_zero;
+
+ l_pow_m1 = dal_fixed31_32_pow(in_x,
+ dal_fixed31_32_div(dal_fixed31_32_one, m2));
+ base = dal_fixed31_32_sub(l_pow_m1, c1);
+
+ if (dal_fixed31_32_lt(base, dal_fixed31_32_zero))
+ base = dal_fixed31_32_zero;
+
+ div = dal_fixed31_32_sub(c2, dal_fixed31_32_mul(c3, l_pow_m1));
+
+ *out_y = dal_fixed31_32_pow(dal_fixed31_32_div(base, div),
+ dal_fixed31_32_div(dal_fixed31_32_one, m1));
+
+}
+/* one-time pre-compute PQ values - only for sdr_white_level 80 */
+void precompute_pq(void)
+{
+ int i;
+ struct fixed31_32 x;
+ const struct hw_x_point *coord_x = coordinates_x + 32;
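+ /* 80/10000: FP16 1.0 represents 80 nits of the 10000-nit PQ range */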
+ struct fixed31_32 scaling_factor =
+ dal_fixed31_32_from_fraction(80, 10000);
+
+ /* pow function has problems with arguments too small */
+ for (i = 0; i < 32; i++)
+ pq_table[i] = dal_fixed31_32_zero;
+
+ for (i = 32; i <= MAX_HW_POINTS; i++) {
+ x = dal_fixed31_32_mul(coord_x->x, scaling_factor);
+ compute_pq(x, &pq_table[i]);
+ ++coord_x;
+ }
+}
+
+/* one-time pre-compute dePQ values - only for max pixel value 125 FP16 */
+void precompute_de_pq(void)
+{
+ int i;
+ struct fixed31_32 y;
+ uint32_t begin_index, end_index;
+
+ struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125);
+
+ /* X points is 2^-25 to 2^7
+ * De-gamma X is 2^-12 to 2^0 – we are skipping first -12-(-25) = 13 regions
+ */
+ begin_index = 13 * NUM_PTS_IN_REGION;
+ end_index = begin_index + 12 * NUM_PTS_IN_REGION;
+
+ for (i = 0; i <= begin_index; i++)
+ de_pq_table[i] = dal_fixed31_32_zero;
+
+ for (; i <= end_index; i++) {
+ compute_de_pq(coordinates_x[i].x, &y);
+ de_pq_table[i] = dal_fixed31_32_mul(y, scaling_factor);
+ }
+
+ for (; i <= MAX_HW_POINTS; i++)
+ de_pq_table[i] = de_pq_table[i-1];
+}
+struct dividers {
+ struct fixed31_32 divider1;
+ struct fixed31_32 divider2;
+ struct fixed31_32 divider3;
+};
+
+static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4)
+{
+ static const int32_t numerator01[] = { 31308, 180000};
+ static const int32_t numerator02[] = { 12920, 4500};
+ static const int32_t numerator03[] = { 55, 99};
+ static const int32_t numerator04[] = { 55, 99};
+ static const int32_t numerator05[] = { 2400, 2200};
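+ /* index 0 (is_2_4): sRGB-style constants (a0 = 0.0031308, a1 = 12.92,
+ * a2 = a3 = 0.055, gamma 2.4); index 1: BT.709-style constants
+ * (a0 = 0.018, a1 = 4.5, a2 = a3 = 0.099, gamma 2.2)
+ */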
+
+ uint32_t i = 0;
+ uint32_t index = is_2_4 == true ? 0:1;
+
+ do {
+ coefficients->a0[i] = dal_fixed31_32_from_fraction(
+ numerator01[index], 10000000);
+ coefficients->a1[i] = dal_fixed31_32_from_fraction(
+ numerator02[index], 1000);
+ coefficients->a2[i] = dal_fixed31_32_from_fraction(
+ numerator03[index], 1000);
+ coefficients->a3[i] = dal_fixed31_32_from_fraction(
+ numerator04[index], 1000);
+ coefficients->user_gamma[i] = dal_fixed31_32_from_fraction(
+ numerator05[index], 1000);
+
+ ++i;
+ } while (i != ARRAY_SIZE(coefficients->a0));
+}
+
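+/* Piecewise gamma encode: clamp to 1 above 1.0, (1 + a3) * x^(1/gamma) - a2
+ * for x >= a0, the mirrored curve for x <= -a0, and the linear segment
+ * a1 * x in between.
+ */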
+static struct fixed31_32 translate_from_linear_space(
+ struct fixed31_32 arg,
+ struct fixed31_32 a0,
+ struct fixed31_32 a1,
+ struct fixed31_32 a2,
+ struct fixed31_32 a3,
+ struct fixed31_32 gamma)
+{
+ const struct fixed31_32 one = dal_fixed31_32_from_int(1);
+
+ if (dal_fixed31_32_lt(one, arg))
+ return one;
+
+ if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
+ return dal_fixed31_32_sub(
+ a2,
+ dal_fixed31_32_mul(
+ dal_fixed31_32_add(
+ one,
+ a3),
+ dal_fixed31_32_pow(
+ dal_fixed31_32_neg(arg),
+ dal_fixed31_32_recip(gamma))));
+ else if (dal_fixed31_32_le(a0, arg))
+ return dal_fixed31_32_sub(
+ dal_fixed31_32_mul(
+ dal_fixed31_32_add(
+ one,
+ a3),
+ dal_fixed31_32_pow(
+ arg,
+ dal_fixed31_32_recip(gamma))),
+ a2);
+ else
+ return dal_fixed31_32_mul(
+ arg,
+ a1);
+}
+
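+/* Inverse of translate_from_linear_space(): decode a gamma-encoded value back
+ * to linear light; a0 is pre-scaled by a1 so the breakpoint is expressed in
+ * the encoded domain.
+ */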
+static struct fixed31_32 translate_to_linear_space(
+ struct fixed31_32 arg,
+ struct fixed31_32 a0,
+ struct fixed31_32 a1,
+ struct fixed31_32 a2,
+ struct fixed31_32 a3,
+ struct fixed31_32 gamma)
+{
+ struct fixed31_32 linear;
+
+ a0 = dal_fixed31_32_mul(a0, a1);
+ if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0)))
+
+ linear = dal_fixed31_32_neg(
+ dal_fixed31_32_pow(
+ dal_fixed31_32_div(
+ dal_fixed31_32_sub(a2, arg),
+ dal_fixed31_32_add(
+ dal_fixed31_32_one, a3)), gamma));
+
+ else if (dal_fixed31_32_le(dal_fixed31_32_neg(a0), arg) &&
+ dal_fixed31_32_le(arg, a0))
+ linear = dal_fixed31_32_div(arg, a1);
+ else
+ linear = dal_fixed31_32_pow(
+ dal_fixed31_32_div(
+ dal_fixed31_32_add(a2, arg),
+ dal_fixed31_32_add(
+ dal_fixed31_32_one, a3)), gamma);
+
+ return linear;
+}
+
+static inline struct fixed31_32 translate_from_linear_space_ex(
+ struct fixed31_32 arg,
+ struct gamma_coefficients *coeff,
+ uint32_t color_index)
+{
+ return translate_from_linear_space(
+ arg,
+ coeff->a0[color_index],
+ coeff->a1[color_index],
+ coeff->a2[color_index],
+ coeff->a3[color_index],
+ coeff->user_gamma[color_index]);
+}
+
+
+static inline struct fixed31_32 translate_to_linear_space_ex(
+ struct fixed31_32 arg,
+ struct gamma_coefficients *coeff,
+ uint32_t color_index)
+{
+ return translate_to_linear_space(
+ arg,
+ coeff->a0[color_index],
+ coeff->a1[color_index],
+ coeff->a2[color_index],
+ coeff->a3[color_index],
+ coeff->user_gamma[color_index]);
+}
+
+
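+/* Find the pair of user ramp entries bracketing hw_point on the given
+ * channel; *pos reports whether hw_point lies left of, inside or right of
+ * the ramp's range.
+ */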
+static bool find_software_points(
+ const struct dc_gamma *ramp,
+ const struct gamma_pixel *axis_x,
+ struct fixed31_32 hw_point,
+ enum channel_name channel,
+ uint32_t *index_to_start,
+ uint32_t *index_left,
+ uint32_t *index_right,
+ enum hw_point_position *pos)
+{
+ const uint32_t max_number = ramp->num_entries + 3;
+
+ struct fixed31_32 left, right;
+
+ uint32_t i = *index_to_start;
+
+ while (i < max_number) {
+ if (channel == CHANNEL_NAME_RED) {
+ left = axis_x[i].r;
+
+ if (i < max_number - 1)
+ right = axis_x[i + 1].r;
+ else
+ right = axis_x[max_number - 1].r;
+ } else if (channel == CHANNEL_NAME_GREEN) {
+ left = axis_x[i].g;
+
+ if (i < max_number - 1)
+ right = axis_x[i + 1].g;
+ else
+ right = axis_x[max_number - 1].g;
+ } else {
+ left = axis_x[i].b;
+
+ if (i < max_number - 1)
+ right = axis_x[i + 1].b;
+ else
+ right = axis_x[max_number - 1].b;
+ }
+
+ if (dal_fixed31_32_le(left, hw_point) &&
+ dal_fixed31_32_le(hw_point, right)) {
+ *index_to_start = i;
+ *index_left = i;
+
+ if (i < max_number - 1)
+ *index_right = i + 1;
+ else
+ *index_right = max_number - 1;
+
+ *pos = HW_POINT_POSITION_MIDDLE;
+
+ return true;
+ } else if ((i == *index_to_start) &&
+ dal_fixed31_32_le(hw_point, left)) {
+ *index_to_start = i;
+ *index_left = i;
+ *index_right = i;
+
+ *pos = HW_POINT_POSITION_LEFT;
+
+ return true;
+ } else if ((i == max_number - 1) &&
+ dal_fixed31_32_le(right, hw_point)) {
+ *index_to_start = i;
+ *index_left = i;
+ *index_right = i;
+
+ *pos = HW_POINT_POSITION_RIGHT;
+
+ return true;
+ }
+
+ ++i;
+ }
+
+ return false;
+}
+
+static bool build_custom_gamma_mapping_coefficients_worker(
+ const struct dc_gamma *ramp,
+ struct pixel_gamma_point *coeff,
+ const struct hw_x_point *coordinates_x,
+ const struct gamma_pixel *axis_x,
+ enum channel_name channel,
+ uint32_t number_of_points)
+{
+ uint32_t i = 0;
+
+ while (i <= number_of_points) {
+ struct fixed31_32 coord_x;
+
+ uint32_t index_to_start = 0;
+ uint32_t index_left = 0;
+ uint32_t index_right = 0;
+
+ enum hw_point_position hw_pos;
+
+ struct gamma_point *point;
+
+ struct fixed31_32 left_pos;
+ struct fixed31_32 right_pos;
+
+ if (channel == CHANNEL_NAME_RED)
+ coord_x = coordinates_x[i].regamma_y_red;
+ else if (channel == CHANNEL_NAME_GREEN)
+ coord_x = coordinates_x[i].regamma_y_green;
+ else
+ coord_x = coordinates_x[i].regamma_y_blue;
+
+ if (!find_software_points(
+ ramp, axis_x, coord_x, channel,
+ &index_to_start, &index_left, &index_right, &hw_pos)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (index_left >= ramp->num_entries + 3) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (index_right >= ramp->num_entries + 3) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (channel == CHANNEL_NAME_RED) {
+ point = &coeff[i].r;
+
+ left_pos = axis_x[index_left].r;
+ right_pos = axis_x[index_right].r;
+ } else if (channel == CHANNEL_NAME_GREEN) {
+ point = &coeff[i].g;
+
+ left_pos = axis_x[index_left].g;
+ right_pos = axis_x[index_right].g;
+ } else {
+ point = &coeff[i].b;
+
+ left_pos = axis_x[index_left].b;
+ right_pos = axis_x[index_right].b;
+ }
+
+ if (hw_pos == HW_POINT_POSITION_MIDDLE)
+ point->coeff = dal_fixed31_32_div(
+ dal_fixed31_32_sub(
+ coord_x,
+ left_pos),
+ dal_fixed31_32_sub(
+ right_pos,
+ left_pos));
+ else if (hw_pos == HW_POINT_POSITION_LEFT)
+ point->coeff = dal_fixed31_32_zero;
+ else if (hw_pos == HW_POINT_POSITION_RIGHT)
+ point->coeff = dal_fixed31_32_from_int(2);
+ else {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ point->left_index = index_left;
+ point->right_index = index_right;
+ point->pos = hw_pos;
+
+ ++i;
+ }
+
+ return true;
+}
+
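+/* Interpolate the user ramp at one hardware point: blend rgb[left_index] and
+ * rgb[right_index] using the coefficient precomputed by the mapping worker.
+ */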
+static struct fixed31_32 calculate_mapped_value(
+ struct pwl_float_data *rgb,
+ const struct pixel_gamma_point *coeff,
+ enum channel_name channel,
+ uint32_t max_index)
+{
+ const struct gamma_point *point;
+
+ struct fixed31_32 result;
+
+ if (channel == CHANNEL_NAME_RED)
+ point = &coeff->r;
+ else if (channel == CHANNEL_NAME_GREEN)
+ point = &coeff->g;
+ else
+ point = &coeff->b;
+
+ if ((point->left_index < 0) || (point->left_index > max_index)) {
+ BREAK_TO_DEBUGGER();
+ return dal_fixed31_32_zero;
+ }
+
+ if ((point->right_index < 0) || (point->right_index > max_index)) {
+ BREAK_TO_DEBUGGER();
+ return dal_fixed31_32_zero;
+ }
+
+ if (point->pos == HW_POINT_POSITION_MIDDLE)
+ if (channel == CHANNEL_NAME_RED)
+ result = dal_fixed31_32_add(
+ dal_fixed31_32_mul(
+ point->coeff,
+ dal_fixed31_32_sub(
+ rgb[point->right_index].r,
+ rgb[point->left_index].r)),
+ rgb[point->left_index].r);
+ else if (channel == CHANNEL_NAME_GREEN)
+ result = dal_fixed31_32_add(
+ dal_fixed31_32_mul(
+ point->coeff,
+ dal_fixed31_32_sub(
+ rgb[point->right_index].g,
+ rgb[point->left_index].g)),
+ rgb[point->left_index].g);
+ else
+ result = dal_fixed31_32_add(
+ dal_fixed31_32_mul(
+ point->coeff,
+ dal_fixed31_32_sub(
+ rgb[point->right_index].b,
+ rgb[point->left_index].b)),
+ rgb[point->left_index].b);
+ else if (point->pos == HW_POINT_POSITION_LEFT) {
+ BREAK_TO_DEBUGGER();
+ result = dal_fixed31_32_zero;
+ } else {
+ BREAK_TO_DEBUGGER();
+ result = dal_fixed31_32_one;
+ }
+
+ return result;
+}
+
+static void build_pq(struct pwl_float_data_ex *rgb_regamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x,
+ uint32_t sdr_white_level)
+{
+ uint32_t i, start_index;
+
+ struct pwl_float_data_ex *rgb = rgb_regamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+ struct fixed31_32 x;
+ struct fixed31_32 output;
+ struct fixed31_32 scaling_factor =
+ dal_fixed31_32_from_fraction(sdr_white_level, 10000);
+
+ if (!pq_initialized && sdr_white_level == 80) {
+ precompute_pq();
+ pq_initialized = true;
+ }
+
+ /* TODO: start index is from segment 2^-24, skipping first segment
+ * due to x values too small for power calculations
+ */
+ start_index = 32;
+ rgb += start_index;
+ coord_x += start_index;
+
+ for (i = start_index; i <= hw_points_num; i++) {
+ /* Multiply 0.008 as regamma is 0-1 and FP16 input is 0-125.
+ * FP 1.0 = 80nits
+ */
+ if (sdr_white_level == 80) {
+ output = pq_table[i];
+ } else {
+ x = dal_fixed31_32_mul(coord_x->x, scaling_factor);
+ compute_pq(x, &output);
+ }
+
+ /* should really not happen? */
+ if (dal_fixed31_32_lt(output, dal_fixed31_32_zero))
+ output = dal_fixed31_32_zero;
+ else if (dal_fixed31_32_lt(dal_fixed31_32_one, output))
+ output = dal_fixed31_32_one;
+
+ rgb->r = output;
+ rgb->g = output;
+ rgb->b = output;
+
+ ++coord_x;
+ ++rgb;
+ }
+}
+
+static void build_de_pq(struct pwl_float_data_ex *de_pq,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x)
+{
+ uint32_t i;
+ struct fixed31_32 output;
+
+ struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125);
+
+ if (!de_pq_initialized) {
+ precompute_de_pq();
+ de_pq_initialized = true;
+ }
+
+
+ for (i = 0; i <= hw_points_num; i++) {
+ output = de_pq_table[i];
+ /* should really not happen? */
+ if (dal_fixed31_32_lt(output, dal_fixed31_32_zero))
+ output = dal_fixed31_32_zero;
+ else if (dal_fixed31_32_lt(scaling_factor, output))
+ output = scaling_factor;
+ de_pq[i].r = output;
+ de_pq[i].g = output;
+ de_pq[i].b = output;
+ }
+}
+
+static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x, bool is_2_4)
+{
+ uint32_t i;
+
+ struct gamma_coefficients coeff;
+ struct pwl_float_data_ex *rgb = rgb_regamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+
+ build_coefficients(&coeff, is_2_4);
+
+ i = 0;
+
+ while (i != hw_points_num + 1) {
+ /*TODO use y vs r,g,b*/
+ rgb->r = translate_from_linear_space_ex(
+ coord_x->x, &coeff, 0);
+ rgb->g = rgb->r;
+ rgb->b = rgb->r;
+ ++coord_x;
+ ++rgb;
+ ++i;
+ }
+}
+
+static void build_degamma(struct pwl_float_data_ex *curve,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x, bool is_2_4)
+{
+ uint32_t i;
+ struct gamma_coefficients coeff;
+ uint32_t begin_index, end_index;
+
+ build_coefficients(&coeff, is_2_4);
+ i = 0;
+
+ /* X points is 2^-25 to 2^7
+ * De-gamma X is 2^-12 to 2^0 – we are skipping first -12-(-25) = 13 regions
+ */
+ begin_index = 13 * NUM_PTS_IN_REGION;
+ end_index = begin_index + 12 * NUM_PTS_IN_REGION;
+
+ while (i != begin_index) {
+ curve[i].r = dal_fixed31_32_zero;
+ curve[i].g = dal_fixed31_32_zero;
+ curve[i].b = dal_fixed31_32_zero;
+ i++;
+ }
+
+ while (i != end_index) {
+ curve[i].r = translate_to_linear_space_ex(
+ coordinate_x[i].x, &coeff, 0);
+ curve[i].g = curve[i].r;
+ curve[i].b = curve[i].r;
+ i++;
+ }
+ while (i != hw_points_num + 1) {
+ curve[i].r = dal_fixed31_32_one;
+ curve[i].g = dal_fixed31_32_one;
+ curve[i].b = dal_fixed31_32_one;
+ i++;
+ }
+}
+
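+/* Normalize a GAMMA_RGB_256 user ramp to 0..1 (divide by 0xFF00, or 0xFFFF if
+ * any entry exceeds 0xFF00) and append three end points extrapolated with the
+ * dividers.
+ */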
+static bool scale_gamma(struct pwl_float_data *pwl_rgb,
+ const struct dc_gamma *ramp,
+ struct dividers dividers)
+{
+ const struct fixed31_32 max_driver = dal_fixed31_32_from_int(0xFFFF);
+ const struct fixed31_32 max_os = dal_fixed31_32_from_int(0xFF00);
+ struct fixed31_32 scaler = max_os;
+ uint32_t i;
+ struct pwl_float_data *rgb = pwl_rgb;
+ struct pwl_float_data *rgb_last = rgb + ramp->num_entries - 1;
+
+ i = 0;
+
+ do {
+ if (dal_fixed31_32_lt(max_os, ramp->entries.red[i]) ||
+ dal_fixed31_32_lt(max_os, ramp->entries.green[i]) ||
+ dal_fixed31_32_lt(max_os, ramp->entries.blue[i])) {
+ scaler = max_driver;
+ break;
+ }
+ ++i;
+ } while (i != ramp->num_entries);
+
+ i = 0;
+
+ do {
+ rgb->r = dal_fixed31_32_div(
+ ramp->entries.red[i], scaler);
+ rgb->g = dal_fixed31_32_div(
+ ramp->entries.green[i], scaler);
+ rgb->b = dal_fixed31_32_div(
+ ramp->entries.blue[i], scaler);
+
+ ++rgb;
+ ++i;
+ } while (i != ramp->num_entries);
+
+ rgb->r = dal_fixed31_32_mul(rgb_last->r,
+ dividers.divider1);
+ rgb->g = dal_fixed31_32_mul(rgb_last->g,
+ dividers.divider1);
+ rgb->b = dal_fixed31_32_mul(rgb_last->b,
+ dividers.divider1);
+
+ ++rgb;
+
+ rgb->r = dal_fixed31_32_mul(rgb_last->r,
+ dividers.divider2);
+ rgb->g = dal_fixed31_32_mul(rgb_last->g,
+ dividers.divider2);
+ rgb->b = dal_fixed31_32_mul(rgb_last->b,
+ dividers.divider2);
+
+ ++rgb;
+
+ rgb->r = dal_fixed31_32_mul(rgb_last->r,
+ dividers.divider3);
+ rgb->g = dal_fixed31_32_mul(rgb_last->g,
+ dividers.divider3);
+ rgb->b = dal_fixed31_32_mul(rgb_last->b,
+ dividers.divider3);
+
+ return true;
+}
+
+static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb,
+ const struct dc_gamma *ramp,
+ struct dividers dividers)
+{
+ uint32_t i;
+ struct fixed31_32 min = dal_fixed31_32_zero;
+ struct fixed31_32 max = dal_fixed31_32_one;
+
+ struct fixed31_32 delta = dal_fixed31_32_zero;
+ struct fixed31_32 offset = dal_fixed31_32_zero;
+
+ for (i = 0 ; i < ramp->num_entries; i++) {
+ if (dal_fixed31_32_lt(ramp->entries.red[i], min))
+ min = ramp->entries.red[i];
+
+ if (dal_fixed31_32_lt(ramp->entries.green[i], min))
+ min = ramp->entries.green[i];
+
+ if (dal_fixed31_32_lt(ramp->entries.blue[i], min))
+ min = ramp->entries.blue[i];
+
+ if (dal_fixed31_32_lt(max, ramp->entries.red[i]))
+ max = ramp->entries.red[i];
+
+ if (dal_fixed31_32_lt(max, ramp->entries.green[i]))
+ max = ramp->entries.green[i];
+
+ if (dal_fixed31_32_lt(max, ramp->entries.blue[i]))
+ max = ramp->entries.blue[i];
+ }
+
+ if (dal_fixed31_32_lt(min, dal_fixed31_32_zero))
+ delta = dal_fixed31_32_neg(min);
+
+ offset = dal_fixed31_32_add(min, max);
+
+ for (i = 0 ; i < ramp->num_entries; i++) {
+ pwl_rgb[i].r = dal_fixed31_32_div(
+ dal_fixed31_32_add(
+ ramp->entries.red[i], delta), offset);
+ pwl_rgb[i].g = dal_fixed31_32_div(
+ dal_fixed31_32_add(
+ ramp->entries.green[i], delta), offset);
+ pwl_rgb[i].b = dal_fixed31_32_div(
+ dal_fixed31_32_add(
+ ramp->entries.blue[i], delta), offset);
+
+ }
+
+ pwl_rgb[i].r = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+ pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
+ pwl_rgb[i].g = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+ pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
+ pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+ pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
+ ++i;
+ pwl_rgb[i].r = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+ pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r);
+ pwl_rgb[i].g = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+ pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
+ pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
+ pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
+
+ return true;
+}
+
+/*
+ * RS3+ color transform DDI - 1D LUT adjustment is composed with regamma here
+ * Input is evenly distributed in the output color space as specified in
+ * SetTimings
+ *
+ * Interpolation details:
+ * 1D LUT has 4096 values which give curve correction in 0-1 float range
+ * for evenly spaced points in 0-1 range. lut1D[index] gives correction
+ * for index/4095.
+ * First we find index for which:
+ * index/4095 < regamma_y < (index+1)/4095 =>
+ * index < 4095*regamma_y < index + 1
+ * norm_y = 4095*regamma_y, and index is just truncating to nearest integer
+ * lut1 = lut1D[index], lut2 = lut1D[index+1]
+ *
+ *adjustedY is then linearly interpolating regamma Y between lut1 and lut2
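+ * Example: regamma_y = 0.25 -> norm_y = 1023.75, index = 1023, so
+ * adjustedY = lut1D[1023] + 0.75 * (lut1D[1024] - lut1D[1023])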
+ */
+static void apply_lut_1d(
+ const struct dc_gamma *ramp,
+ uint32_t num_hw_points,
+ struct dc_transfer_func_distributed_points *tf_pts)
+{
+ int i = 0;
+ int color = 0;
+ struct fixed31_32 *regamma_y;
+ struct fixed31_32 norm_y;
+ struct fixed31_32 lut1;
+ struct fixed31_32 lut2;
+ const int max_lut_index = 4095;
+ const struct fixed31_32 max_lut_index_f =
+ dal_fixed31_32_from_int_nonconst(max_lut_index);
+ int32_t index = 0, index_next = 0;
+ struct fixed31_32 index_f;
+ struct fixed31_32 delta_lut;
+ struct fixed31_32 delta_index;
+
+ if (ramp->type != GAMMA_CS_TFM_1D)
+ return; // this is not expected
+
+ for (i = 0; i < num_hw_points; i++) {
+ for (color = 0; color < 3; color++) {
+ if (color == 0)
+ regamma_y = &tf_pts->red[i];
+ else if (color == 1)
+ regamma_y = &tf_pts->green[i];
+ else
+ regamma_y = &tf_pts->blue[i];
+
+ norm_y = dal_fixed31_32_mul(max_lut_index_f,
+ *regamma_y);
+ index = dal_fixed31_32_floor(norm_y);
+ index_f = dal_fixed31_32_from_int_nonconst(index);
+
+ if (index < 0 || index > max_lut_index)
+ continue;
+
+ index_next = (index == max_lut_index) ? index : index+1;
+
+ if (color == 0) {
+ lut1 = ramp->entries.red[index];
+ lut2 = ramp->entries.red[index_next];
+ } else if (color == 1) {
+ lut1 = ramp->entries.green[index];
+ lut2 = ramp->entries.green[index_next];
+ } else {
+ lut1 = ramp->entries.blue[index];
+ lut2 = ramp->entries.blue[index_next];
+ }
+
+ // we have everything now, so interpolate
+ delta_lut = dal_fixed31_32_sub(lut2, lut1);
+ delta_index = dal_fixed31_32_sub(norm_y, index_f);
+
+ *regamma_y = dal_fixed31_32_add(lut1,
+ dal_fixed31_32_mul(delta_index, delta_lut));
+ }
+ }
+}
+
+static void build_evenly_distributed_points(
+ struct gamma_pixel *points,
+ uint32_t numberof_points,
+ struct dividers dividers)
+{
+ struct gamma_pixel *p = points;
+ struct gamma_pixel *p_last = p + numberof_points - 1;
+
+ uint32_t i = 0;
+
+ do {
+ struct fixed31_32 value = dal_fixed31_32_from_fraction(i,
+ numberof_points - 1);
+
+ p->r = value;
+ p->g = value;
+ p->b = value;
+
+ ++p;
+ ++i;
+ } while (i != numberof_points);
+
+ p->r = dal_fixed31_32_div(p_last->r, dividers.divider1);
+ p->g = dal_fixed31_32_div(p_last->g, dividers.divider1);
+ p->b = dal_fixed31_32_div(p_last->b, dividers.divider1);
+
+ ++p;
+
+ p->r = dal_fixed31_32_div(p_last->r, dividers.divider2);
+ p->g = dal_fixed31_32_div(p_last->g, dividers.divider2);
+ p->b = dal_fixed31_32_div(p_last->b, dividers.divider2);
+
+ ++p;
+
+ p->r = dal_fixed31_32_div(p_last->r, dividers.divider3);
+ p->g = dal_fixed31_32_div(p_last->g, dividers.divider3);
+ p->b = dal_fixed31_32_div(p_last->b, dividers.divider3);
+}
+
+static inline void copy_rgb_regamma_to_coordinates_x(
+ struct hw_x_point *coordinates_x,
+ uint32_t hw_points_num,
+ const struct pwl_float_data_ex *rgb_ex)
+{
+ struct hw_x_point *coords = coordinates_x;
+ uint32_t i = 0;
+ const struct pwl_float_data_ex *rgb_regamma = rgb_ex;
+
+ while (i <= hw_points_num) {
+ coords->regamma_y_red = rgb_regamma->r;
+ coords->regamma_y_green = rgb_regamma->g;
+ coords->regamma_y_blue = rgb_regamma->b;
+
+ ++coords;
+ ++rgb_regamma;
+ ++i;
+ }
+}
+
+static bool calculate_interpolated_hardware_curve(
+ const struct dc_gamma *ramp,
+ struct pixel_gamma_point *coeff128,
+ struct pwl_float_data *rgb_user,
+ const struct hw_x_point *coordinates_x,
+ const struct gamma_pixel *axis_x,
+ uint32_t number_of_points,
+ struct dc_transfer_func_distributed_points *tf_pts)
+{
+
+ const struct pixel_gamma_point *coeff = coeff128;
+ uint32_t max_entries = 3 - 1;
+
+ uint32_t i = 0;
+
+ for (i = 0; i < 3; i++) {
+ if (!build_custom_gamma_mapping_coefficients_worker(
+ ramp, coeff128, coordinates_x, axis_x, i,
+ number_of_points))
+ return false;
+ }
+
+ i = 0;
+ max_entries += ramp->num_entries;
+
+ /* TODO: float point case */
+
+ while (i <= number_of_points) {
+ tf_pts->red[i] = calculate_mapped_value(
+ rgb_user, coeff, CHANNEL_NAME_RED, max_entries);
+ tf_pts->green[i] = calculate_mapped_value(
+ rgb_user, coeff, CHANNEL_NAME_GREEN, max_entries);
+ tf_pts->blue[i] = calculate_mapped_value(
+ rgb_user, coeff, CHANNEL_NAME_BLUE, max_entries);
+
+ ++coeff;
+ ++i;
+ }
+
+ return true;
+}
+
+static void build_new_custom_resulted_curve(
+ uint32_t hw_points_num,
+ struct dc_transfer_func_distributed_points *tf_pts)
+{
+ uint32_t i;
+
+ i = 0;
+
+ while (i != hw_points_num + 1) {
+ tf_pts->red[i] = dal_fixed31_32_clamp(
+ tf_pts->red[i], dal_fixed31_32_zero,
+ dal_fixed31_32_one);
+ tf_pts->green[i] = dal_fixed31_32_clamp(
+ tf_pts->green[i], dal_fixed31_32_zero,
+ dal_fixed31_32_one);
+ tf_pts->blue[i] = dal_fixed31_32_clamp(
+ tf_pts->blue[i], dal_fixed31_32_zero,
+ dal_fixed31_32_one);
+
+ ++i;
+ }
+}
+
+static bool map_regamma_hw_to_x_user(
+ const struct dc_gamma *ramp,
+ struct pixel_gamma_point *coeff128,
+ struct pwl_float_data *rgb_user,
+ struct hw_x_point *coords_x,
+ const struct gamma_pixel *axis_x,
+ const struct pwl_float_data_ex *rgb_regamma,
+ uint32_t hw_points_num,
+ struct dc_transfer_func_distributed_points *tf_pts,
+ bool mapUserRamp)
+{
+ /* set up to preserve the calculated ideal regamma values */
+
+ int i = 0;
+ struct hw_x_point *coords = coords_x;
+ const struct pwl_float_data_ex *regamma = rgb_regamma;
+
+ if (mapUserRamp) {
+ copy_rgb_regamma_to_coordinates_x(coords,
+ hw_points_num,
+ rgb_regamma);
+
+ calculate_interpolated_hardware_curve(
+ ramp, coeff128, rgb_user, coords, axis_x,
+ hw_points_num, tf_pts);
+ } else {
+ /* just copy current rgb_regamma into tf_pts */
+ while (i <= hw_points_num) {
+ tf_pts->red[i] = regamma->r;
+ tf_pts->green[i] = regamma->g;
+ tf_pts->blue[i] = regamma->b;
+
+ ++regamma;
+ ++i;
+ }
+ }
+
+ build_new_custom_resulted_curve(hw_points_num, tf_pts);
+
+ return true;
+}
+
+#define _EXTRA_POINTS 3
+
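+/* Build output_tf->tf_pts for HW programming: scale the user ramp (if any),
+ * generate the ideal regamma curve (PQ or sRGB/BT.709-style) over the fixed
+ * X points, then map it back onto the user ramp. Returns false on bypass or
+ * allocation failure.
+ */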
+bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+ const struct dc_gamma *ramp, bool mapUserRamp)
+{
+ struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
+ struct dividers dividers;
+
+ struct pwl_float_data *rgb_user = NULL;
+ struct pwl_float_data_ex *rgb_regamma = NULL;
+ struct gamma_pixel *axix_x = NULL;
+ struct pixel_gamma_point *coeff = NULL;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
+ bool ret = false;
+
+ if (output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ /* we can use hardcoded curve for plain SRGB TF */
+ if (output_tf->type == TF_TYPE_PREDEFINED &&
+ output_tf->tf == TRANSFER_FUNCTION_SRGB &&
+ (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ return true;
+
+ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+ rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ GFP_KERNEL);
+ if (!rgb_user)
+ goto rgb_user_alloc_fail;
+ rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+ axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
+ GFP_KERNEL);
+ if (!axix_x)
+ goto axix_x_alloc_fail;
+ coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ if (!coeff)
+ goto coeff_alloc_fail;
+
+ dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
+ dividers.divider2 = dal_fixed31_32_from_int(2);
+ dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
+
+ tf = output_tf->tf;
+
+ build_evenly_distributed_points(
+ axix_x,
+ ramp->num_entries,
+ dividers);
+
+ if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
+ scale_gamma(rgb_user, ramp, dividers);
+ else if (ramp->type == GAMMA_RGB_FLOAT_1024)
+ scale_gamma_dx(rgb_user, ramp, dividers);
+
+ if (tf == TRANSFER_FUNCTION_PQ) {
+ tf_pts->end_exponent = 7;
+ tf_pts->x_point_at_y1_red = 125;
+ tf_pts->x_point_at_y1_green = 125;
+ tf_pts->x_point_at_y1_blue = 125;
+
+ build_pq(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ output_tf->sdr_ref_white_level);
+ } else {
+ tf_pts->end_exponent = 0;
+ tf_pts->x_point_at_y1_red = 1;
+ tf_pts->x_point_at_y1_green = 1;
+ tf_pts->x_point_at_y1_blue = 1;
+
+ build_regamma(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? true:false);
+ }
+
+ map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+ coordinates_x, axix_x, rgb_regamma,
+ MAX_HW_POINTS, tf_pts,
+ (mapUserRamp || ramp->type != GAMMA_RGB_256) &&
+ ramp->type != GAMMA_CS_TFM_1D);
+
+ if (ramp->type == GAMMA_CS_TFM_1D)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
+
+ ret = true;
+
+ kfree(coeff);
+coeff_alloc_fail:
+ kfree(axix_x);
+axix_x_alloc_fail:
+ kfree(rgb_regamma);
+rgb_regamma_alloc_fail:
+ kfree(rgb_user);
+rgb_user_alloc_fail:
+ return ret;
+}
+
+bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+ const struct dc_gamma *ramp, bool mapUserRamp)
+{
+ struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
+ struct dividers dividers;
+
+ struct pwl_float_data *rgb_user = NULL;
+ struct pwl_float_data_ex *curve = NULL;
+ struct gamma_pixel *axix_x = NULL;
+ struct pixel_gamma_point *coeff = NULL;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
+ bool ret = false;
+
+ if (input_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ /* we can use hardcoded curve for plain SRGB TF */
+ if (input_tf->type == TF_TYPE_PREDEFINED &&
+ input_tf->tf == TRANSFER_FUNCTION_SRGB &&
+ (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ return true;
+
+ input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+ rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ GFP_KERNEL);
+ if (!rgb_user)
+ goto rgb_user_alloc_fail;
+ curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
+ if (!curve)
+ goto curve_alloc_fail;
+ axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
+ GFP_KERNEL);
+ if (!axix_x)
+ goto axix_x_alloc_fail;
+ coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ if (!coeff)
+ goto coeff_alloc_fail;
+
+ dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
+ dividers.divider2 = dal_fixed31_32_from_int(2);
+ dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
+
+ tf = input_tf->tf;
+
+ build_evenly_distributed_points(
+ axix_x,
+ ramp->num_entries,
+ dividers);
+
+ if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
+ scale_gamma(rgb_user, ramp, dividers);
+ else if (ramp->type == GAMMA_RGB_FLOAT_1024)
+ scale_gamma_dx(rgb_user, ramp, dividers);
+
+ if (tf == TRANSFER_FUNCTION_PQ)
+ build_de_pq(curve,
+ MAX_HW_POINTS,
+ coordinates_x);
+ else
+ build_degamma(curve,
+ MAX_HW_POINTS,
+ coordinates_x,
+ tf == TRANSFER_FUNCTION_SRGB ? true:false);
+
+ tf_pts->end_exponent = 0;
+ tf_pts->x_point_at_y1_red = 1;
+ tf_pts->x_point_at_y1_green = 1;
+ tf_pts->x_point_at_y1_blue = 1;
+
+ map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+ coordinates_x, axix_x, curve,
+ MAX_HW_POINTS, tf_pts,
+ mapUserRamp);
+
+ ret = true;
+
+ kfree(coeff);
+coeff_alloc_fail:
+ kfree(axix_x);
+axix_x_alloc_fail:
+ kfree(curve);
+curve_alloc_fail:
+ kfree(rgb_user);
+rgb_user_alloc_fail:
+
+ return ret;
+
+}
+
+
+bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+ struct dc_transfer_func_distributed_points *points)
+{
+ uint32_t i;
+ bool ret = false;
+ struct pwl_float_data_ex *rgb_regamma = NULL;
+
+ if (trans == TRANSFER_FUNCTION_UNITY ||
+ trans == TRANSFER_FUNCTION_LINEAR) {
+ points->end_exponent = 0;
+ points->x_point_at_y1_red = 1;
+ points->x_point_at_y1_green = 1;
+ points->x_point_at_y1_blue = 1;
+
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = coordinates_x[i].x;
+ points->green[i] = coordinates_x[i].x;
+ points->blue[i] = coordinates_x[i].x;
+ }
+ ret = true;
+ } else if (trans == TRANSFER_FUNCTION_PQ) {
+ rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
+ _EXTRA_POINTS), GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+ points->end_exponent = 7;
+ points->x_point_at_y1_red = 125;
+ points->x_point_at_y1_green = 125;
+ points->x_point_at_y1_blue = 125;
+
+
+ build_pq(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ 80);
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = rgb_regamma[i].r;
+ points->green[i] = rgb_regamma[i].g;
+ points->blue[i] = rgb_regamma[i].b;
+ }
+ ret = true;
+
+ kfree(rgb_regamma);
+ } else if (trans == TRANSFER_FUNCTION_SRGB ||
+ trans == TRANSFER_FUNCTION_BT709) {
+ rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
+ _EXTRA_POINTS), GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+ points->end_exponent = 0;
+ points->x_point_at_y1_red = 1;
+ points->x_point_at_y1_green = 1;
+ points->x_point_at_y1_blue = 1;
+
+ build_regamma(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x, trans == TRANSFER_FUNCTION_SRGB ? true:false);
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = rgb_regamma[i].r;
+ points->green[i] = rgb_regamma[i].g;
+ points->blue[i] = rgb_regamma[i].b;
+ }
+ ret = true;
+
+ kfree(rgb_regamma);
+ }
+rgb_regamma_alloc_fail:
+ return ret;
+}
+
+
+bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+ struct dc_transfer_func_distributed_points *points)
+{
+ uint32_t i;
+ bool ret = false;
+ struct pwl_float_data_ex *rgb_degamma = NULL;
+
+ if (trans == TRANSFER_FUNCTION_UNITY ||
+ trans == TRANSFER_FUNCTION_LINEAR) {
+
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = coordinates_x[i].x;
+ points->green[i] = coordinates_x[i].x;
+ points->blue[i] = coordinates_x[i].x;
+ }
+ ret = true;
+ } else if (trans == TRANSFER_FUNCTION_PQ) {
+ rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
+ _EXTRA_POINTS), GFP_KERNEL);
+ if (!rgb_degamma)
+ goto rgb_degamma_alloc_fail;
+
+
+ build_de_pq(rgb_degamma,
+ MAX_HW_POINTS,
+ coordinates_x);
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = rgb_degamma[i].r;
+ points->green[i] = rgb_degamma[i].g;
+ points->blue[i] = rgb_degamma[i].b;
+ }
+ ret = true;
+
+ kfree(rgb_degamma);
+ } else if (trans == TRANSFER_FUNCTION_SRGB ||
+ trans == TRANSFER_FUNCTION_BT709) {
+ rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
+ _EXTRA_POINTS), GFP_KERNEL);
+ if (!rgb_degamma)
+ goto rgb_degamma_alloc_fail;
+
+ build_degamma(rgb_degamma,
+ MAX_HW_POINTS,
+ coordinates_x, trans == TRANSFER_FUNCTION_SRGB ? true:false);
+ for (i = 0; i <= MAX_HW_POINTS ; i++) {
+ points->red[i] = rgb_degamma[i].r;
+ points->green[i] = rgb_degamma[i].g;
+ points->blue[i] = rgb_degamma[i].b;
+ }
+ ret = true;
+
+ kfree(rgb_degamma);
+ }
+ points->end_exponent = 0;
+ points->x_point_at_y1_red = 1;
+ points->x_point_at_y1_green = 1;
+ points->x_point_at_y1_blue = 1;
+
+rgb_degamma_alloc_fail:
+ return ret;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
new file mode 100644
index 0000000..b7f9bc2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef COLOR_MOD_COLOR_GAMMA_H_
+#define COLOR_MOD_COLOR_GAMMA_H_
+
+struct dc_transfer_func;
+struct dc_gamma;
+struct dc_transfer_func_distributed_points;
+struct dc_rgb_fixed;
+enum dc_transfer_func_predefined;
+
+void setup_x_points_distribution(void);
+void precompute_pq(void);
+void precompute_de_pq(void);
+
+bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+ const struct dc_gamma *ramp, bool mapUserRamp);
+
+bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
+ const struct dc_gamma *ramp, bool mapUserRamp);
+
+bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
+ struct dc_transfer_func_distributed_points *points);
+
+bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+ struct dc_transfer_func_distributed_points *points);
+
+
+
+#endif /* COLOR_MOD_COLOR_GAMMA_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 4d7db4a..27d4003 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -33,7 +33,7 @@
/* Refresh rate ramp at a fixed rate of 65 Hz/second */
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
-#define RENDER_TIMES_MAX_COUNT 20
+#define RENDER_TIMES_MAX_COUNT 10
/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
#define BTR_EXIT_MARGIN 2000
/* Number of consecutive frames to check before entering/exiting fixed refresh*/
@@ -46,13 +46,15 @@
#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal"
+#define FREESYNC_DEFAULT_REGKEY "LCDFreeSyncDefault"
+
struct gradual_static_ramp {
bool ramp_is_active;
bool ramp_direction_is_up;
unsigned int ramp_current_frame_duration_in_ns;
};
-struct time_cache {
+struct freesync_time {
/* video (48Hz feature) related */
unsigned int update_duration_in_ns;
@@ -64,6 +66,9 @@ struct time_cache {
unsigned int render_times_index;
unsigned int render_times[RENDER_TIMES_MAX_COUNT];
+
+ unsigned int min_window;
+ unsigned int max_window;
};
struct below_the_range {
@@ -98,11 +103,14 @@ struct freesync_state {
bool static_screen;
bool video;
+ unsigned int vmin;
+ unsigned int vmax;
+
+ struct freesync_time time;
+
unsigned int nominal_refresh_rate_in_micro_hz;
bool windowed_fullscreen;
- struct time_cache time;
-
struct gradual_static_ramp static_ramp;
struct below_the_range btr;
struct fixed_refresh fixed_refresh;
@@ -119,27 +127,21 @@ struct freesync_entity {
struct freesync_registry_options {
bool drr_external_supported;
bool drr_internal_supported;
+ bool lcd_freesync_default_set;
+ int lcd_freesync_default_value;
};
struct core_freesync {
struct mod_freesync public;
struct dc *dc;
+ struct freesync_registry_options opts;
struct freesync_entity *map;
int num_entities;
- struct freesync_registry_options opts;
};
#define MOD_FREESYNC_TO_CORE(mod_freesync)\
container_of(mod_freesync, struct core_freesync, public)
-static bool check_dc_support(const struct dc *dc)
-{
- if (dc->stream_funcs.adjust_vmin_vmax == NULL)
- return false;
-
- return true;
-}
-
struct mod_freesync *mod_freesync_create(struct dc *dc)
{
struct core_freesync *core_freesync =
@@ -154,7 +156,7 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
goto fail_alloc_context;
core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
- GFP_KERNEL);
+ GFP_KERNEL);
if (core_freesync->map == NULL)
goto fail_alloc_map;
@@ -169,9 +171,6 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
core_freesync->dc = dc;
- if (!check_dc_support(dc))
- goto fail_construct;
-
/* Create initial module folder in registry for freesync enable data */
flag.save_per_edid = true;
flag.save_per_link = false;
@@ -194,6 +193,16 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
(data & 1) ? false : true;
}
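+ /* Cache the LCDFreeSyncDefault regkey so newly added FreeSync streams can inherit its user enable bits */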
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_DEFAULT_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.lcd_freesync_default_set = true;
+ core_freesync->opts.lcd_freesync_default_value = data;
+ } else {
+ core_freesync->opts.lcd_freesync_default_set = false;
+ core_freesync->opts.lcd_freesync_default_value = 0;
+ }
+
return &core_freesync->public;
fail_construct:
@@ -299,6 +308,18 @@ bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
core_freesync->map[core_freesync->num_entities].user_enable.
enable_for_video =
(persistent_freesync_enable & 4) ? true : false;
+ /* If this is a FreeSync display and LCDFreeSyncDefault is set, use it as the default and write it back to userenable */
+ } else if (caps->supported && (core_freesync->opts.lcd_freesync_default_set)) {
+ core_freesync->map[core_freesync->num_entities].user_enable.enable_for_gaming =
+ (core_freesync->opts.lcd_freesync_default_value & 1) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.enable_for_static =
+ (core_freesync->opts.lcd_freesync_default_value & 2) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.enable_for_video =
+ (core_freesync->opts.lcd_freesync_default_value & 4) ? true : false;
+ dm_write_persistent_data(dc->ctx, stream->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable", &core_freesync->opts.lcd_freesync_default_value,
+ sizeof(int), &flag);
} else {
core_freesync->map[core_freesync->num_entities].user_enable.
enable_for_gaming = false;
@@ -341,6 +362,25 @@ bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
return true;
}
+static void adjust_vmin_vmax(struct core_freesync *core_freesync,
+ struct dc_stream_state **streams,
+ int num_streams,
+ int map_index,
+ unsigned int v_total_min,
+ unsigned int v_total_max)
+{
+ if (streams == NULL || num_streams != 1)
+ return;
+
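+ /* Cache the programmed range so mod_freesync_get_settings() can report it */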
+ core_freesync->map[map_index].state.vmin = v_total_min;
+ core_freesync->map[map_index].state.vmax = v_total_max;
+
+ dc_stream_adjust_vmin_vmax(core_freesync->dc, streams,
+ num_streams, v_total_min,
+ v_total_max);
+}
+
+
static void update_stream_freesync_context(struct core_freesync *core_freesync,
struct dc_stream_state *stream)
{
@@ -599,9 +639,9 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
update_stream_freesync_context(core_freesync,
streams[stream_idx]);
- core_freesync->dc->stream_funcs.
- adjust_vmin_vmax(core_freesync->dc, streams,
- num_streams, v_total_min,
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, map_index,
+ v_total_min,
v_total_max);
return true;
@@ -625,10 +665,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
core_freesync,
streams[stream_idx]);
- core_freesync->dc->stream_funcs.
adjust_vmin_vmax(
- core_freesync->dc, streams,
- num_streams, v_total_nominal,
+ core_freesync, streams,
+ num_streams, map_index,
+ v_total_nominal,
v_total_nominal);
}
return true;
@@ -645,10 +685,9 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
core_freesync,
streams[stream_idx]);
- core_freesync->dc->stream_funcs.
- adjust_vmin_vmax(
- core_freesync->dc, streams,
- num_streams, v_total_nominal,
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, map_index,
+ v_total_nominal,
v_total_nominal);
/* Reset the cached variables */
@@ -665,11 +704,10 @@ static bool set_freesync_on_streams(struct core_freesync *core_freesync,
* not support freesync because a former stream has
* be programmed
*/
- core_freesync->dc->stream_funcs.
- adjust_vmin_vmax(
- core_freesync->dc, streams,
- num_streams, v_total_nominal,
- v_total_nominal);
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, map_index,
+ v_total_nominal,
+ v_total_nominal);
/* Reset the cached variables */
reset_freesync_state_variables(state);
}
@@ -786,9 +824,9 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
vmin = inserted_frame_v_total;
/* Program V_TOTAL */
- core_freesync->dc->stream_funcs.adjust_vmin_vmax(
- core_freesync->dc, streams,
- num_streams, vmin, vmax);
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, index,
+ vmin, vmax);
}
if (state->btr.frame_counter > 0)
@@ -822,17 +860,16 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
update_stream_freesync_context(core_freesync, streams[0]);
/* Program static screen ramp values */
- core_freesync->dc->stream_funcs.adjust_vmin_vmax(
- core_freesync->dc, streams,
- num_streams, v_total,
+ adjust_vmin_vmax(core_freesync, streams,
+ num_streams, index,
+ v_total,
v_total);
triggers.overlay_update = true;
triggers.surface_update = true;
- core_freesync->dc->stream_funcs.set_static_screen_events(
- core_freesync->dc, streams, num_streams,
- &triggers);
+ dc_stream_set_static_screen_events(core_freesync->dc, streams,
+ num_streams, &triggers);
}
}
@@ -916,9 +953,8 @@ void mod_freesync_update_state(struct mod_freesync *mod_freesync,
triggers.overlay_update = true;
triggers.surface_update = true;
- core_freesync->dc->stream_funcs.set_static_screen_events(
- core_freesync->dc, streams, num_streams,
- &triggers);
+ dc_stream_set_static_screen_events(core_freesync->dc, streams,
+ num_streams, &triggers);
if (freesync_program_required)
/* Program freesync according to current state*/
@@ -1084,10 +1120,9 @@ bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
max_refresh);
/* Program vtotal min/max */
- core_freesync->dc->stream_funcs.adjust_vmin_vmax(
- core_freesync->dc, &streams, 1,
- state->freesync_range.vmin,
- state->freesync_range.vmax);
+ adjust_vmin_vmax(core_freesync, &streams, 1, index,
+ state->freesync_range.vmin,
+ state->freesync_range.vmax);
}
if (min_refresh != 0 &&
@@ -1163,9 +1198,9 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
index = map_index_from_stream(core_freesync, stream);
- if (core_freesync->dc->stream_funcs.get_crtc_position(
- core_freesync->dc, &stream, 1,
- &position.vertical_count, &position.nominal_vcount)) {
+ if (dc_stream_get_crtc_position(core_freesync->dc, &stream, 1,
+ &position.vertical_count,
+ &position.nominal_vcount)) {
*nom_v_pos = position.nominal_vcount;
*v_pos = position.vertical_count;
@@ -1223,9 +1258,9 @@ void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync,
triggers.overlay_update = true;
triggers.surface_update = true;
- core_freesync->dc->stream_funcs.set_static_screen_events(
- core_freesync->dc, streams, num_streams,
- &triggers);
+ dc_stream_set_static_screen_events(core_freesync->dc,
+ streams, num_streams,
+ &triggers);
}
}
@@ -1421,13 +1456,9 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
} else {
vmin = state->freesync_range.vmin;
-
vmax = vmin;
-
- core_freesync->dc->stream_funcs.adjust_vmin_vmax(
- core_freesync->dc, &stream,
- 1, vmin,
- vmax);
+ adjust_vmin_vmax(core_freesync, &stream, 1,
+ map_index, vmin, vmax);
}
}
@@ -1481,3 +1512,43 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
}
}
+
+void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int *v_total_min, unsigned int *v_total_max,
+ unsigned int *event_triggers,
+ unsigned int *window_min, unsigned int *window_max,
+ unsigned int *lfc_mid_point_in_us,
+ unsigned int *inserted_frames,
+ unsigned int *inserted_duration_in_us)
+{
+ unsigned int stream_index, map_index;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
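+ /* Report the cached FreeSync state; with multiple supported streams the last one's values win */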
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ if (core_freesync->map[map_index].caps->supported) {
+ struct freesync_state state =
+ core_freesync->map[map_index].state;
+ *v_total_min = state.vmin;
+ *v_total_max = state.vmax;
+ *event_triggers = 0;
+ *window_min = state.time.min_window;
+ *window_max = state.time.max_window;
+ *lfc_mid_point_in_us = state.btr.mid_point_in_us;
+ *inserted_frames = state.btr.frames_to_insert;
+ *inserted_duration_in_us =
+ state.btr.inserted_frame_duration_in_us;
+ }
+
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 84b5342..f083e16 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -164,4 +164,13 @@ void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
struct dc_stream_state **streams, int num_streams,
unsigned int curr_time_stamp);
+void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int *v_total_min, unsigned int *v_total_max,
+ unsigned int *event_triggers,
+ unsigned int *window_min, unsigned int *window_max,
+ unsigned int *lfc_mid_point_in_us,
+ unsigned int *inserted_frames,
+ unsigned int *inserted_duration_in_us);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
index 1478225..3230e2a 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,53 +23,43 @@
*
*/
+#ifndef MODULES_INC_MOD_STATS_H_
+#define MODULES_INC_MOD_STATS_H_
+
#include "dm_services.h"
-#include "include/grph_object_id.h"
-static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
-{
- bool rc = true;
+struct mod_stats {
+ int dummy;
+};
+
+struct mod_stats_caps {
+ bool dummy;
+};
+
+struct mod_stats *mod_stats_create(struct dc *dc);
- switch (id.type) {
- case OBJECT_TYPE_UNKNOWN:
- rc = false;
- break;
- case OBJECT_TYPE_GPU:
- case OBJECT_TYPE_ENGINE:
- /* do NOT check for id.id == 0 */
- if (id.enum_id == ENUM_ID_UNKNOWN)
- rc = false;
- break;
- default:
- if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
- rc = false;
- break;
- }
+void mod_stats_destroy(struct mod_stats *mod_stats);
- return rc;
-}
+bool mod_stats_init(struct mod_stats *mod_stats);
-bool dal_graphics_object_id_is_equal(
- struct graphics_object_id id1,
- struct graphics_object_id id2)
-{
- if (false == dal_graphics_object_id_is_valid(id1)) {
- dm_output_to_console(
- "%s: Warning: comparing invalid object 'id1'!\n", __func__);
- return false;
- }
+void mod_stats_dump(struct mod_stats *mod_stats);
- if (false == dal_graphics_object_id_is_valid(id2)) {
- dm_output_to_console(
- "%s: Warning: comparing invalid object 'id2'!\n", __func__);
- return false;
- }
+void mod_stats_reset_data(struct mod_stats *mod_stats);
- if (id1.id == id2.id && id1.enum_id == id2.enum_id
- && id1.type == id2.type)
- return true;
+void mod_stats_update_flip(struct mod_stats *mod_stats,
+ unsigned long timestamp_in_ns);
- return false;
-}
+void mod_stats_update_vupdate(struct mod_stats *mod_stats,
+ unsigned long timestamp_in_ns);
+void mod_stats_update_freesync(struct mod_stats *mod_stats,
+ unsigned int v_total_min,
+ unsigned int v_total_max,
+ unsigned int event_triggers,
+ unsigned int window_min,
+ unsigned int window_max,
+ unsigned int lfc_mid_point_in_us,
+ unsigned int inserted_frames,
+ unsigned int inserted_frame_duration_in_us);
+#endif /* MODULES_INC_MOD_STATS_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
new file mode 100644
index 0000000..041f87b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "mod_stats.h"
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+
+#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
+#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000001
+#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
+
+#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
+#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
+#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
+
+#define MOD_STATS_NUM_VSYNCS 5
+
+struct stats_time_cache {
+ unsigned long flip_timestamp_in_ns;
+ unsigned long vupdate_timestamp_in_ns;
+
+ unsigned int render_time_in_us;
+ unsigned int avg_render_time_in_us_last_ten;
+ unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
+ unsigned int num_vsync_between_flips;
+
+ unsigned int flip_to_vsync_time_in_us;
+ unsigned int vsync_to_flip_time_in_us;
+
+ unsigned int min_window;
+ unsigned int max_window;
+ unsigned int v_total_min;
+ unsigned int v_total_max;
+ unsigned int event_triggers;
+
+ unsigned int lfc_mid_point_in_us;
+ unsigned int num_frames_inserted;
+ unsigned int inserted_duration_in_us;
+
+ unsigned int flags;
+};
+
+struct core_stats {
+ struct mod_stats public;
+ struct dc *dc;
+
+ struct stats_time_cache *time;
+ unsigned int index;
+
+ bool enabled;
+ unsigned int entries;
+};
+
+#define MOD_STATS_TO_CORE(mod_stats)\
+ container_of(mod_stats, struct core_stats, public)
+
+bool mod_stats_init(struct mod_stats *mod_stats)
+{
+ bool result = false;
+ struct core_stats *core_stats = NULL;
+ struct dc *dc = NULL;
+
+ if (mod_stats == NULL)
+ return false;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+ dc = core_stats->dc;
+
+ return result;
+}
+
+struct mod_stats *mod_stats_create(struct dc *dc)
+{
+ struct core_stats *core_stats = NULL;
+ struct persistent_data_flag flag;
+ unsigned int reg_data;
+ int i = 0;
+
+ core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
+
+ if (core_stats == NULL)
+ goto fail_alloc_context;
+
+ if (dc == NULL)
+ goto fail_construct;
+
+ core_stats->dc = dc;
+
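+ /* Enable flag and entry count come from the DalStats regkeys, falling back to the defaults above */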
+ core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ DAL_STATS_ENABLE_REGKEY,
+ &reg_data, sizeof(unsigned int), &flag))
+ core_stats->enabled = reg_data;
+
+ core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ DAL_STATS_ENTRIES_REGKEY,
+ &reg_data, sizeof(unsigned int), &flag)) {
+ if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
+ core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
+ else
+ core_stats->entries = reg_data;
+ }
+
+ core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
+ GFP_KERNEL);
+
+ if (core_stats->time == NULL)
+ goto fail_construct;
+
+ /* Purposely leave index 0 unused so we don't need special logic to
+ * handle calculation cases that depend on previous flip data.
+ */
+ core_stats->index = 1;
+
+ return &core_stats->public;
+
+fail_construct:
+ kfree(core_stats);
+
+fail_alloc_context:
+ return NULL;
+}
+
+void mod_stats_destroy(struct mod_stats *mod_stats)
+{
+ if (mod_stats != NULL) {
+ struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ kfree(core_stats->time);
+
+ kfree(core_stats);
+ }
+}
+
+void mod_stats_dump(struct mod_stats *mod_stats)
+{
+ struct dc *dc = NULL;
+ struct dal_logger *logger = NULL;
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int i = 0;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+ dc = core_stats->dc;
+ logger = dc->ctx->logger;
+ time = core_stats->time;
+
+ //LogEntry* pLog = GetLog()->Open(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
+
+ //if (!pLog->IsDummyEntry())
+ {
+ dm_logger_write(logger, LOG_PROFILING, "==Display Caps==\n");
+ dm_logger_write(logger, LOG_PROFILING, "\n");
+ dm_logger_write(logger, LOG_PROFILING, "\n");
+
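+ /* One whitespace-separated row per recorded entry, matching the header printed below */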
+ dm_logger_write(logger, LOG_PROFILING, "==Stats==\n");
+ dm_logger_write(logger, LOG_PROFILING,
+ "render avgRender minWindow midPoint maxWindow vsyncToFlip flipToVsync #vsyncBetweenFlip #frame insertDuration vTotalMin vTotalMax eventTrigs vSyncTime1 vSyncTime2 vSyncTime3 vSyncTime4 vSyncTime5 flags\n");
+
+ for (i = 0; i < core_stats->index && i < core_stats->entries; i++) {
+ dm_logger_write(logger, LOG_PROFILING,
+ "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
+ time[i].render_time_in_us,
+ time[i].avg_render_time_in_us_last_ten,
+ time[i].min_window,
+ time[i].lfc_mid_point_in_us,
+ time[i].max_window,
+ time[i].vsync_to_flip_time_in_us,
+ time[i].flip_to_vsync_time_in_us,
+ time[i].num_vsync_between_flips,
+ time[i].num_frames_inserted,
+ time[i].inserted_duration_in_us,
+ time[i].v_total_min,
+ time[i].v_total_max,
+ time[i].event_triggers,
+ time[i].v_sync_time_in_us[0],
+ time[i].v_sync_time_in_us[1],
+ time[i].v_sync_time_in_us[2],
+ time[i].v_sync_time_in_us[3],
+ time[i].v_sync_time_in_us[4],
+ time[i].flags);
+ }
+ }
+ //GetLog()->Close(pLog);
+ //GetLog()->UnSetLogMask(LogMajor_ISR, LogMinor_ISR_FreeSyncSW);
+}
+
+void mod_stats_reset_data(struct mod_stats *mod_stats)
+{
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int index = 0;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ memset(core_stats->time, 0,
+ sizeof(struct stats_time_cache) * core_stats->entries);
+
+ core_stats->index = 0;
+}
+
+void mod_stats_update_flip(struct mod_stats *mod_stats,
+ unsigned long timestamp_in_ns)
+{
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int index = 0, i;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ if (core_stats->index >= core_stats->entries)
+ return;
+
+ time = core_stats->time;
+ index = core_stats->index;
+
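+ /* Render time is measured from the previous flip; keep a rolling average over the last ten flips */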
+ time[index].flip_timestamp_in_ns = timestamp_in_ns;
+ time[index].render_time_in_us =
+ timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
+
+ if (index >= 10) {
+ for (i = 0; i < 10; i++)
+ time[index].avg_render_time_in_us_last_ten +=
+ time[index - i].render_time_in_us;
+ time[index].avg_render_time_in_us_last_ten /= 10;
+ }
+
+ if (time[index].num_vsync_between_flips > 0)
+ time[index].vsync_to_flip_time_in_us =
+ timestamp_in_ns - time[index].vupdate_timestamp_in_ns;
+ else
+ time[index].vsync_to_flip_time_in_us =
+ timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
+
+ core_stats->index++;
+}
+
+void mod_stats_update_vupdate(struct mod_stats *mod_stats,
+ unsigned long timestamp_in_ns)
+{
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int index = 0;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ if (core_stats->index >= core_stats->entries)
+ return;
+
+ time = core_stats->time;
+ index = core_stats->index;
+
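+ /* Record the interval since the previous vupdate for up to MOD_STATS_NUM_VSYNCS vsyncs between flips */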
+ time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
+ if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS)
+ time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] =
+ timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
+ time[index].flip_to_vsync_time_in_us =
+ timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
+
+ time[index].num_vsync_between_flips++;
+}
+
+void mod_stats_update_freesync(struct mod_stats *mod_stats,
+ unsigned int v_total_min,
+ unsigned int v_total_max,
+ unsigned int event_triggers,
+ unsigned int window_min,
+ unsigned int window_max,
+ unsigned int lfc_mid_point_in_us,
+ unsigned int inserted_frames,
+ unsigned int inserted_duration_in_us)
+{
+ struct core_stats *core_stats = NULL;
+ struct stats_time_cache *time = NULL;
+ unsigned int index = 0;
+
+ if (mod_stats == NULL)
+ return;
+
+ core_stats = MOD_STATS_TO_CORE(mod_stats);
+
+ if (core_stats->index >= core_stats->entries)
+ return;
+
+ time = core_stats->time;
+ index = core_stats->index;
+
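+ /* Snapshot the current FreeSync programming into the active stats entry */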
+ time[index].v_total_min = v_total_min;
+ time[index].v_total_max = v_total_max;
+ time[index].event_triggers = event_triggers;
+ time[index].min_window = window_min;
+ time[index].max_window = window_max;
+ time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
+ time[index].num_frames_inserted = inserted_frames;
+ time[index].inserted_duration_in_us = inserted_duration_in_us;
+}
+
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index b72f8a4..9fa3aae 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -25,7 +25,6 @@
#include <drm/amd_asic_type.h>
-struct seq_file;
#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
@@ -61,71 +60,12 @@ enum amd_clockgating_state {
AMD_CG_STATE_UNGATE,
};
-enum amd_dpm_forced_level {
- AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
- AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
- AMD_DPM_FORCED_LEVEL_LOW = 0x4,
- AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
- AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
- AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
-};
enum amd_powergating_state {
AMD_PG_STATE_GATE = 0,
AMD_PG_STATE_UNGATE,
};
-struct amd_vce_state {
- /* vce clocks */
- u32 evclk;
- u32 ecclk;
- /* gpu clocks */
- u32 sclk;
- u32 mclk;
- u8 clk_idx;
- u8 pstate;
-};
-
-
-#define AMD_MAX_VCE_LEVELS 6
-
-enum amd_vce_level {
- AMD_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- AMD_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- AMD_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- AMD_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-enum amd_pp_profile_type {
- AMD_PP_GFX_PROFILE,
- AMD_PP_COMPUTE_PROFILE,
-};
-
-struct amd_pp_profile {
- enum amd_pp_profile_type type;
- uint32_t min_sclk;
- uint32_t min_mclk;
- uint16_t activity_threshold;
- uint8_t up_hyst;
- uint8_t down_hyst;
-};
-
-enum amd_fan_ctrl_mode {
- AMD_FAN_CTRL_NONE = 0,
- AMD_FAN_CTRL_MANUAL = 1,
- AMD_FAN_CTRL_AUTO = 2,
-};
-
-enum pp_clock_type {
- PP_SCLK,
- PP_MCLK,
- PP_PCIE,
-};
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
@@ -169,27 +109,6 @@ enum pp_clock_type {
#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
#define AMD_PG_SUPPORT_MMHUB (1 << 13)
-enum amd_pm_state_type {
- /* not used for dpm */
- POWER_STATE_TYPE_DEFAULT,
- POWER_STATE_TYPE_POWERSAVE,
- /* user selectable states */
- POWER_STATE_TYPE_BATTERY,
- POWER_STATE_TYPE_BALANCED,
- POWER_STATE_TYPE_PERFORMANCE,
- /* internal states */
- POWER_STATE_TYPE_INTERNAL_UVD,
- POWER_STATE_TYPE_INTERNAL_UVD_SD,
- POWER_STATE_TYPE_INTERNAL_UVD_HD,
- POWER_STATE_TYPE_INTERNAL_UVD_HD2,
- POWER_STATE_TYPE_INTERNAL_UVD_MVC,
- POWER_STATE_TYPE_INTERNAL_BOOT,
- POWER_STATE_TYPE_INTERNAL_THERMAL,
- POWER_STATE_TYPE_INTERNAL_ACPI,
- POWER_STATE_TYPE_INTERNAL_ULV,
- POWER_STATE_TYPE_INTERNAL_3DPERF,
-};
-
struct amd_ip_funcs {
/* Name of IP block */
char *name;
@@ -233,95 +152,4 @@ struct amd_ip_funcs {
};
-enum amd_pp_task;
-enum amd_pp_clock_type;
-struct pp_states_info;
-struct amd_pp_simple_clock_info;
-struct amd_pp_display_configuration;
-struct amd_pp_clock_info;
-struct pp_display_clock_request;
-struct pp_wm_sets_with_clock_ranges_soc15;
-struct pp_clock_levels_with_voltage;
-struct pp_clock_levels_with_latency;
-struct amd_pp_clocks;
-
-struct amd_pm_funcs {
-/* export for dpm on ci and si */
- int (*pre_set_power_state)(void *handle);
- int (*set_power_state)(void *handle);
- void (*post_set_power_state)(void *handle);
- void (*display_configuration_changed)(void *handle);
- void (*print_power_state)(void *handle, void *ps);
- bool (*vblank_too_short)(void *handle);
- void (*enable_bapm)(void *handle, bool enable);
- int (*check_state_equal)(void *handle,
- void *cps,
- void *rps,
- bool *equal);
-/* export for sysfs */
- int (*get_temperature)(void *handle);
- void (*set_fan_control_mode)(void *handle, u32 mode);
- u32 (*get_fan_control_mode)(void *handle);
- int (*set_fan_speed_percent)(void *handle, u32 speed);
- int (*get_fan_speed_percent)(void *handle, u32 *speed);
- int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
- int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
- int (*get_sclk_od)(void *handle);
- int (*set_sclk_od)(void *handle, uint32_t value);
- int (*get_mclk_od)(void *handle);
- int (*set_mclk_od)(void *handle, uint32_t value);
- int (*read_sensor)(void *handle, int idx, void *value, int *size);
- enum amd_dpm_forced_level (*get_performance_level)(void *handle);
- enum amd_pm_state_type (*get_current_power_state)(void *handle);
- int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
- int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
- int (*get_pp_table)(void *handle, char **table);
- int (*set_pp_table)(void *handle, const char *buf, size_t size);
- void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
-
- int (*reset_power_profile_state)(void *handle,
- struct amd_pp_profile *request);
- int (*get_power_profile_state)(void *handle,
- struct amd_pp_profile *query);
- int (*set_power_profile_state)(void *handle,
- struct amd_pp_profile *request);
- int (*switch_power_profile)(void *handle,
- enum amd_pp_profile_type type);
-/* export to amdgpu */
- void (*powergate_uvd)(void *handle, bool gate);
- void (*powergate_vce)(void *handle, bool gate);
- struct amd_vce_state* (*get_vce_clock_state)(void *handle, u32 idx);
- int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
- void *input, void *output);
- int (*load_firmware)(void *handle);
- int (*wait_for_fw_loading_complete)(void *handle);
- int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
-/* export to DC */
- u32 (*get_sclk)(void *handle, bool low);
- u32 (*get_mclk)(void *handle, bool low);
- int (*display_configuration_change)(void *handle,
- const struct amd_pp_display_configuration *input);
- int (*get_display_power_level)(void *handle,
- struct amd_pp_simple_clock_info *output);
- int (*get_current_clocks)(void *handle,
- struct amd_pp_clock_info *clocks);
- int (*get_clock_by_type)(void *handle,
- enum amd_pp_clock_type type,
- struct amd_pp_clocks *clocks);
- int (*get_clock_by_type_with_latency)(void *handle,
- enum amd_pp_clock_type type,
- struct pp_clock_levels_with_latency *clocks);
- int (*get_clock_by_type_with_voltage)(void *handle,
- enum amd_pp_clock_type type,
- struct pp_clock_levels_with_voltage *clocks);
- int (*set_watermarks_for_clocks_ranges)(void *handle,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
- int (*display_clock_voltage_request)(void *handle,
- struct pp_display_clock_request *clock);
- int (*get_display_mode_validation_clocks)(void *handle,
- struct amd_pp_simple_clock_info *clocks);
-};
-
-
#endif /* __AMD_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_offset.h
new file mode 100644
index 0000000..b1e878e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_offset.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _athub_1_0_OFFSET_HEADER
+#define _athub_1_0_OFFSET_HEADER
+
+
+
+// addressBlock: athub_atsdec
+// base address: 0x3080
+#define mmATC_ATS_CNTL 0x0000
+#define mmATC_ATS_CNTL_BASE_IDX 0
+#define mmATC_ATS_STATUS 0x0003
+#define mmATC_ATS_STATUS_BASE_IDX 0
+#define mmATC_ATS_FAULT_CNTL 0x0004
+#define mmATC_ATS_FAULT_CNTL_BASE_IDX 0
+#define mmATC_ATS_FAULT_STATUS_INFO 0x0005
+#define mmATC_ATS_FAULT_STATUS_INFO_BASE_IDX 0
+#define mmATC_ATS_FAULT_STATUS_ADDR 0x0006
+#define mmATC_ATS_FAULT_STATUS_ADDR_BASE_IDX 0
+#define mmATC_ATS_DEFAULT_PAGE_LOW 0x0007
+#define mmATC_ATS_DEFAULT_PAGE_LOW_BASE_IDX 0
+#define mmATC_TRANS_FAULT_RSPCNTRL 0x0008
+#define mmATC_TRANS_FAULT_RSPCNTRL_BASE_IDX 0
+#define mmATC_ATS_FAULT_STATUS_INFO2 0x0009
+#define mmATC_ATS_FAULT_STATUS_INFO2_BASE_IDX 0
+#define mmATHUB_MISC_CNTL 0x000a
+#define mmATHUB_MISC_CNTL_BASE_IDX 0
+#define mmATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x000b
+#define mmATC_VMID_PASID_MAPPING_UPDATE_STATUS_BASE_IDX 0
+#define mmATC_VMID0_PASID_MAPPING 0x000c
+#define mmATC_VMID0_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID1_PASID_MAPPING 0x000d
+#define mmATC_VMID1_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID2_PASID_MAPPING 0x000e
+#define mmATC_VMID2_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID3_PASID_MAPPING 0x000f
+#define mmATC_VMID3_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID4_PASID_MAPPING 0x0010
+#define mmATC_VMID4_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID5_PASID_MAPPING 0x0011
+#define mmATC_VMID5_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID6_PASID_MAPPING 0x0012
+#define mmATC_VMID6_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID7_PASID_MAPPING 0x0013
+#define mmATC_VMID7_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID8_PASID_MAPPING 0x0014
+#define mmATC_VMID8_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID9_PASID_MAPPING 0x0015
+#define mmATC_VMID9_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID10_PASID_MAPPING 0x0016
+#define mmATC_VMID10_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID11_PASID_MAPPING 0x0017
+#define mmATC_VMID11_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID12_PASID_MAPPING 0x0018
+#define mmATC_VMID12_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID13_PASID_MAPPING 0x0019
+#define mmATC_VMID13_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID14_PASID_MAPPING 0x001a
+#define mmATC_VMID14_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID15_PASID_MAPPING 0x001b
+#define mmATC_VMID15_PASID_MAPPING_BASE_IDX 0
+#define mmATC_ATS_VMID_STATUS 0x001c
+#define mmATC_ATS_VMID_STATUS_BASE_IDX 0
+#define mmATC_ATS_GFX_ATCL2_STATUS 0x001d
+#define mmATC_ATS_GFX_ATCL2_STATUS_BASE_IDX 0
+#define mmATC_PERFCOUNTER0_CFG 0x001e
+#define mmATC_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmATC_PERFCOUNTER1_CFG 0x001f
+#define mmATC_PERFCOUNTER1_CFG_BASE_IDX 0
+#define mmATC_PERFCOUNTER2_CFG 0x0020
+#define mmATC_PERFCOUNTER2_CFG_BASE_IDX 0
+#define mmATC_PERFCOUNTER3_CFG 0x0021
+#define mmATC_PERFCOUNTER3_CFG_BASE_IDX 0
+#define mmATC_PERFCOUNTER_RSLT_CNTL 0x0022
+#define mmATC_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define mmATC_PERFCOUNTER_LO 0x0023
+#define mmATC_PERFCOUNTER_LO_BASE_IDX 0
+#define mmATC_PERFCOUNTER_HI 0x0024
+#define mmATC_PERFCOUNTER_HI_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL 0x0025
+#define mmATHUB_PCIE_ATS_CNTL_BASE_IDX 0
+#define mmATHUB_PCIE_PASID_CNTL 0x0026
+#define mmATHUB_PCIE_PASID_CNTL_BASE_IDX 0
+#define mmATHUB_PCIE_PAGE_REQ_CNTL 0x0027
+#define mmATHUB_PCIE_PAGE_REQ_CNTL_BASE_IDX 0
+#define mmATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC 0x0028
+#define mmATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC_BASE_IDX 0
+#define mmATHUB_COMMAND 0x0029
+#define mmATHUB_COMMAND_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_0 0x002a
+#define mmATHUB_PCIE_ATS_CNTL_VF_0_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_1 0x002b
+#define mmATHUB_PCIE_ATS_CNTL_VF_1_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_2 0x002c
+#define mmATHUB_PCIE_ATS_CNTL_VF_2_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_3 0x002d
+#define mmATHUB_PCIE_ATS_CNTL_VF_3_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_4 0x002e
+#define mmATHUB_PCIE_ATS_CNTL_VF_4_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_5 0x002f
+#define mmATHUB_PCIE_ATS_CNTL_VF_5_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_6 0x0030
+#define mmATHUB_PCIE_ATS_CNTL_VF_6_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_7 0x0031
+#define mmATHUB_PCIE_ATS_CNTL_VF_7_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_8 0x0032
+#define mmATHUB_PCIE_ATS_CNTL_VF_8_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_9 0x0033
+#define mmATHUB_PCIE_ATS_CNTL_VF_9_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_10 0x0034
+#define mmATHUB_PCIE_ATS_CNTL_VF_10_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_11 0x0035
+#define mmATHUB_PCIE_ATS_CNTL_VF_11_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_12 0x0036
+#define mmATHUB_PCIE_ATS_CNTL_VF_12_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_13 0x0037
+#define mmATHUB_PCIE_ATS_CNTL_VF_13_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_14 0x0038
+#define mmATHUB_PCIE_ATS_CNTL_VF_14_BASE_IDX 0
+#define mmATHUB_PCIE_ATS_CNTL_VF_15 0x0039
+#define mmATHUB_PCIE_ATS_CNTL_VF_15_BASE_IDX 0
+#define mmATHUB_MEM_POWER_LS 0x003a
+#define mmATHUB_MEM_POWER_LS_BASE_IDX 0
+#define mmATS_IH_CREDIT 0x003b
+#define mmATS_IH_CREDIT_BASE_IDX 0
+#define mmATHUB_IH_CREDIT 0x003c
+#define mmATHUB_IH_CREDIT_BASE_IDX 0
+#define mmATC_VMID16_PASID_MAPPING 0x003d
+#define mmATC_VMID16_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID17_PASID_MAPPING 0x003e
+#define mmATC_VMID17_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID18_PASID_MAPPING 0x003f
+#define mmATC_VMID18_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID19_PASID_MAPPING 0x0040
+#define mmATC_VMID19_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID20_PASID_MAPPING 0x0041
+#define mmATC_VMID20_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID21_PASID_MAPPING 0x0042
+#define mmATC_VMID21_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID22_PASID_MAPPING 0x0043
+#define mmATC_VMID22_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID23_PASID_MAPPING 0x0044
+#define mmATC_VMID23_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID24_PASID_MAPPING 0x0045
+#define mmATC_VMID24_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID25_PASID_MAPPING 0x0046
+#define mmATC_VMID25_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID26_PASID_MAPPING 0x0047
+#define mmATC_VMID26_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID27_PASID_MAPPING 0x0048
+#define mmATC_VMID27_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID28_PASID_MAPPING 0x0049
+#define mmATC_VMID28_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID29_PASID_MAPPING 0x004a
+#define mmATC_VMID29_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID30_PASID_MAPPING 0x004b
+#define mmATC_VMID30_PASID_MAPPING_BASE_IDX 0
+#define mmATC_VMID31_PASID_MAPPING 0x004c
+#define mmATC_VMID31_PASID_MAPPING_BASE_IDX 0
+#define mmATC_ATS_MMHUB_ATCL2_STATUS 0x004d
+#define mmATC_ATS_MMHUB_ATCL2_STATUS_BASE_IDX 0
+#define mmATHUB_SHARED_VIRT_RESET_REQ 0x004e
+#define mmATHUB_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define mmATHUB_SHARED_ACTIVE_FCN_ID 0x004f
+#define mmATHUB_SHARED_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmATC_ATS_SDPPORT_CNTL 0x0050
+#define mmATC_ATS_SDPPORT_CNTL_BASE_IDX 0
+#define mmATC_ATS_VMID_SNAPSHOT_GFX_STAT 0x0052
+#define mmATC_ATS_VMID_SNAPSHOT_GFX_STAT_BASE_IDX 0
+#define mmATC_ATS_VMID_SNAPSHOT_MMHUB_STAT 0x0053
+#define mmATC_ATS_VMID_SNAPSHOT_MMHUB_STAT_BASE_IDX 0
+
+
+// addressBlock: athub_xpbdec
+// base address: 0x31f0
+#define mmXPB_RTR_SRC_APRTR0 0x005c
+#define mmXPB_RTR_SRC_APRTR0_BASE_IDX 0
+#define mmXPB_RTR_SRC_APRTR1 0x005d
+#define mmXPB_RTR_SRC_APRTR1_BASE_IDX 0
+#define mmXPB_RTR_SRC_APRTR2 0x005e
+#define mmXPB_RTR_SRC_APRTR2_BASE_IDX 0
+#define mmXPB_RTR_SRC_APRTR3 0x005f
+#define mmXPB_RTR_SRC_APRTR3_BASE_IDX 0
+#define mmXPB_RTR_SRC_APRTR4 0x0060
+#define mmXPB_RTR_SRC_APRTR4_BASE_IDX 0
+#define mmXPB_RTR_SRC_APRTR5 0x0061
+#define mmXPB_RTR_SRC_APRTR5_BASE_IDX 0
+#define mmXPB_RTR_SRC_APRTR6 0x0062
+#define mmXPB_RTR_SRC_APRTR6_BASE_IDX 0
+#define mmXPB_RTR_SRC_APRTR7 0x0063
+#define mmXPB_RTR_SRC_APRTR7_BASE_IDX 0
+#define mmXPB_RTR_SRC_APRTR8 0x0064
+#define mmXPB_RTR_SRC_APRTR8_BASE_IDX 0
+#define mmXPB_RTR_SRC_APRTR9 0x0065
+#define mmXPB_RTR_SRC_APRTR9_BASE_IDX 0
+#define mmXPB_XDMA_RTR_SRC_APRTR0 0x0066
+#define mmXPB_XDMA_RTR_SRC_APRTR0_BASE_IDX 0
+#define mmXPB_XDMA_RTR_SRC_APRTR1 0x0067
+#define mmXPB_XDMA_RTR_SRC_APRTR1_BASE_IDX 0
+#define mmXPB_XDMA_RTR_SRC_APRTR2 0x0068
+#define mmXPB_XDMA_RTR_SRC_APRTR2_BASE_IDX 0
+#define mmXPB_XDMA_RTR_SRC_APRTR3 0x0069
+#define mmXPB_XDMA_RTR_SRC_APRTR3_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP0 0x006a
+#define mmXPB_RTR_DEST_MAP0_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP1 0x006b
+#define mmXPB_RTR_DEST_MAP1_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP2 0x006c
+#define mmXPB_RTR_DEST_MAP2_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP3 0x006d
+#define mmXPB_RTR_DEST_MAP3_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP4 0x006e
+#define mmXPB_RTR_DEST_MAP4_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP5 0x006f
+#define mmXPB_RTR_DEST_MAP5_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP6 0x0070
+#define mmXPB_RTR_DEST_MAP6_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP7 0x0071
+#define mmXPB_RTR_DEST_MAP7_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP8 0x0072
+#define mmXPB_RTR_DEST_MAP8_BASE_IDX 0
+#define mmXPB_RTR_DEST_MAP9 0x0073
+#define mmXPB_RTR_DEST_MAP9_BASE_IDX 0
+#define mmXPB_XDMA_RTR_DEST_MAP0 0x0074
+#define mmXPB_XDMA_RTR_DEST_MAP0_BASE_IDX 0
+#define mmXPB_XDMA_RTR_DEST_MAP1 0x0075
+#define mmXPB_XDMA_RTR_DEST_MAP1_BASE_IDX 0
+#define mmXPB_XDMA_RTR_DEST_MAP2 0x0076
+#define mmXPB_XDMA_RTR_DEST_MAP2_BASE_IDX 0
+#define mmXPB_XDMA_RTR_DEST_MAP3 0x0077
+#define mmXPB_XDMA_RTR_DEST_MAP3_BASE_IDX 0
+#define mmXPB_CLG_CFG0 0x0078
+#define mmXPB_CLG_CFG0_BASE_IDX 0
+#define mmXPB_CLG_CFG1 0x0079
+#define mmXPB_CLG_CFG1_BASE_IDX 0
+#define mmXPB_CLG_CFG2 0x007a
+#define mmXPB_CLG_CFG2_BASE_IDX 0
+#define mmXPB_CLG_CFG3 0x007b
+#define mmXPB_CLG_CFG3_BASE_IDX 0
+#define mmXPB_CLG_CFG4 0x007c
+#define mmXPB_CLG_CFG4_BASE_IDX 0
+#define mmXPB_CLG_CFG5 0x007d
+#define mmXPB_CLG_CFG5_BASE_IDX 0
+#define mmXPB_CLG_CFG6 0x007e
+#define mmXPB_CLG_CFG6_BASE_IDX 0
+#define mmXPB_CLG_CFG7 0x007f
+#define mmXPB_CLG_CFG7_BASE_IDX 0
+#define mmXPB_CLG_EXTRA 0x0080
+#define mmXPB_CLG_EXTRA_BASE_IDX 0
+#define mmXPB_CLG_EXTRA_MSK 0x0081
+#define mmXPB_CLG_EXTRA_MSK_BASE_IDX 0
+#define mmXPB_LB_ADDR 0x0082
+#define mmXPB_LB_ADDR_BASE_IDX 0
+#define mmXPB_WCB_STS 0x0083
+#define mmXPB_WCB_STS_BASE_IDX 0
+#define mmXPB_HST_CFG 0x0084
+#define mmXPB_HST_CFG_BASE_IDX 0
+#define mmXPB_P2P_BAR_CFG 0x0085
+#define mmXPB_P2P_BAR_CFG_BASE_IDX 0
+#define mmXPB_P2P_BAR0 0x0086
+#define mmXPB_P2P_BAR0_BASE_IDX 0
+#define mmXPB_P2P_BAR1 0x0087
+#define mmXPB_P2P_BAR1_BASE_IDX 0
+#define mmXPB_P2P_BAR2 0x0088
+#define mmXPB_P2P_BAR2_BASE_IDX 0
+#define mmXPB_P2P_BAR3 0x0089
+#define mmXPB_P2P_BAR3_BASE_IDX 0
+#define mmXPB_P2P_BAR4 0x008a
+#define mmXPB_P2P_BAR4_BASE_IDX 0
+#define mmXPB_P2P_BAR5 0x008b
+#define mmXPB_P2P_BAR5_BASE_IDX 0
+#define mmXPB_P2P_BAR6 0x008c
+#define mmXPB_P2P_BAR6_BASE_IDX 0
+#define mmXPB_P2P_BAR7 0x008d
+#define mmXPB_P2P_BAR7_BASE_IDX 0
+#define mmXPB_P2P_BAR_SETUP 0x008e
+#define mmXPB_P2P_BAR_SETUP_BASE_IDX 0
+#define mmXPB_P2P_BAR_DELTA_ABOVE 0x0090
+#define mmXPB_P2P_BAR_DELTA_ABOVE_BASE_IDX 0
+#define mmXPB_P2P_BAR_DELTA_BELOW 0x0091
+#define mmXPB_P2P_BAR_DELTA_BELOW_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR0 0x0092
+#define mmXPB_PEER_SYS_BAR0_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR1 0x0093
+#define mmXPB_PEER_SYS_BAR1_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR2 0x0094
+#define mmXPB_PEER_SYS_BAR2_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR3 0x0095
+#define mmXPB_PEER_SYS_BAR3_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR4 0x0096
+#define mmXPB_PEER_SYS_BAR4_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR5 0x0097
+#define mmXPB_PEER_SYS_BAR5_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR6 0x0098
+#define mmXPB_PEER_SYS_BAR6_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR7 0x0099
+#define mmXPB_PEER_SYS_BAR7_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR8 0x009a
+#define mmXPB_PEER_SYS_BAR8_BASE_IDX 0
+#define mmXPB_PEER_SYS_BAR9 0x009b
+#define mmXPB_PEER_SYS_BAR9_BASE_IDX 0
+#define mmXPB_XDMA_PEER_SYS_BAR0 0x009c
+#define mmXPB_XDMA_PEER_SYS_BAR0_BASE_IDX 0
+#define mmXPB_XDMA_PEER_SYS_BAR1 0x009d
+#define mmXPB_XDMA_PEER_SYS_BAR1_BASE_IDX 0
+#define mmXPB_XDMA_PEER_SYS_BAR2 0x009e
+#define mmXPB_XDMA_PEER_SYS_BAR2_BASE_IDX 0
+#define mmXPB_XDMA_PEER_SYS_BAR3 0x009f
+#define mmXPB_XDMA_PEER_SYS_BAR3_BASE_IDX 0
+#define mmXPB_CLK_GAT 0x00a0
+#define mmXPB_CLK_GAT_BASE_IDX 0
+#define mmXPB_INTF_CFG 0x00a1
+#define mmXPB_INTF_CFG_BASE_IDX 0
+#define mmXPB_INTF_STS 0x00a2
+#define mmXPB_INTF_STS_BASE_IDX 0
+#define mmXPB_PIPE_STS 0x00a3
+#define mmXPB_PIPE_STS_BASE_IDX 0
+#define mmXPB_SUB_CTRL 0x00a4
+#define mmXPB_SUB_CTRL_BASE_IDX 0
+#define mmXPB_MAP_INVERT_FLUSH_NUM_LSB 0x00a5
+#define mmXPB_MAP_INVERT_FLUSH_NUM_LSB_BASE_IDX 0
+#define mmXPB_PERF_KNOBS 0x00a6
+#define mmXPB_PERF_KNOBS_BASE_IDX 0
+#define mmXPB_STICKY 0x00a7
+#define mmXPB_STICKY_BASE_IDX 0
+#define mmXPB_STICKY_W1C 0x00a8
+#define mmXPB_STICKY_W1C_BASE_IDX 0
+#define mmXPB_MISC_CFG 0x00a9
+#define mmXPB_MISC_CFG_BASE_IDX 0
+#define mmXPB_INTF_CFG2 0x00aa
+#define mmXPB_INTF_CFG2_BASE_IDX 0
+#define mmXPB_CLG_EXTRA_RD 0x00ab
+#define mmXPB_CLG_EXTRA_RD_BASE_IDX 0
+#define mmXPB_CLG_EXTRA_MSK_RD 0x00ac
+#define mmXPB_CLG_EXTRA_MSK_RD_BASE_IDX 0
+#define mmXPB_CLG_GFX_MATCH 0x00ad
+#define mmXPB_CLG_GFX_MATCH_BASE_IDX 0
+#define mmXPB_CLG_GFX_MATCH_MSK 0x00ae
+#define mmXPB_CLG_GFX_MATCH_MSK_BASE_IDX 0
+#define mmXPB_CLG_MM_MATCH 0x00af
+#define mmXPB_CLG_MM_MATCH_BASE_IDX 0
+#define mmXPB_CLG_MM_MATCH_MSK 0x00b0
+#define mmXPB_CLG_MM_MATCH_MSK_BASE_IDX 0
+#define mmXPB_CLG_GFX_UNITID_MAPPING0 0x00b1
+#define mmXPB_CLG_GFX_UNITID_MAPPING0_BASE_IDX 0
+#define mmXPB_CLG_GFX_UNITID_MAPPING1 0x00b2
+#define mmXPB_CLG_GFX_UNITID_MAPPING1_BASE_IDX 0
+#define mmXPB_CLG_GFX_UNITID_MAPPING2 0x00b3
+#define mmXPB_CLG_GFX_UNITID_MAPPING2_BASE_IDX 0
+#define mmXPB_CLG_GFX_UNITID_MAPPING3 0x00b4
+#define mmXPB_CLG_GFX_UNITID_MAPPING3_BASE_IDX 0
+#define mmXPB_CLG_GFX_UNITID_MAPPING4 0x00b5
+#define mmXPB_CLG_GFX_UNITID_MAPPING4_BASE_IDX 0
+#define mmXPB_CLG_GFX_UNITID_MAPPING5 0x00b6
+#define mmXPB_CLG_GFX_UNITID_MAPPING5_BASE_IDX 0
+#define mmXPB_CLG_GFX_UNITID_MAPPING6 0x00b7
+#define mmXPB_CLG_GFX_UNITID_MAPPING6_BASE_IDX 0
+#define mmXPB_CLG_GFX_UNITID_MAPPING7 0x00b8
+#define mmXPB_CLG_GFX_UNITID_MAPPING7_BASE_IDX 0
+#define mmXPB_CLG_MM_UNITID_MAPPING0 0x00b9
+#define mmXPB_CLG_MM_UNITID_MAPPING0_BASE_IDX 0
+#define mmXPB_CLG_MM_UNITID_MAPPING1 0x00ba
+#define mmXPB_CLG_MM_UNITID_MAPPING1_BASE_IDX 0
+#define mmXPB_CLG_MM_UNITID_MAPPING2 0x00bb
+#define mmXPB_CLG_MM_UNITID_MAPPING2_BASE_IDX 0
+#define mmXPB_CLG_MM_UNITID_MAPPING3 0x00bc
+#define mmXPB_CLG_MM_UNITID_MAPPING3_BASE_IDX 0
+
+
+// addressBlock: athub_rpbdec
+// base address: 0x33b0
+#define mmRPB_PASSPW_CONF 0x00cc
+#define mmRPB_PASSPW_CONF_BASE_IDX 0
+#define mmRPB_BLOCKLEVEL_CONF 0x00cd
+#define mmRPB_BLOCKLEVEL_CONF_BASE_IDX 0
+#define mmRPB_TAG_CONF 0x00cf
+#define mmRPB_TAG_CONF_BASE_IDX 0
+#define mmRPB_EFF_CNTL 0x00d1
+#define mmRPB_EFF_CNTL_BASE_IDX 0
+#define mmRPB_ARB_CNTL 0x00d2
+#define mmRPB_ARB_CNTL_BASE_IDX 0
+#define mmRPB_ARB_CNTL2 0x00d3
+#define mmRPB_ARB_CNTL2_BASE_IDX 0
+#define mmRPB_BIF_CNTL 0x00d4
+#define mmRPB_BIF_CNTL_BASE_IDX 0
+#define mmRPB_WR_SWITCH_CNTL 0x00d5
+#define mmRPB_WR_SWITCH_CNTL_BASE_IDX 0
+#define mmRPB_RD_SWITCH_CNTL 0x00d7
+#define mmRPB_RD_SWITCH_CNTL_BASE_IDX 0
+#define mmRPB_CID_QUEUE_WR 0x00d8
+#define mmRPB_CID_QUEUE_WR_BASE_IDX 0
+#define mmRPB_CID_QUEUE_RD 0x00d9
+#define mmRPB_CID_QUEUE_RD_BASE_IDX 0
+#define mmRPB_CID_QUEUE_EX 0x00dc
+#define mmRPB_CID_QUEUE_EX_BASE_IDX 0
+#define mmRPB_CID_QUEUE_EX_DATA 0x00dd
+#define mmRPB_CID_QUEUE_EX_DATA_BASE_IDX 0
+#define mmRPB_SWITCH_CNTL2 0x00de
+#define mmRPB_SWITCH_CNTL2_BASE_IDX 0
+#define mmRPB_DEINTRLV_COMBINE_CNTL 0x00df
+#define mmRPB_DEINTRLV_COMBINE_CNTL_BASE_IDX 0
+#define mmRPB_VC_SWITCH_RDWR 0x00e0
+#define mmRPB_VC_SWITCH_RDWR_BASE_IDX 0
+#define mmRPB_PERFCOUNTER_LO 0x00e1
+#define mmRPB_PERFCOUNTER_LO_BASE_IDX 0
+#define mmRPB_PERFCOUNTER_HI 0x00e2
+#define mmRPB_PERFCOUNTER_HI_BASE_IDX 0
+#define mmRPB_PERFCOUNTER0_CFG 0x00e3
+#define mmRPB_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmRPB_PERFCOUNTER1_CFG 0x00e4
+#define mmRPB_PERFCOUNTER1_CFG_BASE_IDX 0
+#define mmRPB_PERFCOUNTER2_CFG 0x00e5
+#define mmRPB_PERFCOUNTER2_CFG_BASE_IDX 0
+#define mmRPB_PERFCOUNTER3_CFG 0x00e6
+#define mmRPB_PERFCOUNTER3_CFG_BASE_IDX 0
+#define mmRPB_PERFCOUNTER_RSLT_CNTL 0x00e7
+#define mmRPB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define mmRPB_RD_QUEUE_CNTL 0x00e9
+#define mmRPB_RD_QUEUE_CNTL_BASE_IDX 0
+#define mmRPB_RD_QUEUE_CNTL2 0x00ea
+#define mmRPB_RD_QUEUE_CNTL2_BASE_IDX 0
+#define mmRPB_WR_QUEUE_CNTL 0x00eb
+#define mmRPB_WR_QUEUE_CNTL_BASE_IDX 0
+#define mmRPB_WR_QUEUE_CNTL2 0x00ec
+#define mmRPB_WR_QUEUE_CNTL2_BASE_IDX 0
+#define mmRPB_EA_QUEUE_WR 0x00ed
+#define mmRPB_EA_QUEUE_WR_BASE_IDX 0
+#define mmRPB_ATS_CNTL 0x00ee
+#define mmRPB_ATS_CNTL_BASE_IDX 0
+#define mmRPB_ATS_CNTL2 0x00ef
+#define mmRPB_ATS_CNTL2_BASE_IDX 0
+#define mmRPB_SDPPORT_CNTL 0x00f0
+#define mmRPB_SDPPORT_CNTL_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_sh_mask.h
new file mode 100644
index 0000000..2968c6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_sh_mask.h
@@ -0,0 +1,2045 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _athub_1_0_SH_MASK_HEADER
+#define _athub_1_0_SH_MASK_HEADER
+
+
+// addressBlock: athub_atsdec
+//ATC_ATS_CNTL
+#define ATC_ATS_CNTL__DISABLE_ATC__SHIFT 0x0
+#define ATC_ATS_CNTL__DISABLE_PRI__SHIFT 0x1
+#define ATC_ATS_CNTL__DISABLE_PASID__SHIFT 0x2
+#define ATC_ATS_CNTL__CREDITS_ATS_RPB__SHIFT 0x8
+#define ATC_ATS_CNTL__INVALIDATION_LOG_KEEP_ORDER__SHIFT 0x14
+#define ATC_ATS_CNTL__TRANS_LOG_KEEP_ORDER__SHIFT 0x15
+#define ATC_ATS_CNTL__TRANS_EXE_RETURN__SHIFT 0x16
+#define ATC_ATS_CNTL__DISABLE_ATC_MASK 0x00000001L
+#define ATC_ATS_CNTL__DISABLE_PRI_MASK 0x00000002L
+#define ATC_ATS_CNTL__DISABLE_PASID_MASK 0x00000004L
+#define ATC_ATS_CNTL__CREDITS_ATS_RPB_MASK 0x00003F00L
+#define ATC_ATS_CNTL__INVALIDATION_LOG_KEEP_ORDER_MASK 0x00100000L
+#define ATC_ATS_CNTL__TRANS_LOG_KEEP_ORDER_MASK 0x00200000L
+#define ATC_ATS_CNTL__TRANS_EXE_RETURN_MASK 0x00C00000L
+//ATC_ATS_STATUS
+#define ATC_ATS_STATUS__BUSY__SHIFT 0x0
+#define ATC_ATS_STATUS__CRASHED__SHIFT 0x1
+#define ATC_ATS_STATUS__DEADLOCK_DETECTION__SHIFT 0x2
+#define ATC_ATS_STATUS__FLUSH_INVALIDATION_OUTSTANDING__SHIFT 0x3
+#define ATC_ATS_STATUS__NONFLUSH_INVALIDATION_OUTSTANDING__SHIFT 0x6
+#define ATC_ATS_STATUS__BUSY_MASK 0x00000001L
+#define ATC_ATS_STATUS__CRASHED_MASK 0x00000002L
+#define ATC_ATS_STATUS__DEADLOCK_DETECTION_MASK 0x00000004L
+#define ATC_ATS_STATUS__FLUSH_INVALIDATION_OUTSTANDING_MASK 0x00000038L
+#define ATC_ATS_STATUS__NONFLUSH_INVALIDATION_OUTSTANDING_MASK 0x000001C0L
+//ATC_ATS_FAULT_CNTL
+#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG__SHIFT 0x0
+#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE__SHIFT 0xa
+#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE__SHIFT 0x14
+#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG_MASK 0x000001FFL
+#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE_MASK 0x0007FC00L
+#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE_MASK 0x1FF00000L
+//ATC_ATS_FAULT_STATUS_INFO
+#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE__SHIFT 0x0
+#define ATC_ATS_FAULT_STATUS_INFO__VMID__SHIFT 0xa
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO__SHIFT 0xf
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2__SHIFT 0x10
+#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION__SHIFT 0x11
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST__SHIFT 0x12
+#define ATC_ATS_FAULT_STATUS_INFO__STATUS__SHIFT 0x13
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH__SHIFT 0x18
+#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE_MASK 0x000001FFL
+#define ATC_ATS_FAULT_STATUS_INFO__VMID_MASK 0x00007C00L
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO_MASK 0x00008000L
+#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2_MASK 0x00010000L
+#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION_MASK 0x00020000L
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST_MASK 0x00040000L
+#define ATC_ATS_FAULT_STATUS_INFO__STATUS_MASK 0x00F80000L
+#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH_MASK 0x0F000000L
+//ATC_ATS_FAULT_STATUS_ADDR
+#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR__SHIFT 0x0
+#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR_MASK 0xFFFFFFFFL
+//ATC_ATS_DEFAULT_PAGE_LOW
+#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE__SHIFT 0x0
+#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE_MASK 0xFFFFFFFFL
+//ATC_TRANS_FAULT_RSPCNTRL
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID0__SHIFT 0x0
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID1__SHIFT 0x1
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID2__SHIFT 0x2
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID3__SHIFT 0x3
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID4__SHIFT 0x4
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID5__SHIFT 0x5
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID6__SHIFT 0x6
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID7__SHIFT 0x7
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID8__SHIFT 0x8
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID9__SHIFT 0x9
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID10__SHIFT 0xa
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID11__SHIFT 0xb
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID12__SHIFT 0xc
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID13__SHIFT 0xd
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID14__SHIFT 0xe
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID15__SHIFT 0xf
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID16__SHIFT 0x10
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID17__SHIFT 0x11
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID18__SHIFT 0x12
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID19__SHIFT 0x13
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID20__SHIFT 0x14
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID21__SHIFT 0x15
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID22__SHIFT 0x16
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID23__SHIFT 0x17
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID24__SHIFT 0x18
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID25__SHIFT 0x19
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID26__SHIFT 0x1a
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID27__SHIFT 0x1b
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID28__SHIFT 0x1c
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID29__SHIFT 0x1d
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID30__SHIFT 0x1e
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID31__SHIFT 0x1f
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID0_MASK 0x00000001L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID1_MASK 0x00000002L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID2_MASK 0x00000004L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID3_MASK 0x00000008L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID4_MASK 0x00000010L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID5_MASK 0x00000020L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID6_MASK 0x00000040L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID7_MASK 0x00000080L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID8_MASK 0x00000100L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID9_MASK 0x00000200L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID10_MASK 0x00000400L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID11_MASK 0x00000800L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID12_MASK 0x00001000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID13_MASK 0x00002000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID14_MASK 0x00004000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID15_MASK 0x00008000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID16_MASK 0x00010000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID17_MASK 0x00020000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID18_MASK 0x00040000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID19_MASK 0x00080000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID20_MASK 0x00100000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID21_MASK 0x00200000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID22_MASK 0x00400000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID23_MASK 0x00800000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID24_MASK 0x01000000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID25_MASK 0x02000000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID26_MASK 0x04000000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID27_MASK 0x08000000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID28_MASK 0x10000000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID29_MASK 0x20000000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID30_MASK 0x40000000L
+#define ATC_TRANS_FAULT_RSPCNTRL__VMID31_MASK 0x80000000L
+//ATC_ATS_FAULT_STATUS_INFO2
+#define ATC_ATS_FAULT_STATUS_INFO2__VF__SHIFT 0x0
+#define ATC_ATS_FAULT_STATUS_INFO2__VFID__SHIFT 0x1
+#define ATC_ATS_FAULT_STATUS_INFO2__MMHUB_INV_VMID__SHIFT 0x9
+#define ATC_ATS_FAULT_STATUS_INFO2__VF_MASK 0x00000001L
+#define ATC_ATS_FAULT_STATUS_INFO2__VFID_MASK 0x0000001EL
+#define ATC_ATS_FAULT_STATUS_INFO2__MMHUB_INV_VMID_MASK 0x00003E00L
+//ATHUB_MISC_CNTL
+#define ATHUB_MISC_CNTL__CG_OFFDLY__SHIFT 0x6
+#define ATHUB_MISC_CNTL__CG_ENABLE__SHIFT 0x12
+#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE__SHIFT 0x13
+#define ATHUB_MISC_CNTL__PG_ENABLE__SHIFT 0x14
+#define ATHUB_MISC_CNTL__PG_OFFDLY__SHIFT 0x15
+#define ATHUB_MISC_CNTL__CG_STATUS__SHIFT 0x1b
+#define ATHUB_MISC_CNTL__PG_STATUS__SHIFT 0x1c
+#define ATHUB_MISC_CNTL__CG_OFFDLY_MASK 0x00000FC0L
+#define ATHUB_MISC_CNTL__CG_ENABLE_MASK 0x00040000L
+#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK 0x00080000L
+#define ATHUB_MISC_CNTL__PG_ENABLE_MASK 0x00100000L
+#define ATHUB_MISC_CNTL__PG_OFFDLY_MASK 0x07E00000L
+#define ATHUB_MISC_CNTL__CG_STATUS_MASK 0x08000000L
+#define ATHUB_MISC_CNTL__PG_STATUS_MASK 0x10000000L
+//ATC_VMID_PASID_MAPPING_UPDATE_STATUS
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED__SHIFT 0x0
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED__SHIFT 0x1
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED__SHIFT 0x2
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED__SHIFT 0x3
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED__SHIFT 0x4
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED__SHIFT 0x5
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED__SHIFT 0x6
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED__SHIFT 0x7
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED__SHIFT 0x8
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED__SHIFT 0x9
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED__SHIFT 0xa
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED__SHIFT 0xb
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED__SHIFT 0xc
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED__SHIFT 0xd
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED__SHIFT 0xe
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED__SHIFT 0xf
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID16_REMAPPING_FINISHED__SHIFT 0x10
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID17_REMAPPING_FINISHED__SHIFT 0x11
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID18_REMAPPING_FINISHED__SHIFT 0x12
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID19_REMAPPING_FINISHED__SHIFT 0x13
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID20_REMAPPING_FINISHED__SHIFT 0x14
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID21_REMAPPING_FINISHED__SHIFT 0x15
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID22_REMAPPING_FINISHED__SHIFT 0x16
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID23_REMAPPING_FINISHED__SHIFT 0x17
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID24_REMAPPING_FINISHED__SHIFT 0x18
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID25_REMAPPING_FINISHED__SHIFT 0x19
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID26_REMAPPING_FINISHED__SHIFT 0x1a
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID27_REMAPPING_FINISHED__SHIFT 0x1b
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID28_REMAPPING_FINISHED__SHIFT 0x1c
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID29_REMAPPING_FINISHED__SHIFT 0x1d
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID30_REMAPPING_FINISHED__SHIFT 0x1e
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID31_REMAPPING_FINISHED__SHIFT 0x1f
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED_MASK 0x00000001L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED_MASK 0x00000002L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED_MASK 0x00000004L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED_MASK 0x00000008L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED_MASK 0x00000010L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED_MASK 0x00000020L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED_MASK 0x00000040L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED_MASK 0x00000080L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED_MASK 0x00000100L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED_MASK 0x00000200L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED_MASK 0x00000400L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED_MASK 0x00000800L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED_MASK 0x00001000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED_MASK 0x00002000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED_MASK 0x00004000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED_MASK 0x00008000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID16_REMAPPING_FINISHED_MASK 0x00010000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID17_REMAPPING_FINISHED_MASK 0x00020000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID18_REMAPPING_FINISHED_MASK 0x00040000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID19_REMAPPING_FINISHED_MASK 0x00080000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID20_REMAPPING_FINISHED_MASK 0x00100000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID21_REMAPPING_FINISHED_MASK 0x00200000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID22_REMAPPING_FINISHED_MASK 0x00400000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID23_REMAPPING_FINISHED_MASK 0x00800000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID24_REMAPPING_FINISHED_MASK 0x01000000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID25_REMAPPING_FINISHED_MASK 0x02000000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID26_REMAPPING_FINISHED_MASK 0x04000000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID27_REMAPPING_FINISHED_MASK 0x08000000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID28_REMAPPING_FINISHED_MASK 0x10000000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID29_REMAPPING_FINISHED_MASK 0x20000000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID30_REMAPPING_FINISHED_MASK 0x40000000L
+#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID31_REMAPPING_FINISHED_MASK 0x80000000L
+//ATC_VMID0_PASID_MAPPING
+#define ATC_VMID0_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID0_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID0_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID0_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID0_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID0_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID1_PASID_MAPPING
+#define ATC_VMID1_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID1_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID1_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID1_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID1_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID1_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID2_PASID_MAPPING
+#define ATC_VMID2_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID2_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID2_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID2_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID2_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID2_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID3_PASID_MAPPING
+#define ATC_VMID3_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID3_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID3_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID3_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID3_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID3_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID4_PASID_MAPPING
+#define ATC_VMID4_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID4_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID4_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID4_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID4_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID4_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID5_PASID_MAPPING
+#define ATC_VMID5_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID5_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID5_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID5_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID5_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID5_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID6_PASID_MAPPING
+#define ATC_VMID6_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID6_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID6_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID6_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID6_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID6_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID7_PASID_MAPPING
+#define ATC_VMID7_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID7_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID7_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID7_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID7_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID7_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID8_PASID_MAPPING
+#define ATC_VMID8_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID8_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID8_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID8_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID8_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID8_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID9_PASID_MAPPING
+#define ATC_VMID9_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID9_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID9_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID9_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID9_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID9_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID10_PASID_MAPPING
+#define ATC_VMID10_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID10_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID10_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID10_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID10_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID10_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID11_PASID_MAPPING
+#define ATC_VMID11_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID11_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID11_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID11_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID11_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID11_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID12_PASID_MAPPING
+#define ATC_VMID12_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID12_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID12_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID12_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID12_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID12_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID13_PASID_MAPPING
+#define ATC_VMID13_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID13_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID13_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID13_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID13_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID13_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID14_PASID_MAPPING
+#define ATC_VMID14_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID14_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID14_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID14_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID14_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID14_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID15_PASID_MAPPING
+#define ATC_VMID15_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID15_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID15_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID15_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID15_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID15_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_ATS_VMID_STATUS
+#define ATC_ATS_VMID_STATUS__VMID0_OUTSTANDING__SHIFT 0x0
+#define ATC_ATS_VMID_STATUS__VMID1_OUTSTANDING__SHIFT 0x1
+#define ATC_ATS_VMID_STATUS__VMID2_OUTSTANDING__SHIFT 0x2
+#define ATC_ATS_VMID_STATUS__VMID3_OUTSTANDING__SHIFT 0x3
+#define ATC_ATS_VMID_STATUS__VMID4_OUTSTANDING__SHIFT 0x4
+#define ATC_ATS_VMID_STATUS__VMID5_OUTSTANDING__SHIFT 0x5
+#define ATC_ATS_VMID_STATUS__VMID6_OUTSTANDING__SHIFT 0x6
+#define ATC_ATS_VMID_STATUS__VMID7_OUTSTANDING__SHIFT 0x7
+#define ATC_ATS_VMID_STATUS__VMID8_OUTSTANDING__SHIFT 0x8
+#define ATC_ATS_VMID_STATUS__VMID9_OUTSTANDING__SHIFT 0x9
+#define ATC_ATS_VMID_STATUS__VMID10_OUTSTANDING__SHIFT 0xa
+#define ATC_ATS_VMID_STATUS__VMID11_OUTSTANDING__SHIFT 0xb
+#define ATC_ATS_VMID_STATUS__VMID12_OUTSTANDING__SHIFT 0xc
+#define ATC_ATS_VMID_STATUS__VMID13_OUTSTANDING__SHIFT 0xd
+#define ATC_ATS_VMID_STATUS__VMID14_OUTSTANDING__SHIFT 0xe
+#define ATC_ATS_VMID_STATUS__VMID15_OUTSTANDING__SHIFT 0xf
+#define ATC_ATS_VMID_STATUS__VMID16_OUTSTANDING__SHIFT 0x10
+#define ATC_ATS_VMID_STATUS__VMID17_OUTSTANDING__SHIFT 0x11
+#define ATC_ATS_VMID_STATUS__VMID18_OUTSTANDING__SHIFT 0x12
+#define ATC_ATS_VMID_STATUS__VMID19_OUTSTANDING__SHIFT 0x13
+#define ATC_ATS_VMID_STATUS__VMID20_OUTSTANDING__SHIFT 0x14
+#define ATC_ATS_VMID_STATUS__VMID21_OUTSTANDING__SHIFT 0x15
+#define ATC_ATS_VMID_STATUS__VMID22_OUTSTANDING__SHIFT 0x16
+#define ATC_ATS_VMID_STATUS__VMID23_OUTSTANDING__SHIFT 0x17
+#define ATC_ATS_VMID_STATUS__VMID24_OUTSTANDING__SHIFT 0x18
+#define ATC_ATS_VMID_STATUS__VMID25_OUTSTANDING__SHIFT 0x19
+#define ATC_ATS_VMID_STATUS__VMID26_OUTSTANDING__SHIFT 0x1a
+#define ATC_ATS_VMID_STATUS__VMID27_OUTSTANDING__SHIFT 0x1b
+#define ATC_ATS_VMID_STATUS__VMID28_OUTSTANDING__SHIFT 0x1c
+#define ATC_ATS_VMID_STATUS__VMID29_OUTSTANDING__SHIFT 0x1d
+#define ATC_ATS_VMID_STATUS__VMID30_OUTSTANDING__SHIFT 0x1e
+#define ATC_ATS_VMID_STATUS__VMID31_OUTSTANDING__SHIFT 0x1f
+#define ATC_ATS_VMID_STATUS__VMID0_OUTSTANDING_MASK 0x00000001L
+#define ATC_ATS_VMID_STATUS__VMID1_OUTSTANDING_MASK 0x00000002L
+#define ATC_ATS_VMID_STATUS__VMID2_OUTSTANDING_MASK 0x00000004L
+#define ATC_ATS_VMID_STATUS__VMID3_OUTSTANDING_MASK 0x00000008L
+#define ATC_ATS_VMID_STATUS__VMID4_OUTSTANDING_MASK 0x00000010L
+#define ATC_ATS_VMID_STATUS__VMID5_OUTSTANDING_MASK 0x00000020L
+#define ATC_ATS_VMID_STATUS__VMID6_OUTSTANDING_MASK 0x00000040L
+#define ATC_ATS_VMID_STATUS__VMID7_OUTSTANDING_MASK 0x00000080L
+#define ATC_ATS_VMID_STATUS__VMID8_OUTSTANDING_MASK 0x00000100L
+#define ATC_ATS_VMID_STATUS__VMID9_OUTSTANDING_MASK 0x00000200L
+#define ATC_ATS_VMID_STATUS__VMID10_OUTSTANDING_MASK 0x00000400L
+#define ATC_ATS_VMID_STATUS__VMID11_OUTSTANDING_MASK 0x00000800L
+#define ATC_ATS_VMID_STATUS__VMID12_OUTSTANDING_MASK 0x00001000L
+#define ATC_ATS_VMID_STATUS__VMID13_OUTSTANDING_MASK 0x00002000L
+#define ATC_ATS_VMID_STATUS__VMID14_OUTSTANDING_MASK 0x00004000L
+#define ATC_ATS_VMID_STATUS__VMID15_OUTSTANDING_MASK 0x00008000L
+#define ATC_ATS_VMID_STATUS__VMID16_OUTSTANDING_MASK 0x00010000L
+#define ATC_ATS_VMID_STATUS__VMID17_OUTSTANDING_MASK 0x00020000L
+#define ATC_ATS_VMID_STATUS__VMID18_OUTSTANDING_MASK 0x00040000L
+#define ATC_ATS_VMID_STATUS__VMID19_OUTSTANDING_MASK 0x00080000L
+#define ATC_ATS_VMID_STATUS__VMID20_OUTSTANDING_MASK 0x00100000L
+#define ATC_ATS_VMID_STATUS__VMID21_OUTSTANDING_MASK 0x00200000L
+#define ATC_ATS_VMID_STATUS__VMID22_OUTSTANDING_MASK 0x00400000L
+#define ATC_ATS_VMID_STATUS__VMID23_OUTSTANDING_MASK 0x00800000L
+#define ATC_ATS_VMID_STATUS__VMID24_OUTSTANDING_MASK 0x01000000L
+#define ATC_ATS_VMID_STATUS__VMID25_OUTSTANDING_MASK 0x02000000L
+#define ATC_ATS_VMID_STATUS__VMID26_OUTSTANDING_MASK 0x04000000L
+#define ATC_ATS_VMID_STATUS__VMID27_OUTSTANDING_MASK 0x08000000L
+#define ATC_ATS_VMID_STATUS__VMID28_OUTSTANDING_MASK 0x10000000L
+#define ATC_ATS_VMID_STATUS__VMID29_OUTSTANDING_MASK 0x20000000L
+#define ATC_ATS_VMID_STATUS__VMID30_OUTSTANDING_MASK 0x40000000L
+#define ATC_ATS_VMID_STATUS__VMID31_OUTSTANDING_MASK 0x80000000L
+//ATC_ATS_GFX_ATCL2_STATUS
+#define ATC_ATS_GFX_ATCL2_STATUS__POWERED_DOWN__SHIFT 0x0
+#define ATC_ATS_GFX_ATCL2_STATUS__POWERED_DOWN_MASK 0x00000001L
+//ATC_PERFCOUNTER0_CFG
+#define ATC_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define ATC_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define ATC_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define ATC_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//ATC_PERFCOUNTER1_CFG
+#define ATC_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define ATC_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define ATC_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define ATC_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//ATC_PERFCOUNTER2_CFG
+#define ATC_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define ATC_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define ATC_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define ATC_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//ATC_PERFCOUNTER3_CFG
+#define ATC_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define ATC_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define ATC_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define ATC_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//ATC_PERFCOUNTER_RSLT_CNTL
+#define ATC_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define ATC_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define ATC_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define ATC_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define ATC_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define ATC_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define ATC_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define ATC_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//ATC_PERFCOUNTER_LO
+#define ATC_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define ATC_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//ATC_PERFCOUNTER_HI
+#define ATC_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define ATC_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define ATC_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define ATC_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//ATHUB_PCIE_ATS_CNTL
+#define ATHUB_PCIE_ATS_CNTL__STU__SHIFT 0x10
+#define ATHUB_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
+#define ATHUB_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_PASID_CNTL
+#define ATHUB_PCIE_PASID_CNTL__PASID_EN__SHIFT 0x10
+#define ATHUB_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x11
+#define ATHUB_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x12
+#define ATHUB_PCIE_PASID_CNTL__PASID_EN_MASK 0x00010000L
+#define ATHUB_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x00020000L
+#define ATHUB_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x00040000L
+//ATHUB_PCIE_PAGE_REQ_CNTL
+#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0
+#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1
+#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x00000001L
+#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x00000002L
+//ATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC
+#define ATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0
+#define ATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL
+//ATHUB_COMMAND
+#define ATHUB_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define ATHUB_COMMAND__BUS_MASTER_EN_MASK 0x00000004L
+//ATHUB_PCIE_ATS_CNTL_VF_0
+#define ATHUB_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_1
+#define ATHUB_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_2
+#define ATHUB_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_3
+#define ATHUB_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_4
+#define ATHUB_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_5
+#define ATHUB_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_6
+#define ATHUB_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_7
+#define ATHUB_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_8
+#define ATHUB_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_9
+#define ATHUB_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_10
+#define ATHUB_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_11
+#define ATHUB_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_12
+#define ATHUB_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_13
+#define ATHUB_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_14
+#define ATHUB_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_PCIE_ATS_CNTL_VF_15
+#define ATHUB_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
+#define ATHUB_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
+//ATHUB_MEM_POWER_LS
+#define ATHUB_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATHUB_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATHUB_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATHUB_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//ATS_IH_CREDIT
+#define ATS_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define ATS_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
+#define ATS_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define ATS_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
+//ATHUB_IH_CREDIT
+#define ATHUB_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define ATHUB_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
+#define ATHUB_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define ATHUB_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
+//ATC_VMID16_PASID_MAPPING
+#define ATC_VMID16_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID16_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID16_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID16_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID16_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID16_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID17_PASID_MAPPING
+#define ATC_VMID17_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID17_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID17_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID17_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID17_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID17_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID18_PASID_MAPPING
+#define ATC_VMID18_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID18_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID18_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID18_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID18_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID18_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID19_PASID_MAPPING
+#define ATC_VMID19_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID19_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID19_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID19_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID19_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID19_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID20_PASID_MAPPING
+#define ATC_VMID20_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID20_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID20_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID20_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID20_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID20_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID21_PASID_MAPPING
+#define ATC_VMID21_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID21_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID21_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID21_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID21_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID21_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID22_PASID_MAPPING
+#define ATC_VMID22_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID22_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID22_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID22_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID22_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID22_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID23_PASID_MAPPING
+#define ATC_VMID23_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID23_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID23_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID23_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID23_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID23_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID24_PASID_MAPPING
+#define ATC_VMID24_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID24_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID24_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID24_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID24_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID24_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID25_PASID_MAPPING
+#define ATC_VMID25_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID25_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID25_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID25_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID25_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID25_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID26_PASID_MAPPING
+#define ATC_VMID26_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID26_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID26_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID26_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID26_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID26_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID27_PASID_MAPPING
+#define ATC_VMID27_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID27_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID27_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID27_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID27_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID27_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID28_PASID_MAPPING
+#define ATC_VMID28_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID28_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID28_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID28_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID28_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID28_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID29_PASID_MAPPING
+#define ATC_VMID29_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID29_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID29_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID29_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID29_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID29_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID30_PASID_MAPPING
+#define ATC_VMID30_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID30_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID30_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID30_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID30_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID30_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_VMID31_PASID_MAPPING
+#define ATC_VMID31_PASID_MAPPING__PASID__SHIFT 0x0
+#define ATC_VMID31_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
+#define ATC_VMID31_PASID_MAPPING__VALID__SHIFT 0x1f
+#define ATC_VMID31_PASID_MAPPING__PASID_MASK 0x0000FFFFL
+#define ATC_VMID31_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
+#define ATC_VMID31_PASID_MAPPING__VALID_MASK 0x80000000L
+//ATC_ATS_MMHUB_ATCL2_STATUS
+#define ATC_ATS_MMHUB_ATCL2_STATUS__POWERED_DOWN__SHIFT 0x0
+#define ATC_ATS_MMHUB_ATCL2_STATUS__POWERED_DOWN_MASK 0x00000001L
+//ATHUB_SHARED_VIRT_RESET_REQ
+#define ATHUB_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define ATHUB_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define ATHUB_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define ATHUB_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//ATHUB_SHARED_ACTIVE_FCN_ID
+#define ATHUB_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define ATHUB_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define ATHUB_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define ATHUB_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//ATC_ATS_SDPPORT_CNTL
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_SELF_ACTIVATE__SHIFT 0x0
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_CFG_MODE__SHIFT 0x1
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_HALT_THRESHOLD__SHIFT 0x3
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_SELF_ACTIVATE__SHIFT 0x7
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_QUICK_COMACK__SHIFT 0x8
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_HALT_THRESHOLD__SHIFT 0x9
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_PASSIVE_MODE__SHIFT 0xd
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_RDY_MODE__SHIFT 0xe
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_MMHUB_RDY_MODE__SHIFT 0xf
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKEN__SHIFT 0x10
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKENRCV__SHIFT 0x11
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKEN__SHIFT 0x12
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x13
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKEN__SHIFT 0x14
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKENRCV__SHIFT 0x15
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKEN__SHIFT 0x16
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKENRCV__SHIFT 0x17
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKEN__SHIFT 0x18
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKENRCV__SHIFT 0x19
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_SELF_ACTIVATE_MASK 0x00000001L
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_CFG_MODE_MASK 0x00000006L
+#define ATC_ATS_SDPPORT_CNTL__ATS_INV_HALT_THRESHOLD_MASK 0x00000078L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_SELF_ACTIVATE_MASK 0x00000080L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_QUICK_COMACK_MASK 0x00000100L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_HALT_THRESHOLD_MASK 0x00001E00L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_PASSIVE_MODE_MASK 0x00002000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_RDY_MODE_MASK 0x00004000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_MMHUB_RDY_MODE_MASK 0x00008000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKEN_MASK 0x00010000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKENRCV_MASK 0x00020000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKEN_MASK 0x00040000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKENRCV_MASK 0x00080000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKEN_MASK 0x00100000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKENRCV_MASK 0x00200000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKEN_MASK 0x00400000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKENRCV_MASK 0x00800000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKEN_MASK 0x01000000L
+#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKENRCV_MASK 0x02000000L
+//ATC_ATS_VMID_SNAPSHOT_GFX_STAT
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID0__SHIFT 0x0
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID1__SHIFT 0x1
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID2__SHIFT 0x2
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID3__SHIFT 0x3
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID4__SHIFT 0x4
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID5__SHIFT 0x5
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID6__SHIFT 0x6
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID7__SHIFT 0x7
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID8__SHIFT 0x8
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID9__SHIFT 0x9
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID10__SHIFT 0xa
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID11__SHIFT 0xb
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID12__SHIFT 0xc
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID13__SHIFT 0xd
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID14__SHIFT 0xe
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID15__SHIFT 0xf
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID0_MASK 0x00000001L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID1_MASK 0x00000002L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID2_MASK 0x00000004L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID3_MASK 0x00000008L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID4_MASK 0x00000010L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID5_MASK 0x00000020L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID6_MASK 0x00000040L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID7_MASK 0x00000080L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID8_MASK 0x00000100L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID9_MASK 0x00000200L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID10_MASK 0x00000400L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID11_MASK 0x00000800L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID12_MASK 0x00001000L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID13_MASK 0x00002000L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID14_MASK 0x00004000L
+#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID15_MASK 0x00008000L
+//ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID0__SHIFT 0x0
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID1__SHIFT 0x1
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID2__SHIFT 0x2
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID3__SHIFT 0x3
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID4__SHIFT 0x4
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID5__SHIFT 0x5
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID6__SHIFT 0x6
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID7__SHIFT 0x7
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID8__SHIFT 0x8
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID9__SHIFT 0x9
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID10__SHIFT 0xa
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID11__SHIFT 0xb
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID12__SHIFT 0xc
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID13__SHIFT 0xd
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID14__SHIFT 0xe
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID15__SHIFT 0xf
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID0_MASK 0x00000001L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID1_MASK 0x00000002L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID2_MASK 0x00000004L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID3_MASK 0x00000008L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID4_MASK 0x00000010L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID5_MASK 0x00000020L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID6_MASK 0x00000040L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID7_MASK 0x00000080L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID8_MASK 0x00000100L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID9_MASK 0x00000200L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID10_MASK 0x00000400L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID11_MASK 0x00000800L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID12_MASK 0x00001000L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID13_MASK 0x00002000L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID14_MASK 0x00004000L
+#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID15_MASK 0x00008000L
+
+
+// addressBlock: athub_xpbdec
+//XPB_RTR_SRC_APRTR0
+#define XPB_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR1
+#define XPB_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR2
+#define XPB_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR3
+#define XPB_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR4
+#define XPB_RTR_SRC_APRTR4__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR4__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR5
+#define XPB_RTR_SRC_APRTR5__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR5__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR6
+#define XPB_RTR_SRC_APRTR6__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR6__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR7
+#define XPB_RTR_SRC_APRTR7__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR7__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR8
+#define XPB_RTR_SRC_APRTR8__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR8__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_SRC_APRTR9
+#define XPB_RTR_SRC_APRTR9__BASE_ADDR__SHIFT 0x0
+#define XPB_RTR_SRC_APRTR9__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_XDMA_RTR_SRC_APRTR0
+#define XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0
+#define XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_XDMA_RTR_SRC_APRTR1
+#define XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0
+#define XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_XDMA_RTR_SRC_APRTR2
+#define XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0
+#define XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_XDMA_RTR_SRC_APRTR3
+#define XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0
+#define XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL
+//XPB_RTR_DEST_MAP0
+#define XPB_RTR_DEST_MAP0__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP1
+#define XPB_RTR_DEST_MAP1__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP2
+#define XPB_RTR_DEST_MAP2__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP3
+#define XPB_RTR_DEST_MAP3__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP4
+#define XPB_RTR_DEST_MAP4__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP4__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP4__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP4__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP4__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP4__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP4__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP4__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP5
+#define XPB_RTR_DEST_MAP5__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP5__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP5__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP5__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP5__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP5__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP5__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP5__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP6
+#define XPB_RTR_DEST_MAP6__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP6__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP6__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP6__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP6__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP6__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP6__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP6__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP7
+#define XPB_RTR_DEST_MAP7__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP7__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP7__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP7__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP7__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP7__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP7__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP7__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP8
+#define XPB_RTR_DEST_MAP8__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP8__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP8__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP8__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP8__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP8__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP8__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP8__APRTR_SIZE_MASK 0x7C000000L
+//XPB_RTR_DEST_MAP9
+#define XPB_RTR_DEST_MAP9__NMR__SHIFT 0x0
+#define XPB_RTR_DEST_MAP9__DEST_OFFSET__SHIFT 0x1
+#define XPB_RTR_DEST_MAP9__DEST_SEL__SHIFT 0x14
+#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_RTR_DEST_MAP9__APRTR_SIZE__SHIFT 0x1a
+#define XPB_RTR_DEST_MAP9__NMR_MASK 0x00000001L
+#define XPB_RTR_DEST_MAP9__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_RTR_DEST_MAP9__DEST_SEL_MASK 0x00F00000L
+#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_RTR_DEST_MAP9__APRTR_SIZE_MASK 0x7C000000L
+//XPB_XDMA_RTR_DEST_MAP0
+#define XPB_XDMA_RTR_DEST_MAP0__NMR__SHIFT 0x0
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a
+#define XPB_XDMA_RTR_DEST_MAP0__NMR_MASK 0x00000001L
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L
+#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L
+//XPB_XDMA_RTR_DEST_MAP1
+#define XPB_XDMA_RTR_DEST_MAP1__NMR__SHIFT 0x0
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a
+#define XPB_XDMA_RTR_DEST_MAP1__NMR_MASK 0x00000001L
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L
+#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L
+//XPB_XDMA_RTR_DEST_MAP2
+#define XPB_XDMA_RTR_DEST_MAP2__NMR__SHIFT 0x0
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a
+#define XPB_XDMA_RTR_DEST_MAP2__NMR_MASK 0x00000001L
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L
+#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L
+//XPB_XDMA_RTR_DEST_MAP3
+#define XPB_XDMA_RTR_DEST_MAP3__NMR__SHIFT 0x0
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18
+#define XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a
+#define XPB_XDMA_RTR_DEST_MAP3__NMR_MASK 0x00000001L
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L
+#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
+#define XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L
+//XPB_CLG_CFG0
+#define XPB_CLG_CFG0__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG0__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG0__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG0__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG0__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG0__HOST_FLUSH_MASK 0x00003C00L
+//XPB_CLG_CFG1
+#define XPB_CLG_CFG1__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG1__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG1__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG1__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG1__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG1__HOST_FLUSH_MASK 0x00003C00L
+//XPB_CLG_CFG2
+#define XPB_CLG_CFG2__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG2__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG2__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG2__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG2__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG2__HOST_FLUSH_MASK 0x00003C00L
+//XPB_CLG_CFG3
+#define XPB_CLG_CFG3__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG3__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG3__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG3__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG3__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG3__HOST_FLUSH_MASK 0x00003C00L
+//XPB_CLG_CFG4
+#define XPB_CLG_CFG4__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG4__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG4__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG4__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG4__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG4__HOST_FLUSH_MASK 0x00003C00L
+//XPB_CLG_CFG5
+#define XPB_CLG_CFG5__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG5__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG5__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG5__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG5__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG5__HOST_FLUSH_MASK 0x00003C00L
+//XPB_CLG_CFG6
+#define XPB_CLG_CFG6__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG6__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG6__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG6__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG6__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG6__HOST_FLUSH_MASK 0x00003C00L
+//XPB_CLG_CFG7
+#define XPB_CLG_CFG7__WCB_NUM__SHIFT 0x0
+#define XPB_CLG_CFG7__P2P_BAR__SHIFT 0x7
+#define XPB_CLG_CFG7__HOST_FLUSH__SHIFT 0xa
+#define XPB_CLG_CFG7__WCB_NUM_MASK 0x0000000FL
+#define XPB_CLG_CFG7__P2P_BAR_MASK 0x00000380L
+#define XPB_CLG_CFG7__HOST_FLUSH_MASK 0x00003C00L
+//XPB_CLG_EXTRA
+#define XPB_CLG_EXTRA__CMP0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA__CMP0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA__VLD0__SHIFT 0xb
+#define XPB_CLG_EXTRA__CLG0_NUM__SHIFT 0xc
+#define XPB_CLG_EXTRA__CMP1_HIGH__SHIFT 0xf
+#define XPB_CLG_EXTRA__CMP1_LOW__SHIFT 0x15
+#define XPB_CLG_EXTRA__VLD1__SHIFT 0x1a
+#define XPB_CLG_EXTRA__CLG1_NUM__SHIFT 0x1b
+#define XPB_CLG_EXTRA__CMP0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA__CMP0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA__VLD0_MASK 0x00000800L
+#define XPB_CLG_EXTRA__CLG0_NUM_MASK 0x00007000L
+#define XPB_CLG_EXTRA__CMP1_HIGH_MASK 0x001F8000L
+#define XPB_CLG_EXTRA__CMP1_LOW_MASK 0x03E00000L
+#define XPB_CLG_EXTRA__VLD1_MASK 0x04000000L
+#define XPB_CLG_EXTRA__CLG1_NUM_MASK 0x38000000L
+//XPB_CLG_EXTRA_MSK
+#define XPB_CLG_EXTRA_MSK__MSK0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_MSK__MSK0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA_MSK__MSK1_HIGH__SHIFT 0xb
+#define XPB_CLG_EXTRA_MSK__MSK1_LOW__SHIFT 0x11
+#define XPB_CLG_EXTRA_MSK__MSK0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA_MSK__MSK0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA_MSK__MSK1_HIGH_MASK 0x0001F800L
+#define XPB_CLG_EXTRA_MSK__MSK1_LOW_MASK 0x003E0000L
+//XPB_LB_ADDR
+#define XPB_LB_ADDR__CMP0__SHIFT 0x0
+#define XPB_LB_ADDR__MASK0__SHIFT 0xa
+#define XPB_LB_ADDR__CMP1__SHIFT 0x14
+#define XPB_LB_ADDR__MASK1__SHIFT 0x1a
+#define XPB_LB_ADDR__CMP0_MASK 0x000003FFL
+#define XPB_LB_ADDR__MASK0_MASK 0x000FFC00L
+#define XPB_LB_ADDR__CMP1_MASK 0x03F00000L
+#define XPB_LB_ADDR__MASK1_MASK 0xFC000000L
+//XPB_WCB_STS
+#define XPB_WCB_STS__PBUF_VLD__SHIFT 0x0
+#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x10
+#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x17
+#define XPB_WCB_STS__PBUF_VLD_MASK 0x0000FFFFL
+#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT_MASK 0x007F0000L
+#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT_MASK 0x3F800000L
+//XPB_HST_CFG
+#define XPB_HST_CFG__BAR_UP_WR_CMD__SHIFT 0x0
+#define XPB_HST_CFG__BAR_UP_WR_CMD_MASK 0x00000001L
+//XPB_P2P_BAR_CFG
+#define XPB_P2P_BAR_CFG__ADDR_SIZE__SHIFT 0x0
+#define XPB_P2P_BAR_CFG__SEND_BAR__SHIFT 0x4
+#define XPB_P2P_BAR_CFG__SNOOP__SHIFT 0x6
+#define XPB_P2P_BAR_CFG__SEND_DIS__SHIFT 0x7
+#define XPB_P2P_BAR_CFG__COMPRESS_DIS__SHIFT 0x8
+#define XPB_P2P_BAR_CFG__UPDATE_DIS__SHIFT 0x9
+#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR__SHIFT 0xa
+#define XPB_P2P_BAR_CFG__RD_EN__SHIFT 0xb
+#define XPB_P2P_BAR_CFG__ATC_TRANSLATED__SHIFT 0xc
+#define XPB_P2P_BAR_CFG__ADDR_SIZE_MASK 0x0000000FL
+#define XPB_P2P_BAR_CFG__SEND_BAR_MASK 0x00000030L
+#define XPB_P2P_BAR_CFG__SNOOP_MASK 0x00000040L
+#define XPB_P2P_BAR_CFG__SEND_DIS_MASK 0x00000080L
+#define XPB_P2P_BAR_CFG__COMPRESS_DIS_MASK 0x00000100L
+#define XPB_P2P_BAR_CFG__UPDATE_DIS_MASK 0x00000200L
+#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR_MASK 0x00000400L
+#define XPB_P2P_BAR_CFG__RD_EN_MASK 0x00000800L
+#define XPB_P2P_BAR_CFG__ATC_TRANSLATED_MASK 0x00001000L
+//XPB_P2P_BAR0
+#define XPB_P2P_BAR0__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR0__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR0__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR0__VALID__SHIFT 0xc
+#define XPB_P2P_BAR0__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR0__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR0__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR0__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR0__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR0__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR0__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR0__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR0__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR0__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR0__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR0__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR1
+#define XPB_P2P_BAR1__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR1__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR1__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR1__VALID__SHIFT 0xc
+#define XPB_P2P_BAR1__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR1__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR1__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR1__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR1__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR1__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR1__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR1__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR1__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR1__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR1__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR1__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR2
+#define XPB_P2P_BAR2__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR2__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR2__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR2__VALID__SHIFT 0xc
+#define XPB_P2P_BAR2__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR2__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR2__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR2__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR2__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR2__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR2__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR2__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR2__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR2__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR2__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR2__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR3
+#define XPB_P2P_BAR3__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR3__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR3__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR3__VALID__SHIFT 0xc
+#define XPB_P2P_BAR3__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR3__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR3__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR3__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR3__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR3__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR3__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR3__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR3__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR3__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR3__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR3__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR4
+#define XPB_P2P_BAR4__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR4__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR4__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR4__VALID__SHIFT 0xc
+#define XPB_P2P_BAR4__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR4__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR4__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR4__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR4__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR4__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR4__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR4__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR4__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR4__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR4__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR4__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR5
+#define XPB_P2P_BAR5__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR5__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR5__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR5__VALID__SHIFT 0xc
+#define XPB_P2P_BAR5__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR5__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR5__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR5__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR5__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR5__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR5__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR5__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR5__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR5__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR5__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR5__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR6
+#define XPB_P2P_BAR6__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR6__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR6__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR6__VALID__SHIFT 0xc
+#define XPB_P2P_BAR6__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR6__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR6__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR6__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR6__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR6__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR6__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR6__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR6__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR6__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR6__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR6__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR7
+#define XPB_P2P_BAR7__HOST_FLUSH__SHIFT 0x0
+#define XPB_P2P_BAR7__REG_SYS_BAR__SHIFT 0x4
+#define XPB_P2P_BAR7__MEM_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR7__VALID__SHIFT 0xc
+#define XPB_P2P_BAR7__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR7__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR7__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR7__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR7__HOST_FLUSH_MASK 0x0000000FL
+#define XPB_P2P_BAR7__REG_SYS_BAR_MASK 0x000000F0L
+#define XPB_P2P_BAR7__MEM_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR7__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR7__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR7__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR7__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR7__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR_SETUP
+#define XPB_P2P_BAR_SETUP__SEL__SHIFT 0x0
+#define XPB_P2P_BAR_SETUP__REG_SYS_BAR__SHIFT 0x8
+#define XPB_P2P_BAR_SETUP__VALID__SHIFT 0xc
+#define XPB_P2P_BAR_SETUP__SEND_DIS__SHIFT 0xd
+#define XPB_P2P_BAR_SETUP__COMPRESS_DIS__SHIFT 0xe
+#define XPB_P2P_BAR_SETUP__RESERVED__SHIFT 0xf
+#define XPB_P2P_BAR_SETUP__ADDRESS__SHIFT 0x10
+#define XPB_P2P_BAR_SETUP__SEL_MASK 0x000000FFL
+#define XPB_P2P_BAR_SETUP__REG_SYS_BAR_MASK 0x00000F00L
+#define XPB_P2P_BAR_SETUP__VALID_MASK 0x00001000L
+#define XPB_P2P_BAR_SETUP__SEND_DIS_MASK 0x00002000L
+#define XPB_P2P_BAR_SETUP__COMPRESS_DIS_MASK 0x00004000L
+#define XPB_P2P_BAR_SETUP__RESERVED_MASK 0x00008000L
+#define XPB_P2P_BAR_SETUP__ADDRESS_MASK 0xFFFF0000L
+//XPB_P2P_BAR_DELTA_ABOVE
+#define XPB_P2P_BAR_DELTA_ABOVE__EN__SHIFT 0x0
+#define XPB_P2P_BAR_DELTA_ABOVE__DELTA__SHIFT 0x8
+#define XPB_P2P_BAR_DELTA_ABOVE__EN_MASK 0x000000FFL
+#define XPB_P2P_BAR_DELTA_ABOVE__DELTA_MASK 0x0FFFFF00L
+//XPB_P2P_BAR_DELTA_BELOW
+#define XPB_P2P_BAR_DELTA_BELOW__EN__SHIFT 0x0
+#define XPB_P2P_BAR_DELTA_BELOW__DELTA__SHIFT 0x8
+#define XPB_P2P_BAR_DELTA_BELOW__EN_MASK 0x000000FFL
+#define XPB_P2P_BAR_DELTA_BELOW__DELTA_MASK 0x0FFFFF00L
+//XPB_PEER_SYS_BAR0
+#define XPB_PEER_SYS_BAR0__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR0__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR1
+#define XPB_PEER_SYS_BAR1__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR1__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR2
+#define XPB_PEER_SYS_BAR2__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR2__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR3
+#define XPB_PEER_SYS_BAR3__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR3__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR4
+#define XPB_PEER_SYS_BAR4__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR4__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR4__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR4__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR5
+#define XPB_PEER_SYS_BAR5__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR5__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR5__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR5__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR6
+#define XPB_PEER_SYS_BAR6__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR6__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR6__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR6__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR7
+#define XPB_PEER_SYS_BAR7__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR7__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR7__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR7__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR8
+#define XPB_PEER_SYS_BAR8__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR8__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR8__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR8__ADDR_MASK 0xFFFFFFFEL
+//XPB_PEER_SYS_BAR9
+#define XPB_PEER_SYS_BAR9__VALID__SHIFT 0x0
+#define XPB_PEER_SYS_BAR9__ADDR__SHIFT 0x1
+#define XPB_PEER_SYS_BAR9__VALID_MASK 0x00000001L
+#define XPB_PEER_SYS_BAR9__ADDR_MASK 0xFFFFFFFEL
+//XPB_XDMA_PEER_SYS_BAR0
+#define XPB_XDMA_PEER_SYS_BAR0__VALID__SHIFT 0x0
+#define XPB_XDMA_PEER_SYS_BAR0__ADDR__SHIFT 0x1
+#define XPB_XDMA_PEER_SYS_BAR0__VALID_MASK 0x00000001L
+#define XPB_XDMA_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL
+//XPB_XDMA_PEER_SYS_BAR1
+#define XPB_XDMA_PEER_SYS_BAR1__VALID__SHIFT 0x0
+#define XPB_XDMA_PEER_SYS_BAR1__ADDR__SHIFT 0x1
+#define XPB_XDMA_PEER_SYS_BAR1__VALID_MASK 0x00000001L
+#define XPB_XDMA_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL
+//XPB_XDMA_PEER_SYS_BAR2
+#define XPB_XDMA_PEER_SYS_BAR2__VALID__SHIFT 0x0
+#define XPB_XDMA_PEER_SYS_BAR2__ADDR__SHIFT 0x1
+#define XPB_XDMA_PEER_SYS_BAR2__VALID_MASK 0x00000001L
+#define XPB_XDMA_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL
+//XPB_XDMA_PEER_SYS_BAR3
+#define XPB_XDMA_PEER_SYS_BAR3__VALID__SHIFT 0x0
+#define XPB_XDMA_PEER_SYS_BAR3__ADDR__SHIFT 0x1
+#define XPB_XDMA_PEER_SYS_BAR3__VALID_MASK 0x00000001L
+#define XPB_XDMA_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL
+//XPB_CLK_GAT
+#define XPB_CLK_GAT__ONDLY__SHIFT 0x0
+#define XPB_CLK_GAT__OFFDLY__SHIFT 0x6
+#define XPB_CLK_GAT__RDYDLY__SHIFT 0xc
+#define XPB_CLK_GAT__ENABLE__SHIFT 0x12
+#define XPB_CLK_GAT__MEM_LS_ENABLE__SHIFT 0x13
+#define XPB_CLK_GAT__ONDLY_MASK 0x0000003FL
+#define XPB_CLK_GAT__OFFDLY_MASK 0x00000FC0L
+#define XPB_CLK_GAT__RDYDLY_MASK 0x0003F000L
+#define XPB_CLK_GAT__ENABLE_MASK 0x00040000L
+#define XPB_CLK_GAT__MEM_LS_ENABLE_MASK 0x00080000L
+//XPB_INTF_CFG
+#define XPB_INTF_CFG__RPB_WRREQ_CRD__SHIFT 0x0
+#define XPB_INTF_CFG__MC_WRRET_ASK__SHIFT 0x8
+#define XPB_INTF_CFG__XSP_REQ_CRD__SHIFT 0x10
+#define XPB_INTF_CFG__BIF_REG_SNOOP_SEL__SHIFT 0x17
+#define XPB_INTF_CFG__BIF_REG_SNOOP_VAL__SHIFT 0x18
+#define XPB_INTF_CFG__BIF_MEM_SNOOP_SEL__SHIFT 0x19
+#define XPB_INTF_CFG__BIF_MEM_SNOOP_VAL__SHIFT 0x1a
+#define XPB_INTF_CFG__XSP_SNOOP_SEL__SHIFT 0x1b
+#define XPB_INTF_CFG__XSP_SNOOP_VAL__SHIFT 0x1d
+#define XPB_INTF_CFG__XSP_ORDERING_SEL__SHIFT 0x1e
+#define XPB_INTF_CFG__XSP_ORDERING_VAL__SHIFT 0x1f
+#define XPB_INTF_CFG__RPB_WRREQ_CRD_MASK 0x000000FFL
+#define XPB_INTF_CFG__MC_WRRET_ASK_MASK 0x0000FF00L
+#define XPB_INTF_CFG__XSP_REQ_CRD_MASK 0x007F0000L
+#define XPB_INTF_CFG__BIF_REG_SNOOP_SEL_MASK 0x00800000L
+#define XPB_INTF_CFG__BIF_REG_SNOOP_VAL_MASK 0x01000000L
+#define XPB_INTF_CFG__BIF_MEM_SNOOP_SEL_MASK 0x02000000L
+#define XPB_INTF_CFG__BIF_MEM_SNOOP_VAL_MASK 0x04000000L
+#define XPB_INTF_CFG__XSP_SNOOP_SEL_MASK 0x18000000L
+#define XPB_INTF_CFG__XSP_SNOOP_VAL_MASK 0x20000000L
+#define XPB_INTF_CFG__XSP_ORDERING_SEL_MASK 0x40000000L
+#define XPB_INTF_CFG__XSP_ORDERING_VAL_MASK 0x80000000L
+//XPB_INTF_STS
+#define XPB_INTF_STS__RPB_WRREQ_CRD__SHIFT 0x0
+#define XPB_INTF_STS__XSP_REQ_CRD__SHIFT 0x8
+#define XPB_INTF_STS__HOP_DATA_BUF_FULL__SHIFT 0xf
+#define XPB_INTF_STS__HOP_ATTR_BUF_FULL__SHIFT 0x10
+#define XPB_INTF_STS__CNS_BUF_FULL__SHIFT 0x11
+#define XPB_INTF_STS__CNS_BUF_BUSY__SHIFT 0x12
+#define XPB_INTF_STS__RPB_RDREQ_CRD__SHIFT 0x13
+#define XPB_INTF_STS__RPB_WRREQ_CRD_MASK 0x000000FFL
+#define XPB_INTF_STS__XSP_REQ_CRD_MASK 0x00007F00L
+#define XPB_INTF_STS__HOP_DATA_BUF_FULL_MASK 0x00008000L
+#define XPB_INTF_STS__HOP_ATTR_BUF_FULL_MASK 0x00010000L
+#define XPB_INTF_STS__CNS_BUF_FULL_MASK 0x00020000L
+#define XPB_INTF_STS__CNS_BUF_BUSY_MASK 0x00040000L
+#define XPB_INTF_STS__RPB_RDREQ_CRD_MASK 0x07F80000L
+//XPB_PIPE_STS
+#define XPB_PIPE_STS__WCB_ANY_PBUF__SHIFT 0x0
+#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x1
+#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x8
+#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL__SHIFT 0xf
+#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL__SHIFT 0x10
+#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL__SHIFT 0x11
+#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL__SHIFT 0x12
+#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL__SHIFT 0x13
+#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL__SHIFT 0x14
+#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL__SHIFT 0x15
+#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL__SHIFT 0x16
+#define XPB_PIPE_STS__RET_BUF_FULL__SHIFT 0x17
+#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS__SHIFT 0x18
+#define XPB_PIPE_STS__WCB_ANY_PBUF_MASK 0x00000001L
+#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT_MASK 0x000000FEL
+#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT_MASK 0x00007F00L
+#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL_MASK 0x00008000L
+#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL_MASK 0x00010000L
+#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL_MASK 0x00020000L
+#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL_MASK 0x00040000L
+#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL_MASK 0x00080000L
+#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL_MASK 0x00100000L
+#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL_MASK 0x00200000L
+#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL_MASK 0x00400000L
+#define XPB_PIPE_STS__RET_BUF_FULL_MASK 0x00800000L
+#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS_MASK 0xFF000000L
+//XPB_SUB_CTRL
+#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB__SHIFT 0x0
+#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ__SHIFT 0x1
+#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ__SHIFT 0x2
+#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ__SHIFT 0x3
+#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ__SHIFT 0x4
+#define XPB_SUB_CTRL__STALL_WCB_SID_REQ__SHIFT 0x5
+#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND__SHIFT 0x6
+#define XPB_SUB_CTRL__STALL_WCB_HST_REQ__SHIFT 0x7
+#define XPB_SUB_CTRL__STALL_HST_HOP_REQ__SHIFT 0x8
+#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR__SHIFT 0x9
+#define XPB_SUB_CTRL__RESET_CNS__SHIFT 0xa
+#define XPB_SUB_CTRL__RESET_RTR__SHIFT 0xb
+#define XPB_SUB_CTRL__RESET_RET__SHIFT 0xc
+#define XPB_SUB_CTRL__RESET_MAP__SHIFT 0xd
+#define XPB_SUB_CTRL__RESET_WCB__SHIFT 0xe
+#define XPB_SUB_CTRL__RESET_HST__SHIFT 0xf
+#define XPB_SUB_CTRL__RESET_HOP__SHIFT 0x10
+#define XPB_SUB_CTRL__RESET_SID__SHIFT 0x11
+#define XPB_SUB_CTRL__RESET_SRB__SHIFT 0x12
+#define XPB_SUB_CTRL__RESET_CGR__SHIFT 0x13
+#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB_MASK 0x00000001L
+#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ_MASK 0x00000002L
+#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ_MASK 0x00000004L
+#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ_MASK 0x00000008L
+#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ_MASK 0x00000010L
+#define XPB_SUB_CTRL__STALL_WCB_SID_REQ_MASK 0x00000020L
+#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND_MASK 0x00000040L
+#define XPB_SUB_CTRL__STALL_WCB_HST_REQ_MASK 0x00000080L
+#define XPB_SUB_CTRL__STALL_HST_HOP_REQ_MASK 0x00000100L
+#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR_MASK 0x00000200L
+#define XPB_SUB_CTRL__RESET_CNS_MASK 0x00000400L
+#define XPB_SUB_CTRL__RESET_RTR_MASK 0x00000800L
+#define XPB_SUB_CTRL__RESET_RET_MASK 0x00001000L
+#define XPB_SUB_CTRL__RESET_MAP_MASK 0x00002000L
+#define XPB_SUB_CTRL__RESET_WCB_MASK 0x00004000L
+#define XPB_SUB_CTRL__RESET_HST_MASK 0x00008000L
+#define XPB_SUB_CTRL__RESET_HOP_MASK 0x00010000L
+#define XPB_SUB_CTRL__RESET_SID_MASK 0x00020000L
+#define XPB_SUB_CTRL__RESET_SRB_MASK 0x00040000L
+#define XPB_SUB_CTRL__RESET_CGR_MASK 0x00080000L
+//XPB_MAP_INVERT_FLUSH_NUM_LSB
+#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM__SHIFT 0x0
+#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM_MASK 0x0000FFFFL
+//XPB_PERF_KNOBS
+#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH__SHIFT 0x0
+#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH__SHIFT 0x6
+#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH__SHIFT 0xc
+#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH_MASK 0x0000003FL
+#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH_MASK 0x00000FC0L
+#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH_MASK 0x0003F000L
+//XPB_STICKY
+#define XPB_STICKY__BITS__SHIFT 0x0
+#define XPB_STICKY__BITS_MASK 0xFFFFFFFFL
+//XPB_STICKY_W1C
+#define XPB_STICKY_W1C__BITS__SHIFT 0x0
+#define XPB_STICKY_W1C__BITS_MASK 0xFFFFFFFFL
+//XPB_MISC_CFG
+#define XPB_MISC_CFG__FIELDNAME0__SHIFT 0x0
+#define XPB_MISC_CFG__FIELDNAME1__SHIFT 0x8
+#define XPB_MISC_CFG__FIELDNAME2__SHIFT 0x10
+#define XPB_MISC_CFG__FIELDNAME3__SHIFT 0x18
+#define XPB_MISC_CFG__TRIGGERNAME__SHIFT 0x1f
+#define XPB_MISC_CFG__FIELDNAME0_MASK 0x000000FFL
+#define XPB_MISC_CFG__FIELDNAME1_MASK 0x0000FF00L
+#define XPB_MISC_CFG__FIELDNAME2_MASK 0x00FF0000L
+#define XPB_MISC_CFG__FIELDNAME3_MASK 0x7F000000L
+#define XPB_MISC_CFG__TRIGGERNAME_MASK 0x80000000L
+//XPB_INTF_CFG2
+#define XPB_INTF_CFG2__RPB_RDREQ_CRD__SHIFT 0x0
+#define XPB_INTF_CFG2__RPB_RDREQ_CRD_MASK 0x000000FFL
+//XPB_CLG_EXTRA_RD
+#define XPB_CLG_EXTRA_RD__CMP0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_RD__CMP0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA_RD__VLD0__SHIFT 0xb
+#define XPB_CLG_EXTRA_RD__CLG0_NUM__SHIFT 0xc
+#define XPB_CLG_EXTRA_RD__CMP1_HIGH__SHIFT 0xf
+#define XPB_CLG_EXTRA_RD__CMP1_LOW__SHIFT 0x15
+#define XPB_CLG_EXTRA_RD__VLD1__SHIFT 0x1a
+#define XPB_CLG_EXTRA_RD__CLG1_NUM__SHIFT 0x1b
+#define XPB_CLG_EXTRA_RD__CMP0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA_RD__CMP0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA_RD__VLD0_MASK 0x00000800L
+#define XPB_CLG_EXTRA_RD__CLG0_NUM_MASK 0x00007000L
+#define XPB_CLG_EXTRA_RD__CMP1_HIGH_MASK 0x001F8000L
+#define XPB_CLG_EXTRA_RD__CMP1_LOW_MASK 0x03E00000L
+#define XPB_CLG_EXTRA_RD__VLD1_MASK 0x04000000L
+#define XPB_CLG_EXTRA_RD__CLG1_NUM_MASK 0x38000000L
+//XPB_CLG_EXTRA_MSK_RD
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH__SHIFT 0x0
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW__SHIFT 0x6
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH__SHIFT 0xb
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW__SHIFT 0x11
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH_MASK 0x0000003FL
+#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW_MASK 0x000007C0L
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH_MASK 0x0001F800L
+#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW_MASK 0x003E0000L
+//XPB_CLG_GFX_MATCH
+#define XPB_CLG_GFX_MATCH__FARBIRC0_ID__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH__FARBIRC1_ID__SHIFT 0x6
+#define XPB_CLG_GFX_MATCH__FARBIRC2_ID__SHIFT 0xc
+#define XPB_CLG_GFX_MATCH__FARBIRC3_ID__SHIFT 0x12
+#define XPB_CLG_GFX_MATCH__FARBIRC0_VLD__SHIFT 0x18
+#define XPB_CLG_GFX_MATCH__FARBIRC1_VLD__SHIFT 0x19
+#define XPB_CLG_GFX_MATCH__FARBIRC2_VLD__SHIFT 0x1a
+#define XPB_CLG_GFX_MATCH__FARBIRC3_VLD__SHIFT 0x1b
+#define XPB_CLG_GFX_MATCH__FARBIRC0_ID_MASK 0x0000003FL
+#define XPB_CLG_GFX_MATCH__FARBIRC1_ID_MASK 0x00000FC0L
+#define XPB_CLG_GFX_MATCH__FARBIRC2_ID_MASK 0x0003F000L
+#define XPB_CLG_GFX_MATCH__FARBIRC3_ID_MASK 0x00FC0000L
+#define XPB_CLG_GFX_MATCH__FARBIRC0_VLD_MASK 0x01000000L
+#define XPB_CLG_GFX_MATCH__FARBIRC1_VLD_MASK 0x02000000L
+#define XPB_CLG_GFX_MATCH__FARBIRC2_VLD_MASK 0x04000000L
+#define XPB_CLG_GFX_MATCH__FARBIRC3_VLD_MASK 0x08000000L
+//XPB_CLG_GFX_MATCH_MSK
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x6
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0xc
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x12
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x0000003FL
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x00000FC0L
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x0003F000L
+#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0x00FC0000L
+//XPB_CLG_MM_MATCH
+#define XPB_CLG_MM_MATCH__FARBIRC0_ID__SHIFT 0x0
+#define XPB_CLG_MM_MATCH__FARBIRC1_ID__SHIFT 0x6
+#define XPB_CLG_MM_MATCH__FARBIRC2_ID__SHIFT 0xc
+#define XPB_CLG_MM_MATCH__FARBIRC3_ID__SHIFT 0x12
+#define XPB_CLG_MM_MATCH__FARBIRC0_VLD__SHIFT 0x18
+#define XPB_CLG_MM_MATCH__FARBIRC1_VLD__SHIFT 0x19
+#define XPB_CLG_MM_MATCH__FARBIRC2_VLD__SHIFT 0x1a
+#define XPB_CLG_MM_MATCH__FARBIRC3_VLD__SHIFT 0x1b
+#define XPB_CLG_MM_MATCH__FARBIRC0_ID_MASK 0x0000003FL
+#define XPB_CLG_MM_MATCH__FARBIRC1_ID_MASK 0x00000FC0L
+#define XPB_CLG_MM_MATCH__FARBIRC2_ID_MASK 0x0003F000L
+#define XPB_CLG_MM_MATCH__FARBIRC3_ID_MASK 0x00FC0000L
+#define XPB_CLG_MM_MATCH__FARBIRC0_VLD_MASK 0x01000000L
+#define XPB_CLG_MM_MATCH__FARBIRC1_VLD_MASK 0x02000000L
+#define XPB_CLG_MM_MATCH__FARBIRC2_VLD_MASK 0x04000000L
+#define XPB_CLG_MM_MATCH__FARBIRC3_VLD_MASK 0x08000000L
+//XPB_CLG_MM_MATCH_MSK
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x6
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0xc
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x12
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x0000003FL
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x00000FC0L
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x0003F000L
+#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0x00FC0000L
+//XPB_CLG_GFX_UNITID_MAPPING0
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING1
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING2
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING3
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING4
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING5
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING6
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_GFX_UNITID_MAPPING7
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING0
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING1
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING2
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
+//XPB_CLG_MM_UNITID_MAPPING3
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
+#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
+#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
+#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
+
+
+// addressBlock: athub_rpbdec
+//RPB_PASSPW_CONF
+#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE__SHIFT 0x0
+#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE__SHIFT 0x1
+#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE__SHIFT 0x2
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE__SHIFT 0x3
+#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE__SHIFT 0x4
+#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE__SHIFT 0x5
+#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE__SHIFT 0x6
+#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE__SHIFT 0x7
+#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE__SHIFT 0x8
+#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE__SHIFT 0x9
+#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE__SHIFT 0xa
+#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE_EN__SHIFT 0xb
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN__SHIFT 0xc
+#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE_EN__SHIFT 0xd
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE__SHIFT 0xe
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN__SHIFT 0xf
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE__SHIFT 0x10
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN__SHIFT 0x11
+#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE_MASK 0x00000001L
+#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE_MASK 0x00000002L
+#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE_MASK 0x00000004L
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_MASK 0x00000008L
+#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE_MASK 0x00000010L
+#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE_MASK 0x00000020L
+#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE_MASK 0x00000040L
+#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE_MASK 0x00000080L
+#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE_MASK 0x00000100L
+#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE_MASK 0x00000200L
+#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE_MASK 0x00000400L
+#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE_EN_MASK 0x00000800L
+#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN_MASK 0x00001000L
+#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE_EN_MASK 0x00002000L
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_MASK 0x00004000L
+#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN_MASK 0x00008000L
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_MASK 0x00010000L
+#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN_MASK 0x00020000L
+//RPB_BLOCKLEVEL_CONF
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE__SHIFT 0x0
+#define RPB_BLOCKLEVEL_CONF__ATC_TR_BLOCKLEVEL__SHIFT 0x2
+#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL__SHIFT 0x4
+#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL__SHIFT 0x6
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE__SHIFT 0x8
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE__SHIFT 0xa
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE__SHIFT 0xc
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0xe
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0xf
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x10
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x11
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_MASK 0x00000003L
+#define RPB_BLOCKLEVEL_CONF__ATC_TR_BLOCKLEVEL_MASK 0x0000000CL
+#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL_MASK 0x00000030L
+#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL_MASK 0x000000C0L
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_MASK 0x00000300L
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_MASK 0x00000C00L
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_MASK 0x00003000L
+#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00004000L
+#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00008000L
+#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00010000L
+#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00020000L
+//RPB_TAG_CONF
+#define RPB_TAG_CONF__RPB_ATS_TR__SHIFT 0x0
+#define RPB_TAG_CONF__RPB_IO_WR__SHIFT 0x8
+#define RPB_TAG_CONF__RPB_ATS_PR__SHIFT 0x10
+#define RPB_TAG_CONF__RPB_ATS_TR_MASK 0x000000FFL
+#define RPB_TAG_CONF__RPB_IO_WR_MASK 0x0000FF00L
+#define RPB_TAG_CONF__RPB_ATS_PR_MASK 0x00FF0000L
+//RPB_EFF_CNTL
+#define RPB_EFF_CNTL__WR_LAZY_TIMER__SHIFT 0x0
+#define RPB_EFF_CNTL__RD_LAZY_TIMER__SHIFT 0x8
+#define RPB_EFF_CNTL__WR_LAZY_TIMER_MASK 0x000000FFL
+#define RPB_EFF_CNTL__RD_LAZY_TIMER_MASK 0x0000FF00L
+//RPB_ARB_CNTL
+#define RPB_ARB_CNTL__RD_SWITCH_NUM__SHIFT 0x0
+#define RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x8
+#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM__SHIFT 0x10
+#define RPB_ARB_CNTL__ARB_MODE__SHIFT 0x18
+#define RPB_ARB_CNTL__SWITCH_NUM_MODE__SHIFT 0x19
+#define RPB_ARB_CNTL__RD_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_ARB_CNTL__WR_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM_MASK 0x00FF0000L
+#define RPB_ARB_CNTL__ARB_MODE_MASK 0x01000000L
+#define RPB_ARB_CNTL__SWITCH_NUM_MODE_MASK 0x02000000L
+//RPB_ARB_CNTL2
+#define RPB_ARB_CNTL2__P2P_SWITCH_NUM__SHIFT 0x0
+#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM__SHIFT 0x8
+#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM__SHIFT 0x10
+#define RPB_ARB_CNTL2__P2P_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM_MASK 0x00FF0000L
+//RPB_BIF_CNTL
+#define RPB_BIF_CNTL__VC0_SWITCH_NUM__SHIFT 0x0
+#define RPB_BIF_CNTL__VC1_SWITCH_NUM__SHIFT 0x8
+#define RPB_BIF_CNTL__ARB_MODE__SHIFT 0x10
+#define RPB_BIF_CNTL__DRAIN_VC_NUM__SHIFT 0x11
+#define RPB_BIF_CNTL__SWITCH_ENABLE__SHIFT 0x12
+#define RPB_BIF_CNTL__SWITCH_THRESHOLD__SHIFT 0x13
+#define RPB_BIF_CNTL__PAGE_PRI_EN__SHIFT 0x1b
+#define RPB_BIF_CNTL__TR_PRI_EN__SHIFT 0x1c
+#define RPB_BIF_CNTL__VC0_CHAINED_OVERRIDE__SHIFT 0x1d
+#define RPB_BIF_CNTL__PARITY_CHECK_EN__SHIFT 0x1e
+#define RPB_BIF_CNTL__VC0_SWITCH_NUM_MASK 0x000000FFL
+#define RPB_BIF_CNTL__VC1_SWITCH_NUM_MASK 0x0000FF00L
+#define RPB_BIF_CNTL__ARB_MODE_MASK 0x00010000L
+#define RPB_BIF_CNTL__DRAIN_VC_NUM_MASK 0x00020000L
+#define RPB_BIF_CNTL__SWITCH_ENABLE_MASK 0x00040000L
+#define RPB_BIF_CNTL__SWITCH_THRESHOLD_MASK 0x07F80000L
+#define RPB_BIF_CNTL__PAGE_PRI_EN_MASK 0x08000000L
+#define RPB_BIF_CNTL__TR_PRI_EN_MASK 0x10000000L
+#define RPB_BIF_CNTL__VC0_CHAINED_OVERRIDE_MASK 0x20000000L
+#define RPB_BIF_CNTL__PARITY_CHECK_EN_MASK 0x40000000L
+//RPB_WR_SWITCH_CNTL
+#define RPB_WR_SWITCH_CNTL__QUEUE0_SWITCH_NUM__SHIFT 0x0
+#define RPB_WR_SWITCH_CNTL__QUEUE1_SWITCH_NUM__SHIFT 0x7
+#define RPB_WR_SWITCH_CNTL__QUEUE2_SWITCH_NUM__SHIFT 0xe
+#define RPB_WR_SWITCH_CNTL__QUEUE3_SWITCH_NUM__SHIFT 0x15
+#define RPB_WR_SWITCH_CNTL__SWITCH_NUM_MODE__SHIFT 0x1c
+#define RPB_WR_SWITCH_CNTL__QUEUE0_SWITCH_NUM_MASK 0x0000007FL
+#define RPB_WR_SWITCH_CNTL__QUEUE1_SWITCH_NUM_MASK 0x00003F80L
+#define RPB_WR_SWITCH_CNTL__QUEUE2_SWITCH_NUM_MASK 0x001FC000L
+#define RPB_WR_SWITCH_CNTL__QUEUE3_SWITCH_NUM_MASK 0x0FE00000L
+#define RPB_WR_SWITCH_CNTL__SWITCH_NUM_MODE_MASK 0x10000000L
+//RPB_RD_SWITCH_CNTL
+#define RPB_RD_SWITCH_CNTL__QUEUE0_SWITCH_NUM__SHIFT 0x0
+#define RPB_RD_SWITCH_CNTL__QUEUE1_SWITCH_NUM__SHIFT 0x7
+#define RPB_RD_SWITCH_CNTL__QUEUE2_SWITCH_NUM__SHIFT 0xe
+#define RPB_RD_SWITCH_CNTL__QUEUE3_SWITCH_NUM__SHIFT 0x15
+#define RPB_RD_SWITCH_CNTL__SWITCH_NUM_MODE__SHIFT 0x1c
+#define RPB_RD_SWITCH_CNTL__QUEUE0_SWITCH_NUM_MASK 0x0000007FL
+#define RPB_RD_SWITCH_CNTL__QUEUE1_SWITCH_NUM_MASK 0x00003F80L
+#define RPB_RD_SWITCH_CNTL__QUEUE2_SWITCH_NUM_MASK 0x001FC000L
+#define RPB_RD_SWITCH_CNTL__QUEUE3_SWITCH_NUM_MASK 0x0FE00000L
+#define RPB_RD_SWITCH_CNTL__SWITCH_NUM_MODE_MASK 0x10000000L
+//RPB_CID_QUEUE_WR
+#define RPB_CID_QUEUE_WR__CLIENT_ID_LOW__SHIFT 0x0
+#define RPB_CID_QUEUE_WR__CLIENT_ID_HIGH__SHIFT 0x5
+#define RPB_CID_QUEUE_WR__UPDATE_MODE__SHIFT 0xb
+#define RPB_CID_QUEUE_WR__WRITE_QUEUE__SHIFT 0xc
+#define RPB_CID_QUEUE_WR__READ_QUEUE__SHIFT 0xf
+#define RPB_CID_QUEUE_WR__UPDATE__SHIFT 0x12
+#define RPB_CID_QUEUE_WR__CLIENT_ID_LOW_MASK 0x0000001FL
+#define RPB_CID_QUEUE_WR__CLIENT_ID_HIGH_MASK 0x000007E0L
+#define RPB_CID_QUEUE_WR__UPDATE_MODE_MASK 0x00000800L
+#define RPB_CID_QUEUE_WR__WRITE_QUEUE_MASK 0x00007000L
+#define RPB_CID_QUEUE_WR__READ_QUEUE_MASK 0x00038000L
+#define RPB_CID_QUEUE_WR__UPDATE_MASK 0x00040000L
+//RPB_CID_QUEUE_RD
+#define RPB_CID_QUEUE_RD__CLIENT_ID_LOW__SHIFT 0x0
+#define RPB_CID_QUEUE_RD__CLIENT_ID_HIGH__SHIFT 0x5
+#define RPB_CID_QUEUE_RD__WRITE_QUEUE__SHIFT 0xb
+#define RPB_CID_QUEUE_RD__READ_QUEUE__SHIFT 0xe
+#define RPB_CID_QUEUE_RD__CLIENT_ID_LOW_MASK 0x0000001FL
+#define RPB_CID_QUEUE_RD__CLIENT_ID_HIGH_MASK 0x000007E0L
+#define RPB_CID_QUEUE_RD__WRITE_QUEUE_MASK 0x00003800L
+#define RPB_CID_QUEUE_RD__READ_QUEUE_MASK 0x0001C000L
+//RPB_CID_QUEUE_EX
+#define RPB_CID_QUEUE_EX__START__SHIFT 0x0
+#define RPB_CID_QUEUE_EX__OFFSET__SHIFT 0x1
+#define RPB_CID_QUEUE_EX__START_MASK 0x00000001L
+#define RPB_CID_QUEUE_EX__OFFSET_MASK 0x000001FEL
+//RPB_CID_QUEUE_EX_DATA
+#define RPB_CID_QUEUE_EX_DATA__WRITE_ENTRIES__SHIFT 0x0
+#define RPB_CID_QUEUE_EX_DATA__READ_ENTRIES__SHIFT 0x10
+#define RPB_CID_QUEUE_EX_DATA__WRITE_ENTRIES_MASK 0x0000FFFFL
+#define RPB_CID_QUEUE_EX_DATA__READ_ENTRIES_MASK 0xFFFF0000L
+//RPB_SWITCH_CNTL2
+#define RPB_SWITCH_CNTL2__RD_QUEUE4_SWITCH_NUM__SHIFT 0x0
+#define RPB_SWITCH_CNTL2__RD_QUEUE5_SWITCH_NUM__SHIFT 0x7
+#define RPB_SWITCH_CNTL2__WR_QUEUE4_SWITCH_NUM__SHIFT 0xe
+#define RPB_SWITCH_CNTL2__WR_QUEUE5_SWITCH_NUM__SHIFT 0x15
+#define RPB_SWITCH_CNTL2__RD_QUEUE4_SWITCH_NUM_MASK 0x0000007FL
+#define RPB_SWITCH_CNTL2__RD_QUEUE5_SWITCH_NUM_MASK 0x00003F80L
+#define RPB_SWITCH_CNTL2__WR_QUEUE4_SWITCH_NUM_MASK 0x001FC000L
+#define RPB_SWITCH_CNTL2__WR_QUEUE5_SWITCH_NUM_MASK 0x0FE00000L
+//RPB_DEINTRLV_COMBINE_CNTL
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER__SHIFT 0x0
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN__SHIFT 0x4
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE__SHIFT 0x5
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER_MASK 0x0000000FL
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN_MASK 0x00000010L
+#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE_MASK 0x00000020L
+//RPB_VC_SWITCH_RDWR
+#define RPB_VC_SWITCH_RDWR__MODE__SHIFT 0x0
+#define RPB_VC_SWITCH_RDWR__NUM_RD__SHIFT 0x2
+#define RPB_VC_SWITCH_RDWR__NUM_WR__SHIFT 0xa
+#define RPB_VC_SWITCH_RDWR__MODE_MASK 0x00000003L
+#define RPB_VC_SWITCH_RDWR__NUM_RD_MASK 0x000003FCL
+#define RPB_VC_SWITCH_RDWR__NUM_WR_MASK 0x0003FC00L
+//RPB_PERFCOUNTER_LO
+#define RPB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define RPB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//RPB_PERFCOUNTER_HI
+#define RPB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define RPB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define RPB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define RPB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//RPB_PERFCOUNTER0_CFG
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER1_CFG
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER2_CFG
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER3_CFG
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define RPB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define RPB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define RPB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define RPB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define RPB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//RPB_PERFCOUNTER_RSLT_CNTL
+#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//RPB_RD_QUEUE_CNTL
+#define RPB_RD_QUEUE_CNTL__ARB_MODE__SHIFT 0x0
+#define RPB_RD_QUEUE_CNTL__Q4_SHARED__SHIFT 0x1
+#define RPB_RD_QUEUE_CNTL__Q5_SHARED__SHIFT 0x2
+#define RPB_RD_QUEUE_CNTL__Q4_UNITID_EA_MODE__SHIFT 0x3
+#define RPB_RD_QUEUE_CNTL__Q5_UNITID_EA_MODE__SHIFT 0x4
+#define RPB_RD_QUEUE_CNTL__Q4_PATTERN_LOW__SHIFT 0x5
+#define RPB_RD_QUEUE_CNTL__Q4_PATTERN_HIGH__SHIFT 0xa
+#define RPB_RD_QUEUE_CNTL__Q5_PATTERN_LOW__SHIFT 0x10
+#define RPB_RD_QUEUE_CNTL__Q5_PATTERN_HIGH__SHIFT 0x15
+#define RPB_RD_QUEUE_CNTL__ARB_MODE_MASK 0x00000001L
+#define RPB_RD_QUEUE_CNTL__Q4_SHARED_MASK 0x00000002L
+#define RPB_RD_QUEUE_CNTL__Q5_SHARED_MASK 0x00000004L
+#define RPB_RD_QUEUE_CNTL__Q4_UNITID_EA_MODE_MASK 0x00000008L
+#define RPB_RD_QUEUE_CNTL__Q5_UNITID_EA_MODE_MASK 0x00000010L
+#define RPB_RD_QUEUE_CNTL__Q4_PATTERN_LOW_MASK 0x000003E0L
+#define RPB_RD_QUEUE_CNTL__Q4_PATTERN_HIGH_MASK 0x0000FC00L
+#define RPB_RD_QUEUE_CNTL__Q5_PATTERN_LOW_MASK 0x001F0000L
+#define RPB_RD_QUEUE_CNTL__Q5_PATTERN_HIGH_MASK 0x07E00000L
+//RPB_RD_QUEUE_CNTL2
+#define RPB_RD_QUEUE_CNTL2__Q4_PATTERN_MASK_LOW__SHIFT 0x0
+#define RPB_RD_QUEUE_CNTL2__Q4_PATTERN_MASK_HIGH__SHIFT 0x5
+#define RPB_RD_QUEUE_CNTL2__Q5_PATTERN_MASK_LOW__SHIFT 0xb
+#define RPB_RD_QUEUE_CNTL2__Q5_PATTERN_MASK_HIGH__SHIFT 0x10
+#define RPB_RD_QUEUE_CNTL2__Q4_PATTERN_MASK_LOW_MASK 0x0000001FL
+#define RPB_RD_QUEUE_CNTL2__Q4_PATTERN_MASK_HIGH_MASK 0x000007E0L
+#define RPB_RD_QUEUE_CNTL2__Q5_PATTERN_MASK_LOW_MASK 0x0000F800L
+#define RPB_RD_QUEUE_CNTL2__Q5_PATTERN_MASK_HIGH_MASK 0x003F0000L
+//RPB_WR_QUEUE_CNTL
+#define RPB_WR_QUEUE_CNTL__ARB_MODE__SHIFT 0x0
+#define RPB_WR_QUEUE_CNTL__Q4_SHARED__SHIFT 0x1
+#define RPB_WR_QUEUE_CNTL__Q5_SHARED__SHIFT 0x2
+#define RPB_WR_QUEUE_CNTL__Q4_UNITID_EA_MODE__SHIFT 0x3
+#define RPB_WR_QUEUE_CNTL__Q5_UNITID_EA_MODE__SHIFT 0x4
+#define RPB_WR_QUEUE_CNTL__Q4_PATTERN_LOW__SHIFT 0x5
+#define RPB_WR_QUEUE_CNTL__Q4_PATTERN_HIGH__SHIFT 0xa
+#define RPB_WR_QUEUE_CNTL__Q5_PATTERN_LOW__SHIFT 0x10
+#define RPB_WR_QUEUE_CNTL__Q5_PATTERN_HIGH__SHIFT 0x15
+#define RPB_WR_QUEUE_CNTL__ARB_MODE_MASK 0x00000001L
+#define RPB_WR_QUEUE_CNTL__Q4_SHARED_MASK 0x00000002L
+#define RPB_WR_QUEUE_CNTL__Q5_SHARED_MASK 0x00000004L
+#define RPB_WR_QUEUE_CNTL__Q4_UNITID_EA_MODE_MASK 0x00000008L
+#define RPB_WR_QUEUE_CNTL__Q5_UNITID_EA_MODE_MASK 0x00000010L
+#define RPB_WR_QUEUE_CNTL__Q4_PATTERN_LOW_MASK 0x000003E0L
+#define RPB_WR_QUEUE_CNTL__Q4_PATTERN_HIGH_MASK 0x0000FC00L
+#define RPB_WR_QUEUE_CNTL__Q5_PATTERN_LOW_MASK 0x001F0000L
+#define RPB_WR_QUEUE_CNTL__Q5_PATTERN_HIGH_MASK 0x07E00000L
+//RPB_WR_QUEUE_CNTL2
+#define RPB_WR_QUEUE_CNTL2__Q4_PATTERN_MASK_LOW__SHIFT 0x0
+#define RPB_WR_QUEUE_CNTL2__Q4_PATTERN_MASK_HIGH__SHIFT 0x5
+#define RPB_WR_QUEUE_CNTL2__Q5_PATTERN_MASK_LOW__SHIFT 0xb
+#define RPB_WR_QUEUE_CNTL2__Q5_PATTERN_MASK_HIGH__SHIFT 0x10
+#define RPB_WR_QUEUE_CNTL2__Q4_PATTERN_MASK_LOW_MASK 0x0000001FL
+#define RPB_WR_QUEUE_CNTL2__Q4_PATTERN_MASK_HIGH_MASK 0x000007E0L
+#define RPB_WR_QUEUE_CNTL2__Q5_PATTERN_MASK_LOW_MASK 0x0000F800L
+#define RPB_WR_QUEUE_CNTL2__Q5_PATTERN_MASK_HIGH_MASK 0x003F0000L
+//RPB_EA_QUEUE_WR
+#define RPB_EA_QUEUE_WR__EA_NUMBER__SHIFT 0x0
+#define RPB_EA_QUEUE_WR__WRITE_QUEUE__SHIFT 0x5
+#define RPB_EA_QUEUE_WR__READ_QUEUE__SHIFT 0x8
+#define RPB_EA_QUEUE_WR__UPDATE__SHIFT 0xb
+#define RPB_EA_QUEUE_WR__EA_NUMBER_MASK 0x0000001FL
+#define RPB_EA_QUEUE_WR__WRITE_QUEUE_MASK 0x000000E0L
+#define RPB_EA_QUEUE_WR__READ_QUEUE_MASK 0x00000700L
+#define RPB_EA_QUEUE_WR__UPDATE_MASK 0x00000800L
+//RPB_ATS_CNTL
+#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE__SHIFT 0x0
+#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE__SHIFT 0x1
+#define RPB_ATS_CNTL__SWITCH_THRESHOLD__SHIFT 0x2
+#define RPB_ATS_CNTL__TIME_SLICE__SHIFT 0x7
+#define RPB_ATS_CNTL__ATCTR_SWITCH_NUM__SHIFT 0xf
+#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM__SHIFT 0x13
+#define RPB_ATS_CNTL__WR_AT__SHIFT 0x17
+#define RPB_ATS_CNTL__INVAL_COM_CMD__SHIFT 0x19
+#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE_MASK 0x00000001L
+#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE_MASK 0x00000002L
+#define RPB_ATS_CNTL__SWITCH_THRESHOLD_MASK 0x0000007CL
+#define RPB_ATS_CNTL__TIME_SLICE_MASK 0x00007F80L
+#define RPB_ATS_CNTL__ATCTR_SWITCH_NUM_MASK 0x00078000L
+#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM_MASK 0x00780000L
+#define RPB_ATS_CNTL__WR_AT_MASK 0x01800000L
+#define RPB_ATS_CNTL__INVAL_COM_CMD_MASK 0x7E000000L
+//RPB_ATS_CNTL2
+#define RPB_ATS_CNTL2__TRANS_CMD__SHIFT 0x0
+#define RPB_ATS_CNTL2__PAGE_REQ_CMD__SHIFT 0x6
+#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE__SHIFT 0xc
+#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE__SHIFT 0xf
+#define RPB_ATS_CNTL2__VENDOR_ID__SHIFT 0x12
+#define RPB_ATS_CNTL2__TRANS_CMD_MASK 0x0000003FL
+#define RPB_ATS_CNTL2__PAGE_REQ_CMD_MASK 0x00000FC0L
+#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE_MASK 0x00007000L
+#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE_MASK 0x00038000L
+#define RPB_ATS_CNTL2__VENDOR_ID_MASK 0x000C0000L
+//RPB_SDPPORT_CNTL
+#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE__SHIFT 0x0
+#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE__SHIFT 0x1
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT__SHIFT 0x3
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER__SHIFT 0x4
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS__SHIFT 0x5
+#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD__SHIFT 0x6
+#define RPB_SDPPORT_CNTL__NBIF_HST_SELF_ACTIVATE__SHIFT 0xa
+#define RPB_SDPPORT_CNTL__NBIF_HST_CFG_MODE__SHIFT 0xb
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_REISSUE_CREDIT__SHIFT 0xd
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_SATURATE_COUNTER__SHIFT 0xe
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_DISRUPT_FULLDIS__SHIFT 0xf
+#define RPB_SDPPORT_CNTL__NBIF_HST_HALT_THRESHOLD__SHIFT 0x10
+#define RPB_SDPPORT_CNTL__NBIF_HST_PASSIVE_MODE__SHIFT 0x14
+#define RPB_SDPPORT_CNTL__NBIF_HST_QUICK_COMACK__SHIFT 0x15
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN__SHIFT 0x16
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV__SHIFT 0x17
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN__SHIFT 0x18
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x19
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN__SHIFT 0x1a
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV__SHIFT 0x1b
+#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE_MASK 0x00000001L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE_MASK 0x00000006L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT_MASK 0x00000008L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER_MASK 0x00000010L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS_MASK 0x00000020L
+#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD_MASK 0x000003C0L
+#define RPB_SDPPORT_CNTL__NBIF_HST_SELF_ACTIVATE_MASK 0x00000400L
+#define RPB_SDPPORT_CNTL__NBIF_HST_CFG_MODE_MASK 0x00001800L
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_REISSUE_CREDIT_MASK 0x00002000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_SATURATE_COUNTER_MASK 0x00004000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_DISRUPT_FULLDIS_MASK 0x00008000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_HALT_THRESHOLD_MASK 0x000F0000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_PASSIVE_MODE_MASK 0x00100000L
+#define RPB_SDPPORT_CNTL__NBIF_HST_QUICK_COMACK_MASK 0x00200000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN_MASK 0x00400000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV_MASK 0x00800000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN_MASK 0x01000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV_MASK 0x02000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN_MASK 0x04000000L
+#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV_MASK 0x08000000L
+
+#endif
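Note: every register above is described by paired SHIFT/MASK macros in the usual soc15 header convention; a field is read by ANDing the 32-bit register value with its _MASK and shifting right by its __SHIFT, and written by the inverse operation. A minimal C sketch of that pattern follows, using only the RPB_TAG_CONF macros defined earlier in this header; the helper names and the open-coded form are illustrative assumptions (the driver normally goes through its REG_GET_FIELD/REG_SET_FIELD wrappers rather than writing this by hand).

/* Sketch: extracting and updating a bitfield with the generated SHIFT/MASK
 * macros. The function names and the raw register value are hypothetical;
 * only the RPB_TAG_CONF__* macros come from the header above.
 */
static inline unsigned int rpb_get_ats_tr_tags(unsigned int reg_val)
{
	/* Isolate the RPB_ATS_TR field (bits 7:0) and shift it down to bit 0. */
	return (reg_val & RPB_TAG_CONF__RPB_ATS_TR_MASK) >>
	       RPB_TAG_CONF__RPB_ATS_TR__SHIFT;
}

static inline unsigned int rpb_set_io_wr_tags(unsigned int reg_val,
					      unsigned int tags)
{
	/* Clear the RPB_IO_WR field (bits 15:8), then OR in the new value. */
	reg_val &= ~RPB_TAG_CONF__RPB_IO_WR_MASK;
	reg_val |= (tags << RPB_TAG_CONF__RPB_IO_WR__SHIFT) &
		   RPB_TAG_CONF__RPB_IO_WR_MASK;
	return reg_val;
}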
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
index f730d06..f730d06 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
index 6d3162c..6d3162c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
index 4ccf968..4ccf968 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
index b28d4b6..e2a2f11 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
@@ -9364,17 +9364,31 @@
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x14
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x18
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x1c
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00100000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x01000000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x10000000L
//HUBPREQ0_DCSURF_FLIP_CONTROL
#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
index 663d3af..5bf84c6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
@@ -436,7 +436,6 @@
#define mmTA_CNTL_DEFAULT 0x8004d850
#define mmTA_CNTL_AUX_DEFAULT 0x00000000
#define mmTA_RESERVED_010C_DEFAULT 0x00000000
-#define mmTA_GRAD_ADJ_DEFAULT 0x40000040
#define mmTA_STATUS_DEFAULT 0x00000000
#define mmTA_SCRATCH_DEFAULT 0x00000000
@@ -1700,7 +1699,6 @@
#define mmDB_STENCIL_WRITE_BASE_DEFAULT 0x00000000
#define mmDB_STENCIL_WRITE_BASE_HI_DEFAULT 0x00000000
#define mmDB_DFSM_CONTROL_DEFAULT 0x00000000
-#define mmDB_RENDER_FILTER_DEFAULT 0x00000000
#define mmDB_Z_INFO2_DEFAULT 0x00000000
#define mmDB_STENCIL_INFO2_DEFAULT 0x00000000
#define mmTA_BC_BASE_ADDR_DEFAULT 0x00000000
@@ -1806,8 +1804,6 @@
#define mmPA_SC_RIGHT_VERT_GRID_DEFAULT 0x00000000
#define mmPA_SC_LEFT_VERT_GRID_DEFAULT 0x00000000
#define mmPA_SC_HORIZ_GRID_DEFAULT 0x00000000
-#define mmPA_SC_FOV_WINDOW_LR_DEFAULT 0x00000000
-#define mmPA_SC_FOV_WINDOW_TB_DEFAULT 0x00000000
#define mmVGT_MULTI_PRIM_IB_RESET_INDX_DEFAULT 0x00000000
#define mmCB_BLEND_RED_DEFAULT 0x00000000
#define mmCB_BLEND_GREEN_DEFAULT 0x00000000
@@ -2072,7 +2068,6 @@
#define mmVGT_EVENT_INITIATOR_DEFAULT 0x00000000
#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_DEFAULT 0x00000000
#define mmVGT_DRAW_PAYLOAD_CNTL_DEFAULT 0x00000000
-#define mmVGT_INDEX_PAYLOAD_CNTL_DEFAULT 0x00000000
#define mmVGT_INSTANCE_STEP_RATE_0_DEFAULT 0x00000000
#define mmVGT_INSTANCE_STEP_RATE_1_DEFAULT 0x00000000
#define mmVGT_ESGS_RING_ITEMSIZE_DEFAULT 0x00000000
@@ -2490,7 +2485,6 @@
#define mmWD_INDEX_BUF_BASE_DEFAULT 0x00000000
#define mmWD_INDEX_BUF_BASE_HI_DEFAULT 0x00000000
#define mmIA_MULTI_VGT_PARAM_DEFAULT 0x006000ff
-#define mmVGT_OBJECT_ID_DEFAULT 0x00000000
#define mmVGT_INSTANCE_BASE_ID_DEFAULT 0x00000000
#define mmPA_SU_LINE_STIPPLE_VALUE_DEFAULT 0x00000000
#define mmPA_SC_LINE_STIPPLE_STATE_DEFAULT 0x00000000
@@ -2534,7 +2528,6 @@
#define mmSQC_WRITEBACK_DEFAULT 0x00000000
#define mmTA_CS_BC_BASE_ADDR_DEFAULT 0x00000000
#define mmTA_CS_BC_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmTA_GRAD_ADJ_UCONFIG_DEFAULT 0x40000040
#define mmDB_OCCLUSION_COUNT0_LOW_DEFAULT 0x00000000
#define mmDB_OCCLUSION_COUNT0_HI_DEFAULT 0x00000000
#define mmDB_OCCLUSION_COUNT1_LOW_DEFAULT 0x00000000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index e6d6171..4ce090d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -841,8 +841,6 @@
#define mmTA_CNTL_AUX_BASE_IDX 0
#define mmTA_RESERVED_010C 0x0543
#define mmTA_RESERVED_010C_BASE_IDX 0
-#define mmTA_GRAD_ADJ 0x0544
-#define mmTA_GRAD_ADJ_BASE_IDX 0
#define mmTA_STATUS 0x0548
#define mmTA_STATUS_BASE_IDX 0
#define mmTA_SCRATCH 0x0564
@@ -3330,8 +3328,6 @@
#define mmDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
#define mmDB_DFSM_CONTROL 0x0018
#define mmDB_DFSM_CONTROL_BASE_IDX 1
-#define mmDB_RENDER_FILTER 0x0019
-#define mmDB_RENDER_FILTER_BASE_IDX 1
#define mmDB_Z_INFO2 0x001a
#define mmDB_Z_INFO2_BASE_IDX 1
#define mmDB_STENCIL_INFO2 0x001b
@@ -3542,10 +3538,6 @@
#define mmPA_SC_LEFT_VERT_GRID_BASE_IDX 1
#define mmPA_SC_HORIZ_GRID 0x00ea
#define mmPA_SC_HORIZ_GRID_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_LR 0x00eb
-#define mmPA_SC_FOV_WINDOW_LR_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_TB 0x00ec
-#define mmPA_SC_FOV_WINDOW_TB_BASE_IDX 1
#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
#define mmVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
#define mmCB_BLEND_RED 0x0105
@@ -4074,8 +4066,6 @@
#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_BASE_IDX 1
#define mmVGT_DRAW_PAYLOAD_CNTL 0x02a6
#define mmVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
-#define mmVGT_INDEX_PAYLOAD_CNTL 0x02a7
-#define mmVGT_INDEX_PAYLOAD_CNTL_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_0 0x02a8
#define mmVGT_INSTANCE_STEP_RATE_0_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_1 0x02a9
@@ -4908,8 +4898,6 @@
#define mmWD_INDEX_BUF_BASE_HI_BASE_IDX 1
#define mmIA_MULTI_VGT_PARAM 0x2258
#define mmIA_MULTI_VGT_PARAM_BASE_IDX 1
-#define mmVGT_OBJECT_ID 0x2259
-#define mmVGT_OBJECT_ID_BASE_IDX 1
#define mmVGT_INSTANCE_BASE_ID 0x225a
#define mmVGT_INSTANCE_BASE_ID_BASE_IDX 1
#define mmPA_SU_LINE_STIPPLE_VALUE 0x2280
@@ -4996,8 +4984,6 @@
#define mmTA_CS_BC_BASE_ADDR_BASE_IDX 1
#define mmTA_CS_BC_BASE_ADDR_HI 0x2381
#define mmTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
-#define mmTA_GRAD_ADJ_UCONFIG 0x2382
-#define mmTA_GRAD_ADJ_UCONFIG_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_LOW 0x23c0
#define mmDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_HI 0x23c1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index 5c5e9b4..2e1214b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -4576,15 +4576,6 @@
//TA_RESERVED_010C
#define TA_RESERVED_010C__Unused__SHIFT 0x0
#define TA_RESERVED_010C__Unused_MASK 0xFFFFFFFFL
-//TA_GRAD_ADJ
-#define TA_GRAD_ADJ__GRAD_ADJ_0__SHIFT 0x0
-#define TA_GRAD_ADJ__GRAD_ADJ_1__SHIFT 0x8
-#define TA_GRAD_ADJ__GRAD_ADJ_2__SHIFT 0x10
-#define TA_GRAD_ADJ__GRAD_ADJ_3__SHIFT 0x18
-#define TA_GRAD_ADJ__GRAD_ADJ_0_MASK 0x000000FFL
-#define TA_GRAD_ADJ__GRAD_ADJ_1_MASK 0x0000FF00L
-#define TA_GRAD_ADJ__GRAD_ADJ_2_MASK 0x00FF0000L
-#define TA_GRAD_ADJ__GRAD_ADJ_3_MASK 0xFF000000L
//TA_STATUS
#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
@@ -14459,9 +14450,6 @@
#define DB_DFSM_CONTROL__PUNCHOUT_MODE_MASK 0x00000003L
#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP_MASK 0x00000004L
#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW_MASK 0x00000008L
-//DB_RENDER_FILTER
-#define DB_RENDER_FILTER__PS_INVOKE_MASK__SHIFT 0x0
-#define DB_RENDER_FILTER__PS_INVOKE_MASK_MASK 0x0000FFFFL
//DB_Z_INFO2
#define DB_Z_INFO2__EPITCH__SHIFT 0x0
#define DB_Z_INFO2__EPITCH_MASK 0x0000FFFFL
@@ -14959,11 +14947,9 @@
#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE__SHIFT 0x1
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE__SHIFT 0x5
-#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING__SHIFT 0x8
#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE_MASK 0x00000006L
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE_MASK 0x00000060L
-#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING_MASK 0x00000100L
//CP_PERFMON_CNTX_CNTL
#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
@@ -15003,20 +14989,6 @@
#define PA_SC_HORIZ_GRID__TOP_HALF_MASK 0x0000FF00L
#define PA_SC_HORIZ_GRID__BOT_HALF_MASK 0x00FF0000L
#define PA_SC_HORIZ_GRID__BOT_QTR_MASK 0xFF000000L
-//PA_SC_FOV_WINDOW_LR
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_LEFT__SHIFT 0x0
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_RIGHT__SHIFT 0x8
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_LEFT__SHIFT 0x10
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_RIGHT__SHIFT 0x18
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_LEFT_MASK 0x000000FFL
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_RIGHT_MASK 0x0000FF00L
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_LEFT_MASK 0x00FF0000L
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_RIGHT_MASK 0xFF000000L
-//PA_SC_FOV_WINDOW_TB
-#define PA_SC_FOV_WINDOW_TB__FOV_TOP__SHIFT 0x0
-#define PA_SC_FOV_WINDOW_TB__FOV_BOT__SHIFT 0x8
-#define PA_SC_FOV_WINDOW_TB__FOV_TOP_MASK 0x000000FFL
-#define PA_SC_FOV_WINDOW_TB__FOV_BOT_MASK 0x0000FF00L
//VGT_MULTI_PRIM_IB_RESET_INDX
#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
@@ -17010,13 +16982,11 @@
#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
-#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE__SHIFT 0x5
#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
-#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE_MASK 0x00000020L
//PA_CL_OBJPRIM_ID_CNTL
#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL__SHIFT 0x0
#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID__SHIFT 0x1
@@ -17345,9 +17315,6 @@
#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID_MASK 0x00000004L
#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN_MASK 0x00000008L
-//VGT_INDEX_PAYLOAD_CNTL
-#define VGT_INDEX_PAYLOAD_CNTL__COMPOUND_INDEX_EN__SHIFT 0x0
-#define VGT_INDEX_PAYLOAD_CNTL__COMPOUND_INDEX_EN_MASK 0x00000001L
//VGT_INSTANCE_STEP_RATE_0
#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x0
#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xFFFFFFFFL
@@ -19849,9 +19816,6 @@
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC_MASK 0x00200000L
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV_MASK 0x00400000L
#define IA_MULTI_VGT_PARAM__HW_USE_ONLY_MASK 0x00800000L
-//VGT_OBJECT_ID
-#define VGT_OBJECT_ID__REG_OBJ_ID__SHIFT 0x0
-#define VGT_OBJECT_ID__REG_OBJ_ID_MASK 0xFFFFFFFFL
//VGT_INSTANCE_BASE_ID
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
@@ -20067,15 +20031,6 @@
//TA_CS_BC_BASE_ADDR_HI
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
-//TA_GRAD_ADJ_UCONFIG
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_0__SHIFT 0x0
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_1__SHIFT 0x8
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_2__SHIFT 0x10
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_3__SHIFT 0x18
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_0_MASK 0x000000FFL
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_1_MASK 0x0000FF00L
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_2_MASK 0x00FF0000L
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_3_MASK 0xFF000000L
//DB_OCCLUSION_COUNT0_LOW
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
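
A brief aside on how SHIFT/MASK pairs like the ones kept and dropped in the hunks above are consumed. The minimal sketch below is illustrative only: the REG_FIELD_GET helper, the sample register value, and the main() wrapper are assumptions made for this example, not part of the patch or of the amdgpu driver; only the DB_Z_INFO2 pair is copied verbatim from the header.

#include <stdint.h>
#include <stdio.h>

/* Generic field extractor following the <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK naming. */
#define REG_FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Pair copied from gc_9_0_sh_mask.h above. */
#define DB_Z_INFO2__EPITCH__SHIFT 0x0
#define DB_Z_INFO2__EPITCH_MASK   0x0000FFFFL

int main(void)
{
	uint32_t db_z_info2 = 0x12345678;	/* stand-in for an MMIO read */

	/* Mask selects bits 15:0, shift is 0, so this prints "EPITCH = 0x5678". */
	printf("EPITCH = 0x%lx\n",
	       (unsigned long)REG_FIELD_GET(db_z_info2, DB_Z_INFO2, EPITCH));
	return 0;
}
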
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h
index db7ef5e..030e002 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h
@@ -815,8 +815,6 @@
#define mmTA_CNTL_AUX_BASE_IDX 0
#define mmTA_RESERVED_010C 0x0543
#define mmTA_RESERVED_010C_BASE_IDX 0
-#define mmTA_GRAD_ADJ 0x0544
-#define mmTA_GRAD_ADJ_BASE_IDX 0
#define mmTA_STATUS 0x0548
#define mmTA_STATUS_BASE_IDX 0
#define mmTA_SCRATCH 0x0564
@@ -3617,8 +3615,6 @@
#define mmDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
#define mmDB_DFSM_CONTROL 0x0018
#define mmDB_DFSM_CONTROL_BASE_IDX 1
-#define mmDB_RENDER_FILTER 0x0019
-#define mmDB_RENDER_FILTER_BASE_IDX 1
#define mmDB_Z_INFO2 0x001a
#define mmDB_Z_INFO2_BASE_IDX 1
#define mmDB_STENCIL_INFO2 0x001b
@@ -3829,10 +3825,6 @@
#define mmPA_SC_LEFT_VERT_GRID_BASE_IDX 1
#define mmPA_SC_HORIZ_GRID 0x00ea
#define mmPA_SC_HORIZ_GRID_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_LR 0x00eb
-#define mmPA_SC_FOV_WINDOW_LR_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_TB 0x00ec
-#define mmPA_SC_FOV_WINDOW_TB_BASE_IDX 1
#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
#define mmVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
#define mmCB_BLEND_RED 0x0105
@@ -4361,8 +4353,6 @@
#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_BASE_IDX 1
#define mmVGT_DRAW_PAYLOAD_CNTL 0x02a6
#define mmVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
-#define mmVGT_INDEX_PAYLOAD_CNTL 0x02a7
-#define mmVGT_INDEX_PAYLOAD_CNTL_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_0 0x02a8
#define mmVGT_INSTANCE_STEP_RATE_0_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_1 0x02a9
@@ -5195,8 +5185,6 @@
#define mmWD_INDEX_BUF_BASE_HI_BASE_IDX 1
#define mmIA_MULTI_VGT_PARAM 0x2258
#define mmIA_MULTI_VGT_PARAM_BASE_IDX 1
-#define mmVGT_OBJECT_ID 0x2259
-#define mmVGT_OBJECT_ID_BASE_IDX 1
#define mmVGT_INSTANCE_BASE_ID 0x225a
#define mmVGT_INSTANCE_BASE_ID_BASE_IDX 1
#define mmPA_SU_LINE_STIPPLE_VALUE 0x2280
@@ -5283,8 +5271,6 @@
#define mmTA_CS_BC_BASE_ADDR_BASE_IDX 1
#define mmTA_CS_BC_BASE_ADDR_HI 0x2381
#define mmTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
-#define mmTA_GRAD_ADJ_UCONFIG 0x2382
-#define mmTA_GRAD_ADJ_UCONFIG_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_LOW 0x23c0
#define mmDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_HI 0x23c1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h
index ab0a25e..13bfc2e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h
@@ -4149,15 +4149,6 @@
//TA_RESERVED_010C
#define TA_RESERVED_010C__Unused__SHIFT 0x0
#define TA_RESERVED_010C__Unused_MASK 0xFFFFFFFFL
-//TA_GRAD_ADJ
-#define TA_GRAD_ADJ__GRAD_ADJ_0__SHIFT 0x0
-#define TA_GRAD_ADJ__GRAD_ADJ_1__SHIFT 0x8
-#define TA_GRAD_ADJ__GRAD_ADJ_2__SHIFT 0x10
-#define TA_GRAD_ADJ__GRAD_ADJ_3__SHIFT 0x18
-#define TA_GRAD_ADJ__GRAD_ADJ_0_MASK 0x000000FFL
-#define TA_GRAD_ADJ__GRAD_ADJ_1_MASK 0x0000FF00L
-#define TA_GRAD_ADJ__GRAD_ADJ_2_MASK 0x00FF0000L
-#define TA_GRAD_ADJ__GRAD_ADJ_3_MASK 0xFF000000L
//TA_STATUS
#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
@@ -15891,9 +15882,6 @@
#define DB_DFSM_CONTROL__PUNCHOUT_MODE_MASK 0x00000003L
#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP_MASK 0x00000004L
#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW_MASK 0x00000008L
-//DB_RENDER_FILTER
-#define DB_RENDER_FILTER__PS_INVOKE_MASK__SHIFT 0x0
-#define DB_RENDER_FILTER__PS_INVOKE_MASK_MASK 0x0000FFFFL
//DB_Z_INFO2
#define DB_Z_INFO2__EPITCH__SHIFT 0x0
#define DB_Z_INFO2__EPITCH_MASK 0x0000FFFFL
@@ -16435,20 +16423,6 @@
#define PA_SC_HORIZ_GRID__TOP_HALF_MASK 0x0000FF00L
#define PA_SC_HORIZ_GRID__BOT_HALF_MASK 0x00FF0000L
#define PA_SC_HORIZ_GRID__BOT_QTR_MASK 0xFF000000L
-//PA_SC_FOV_WINDOW_LR
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_LEFT__SHIFT 0x0
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_RIGHT__SHIFT 0x8
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_LEFT__SHIFT 0x10
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_RIGHT__SHIFT 0x18
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_LEFT_MASK 0x000000FFL
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_RIGHT_MASK 0x0000FF00L
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_LEFT_MASK 0x00FF0000L
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_RIGHT_MASK 0xFF000000L
-//PA_SC_FOV_WINDOW_TB
-#define PA_SC_FOV_WINDOW_TB__FOV_TOP__SHIFT 0x0
-#define PA_SC_FOV_WINDOW_TB__FOV_BOT__SHIFT 0x8
-#define PA_SC_FOV_WINDOW_TB__FOV_TOP_MASK 0x000000FFL
-#define PA_SC_FOV_WINDOW_TB__FOV_BOT_MASK 0x0000FF00L
//VGT_MULTI_PRIM_IB_RESET_INDX
#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
@@ -18777,9 +18751,6 @@
#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID_MASK 0x00000004L
#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN_MASK 0x00000008L
-//VGT_INDEX_PAYLOAD_CNTL
-#define VGT_INDEX_PAYLOAD_CNTL__COMPOUND_INDEX_EN__SHIFT 0x0
-#define VGT_INDEX_PAYLOAD_CNTL__COMPOUND_INDEX_EN_MASK 0x00000001L
//VGT_INSTANCE_STEP_RATE_0
#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x0
#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xFFFFFFFFL
@@ -21281,9 +21252,6 @@
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC_MASK 0x00200000L
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV_MASK 0x00400000L
#define IA_MULTI_VGT_PARAM__HW_USE_ONLY_MASK 0x00800000L
-//VGT_OBJECT_ID
-#define VGT_OBJECT_ID__REG_OBJ_ID__SHIFT 0x0
-#define VGT_OBJECT_ID__REG_OBJ_ID_MASK 0xFFFFFFFFL
//VGT_INSTANCE_BASE_ID
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
@@ -21499,15 +21467,6 @@
//TA_CS_BC_BASE_ADDR_HI
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
-//TA_GRAD_ADJ_UCONFIG
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_0__SHIFT 0x0
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_1__SHIFT 0x8
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_2__SHIFT 0x10
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_3__SHIFT 0x18
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_0_MASK 0x000000FFL
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_1_MASK 0x0000FF00L
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_2_MASK 0x00FF0000L
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_3_MASK 0xFF000000L
//DB_OCCLUSION_COUNT0_LOW
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
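
For the offset headers in this series, each mm<REG> value is a dword offset within its address block and the matching <REG>_BASE_IDX selects which per-ASIC base address the offset is added to. The sketch below is a hedged illustration of that lookup, not driver code: the gc_reg_base table holds placeholder values (the real ones come from the per-ASIC ip_offset tables) and rreg32 is a caller-supplied stand-in for the platform's 32-bit register read; only the mmDB_DFSM_CONTROL pair is copied verbatim from the gc_9_1 header above.

#include <stdint.h>

/* Offset/index pair copied from gc_9_1_offset.h above. */
#define mmDB_DFSM_CONTROL          0x0018
#define mmDB_DFSM_CONTROL_BASE_IDX 1

/* Placeholder per-ASIC segment bases, in dword units. */
static const uint32_t gc_reg_base[] = { 0x2000, 0xa000 };

/* Resolve a register to its dword index in the MMIO aperture. */
static inline uint32_t gc_reg_index(uint32_t offset, uint32_t base_idx)
{
	return gc_reg_base[base_idx] + offset;
}

/* rreg32 stands in for whatever 32-bit register read the platform provides. */
uint32_t read_dfsm_control(uint32_t (*rreg32)(uint32_t reg_index))
{
	return rreg32(gc_reg_index(mmDB_DFSM_CONTROL, mmDB_DFSM_CONTROL_BASE_IDX));
}
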
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_offset.h
new file mode 100644
index 0000000..5ab240c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_offset.h
@@ -0,0 +1,7497 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_2_1_OFFSET_HEADER
+#define _gc_9_2_1_OFFSET_HEADER
+
+
+
+// addressBlock: gc_grbmdec
+// base address: 0x8000
+#define mmGRBM_CNTL 0x0000
+#define mmGRBM_CNTL_BASE_IDX 0
+#define mmGRBM_SKEW_CNTL 0x0001
+#define mmGRBM_SKEW_CNTL_BASE_IDX 0
+#define mmGRBM_STATUS2 0x0002
+#define mmGRBM_STATUS2_BASE_IDX 0
+#define mmGRBM_PWR_CNTL 0x0003
+#define mmGRBM_PWR_CNTL_BASE_IDX 0
+#define mmGRBM_STATUS 0x0004
+#define mmGRBM_STATUS_BASE_IDX 0
+#define mmGRBM_STATUS_SE0 0x0005
+#define mmGRBM_STATUS_SE0_BASE_IDX 0
+#define mmGRBM_STATUS_SE1 0x0006
+#define mmGRBM_STATUS_SE1_BASE_IDX 0
+#define mmGRBM_SOFT_RESET 0x0008
+#define mmGRBM_SOFT_RESET_BASE_IDX 0
+#define mmGRBM_GFX_CLKEN_CNTL 0x000c
+#define mmGRBM_GFX_CLKEN_CNTL_BASE_IDX 0
+#define mmGRBM_WAIT_IDLE_CLOCKS 0x000d
+#define mmGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0
+#define mmGRBM_STATUS_SE2 0x000e
+#define mmGRBM_STATUS_SE2_BASE_IDX 0
+#define mmGRBM_STATUS_SE3 0x000f
+#define mmGRBM_STATUS_SE3_BASE_IDX 0
+#define mmGRBM_READ_ERROR 0x0016
+#define mmGRBM_READ_ERROR_BASE_IDX 0
+#define mmGRBM_READ_ERROR2 0x0017
+#define mmGRBM_READ_ERROR2_BASE_IDX 0
+#define mmGRBM_INT_CNTL 0x0018
+#define mmGRBM_INT_CNTL_BASE_IDX 0
+#define mmGRBM_TRAP_OP 0x0019
+#define mmGRBM_TRAP_OP_BASE_IDX 0
+#define mmGRBM_TRAP_ADDR 0x001a
+#define mmGRBM_TRAP_ADDR_BASE_IDX 0
+#define mmGRBM_TRAP_ADDR_MSK 0x001b
+#define mmGRBM_TRAP_ADDR_MSK_BASE_IDX 0
+#define mmGRBM_TRAP_WD 0x001c
+#define mmGRBM_TRAP_WD_BASE_IDX 0
+#define mmGRBM_TRAP_WD_MSK 0x001d
+#define mmGRBM_TRAP_WD_MSK_BASE_IDX 0
+#define mmGRBM_DSM_BYPASS 0x001e
+#define mmGRBM_DSM_BYPASS_BASE_IDX 0
+#define mmGRBM_WRITE_ERROR 0x001f
+#define mmGRBM_WRITE_ERROR_BASE_IDX 0
+#define mmGRBM_IOV_ERROR 0x0020
+#define mmGRBM_IOV_ERROR_BASE_IDX 0
+#define mmGRBM_CHIP_REVISION 0x0021
+#define mmGRBM_CHIP_REVISION_BASE_IDX 0
+#define mmGRBM_GFX_CNTL 0x0022
+#define mmGRBM_GFX_CNTL_BASE_IDX 0
+#define mmGRBM_RSMU_CFG 0x0023
+#define mmGRBM_RSMU_CFG_BASE_IDX 0
+#define mmGRBM_IH_CREDIT 0x0024
+#define mmGRBM_IH_CREDIT_BASE_IDX 0
+#define mmGRBM_PWR_CNTL2 0x0025
+#define mmGRBM_PWR_CNTL2_BASE_IDX 0
+#define mmGRBM_UTCL2_INVAL_RANGE_START 0x0026
+#define mmGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0
+#define mmGRBM_UTCL2_INVAL_RANGE_END 0x0027
+#define mmGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0
+#define mmGRBM_RSMU_READ_ERROR 0x0028
+#define mmGRBM_RSMU_READ_ERROR_BASE_IDX 0
+#define mmGRBM_CHICKEN_BITS 0x0029
+#define mmGRBM_CHICKEN_BITS_BASE_IDX 0
+#define mmGRBM_FENCE_RANGE0 0x002a
+#define mmGRBM_FENCE_RANGE0_BASE_IDX 0
+#define mmGRBM_FENCE_RANGE1 0x002b
+#define mmGRBM_FENCE_RANGE1_BASE_IDX 0
+#define mmGRBM_NOWHERE 0x003f
+#define mmGRBM_NOWHERE_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG0 0x0040
+#define mmGRBM_SCRATCH_REG0_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG1 0x0041
+#define mmGRBM_SCRATCH_REG1_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG2 0x0042
+#define mmGRBM_SCRATCH_REG2_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG3 0x0043
+#define mmGRBM_SCRATCH_REG3_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG4 0x0044
+#define mmGRBM_SCRATCH_REG4_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG5 0x0045
+#define mmGRBM_SCRATCH_REG5_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG6 0x0046
+#define mmGRBM_SCRATCH_REG6_BASE_IDX 0
+#define mmGRBM_SCRATCH_REG7 0x0047
+#define mmGRBM_SCRATCH_REG7_BASE_IDX 0
+
+
+// addressBlock: gc_cpdec
+// base address: 0x8200
+#define mmCP_CPC_STATUS 0x0084
+#define mmCP_CPC_STATUS_BASE_IDX 0
+#define mmCP_CPC_BUSY_STAT 0x0085
+#define mmCP_CPC_BUSY_STAT_BASE_IDX 0
+#define mmCP_CPC_STALLED_STAT1 0x0086
+#define mmCP_CPC_STALLED_STAT1_BASE_IDX 0
+#define mmCP_CPF_STATUS 0x0087
+#define mmCP_CPF_STATUS_BASE_IDX 0
+#define mmCP_CPF_BUSY_STAT 0x0088
+#define mmCP_CPF_BUSY_STAT_BASE_IDX 0
+#define mmCP_CPF_STALLED_STAT1 0x0089
+#define mmCP_CPF_STALLED_STAT1_BASE_IDX 0
+#define mmCP_CPC_GRBM_FREE_COUNT 0x008b
+#define mmCP_CPC_GRBM_FREE_COUNT_BASE_IDX 0
+#define mmCP_MEC_CNTL 0x008d
+#define mmCP_MEC_CNTL_BASE_IDX 0
+#define mmCP_MEC_ME1_HEADER_DUMP 0x008e
+#define mmCP_MEC_ME1_HEADER_DUMP_BASE_IDX 0
+#define mmCP_MEC_ME2_HEADER_DUMP 0x008f
+#define mmCP_MEC_ME2_HEADER_DUMP_BASE_IDX 0
+#define mmCP_CPC_SCRATCH_INDEX 0x0090
+#define mmCP_CPC_SCRATCH_INDEX_BASE_IDX 0
+#define mmCP_CPC_SCRATCH_DATA 0x0091
+#define mmCP_CPC_SCRATCH_DATA_BASE_IDX 0
+#define mmCP_CPF_GRBM_FREE_COUNT 0x0092
+#define mmCP_CPF_GRBM_FREE_COUNT_BASE_IDX 0
+#define mmCP_CPC_HALT_HYST_COUNT 0x00a7
+#define mmCP_CPC_HALT_HYST_COUNT_BASE_IDX 0
+#define mmCP_CE_COMPARE_COUNT 0x00c0
+#define mmCP_CE_COMPARE_COUNT_BASE_IDX 0
+#define mmCP_CE_DE_COUNT 0x00c1
+#define mmCP_CE_DE_COUNT_BASE_IDX 0
+#define mmCP_DE_CE_COUNT 0x00c2
+#define mmCP_DE_CE_COUNT_BASE_IDX 0
+#define mmCP_DE_LAST_INVAL_COUNT 0x00c3
+#define mmCP_DE_LAST_INVAL_COUNT_BASE_IDX 0
+#define mmCP_DE_DE_COUNT 0x00c4
+#define mmCP_DE_DE_COUNT_BASE_IDX 0
+#define mmCP_STALLED_STAT3 0x019c
+#define mmCP_STALLED_STAT3_BASE_IDX 0
+#define mmCP_STALLED_STAT1 0x019d
+#define mmCP_STALLED_STAT1_BASE_IDX 0
+#define mmCP_STALLED_STAT2 0x019e
+#define mmCP_STALLED_STAT2_BASE_IDX 0
+#define mmCP_BUSY_STAT 0x019f
+#define mmCP_BUSY_STAT_BASE_IDX 0
+#define mmCP_STAT 0x01a0
+#define mmCP_STAT_BASE_IDX 0
+#define mmCP_ME_HEADER_DUMP 0x01a1
+#define mmCP_ME_HEADER_DUMP_BASE_IDX 0
+#define mmCP_PFP_HEADER_DUMP 0x01a2
+#define mmCP_PFP_HEADER_DUMP_BASE_IDX 0
+#define mmCP_GRBM_FREE_COUNT 0x01a3
+#define mmCP_GRBM_FREE_COUNT_BASE_IDX 0
+#define mmCP_CE_HEADER_DUMP 0x01a4
+#define mmCP_CE_HEADER_DUMP_BASE_IDX 0
+#define mmCP_PFP_INSTR_PNTR 0x01a5
+#define mmCP_PFP_INSTR_PNTR_BASE_IDX 0
+#define mmCP_ME_INSTR_PNTR 0x01a6
+#define mmCP_ME_INSTR_PNTR_BASE_IDX 0
+#define mmCP_CE_INSTR_PNTR 0x01a7
+#define mmCP_CE_INSTR_PNTR_BASE_IDX 0
+#define mmCP_MEC1_INSTR_PNTR 0x01a8
+#define mmCP_MEC1_INSTR_PNTR_BASE_IDX 0
+#define mmCP_MEC2_INSTR_PNTR 0x01a9
+#define mmCP_MEC2_INSTR_PNTR_BASE_IDX 0
+#define mmCP_CSF_STAT 0x01b4
+#define mmCP_CSF_STAT_BASE_IDX 0
+#define mmCP_ME_CNTL 0x01b6
+#define mmCP_ME_CNTL_BASE_IDX 0
+#define mmCP_CNTX_STAT 0x01b8
+#define mmCP_CNTX_STAT_BASE_IDX 0
+#define mmCP_ME_PREEMPTION 0x01b9
+#define mmCP_ME_PREEMPTION_BASE_IDX 0
+#define mmCP_ROQ_THRESHOLDS 0x01bc
+#define mmCP_ROQ_THRESHOLDS_BASE_IDX 0
+#define mmCP_MEQ_STQ_THRESHOLD 0x01bd
+#define mmCP_MEQ_STQ_THRESHOLD_BASE_IDX 0
+#define mmCP_RB2_RPTR 0x01be
+#define mmCP_RB2_RPTR_BASE_IDX 0
+#define mmCP_RB1_RPTR 0x01bf
+#define mmCP_RB1_RPTR_BASE_IDX 0
+#define mmCP_RB0_RPTR 0x01c0
+#define mmCP_RB0_RPTR_BASE_IDX 0
+#define mmCP_RB_RPTR 0x01c0
+#define mmCP_RB_RPTR_BASE_IDX 0
+#define mmCP_RB_WPTR_DELAY 0x01c1
+#define mmCP_RB_WPTR_DELAY_BASE_IDX 0
+#define mmCP_RB_WPTR_POLL_CNTL 0x01c2
+#define mmCP_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmCP_ROQ1_THRESHOLDS 0x01d5
+#define mmCP_ROQ1_THRESHOLDS_BASE_IDX 0
+#define mmCP_ROQ2_THRESHOLDS 0x01d6
+#define mmCP_ROQ2_THRESHOLDS_BASE_IDX 0
+#define mmCP_STQ_THRESHOLDS 0x01d7
+#define mmCP_STQ_THRESHOLDS_BASE_IDX 0
+#define mmCP_QUEUE_THRESHOLDS 0x01d8
+#define mmCP_QUEUE_THRESHOLDS_BASE_IDX 0
+#define mmCP_MEQ_THRESHOLDS 0x01d9
+#define mmCP_MEQ_THRESHOLDS_BASE_IDX 0
+#define mmCP_ROQ_AVAIL 0x01da
+#define mmCP_ROQ_AVAIL_BASE_IDX 0
+#define mmCP_STQ_AVAIL 0x01db
+#define mmCP_STQ_AVAIL_BASE_IDX 0
+#define mmCP_ROQ2_AVAIL 0x01dc
+#define mmCP_ROQ2_AVAIL_BASE_IDX 0
+#define mmCP_MEQ_AVAIL 0x01dd
+#define mmCP_MEQ_AVAIL_BASE_IDX 0
+#define mmCP_CMD_INDEX 0x01de
+#define mmCP_CMD_INDEX_BASE_IDX 0
+#define mmCP_CMD_DATA 0x01df
+#define mmCP_CMD_DATA_BASE_IDX 0
+#define mmCP_ROQ_RB_STAT 0x01e0
+#define mmCP_ROQ_RB_STAT_BASE_IDX 0
+#define mmCP_ROQ_IB1_STAT 0x01e1
+#define mmCP_ROQ_IB1_STAT_BASE_IDX 0
+#define mmCP_ROQ_IB2_STAT 0x01e2
+#define mmCP_ROQ_IB2_STAT_BASE_IDX 0
+#define mmCP_STQ_STAT 0x01e3
+#define mmCP_STQ_STAT_BASE_IDX 0
+#define mmCP_STQ_WR_STAT 0x01e4
+#define mmCP_STQ_WR_STAT_BASE_IDX 0
+#define mmCP_MEQ_STAT 0x01e5
+#define mmCP_MEQ_STAT_BASE_IDX 0
+#define mmCP_CEQ1_AVAIL 0x01e6
+#define mmCP_CEQ1_AVAIL_BASE_IDX 0
+#define mmCP_CEQ2_AVAIL 0x01e7
+#define mmCP_CEQ2_AVAIL_BASE_IDX 0
+#define mmCP_CE_ROQ_RB_STAT 0x01e8
+#define mmCP_CE_ROQ_RB_STAT_BASE_IDX 0
+#define mmCP_CE_ROQ_IB1_STAT 0x01e9
+#define mmCP_CE_ROQ_IB1_STAT_BASE_IDX 0
+#define mmCP_CE_ROQ_IB2_STAT 0x01ea
+#define mmCP_CE_ROQ_IB2_STAT_BASE_IDX 0
+
+
+// addressBlock: gc_padec
+// base address: 0x8800
+#define mmVGT_VTX_VECT_EJECT_REG 0x022c
+#define mmVGT_VTX_VECT_EJECT_REG_BASE_IDX 0
+#define mmVGT_DMA_DATA_FIFO_DEPTH 0x022d
+#define mmVGT_DMA_DATA_FIFO_DEPTH_BASE_IDX 0
+#define mmVGT_DMA_REQ_FIFO_DEPTH 0x022e
+#define mmVGT_DMA_REQ_FIFO_DEPTH_BASE_IDX 0
+#define mmVGT_DRAW_INIT_FIFO_DEPTH 0x022f
+#define mmVGT_DRAW_INIT_FIFO_DEPTH_BASE_IDX 0
+#define mmVGT_LAST_COPY_STATE 0x0230
+#define mmVGT_LAST_COPY_STATE_BASE_IDX 0
+#define mmVGT_CACHE_INVALIDATION 0x0231
+#define mmVGT_CACHE_INVALIDATION_BASE_IDX 0
+#define mmVGT_STRMOUT_DELAY 0x0233
+#define mmVGT_STRMOUT_DELAY_BASE_IDX 0
+#define mmVGT_FIFO_DEPTHS 0x0234
+#define mmVGT_FIFO_DEPTHS_BASE_IDX 0
+#define mmVGT_GS_VERTEX_REUSE 0x0235
+#define mmVGT_GS_VERTEX_REUSE_BASE_IDX 0
+#define mmVGT_MC_LAT_CNTL 0x0236
+#define mmVGT_MC_LAT_CNTL_BASE_IDX 0
+#define mmIA_CNTL_STATUS 0x0237
+#define mmIA_CNTL_STATUS_BASE_IDX 0
+#define mmVGT_CNTL_STATUS 0x023c
+#define mmVGT_CNTL_STATUS_BASE_IDX 0
+#define mmWD_CNTL_STATUS 0x023f
+#define mmWD_CNTL_STATUS_BASE_IDX 0
+#define mmCC_GC_PRIM_CONFIG 0x0240
+#define mmCC_GC_PRIM_CONFIG_BASE_IDX 0
+#define mmGC_USER_PRIM_CONFIG 0x0241
+#define mmGC_USER_PRIM_CONFIG_BASE_IDX 0
+#define mmWD_QOS 0x0242
+#define mmWD_QOS_BASE_IDX 0
+#define mmWD_UTCL1_CNTL 0x0243
+#define mmWD_UTCL1_CNTL_BASE_IDX 0
+#define mmWD_UTCL1_STATUS 0x0244
+#define mmWD_UTCL1_STATUS_BASE_IDX 0
+#define mmIA_UTCL1_CNTL 0x0246
+#define mmIA_UTCL1_CNTL_BASE_IDX 0
+#define mmIA_UTCL1_STATUS 0x0247
+#define mmIA_UTCL1_STATUS_BASE_IDX 0
+#define mmVGT_SYS_CONFIG 0x0263
+#define mmVGT_SYS_CONFIG_BASE_IDX 0
+#define mmVGT_VS_MAX_WAVE_ID 0x0268
+#define mmVGT_VS_MAX_WAVE_ID_BASE_IDX 0
+#define mmVGT_GS_MAX_WAVE_ID 0x0269
+#define mmVGT_GS_MAX_WAVE_ID_BASE_IDX 0
+#define mmGFX_PIPE_CONTROL 0x026d
+#define mmGFX_PIPE_CONTROL_BASE_IDX 0
+#define mmCC_GC_SHADER_ARRAY_CONFIG 0x026f
+#define mmCC_GC_SHADER_ARRAY_CONFIG_BASE_IDX 0
+#define mmGC_USER_SHADER_ARRAY_CONFIG 0x0270
+#define mmGC_USER_SHADER_ARRAY_CONFIG_BASE_IDX 0
+#define mmVGT_DMA_PRIMITIVE_TYPE 0x0271
+#define mmVGT_DMA_PRIMITIVE_TYPE_BASE_IDX 0
+#define mmVGT_DMA_CONTROL 0x0272
+#define mmVGT_DMA_CONTROL_BASE_IDX 0
+#define mmVGT_DMA_LS_HS_CONFIG 0x0273
+#define mmVGT_DMA_LS_HS_CONFIG_BASE_IDX 0
+#define mmWD_BUF_RESOURCE_1 0x0276
+#define mmWD_BUF_RESOURCE_1_BASE_IDX 0
+#define mmWD_BUF_RESOURCE_2 0x0277
+#define mmWD_BUF_RESOURCE_2_BASE_IDX 0
+#define mmPA_CL_CNTL_STATUS 0x0284
+#define mmPA_CL_CNTL_STATUS_BASE_IDX 0
+#define mmPA_CL_ENHANCE 0x0285
+#define mmPA_CL_ENHANCE_BASE_IDX 0
+#define mmPA_SU_CNTL_STATUS 0x0294
+#define mmPA_SU_CNTL_STATUS_BASE_IDX 0
+#define mmPA_SC_FIFO_DEPTH_CNTL 0x0295
+#define mmPA_SC_FIFO_DEPTH_CNTL_BASE_IDX 0
+#define mmPA_SC_P3D_TRAP_SCREEN_HV_LOCK 0x02c0
+#define mmPA_SC_P3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 0
+#define mmPA_SC_HP3D_TRAP_SCREEN_HV_LOCK 0x02c1
+#define mmPA_SC_HP3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 0
+#define mmPA_SC_TRAP_SCREEN_HV_LOCK 0x02c2
+#define mmPA_SC_TRAP_SCREEN_HV_LOCK_BASE_IDX 0
+#define mmPA_SC_FORCE_EOV_MAX_CNTS 0x02c9
+#define mmPA_SC_FORCE_EOV_MAX_CNTS_BASE_IDX 0
+#define mmPA_SC_BINNER_EVENT_CNTL_0 0x02cc
+#define mmPA_SC_BINNER_EVENT_CNTL_0_BASE_IDX 0
+#define mmPA_SC_BINNER_EVENT_CNTL_1 0x02cd
+#define mmPA_SC_BINNER_EVENT_CNTL_1_BASE_IDX 0
+#define mmPA_SC_BINNER_EVENT_CNTL_2 0x02ce
+#define mmPA_SC_BINNER_EVENT_CNTL_2_BASE_IDX 0
+#define mmPA_SC_BINNER_EVENT_CNTL_3 0x02cf
+#define mmPA_SC_BINNER_EVENT_CNTL_3_BASE_IDX 0
+#define mmPA_SC_BINNER_TIMEOUT_COUNTER 0x02d0
+#define mmPA_SC_BINNER_TIMEOUT_COUNTER_BASE_IDX 0
+#define mmPA_SC_BINNER_PERF_CNTL_0 0x02d1
+#define mmPA_SC_BINNER_PERF_CNTL_0_BASE_IDX 0
+#define mmPA_SC_BINNER_PERF_CNTL_1 0x02d2
+#define mmPA_SC_BINNER_PERF_CNTL_1_BASE_IDX 0
+#define mmPA_SC_BINNER_PERF_CNTL_2 0x02d3
+#define mmPA_SC_BINNER_PERF_CNTL_2_BASE_IDX 0
+#define mmPA_SC_BINNER_PERF_CNTL_3 0x02d4
+#define mmPA_SC_BINNER_PERF_CNTL_3_BASE_IDX 0
+#define mmPA_SC_ENHANCE_2 0x02dc
+#define mmPA_SC_ENHANCE_2_BASE_IDX 0
+#define mmPA_SC_FIFO_SIZE 0x02f3
+#define mmPA_SC_FIFO_SIZE_BASE_IDX 0
+#define mmPA_SC_IF_FIFO_SIZE 0x02f5
+#define mmPA_SC_IF_FIFO_SIZE_BASE_IDX 0
+#define mmPA_SC_PKR_WAVE_TABLE_CNTL 0x02f8
+#define mmPA_SC_PKR_WAVE_TABLE_CNTL_BASE_IDX 0
+#define mmPA_UTCL1_CNTL1 0x02f9
+#define mmPA_UTCL1_CNTL1_BASE_IDX 0
+#define mmPA_UTCL1_CNTL2 0x02fa
+#define mmPA_UTCL1_CNTL2_BASE_IDX 0
+#define mmPA_SIDEBAND_REQUEST_DELAYS 0x02fb
+#define mmPA_SIDEBAND_REQUEST_DELAYS_BASE_IDX 0
+#define mmPA_SC_ENHANCE 0x02fc
+#define mmPA_SC_ENHANCE_BASE_IDX 0
+#define mmPA_SC_ENHANCE_1 0x02fd
+#define mmPA_SC_ENHANCE_1_BASE_IDX 0
+#define mmPA_SC_DSM_CNTL 0x02fe
+#define mmPA_SC_DSM_CNTL_BASE_IDX 0
+#define mmPA_SC_TILE_STEERING_CREST_OVERRIDE 0x02ff
+#define mmPA_SC_TILE_STEERING_CREST_OVERRIDE_BASE_IDX 0
+
+
+// addressBlock: gc_sqdec
+// base address: 0x8c00
+#define mmSQ_CONFIG 0x0300
+#define mmSQ_CONFIG_BASE_IDX 0
+#define mmSQC_CONFIG 0x0301
+#define mmSQC_CONFIG_BASE_IDX 0
+#define mmLDS_CONFIG 0x0302
+#define mmLDS_CONFIG_BASE_IDX 0
+#define mmSQ_RANDOM_WAVE_PRI 0x0303
+#define mmSQ_RANDOM_WAVE_PRI_BASE_IDX 0
+#define mmSQ_REG_CREDITS 0x0304
+#define mmSQ_REG_CREDITS_BASE_IDX 0
+#define mmSQ_FIFO_SIZES 0x0305
+#define mmSQ_FIFO_SIZES_BASE_IDX 0
+#define mmSQ_DSM_CNTL 0x0306
+#define mmSQ_DSM_CNTL_BASE_IDX 0
+#define mmSQ_DSM_CNTL2 0x0307
+#define mmSQ_DSM_CNTL2_BASE_IDX 0
+#define mmSQ_RUNTIME_CONFIG 0x0308
+#define mmSQ_RUNTIME_CONFIG_BASE_IDX 0
+#define mmSH_MEM_BASES 0x030a
+#define mmSH_MEM_BASES_BASE_IDX 0
+#define mmSH_MEM_CONFIG 0x030d
+#define mmSH_MEM_CONFIG_BASE_IDX 0
+#define mmCC_GC_SHADER_RATE_CONFIG 0x0312
+#define mmCC_GC_SHADER_RATE_CONFIG_BASE_IDX 0
+#define mmGC_USER_SHADER_RATE_CONFIG 0x0313
+#define mmGC_USER_SHADER_RATE_CONFIG_BASE_IDX 0
+#define mmSQ_INTERRUPT_AUTO_MASK 0x0314
+#define mmSQ_INTERRUPT_AUTO_MASK_BASE_IDX 0
+#define mmSQ_INTERRUPT_MSG_CTRL 0x0315
+#define mmSQ_INTERRUPT_MSG_CTRL_BASE_IDX 0
+#define mmSQ_UTCL1_CNTL1 0x0317
+#define mmSQ_UTCL1_CNTL1_BASE_IDX 0
+#define mmSQ_UTCL1_CNTL2 0x0318
+#define mmSQ_UTCL1_CNTL2_BASE_IDX 0
+#define mmSQ_UTCL1_STATUS 0x0319
+#define mmSQ_UTCL1_STATUS_BASE_IDX 0
+#define mmSQ_SHADER_TBA_LO 0x031c
+#define mmSQ_SHADER_TBA_LO_BASE_IDX 0
+#define mmSQ_SHADER_TBA_HI 0x031d
+#define mmSQ_SHADER_TBA_HI_BASE_IDX 0
+#define mmSQ_SHADER_TMA_LO 0x031e
+#define mmSQ_SHADER_TMA_LO_BASE_IDX 0
+#define mmSQ_SHADER_TMA_HI 0x031f
+#define mmSQ_SHADER_TMA_HI_BASE_IDX 0
+#define mmSQC_DSM_CNTL 0x0320
+#define mmSQC_DSM_CNTL_BASE_IDX 0
+#define mmSQC_DSM_CNTLA 0x0321
+#define mmSQC_DSM_CNTLA_BASE_IDX 0
+#define mmSQC_DSM_CNTLB 0x0322
+#define mmSQC_DSM_CNTLB_BASE_IDX 0
+#define mmSQC_DSM_CNTL2 0x0325
+#define mmSQC_DSM_CNTL2_BASE_IDX 0
+#define mmSQC_DSM_CNTL2A 0x0326
+#define mmSQC_DSM_CNTL2A_BASE_IDX 0
+#define mmSQC_DSM_CNTL2B 0x0327
+#define mmSQC_DSM_CNTL2B_BASE_IDX 0
+#define mmSQ_REG_TIMESTAMP 0x0374
+#define mmSQ_REG_TIMESTAMP_BASE_IDX 0
+#define mmSQ_CMD_TIMESTAMP 0x0375
+#define mmSQ_CMD_TIMESTAMP_BASE_IDX 0
+#define mmSQ_IND_INDEX 0x0378
+#define mmSQ_IND_INDEX_BASE_IDX 0
+#define mmSQ_IND_DATA 0x0379
+#define mmSQ_IND_DATA_BASE_IDX 0
+#define mmSQ_CMD 0x037b
+#define mmSQ_CMD_BASE_IDX 0
+#define mmSQ_TIME_HI 0x037c
+#define mmSQ_TIME_HI_BASE_IDX 0
+#define mmSQ_TIME_LO 0x037d
+#define mmSQ_TIME_LO_BASE_IDX 0
+#define mmSQ_DS_0 0x037f
+#define mmSQ_DS_0_BASE_IDX 0
+#define mmSQ_DS_1 0x037f
+#define mmSQ_DS_1_BASE_IDX 0
+#define mmSQ_EXP_0 0x037f
+#define mmSQ_EXP_0_BASE_IDX 0
+#define mmSQ_EXP_1 0x037f
+#define mmSQ_EXP_1_BASE_IDX 0
+#define mmSQ_FLAT_0 0x037f
+#define mmSQ_FLAT_0_BASE_IDX 0
+#define mmSQ_FLAT_1 0x037f
+#define mmSQ_FLAT_1_BASE_IDX 0
+#define mmSQ_GLBL_0 0x037f
+#define mmSQ_GLBL_0_BASE_IDX 0
+#define mmSQ_GLBL_1 0x037f
+#define mmSQ_GLBL_1_BASE_IDX 0
+#define mmSQ_INST 0x037f
+#define mmSQ_INST_BASE_IDX 0
+#define mmSQ_MIMG_0 0x037f
+#define mmSQ_MIMG_0_BASE_IDX 0
+#define mmSQ_MIMG_1 0x037f
+#define mmSQ_MIMG_1_BASE_IDX 0
+#define mmSQ_MTBUF_0 0x037f
+#define mmSQ_MTBUF_0_BASE_IDX 0
+#define mmSQ_MTBUF_1 0x037f
+#define mmSQ_MTBUF_1_BASE_IDX 0
+#define mmSQ_MUBUF_0 0x037f
+#define mmSQ_MUBUF_0_BASE_IDX 0
+#define mmSQ_MUBUF_1 0x037f
+#define mmSQ_MUBUF_1_BASE_IDX 0
+#define mmSQ_SCRATCH_0 0x037f
+#define mmSQ_SCRATCH_0_BASE_IDX 0
+#define mmSQ_SCRATCH_1 0x037f
+#define mmSQ_SCRATCH_1_BASE_IDX 0
+#define mmSQ_SMEM_0 0x037f
+#define mmSQ_SMEM_0_BASE_IDX 0
+#define mmSQ_SMEM_1 0x037f
+#define mmSQ_SMEM_1_BASE_IDX 0
+#define mmSQ_SOP1 0x037f
+#define mmSQ_SOP1_BASE_IDX 0
+#define mmSQ_SOP2 0x037f
+#define mmSQ_SOP2_BASE_IDX 0
+#define mmSQ_SOPC 0x037f
+#define mmSQ_SOPC_BASE_IDX 0
+#define mmSQ_SOPK 0x037f
+#define mmSQ_SOPK_BASE_IDX 0
+#define mmSQ_SOPP 0x037f
+#define mmSQ_SOPP_BASE_IDX 0
+#define mmSQ_VINTRP 0x037f
+#define mmSQ_VINTRP_BASE_IDX 0
+#define mmSQ_VOP1 0x037f
+#define mmSQ_VOP1_BASE_IDX 0
+#define mmSQ_VOP2 0x037f
+#define mmSQ_VOP2_BASE_IDX 0
+#define mmSQ_VOP3P_0 0x037f
+#define mmSQ_VOP3P_0_BASE_IDX 0
+#define mmSQ_VOP3P_1 0x037f
+#define mmSQ_VOP3P_1_BASE_IDX 0
+#define mmSQ_VOP3_0 0x037f
+#define mmSQ_VOP3_0_BASE_IDX 0
+#define mmSQ_VOP3_0_SDST_ENC 0x037f
+#define mmSQ_VOP3_0_SDST_ENC_BASE_IDX 0
+#define mmSQ_VOP3_1 0x037f
+#define mmSQ_VOP3_1_BASE_IDX 0
+#define mmSQ_VOPC 0x037f
+#define mmSQ_VOPC_BASE_IDX 0
+#define mmSQ_VOP_DPP 0x037f
+#define mmSQ_VOP_DPP_BASE_IDX 0
+#define mmSQ_VOP_SDWA 0x037f
+#define mmSQ_VOP_SDWA_BASE_IDX 0
+#define mmSQ_VOP_SDWA_SDST_ENC 0x037f
+#define mmSQ_VOP_SDWA_SDST_ENC_BASE_IDX 0
+#define mmSQ_LB_CTR_CTRL 0x0398
+#define mmSQ_LB_CTR_CTRL_BASE_IDX 0
+#define mmSQ_LB_DATA0 0x0399
+#define mmSQ_LB_DATA0_BASE_IDX 0
+#define mmSQ_LB_DATA1 0x039a
+#define mmSQ_LB_DATA1_BASE_IDX 0
+#define mmSQ_LB_DATA2 0x039b
+#define mmSQ_LB_DATA2_BASE_IDX 0
+#define mmSQ_LB_DATA3 0x039c
+#define mmSQ_LB_DATA3_BASE_IDX 0
+#define mmSQ_LB_CTR_SEL 0x039d
+#define mmSQ_LB_CTR_SEL_BASE_IDX 0
+#define mmSQ_LB_CTR0_CU 0x039e
+#define mmSQ_LB_CTR0_CU_BASE_IDX 0
+#define mmSQ_LB_CTR1_CU 0x039f
+#define mmSQ_LB_CTR1_CU_BASE_IDX 0
+#define mmSQ_LB_CTR2_CU 0x03a0
+#define mmSQ_LB_CTR2_CU_BASE_IDX 0
+#define mmSQ_LB_CTR3_CU 0x03a1
+#define mmSQ_LB_CTR3_CU_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_CMN 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_CMN_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_EVENT 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_EVENT_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_INST 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_INST_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_INST_PC_1_OF_2 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_INST_PC_1_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_ISSUE 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_ISSUE_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_MISC 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_MISC_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_PERF_1_OF_2 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_PERF_1_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_REG_1_OF_2 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_REG_1_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_REG_2_OF_2 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_REG_2_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_REG_CS_1_OF_2 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_REG_CS_1_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_REG_CS_2_OF_2 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_REG_CS_2_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_WAVE 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_WAVE_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_WAVE_START 0x03b0
+#define mmSQ_THREAD_TRACE_WORD_WAVE_START_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_INST_PC_2_OF_2 0x03b1
+#define mmSQ_THREAD_TRACE_WORD_INST_PC_2_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2 0x03b1
+#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_PERF_2_OF_2 0x03b1
+#define mmSQ_THREAD_TRACE_WORD_PERF_2_OF_2_BASE_IDX 0
+#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2 0x03b1
+#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2_BASE_IDX 0
+#define mmSQ_WREXEC_EXEC_HI 0x03b1
+#define mmSQ_WREXEC_EXEC_HI_BASE_IDX 0
+#define mmSQ_WREXEC_EXEC_LO 0x03b1
+#define mmSQ_WREXEC_EXEC_LO_BASE_IDX 0
+#define mmSQ_BUF_RSRC_WORD0 0x03c0
+#define mmSQ_BUF_RSRC_WORD0_BASE_IDX 0
+#define mmSQ_BUF_RSRC_WORD1 0x03c1
+#define mmSQ_BUF_RSRC_WORD1_BASE_IDX 0
+#define mmSQ_BUF_RSRC_WORD2 0x03c2
+#define mmSQ_BUF_RSRC_WORD2_BASE_IDX 0
+#define mmSQ_BUF_RSRC_WORD3 0x03c3
+#define mmSQ_BUF_RSRC_WORD3_BASE_IDX 0
+#define mmSQ_IMG_RSRC_WORD0 0x03c4
+#define mmSQ_IMG_RSRC_WORD0_BASE_IDX 0
+#define mmSQ_IMG_RSRC_WORD1 0x03c5
+#define mmSQ_IMG_RSRC_WORD1_BASE_IDX 0
+#define mmSQ_IMG_RSRC_WORD2 0x03c6
+#define mmSQ_IMG_RSRC_WORD2_BASE_IDX 0
+#define mmSQ_IMG_RSRC_WORD3 0x03c7
+#define mmSQ_IMG_RSRC_WORD3_BASE_IDX 0
+#define mmSQ_IMG_RSRC_WORD4 0x03c8
+#define mmSQ_IMG_RSRC_WORD4_BASE_IDX 0
+#define mmSQ_IMG_RSRC_WORD5 0x03c9
+#define mmSQ_IMG_RSRC_WORD5_BASE_IDX 0
+#define mmSQ_IMG_RSRC_WORD6 0x03ca
+#define mmSQ_IMG_RSRC_WORD6_BASE_IDX 0
+#define mmSQ_IMG_RSRC_WORD7 0x03cb
+#define mmSQ_IMG_RSRC_WORD7_BASE_IDX 0
+#define mmSQ_IMG_SAMP_WORD0 0x03cc
+#define mmSQ_IMG_SAMP_WORD0_BASE_IDX 0
+#define mmSQ_IMG_SAMP_WORD1 0x03cd
+#define mmSQ_IMG_SAMP_WORD1_BASE_IDX 0
+#define mmSQ_IMG_SAMP_WORD2 0x03ce
+#define mmSQ_IMG_SAMP_WORD2_BASE_IDX 0
+#define mmSQ_IMG_SAMP_WORD3 0x03cf
+#define mmSQ_IMG_SAMP_WORD3_BASE_IDX 0
+#define mmSQ_FLAT_SCRATCH_WORD0 0x03d0
+#define mmSQ_FLAT_SCRATCH_WORD0_BASE_IDX 0
+#define mmSQ_FLAT_SCRATCH_WORD1 0x03d1
+#define mmSQ_FLAT_SCRATCH_WORD1_BASE_IDX 0
+#define mmSQ_M0_GPR_IDX_WORD 0x03d2
+#define mmSQ_M0_GPR_IDX_WORD_BASE_IDX 0
+#define mmSQC_ICACHE_UTCL1_CNTL1 0x03d3
+#define mmSQC_ICACHE_UTCL1_CNTL1_BASE_IDX 0
+#define mmSQC_ICACHE_UTCL1_CNTL2 0x03d4
+#define mmSQC_ICACHE_UTCL1_CNTL2_BASE_IDX 0
+#define mmSQC_DCACHE_UTCL1_CNTL1 0x03d5
+#define mmSQC_DCACHE_UTCL1_CNTL1_BASE_IDX 0
+#define mmSQC_DCACHE_UTCL1_CNTL2 0x03d6
+#define mmSQC_DCACHE_UTCL1_CNTL2_BASE_IDX 0
+#define mmSQC_ICACHE_UTCL1_STATUS 0x03d7
+#define mmSQC_ICACHE_UTCL1_STATUS_BASE_IDX 0
+#define mmSQC_DCACHE_UTCL1_STATUS 0x03d8
+#define mmSQC_DCACHE_UTCL1_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_shsdec
+// base address: 0x9000
+#define mmSX_DEBUG_1 0x0419
+#define mmSX_DEBUG_1_BASE_IDX 0
+#define mmSPI_PS_MAX_WAVE_ID 0x043a
+#define mmSPI_PS_MAX_WAVE_ID_BASE_IDX 0
+#define mmSPI_START_PHASE 0x043b
+#define mmSPI_START_PHASE_BASE_IDX 0
+#define mmSPI_GFX_CNTL 0x043c
+#define mmSPI_GFX_CNTL_BASE_IDX 0
+#define mmSPI_DSM_CNTL 0x0443
+#define mmSPI_DSM_CNTL_BASE_IDX 0
+#define mmSPI_DSM_CNTL2 0x0444
+#define mmSPI_DSM_CNTL2_BASE_IDX 0
+#define mmSPI_CONFIG_PS_CU_EN 0x0452
+#define mmSPI_CONFIG_PS_CU_EN_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_CNTL 0x04aa
+#define mmSPI_WF_LIFETIME_CNTL_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_0 0x04ab
+#define mmSPI_WF_LIFETIME_LIMIT_0_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_1 0x04ac
+#define mmSPI_WF_LIFETIME_LIMIT_1_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_2 0x04ad
+#define mmSPI_WF_LIFETIME_LIMIT_2_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_3 0x04ae
+#define mmSPI_WF_LIFETIME_LIMIT_3_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_4 0x04af
+#define mmSPI_WF_LIFETIME_LIMIT_4_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_5 0x04b0
+#define mmSPI_WF_LIFETIME_LIMIT_5_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_6 0x04b1
+#define mmSPI_WF_LIFETIME_LIMIT_6_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_7 0x04b2
+#define mmSPI_WF_LIFETIME_LIMIT_7_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_8 0x04b3
+#define mmSPI_WF_LIFETIME_LIMIT_8_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_LIMIT_9 0x04b4
+#define mmSPI_WF_LIFETIME_LIMIT_9_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_0 0x04b5
+#define mmSPI_WF_LIFETIME_STATUS_0_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_1 0x04b6
+#define mmSPI_WF_LIFETIME_STATUS_1_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_2 0x04b7
+#define mmSPI_WF_LIFETIME_STATUS_2_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_3 0x04b8
+#define mmSPI_WF_LIFETIME_STATUS_3_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_4 0x04b9
+#define mmSPI_WF_LIFETIME_STATUS_4_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_5 0x04ba
+#define mmSPI_WF_LIFETIME_STATUS_5_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_6 0x04bb
+#define mmSPI_WF_LIFETIME_STATUS_6_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_7 0x04bc
+#define mmSPI_WF_LIFETIME_STATUS_7_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_8 0x04bd
+#define mmSPI_WF_LIFETIME_STATUS_8_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_9 0x04be
+#define mmSPI_WF_LIFETIME_STATUS_9_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_10 0x04bf
+#define mmSPI_WF_LIFETIME_STATUS_10_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_11 0x04c0
+#define mmSPI_WF_LIFETIME_STATUS_11_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_12 0x04c1
+#define mmSPI_WF_LIFETIME_STATUS_12_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_13 0x04c2
+#define mmSPI_WF_LIFETIME_STATUS_13_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_14 0x04c3
+#define mmSPI_WF_LIFETIME_STATUS_14_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_15 0x04c4
+#define mmSPI_WF_LIFETIME_STATUS_15_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_16 0x04c5
+#define mmSPI_WF_LIFETIME_STATUS_16_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_17 0x04c6
+#define mmSPI_WF_LIFETIME_STATUS_17_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_18 0x04c7
+#define mmSPI_WF_LIFETIME_STATUS_18_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_19 0x04c8
+#define mmSPI_WF_LIFETIME_STATUS_19_BASE_IDX 0
+#define mmSPI_WF_LIFETIME_STATUS_20 0x04c9
+#define mmSPI_WF_LIFETIME_STATUS_20_BASE_IDX 0
+#define mmSPI_LB_CTR_CTRL 0x04d4
+#define mmSPI_LB_CTR_CTRL_BASE_IDX 0
+#define mmSPI_LB_CU_MASK 0x04d5
+#define mmSPI_LB_CU_MASK_BASE_IDX 0
+#define mmSPI_LB_DATA_REG 0x04d6
+#define mmSPI_LB_DATA_REG_BASE_IDX 0
+#define mmSPI_PG_ENABLE_STATIC_CU_MASK 0x04d7
+#define mmSPI_PG_ENABLE_STATIC_CU_MASK_BASE_IDX 0
+#define mmSPI_GDS_CREDITS 0x04d8
+#define mmSPI_GDS_CREDITS_BASE_IDX 0
+#define mmSPI_SX_EXPORT_BUFFER_SIZES 0x04d9
+#define mmSPI_SX_EXPORT_BUFFER_SIZES_BASE_IDX 0
+#define mmSPI_SX_SCOREBOARD_BUFFER_SIZES 0x04da
+#define mmSPI_SX_SCOREBOARD_BUFFER_SIZES_BASE_IDX 0
+#define mmSPI_CSQ_WF_ACTIVE_STATUS 0x04db
+#define mmSPI_CSQ_WF_ACTIVE_STATUS_BASE_IDX 0
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_0 0x04dc
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_0_BASE_IDX 0
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_1 0x04dd
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_1_BASE_IDX 0
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_2 0x04de
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_2_BASE_IDX 0
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_3 0x04df
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_3_BASE_IDX 0
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_4 0x04e0
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_4_BASE_IDX 0
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_5 0x04e1
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_5_BASE_IDX 0
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_6 0x04e2
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_6_BASE_IDX 0
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_7 0x04e3
+#define mmSPI_CSQ_WF_ACTIVE_COUNT_7_BASE_IDX 0
+#define mmSPI_LB_DATA_WAVES 0x04e4
+#define mmSPI_LB_DATA_WAVES_BASE_IDX 0
+#define mmSPI_LB_DATA_PERCU_WAVE_HSGS 0x04e5
+#define mmSPI_LB_DATA_PERCU_WAVE_HSGS_BASE_IDX 0
+#define mmSPI_LB_DATA_PERCU_WAVE_VSPS 0x04e6
+#define mmSPI_LB_DATA_PERCU_WAVE_VSPS_BASE_IDX 0
+#define mmSPI_LB_DATA_PERCU_WAVE_CS 0x04e7
+#define mmSPI_LB_DATA_PERCU_WAVE_CS_BASE_IDX 0
+#define mmSPI_P0_TRAP_SCREEN_PSBA_LO 0x04ec
+#define mmSPI_P0_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define mmSPI_P0_TRAP_SCREEN_PSBA_HI 0x04ed
+#define mmSPI_P0_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define mmSPI_P0_TRAP_SCREEN_PSMA_LO 0x04ee
+#define mmSPI_P0_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define mmSPI_P0_TRAP_SCREEN_PSMA_HI 0x04ef
+#define mmSPI_P0_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define mmSPI_P0_TRAP_SCREEN_GPR_MIN 0x04f0
+#define mmSPI_P0_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+#define mmSPI_P1_TRAP_SCREEN_PSBA_LO 0x04f1
+#define mmSPI_P1_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define mmSPI_P1_TRAP_SCREEN_PSBA_HI 0x04f2
+#define mmSPI_P1_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define mmSPI_P1_TRAP_SCREEN_PSMA_LO 0x04f3
+#define mmSPI_P1_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define mmSPI_P1_TRAP_SCREEN_PSMA_HI 0x04f4
+#define mmSPI_P1_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define mmSPI_P1_TRAP_SCREEN_GPR_MIN 0x04f5
+#define mmSPI_P1_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define mmTD_CNTL 0x0525
+#define mmTD_CNTL_BASE_IDX 0
+#define mmTD_STATUS 0x0526
+#define mmTD_STATUS_BASE_IDX 0
+#define mmTD_DSM_CNTL 0x052f
+#define mmTD_DSM_CNTL_BASE_IDX 0
+#define mmTD_DSM_CNTL2 0x0530
+#define mmTD_DSM_CNTL2_BASE_IDX 0
+#define mmTD_SCRATCH 0x0533
+#define mmTD_SCRATCH_BASE_IDX 0
+#define mmTA_CNTL 0x0541
+#define mmTA_CNTL_BASE_IDX 0
+#define mmTA_CNTL_AUX 0x0542
+#define mmTA_CNTL_AUX_BASE_IDX 0
+#define mmTA_RESERVED_010C 0x0543
+#define mmTA_RESERVED_010C_BASE_IDX 0
+#define mmTA_STATUS 0x0548
+#define mmTA_STATUS_BASE_IDX 0
+#define mmTA_SCRATCH 0x0564
+#define mmTA_SCRATCH_BASE_IDX 0
+
+
+// addressBlock: gc_gdsdec
+// base address: 0x9700
+#define mmGDS_CONFIG 0x05c0
+#define mmGDS_CONFIG_BASE_IDX 0
+#define mmGDS_CNTL_STATUS 0x05c1
+#define mmGDS_CNTL_STATUS_BASE_IDX 0
+#define mmGDS_ENHANCE2 0x05c2
+#define mmGDS_ENHANCE2_BASE_IDX 0
+#define mmGDS_PROTECTION_FAULT 0x05c3
+#define mmGDS_PROTECTION_FAULT_BASE_IDX 0
+#define mmGDS_VM_PROTECTION_FAULT 0x05c4
+#define mmGDS_VM_PROTECTION_FAULT_BASE_IDX 0
+#define mmGDS_DSM_CNTL 0x05ca
+#define mmGDS_DSM_CNTL_BASE_IDX 0
+#define mmGDS_DSM_CNTL2 0x05cd
+#define mmGDS_DSM_CNTL2_BASE_IDX 0
+#define mmGDS_WD_GDS_CSB 0x05ce
+#define mmGDS_WD_GDS_CSB_BASE_IDX 0
+
+
+// addressBlock: gc_rbdec
+// base address: 0x9800
+#define mmDB_DEBUG 0x060c
+#define mmDB_DEBUG_BASE_IDX 0
+#define mmDB_DEBUG2 0x060d
+#define mmDB_DEBUG2_BASE_IDX 0
+#define mmDB_DEBUG3 0x060e
+#define mmDB_DEBUG3_BASE_IDX 0
+#define mmDB_DEBUG4 0x060f
+#define mmDB_DEBUG4_BASE_IDX 0
+#define mmDB_CREDIT_LIMIT 0x0614
+#define mmDB_CREDIT_LIMIT_BASE_IDX 0
+#define mmDB_WATERMARKS 0x0615
+#define mmDB_WATERMARKS_BASE_IDX 0
+#define mmDB_SUBTILE_CONTROL 0x0616
+#define mmDB_SUBTILE_CONTROL_BASE_IDX 0
+#define mmDB_FREE_CACHELINES 0x0617
+#define mmDB_FREE_CACHELINES_BASE_IDX 0
+#define mmDB_FIFO_DEPTH1 0x0618
+#define mmDB_FIFO_DEPTH1_BASE_IDX 0
+#define mmDB_FIFO_DEPTH2 0x0619
+#define mmDB_FIFO_DEPTH2_BASE_IDX 0
+#define mmDB_EXCEPTION_CONTROL 0x061a
+#define mmDB_EXCEPTION_CONTROL_BASE_IDX 0
+#define mmDB_RING_CONTROL 0x061b
+#define mmDB_RING_CONTROL_BASE_IDX 0
+#define mmDB_MEM_ARB_WATERMARKS 0x061c
+#define mmDB_MEM_ARB_WATERMARKS_BASE_IDX 0
+#define mmDB_RMI_CACHE_POLICY 0x061e
+#define mmDB_RMI_CACHE_POLICY_BASE_IDX 0
+#define mmDB_DFSM_CONFIG 0x0630
+#define mmDB_DFSM_CONFIG_BASE_IDX 0
+#define mmDB_DFSM_WATERMARK 0x0631
+#define mmDB_DFSM_WATERMARK_BASE_IDX 0
+#define mmDB_DFSM_TILES_IN_FLIGHT 0x0632
+#define mmDB_DFSM_TILES_IN_FLIGHT_BASE_IDX 0
+#define mmDB_DFSM_PRIMS_IN_FLIGHT 0x0633
+#define mmDB_DFSM_PRIMS_IN_FLIGHT_BASE_IDX 0
+#define mmDB_DFSM_WATCHDOG 0x0634
+#define mmDB_DFSM_WATCHDOG_BASE_IDX 0
+#define mmDB_DFSM_FLUSH_ENABLE 0x0635
+#define mmDB_DFSM_FLUSH_ENABLE_BASE_IDX 0
+#define mmDB_DFSM_FLUSH_AUX_EVENT 0x0636
+#define mmDB_DFSM_FLUSH_AUX_EVENT_BASE_IDX 0
+#define mmCC_RB_REDUNDANCY 0x063c
+#define mmCC_RB_REDUNDANCY_BASE_IDX 0
+#define mmCC_RB_BACKEND_DISABLE 0x063d
+#define mmCC_RB_BACKEND_DISABLE_BASE_IDX 0
+#define mmGB_ADDR_CONFIG 0x063e
+#define mmGB_ADDR_CONFIG_BASE_IDX 0
+#define mmGB_BACKEND_MAP 0x063f
+#define mmGB_BACKEND_MAP_BASE_IDX 0
+#define mmGB_GPU_ID 0x0640
+#define mmGB_GPU_ID_BASE_IDX 0
+#define mmCC_RB_DAISY_CHAIN 0x0641
+#define mmCC_RB_DAISY_CHAIN_BASE_IDX 0
+#define mmGB_ADDR_CONFIG_READ 0x0642
+#define mmGB_ADDR_CONFIG_READ_BASE_IDX 0
+#define mmGB_TILE_MODE0 0x0644
+#define mmGB_TILE_MODE0_BASE_IDX 0
+#define mmGB_TILE_MODE1 0x0645
+#define mmGB_TILE_MODE1_BASE_IDX 0
+#define mmGB_TILE_MODE2 0x0646
+#define mmGB_TILE_MODE2_BASE_IDX 0
+#define mmGB_TILE_MODE3 0x0647
+#define mmGB_TILE_MODE3_BASE_IDX 0
+#define mmGB_TILE_MODE4 0x0648
+#define mmGB_TILE_MODE4_BASE_IDX 0
+#define mmGB_TILE_MODE5 0x0649
+#define mmGB_TILE_MODE5_BASE_IDX 0
+#define mmGB_TILE_MODE6 0x064a
+#define mmGB_TILE_MODE6_BASE_IDX 0
+#define mmGB_TILE_MODE7 0x064b
+#define mmGB_TILE_MODE7_BASE_IDX 0
+#define mmGB_TILE_MODE8 0x064c
+#define mmGB_TILE_MODE8_BASE_IDX 0
+#define mmGB_TILE_MODE9 0x064d
+#define mmGB_TILE_MODE9_BASE_IDX 0
+#define mmGB_TILE_MODE10 0x064e
+#define mmGB_TILE_MODE10_BASE_IDX 0
+#define mmGB_TILE_MODE11 0x064f
+#define mmGB_TILE_MODE11_BASE_IDX 0
+#define mmGB_TILE_MODE12 0x0650
+#define mmGB_TILE_MODE12_BASE_IDX 0
+#define mmGB_TILE_MODE13 0x0651
+#define mmGB_TILE_MODE13_BASE_IDX 0
+#define mmGB_TILE_MODE14 0x0652
+#define mmGB_TILE_MODE14_BASE_IDX 0
+#define mmGB_TILE_MODE15 0x0653
+#define mmGB_TILE_MODE15_BASE_IDX 0
+#define mmGB_TILE_MODE16 0x0654
+#define mmGB_TILE_MODE16_BASE_IDX 0
+#define mmGB_TILE_MODE17 0x0655
+#define mmGB_TILE_MODE17_BASE_IDX 0
+#define mmGB_TILE_MODE18 0x0656
+#define mmGB_TILE_MODE18_BASE_IDX 0
+#define mmGB_TILE_MODE19 0x0657
+#define mmGB_TILE_MODE19_BASE_IDX 0
+#define mmGB_TILE_MODE20 0x0658
+#define mmGB_TILE_MODE20_BASE_IDX 0
+#define mmGB_TILE_MODE21 0x0659
+#define mmGB_TILE_MODE21_BASE_IDX 0
+#define mmGB_TILE_MODE22 0x065a
+#define mmGB_TILE_MODE22_BASE_IDX 0
+#define mmGB_TILE_MODE23 0x065b
+#define mmGB_TILE_MODE23_BASE_IDX 0
+#define mmGB_TILE_MODE24 0x065c
+#define mmGB_TILE_MODE24_BASE_IDX 0
+#define mmGB_TILE_MODE25 0x065d
+#define mmGB_TILE_MODE25_BASE_IDX 0
+#define mmGB_TILE_MODE26 0x065e
+#define mmGB_TILE_MODE26_BASE_IDX 0
+#define mmGB_TILE_MODE27 0x065f
+#define mmGB_TILE_MODE27_BASE_IDX 0
+#define mmGB_TILE_MODE28 0x0660
+#define mmGB_TILE_MODE28_BASE_IDX 0
+#define mmGB_TILE_MODE29 0x0661
+#define mmGB_TILE_MODE29_BASE_IDX 0
+#define mmGB_TILE_MODE30 0x0662
+#define mmGB_TILE_MODE30_BASE_IDX 0
+#define mmGB_TILE_MODE31 0x0663
+#define mmGB_TILE_MODE31_BASE_IDX 0
+#define mmGB_MACROTILE_MODE0 0x0664
+#define mmGB_MACROTILE_MODE0_BASE_IDX 0
+#define mmGB_MACROTILE_MODE1 0x0665
+#define mmGB_MACROTILE_MODE1_BASE_IDX 0
+#define mmGB_MACROTILE_MODE2 0x0666
+#define mmGB_MACROTILE_MODE2_BASE_IDX 0
+#define mmGB_MACROTILE_MODE3 0x0667
+#define mmGB_MACROTILE_MODE3_BASE_IDX 0
+#define mmGB_MACROTILE_MODE4 0x0668
+#define mmGB_MACROTILE_MODE4_BASE_IDX 0
+#define mmGB_MACROTILE_MODE5 0x0669
+#define mmGB_MACROTILE_MODE5_BASE_IDX 0
+#define mmGB_MACROTILE_MODE6 0x066a
+#define mmGB_MACROTILE_MODE6_BASE_IDX 0
+#define mmGB_MACROTILE_MODE7 0x066b
+#define mmGB_MACROTILE_MODE7_BASE_IDX 0
+#define mmGB_MACROTILE_MODE8 0x066c
+#define mmGB_MACROTILE_MODE8_BASE_IDX 0
+#define mmGB_MACROTILE_MODE9 0x066d
+#define mmGB_MACROTILE_MODE9_BASE_IDX 0
+#define mmGB_MACROTILE_MODE10 0x066e
+#define mmGB_MACROTILE_MODE10_BASE_IDX 0
+#define mmGB_MACROTILE_MODE11 0x066f
+#define mmGB_MACROTILE_MODE11_BASE_IDX 0
+#define mmGB_MACROTILE_MODE12 0x0670
+#define mmGB_MACROTILE_MODE12_BASE_IDX 0
+#define mmGB_MACROTILE_MODE13 0x0671
+#define mmGB_MACROTILE_MODE13_BASE_IDX 0
+#define mmGB_MACROTILE_MODE14 0x0672
+#define mmGB_MACROTILE_MODE14_BASE_IDX 0
+#define mmGB_MACROTILE_MODE15 0x0673
+#define mmGB_MACROTILE_MODE15_BASE_IDX 0
+#define mmCB_HW_CONTROL 0x0680
+#define mmCB_HW_CONTROL_BASE_IDX 0
+#define mmCB_HW_CONTROL_1 0x0681
+#define mmCB_HW_CONTROL_1_BASE_IDX 0
+#define mmCB_HW_CONTROL_2 0x0682
+#define mmCB_HW_CONTROL_2_BASE_IDX 0
+#define mmCB_HW_CONTROL_3 0x0683
+#define mmCB_HW_CONTROL_3_BASE_IDX 0
+#define mmCB_HW_MEM_ARBITER_RD 0x0686
+#define mmCB_HW_MEM_ARBITER_RD_BASE_IDX 0
+#define mmCB_HW_MEM_ARBITER_WR 0x0687
+#define mmCB_HW_MEM_ARBITER_WR_BASE_IDX 0
+#define mmCB_DCC_CONFIG 0x0688
+#define mmCB_DCC_CONFIG_BASE_IDX 0
+#define mmGC_USER_RB_REDUNDANCY 0x06de
+#define mmGC_USER_RB_REDUNDANCY_BASE_IDX 0
+#define mmGC_USER_RB_BACKEND_DISABLE 0x06df
+#define mmGC_USER_RB_BACKEND_DISABLE_BASE_IDX 0
+
+
+// addressBlock: gc_ea_gceadec2
+// base address: 0x9c00
+#define mmGCEA_PERFCOUNTER_RSLT_CNTL 0x0700
+#define mmGCEA_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define mmGCEA_DSM_CNTL 0x0708
+#define mmGCEA_DSM_CNTL_BASE_IDX 0
+#define mmGCEA_DSM_CNTLA 0x0709
+#define mmGCEA_DSM_CNTLA_BASE_IDX 0
+#define mmGCEA_DSM_CNTLB 0x070a
+#define mmGCEA_DSM_CNTLB_BASE_IDX 0
+#define mmGCEA_DSM_CNTL2 0x070b
+#define mmGCEA_DSM_CNTL2_BASE_IDX 0
+#define mmGCEA_DSM_CNTL2A 0x070c
+#define mmGCEA_DSM_CNTL2A_BASE_IDX 0
+#define mmGCEA_DSM_CNTL2B 0x070d
+#define mmGCEA_DSM_CNTL2B_BASE_IDX 0
+#define mmGCEA_TCC_XBR_CREDITS 0x070e
+#define mmGCEA_TCC_XBR_CREDITS_BASE_IDX 0
+#define mmGCEA_TCC_XBR_MAXBURST 0x070f
+#define mmGCEA_TCC_XBR_MAXBURST_BASE_IDX 0
+#define mmGCEA_PROBE_CNTL 0x0710
+#define mmGCEA_PROBE_CNTL_BASE_IDX 0
+#define mmGCEA_PROBE_MAP 0x0711
+#define mmGCEA_PROBE_MAP_BASE_IDX 0
+#define mmGCEA_ERR_STATUS 0x0712
+#define mmGCEA_ERR_STATUS_BASE_IDX 0
+#define mmGCEA_MISC2 0x0713
+#define mmGCEA_MISC2_BASE_IDX 0
+#define mmGCEA_DRAM_BANK_ARB 0x0714
+#define mmGCEA_DRAM_BANK_ARB_BASE_IDX 0
+#define mmGCEA_SDP_BACKDOOR_CMDCREDITS0 0x0715
+#define mmGCEA_SDP_BACKDOOR_CMDCREDITS0_BASE_IDX 0
+#define mmGCEA_SDP_BACKDOOR_CMDCREDITS1 0x0716
+#define mmGCEA_SDP_BACKDOOR_CMDCREDITS1_BASE_IDX 0
+#define mmGCEA_SDP_BACKDOOR_DATACREDITS0 0x0717
+#define mmGCEA_SDP_BACKDOOR_DATACREDITS0_BASE_IDX 0
+#define mmGCEA_SDP_BACKDOOR_DATACREDITS1 0x0718
+#define mmGCEA_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 0
+#define mmGCEA_SDP_BACKDOOR_MISCCREDITS 0x0719
+#define mmGCEA_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 0
+#define mmGCEA_SDP_ENABLE 0x071a
+#define mmGCEA_SDP_ENABLE_BASE_IDX 0
+
+
+// addressBlock: gc_rmi_rmidec
+// base address: 0x9e00
+#define mmRMI_GENERAL_CNTL 0x0780
+#define mmRMI_GENERAL_CNTL_BASE_IDX 0
+#define mmRMI_GENERAL_CNTL1 0x0781
+#define mmRMI_GENERAL_CNTL1_BASE_IDX 0
+#define mmRMI_GENERAL_STATUS 0x0782
+#define mmRMI_GENERAL_STATUS_BASE_IDX 0
+#define mmRMI_SUBBLOCK_STATUS0 0x0783
+#define mmRMI_SUBBLOCK_STATUS0_BASE_IDX 0
+#define mmRMI_SUBBLOCK_STATUS1 0x0784
+#define mmRMI_SUBBLOCK_STATUS1_BASE_IDX 0
+#define mmRMI_SUBBLOCK_STATUS2 0x0785
+#define mmRMI_SUBBLOCK_STATUS2_BASE_IDX 0
+#define mmRMI_SUBBLOCK_STATUS3 0x0786
+#define mmRMI_SUBBLOCK_STATUS3_BASE_IDX 0
+#define mmRMI_XBAR_CONFIG 0x0787
+#define mmRMI_XBAR_CONFIG_BASE_IDX 0
+#define mmRMI_PROBE_POP_LOGIC_CNTL 0x0788
+#define mmRMI_PROBE_POP_LOGIC_CNTL_BASE_IDX 0
+#define mmRMI_UTC_XNACK_N_MISC_CNTL 0x0789
+#define mmRMI_UTC_XNACK_N_MISC_CNTL_BASE_IDX 0
+#define mmRMI_DEMUX_CNTL 0x078a
+#define mmRMI_DEMUX_CNTL_BASE_IDX 0
+#define mmRMI_UTCL1_CNTL1 0x078b
+#define mmRMI_UTCL1_CNTL1_BASE_IDX 0
+#define mmRMI_UTCL1_CNTL2 0x078c
+#define mmRMI_UTCL1_CNTL2_BASE_IDX 0
+#define mmRMI_UTC_UNIT_CONFIG 0x078d
+#define mmRMI_UTC_UNIT_CONFIG_BASE_IDX 0
+#define mmRMI_TCIW_FORMATTER0_CNTL 0x078e
+#define mmRMI_TCIW_FORMATTER0_CNTL_BASE_IDX 0
+#define mmRMI_TCIW_FORMATTER1_CNTL 0x078f
+#define mmRMI_TCIW_FORMATTER1_CNTL_BASE_IDX 0
+#define mmRMI_SCOREBOARD_CNTL 0x0790
+#define mmRMI_SCOREBOARD_CNTL_BASE_IDX 0
+#define mmRMI_SCOREBOARD_STATUS0 0x0791
+#define mmRMI_SCOREBOARD_STATUS0_BASE_IDX 0
+#define mmRMI_SCOREBOARD_STATUS1 0x0792
+#define mmRMI_SCOREBOARD_STATUS1_BASE_IDX 0
+#define mmRMI_SCOREBOARD_STATUS2 0x0793
+#define mmRMI_SCOREBOARD_STATUS2_BASE_IDX 0
+#define mmRMI_XBAR_ARBITER_CONFIG 0x0794
+#define mmRMI_XBAR_ARBITER_CONFIG_BASE_IDX 0
+#define mmRMI_XBAR_ARBITER_CONFIG_1 0x0795
+#define mmRMI_XBAR_ARBITER_CONFIG_1_BASE_IDX 0
+#define mmRMI_CLOCK_CNTRL 0x0796
+#define mmRMI_CLOCK_CNTRL_BASE_IDX 0
+#define mmRMI_UTCL1_STATUS 0x0797
+#define mmRMI_UTCL1_STATUS_BASE_IDX 0
+#define mmRMI_SPARE 0x079e
+#define mmRMI_SPARE_BASE_IDX 0
+#define mmRMI_SPARE_1 0x079f
+#define mmRMI_SPARE_1_BASE_IDX 0
+#define mmRMI_SPARE_2 0x07a0
+#define mmRMI_SPARE_2_BASE_IDX 0
+
+
+// addressBlock: gc_utcl2_atcl2dec
+// base address: 0xa000
+#define mmATC_L2_CNTL 0x0800
+#define mmATC_L2_CNTL_BASE_IDX 0
+#define mmATC_L2_CNTL2 0x0801
+#define mmATC_L2_CNTL2_BASE_IDX 0
+#define mmATC_L2_CACHE_DATA0 0x0804
+#define mmATC_L2_CACHE_DATA0_BASE_IDX 0
+#define mmATC_L2_CACHE_DATA1 0x0805
+#define mmATC_L2_CACHE_DATA1_BASE_IDX 0
+#define mmATC_L2_CACHE_DATA2 0x0806
+#define mmATC_L2_CACHE_DATA2_BASE_IDX 0
+#define mmATC_L2_CNTL3 0x0807
+#define mmATC_L2_CNTL3_BASE_IDX 0
+#define mmATC_L2_STATUS 0x0808
+#define mmATC_L2_STATUS_BASE_IDX 0
+#define mmATC_L2_STATUS2 0x0809
+#define mmATC_L2_STATUS2_BASE_IDX 0
+#define mmATC_L2_MISC_CG 0x080a
+#define mmATC_L2_MISC_CG_BASE_IDX 0
+#define mmATC_L2_MEM_POWER_LS 0x080b
+#define mmATC_L2_MEM_POWER_LS_BASE_IDX 0
+#define mmATC_L2_CGTT_CLK_CTRL 0x080c
+#define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0
+
+
+// addressBlock: gc_utcl2_vml2pfdec
+// base address: 0xa100
+#define mmVM_L2_CNTL 0x0840
+#define mmVM_L2_CNTL_BASE_IDX 0
+#define mmVM_L2_CNTL2 0x0841
+#define mmVM_L2_CNTL2_BASE_IDX 0
+#define mmVM_L2_CNTL3 0x0842
+#define mmVM_L2_CNTL3_BASE_IDX 0
+#define mmVM_L2_STATUS 0x0843
+#define mmVM_L2_STATUS_BASE_IDX 0
+#define mmVM_DUMMY_PAGE_FAULT_CNTL 0x0844
+#define mmVM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 0
+#define mmVM_DUMMY_PAGE_FAULT_ADDR_LO32 0x0845
+#define mmVM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 0
+#define mmVM_DUMMY_PAGE_FAULT_ADDR_HI32 0x0846
+#define mmVM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_CNTL 0x0847
+#define mmVM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_CNTL2 0x0848
+#define mmVM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_MM_CNTL3 0x0849
+#define mmVM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_MM_CNTL4 0x084a
+#define mmVM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_STATUS 0x084b
+#define mmVM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_ADDR_LO32 0x084c
+#define mmVM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_ADDR_HI32 0x084d
+#define mmVM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x084e
+#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x084f
+#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x0851
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x0852
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x0853
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x0854
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x0855
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 0
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x0856
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 0
+#define mmVM_L2_CNTL4 0x0857
+#define mmVM_L2_CNTL4_BASE_IDX 0
+#define mmVM_L2_MM_GROUP_RT_CLASSES 0x0858
+#define mmVM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+#define mmVM_L2_BANK_SELECT_RESERVED_CID 0x0859
+#define mmVM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 0
+#define mmVM_L2_BANK_SELECT_RESERVED_CID2 0x085a
+#define mmVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
+#define mmVM_L2_CACHE_PARITY_CNTL 0x085b
+#define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define mmVM_L2_CGTT_CLK_CTRL 0x085e
+#define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0
+
+
+// addressBlock: gc_utcl2_vml2vcdec
+// base address: 0xa200
+#define mmVM_CONTEXT0_CNTL 0x0880
+#define mmVM_CONTEXT0_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT1_CNTL 0x0881
+#define mmVM_CONTEXT1_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT2_CNTL 0x0882
+#define mmVM_CONTEXT2_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT3_CNTL 0x0883
+#define mmVM_CONTEXT3_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT4_CNTL 0x0884
+#define mmVM_CONTEXT4_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT5_CNTL 0x0885
+#define mmVM_CONTEXT5_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT6_CNTL 0x0886
+#define mmVM_CONTEXT6_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT7_CNTL 0x0887
+#define mmVM_CONTEXT7_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT8_CNTL 0x0888
+#define mmVM_CONTEXT8_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT9_CNTL 0x0889
+#define mmVM_CONTEXT9_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT10_CNTL 0x088a
+#define mmVM_CONTEXT10_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT11_CNTL 0x088b
+#define mmVM_CONTEXT11_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT12_CNTL 0x088c
+#define mmVM_CONTEXT12_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT13_CNTL 0x088d
+#define mmVM_CONTEXT13_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT14_CNTL 0x088e
+#define mmVM_CONTEXT14_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT15_CNTL 0x088f
+#define mmVM_CONTEXT15_CNTL_BASE_IDX 0
+#define mmVM_CONTEXTS_DISABLE 0x0890
+#define mmVM_CONTEXTS_DISABLE_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_SEM 0x0891
+#define mmVM_INVALIDATE_ENG0_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_SEM 0x0892
+#define mmVM_INVALIDATE_ENG1_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_SEM 0x0893
+#define mmVM_INVALIDATE_ENG2_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_SEM 0x0894
+#define mmVM_INVALIDATE_ENG3_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_SEM 0x0895
+#define mmVM_INVALIDATE_ENG4_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_SEM 0x0896
+#define mmVM_INVALIDATE_ENG5_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_SEM 0x0897
+#define mmVM_INVALIDATE_ENG6_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_SEM 0x0898
+#define mmVM_INVALIDATE_ENG7_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_SEM 0x0899
+#define mmVM_INVALIDATE_ENG8_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_SEM 0x089a
+#define mmVM_INVALIDATE_ENG9_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_SEM 0x089b
+#define mmVM_INVALIDATE_ENG10_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_SEM 0x089c
+#define mmVM_INVALIDATE_ENG11_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_SEM 0x089d
+#define mmVM_INVALIDATE_ENG12_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_SEM 0x089e
+#define mmVM_INVALIDATE_ENG13_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_SEM 0x089f
+#define mmVM_INVALIDATE_ENG14_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_SEM 0x08a0
+#define mmVM_INVALIDATE_ENG15_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_SEM 0x08a1
+#define mmVM_INVALIDATE_ENG16_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_SEM 0x08a2
+#define mmVM_INVALIDATE_ENG17_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_REQ 0x08a3
+#define mmVM_INVALIDATE_ENG0_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_REQ 0x08a4
+#define mmVM_INVALIDATE_ENG1_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_REQ 0x08a5
+#define mmVM_INVALIDATE_ENG2_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_REQ 0x08a6
+#define mmVM_INVALIDATE_ENG3_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_REQ 0x08a7
+#define mmVM_INVALIDATE_ENG4_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_REQ 0x08a8
+#define mmVM_INVALIDATE_ENG5_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_REQ 0x08a9
+#define mmVM_INVALIDATE_ENG6_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_REQ 0x08aa
+#define mmVM_INVALIDATE_ENG7_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_REQ 0x08ab
+#define mmVM_INVALIDATE_ENG8_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_REQ 0x08ac
+#define mmVM_INVALIDATE_ENG9_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_REQ 0x08ad
+#define mmVM_INVALIDATE_ENG10_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_REQ 0x08ae
+#define mmVM_INVALIDATE_ENG11_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_REQ 0x08af
+#define mmVM_INVALIDATE_ENG12_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_REQ 0x08b0
+#define mmVM_INVALIDATE_ENG13_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_REQ 0x08b1
+#define mmVM_INVALIDATE_ENG14_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_REQ 0x08b2
+#define mmVM_INVALIDATE_ENG15_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_REQ 0x08b3
+#define mmVM_INVALIDATE_ENG16_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_REQ 0x08b4
+#define mmVM_INVALIDATE_ENG17_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_ACK 0x08b5
+#define mmVM_INVALIDATE_ENG0_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_ACK 0x08b6
+#define mmVM_INVALIDATE_ENG1_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_ACK 0x08b7
+#define mmVM_INVALIDATE_ENG2_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_ACK 0x08b8
+#define mmVM_INVALIDATE_ENG3_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_ACK 0x08b9
+#define mmVM_INVALIDATE_ENG4_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_ACK 0x08ba
+#define mmVM_INVALIDATE_ENG5_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_ACK 0x08bb
+#define mmVM_INVALIDATE_ENG6_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_ACK 0x08bc
+#define mmVM_INVALIDATE_ENG7_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_ACK 0x08bd
+#define mmVM_INVALIDATE_ENG8_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_ACK 0x08be
+#define mmVM_INVALIDATE_ENG9_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_ACK 0x08bf
+#define mmVM_INVALIDATE_ENG10_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_ACK 0x08c0
+#define mmVM_INVALIDATE_ENG11_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_ACK 0x08c1
+#define mmVM_INVALIDATE_ENG12_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_ACK 0x08c2
+#define mmVM_INVALIDATE_ENG13_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_ACK 0x08c3
+#define mmVM_INVALIDATE_ENG14_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_ACK 0x08c4
+#define mmVM_INVALIDATE_ENG15_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_ACK 0x08c5
+#define mmVM_INVALIDATE_ENG16_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_ACK 0x08c6
+#define mmVM_INVALIDATE_ENG17_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x08c7
+#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x08c8
+#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x08c9
+#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x08ca
+#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x08cb
+#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x08cc
+#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x08cd
+#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x08ce
+#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x08cf
+#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x08d0
+#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x08d1
+#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x08d2
+#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x08d3
+#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x08d4
+#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x08d5
+#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x08d6
+#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x08d7
+#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x08d8
+#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x08d9
+#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x08da
+#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x08db
+#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x08dc
+#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x08dd
+#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x08de
+#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x08df
+#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x08e0
+#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x08e1
+#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x08e2
+#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x08e3
+#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x08e4
+#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x08e5
+#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x08e6
+#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x08e7
+#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x08e8
+#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x08e9
+#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x08ea
+#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x08eb
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x08ec
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x08ed
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x08ee
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x08ef
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x08f0
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x08f1
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x08f2
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x08f3
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x08f4
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x08f5
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x08f6
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x08f7
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x08f8
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x08f9
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x08fa
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x08fb
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x08fc
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x08fd
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x08fe
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x08ff
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x0900
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x0901
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x0902
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x0903
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x0904
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x0905
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x0906
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x0907
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x0908
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x0909
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x090a
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x090b
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x090c
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x090d
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x090e
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x090f
+#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x0910
+#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x0911
+#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x0912
+#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x0913
+#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x0914
+#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x0915
+#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x0916
+#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x0917
+#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x0918
+#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x0919
+#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x091a
+#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x091b
+#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x091c
+#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x091d
+#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x091e
+#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x091f
+#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x0920
+#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x0921
+#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x0922
+#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x0923
+#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x0924
+#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x0925
+#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x0926
+#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x0927
+#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x0928
+#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x0929
+#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x092a
+#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x092b
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x092c
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x092d
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x092e
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x092f
+#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x0930
+#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0931
+#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0932
+#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x0933
+#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x0934
+#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0935
+#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0936
+#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0937
+#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0938
+#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0939
+#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x093a
+#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x093b
+#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x093c
+#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x093d
+#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x093e
+#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x093f
+#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x0940
+#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x0941
+#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x0942
+#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x0943
+#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x0944
+#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x0945
+#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x0946
+#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x0947
+#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x0948
+#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x0949
+#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x094a
+#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+
+
+// addressBlock: gc_utcl2_vmsharedpfdec
+// base address: 0xa590
+#define mmMC_VM_NB_MMIOBASE 0x0964
+#define mmMC_VM_NB_MMIOBASE_BASE_IDX 0
+#define mmMC_VM_NB_MMIOLIMIT 0x0965
+#define mmMC_VM_NB_MMIOLIMIT_BASE_IDX 0
+#define mmMC_VM_NB_PCI_CTRL 0x0966
+#define mmMC_VM_NB_PCI_CTRL_BASE_IDX 0
+#define mmMC_VM_NB_PCI_ARB 0x0967
+#define mmMC_VM_NB_PCI_ARB_BASE_IDX 0
+#define mmMC_VM_NB_TOP_OF_DRAM_SLOT1 0x0968
+#define mmMC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 0
+#define mmMC_VM_NB_LOWER_TOP_OF_DRAM2 0x0969
+#define mmMC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 0
+#define mmMC_VM_NB_UPPER_TOP_OF_DRAM2 0x096a
+#define mmMC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 0
+#define mmMC_VM_FB_OFFSET 0x096b
+#define mmMC_VM_FB_OFFSET_BASE_IDX 0
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x096c
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 0
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x096d
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 0
+#define mmMC_VM_STEERING 0x096e
+#define mmMC_VM_STEERING_BASE_IDX 0
+#define mmMC_SHARED_VIRT_RESET_REQ 0x096f
+#define mmMC_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define mmMC_MEM_POWER_LS 0x0970
+#define mmMC_MEM_POWER_LS_BASE_IDX 0
+#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_START 0x0971
+#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 0
+#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_END 0x0972
+#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 0
+#define mmMC_VM_APT_CNTL 0x0973
+#define mmMC_VM_APT_CNTL_BASE_IDX 0
+#define mmMC_VM_LOCAL_HBM_ADDRESS_START 0x0974
+#define mmMC_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 0
+#define mmMC_VM_LOCAL_HBM_ADDRESS_END 0x0975
+#define mmMC_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 0
+#define mmMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x0976
+#define mmMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_CNTL 0x0977
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0978
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
+
+
+// addressBlock: gc_utcl2_vmsharedvcdec
+// base address: 0xa600
+#define mmMC_VM_FB_LOCATION_BASE 0x0980
+#define mmMC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define mmMC_VM_FB_LOCATION_TOP 0x0981
+#define mmMC_VM_FB_LOCATION_TOP_BASE_IDX 0
+#define mmMC_VM_AGP_TOP 0x0982
+#define mmMC_VM_AGP_TOP_BASE_IDX 0
+#define mmMC_VM_AGP_BOT 0x0983
+#define mmMC_VM_AGP_BOT_BASE_IDX 0
+#define mmMC_VM_AGP_BASE 0x0984
+#define mmMC_VM_AGP_BASE_BASE_IDX 0
+#define mmMC_VM_SYSTEM_APERTURE_LOW_ADDR 0x0985
+#define mmMC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 0
+#define mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0986
+#define mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 0
+#define mmMC_VM_MX_L1_TLB_CNTL 0x0987
+#define mmMC_VM_MX_L1_TLB_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_ea_gceadec
+// base address: 0xa800
+#define mmGCEA_DRAM_RD_CLI2GRP_MAP0 0x0a00
+#define mmGCEA_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define mmGCEA_DRAM_RD_CLI2GRP_MAP1 0x0a01
+#define mmGCEA_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define mmGCEA_DRAM_WR_CLI2GRP_MAP0 0x0a02
+#define mmGCEA_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define mmGCEA_DRAM_WR_CLI2GRP_MAP1 0x0a03
+#define mmGCEA_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define mmGCEA_DRAM_RD_GRP2VC_MAP 0x0a04
+#define mmGCEA_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define mmGCEA_DRAM_WR_GRP2VC_MAP 0x0a05
+#define mmGCEA_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define mmGCEA_DRAM_RD_LAZY 0x0a06
+#define mmGCEA_DRAM_RD_LAZY_BASE_IDX 0
+#define mmGCEA_DRAM_WR_LAZY 0x0a07
+#define mmGCEA_DRAM_WR_LAZY_BASE_IDX 0
+#define mmGCEA_DRAM_RD_CAM_CNTL 0x0a08
+#define mmGCEA_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define mmGCEA_DRAM_WR_CAM_CNTL 0x0a09
+#define mmGCEA_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define mmGCEA_DRAM_PAGE_BURST 0x0a0a
+#define mmGCEA_DRAM_PAGE_BURST_BASE_IDX 0
+#define mmGCEA_DRAM_RD_PRI_AGE 0x0a0b
+#define mmGCEA_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define mmGCEA_DRAM_WR_PRI_AGE 0x0a0c
+#define mmGCEA_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define mmGCEA_DRAM_RD_PRI_QUEUING 0x0a0d
+#define mmGCEA_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define mmGCEA_DRAM_WR_PRI_QUEUING 0x0a0e
+#define mmGCEA_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define mmGCEA_DRAM_RD_PRI_FIXED 0x0a0f
+#define mmGCEA_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define mmGCEA_DRAM_WR_PRI_FIXED 0x0a10
+#define mmGCEA_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define mmGCEA_DRAM_RD_PRI_URGENCY 0x0a11
+#define mmGCEA_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define mmGCEA_DRAM_WR_PRI_URGENCY 0x0a12
+#define mmGCEA_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define mmGCEA_DRAM_RD_PRI_QUANT_PRI1 0x0a13
+#define mmGCEA_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmGCEA_DRAM_RD_PRI_QUANT_PRI2 0x0a14
+#define mmGCEA_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmGCEA_DRAM_RD_PRI_QUANT_PRI3 0x0a15
+#define mmGCEA_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmGCEA_DRAM_WR_PRI_QUANT_PRI1 0x0a16
+#define mmGCEA_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmGCEA_DRAM_WR_PRI_QUANT_PRI2 0x0a17
+#define mmGCEA_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmGCEA_DRAM_WR_PRI_QUANT_PRI3 0x0a18
+#define mmGCEA_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmGCEA_ADDRNORM_BASE_ADDR0 0x0a34
+#define mmGCEA_ADDRNORM_BASE_ADDR0_BASE_IDX 0
+#define mmGCEA_ADDRNORM_LIMIT_ADDR0 0x0a35
+#define mmGCEA_ADDRNORM_LIMIT_ADDR0_BASE_IDX 0
+#define mmGCEA_ADDRNORM_BASE_ADDR1 0x0a36
+#define mmGCEA_ADDRNORM_BASE_ADDR1_BASE_IDX 0
+#define mmGCEA_ADDRNORM_LIMIT_ADDR1 0x0a37
+#define mmGCEA_ADDRNORM_LIMIT_ADDR1_BASE_IDX 0
+#define mmGCEA_ADDRNORM_OFFSET_ADDR1 0x0a38
+#define mmGCEA_ADDRNORM_OFFSET_ADDR1_BASE_IDX 0
+#define mmGCEA_ADDRNORMDRAM_HOLE_CNTL 0x0a43
+#define mmGCEA_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 0
+#define mmGCEA_ADDRNORMDRAM_TRICHANNEL_CFG 0x0a45
+#define mmGCEA_ADDRNORMDRAM_TRICHANNEL_CFG_BASE_IDX 0
+#define mmGCEA_ADDRDEC_BANK_CFG 0x0a47
+#define mmGCEA_ADDRDEC_BANK_CFG_BASE_IDX 0
+#define mmGCEA_ADDRDEC_MISC_CFG 0x0a48
+#define mmGCEA_ADDRDEC_MISC_CFG_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK0 0x0a49
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK1 0x0a4a
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK2 0x0a4b
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK3 0x0a4c
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK4 0x0a4d
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_PC 0x0a4e
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_PC2 0x0a4f
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_CS0 0x0a50
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_CS1 0x0a51
+#define mmGCEA_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 0
+#define mmGCEA_ADDRDECDRAM_HARVEST_ENABLE 0x0a52
+#define mmGCEA_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_BASE_ADDR_CS0 0x0a5d
+#define mmGCEA_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_BASE_ADDR_CS1 0x0a5e
+#define mmGCEA_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_BASE_ADDR_CS2 0x0a5f
+#define mmGCEA_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_BASE_ADDR_CS3 0x0a60
+#define mmGCEA_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS0 0x0a61
+#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS1 0x0a62
+#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS2 0x0a63
+#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS3 0x0a64
+#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_ADDR_MASK_CS01 0x0a65
+#define mmGCEA_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_ADDR_MASK_CS23 0x0a66
+#define mmGCEA_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_ADDR_MASK_SECCS01 0x0a67
+#define mmGCEA_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_ADDR_MASK_SECCS23 0x0a68
+#define mmGCEA_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_ADDR_CFG_CS01 0x0a69
+#define mmGCEA_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_ADDR_CFG_CS23 0x0a6a
+#define mmGCEA_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_ADDR_SEL_CS01 0x0a6b
+#define mmGCEA_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_ADDR_SEL_CS23 0x0a6c
+#define mmGCEA_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_COL_SEL_LO_CS01 0x0a6d
+#define mmGCEA_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_COL_SEL_LO_CS23 0x0a6e
+#define mmGCEA_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_COL_SEL_HI_CS01 0x0a6f
+#define mmGCEA_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_COL_SEL_HI_CS23 0x0a70
+#define mmGCEA_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_RM_SEL_CS01 0x0a71
+#define mmGCEA_ADDRDEC0_RM_SEL_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_RM_SEL_CS23 0x0a72
+#define mmGCEA_ADDRDEC0_RM_SEL_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_RM_SEL_SECCS01 0x0a73
+#define mmGCEA_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC0_RM_SEL_SECCS23 0x0a74
+#define mmGCEA_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_BASE_ADDR_CS0 0x0a75
+#define mmGCEA_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_BASE_ADDR_CS1 0x0a76
+#define mmGCEA_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_BASE_ADDR_CS2 0x0a77
+#define mmGCEA_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_BASE_ADDR_CS3 0x0a78
+#define mmGCEA_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS0 0x0a79
+#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS1 0x0a7a
+#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS2 0x0a7b
+#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS3 0x0a7c
+#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_ADDR_MASK_CS01 0x0a7d
+#define mmGCEA_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_ADDR_MASK_CS23 0x0a7e
+#define mmGCEA_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_ADDR_MASK_SECCS01 0x0a7f
+#define mmGCEA_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_ADDR_MASK_SECCS23 0x0a80
+#define mmGCEA_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_ADDR_CFG_CS01 0x0a81
+#define mmGCEA_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_ADDR_CFG_CS23 0x0a82
+#define mmGCEA_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_ADDR_SEL_CS01 0x0a83
+#define mmGCEA_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_ADDR_SEL_CS23 0x0a84
+#define mmGCEA_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_COL_SEL_LO_CS01 0x0a85
+#define mmGCEA_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_COL_SEL_LO_CS23 0x0a86
+#define mmGCEA_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_COL_SEL_HI_CS01 0x0a87
+#define mmGCEA_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_COL_SEL_HI_CS23 0x0a88
+#define mmGCEA_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_RM_SEL_CS01 0x0a89
+#define mmGCEA_ADDRDEC1_RM_SEL_CS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_RM_SEL_CS23 0x0a8a
+#define mmGCEA_ADDRDEC1_RM_SEL_CS23_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_RM_SEL_SECCS01 0x0a8b
+#define mmGCEA_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 0
+#define mmGCEA_ADDRDEC1_RM_SEL_SECCS23 0x0a8c
+#define mmGCEA_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 0
+#define mmGCEA_IO_RD_CLI2GRP_MAP0 0x0ad5
+#define mmGCEA_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define mmGCEA_IO_RD_CLI2GRP_MAP1 0x0ad6
+#define mmGCEA_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define mmGCEA_IO_WR_CLI2GRP_MAP0 0x0ad7
+#define mmGCEA_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define mmGCEA_IO_WR_CLI2GRP_MAP1 0x0ad8
+#define mmGCEA_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define mmGCEA_IO_RD_COMBINE_FLUSH 0x0ad9
+#define mmGCEA_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define mmGCEA_IO_WR_COMBINE_FLUSH 0x0ada
+#define mmGCEA_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define mmGCEA_IO_GROUP_BURST 0x0adb
+#define mmGCEA_IO_GROUP_BURST_BASE_IDX 0
+#define mmGCEA_IO_RD_PRI_AGE 0x0adc
+#define mmGCEA_IO_RD_PRI_AGE_BASE_IDX 0
+#define mmGCEA_IO_WR_PRI_AGE 0x0add
+#define mmGCEA_IO_WR_PRI_AGE_BASE_IDX 0
+#define mmGCEA_IO_RD_PRI_QUEUING 0x0ade
+#define mmGCEA_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define mmGCEA_IO_WR_PRI_QUEUING 0x0adf
+#define mmGCEA_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define mmGCEA_IO_RD_PRI_FIXED 0x0ae0
+#define mmGCEA_IO_RD_PRI_FIXED_BASE_IDX 0
+#define mmGCEA_IO_WR_PRI_FIXED 0x0ae1
+#define mmGCEA_IO_WR_PRI_FIXED_BASE_IDX 0
+#define mmGCEA_IO_RD_PRI_URGENCY 0x0ae2
+#define mmGCEA_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define mmGCEA_IO_WR_PRI_URGENCY 0x0ae3
+#define mmGCEA_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define mmGCEA_IO_RD_PRI_URGENCY_MASK 0x0ae4
+#define mmGCEA_IO_RD_PRI_URGENCY_MASK_BASE_IDX 0
+#define mmGCEA_IO_WR_PRI_URGENCY_MASK 0x0ae5
+#define mmGCEA_IO_WR_PRI_URGENCY_MASK_BASE_IDX 0
+#define mmGCEA_IO_RD_PRI_QUANT_PRI1 0x0ae6
+#define mmGCEA_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmGCEA_IO_RD_PRI_QUANT_PRI2 0x0ae7
+#define mmGCEA_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmGCEA_IO_RD_PRI_QUANT_PRI3 0x0ae8
+#define mmGCEA_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmGCEA_IO_WR_PRI_QUANT_PRI1 0x0ae9
+#define mmGCEA_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmGCEA_IO_WR_PRI_QUANT_PRI2 0x0aea
+#define mmGCEA_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmGCEA_IO_WR_PRI_QUANT_PRI3 0x0aeb
+#define mmGCEA_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmGCEA_SDP_ARB_DRAM 0x0aec
+#define mmGCEA_SDP_ARB_DRAM_BASE_IDX 0
+#define mmGCEA_SDP_ARB_FINAL 0x0aee
+#define mmGCEA_SDP_ARB_FINAL_BASE_IDX 0
+#define mmGCEA_SDP_DRAM_PRIORITY 0x0aef
+#define mmGCEA_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define mmGCEA_SDP_IO_PRIORITY 0x0af1
+#define mmGCEA_SDP_IO_PRIORITY_BASE_IDX 0
+#define mmGCEA_SDP_CREDITS 0x0af2
+#define mmGCEA_SDP_CREDITS_BASE_IDX 0
+#define mmGCEA_SDP_TAG_RESERVE0 0x0af3
+#define mmGCEA_SDP_TAG_RESERVE0_BASE_IDX 0
+#define mmGCEA_SDP_TAG_RESERVE1 0x0af4
+#define mmGCEA_SDP_TAG_RESERVE1_BASE_IDX 0
+#define mmGCEA_SDP_VCC_RESERVE0 0x0af5
+#define mmGCEA_SDP_VCC_RESERVE0_BASE_IDX 0
+#define mmGCEA_SDP_VCC_RESERVE1 0x0af6
+#define mmGCEA_SDP_VCC_RESERVE1_BASE_IDX 0
+#define mmGCEA_SDP_VCD_RESERVE0 0x0af7
+#define mmGCEA_SDP_VCD_RESERVE0_BASE_IDX 0
+#define mmGCEA_SDP_VCD_RESERVE1 0x0af8
+#define mmGCEA_SDP_VCD_RESERVE1_BASE_IDX 0
+#define mmGCEA_SDP_REQ_CNTL 0x0af9
+#define mmGCEA_SDP_REQ_CNTL_BASE_IDX 0
+#define mmGCEA_MISC 0x0afa
+#define mmGCEA_MISC_BASE_IDX 0
+#define mmGCEA_LATENCY_SAMPLING 0x0afb
+#define mmGCEA_LATENCY_SAMPLING_BASE_IDX 0
+#define mmGCEA_PERFCOUNTER_LO 0x0afc
+#define mmGCEA_PERFCOUNTER_LO_BASE_IDX 0
+#define mmGCEA_PERFCOUNTER_HI 0x0afd
+#define mmGCEA_PERFCOUNTER_HI_BASE_IDX 0
+#define mmGCEA_PERFCOUNTER0_CFG 0x0afe
+#define mmGCEA_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmGCEA_PERFCOUNTER1_CFG 0x0aff
+#define mmGCEA_PERFCOUNTER1_CFG_BASE_IDX 0
+
+
+// addressBlock: gc_tcdec
+// base address: 0xac00
+#define mmTCP_INVALIDATE 0x0b00
+#define mmTCP_INVALIDATE_BASE_IDX 0
+#define mmTCP_STATUS 0x0b01
+#define mmTCP_STATUS_BASE_IDX 0
+#define mmTCP_CNTL 0x0b02
+#define mmTCP_CNTL_BASE_IDX 0
+#define mmTCP_CHAN_STEER_LO 0x0b03
+#define mmTCP_CHAN_STEER_LO_BASE_IDX 0
+#define mmTCP_CHAN_STEER_HI 0x0b04
+#define mmTCP_CHAN_STEER_HI_BASE_IDX 0
+#define mmTCP_ADDR_CONFIG 0x0b05
+#define mmTCP_ADDR_CONFIG_BASE_IDX 0
+#define mmTCP_CREDIT 0x0b06
+#define mmTCP_CREDIT_BASE_IDX 0
+#define mmTCP_BUFFER_ADDR_HASH_CNTL 0x0b16
+#define mmTCP_BUFFER_ADDR_HASH_CNTL_BASE_IDX 0
+#define mmTC_CFG_L1_LOAD_POLICY0 0x0b1a
+#define mmTC_CFG_L1_LOAD_POLICY0_BASE_IDX 0
+#define mmTC_CFG_L1_LOAD_POLICY1 0x0b1b
+#define mmTC_CFG_L1_LOAD_POLICY1_BASE_IDX 0
+#define mmTC_CFG_L1_STORE_POLICY 0x0b1c
+#define mmTC_CFG_L1_STORE_POLICY_BASE_IDX 0
+#define mmTC_CFG_L2_LOAD_POLICY0 0x0b1d
+#define mmTC_CFG_L2_LOAD_POLICY0_BASE_IDX 0
+#define mmTC_CFG_L2_LOAD_POLICY1 0x0b1e
+#define mmTC_CFG_L2_LOAD_POLICY1_BASE_IDX 0
+#define mmTC_CFG_L2_STORE_POLICY0 0x0b1f
+#define mmTC_CFG_L2_STORE_POLICY0_BASE_IDX 0
+#define mmTC_CFG_L2_STORE_POLICY1 0x0b20
+#define mmTC_CFG_L2_STORE_POLICY1_BASE_IDX 0
+#define mmTC_CFG_L2_ATOMIC_POLICY 0x0b21
+#define mmTC_CFG_L2_ATOMIC_POLICY_BASE_IDX 0
+#define mmTC_CFG_L1_VOLATILE 0x0b22
+#define mmTC_CFG_L1_VOLATILE_BASE_IDX 0
+#define mmTC_CFG_L2_VOLATILE 0x0b23
+#define mmTC_CFG_L2_VOLATILE_BASE_IDX 0
+#define mmTCI_STATUS 0x0b61
+#define mmTCI_STATUS_BASE_IDX 0
+#define mmTCI_CNTL_1 0x0b62
+#define mmTCI_CNTL_1_BASE_IDX 0
+#define mmTCI_CNTL_2 0x0b63
+#define mmTCI_CNTL_2_BASE_IDX 0
+#define mmTCC_CTRL 0x0b80
+#define mmTCC_CTRL_BASE_IDX 0
+#define mmTCC_CTRL2 0x0b81
+#define mmTCC_CTRL2_BASE_IDX 0
+#define mmTCC_REDUNDANCY 0x0b84
+#define mmTCC_REDUNDANCY_BASE_IDX 0
+#define mmTCC_EXE_DISABLE 0x0b85
+#define mmTCC_EXE_DISABLE_BASE_IDX 0
+#define mmTCC_DSM_CNTL 0x0b86
+#define mmTCC_DSM_CNTL_BASE_IDX 0
+#define mmTCC_DSM_CNTLA 0x0b87
+#define mmTCC_DSM_CNTLA_BASE_IDX 0
+#define mmTCC_DSM_CNTL2 0x0b88
+#define mmTCC_DSM_CNTL2_BASE_IDX 0
+#define mmTCC_DSM_CNTL2A 0x0b89
+#define mmTCC_DSM_CNTL2A_BASE_IDX 0
+#define mmTCC_DSM_CNTL2B 0x0b8a
+#define mmTCC_DSM_CNTL2B_BASE_IDX 0
+#define mmTCC_WBINVL2 0x0b8b
+#define mmTCC_WBINVL2_BASE_IDX 0
+#define mmTCC_SOFT_RESET 0x0b8c
+#define mmTCC_SOFT_RESET_BASE_IDX 0
+#define mmTCA_CTRL 0x0bc0
+#define mmTCA_CTRL_BASE_IDX 0
+#define mmTCA_BURST_MASK 0x0bc1
+#define mmTCA_BURST_MASK_BASE_IDX 0
+#define mmTCA_BURST_CTRL 0x0bc2
+#define mmTCA_BURST_CTRL_BASE_IDX 0
+#define mmTCA_DSM_CNTL 0x0bc3
+#define mmTCA_DSM_CNTL_BASE_IDX 0
+#define mmTCA_DSM_CNTL2 0x0bc4
+#define mmTCA_DSM_CNTL2_BASE_IDX 0
+
+
+// addressBlock: gc_shdec
+// base address: 0xb000
+#define mmSPI_SHADER_PGM_RSRC3_PS 0x0c07
+#define mmSPI_SHADER_PGM_RSRC3_PS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_LO_PS 0x0c08
+#define mmSPI_SHADER_PGM_LO_PS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_HI_PS 0x0c09
+#define mmSPI_SHADER_PGM_HI_PS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC1_PS 0x0c0a
+#define mmSPI_SHADER_PGM_RSRC1_PS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC2_PS 0x0c0b
+#define mmSPI_SHADER_PGM_RSRC2_PS_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_0 0x0c0c
+#define mmSPI_SHADER_USER_DATA_PS_0_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_1 0x0c0d
+#define mmSPI_SHADER_USER_DATA_PS_1_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_2 0x0c0e
+#define mmSPI_SHADER_USER_DATA_PS_2_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_3 0x0c0f
+#define mmSPI_SHADER_USER_DATA_PS_3_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_4 0x0c10
+#define mmSPI_SHADER_USER_DATA_PS_4_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_5 0x0c11
+#define mmSPI_SHADER_USER_DATA_PS_5_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_6 0x0c12
+#define mmSPI_SHADER_USER_DATA_PS_6_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_7 0x0c13
+#define mmSPI_SHADER_USER_DATA_PS_7_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_8 0x0c14
+#define mmSPI_SHADER_USER_DATA_PS_8_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_9 0x0c15
+#define mmSPI_SHADER_USER_DATA_PS_9_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_10 0x0c16
+#define mmSPI_SHADER_USER_DATA_PS_10_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_11 0x0c17
+#define mmSPI_SHADER_USER_DATA_PS_11_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_12 0x0c18
+#define mmSPI_SHADER_USER_DATA_PS_12_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_13 0x0c19
+#define mmSPI_SHADER_USER_DATA_PS_13_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_14 0x0c1a
+#define mmSPI_SHADER_USER_DATA_PS_14_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_15 0x0c1b
+#define mmSPI_SHADER_USER_DATA_PS_15_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_16 0x0c1c
+#define mmSPI_SHADER_USER_DATA_PS_16_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_17 0x0c1d
+#define mmSPI_SHADER_USER_DATA_PS_17_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_18 0x0c1e
+#define mmSPI_SHADER_USER_DATA_PS_18_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_19 0x0c1f
+#define mmSPI_SHADER_USER_DATA_PS_19_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_20 0x0c20
+#define mmSPI_SHADER_USER_DATA_PS_20_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_21 0x0c21
+#define mmSPI_SHADER_USER_DATA_PS_21_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_22 0x0c22
+#define mmSPI_SHADER_USER_DATA_PS_22_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_23 0x0c23
+#define mmSPI_SHADER_USER_DATA_PS_23_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_24 0x0c24
+#define mmSPI_SHADER_USER_DATA_PS_24_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_25 0x0c25
+#define mmSPI_SHADER_USER_DATA_PS_25_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_26 0x0c26
+#define mmSPI_SHADER_USER_DATA_PS_26_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_27 0x0c27
+#define mmSPI_SHADER_USER_DATA_PS_27_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_28 0x0c28
+#define mmSPI_SHADER_USER_DATA_PS_28_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_29 0x0c29
+#define mmSPI_SHADER_USER_DATA_PS_29_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_30 0x0c2a
+#define mmSPI_SHADER_USER_DATA_PS_30_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_PS_31 0x0c2b
+#define mmSPI_SHADER_USER_DATA_PS_31_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC3_VS 0x0c46
+#define mmSPI_SHADER_PGM_RSRC3_VS_BASE_IDX 0
+#define mmSPI_SHADER_LATE_ALLOC_VS 0x0c47
+#define mmSPI_SHADER_LATE_ALLOC_VS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_LO_VS 0x0c48
+#define mmSPI_SHADER_PGM_LO_VS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_HI_VS 0x0c49
+#define mmSPI_SHADER_PGM_HI_VS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC1_VS 0x0c4a
+#define mmSPI_SHADER_PGM_RSRC1_VS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC2_VS 0x0c4b
+#define mmSPI_SHADER_PGM_RSRC2_VS_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_0 0x0c4c
+#define mmSPI_SHADER_USER_DATA_VS_0_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_1 0x0c4d
+#define mmSPI_SHADER_USER_DATA_VS_1_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_2 0x0c4e
+#define mmSPI_SHADER_USER_DATA_VS_2_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_3 0x0c4f
+#define mmSPI_SHADER_USER_DATA_VS_3_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_4 0x0c50
+#define mmSPI_SHADER_USER_DATA_VS_4_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_5 0x0c51
+#define mmSPI_SHADER_USER_DATA_VS_5_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_6 0x0c52
+#define mmSPI_SHADER_USER_DATA_VS_6_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_7 0x0c53
+#define mmSPI_SHADER_USER_DATA_VS_7_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_8 0x0c54
+#define mmSPI_SHADER_USER_DATA_VS_8_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_9 0x0c55
+#define mmSPI_SHADER_USER_DATA_VS_9_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_10 0x0c56
+#define mmSPI_SHADER_USER_DATA_VS_10_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_11 0x0c57
+#define mmSPI_SHADER_USER_DATA_VS_11_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_12 0x0c58
+#define mmSPI_SHADER_USER_DATA_VS_12_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_13 0x0c59
+#define mmSPI_SHADER_USER_DATA_VS_13_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_14 0x0c5a
+#define mmSPI_SHADER_USER_DATA_VS_14_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_15 0x0c5b
+#define mmSPI_SHADER_USER_DATA_VS_15_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_16 0x0c5c
+#define mmSPI_SHADER_USER_DATA_VS_16_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_17 0x0c5d
+#define mmSPI_SHADER_USER_DATA_VS_17_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_18 0x0c5e
+#define mmSPI_SHADER_USER_DATA_VS_18_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_19 0x0c5f
+#define mmSPI_SHADER_USER_DATA_VS_19_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_20 0x0c60
+#define mmSPI_SHADER_USER_DATA_VS_20_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_21 0x0c61
+#define mmSPI_SHADER_USER_DATA_VS_21_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_22 0x0c62
+#define mmSPI_SHADER_USER_DATA_VS_22_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_23 0x0c63
+#define mmSPI_SHADER_USER_DATA_VS_23_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_24 0x0c64
+#define mmSPI_SHADER_USER_DATA_VS_24_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_25 0x0c65
+#define mmSPI_SHADER_USER_DATA_VS_25_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_26 0x0c66
+#define mmSPI_SHADER_USER_DATA_VS_26_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_27 0x0c67
+#define mmSPI_SHADER_USER_DATA_VS_27_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_28 0x0c68
+#define mmSPI_SHADER_USER_DATA_VS_28_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_29 0x0c69
+#define mmSPI_SHADER_USER_DATA_VS_29_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_30 0x0c6a
+#define mmSPI_SHADER_USER_DATA_VS_30_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_VS_31 0x0c6b
+#define mmSPI_SHADER_USER_DATA_VS_31_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC2_GS_VS 0x0c7c
+#define mmSPI_SHADER_PGM_RSRC2_GS_VS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC4_GS 0x0c81
+#define mmSPI_SHADER_PGM_RSRC4_GS_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ADDR_LO_GS 0x0c82
+#define mmSPI_SHADER_USER_DATA_ADDR_LO_GS_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ADDR_HI_GS 0x0c83
+#define mmSPI_SHADER_USER_DATA_ADDR_HI_GS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_LO_ES 0x0c84
+#define mmSPI_SHADER_PGM_LO_ES_BASE_IDX 0
+#define mmSPI_SHADER_PGM_HI_ES 0x0c85
+#define mmSPI_SHADER_PGM_HI_ES_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC3_GS 0x0c87
+#define mmSPI_SHADER_PGM_RSRC3_GS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_LO_GS 0x0c88
+#define mmSPI_SHADER_PGM_LO_GS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_HI_GS 0x0c89
+#define mmSPI_SHADER_PGM_HI_GS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC1_GS 0x0c8a
+#define mmSPI_SHADER_PGM_RSRC1_GS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC2_GS 0x0c8b
+#define mmSPI_SHADER_PGM_RSRC2_GS_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_0 0x0ccc
+#define mmSPI_SHADER_USER_DATA_ES_0_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_1 0x0ccd
+#define mmSPI_SHADER_USER_DATA_ES_1_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_2 0x0cce
+#define mmSPI_SHADER_USER_DATA_ES_2_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_3 0x0ccf
+#define mmSPI_SHADER_USER_DATA_ES_3_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_4 0x0cd0
+#define mmSPI_SHADER_USER_DATA_ES_4_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_5 0x0cd1
+#define mmSPI_SHADER_USER_DATA_ES_5_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_6 0x0cd2
+#define mmSPI_SHADER_USER_DATA_ES_6_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_7 0x0cd3
+#define mmSPI_SHADER_USER_DATA_ES_7_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_8 0x0cd4
+#define mmSPI_SHADER_USER_DATA_ES_8_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_9 0x0cd5
+#define mmSPI_SHADER_USER_DATA_ES_9_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_10 0x0cd6
+#define mmSPI_SHADER_USER_DATA_ES_10_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_11 0x0cd7
+#define mmSPI_SHADER_USER_DATA_ES_11_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_12 0x0cd8
+#define mmSPI_SHADER_USER_DATA_ES_12_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_13 0x0cd9
+#define mmSPI_SHADER_USER_DATA_ES_13_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_14 0x0cda
+#define mmSPI_SHADER_USER_DATA_ES_14_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_15 0x0cdb
+#define mmSPI_SHADER_USER_DATA_ES_15_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_16 0x0cdc
+#define mmSPI_SHADER_USER_DATA_ES_16_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_17 0x0cdd
+#define mmSPI_SHADER_USER_DATA_ES_17_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_18 0x0cde
+#define mmSPI_SHADER_USER_DATA_ES_18_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_19 0x0cdf
+#define mmSPI_SHADER_USER_DATA_ES_19_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_20 0x0ce0
+#define mmSPI_SHADER_USER_DATA_ES_20_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_21 0x0ce1
+#define mmSPI_SHADER_USER_DATA_ES_21_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_22 0x0ce2
+#define mmSPI_SHADER_USER_DATA_ES_22_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_23 0x0ce3
+#define mmSPI_SHADER_USER_DATA_ES_23_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_24 0x0ce4
+#define mmSPI_SHADER_USER_DATA_ES_24_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_25 0x0ce5
+#define mmSPI_SHADER_USER_DATA_ES_25_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_26 0x0ce6
+#define mmSPI_SHADER_USER_DATA_ES_26_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_27 0x0ce7
+#define mmSPI_SHADER_USER_DATA_ES_27_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_28 0x0ce8
+#define mmSPI_SHADER_USER_DATA_ES_28_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_29 0x0ce9
+#define mmSPI_SHADER_USER_DATA_ES_29_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_30 0x0cea
+#define mmSPI_SHADER_USER_DATA_ES_30_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ES_31 0x0ceb
+#define mmSPI_SHADER_USER_DATA_ES_31_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC4_HS 0x0d01
+#define mmSPI_SHADER_PGM_RSRC4_HS_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ADDR_LO_HS 0x0d02
+#define mmSPI_SHADER_USER_DATA_ADDR_LO_HS_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_ADDR_HI_HS 0x0d03
+#define mmSPI_SHADER_USER_DATA_ADDR_HI_HS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_LO_LS 0x0d04
+#define mmSPI_SHADER_PGM_LO_LS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_HI_LS 0x0d05
+#define mmSPI_SHADER_PGM_HI_LS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC3_HS 0x0d07
+#define mmSPI_SHADER_PGM_RSRC3_HS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_LO_HS 0x0d08
+#define mmSPI_SHADER_PGM_LO_HS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_HI_HS 0x0d09
+#define mmSPI_SHADER_PGM_HI_HS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC1_HS 0x0d0a
+#define mmSPI_SHADER_PGM_RSRC1_HS_BASE_IDX 0
+#define mmSPI_SHADER_PGM_RSRC2_HS 0x0d0b
+#define mmSPI_SHADER_PGM_RSRC2_HS_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_0 0x0d0c
+#define mmSPI_SHADER_USER_DATA_LS_0_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_1 0x0d0d
+#define mmSPI_SHADER_USER_DATA_LS_1_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_2 0x0d0e
+#define mmSPI_SHADER_USER_DATA_LS_2_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_3 0x0d0f
+#define mmSPI_SHADER_USER_DATA_LS_3_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_4 0x0d10
+#define mmSPI_SHADER_USER_DATA_LS_4_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_5 0x0d11
+#define mmSPI_SHADER_USER_DATA_LS_5_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_6 0x0d12
+#define mmSPI_SHADER_USER_DATA_LS_6_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_7 0x0d13
+#define mmSPI_SHADER_USER_DATA_LS_7_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_8 0x0d14
+#define mmSPI_SHADER_USER_DATA_LS_8_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_9 0x0d15
+#define mmSPI_SHADER_USER_DATA_LS_9_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_10 0x0d16
+#define mmSPI_SHADER_USER_DATA_LS_10_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_11 0x0d17
+#define mmSPI_SHADER_USER_DATA_LS_11_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_12 0x0d18
+#define mmSPI_SHADER_USER_DATA_LS_12_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_13 0x0d19
+#define mmSPI_SHADER_USER_DATA_LS_13_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_14 0x0d1a
+#define mmSPI_SHADER_USER_DATA_LS_14_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_15 0x0d1b
+#define mmSPI_SHADER_USER_DATA_LS_15_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_16 0x0d1c
+#define mmSPI_SHADER_USER_DATA_LS_16_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_17 0x0d1d
+#define mmSPI_SHADER_USER_DATA_LS_17_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_18 0x0d1e
+#define mmSPI_SHADER_USER_DATA_LS_18_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_19 0x0d1f
+#define mmSPI_SHADER_USER_DATA_LS_19_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_20 0x0d20
+#define mmSPI_SHADER_USER_DATA_LS_20_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_21 0x0d21
+#define mmSPI_SHADER_USER_DATA_LS_21_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_22 0x0d22
+#define mmSPI_SHADER_USER_DATA_LS_22_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_23 0x0d23
+#define mmSPI_SHADER_USER_DATA_LS_23_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_24 0x0d24
+#define mmSPI_SHADER_USER_DATA_LS_24_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_25 0x0d25
+#define mmSPI_SHADER_USER_DATA_LS_25_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_26 0x0d26
+#define mmSPI_SHADER_USER_DATA_LS_26_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_27 0x0d27
+#define mmSPI_SHADER_USER_DATA_LS_27_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_28 0x0d28
+#define mmSPI_SHADER_USER_DATA_LS_28_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_29 0x0d29
+#define mmSPI_SHADER_USER_DATA_LS_29_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_30 0x0d2a
+#define mmSPI_SHADER_USER_DATA_LS_30_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_LS_31 0x0d2b
+#define mmSPI_SHADER_USER_DATA_LS_31_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_0 0x0d4c
+#define mmSPI_SHADER_USER_DATA_COMMON_0_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_1 0x0d4d
+#define mmSPI_SHADER_USER_DATA_COMMON_1_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_2 0x0d4e
+#define mmSPI_SHADER_USER_DATA_COMMON_2_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_3 0x0d4f
+#define mmSPI_SHADER_USER_DATA_COMMON_3_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_4 0x0d50
+#define mmSPI_SHADER_USER_DATA_COMMON_4_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_5 0x0d51
+#define mmSPI_SHADER_USER_DATA_COMMON_5_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_6 0x0d52
+#define mmSPI_SHADER_USER_DATA_COMMON_6_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_7 0x0d53
+#define mmSPI_SHADER_USER_DATA_COMMON_7_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_8 0x0d54
+#define mmSPI_SHADER_USER_DATA_COMMON_8_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_9 0x0d55
+#define mmSPI_SHADER_USER_DATA_COMMON_9_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_10 0x0d56
+#define mmSPI_SHADER_USER_DATA_COMMON_10_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_11 0x0d57
+#define mmSPI_SHADER_USER_DATA_COMMON_11_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_12 0x0d58
+#define mmSPI_SHADER_USER_DATA_COMMON_12_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_13 0x0d59
+#define mmSPI_SHADER_USER_DATA_COMMON_13_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_14 0x0d5a
+#define mmSPI_SHADER_USER_DATA_COMMON_14_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_15 0x0d5b
+#define mmSPI_SHADER_USER_DATA_COMMON_15_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_16 0x0d5c
+#define mmSPI_SHADER_USER_DATA_COMMON_16_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_17 0x0d5d
+#define mmSPI_SHADER_USER_DATA_COMMON_17_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_18 0x0d5e
+#define mmSPI_SHADER_USER_DATA_COMMON_18_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_19 0x0d5f
+#define mmSPI_SHADER_USER_DATA_COMMON_19_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_20 0x0d60
+#define mmSPI_SHADER_USER_DATA_COMMON_20_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_21 0x0d61
+#define mmSPI_SHADER_USER_DATA_COMMON_21_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_22 0x0d62
+#define mmSPI_SHADER_USER_DATA_COMMON_22_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_23 0x0d63
+#define mmSPI_SHADER_USER_DATA_COMMON_23_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_24 0x0d64
+#define mmSPI_SHADER_USER_DATA_COMMON_24_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_25 0x0d65
+#define mmSPI_SHADER_USER_DATA_COMMON_25_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_26 0x0d66
+#define mmSPI_SHADER_USER_DATA_COMMON_26_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_27 0x0d67
+#define mmSPI_SHADER_USER_DATA_COMMON_27_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_28 0x0d68
+#define mmSPI_SHADER_USER_DATA_COMMON_28_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_29 0x0d69
+#define mmSPI_SHADER_USER_DATA_COMMON_29_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_30 0x0d6a
+#define mmSPI_SHADER_USER_DATA_COMMON_30_BASE_IDX 0
+#define mmSPI_SHADER_USER_DATA_COMMON_31 0x0d6b
+#define mmSPI_SHADER_USER_DATA_COMMON_31_BASE_IDX 0
+#define mmCOMPUTE_DISPATCH_INITIATOR 0x0e00
+#define mmCOMPUTE_DISPATCH_INITIATOR_BASE_IDX 0
+#define mmCOMPUTE_DIM_X 0x0e01
+#define mmCOMPUTE_DIM_X_BASE_IDX 0
+#define mmCOMPUTE_DIM_Y 0x0e02
+#define mmCOMPUTE_DIM_Y_BASE_IDX 0
+#define mmCOMPUTE_DIM_Z 0x0e03
+#define mmCOMPUTE_DIM_Z_BASE_IDX 0
+#define mmCOMPUTE_START_X 0x0e04
+#define mmCOMPUTE_START_X_BASE_IDX 0
+#define mmCOMPUTE_START_Y 0x0e05
+#define mmCOMPUTE_START_Y_BASE_IDX 0
+#define mmCOMPUTE_START_Z 0x0e06
+#define mmCOMPUTE_START_Z_BASE_IDX 0
+#define mmCOMPUTE_NUM_THREAD_X 0x0e07
+#define mmCOMPUTE_NUM_THREAD_X_BASE_IDX 0
+#define mmCOMPUTE_NUM_THREAD_Y 0x0e08
+#define mmCOMPUTE_NUM_THREAD_Y_BASE_IDX 0
+#define mmCOMPUTE_NUM_THREAD_Z 0x0e09
+#define mmCOMPUTE_NUM_THREAD_Z_BASE_IDX 0
+#define mmCOMPUTE_PIPELINESTAT_ENABLE 0x0e0a
+#define mmCOMPUTE_PIPELINESTAT_ENABLE_BASE_IDX 0
+#define mmCOMPUTE_PERFCOUNT_ENABLE 0x0e0b
+#define mmCOMPUTE_PERFCOUNT_ENABLE_BASE_IDX 0
+#define mmCOMPUTE_PGM_LO 0x0e0c
+#define mmCOMPUTE_PGM_LO_BASE_IDX 0
+#define mmCOMPUTE_PGM_HI 0x0e0d
+#define mmCOMPUTE_PGM_HI_BASE_IDX 0
+#define mmCOMPUTE_DISPATCH_PKT_ADDR_LO 0x0e0e
+#define mmCOMPUTE_DISPATCH_PKT_ADDR_LO_BASE_IDX 0
+#define mmCOMPUTE_DISPATCH_PKT_ADDR_HI 0x0e0f
+#define mmCOMPUTE_DISPATCH_PKT_ADDR_HI_BASE_IDX 0
+#define mmCOMPUTE_DISPATCH_SCRATCH_BASE_LO 0x0e10
+#define mmCOMPUTE_DISPATCH_SCRATCH_BASE_LO_BASE_IDX 0
+#define mmCOMPUTE_DISPATCH_SCRATCH_BASE_HI 0x0e11
+#define mmCOMPUTE_DISPATCH_SCRATCH_BASE_HI_BASE_IDX 0
+#define mmCOMPUTE_PGM_RSRC1 0x0e12
+#define mmCOMPUTE_PGM_RSRC1_BASE_IDX 0
+#define mmCOMPUTE_PGM_RSRC2 0x0e13
+#define mmCOMPUTE_PGM_RSRC2_BASE_IDX 0
+#define mmCOMPUTE_VMID 0x0e14
+#define mmCOMPUTE_VMID_BASE_IDX 0
+#define mmCOMPUTE_RESOURCE_LIMITS 0x0e15
+#define mmCOMPUTE_RESOURCE_LIMITS_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE0 0x0e16
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE0_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE1 0x0e17
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE1_BASE_IDX 0
+#define mmCOMPUTE_TMPRING_SIZE 0x0e18
+#define mmCOMPUTE_TMPRING_SIZE_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE2 0x0e19
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3 0x0e1a
+#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0
+#define mmCOMPUTE_RESTART_X 0x0e1b
+#define mmCOMPUTE_RESTART_X_BASE_IDX 0
+#define mmCOMPUTE_RESTART_Y 0x0e1c
+#define mmCOMPUTE_RESTART_Y_BASE_IDX 0
+#define mmCOMPUTE_RESTART_Z 0x0e1d
+#define mmCOMPUTE_RESTART_Z_BASE_IDX 0
+#define mmCOMPUTE_THREAD_TRACE_ENABLE 0x0e1e
+#define mmCOMPUTE_THREAD_TRACE_ENABLE_BASE_IDX 0
+#define mmCOMPUTE_MISC_RESERVED 0x0e1f
+#define mmCOMPUTE_MISC_RESERVED_BASE_IDX 0
+#define mmCOMPUTE_DISPATCH_ID 0x0e20
+#define mmCOMPUTE_DISPATCH_ID_BASE_IDX 0
+#define mmCOMPUTE_THREADGROUP_ID 0x0e21
+#define mmCOMPUTE_THREADGROUP_ID_BASE_IDX 0
+#define mmCOMPUTE_RELAUNCH 0x0e22
+#define mmCOMPUTE_RELAUNCH_BASE_IDX 0
+#define mmCOMPUTE_WAVE_RESTORE_ADDR_LO 0x0e23
+#define mmCOMPUTE_WAVE_RESTORE_ADDR_LO_BASE_IDX 0
+#define mmCOMPUTE_WAVE_RESTORE_ADDR_HI 0x0e24
+#define mmCOMPUTE_WAVE_RESTORE_ADDR_HI_BASE_IDX 0
+#define mmCOMPUTE_SHADER_CHKSUM 0x0e25
+#define mmCOMPUTE_SHADER_CHKSUM_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_0 0x0e40
+#define mmCOMPUTE_USER_DATA_0_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_1 0x0e41
+#define mmCOMPUTE_USER_DATA_1_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_2 0x0e42
+#define mmCOMPUTE_USER_DATA_2_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_3 0x0e43
+#define mmCOMPUTE_USER_DATA_3_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_4 0x0e44
+#define mmCOMPUTE_USER_DATA_4_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_5 0x0e45
+#define mmCOMPUTE_USER_DATA_5_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_6 0x0e46
+#define mmCOMPUTE_USER_DATA_6_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_7 0x0e47
+#define mmCOMPUTE_USER_DATA_7_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_8 0x0e48
+#define mmCOMPUTE_USER_DATA_8_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_9 0x0e49
+#define mmCOMPUTE_USER_DATA_9_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_10 0x0e4a
+#define mmCOMPUTE_USER_DATA_10_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_11 0x0e4b
+#define mmCOMPUTE_USER_DATA_11_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_12 0x0e4c
+#define mmCOMPUTE_USER_DATA_12_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_13 0x0e4d
+#define mmCOMPUTE_USER_DATA_13_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_14 0x0e4e
+#define mmCOMPUTE_USER_DATA_14_BASE_IDX 0
+#define mmCOMPUTE_USER_DATA_15 0x0e4f
+#define mmCOMPUTE_USER_DATA_15_BASE_IDX 0
+#define mmCOMPUTE_DISPATCH_END 0x0e7e
+#define mmCOMPUTE_DISPATCH_END_BASE_IDX 0
+#define mmCOMPUTE_NOWHERE 0x0e7f
+#define mmCOMPUTE_NOWHERE_BASE_IDX 0
+
+
+// addressBlock: gc_cppdec
+// base address: 0xc080
+#define mmCP_DFY_CNTL 0x1020
+#define mmCP_DFY_CNTL_BASE_IDX 0
+#define mmCP_DFY_STAT 0x1021
+#define mmCP_DFY_STAT_BASE_IDX 0
+#define mmCP_DFY_ADDR_HI 0x1022
+#define mmCP_DFY_ADDR_HI_BASE_IDX 0
+#define mmCP_DFY_ADDR_LO 0x1023
+#define mmCP_DFY_ADDR_LO_BASE_IDX 0
+#define mmCP_DFY_DATA_0 0x1024
+#define mmCP_DFY_DATA_0_BASE_IDX 0
+#define mmCP_DFY_DATA_1 0x1025
+#define mmCP_DFY_DATA_1_BASE_IDX 0
+#define mmCP_DFY_DATA_2 0x1026
+#define mmCP_DFY_DATA_2_BASE_IDX 0
+#define mmCP_DFY_DATA_3 0x1027
+#define mmCP_DFY_DATA_3_BASE_IDX 0
+#define mmCP_DFY_DATA_4 0x1028
+#define mmCP_DFY_DATA_4_BASE_IDX 0
+#define mmCP_DFY_DATA_5 0x1029
+#define mmCP_DFY_DATA_5_BASE_IDX 0
+#define mmCP_DFY_DATA_6 0x102a
+#define mmCP_DFY_DATA_6_BASE_IDX 0
+#define mmCP_DFY_DATA_7 0x102b
+#define mmCP_DFY_DATA_7_BASE_IDX 0
+#define mmCP_DFY_DATA_8 0x102c
+#define mmCP_DFY_DATA_8_BASE_IDX 0
+#define mmCP_DFY_DATA_9 0x102d
+#define mmCP_DFY_DATA_9_BASE_IDX 0
+#define mmCP_DFY_DATA_10 0x102e
+#define mmCP_DFY_DATA_10_BASE_IDX 0
+#define mmCP_DFY_DATA_11 0x102f
+#define mmCP_DFY_DATA_11_BASE_IDX 0
+#define mmCP_DFY_DATA_12 0x1030
+#define mmCP_DFY_DATA_12_BASE_IDX 0
+#define mmCP_DFY_DATA_13 0x1031
+#define mmCP_DFY_DATA_13_BASE_IDX 0
+#define mmCP_DFY_DATA_14 0x1032
+#define mmCP_DFY_DATA_14_BASE_IDX 0
+#define mmCP_DFY_DATA_15 0x1033
+#define mmCP_DFY_DATA_15_BASE_IDX 0
+#define mmCP_DFY_CMD 0x1034
+#define mmCP_DFY_CMD_BASE_IDX 0
+#define mmCP_EOPQ_WAIT_TIME 0x1035
+#define mmCP_EOPQ_WAIT_TIME_BASE_IDX 0
+#define mmCP_CPC_MGCG_SYNC_CNTL 0x1036
+#define mmCP_CPC_MGCG_SYNC_CNTL_BASE_IDX 0
+#define mmCPC_INT_INFO 0x1037
+#define mmCPC_INT_INFO_BASE_IDX 0
+#define mmCP_VIRT_STATUS 0x1038
+#define mmCP_VIRT_STATUS_BASE_IDX 0
+#define mmCPC_INT_ADDR 0x1039
+#define mmCPC_INT_ADDR_BASE_IDX 0
+#define mmCPC_INT_PASID 0x103a
+#define mmCPC_INT_PASID_BASE_IDX 0
+#define mmCP_GFX_ERROR 0x103b
+#define mmCP_GFX_ERROR_BASE_IDX 0
+#define mmCPG_UTCL1_CNTL 0x103c
+#define mmCPG_UTCL1_CNTL_BASE_IDX 0
+#define mmCPC_UTCL1_CNTL 0x103d
+#define mmCPC_UTCL1_CNTL_BASE_IDX 0
+#define mmCPF_UTCL1_CNTL 0x103e
+#define mmCPF_UTCL1_CNTL_BASE_IDX 0
+#define mmCP_AQL_SMM_STATUS 0x103f
+#define mmCP_AQL_SMM_STATUS_BASE_IDX 0
+#define mmCP_RB0_BASE 0x1040
+#define mmCP_RB0_BASE_BASE_IDX 0
+#define mmCP_RB_BASE 0x1040
+#define mmCP_RB_BASE_BASE_IDX 0
+#define mmCP_RB0_CNTL 0x1041
+#define mmCP_RB0_CNTL_BASE_IDX 0
+#define mmCP_RB_CNTL 0x1041
+#define mmCP_RB_CNTL_BASE_IDX 0
+#define mmCP_RB_RPTR_WR 0x1042
+#define mmCP_RB_RPTR_WR_BASE_IDX 0
+#define mmCP_RB0_RPTR_ADDR 0x1043
+#define mmCP_RB0_RPTR_ADDR_BASE_IDX 0
+#define mmCP_RB_RPTR_ADDR 0x1043
+#define mmCP_RB_RPTR_ADDR_BASE_IDX 0
+#define mmCP_RB0_RPTR_ADDR_HI 0x1044
+#define mmCP_RB0_RPTR_ADDR_HI_BASE_IDX 0
+#define mmCP_RB_RPTR_ADDR_HI 0x1044
+#define mmCP_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmCP_RB0_BUFSZ_MASK 0x1045
+#define mmCP_RB0_BUFSZ_MASK_BASE_IDX 0
+#define mmCP_RB_BUFSZ_MASK 0x1045
+#define mmCP_RB_BUFSZ_MASK_BASE_IDX 0
+#define mmCP_RB_WPTR_POLL_ADDR_LO 0x1046
+#define mmCP_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmCP_RB_WPTR_POLL_ADDR_HI 0x1047
+#define mmCP_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmGC_PRIV_MODE 0x1048
+#define mmGC_PRIV_MODE_BASE_IDX 0
+#define mmCP_INT_CNTL 0x1049
+#define mmCP_INT_CNTL_BASE_IDX 0
+#define mmCP_INT_STATUS 0x104a
+#define mmCP_INT_STATUS_BASE_IDX 0
+#define mmCP_DEVICE_ID 0x104b
+#define mmCP_DEVICE_ID_BASE_IDX 0
+#define mmCP_ME0_PIPE_PRIORITY_CNTS 0x104c
+#define mmCP_ME0_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define mmCP_RING_PRIORITY_CNTS 0x104c
+#define mmCP_RING_PRIORITY_CNTS_BASE_IDX 0
+#define mmCP_ME0_PIPE0_PRIORITY 0x104d
+#define mmCP_ME0_PIPE0_PRIORITY_BASE_IDX 0
+#define mmCP_RING0_PRIORITY 0x104d
+#define mmCP_RING0_PRIORITY_BASE_IDX 0
+#define mmCP_ME0_PIPE1_PRIORITY 0x104e
+#define mmCP_ME0_PIPE1_PRIORITY_BASE_IDX 0
+#define mmCP_RING1_PRIORITY 0x104e
+#define mmCP_RING1_PRIORITY_BASE_IDX 0
+#define mmCP_ME0_PIPE2_PRIORITY 0x104f
+#define mmCP_ME0_PIPE2_PRIORITY_BASE_IDX 0
+#define mmCP_RING2_PRIORITY 0x104f
+#define mmCP_RING2_PRIORITY_BASE_IDX 0
+#define mmCP_FATAL_ERROR 0x1050
+#define mmCP_FATAL_ERROR_BASE_IDX 0
+#define mmCP_RB_VMID 0x1051
+#define mmCP_RB_VMID_BASE_IDX 0
+#define mmCP_ME0_PIPE0_VMID 0x1052
+#define mmCP_ME0_PIPE0_VMID_BASE_IDX 0
+#define mmCP_ME0_PIPE1_VMID 0x1053
+#define mmCP_ME0_PIPE1_VMID_BASE_IDX 0
+#define mmCP_RB0_WPTR 0x1054
+#define mmCP_RB0_WPTR_BASE_IDX 0
+#define mmCP_RB_WPTR 0x1054
+#define mmCP_RB_WPTR_BASE_IDX 0
+#define mmCP_RB0_WPTR_HI 0x1055
+#define mmCP_RB0_WPTR_HI_BASE_IDX 0
+#define mmCP_RB_WPTR_HI 0x1055
+#define mmCP_RB_WPTR_HI_BASE_IDX 0
+#define mmCP_RB1_WPTR 0x1056
+#define mmCP_RB1_WPTR_BASE_IDX 0
+#define mmCP_RB1_WPTR_HI 0x1057
+#define mmCP_RB1_WPTR_HI_BASE_IDX 0
+#define mmCP_RB2_WPTR 0x1058
+#define mmCP_RB2_WPTR_BASE_IDX 0
+#define mmCP_RB_DOORBELL_CONTROL 0x1059
+#define mmCP_RB_DOORBELL_CONTROL_BASE_IDX 0
+#define mmCP_RB_DOORBELL_RANGE_LOWER 0x105a
+#define mmCP_RB_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define mmCP_RB_DOORBELL_RANGE_UPPER 0x105b
+#define mmCP_RB_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define mmCP_MEC_DOORBELL_RANGE_LOWER 0x105c
+#define mmCP_MEC_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define mmCP_MEC_DOORBELL_RANGE_UPPER 0x105d
+#define mmCP_MEC_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define mmCPG_UTCL1_ERROR 0x105e
+#define mmCPG_UTCL1_ERROR_BASE_IDX 0
+#define mmCPC_UTCL1_ERROR 0x105f
+#define mmCPC_UTCL1_ERROR_BASE_IDX 0
+#define mmCP_RB1_BASE 0x1060
+#define mmCP_RB1_BASE_BASE_IDX 0
+#define mmCP_RB1_CNTL 0x1061
+#define mmCP_RB1_CNTL_BASE_IDX 0
+#define mmCP_RB1_RPTR_ADDR 0x1062
+#define mmCP_RB1_RPTR_ADDR_BASE_IDX 0
+#define mmCP_RB1_RPTR_ADDR_HI 0x1063
+#define mmCP_RB1_RPTR_ADDR_HI_BASE_IDX 0
+#define mmCP_RB2_BASE 0x1065
+#define mmCP_RB2_BASE_BASE_IDX 0
+#define mmCP_RB2_CNTL 0x1066
+#define mmCP_RB2_CNTL_BASE_IDX 0
+#define mmCP_RB2_RPTR_ADDR 0x1067
+#define mmCP_RB2_RPTR_ADDR_BASE_IDX 0
+#define mmCP_RB2_RPTR_ADDR_HI 0x1068
+#define mmCP_RB2_RPTR_ADDR_HI_BASE_IDX 0
+#define mmCP_RB0_ACTIVE 0x1069
+#define mmCP_RB0_ACTIVE_BASE_IDX 0
+#define mmCP_RB_ACTIVE 0x1069
+#define mmCP_RB_ACTIVE_BASE_IDX 0
+#define mmCP_INT_CNTL_RING0 0x106a
+#define mmCP_INT_CNTL_RING0_BASE_IDX 0
+#define mmCP_INT_CNTL_RING1 0x106b
+#define mmCP_INT_CNTL_RING1_BASE_IDX 0
+#define mmCP_INT_CNTL_RING2 0x106c
+#define mmCP_INT_CNTL_RING2_BASE_IDX 0
+#define mmCP_INT_STATUS_RING0 0x106d
+#define mmCP_INT_STATUS_RING0_BASE_IDX 0
+#define mmCP_INT_STATUS_RING1 0x106e
+#define mmCP_INT_STATUS_RING1_BASE_IDX 0
+#define mmCP_INT_STATUS_RING2 0x106f
+#define mmCP_INT_STATUS_RING2_BASE_IDX 0
+#define mmCP_PWR_CNTL 0x1078
+#define mmCP_PWR_CNTL_BASE_IDX 0
+#define mmCP_MEM_SLP_CNTL 0x1079
+#define mmCP_MEM_SLP_CNTL_BASE_IDX 0
+#define mmCP_ECC_FIRSTOCCURRENCE 0x107a
+#define mmCP_ECC_FIRSTOCCURRENCE_BASE_IDX 0
+#define mmCP_ECC_FIRSTOCCURRENCE_RING0 0x107b
+#define mmCP_ECC_FIRSTOCCURRENCE_RING0_BASE_IDX 0
+#define mmCP_ECC_FIRSTOCCURRENCE_RING1 0x107c
+#define mmCP_ECC_FIRSTOCCURRENCE_RING1_BASE_IDX 0
+#define mmCP_ECC_FIRSTOCCURRENCE_RING2 0x107d
+#define mmCP_ECC_FIRSTOCCURRENCE_RING2_BASE_IDX 0
+#define mmCP_PQ_WPTR_POLL_CNTL 0x1083
+#define mmCP_PQ_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmCP_PQ_WPTR_POLL_CNTL1 0x1084
+#define mmCP_PQ_WPTR_POLL_CNTL1_BASE_IDX 0
+#define mmCP_ME1_PIPE0_INT_CNTL 0x1085
+#define mmCP_ME1_PIPE0_INT_CNTL_BASE_IDX 0
+#define mmCP_ME1_PIPE1_INT_CNTL 0x1086
+#define mmCP_ME1_PIPE1_INT_CNTL_BASE_IDX 0
+#define mmCP_ME1_PIPE2_INT_CNTL 0x1087
+#define mmCP_ME1_PIPE2_INT_CNTL_BASE_IDX 0
+#define mmCP_ME1_PIPE3_INT_CNTL 0x1088
+#define mmCP_ME1_PIPE3_INT_CNTL_BASE_IDX 0
+#define mmCP_ME2_PIPE0_INT_CNTL 0x1089
+#define mmCP_ME2_PIPE0_INT_CNTL_BASE_IDX 0
+#define mmCP_ME2_PIPE1_INT_CNTL 0x108a
+#define mmCP_ME2_PIPE1_INT_CNTL_BASE_IDX 0
+#define mmCP_ME2_PIPE2_INT_CNTL 0x108b
+#define mmCP_ME2_PIPE2_INT_CNTL_BASE_IDX 0
+#define mmCP_ME2_PIPE3_INT_CNTL 0x108c
+#define mmCP_ME2_PIPE3_INT_CNTL_BASE_IDX 0
+#define mmCP_ME1_PIPE0_INT_STATUS 0x108d
+#define mmCP_ME1_PIPE0_INT_STATUS_BASE_IDX 0
+#define mmCP_ME1_PIPE1_INT_STATUS 0x108e
+#define mmCP_ME1_PIPE1_INT_STATUS_BASE_IDX 0
+#define mmCP_ME1_PIPE2_INT_STATUS 0x108f
+#define mmCP_ME1_PIPE2_INT_STATUS_BASE_IDX 0
+#define mmCP_ME1_PIPE3_INT_STATUS 0x1090
+#define mmCP_ME1_PIPE3_INT_STATUS_BASE_IDX 0
+#define mmCP_ME2_PIPE0_INT_STATUS 0x1091
+#define mmCP_ME2_PIPE0_INT_STATUS_BASE_IDX 0
+#define mmCP_ME2_PIPE1_INT_STATUS 0x1092
+#define mmCP_ME2_PIPE1_INT_STATUS_BASE_IDX 0
+#define mmCP_ME2_PIPE2_INT_STATUS 0x1093
+#define mmCP_ME2_PIPE2_INT_STATUS_BASE_IDX 0
+#define mmCP_ME2_PIPE3_INT_STATUS 0x1094
+#define mmCP_ME2_PIPE3_INT_STATUS_BASE_IDX 0
+#define mmCP_ME1_PIPE_PRIORITY_CNTS 0x1099
+#define mmCP_ME1_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define mmCP_ME1_PIPE0_PRIORITY 0x109a
+#define mmCP_ME1_PIPE0_PRIORITY_BASE_IDX 0
+#define mmCP_ME1_PIPE1_PRIORITY 0x109b
+#define mmCP_ME1_PIPE1_PRIORITY_BASE_IDX 0
+#define mmCP_ME1_PIPE2_PRIORITY 0x109c
+#define mmCP_ME1_PIPE2_PRIORITY_BASE_IDX 0
+#define mmCP_ME1_PIPE3_PRIORITY 0x109d
+#define mmCP_ME1_PIPE3_PRIORITY_BASE_IDX 0
+#define mmCP_ME2_PIPE_PRIORITY_CNTS 0x109e
+#define mmCP_ME2_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define mmCP_ME2_PIPE0_PRIORITY 0x109f
+#define mmCP_ME2_PIPE0_PRIORITY_BASE_IDX 0
+#define mmCP_ME2_PIPE1_PRIORITY 0x10a0
+#define mmCP_ME2_PIPE1_PRIORITY_BASE_IDX 0
+#define mmCP_ME2_PIPE2_PRIORITY 0x10a1
+#define mmCP_ME2_PIPE2_PRIORITY_BASE_IDX 0
+#define mmCP_ME2_PIPE3_PRIORITY 0x10a2
+#define mmCP_ME2_PIPE3_PRIORITY_BASE_IDX 0
+#define mmCP_CE_PRGRM_CNTR_START 0x10a3
+#define mmCP_CE_PRGRM_CNTR_START_BASE_IDX 0
+#define mmCP_PFP_PRGRM_CNTR_START 0x10a4
+#define mmCP_PFP_PRGRM_CNTR_START_BASE_IDX 0
+#define mmCP_ME_PRGRM_CNTR_START 0x10a5
+#define mmCP_ME_PRGRM_CNTR_START_BASE_IDX 0
+#define mmCP_MEC1_PRGRM_CNTR_START 0x10a6
+#define mmCP_MEC1_PRGRM_CNTR_START_BASE_IDX 0
+#define mmCP_MEC2_PRGRM_CNTR_START 0x10a7
+#define mmCP_MEC2_PRGRM_CNTR_START_BASE_IDX 0
+#define mmCP_CE_INTR_ROUTINE_START 0x10a8
+#define mmCP_CE_INTR_ROUTINE_START_BASE_IDX 0
+#define mmCP_PFP_INTR_ROUTINE_START 0x10a9
+#define mmCP_PFP_INTR_ROUTINE_START_BASE_IDX 0
+#define mmCP_ME_INTR_ROUTINE_START 0x10aa
+#define mmCP_ME_INTR_ROUTINE_START_BASE_IDX 0
+#define mmCP_MEC1_INTR_ROUTINE_START 0x10ab
+#define mmCP_MEC1_INTR_ROUTINE_START_BASE_IDX 0
+#define mmCP_MEC2_INTR_ROUTINE_START 0x10ac
+#define mmCP_MEC2_INTR_ROUTINE_START_BASE_IDX 0
+#define mmCP_CONTEXT_CNTL 0x10ad
+#define mmCP_CONTEXT_CNTL_BASE_IDX 0
+#define mmCP_MAX_CONTEXT 0x10ae
+#define mmCP_MAX_CONTEXT_BASE_IDX 0
+#define mmCP_IQ_WAIT_TIME1 0x10af
+#define mmCP_IQ_WAIT_TIME1_BASE_IDX 0
+#define mmCP_IQ_WAIT_TIME2 0x10b0
+#define mmCP_IQ_WAIT_TIME2_BASE_IDX 0
+#define mmCP_RB0_BASE_HI 0x10b1
+#define mmCP_RB0_BASE_HI_BASE_IDX 0
+#define mmCP_RB1_BASE_HI 0x10b2
+#define mmCP_RB1_BASE_HI_BASE_IDX 0
+#define mmCP_VMID_RESET 0x10b3
+#define mmCP_VMID_RESET_BASE_IDX 0
+#define mmCPC_INT_CNTL 0x10b4
+#define mmCPC_INT_CNTL_BASE_IDX 0
+#define mmCPC_INT_STATUS 0x10b5
+#define mmCPC_INT_STATUS_BASE_IDX 0
+#define mmCP_VMID_PREEMPT 0x10b6
+#define mmCP_VMID_PREEMPT_BASE_IDX 0
+#define mmCPC_INT_CNTX_ID 0x10b7
+#define mmCPC_INT_CNTX_ID_BASE_IDX 0
+#define mmCP_PQ_STATUS 0x10b8
+#define mmCP_PQ_STATUS_BASE_IDX 0
+#define mmCP_CPC_IC_BASE_LO 0x10b9
+#define mmCP_CPC_IC_BASE_LO_BASE_IDX 0
+#define mmCP_CPC_IC_BASE_HI 0x10ba
+#define mmCP_CPC_IC_BASE_HI_BASE_IDX 0
+#define mmCP_CPC_IC_BASE_CNTL 0x10bb
+#define mmCP_CPC_IC_BASE_CNTL_BASE_IDX 0
+#define mmCP_CPC_IC_OP_CNTL 0x10bc
+#define mmCP_CPC_IC_OP_CNTL_BASE_IDX 0
+#define mmCP_MEC1_F32_INT_DIS 0x10bd
+#define mmCP_MEC1_F32_INT_DIS_BASE_IDX 0
+#define mmCP_MEC2_F32_INT_DIS 0x10be
+#define mmCP_MEC2_F32_INT_DIS_BASE_IDX 0
+#define mmCP_VMID_STATUS 0x10bf
+#define mmCP_VMID_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_cppdec2
+// base address: 0xc600
+#define mmCP_RB_DOORBELL_CONTROL_SCH_0 0x1180
+#define mmCP_RB_DOORBELL_CONTROL_SCH_0_BASE_IDX 0
+#define mmCP_RB_DOORBELL_CONTROL_SCH_1 0x1181
+#define mmCP_RB_DOORBELL_CONTROL_SCH_1_BASE_IDX 0
+#define mmCP_RB_DOORBELL_CONTROL_SCH_2 0x1182
+#define mmCP_RB_DOORBELL_CONTROL_SCH_2_BASE_IDX 0
+#define mmCP_RB_DOORBELL_CONTROL_SCH_3 0x1183
+#define mmCP_RB_DOORBELL_CONTROL_SCH_3_BASE_IDX 0
+#define mmCP_RB_DOORBELL_CONTROL_SCH_4 0x1184
+#define mmCP_RB_DOORBELL_CONTROL_SCH_4_BASE_IDX 0
+#define mmCP_RB_DOORBELL_CONTROL_SCH_5 0x1185
+#define mmCP_RB_DOORBELL_CONTROL_SCH_5_BASE_IDX 0
+#define mmCP_RB_DOORBELL_CONTROL_SCH_6 0x1186
+#define mmCP_RB_DOORBELL_CONTROL_SCH_6_BASE_IDX 0
+#define mmCP_RB_DOORBELL_CONTROL_SCH_7 0x1187
+#define mmCP_RB_DOORBELL_CONTROL_SCH_7_BASE_IDX 0
+#define mmCP_RB_DOORBELL_CLEAR 0x1188
+#define mmCP_RB_DOORBELL_CLEAR_BASE_IDX 0
+#define mmCP_GFX_MQD_CONTROL 0x11a0
+#define mmCP_GFX_MQD_CONTROL_BASE_IDX 0
+#define mmCP_GFX_MQD_BASE_ADDR 0x11a1
+#define mmCP_GFX_MQD_BASE_ADDR_BASE_IDX 0
+#define mmCP_GFX_MQD_BASE_ADDR_HI 0x11a2
+#define mmCP_GFX_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define mmCP_RB_STATUS 0x11a3
+#define mmCP_RB_STATUS_BASE_IDX 0
+#define mmCPG_UTCL1_STATUS 0x11b4
+#define mmCPG_UTCL1_STATUS_BASE_IDX 0
+#define mmCPC_UTCL1_STATUS 0x11b5
+#define mmCPC_UTCL1_STATUS_BASE_IDX 0
+#define mmCPF_UTCL1_STATUS 0x11b6
+#define mmCPF_UTCL1_STATUS_BASE_IDX 0
+#define mmCP_SD_CNTL 0x11b7
+#define mmCP_SD_CNTL_BASE_IDX 0
+#define mmCP_SOFT_RESET_CNTL 0x11b9
+#define mmCP_SOFT_RESET_CNTL_BASE_IDX 0
+#define mmCP_CPC_GFX_CNTL 0x11ba
+#define mmCP_CPC_GFX_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_spipdec
+// base address: 0xc700
+#define mmSPI_ARB_PRIORITY 0x11c0
+#define mmSPI_ARB_PRIORITY_BASE_IDX 0
+#define mmSPI_ARB_CYCLES_0 0x11c1
+#define mmSPI_ARB_CYCLES_0_BASE_IDX 0
+#define mmSPI_ARB_CYCLES_1 0x11c2
+#define mmSPI_ARB_CYCLES_1_BASE_IDX 0
+#define mmSPI_CDBG_SYS_GFX 0x11c3
+#define mmSPI_CDBG_SYS_GFX_BASE_IDX 0
+#define mmSPI_CDBG_SYS_HP3D 0x11c4
+#define mmSPI_CDBG_SYS_HP3D_BASE_IDX 0
+#define mmSPI_CDBG_SYS_CS0 0x11c5
+#define mmSPI_CDBG_SYS_CS0_BASE_IDX 0
+#define mmSPI_CDBG_SYS_CS1 0x11c6
+#define mmSPI_CDBG_SYS_CS1_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_GFX 0x11c7
+#define mmSPI_WCL_PIPE_PERCENT_GFX_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_HP3D 0x11c8
+#define mmSPI_WCL_PIPE_PERCENT_HP3D_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_CS0 0x11c9
+#define mmSPI_WCL_PIPE_PERCENT_CS0_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_CS1 0x11ca
+#define mmSPI_WCL_PIPE_PERCENT_CS1_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_CS2 0x11cb
+#define mmSPI_WCL_PIPE_PERCENT_CS2_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_CS3 0x11cc
+#define mmSPI_WCL_PIPE_PERCENT_CS3_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_CS4 0x11cd
+#define mmSPI_WCL_PIPE_PERCENT_CS4_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_CS5 0x11ce
+#define mmSPI_WCL_PIPE_PERCENT_CS5_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_CS6 0x11cf
+#define mmSPI_WCL_PIPE_PERCENT_CS6_BASE_IDX 0
+#define mmSPI_WCL_PIPE_PERCENT_CS7 0x11d0
+#define mmSPI_WCL_PIPE_PERCENT_CS7_BASE_IDX 0
+#define mmSPI_GDBG_WAVE_CNTL 0x11d1
+#define mmSPI_GDBG_WAVE_CNTL_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_CONFIG 0x11d2
+#define mmSPI_GDBG_TRAP_CONFIG_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_MASK 0x11d3
+#define mmSPI_GDBG_TRAP_MASK_BASE_IDX 0
+#define mmSPI_GDBG_WAVE_CNTL2 0x11d4
+#define mmSPI_GDBG_WAVE_CNTL2_BASE_IDX 0
+#define mmSPI_GDBG_WAVE_CNTL3 0x11d5
+#define mmSPI_GDBG_WAVE_CNTL3_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_DATA0 0x11d8
+#define mmSPI_GDBG_TRAP_DATA0_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_DATA1 0x11d9
+#define mmSPI_GDBG_TRAP_DATA1_BASE_IDX 0
+#define mmSPI_COMPUTE_QUEUE_RESET 0x11db
+#define mmSPI_COMPUTE_QUEUE_RESET_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_0 0x11dc
+#define mmSPI_RESOURCE_RESERVE_CU_0_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_1 0x11dd
+#define mmSPI_RESOURCE_RESERVE_CU_1_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_2 0x11de
+#define mmSPI_RESOURCE_RESERVE_CU_2_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_3 0x11df
+#define mmSPI_RESOURCE_RESERVE_CU_3_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_4 0x11e0
+#define mmSPI_RESOURCE_RESERVE_CU_4_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_5 0x11e1
+#define mmSPI_RESOURCE_RESERVE_CU_5_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_6 0x11e2
+#define mmSPI_RESOURCE_RESERVE_CU_6_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_7 0x11e3
+#define mmSPI_RESOURCE_RESERVE_CU_7_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_8 0x11e4
+#define mmSPI_RESOURCE_RESERVE_CU_8_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_9 0x11e5
+#define mmSPI_RESOURCE_RESERVE_CU_9_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_0 0x11e6
+#define mmSPI_RESOURCE_RESERVE_EN_CU_0_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_1 0x11e7
+#define mmSPI_RESOURCE_RESERVE_EN_CU_1_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_2 0x11e8
+#define mmSPI_RESOURCE_RESERVE_EN_CU_2_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_3 0x11e9
+#define mmSPI_RESOURCE_RESERVE_EN_CU_3_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_4 0x11ea
+#define mmSPI_RESOURCE_RESERVE_EN_CU_4_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_5 0x11eb
+#define mmSPI_RESOURCE_RESERVE_EN_CU_5_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_6 0x11ec
+#define mmSPI_RESOURCE_RESERVE_EN_CU_6_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_7 0x11ed
+#define mmSPI_RESOURCE_RESERVE_EN_CU_7_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_8 0x11ee
+#define mmSPI_RESOURCE_RESERVE_EN_CU_8_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_9 0x11ef
+#define mmSPI_RESOURCE_RESERVE_EN_CU_9_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_10 0x11f0
+#define mmSPI_RESOURCE_RESERVE_CU_10_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_11 0x11f1
+#define mmSPI_RESOURCE_RESERVE_CU_11_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_10 0x11f2
+#define mmSPI_RESOURCE_RESERVE_EN_CU_10_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_11 0x11f3
+#define mmSPI_RESOURCE_RESERVE_EN_CU_11_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_12 0x11f4
+#define mmSPI_RESOURCE_RESERVE_CU_12_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_13 0x11f5
+#define mmSPI_RESOURCE_RESERVE_CU_13_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_14 0x11f6
+#define mmSPI_RESOURCE_RESERVE_CU_14_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_CU_15 0x11f7
+#define mmSPI_RESOURCE_RESERVE_CU_15_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_12 0x11f8
+#define mmSPI_RESOURCE_RESERVE_EN_CU_12_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_13 0x11f9
+#define mmSPI_RESOURCE_RESERVE_EN_CU_13_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_14 0x11fa
+#define mmSPI_RESOURCE_RESERVE_EN_CU_14_BASE_IDX 0
+#define mmSPI_RESOURCE_RESERVE_EN_CU_15 0x11fb
+#define mmSPI_RESOURCE_RESERVE_EN_CU_15_BASE_IDX 0
+#define mmSPI_COMPUTE_WF_CTX_SAVE 0x11fc
+#define mmSPI_COMPUTE_WF_CTX_SAVE_BASE_IDX 0
+#define mmSPI_ARB_CNTL_0 0x11fd
+#define mmSPI_ARB_CNTL_0_BASE_IDX 0
+
+
+// addressBlock: gc_cpphqddec
+// base address: 0xc800
+#define mmCP_HQD_GFX_CONTROL 0x123e
+#define mmCP_HQD_GFX_CONTROL_BASE_IDX 0
+#define mmCP_HQD_GFX_STATUS 0x123f
+#define mmCP_HQD_GFX_STATUS_BASE_IDX 0
+#define mmCP_HPD_ROQ_OFFSETS 0x1240
+#define mmCP_HPD_ROQ_OFFSETS_BASE_IDX 0
+#define mmCP_HPD_STATUS0 0x1241
+#define mmCP_HPD_STATUS0_BASE_IDX 0
+#define mmCP_HPD_UTCL1_CNTL 0x1242
+#define mmCP_HPD_UTCL1_CNTL_BASE_IDX 0
+#define mmCP_HPD_UTCL1_ERROR 0x1243
+#define mmCP_HPD_UTCL1_ERROR_BASE_IDX 0
+#define mmCP_HPD_UTCL1_ERROR_ADDR 0x1244
+#define mmCP_HPD_UTCL1_ERROR_ADDR_BASE_IDX 0
+#define mmCP_MQD_BASE_ADDR 0x1245
+#define mmCP_MQD_BASE_ADDR_BASE_IDX 0
+#define mmCP_MQD_BASE_ADDR_HI 0x1246
+#define mmCP_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define mmCP_HQD_ACTIVE 0x1247
+#define mmCP_HQD_ACTIVE_BASE_IDX 0
+#define mmCP_HQD_VMID 0x1248
+#define mmCP_HQD_VMID_BASE_IDX 0
+#define mmCP_HQD_PERSISTENT_STATE 0x1249
+#define mmCP_HQD_PERSISTENT_STATE_BASE_IDX 0
+#define mmCP_HQD_PIPE_PRIORITY 0x124a
+#define mmCP_HQD_PIPE_PRIORITY_BASE_IDX 0
+#define mmCP_HQD_QUEUE_PRIORITY 0x124b
+#define mmCP_HQD_QUEUE_PRIORITY_BASE_IDX 0
+#define mmCP_HQD_QUANTUM 0x124c
+#define mmCP_HQD_QUANTUM_BASE_IDX 0
+#define mmCP_HQD_PQ_BASE 0x124d
+#define mmCP_HQD_PQ_BASE_BASE_IDX 0
+#define mmCP_HQD_PQ_BASE_HI 0x124e
+#define mmCP_HQD_PQ_BASE_HI_BASE_IDX 0
+#define mmCP_HQD_PQ_RPTR 0x124f
+#define mmCP_HQD_PQ_RPTR_BASE_IDX 0
+#define mmCP_HQD_PQ_RPTR_REPORT_ADDR 0x1250
+#define mmCP_HQD_PQ_RPTR_REPORT_ADDR_BASE_IDX 0
+#define mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI 0x1251
+#define mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI_BASE_IDX 0
+#define mmCP_HQD_PQ_WPTR_POLL_ADDR 0x1252
+#define mmCP_HQD_PQ_WPTR_POLL_ADDR_BASE_IDX 0
+#define mmCP_HQD_PQ_WPTR_POLL_ADDR_HI 0x1253
+#define mmCP_HQD_PQ_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmCP_HQD_PQ_DOORBELL_CONTROL 0x1254
+#define mmCP_HQD_PQ_DOORBELL_CONTROL_BASE_IDX 0
+#define mmCP_HQD_PQ_CONTROL 0x1256
+#define mmCP_HQD_PQ_CONTROL_BASE_IDX 0
+#define mmCP_HQD_IB_BASE_ADDR 0x1257
+#define mmCP_HQD_IB_BASE_ADDR_BASE_IDX 0
+#define mmCP_HQD_IB_BASE_ADDR_HI 0x1258
+#define mmCP_HQD_IB_BASE_ADDR_HI_BASE_IDX 0
+#define mmCP_HQD_IB_RPTR 0x1259
+#define mmCP_HQD_IB_RPTR_BASE_IDX 0
+#define mmCP_HQD_IB_CONTROL 0x125a
+#define mmCP_HQD_IB_CONTROL_BASE_IDX 0
+#define mmCP_HQD_IQ_TIMER 0x125b
+#define mmCP_HQD_IQ_TIMER_BASE_IDX 0
+#define mmCP_HQD_IQ_RPTR 0x125c
+#define mmCP_HQD_IQ_RPTR_BASE_IDX 0
+#define mmCP_HQD_DEQUEUE_REQUEST 0x125d
+#define mmCP_HQD_DEQUEUE_REQUEST_BASE_IDX 0
+#define mmCP_HQD_DMA_OFFLOAD 0x125e
+#define mmCP_HQD_DMA_OFFLOAD_BASE_IDX 0
+#define mmCP_HQD_OFFLOAD 0x125e
+#define mmCP_HQD_OFFLOAD_BASE_IDX 0
+#define mmCP_HQD_SEMA_CMD 0x125f
+#define mmCP_HQD_SEMA_CMD_BASE_IDX 0
+#define mmCP_HQD_MSG_TYPE 0x1260
+#define mmCP_HQD_MSG_TYPE_BASE_IDX 0
+#define mmCP_HQD_ATOMIC0_PREOP_LO 0x1261
+#define mmCP_HQD_ATOMIC0_PREOP_LO_BASE_IDX 0
+#define mmCP_HQD_ATOMIC0_PREOP_HI 0x1262
+#define mmCP_HQD_ATOMIC0_PREOP_HI_BASE_IDX 0
+#define mmCP_HQD_ATOMIC1_PREOP_LO 0x1263
+#define mmCP_HQD_ATOMIC1_PREOP_LO_BASE_IDX 0
+#define mmCP_HQD_ATOMIC1_PREOP_HI 0x1264
+#define mmCP_HQD_ATOMIC1_PREOP_HI_BASE_IDX 0
+#define mmCP_HQD_HQ_SCHEDULER0 0x1265
+#define mmCP_HQD_HQ_SCHEDULER0_BASE_IDX 0
+#define mmCP_HQD_HQ_STATUS0 0x1265
+#define mmCP_HQD_HQ_STATUS0_BASE_IDX 0
+#define mmCP_HQD_HQ_CONTROL0 0x1266
+#define mmCP_HQD_HQ_CONTROL0_BASE_IDX 0
+#define mmCP_HQD_HQ_SCHEDULER1 0x1266
+#define mmCP_HQD_HQ_SCHEDULER1_BASE_IDX 0
+#define mmCP_MQD_CONTROL 0x1267
+#define mmCP_MQD_CONTROL_BASE_IDX 0
+#define mmCP_HQD_HQ_STATUS1 0x1268
+#define mmCP_HQD_HQ_STATUS1_BASE_IDX 0
+#define mmCP_HQD_HQ_CONTROL1 0x1269
+#define mmCP_HQD_HQ_CONTROL1_BASE_IDX 0
+#define mmCP_HQD_EOP_BASE_ADDR 0x126a
+#define mmCP_HQD_EOP_BASE_ADDR_BASE_IDX 0
+#define mmCP_HQD_EOP_BASE_ADDR_HI 0x126b
+#define mmCP_HQD_EOP_BASE_ADDR_HI_BASE_IDX 0
+#define mmCP_HQD_EOP_CONTROL 0x126c
+#define mmCP_HQD_EOP_CONTROL_BASE_IDX 0
+#define mmCP_HQD_EOP_RPTR 0x126d
+#define mmCP_HQD_EOP_RPTR_BASE_IDX 0
+#define mmCP_HQD_EOP_WPTR 0x126e
+#define mmCP_HQD_EOP_WPTR_BASE_IDX 0
+#define mmCP_HQD_EOP_EVENTS 0x126f
+#define mmCP_HQD_EOP_EVENTS_BASE_IDX 0
+#define mmCP_HQD_CTX_SAVE_BASE_ADDR_LO 0x1270
+#define mmCP_HQD_CTX_SAVE_BASE_ADDR_LO_BASE_IDX 0
+#define mmCP_HQD_CTX_SAVE_BASE_ADDR_HI 0x1271
+#define mmCP_HQD_CTX_SAVE_BASE_ADDR_HI_BASE_IDX 0
+#define mmCP_HQD_CTX_SAVE_CONTROL 0x1272
+#define mmCP_HQD_CTX_SAVE_CONTROL_BASE_IDX 0
+#define mmCP_HQD_CNTL_STACK_OFFSET 0x1273
+#define mmCP_HQD_CNTL_STACK_OFFSET_BASE_IDX 0
+#define mmCP_HQD_CNTL_STACK_SIZE 0x1274
+#define mmCP_HQD_CNTL_STACK_SIZE_BASE_IDX 0
+#define mmCP_HQD_WG_STATE_OFFSET 0x1275
+#define mmCP_HQD_WG_STATE_OFFSET_BASE_IDX 0
+#define mmCP_HQD_CTX_SAVE_SIZE 0x1276
+#define mmCP_HQD_CTX_SAVE_SIZE_BASE_IDX 0
+#define mmCP_HQD_GDS_RESOURCE_STATE 0x1277
+#define mmCP_HQD_GDS_RESOURCE_STATE_BASE_IDX 0
+#define mmCP_HQD_ERROR 0x1278
+#define mmCP_HQD_ERROR_BASE_IDX 0
+#define mmCP_HQD_EOP_WPTR_MEM 0x1279
+#define mmCP_HQD_EOP_WPTR_MEM_BASE_IDX 0
+#define mmCP_HQD_AQL_CONTROL 0x127a
+#define mmCP_HQD_AQL_CONTROL_BASE_IDX 0
+#define mmCP_HQD_PQ_WPTR_LO 0x127b
+#define mmCP_HQD_PQ_WPTR_LO_BASE_IDX 0
+#define mmCP_HQD_PQ_WPTR_HI 0x127c
+#define mmCP_HQD_PQ_WPTR_HI_BASE_IDX 0
+
+
+// addressBlock: gc_didtdec
+// base address: 0xca00
+#define mmDIDT_IND_INDEX 0x1280
+#define mmDIDT_IND_INDEX_BASE_IDX 0
+#define mmDIDT_IND_DATA 0x1281
+#define mmDIDT_IND_DATA_BASE_IDX 0
+#define mmDIDT_INDEX_AUTO_INCR_EN 0x1282
+#define mmDIDT_INDEX_AUTO_INCR_EN_BASE_IDX 0
+
+
+// addressBlock: gc_gccacdec
+// base address: 0xca10
+#define mmGC_CAC_CTRL_1 0x1284
+#define mmGC_CAC_CTRL_1_BASE_IDX 0
+#define mmGC_CAC_CTRL_2 0x1285
+#define mmGC_CAC_CTRL_2_BASE_IDX 0
+#define mmGC_CAC_INDEX_AUTO_INCR_EN 0x1286
+#define mmGC_CAC_INDEX_AUTO_INCR_EN_BASE_IDX 0
+#define mmGC_CAC_AGGR_LOWER 0x1287
+#define mmGC_CAC_AGGR_LOWER_BASE_IDX 0
+#define mmGC_CAC_AGGR_UPPER 0x1288
+#define mmGC_CAC_AGGR_UPPER_BASE_IDX 0
+#define mmPCC_PERF_COUNTER 0x128a
+#define mmPCC_PERF_COUNTER_BASE_IDX 0
+#define mmGC_CAC_SOFT_CTRL 0x128d
+#define mmGC_CAC_SOFT_CTRL_BASE_IDX 0
+#define mmGC_DIDT_CTRL0 0x128e
+#define mmGC_DIDT_CTRL0_BASE_IDX 0
+#define mmGC_DIDT_CTRL1 0x128f
+#define mmGC_DIDT_CTRL1_BASE_IDX 0
+#define mmGC_DIDT_CTRL2 0x1290
+#define mmGC_DIDT_CTRL2_BASE_IDX 0
+#define mmGC_DIDT_WEIGHT 0x1291
+#define mmGC_DIDT_WEIGHT_BASE_IDX 0
+#define mmGC_EDC_CTRL 0x1293
+#define mmGC_EDC_CTRL_BASE_IDX 0
+#define mmGC_EDC_THRESHOLD 0x1294
+#define mmGC_EDC_THRESHOLD_BASE_IDX 0
+#define mmGC_DIDT_DROOP_CTRL 0x1298
+#define mmGC_DIDT_DROOP_CTRL_BASE_IDX 0
+#define mmGC_DIDT_DROOP_CTRL1 0x1299
+#define mmGC_DIDT_DROOP_CTRL1_BASE_IDX 0
+#define mmGC_EDC_DROOP_CTRL 0x129a
+#define mmGC_EDC_DROOP_CTRL_BASE_IDX 0
+#define mmGC_THROTTLE_CTRL 0x129b
+#define mmGC_THROTTLE_CTRL_BASE_IDX 0
+#define mmGC_CAC_IND_INDEX 0x129c
+#define mmGC_CAC_IND_INDEX_BASE_IDX 0
+#define mmGC_CAC_IND_DATA 0x129d
+#define mmGC_CAC_IND_DATA_BASE_IDX 0
+#define mmSE_CAC_IND_INDEX 0x129e
+#define mmSE_CAC_IND_INDEX_BASE_IDX 0
+#define mmSE_CAC_IND_DATA 0x129f
+#define mmSE_CAC_IND_DATA_BASE_IDX 0
+
+
+// addressBlock: gc_tcpdec
+// base address: 0xca80
+#define mmTCP_WATCH0_ADDR_H 0x12a0
+#define mmTCP_WATCH0_ADDR_H_BASE_IDX 0
+#define mmTCP_WATCH0_ADDR_L 0x12a1
+#define mmTCP_WATCH0_ADDR_L_BASE_IDX 0
+#define mmTCP_WATCH0_CNTL 0x12a2
+#define mmTCP_WATCH0_CNTL_BASE_IDX 0
+#define mmTCP_WATCH1_ADDR_H 0x12a3
+#define mmTCP_WATCH1_ADDR_H_BASE_IDX 0
+#define mmTCP_WATCH1_ADDR_L 0x12a4
+#define mmTCP_WATCH1_ADDR_L_BASE_IDX 0
+#define mmTCP_WATCH1_CNTL 0x12a5
+#define mmTCP_WATCH1_CNTL_BASE_IDX 0
+#define mmTCP_WATCH2_ADDR_H 0x12a6
+#define mmTCP_WATCH2_ADDR_H_BASE_IDX 0
+#define mmTCP_WATCH2_ADDR_L 0x12a7
+#define mmTCP_WATCH2_ADDR_L_BASE_IDX 0
+#define mmTCP_WATCH2_CNTL 0x12a8
+#define mmTCP_WATCH2_CNTL_BASE_IDX 0
+#define mmTCP_WATCH3_ADDR_H 0x12a9
+#define mmTCP_WATCH3_ADDR_H_BASE_IDX 0
+#define mmTCP_WATCH3_ADDR_L 0x12aa
+#define mmTCP_WATCH3_ADDR_L_BASE_IDX 0
+#define mmTCP_WATCH3_CNTL 0x12ab
+#define mmTCP_WATCH3_CNTL_BASE_IDX 0
+#define mmTCP_GATCL1_CNTL 0x12b0
+#define mmTCP_GATCL1_CNTL_BASE_IDX 0
+#define mmTCP_GATCL1_DSM_CNTL 0x12b2
+#define mmTCP_GATCL1_DSM_CNTL_BASE_IDX 0
+#define mmTCP_CNTL2 0x12b4
+#define mmTCP_CNTL2_BASE_IDX 0
+#define mmTCP_UTCL1_CNTL1 0x12b5
+#define mmTCP_UTCL1_CNTL1_BASE_IDX 0
+#define mmTCP_UTCL1_CNTL2 0x12b6
+#define mmTCP_UTCL1_CNTL2_BASE_IDX 0
+#define mmTCP_UTCL1_STATUS 0x12b7
+#define mmTCP_UTCL1_STATUS_BASE_IDX 0
+#define mmTCP_PERFCOUNTER_FILTER 0x12b9
+#define mmTCP_PERFCOUNTER_FILTER_BASE_IDX 0
+#define mmTCP_PERFCOUNTER_FILTER_EN 0x12ba
+#define mmTCP_PERFCOUNTER_FILTER_EN_BASE_IDX 0
+
+
+// addressBlock: gc_gdspdec
+// base address: 0xcc00
+#define mmGDS_VMID0_BASE 0x1300
+#define mmGDS_VMID0_BASE_BASE_IDX 0
+#define mmGDS_VMID0_SIZE 0x1301
+#define mmGDS_VMID0_SIZE_BASE_IDX 0
+#define mmGDS_VMID1_BASE 0x1302
+#define mmGDS_VMID1_BASE_BASE_IDX 0
+#define mmGDS_VMID1_SIZE 0x1303
+#define mmGDS_VMID1_SIZE_BASE_IDX 0
+#define mmGDS_VMID2_BASE 0x1304
+#define mmGDS_VMID2_BASE_BASE_IDX 0
+#define mmGDS_VMID2_SIZE 0x1305
+#define mmGDS_VMID2_SIZE_BASE_IDX 0
+#define mmGDS_VMID3_BASE 0x1306
+#define mmGDS_VMID3_BASE_BASE_IDX 0
+#define mmGDS_VMID3_SIZE 0x1307
+#define mmGDS_VMID3_SIZE_BASE_IDX 0
+#define mmGDS_VMID4_BASE 0x1308
+#define mmGDS_VMID4_BASE_BASE_IDX 0
+#define mmGDS_VMID4_SIZE 0x1309
+#define mmGDS_VMID4_SIZE_BASE_IDX 0
+#define mmGDS_VMID5_BASE 0x130a
+#define mmGDS_VMID5_BASE_BASE_IDX 0
+#define mmGDS_VMID5_SIZE 0x130b
+#define mmGDS_VMID5_SIZE_BASE_IDX 0
+#define mmGDS_VMID6_BASE 0x130c
+#define mmGDS_VMID6_BASE_BASE_IDX 0
+#define mmGDS_VMID6_SIZE 0x130d
+#define mmGDS_VMID6_SIZE_BASE_IDX 0
+#define mmGDS_VMID7_BASE 0x130e
+#define mmGDS_VMID7_BASE_BASE_IDX 0
+#define mmGDS_VMID7_SIZE 0x130f
+#define mmGDS_VMID7_SIZE_BASE_IDX 0
+#define mmGDS_VMID8_BASE 0x1310
+#define mmGDS_VMID8_BASE_BASE_IDX 0
+#define mmGDS_VMID8_SIZE 0x1311
+#define mmGDS_VMID8_SIZE_BASE_IDX 0
+#define mmGDS_VMID9_BASE 0x1312
+#define mmGDS_VMID9_BASE_BASE_IDX 0
+#define mmGDS_VMID9_SIZE 0x1313
+#define mmGDS_VMID9_SIZE_BASE_IDX 0
+#define mmGDS_VMID10_BASE 0x1314
+#define mmGDS_VMID10_BASE_BASE_IDX 0
+#define mmGDS_VMID10_SIZE 0x1315
+#define mmGDS_VMID10_SIZE_BASE_IDX 0
+#define mmGDS_VMID11_BASE 0x1316
+#define mmGDS_VMID11_BASE_BASE_IDX 0
+#define mmGDS_VMID11_SIZE 0x1317
+#define mmGDS_VMID11_SIZE_BASE_IDX 0
+#define mmGDS_VMID12_BASE 0x1318
+#define mmGDS_VMID12_BASE_BASE_IDX 0
+#define mmGDS_VMID12_SIZE 0x1319
+#define mmGDS_VMID12_SIZE_BASE_IDX 0
+#define mmGDS_VMID13_BASE 0x131a
+#define mmGDS_VMID13_BASE_BASE_IDX 0
+#define mmGDS_VMID13_SIZE 0x131b
+#define mmGDS_VMID13_SIZE_BASE_IDX 0
+#define mmGDS_VMID14_BASE 0x131c
+#define mmGDS_VMID14_BASE_BASE_IDX 0
+#define mmGDS_VMID14_SIZE 0x131d
+#define mmGDS_VMID14_SIZE_BASE_IDX 0
+#define mmGDS_VMID15_BASE 0x131e
+#define mmGDS_VMID15_BASE_BASE_IDX 0
+#define mmGDS_VMID15_SIZE 0x131f
+#define mmGDS_VMID15_SIZE_BASE_IDX 0
+#define mmGDS_GWS_VMID0 0x1320
+#define mmGDS_GWS_VMID0_BASE_IDX 0
+#define mmGDS_GWS_VMID1 0x1321
+#define mmGDS_GWS_VMID1_BASE_IDX 0
+#define mmGDS_GWS_VMID2 0x1322
+#define mmGDS_GWS_VMID2_BASE_IDX 0
+#define mmGDS_GWS_VMID3 0x1323
+#define mmGDS_GWS_VMID3_BASE_IDX 0
+#define mmGDS_GWS_VMID4 0x1324
+#define mmGDS_GWS_VMID4_BASE_IDX 0
+#define mmGDS_GWS_VMID5 0x1325
+#define mmGDS_GWS_VMID5_BASE_IDX 0
+#define mmGDS_GWS_VMID6 0x1326
+#define mmGDS_GWS_VMID6_BASE_IDX 0
+#define mmGDS_GWS_VMID7 0x1327
+#define mmGDS_GWS_VMID7_BASE_IDX 0
+#define mmGDS_GWS_VMID8 0x1328
+#define mmGDS_GWS_VMID8_BASE_IDX 0
+#define mmGDS_GWS_VMID9 0x1329
+#define mmGDS_GWS_VMID9_BASE_IDX 0
+#define mmGDS_GWS_VMID10 0x132a
+#define mmGDS_GWS_VMID10_BASE_IDX 0
+#define mmGDS_GWS_VMID11 0x132b
+#define mmGDS_GWS_VMID11_BASE_IDX 0
+#define mmGDS_GWS_VMID12 0x132c
+#define mmGDS_GWS_VMID12_BASE_IDX 0
+#define mmGDS_GWS_VMID13 0x132d
+#define mmGDS_GWS_VMID13_BASE_IDX 0
+#define mmGDS_GWS_VMID14 0x132e
+#define mmGDS_GWS_VMID14_BASE_IDX 0
+#define mmGDS_GWS_VMID15 0x132f
+#define mmGDS_GWS_VMID15_BASE_IDX 0
+#define mmGDS_OA_VMID0 0x1330
+#define mmGDS_OA_VMID0_BASE_IDX 0
+#define mmGDS_OA_VMID1 0x1331
+#define mmGDS_OA_VMID1_BASE_IDX 0
+#define mmGDS_OA_VMID2 0x1332
+#define mmGDS_OA_VMID2_BASE_IDX 0
+#define mmGDS_OA_VMID3 0x1333
+#define mmGDS_OA_VMID3_BASE_IDX 0
+#define mmGDS_OA_VMID4 0x1334
+#define mmGDS_OA_VMID4_BASE_IDX 0
+#define mmGDS_OA_VMID5 0x1335
+#define mmGDS_OA_VMID5_BASE_IDX 0
+#define mmGDS_OA_VMID6 0x1336
+#define mmGDS_OA_VMID6_BASE_IDX 0
+#define mmGDS_OA_VMID7 0x1337
+#define mmGDS_OA_VMID7_BASE_IDX 0
+#define mmGDS_OA_VMID8 0x1338
+#define mmGDS_OA_VMID8_BASE_IDX 0
+#define mmGDS_OA_VMID9 0x1339
+#define mmGDS_OA_VMID9_BASE_IDX 0
+#define mmGDS_OA_VMID10 0x133a
+#define mmGDS_OA_VMID10_BASE_IDX 0
+#define mmGDS_OA_VMID11 0x133b
+#define mmGDS_OA_VMID11_BASE_IDX 0
+#define mmGDS_OA_VMID12 0x133c
+#define mmGDS_OA_VMID12_BASE_IDX 0
+#define mmGDS_OA_VMID13 0x133d
+#define mmGDS_OA_VMID13_BASE_IDX 0
+#define mmGDS_OA_VMID14 0x133e
+#define mmGDS_OA_VMID14_BASE_IDX 0
+#define mmGDS_OA_VMID15 0x133f
+#define mmGDS_OA_VMID15_BASE_IDX 0
+#define mmGDS_GWS_RESET0 0x1344
+#define mmGDS_GWS_RESET0_BASE_IDX 0
+#define mmGDS_GWS_RESET1 0x1345
+#define mmGDS_GWS_RESET1_BASE_IDX 0
+#define mmGDS_GWS_RESOURCE_RESET 0x1346
+#define mmGDS_GWS_RESOURCE_RESET_BASE_IDX 0
+#define mmGDS_COMPUTE_MAX_WAVE_ID 0x1348
+#define mmGDS_COMPUTE_MAX_WAVE_ID_BASE_IDX 0
+#define mmGDS_OA_RESET_MASK 0x1349
+#define mmGDS_OA_RESET_MASK_BASE_IDX 0
+#define mmGDS_OA_RESET 0x134a
+#define mmGDS_OA_RESET_BASE_IDX 0
+#define mmGDS_ENHANCE 0x134b
+#define mmGDS_ENHANCE_BASE_IDX 0
+#define mmGDS_OA_CGPG_RESTORE 0x134c
+#define mmGDS_OA_CGPG_RESTORE_BASE_IDX 0
+#define mmGDS_CS_CTXSW_STATUS 0x134d
+#define mmGDS_CS_CTXSW_STATUS_BASE_IDX 0
+#define mmGDS_CS_CTXSW_CNT0 0x134e
+#define mmGDS_CS_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_CS_CTXSW_CNT1 0x134f
+#define mmGDS_CS_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_CS_CTXSW_CNT2 0x1350
+#define mmGDS_CS_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_CS_CTXSW_CNT3 0x1351
+#define mmGDS_CS_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_GFX_CTXSW_STATUS 0x1352
+#define mmGDS_GFX_CTXSW_STATUS_BASE_IDX 0
+#define mmGDS_VS_CTXSW_CNT0 0x1353
+#define mmGDS_VS_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_VS_CTXSW_CNT1 0x1354
+#define mmGDS_VS_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_VS_CTXSW_CNT2 0x1355
+#define mmGDS_VS_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_VS_CTXSW_CNT3 0x1356
+#define mmGDS_VS_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_PS0_CTXSW_CNT0 0x1357
+#define mmGDS_PS0_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_PS0_CTXSW_CNT1 0x1358
+#define mmGDS_PS0_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_PS0_CTXSW_CNT2 0x1359
+#define mmGDS_PS0_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_PS0_CTXSW_CNT3 0x135a
+#define mmGDS_PS0_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_PS1_CTXSW_CNT0 0x135b
+#define mmGDS_PS1_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_PS1_CTXSW_CNT1 0x135c
+#define mmGDS_PS1_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_PS1_CTXSW_CNT2 0x135d
+#define mmGDS_PS1_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_PS1_CTXSW_CNT3 0x135e
+#define mmGDS_PS1_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_PS2_CTXSW_CNT0 0x135f
+#define mmGDS_PS2_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_PS2_CTXSW_CNT1 0x1360
+#define mmGDS_PS2_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_PS2_CTXSW_CNT2 0x1361
+#define mmGDS_PS2_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_PS2_CTXSW_CNT3 0x1362
+#define mmGDS_PS2_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_PS3_CTXSW_CNT0 0x1363
+#define mmGDS_PS3_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_PS3_CTXSW_CNT1 0x1364
+#define mmGDS_PS3_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_PS3_CTXSW_CNT2 0x1365
+#define mmGDS_PS3_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_PS3_CTXSW_CNT3 0x1366
+#define mmGDS_PS3_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_PS4_CTXSW_CNT0 0x1367
+#define mmGDS_PS4_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_PS4_CTXSW_CNT1 0x1368
+#define mmGDS_PS4_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_PS4_CTXSW_CNT2 0x1369
+#define mmGDS_PS4_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_PS4_CTXSW_CNT3 0x136a
+#define mmGDS_PS4_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_PS5_CTXSW_CNT0 0x136b
+#define mmGDS_PS5_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_PS5_CTXSW_CNT1 0x136c
+#define mmGDS_PS5_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_PS5_CTXSW_CNT2 0x136d
+#define mmGDS_PS5_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_PS5_CTXSW_CNT3 0x136e
+#define mmGDS_PS5_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_PS6_CTXSW_CNT0 0x136f
+#define mmGDS_PS6_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_PS6_CTXSW_CNT1 0x1370
+#define mmGDS_PS6_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_PS6_CTXSW_CNT2 0x1371
+#define mmGDS_PS6_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_PS6_CTXSW_CNT3 0x1372
+#define mmGDS_PS6_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_PS7_CTXSW_CNT0 0x1373
+#define mmGDS_PS7_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_PS7_CTXSW_CNT1 0x1374
+#define mmGDS_PS7_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_PS7_CTXSW_CNT2 0x1375
+#define mmGDS_PS7_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_PS7_CTXSW_CNT3 0x1376
+#define mmGDS_PS7_CTXSW_CNT3_BASE_IDX 0
+#define mmGDS_GS_CTXSW_CNT0 0x1377
+#define mmGDS_GS_CTXSW_CNT0_BASE_IDX 0
+#define mmGDS_GS_CTXSW_CNT1 0x1378
+#define mmGDS_GS_CTXSW_CNT1_BASE_IDX 0
+#define mmGDS_GS_CTXSW_CNT2 0x1379
+#define mmGDS_GS_CTXSW_CNT2_BASE_IDX 0
+#define mmGDS_GS_CTXSW_CNT3 0x137a
+#define mmGDS_GS_CTXSW_CNT3_BASE_IDX 0
+
+
+// addressBlock: gc_rasdec
+// base address: 0xce00
+#define mmRAS_SIGNATURE_CONTROL 0x1380
+#define mmRAS_SIGNATURE_CONTROL_BASE_IDX 0
+#define mmRAS_SIGNATURE_MASK 0x1381
+#define mmRAS_SIGNATURE_MASK_BASE_IDX 0
+#define mmRAS_SX_SIGNATURE0 0x1382
+#define mmRAS_SX_SIGNATURE0_BASE_IDX 0
+#define mmRAS_SX_SIGNATURE1 0x1383
+#define mmRAS_SX_SIGNATURE1_BASE_IDX 0
+#define mmRAS_SX_SIGNATURE2 0x1384
+#define mmRAS_SX_SIGNATURE2_BASE_IDX 0
+#define mmRAS_SX_SIGNATURE3 0x1385
+#define mmRAS_SX_SIGNATURE3_BASE_IDX 0
+#define mmRAS_DB_SIGNATURE0 0x138b
+#define mmRAS_DB_SIGNATURE0_BASE_IDX 0
+#define mmRAS_PA_SIGNATURE0 0x138c
+#define mmRAS_PA_SIGNATURE0_BASE_IDX 0
+#define mmRAS_VGT_SIGNATURE0 0x138d
+#define mmRAS_VGT_SIGNATURE0_BASE_IDX 0
+#define mmRAS_SQ_SIGNATURE0 0x138e
+#define mmRAS_SQ_SIGNATURE0_BASE_IDX 0
+#define mmRAS_SC_SIGNATURE0 0x138f
+#define mmRAS_SC_SIGNATURE0_BASE_IDX 0
+#define mmRAS_SC_SIGNATURE1 0x1390
+#define mmRAS_SC_SIGNATURE1_BASE_IDX 0
+#define mmRAS_SC_SIGNATURE2 0x1391
+#define mmRAS_SC_SIGNATURE2_BASE_IDX 0
+#define mmRAS_SC_SIGNATURE3 0x1392
+#define mmRAS_SC_SIGNATURE3_BASE_IDX 0
+#define mmRAS_SC_SIGNATURE4 0x1393
+#define mmRAS_SC_SIGNATURE4_BASE_IDX 0
+#define mmRAS_SC_SIGNATURE5 0x1394
+#define mmRAS_SC_SIGNATURE5_BASE_IDX 0
+#define mmRAS_SC_SIGNATURE6 0x1395
+#define mmRAS_SC_SIGNATURE6_BASE_IDX 0
+#define mmRAS_SC_SIGNATURE7 0x1396
+#define mmRAS_SC_SIGNATURE7_BASE_IDX 0
+#define mmRAS_IA_SIGNATURE0 0x1397
+#define mmRAS_IA_SIGNATURE0_BASE_IDX 0
+#define mmRAS_IA_SIGNATURE1 0x1398
+#define mmRAS_IA_SIGNATURE1_BASE_IDX 0
+#define mmRAS_SPI_SIGNATURE0 0x1399
+#define mmRAS_SPI_SIGNATURE0_BASE_IDX 0
+#define mmRAS_SPI_SIGNATURE1 0x139a
+#define mmRAS_SPI_SIGNATURE1_BASE_IDX 0
+#define mmRAS_TA_SIGNATURE0 0x139b
+#define mmRAS_TA_SIGNATURE0_BASE_IDX 0
+#define mmRAS_TD_SIGNATURE0 0x139c
+#define mmRAS_TD_SIGNATURE0_BASE_IDX 0
+#define mmRAS_CB_SIGNATURE0 0x139d
+#define mmRAS_CB_SIGNATURE0_BASE_IDX 0
+#define mmRAS_BCI_SIGNATURE0 0x139e
+#define mmRAS_BCI_SIGNATURE0_BASE_IDX 0
+#define mmRAS_BCI_SIGNATURE1 0x139f
+#define mmRAS_BCI_SIGNATURE1_BASE_IDX 0
+#define mmRAS_TA_SIGNATURE1 0x13a0
+#define mmRAS_TA_SIGNATURE1_BASE_IDX 0
+
+
+// addressBlock: gc_gfxdec0
+// base address: 0x28000
+#define mmDB_RENDER_CONTROL 0x0000
+#define mmDB_RENDER_CONTROL_BASE_IDX 1
+#define mmDB_COUNT_CONTROL 0x0001
+#define mmDB_COUNT_CONTROL_BASE_IDX 1
+#define mmDB_DEPTH_VIEW 0x0002
+#define mmDB_DEPTH_VIEW_BASE_IDX 1
+#define mmDB_RENDER_OVERRIDE 0x0003
+#define mmDB_RENDER_OVERRIDE_BASE_IDX 1
+#define mmDB_RENDER_OVERRIDE2 0x0004
+#define mmDB_RENDER_OVERRIDE2_BASE_IDX 1
+#define mmDB_HTILE_DATA_BASE 0x0005
+#define mmDB_HTILE_DATA_BASE_BASE_IDX 1
+#define mmDB_HTILE_DATA_BASE_HI 0x0006
+#define mmDB_HTILE_DATA_BASE_HI_BASE_IDX 1
+#define mmDB_DEPTH_SIZE 0x0007
+#define mmDB_DEPTH_SIZE_BASE_IDX 1
+#define mmDB_DEPTH_BOUNDS_MIN 0x0008
+#define mmDB_DEPTH_BOUNDS_MIN_BASE_IDX 1
+#define mmDB_DEPTH_BOUNDS_MAX 0x0009
+#define mmDB_DEPTH_BOUNDS_MAX_BASE_IDX 1
+#define mmDB_STENCIL_CLEAR 0x000a
+#define mmDB_STENCIL_CLEAR_BASE_IDX 1
+#define mmDB_DEPTH_CLEAR 0x000b
+#define mmDB_DEPTH_CLEAR_BASE_IDX 1
+#define mmPA_SC_SCREEN_SCISSOR_TL 0x000c
+#define mmPA_SC_SCREEN_SCISSOR_TL_BASE_IDX 1
+#define mmPA_SC_SCREEN_SCISSOR_BR 0x000d
+#define mmPA_SC_SCREEN_SCISSOR_BR_BASE_IDX 1
+#define mmDB_Z_INFO 0x000e
+#define mmDB_Z_INFO_BASE_IDX 1
+#define mmDB_STENCIL_INFO 0x000f
+#define mmDB_STENCIL_INFO_BASE_IDX 1
+#define mmDB_Z_READ_BASE 0x0010
+#define mmDB_Z_READ_BASE_BASE_IDX 1
+#define mmDB_Z_READ_BASE_HI 0x0011
+#define mmDB_Z_READ_BASE_HI_BASE_IDX 1
+#define mmDB_STENCIL_READ_BASE 0x0012
+#define mmDB_STENCIL_READ_BASE_BASE_IDX 1
+#define mmDB_STENCIL_READ_BASE_HI 0x0013
+#define mmDB_STENCIL_READ_BASE_HI_BASE_IDX 1
+#define mmDB_Z_WRITE_BASE 0x0014
+#define mmDB_Z_WRITE_BASE_BASE_IDX 1
+#define mmDB_Z_WRITE_BASE_HI 0x0015
+#define mmDB_Z_WRITE_BASE_HI_BASE_IDX 1
+#define mmDB_STENCIL_WRITE_BASE 0x0016
+#define mmDB_STENCIL_WRITE_BASE_BASE_IDX 1
+#define mmDB_STENCIL_WRITE_BASE_HI 0x0017
+#define mmDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
+#define mmDB_DFSM_CONTROL 0x0018
+#define mmDB_DFSM_CONTROL_BASE_IDX 1
+#define mmDB_Z_INFO2 0x001a
+#define mmDB_Z_INFO2_BASE_IDX 1
+#define mmDB_STENCIL_INFO2 0x001b
+#define mmDB_STENCIL_INFO2_BASE_IDX 1
+#define mmTA_BC_BASE_ADDR 0x0020
+#define mmTA_BC_BASE_ADDR_BASE_IDX 1
+#define mmTA_BC_BASE_ADDR_HI 0x0021
+#define mmTA_BC_BASE_ADDR_HI_BASE_IDX 1
+#define mmCOHER_DEST_BASE_HI_0 0x007a
+#define mmCOHER_DEST_BASE_HI_0_BASE_IDX 1
+#define mmCOHER_DEST_BASE_HI_1 0x007b
+#define mmCOHER_DEST_BASE_HI_1_BASE_IDX 1
+#define mmCOHER_DEST_BASE_HI_2 0x007c
+#define mmCOHER_DEST_BASE_HI_2_BASE_IDX 1
+#define mmCOHER_DEST_BASE_HI_3 0x007d
+#define mmCOHER_DEST_BASE_HI_3_BASE_IDX 1
+#define mmCOHER_DEST_BASE_2 0x007e
+#define mmCOHER_DEST_BASE_2_BASE_IDX 1
+#define mmCOHER_DEST_BASE_3 0x007f
+#define mmCOHER_DEST_BASE_3_BASE_IDX 1
+#define mmPA_SC_WINDOW_OFFSET 0x0080
+#define mmPA_SC_WINDOW_OFFSET_BASE_IDX 1
+#define mmPA_SC_WINDOW_SCISSOR_TL 0x0081
+#define mmPA_SC_WINDOW_SCISSOR_TL_BASE_IDX 1
+#define mmPA_SC_WINDOW_SCISSOR_BR 0x0082
+#define mmPA_SC_WINDOW_SCISSOR_BR_BASE_IDX 1
+#define mmPA_SC_CLIPRECT_RULE 0x0083
+#define mmPA_SC_CLIPRECT_RULE_BASE_IDX 1
+#define mmPA_SC_CLIPRECT_0_TL 0x0084
+#define mmPA_SC_CLIPRECT_0_TL_BASE_IDX 1
+#define mmPA_SC_CLIPRECT_0_BR 0x0085
+#define mmPA_SC_CLIPRECT_0_BR_BASE_IDX 1
+#define mmPA_SC_CLIPRECT_1_TL 0x0086
+#define mmPA_SC_CLIPRECT_1_TL_BASE_IDX 1
+#define mmPA_SC_CLIPRECT_1_BR 0x0087
+#define mmPA_SC_CLIPRECT_1_BR_BASE_IDX 1
+#define mmPA_SC_CLIPRECT_2_TL 0x0088
+#define mmPA_SC_CLIPRECT_2_TL_BASE_IDX 1
+#define mmPA_SC_CLIPRECT_2_BR 0x0089
+#define mmPA_SC_CLIPRECT_2_BR_BASE_IDX 1
+#define mmPA_SC_CLIPRECT_3_TL 0x008a
+#define mmPA_SC_CLIPRECT_3_TL_BASE_IDX 1
+#define mmPA_SC_CLIPRECT_3_BR 0x008b
+#define mmPA_SC_CLIPRECT_3_BR_BASE_IDX 1
+#define mmPA_SC_EDGERULE 0x008c
+#define mmPA_SC_EDGERULE_BASE_IDX 1
+#define mmPA_SU_HARDWARE_SCREEN_OFFSET 0x008d
+#define mmPA_SU_HARDWARE_SCREEN_OFFSET_BASE_IDX 1
+#define mmCB_TARGET_MASK 0x008e
+#define mmCB_TARGET_MASK_BASE_IDX 1
+#define mmCB_SHADER_MASK 0x008f
+#define mmCB_SHADER_MASK_BASE_IDX 1
+#define mmPA_SC_GENERIC_SCISSOR_TL 0x0090
+#define mmPA_SC_GENERIC_SCISSOR_TL_BASE_IDX 1
+#define mmPA_SC_GENERIC_SCISSOR_BR 0x0091
+#define mmPA_SC_GENERIC_SCISSOR_BR_BASE_IDX 1
+#define mmCOHER_DEST_BASE_0 0x0092
+#define mmCOHER_DEST_BASE_0_BASE_IDX 1
+#define mmCOHER_DEST_BASE_1 0x0093
+#define mmCOHER_DEST_BASE_1_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_0_TL 0x0094
+#define mmPA_SC_VPORT_SCISSOR_0_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_0_BR 0x0095
+#define mmPA_SC_VPORT_SCISSOR_0_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_1_TL 0x0096
+#define mmPA_SC_VPORT_SCISSOR_1_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_1_BR 0x0097
+#define mmPA_SC_VPORT_SCISSOR_1_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_2_TL 0x0098
+#define mmPA_SC_VPORT_SCISSOR_2_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_2_BR 0x0099
+#define mmPA_SC_VPORT_SCISSOR_2_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_3_TL 0x009a
+#define mmPA_SC_VPORT_SCISSOR_3_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_3_BR 0x009b
+#define mmPA_SC_VPORT_SCISSOR_3_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_4_TL 0x009c
+#define mmPA_SC_VPORT_SCISSOR_4_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_4_BR 0x009d
+#define mmPA_SC_VPORT_SCISSOR_4_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_5_TL 0x009e
+#define mmPA_SC_VPORT_SCISSOR_5_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_5_BR 0x009f
+#define mmPA_SC_VPORT_SCISSOR_5_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_6_TL 0x00a0
+#define mmPA_SC_VPORT_SCISSOR_6_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_6_BR 0x00a1
+#define mmPA_SC_VPORT_SCISSOR_6_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_7_TL 0x00a2
+#define mmPA_SC_VPORT_SCISSOR_7_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_7_BR 0x00a3
+#define mmPA_SC_VPORT_SCISSOR_7_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_8_TL 0x00a4
+#define mmPA_SC_VPORT_SCISSOR_8_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_8_BR 0x00a5
+#define mmPA_SC_VPORT_SCISSOR_8_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_9_TL 0x00a6
+#define mmPA_SC_VPORT_SCISSOR_9_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_9_BR 0x00a7
+#define mmPA_SC_VPORT_SCISSOR_9_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_10_TL 0x00a8
+#define mmPA_SC_VPORT_SCISSOR_10_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_10_BR 0x00a9
+#define mmPA_SC_VPORT_SCISSOR_10_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_11_TL 0x00aa
+#define mmPA_SC_VPORT_SCISSOR_11_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_11_BR 0x00ab
+#define mmPA_SC_VPORT_SCISSOR_11_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_12_TL 0x00ac
+#define mmPA_SC_VPORT_SCISSOR_12_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_12_BR 0x00ad
+#define mmPA_SC_VPORT_SCISSOR_12_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_13_TL 0x00ae
+#define mmPA_SC_VPORT_SCISSOR_13_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_13_BR 0x00af
+#define mmPA_SC_VPORT_SCISSOR_13_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_14_TL 0x00b0
+#define mmPA_SC_VPORT_SCISSOR_14_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_14_BR 0x00b1
+#define mmPA_SC_VPORT_SCISSOR_14_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_15_TL 0x00b2
+#define mmPA_SC_VPORT_SCISSOR_15_TL_BASE_IDX 1
+#define mmPA_SC_VPORT_SCISSOR_15_BR 0x00b3
+#define mmPA_SC_VPORT_SCISSOR_15_BR_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_0 0x00b4
+#define mmPA_SC_VPORT_ZMIN_0_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_0 0x00b5
+#define mmPA_SC_VPORT_ZMAX_0_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_1 0x00b6
+#define mmPA_SC_VPORT_ZMIN_1_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_1 0x00b7
+#define mmPA_SC_VPORT_ZMAX_1_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_2 0x00b8
+#define mmPA_SC_VPORT_ZMIN_2_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_2 0x00b9
+#define mmPA_SC_VPORT_ZMAX_2_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_3 0x00ba
+#define mmPA_SC_VPORT_ZMIN_3_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_3 0x00bb
+#define mmPA_SC_VPORT_ZMAX_3_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_4 0x00bc
+#define mmPA_SC_VPORT_ZMIN_4_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_4 0x00bd
+#define mmPA_SC_VPORT_ZMAX_4_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_5 0x00be
+#define mmPA_SC_VPORT_ZMIN_5_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_5 0x00bf
+#define mmPA_SC_VPORT_ZMAX_5_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_6 0x00c0
+#define mmPA_SC_VPORT_ZMIN_6_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_6 0x00c1
+#define mmPA_SC_VPORT_ZMAX_6_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_7 0x00c2
+#define mmPA_SC_VPORT_ZMIN_7_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_7 0x00c3
+#define mmPA_SC_VPORT_ZMAX_7_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_8 0x00c4
+#define mmPA_SC_VPORT_ZMIN_8_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_8 0x00c5
+#define mmPA_SC_VPORT_ZMAX_8_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_9 0x00c6
+#define mmPA_SC_VPORT_ZMIN_9_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_9 0x00c7
+#define mmPA_SC_VPORT_ZMAX_9_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_10 0x00c8
+#define mmPA_SC_VPORT_ZMIN_10_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_10 0x00c9
+#define mmPA_SC_VPORT_ZMAX_10_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_11 0x00ca
+#define mmPA_SC_VPORT_ZMIN_11_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_11 0x00cb
+#define mmPA_SC_VPORT_ZMAX_11_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_12 0x00cc
+#define mmPA_SC_VPORT_ZMIN_12_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_12 0x00cd
+#define mmPA_SC_VPORT_ZMAX_12_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_13 0x00ce
+#define mmPA_SC_VPORT_ZMIN_13_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_13 0x00cf
+#define mmPA_SC_VPORT_ZMAX_13_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_14 0x00d0
+#define mmPA_SC_VPORT_ZMIN_14_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_14 0x00d1
+#define mmPA_SC_VPORT_ZMAX_14_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMIN_15 0x00d2
+#define mmPA_SC_VPORT_ZMIN_15_BASE_IDX 1
+#define mmPA_SC_VPORT_ZMAX_15 0x00d3
+#define mmPA_SC_VPORT_ZMAX_15_BASE_IDX 1
+#define mmPA_SC_RASTER_CONFIG 0x00d4
+#define mmPA_SC_RASTER_CONFIG_BASE_IDX 1
+#define mmPA_SC_RASTER_CONFIG_1 0x00d5
+#define mmPA_SC_RASTER_CONFIG_1_BASE_IDX 1
+#define mmPA_SC_SCREEN_EXTENT_CONTROL 0x00d6
+#define mmPA_SC_SCREEN_EXTENT_CONTROL_BASE_IDX 1
+#define mmPA_SC_TILE_STEERING_OVERRIDE 0x00d7
+#define mmPA_SC_TILE_STEERING_OVERRIDE_BASE_IDX 1
+#define mmCP_PERFMON_CNTX_CNTL 0x00d8
+#define mmCP_PERFMON_CNTX_CNTL_BASE_IDX 1
+#define mmCP_PIPEID 0x00d9
+#define mmCP_PIPEID_BASE_IDX 1
+#define mmCP_RINGID 0x00d9
+#define mmCP_RINGID_BASE_IDX 1
+#define mmCP_VMID 0x00da
+#define mmCP_VMID_BASE_IDX 1
+#define mmPA_SC_RIGHT_VERT_GRID 0x00e8
+#define mmPA_SC_RIGHT_VERT_GRID_BASE_IDX 1
+#define mmPA_SC_LEFT_VERT_GRID 0x00e9
+#define mmPA_SC_LEFT_VERT_GRID_BASE_IDX 1
+#define mmPA_SC_HORIZ_GRID 0x00ea
+#define mmPA_SC_HORIZ_GRID_BASE_IDX 1
+#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
+#define mmVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
+#define mmCB_BLEND_RED 0x0105
+#define mmCB_BLEND_RED_BASE_IDX 1
+#define mmCB_BLEND_GREEN 0x0106
+#define mmCB_BLEND_GREEN_BASE_IDX 1
+#define mmCB_BLEND_BLUE 0x0107
+#define mmCB_BLEND_BLUE_BASE_IDX 1
+#define mmCB_BLEND_ALPHA 0x0108
+#define mmCB_BLEND_ALPHA_BASE_IDX 1
+#define mmCB_DCC_CONTROL 0x0109
+#define mmCB_DCC_CONTROL_BASE_IDX 1
+#define mmDB_STENCIL_CONTROL 0x010b
+#define mmDB_STENCIL_CONTROL_BASE_IDX 1
+#define mmDB_STENCILREFMASK 0x010c
+#define mmDB_STENCILREFMASK_BASE_IDX 1
+#define mmDB_STENCILREFMASK_BF 0x010d
+#define mmDB_STENCILREFMASK_BF_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE 0x010f
+#define mmPA_CL_VPORT_XSCALE_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET 0x0110
+#define mmPA_CL_VPORT_XOFFSET_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE 0x0111
+#define mmPA_CL_VPORT_YSCALE_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET 0x0112
+#define mmPA_CL_VPORT_YOFFSET_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE 0x0113
+#define mmPA_CL_VPORT_ZSCALE_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET 0x0114
+#define mmPA_CL_VPORT_ZOFFSET_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_1 0x0115
+#define mmPA_CL_VPORT_XSCALE_1_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_1 0x0116
+#define mmPA_CL_VPORT_XOFFSET_1_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_1 0x0117
+#define mmPA_CL_VPORT_YSCALE_1_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_1 0x0118
+#define mmPA_CL_VPORT_YOFFSET_1_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_1 0x0119
+#define mmPA_CL_VPORT_ZSCALE_1_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_1 0x011a
+#define mmPA_CL_VPORT_ZOFFSET_1_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_2 0x011b
+#define mmPA_CL_VPORT_XSCALE_2_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_2 0x011c
+#define mmPA_CL_VPORT_XOFFSET_2_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_2 0x011d
+#define mmPA_CL_VPORT_YSCALE_2_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_2 0x011e
+#define mmPA_CL_VPORT_YOFFSET_2_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_2 0x011f
+#define mmPA_CL_VPORT_ZSCALE_2_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_2 0x0120
+#define mmPA_CL_VPORT_ZOFFSET_2_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_3 0x0121
+#define mmPA_CL_VPORT_XSCALE_3_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_3 0x0122
+#define mmPA_CL_VPORT_XOFFSET_3_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_3 0x0123
+#define mmPA_CL_VPORT_YSCALE_3_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_3 0x0124
+#define mmPA_CL_VPORT_YOFFSET_3_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_3 0x0125
+#define mmPA_CL_VPORT_ZSCALE_3_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_3 0x0126
+#define mmPA_CL_VPORT_ZOFFSET_3_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_4 0x0127
+#define mmPA_CL_VPORT_XSCALE_4_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_4 0x0128
+#define mmPA_CL_VPORT_XOFFSET_4_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_4 0x0129
+#define mmPA_CL_VPORT_YSCALE_4_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_4 0x012a
+#define mmPA_CL_VPORT_YOFFSET_4_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_4 0x012b
+#define mmPA_CL_VPORT_ZSCALE_4_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_4 0x012c
+#define mmPA_CL_VPORT_ZOFFSET_4_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_5 0x012d
+#define mmPA_CL_VPORT_XSCALE_5_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_5 0x012e
+#define mmPA_CL_VPORT_XOFFSET_5_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_5 0x012f
+#define mmPA_CL_VPORT_YSCALE_5_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_5 0x0130
+#define mmPA_CL_VPORT_YOFFSET_5_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_5 0x0131
+#define mmPA_CL_VPORT_ZSCALE_5_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_5 0x0132
+#define mmPA_CL_VPORT_ZOFFSET_5_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_6 0x0133
+#define mmPA_CL_VPORT_XSCALE_6_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_6 0x0134
+#define mmPA_CL_VPORT_XOFFSET_6_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_6 0x0135
+#define mmPA_CL_VPORT_YSCALE_6_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_6 0x0136
+#define mmPA_CL_VPORT_YOFFSET_6_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_6 0x0137
+#define mmPA_CL_VPORT_ZSCALE_6_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_6 0x0138
+#define mmPA_CL_VPORT_ZOFFSET_6_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_7 0x0139
+#define mmPA_CL_VPORT_XSCALE_7_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_7 0x013a
+#define mmPA_CL_VPORT_XOFFSET_7_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_7 0x013b
+#define mmPA_CL_VPORT_YSCALE_7_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_7 0x013c
+#define mmPA_CL_VPORT_YOFFSET_7_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_7 0x013d
+#define mmPA_CL_VPORT_ZSCALE_7_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_7 0x013e
+#define mmPA_CL_VPORT_ZOFFSET_7_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_8 0x013f
+#define mmPA_CL_VPORT_XSCALE_8_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_8 0x0140
+#define mmPA_CL_VPORT_XOFFSET_8_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_8 0x0141
+#define mmPA_CL_VPORT_YSCALE_8_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_8 0x0142
+#define mmPA_CL_VPORT_YOFFSET_8_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_8 0x0143
+#define mmPA_CL_VPORT_ZSCALE_8_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_8 0x0144
+#define mmPA_CL_VPORT_ZOFFSET_8_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_9 0x0145
+#define mmPA_CL_VPORT_XSCALE_9_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_9 0x0146
+#define mmPA_CL_VPORT_XOFFSET_9_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_9 0x0147
+#define mmPA_CL_VPORT_YSCALE_9_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_9 0x0148
+#define mmPA_CL_VPORT_YOFFSET_9_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_9 0x0149
+#define mmPA_CL_VPORT_ZSCALE_9_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_9 0x014a
+#define mmPA_CL_VPORT_ZOFFSET_9_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_10 0x014b
+#define mmPA_CL_VPORT_XSCALE_10_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_10 0x014c
+#define mmPA_CL_VPORT_XOFFSET_10_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_10 0x014d
+#define mmPA_CL_VPORT_YSCALE_10_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_10 0x014e
+#define mmPA_CL_VPORT_YOFFSET_10_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_10 0x014f
+#define mmPA_CL_VPORT_ZSCALE_10_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_10 0x0150
+#define mmPA_CL_VPORT_ZOFFSET_10_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_11 0x0151
+#define mmPA_CL_VPORT_XSCALE_11_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_11 0x0152
+#define mmPA_CL_VPORT_XOFFSET_11_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_11 0x0153
+#define mmPA_CL_VPORT_YSCALE_11_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_11 0x0154
+#define mmPA_CL_VPORT_YOFFSET_11_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_11 0x0155
+#define mmPA_CL_VPORT_ZSCALE_11_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_11 0x0156
+#define mmPA_CL_VPORT_ZOFFSET_11_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_12 0x0157
+#define mmPA_CL_VPORT_XSCALE_12_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_12 0x0158
+#define mmPA_CL_VPORT_XOFFSET_12_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_12 0x0159
+#define mmPA_CL_VPORT_YSCALE_12_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_12 0x015a
+#define mmPA_CL_VPORT_YOFFSET_12_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_12 0x015b
+#define mmPA_CL_VPORT_ZSCALE_12_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_12 0x015c
+#define mmPA_CL_VPORT_ZOFFSET_12_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_13 0x015d
+#define mmPA_CL_VPORT_XSCALE_13_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_13 0x015e
+#define mmPA_CL_VPORT_XOFFSET_13_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_13 0x015f
+#define mmPA_CL_VPORT_YSCALE_13_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_13 0x0160
+#define mmPA_CL_VPORT_YOFFSET_13_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_13 0x0161
+#define mmPA_CL_VPORT_ZSCALE_13_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_13 0x0162
+#define mmPA_CL_VPORT_ZOFFSET_13_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_14 0x0163
+#define mmPA_CL_VPORT_XSCALE_14_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_14 0x0164
+#define mmPA_CL_VPORT_XOFFSET_14_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_14 0x0165
+#define mmPA_CL_VPORT_YSCALE_14_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_14 0x0166
+#define mmPA_CL_VPORT_YOFFSET_14_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_14 0x0167
+#define mmPA_CL_VPORT_ZSCALE_14_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_14 0x0168
+#define mmPA_CL_VPORT_ZOFFSET_14_BASE_IDX 1
+#define mmPA_CL_VPORT_XSCALE_15 0x0169
+#define mmPA_CL_VPORT_XSCALE_15_BASE_IDX 1
+#define mmPA_CL_VPORT_XOFFSET_15 0x016a
+#define mmPA_CL_VPORT_XOFFSET_15_BASE_IDX 1
+#define mmPA_CL_VPORT_YSCALE_15 0x016b
+#define mmPA_CL_VPORT_YSCALE_15_BASE_IDX 1
+#define mmPA_CL_VPORT_YOFFSET_15 0x016c
+#define mmPA_CL_VPORT_YOFFSET_15_BASE_IDX 1
+#define mmPA_CL_VPORT_ZSCALE_15 0x016d
+#define mmPA_CL_VPORT_ZSCALE_15_BASE_IDX 1
+#define mmPA_CL_VPORT_ZOFFSET_15 0x016e
+#define mmPA_CL_VPORT_ZOFFSET_15_BASE_IDX 1
+#define mmPA_CL_UCP_0_X 0x016f
+#define mmPA_CL_UCP_0_X_BASE_IDX 1
+#define mmPA_CL_UCP_0_Y 0x0170
+#define mmPA_CL_UCP_0_Y_BASE_IDX 1
+#define mmPA_CL_UCP_0_Z 0x0171
+#define mmPA_CL_UCP_0_Z_BASE_IDX 1
+#define mmPA_CL_UCP_0_W 0x0172
+#define mmPA_CL_UCP_0_W_BASE_IDX 1
+#define mmPA_CL_UCP_1_X 0x0173
+#define mmPA_CL_UCP_1_X_BASE_IDX 1
+#define mmPA_CL_UCP_1_Y 0x0174
+#define mmPA_CL_UCP_1_Y_BASE_IDX 1
+#define mmPA_CL_UCP_1_Z 0x0175
+#define mmPA_CL_UCP_1_Z_BASE_IDX 1
+#define mmPA_CL_UCP_1_W 0x0176
+#define mmPA_CL_UCP_1_W_BASE_IDX 1
+#define mmPA_CL_UCP_2_X 0x0177
+#define mmPA_CL_UCP_2_X_BASE_IDX 1
+#define mmPA_CL_UCP_2_Y 0x0178
+#define mmPA_CL_UCP_2_Y_BASE_IDX 1
+#define mmPA_CL_UCP_2_Z 0x0179
+#define mmPA_CL_UCP_2_Z_BASE_IDX 1
+#define mmPA_CL_UCP_2_W 0x017a
+#define mmPA_CL_UCP_2_W_BASE_IDX 1
+#define mmPA_CL_UCP_3_X 0x017b
+#define mmPA_CL_UCP_3_X_BASE_IDX 1
+#define mmPA_CL_UCP_3_Y 0x017c
+#define mmPA_CL_UCP_3_Y_BASE_IDX 1
+#define mmPA_CL_UCP_3_Z 0x017d
+#define mmPA_CL_UCP_3_Z_BASE_IDX 1
+#define mmPA_CL_UCP_3_W 0x017e
+#define mmPA_CL_UCP_3_W_BASE_IDX 1
+#define mmPA_CL_UCP_4_X 0x017f
+#define mmPA_CL_UCP_4_X_BASE_IDX 1
+#define mmPA_CL_UCP_4_Y 0x0180
+#define mmPA_CL_UCP_4_Y_BASE_IDX 1
+#define mmPA_CL_UCP_4_Z 0x0181
+#define mmPA_CL_UCP_4_Z_BASE_IDX 1
+#define mmPA_CL_UCP_4_W 0x0182
+#define mmPA_CL_UCP_4_W_BASE_IDX 1
+#define mmPA_CL_UCP_5_X 0x0183
+#define mmPA_CL_UCP_5_X_BASE_IDX 1
+#define mmPA_CL_UCP_5_Y 0x0184
+#define mmPA_CL_UCP_5_Y_BASE_IDX 1
+#define mmPA_CL_UCP_5_Z 0x0185
+#define mmPA_CL_UCP_5_Z_BASE_IDX 1
+#define mmPA_CL_UCP_5_W 0x0186
+#define mmPA_CL_UCP_5_W_BASE_IDX 1
+#define mmPA_CL_PROG_NEAR_CLIP_Z 0x0187
+#define mmPA_CL_PROG_NEAR_CLIP_Z_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_0 0x0191
+#define mmSPI_PS_INPUT_CNTL_0_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_1 0x0192
+#define mmSPI_PS_INPUT_CNTL_1_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_2 0x0193
+#define mmSPI_PS_INPUT_CNTL_2_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_3 0x0194
+#define mmSPI_PS_INPUT_CNTL_3_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_4 0x0195
+#define mmSPI_PS_INPUT_CNTL_4_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_5 0x0196
+#define mmSPI_PS_INPUT_CNTL_5_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_6 0x0197
+#define mmSPI_PS_INPUT_CNTL_6_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_7 0x0198
+#define mmSPI_PS_INPUT_CNTL_7_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_8 0x0199
+#define mmSPI_PS_INPUT_CNTL_8_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_9 0x019a
+#define mmSPI_PS_INPUT_CNTL_9_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_10 0x019b
+#define mmSPI_PS_INPUT_CNTL_10_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_11 0x019c
+#define mmSPI_PS_INPUT_CNTL_11_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_12 0x019d
+#define mmSPI_PS_INPUT_CNTL_12_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_13 0x019e
+#define mmSPI_PS_INPUT_CNTL_13_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_14 0x019f
+#define mmSPI_PS_INPUT_CNTL_14_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_15 0x01a0
+#define mmSPI_PS_INPUT_CNTL_15_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_16 0x01a1
+#define mmSPI_PS_INPUT_CNTL_16_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_17 0x01a2
+#define mmSPI_PS_INPUT_CNTL_17_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_18 0x01a3
+#define mmSPI_PS_INPUT_CNTL_18_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_19 0x01a4
+#define mmSPI_PS_INPUT_CNTL_19_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_20 0x01a5
+#define mmSPI_PS_INPUT_CNTL_20_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_21 0x01a6
+#define mmSPI_PS_INPUT_CNTL_21_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_22 0x01a7
+#define mmSPI_PS_INPUT_CNTL_22_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_23 0x01a8
+#define mmSPI_PS_INPUT_CNTL_23_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_24 0x01a9
+#define mmSPI_PS_INPUT_CNTL_24_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_25 0x01aa
+#define mmSPI_PS_INPUT_CNTL_25_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_26 0x01ab
+#define mmSPI_PS_INPUT_CNTL_26_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_27 0x01ac
+#define mmSPI_PS_INPUT_CNTL_27_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_28 0x01ad
+#define mmSPI_PS_INPUT_CNTL_28_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_29 0x01ae
+#define mmSPI_PS_INPUT_CNTL_29_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_30 0x01af
+#define mmSPI_PS_INPUT_CNTL_30_BASE_IDX 1
+#define mmSPI_PS_INPUT_CNTL_31 0x01b0
+#define mmSPI_PS_INPUT_CNTL_31_BASE_IDX 1
+#define mmSPI_VS_OUT_CONFIG 0x01b1
+#define mmSPI_VS_OUT_CONFIG_BASE_IDX 1
+#define mmSPI_PS_INPUT_ENA 0x01b3
+#define mmSPI_PS_INPUT_ENA_BASE_IDX 1
+#define mmSPI_PS_INPUT_ADDR 0x01b4
+#define mmSPI_PS_INPUT_ADDR_BASE_IDX 1
+#define mmSPI_INTERP_CONTROL_0 0x01b5
+#define mmSPI_INTERP_CONTROL_0_BASE_IDX 1
+#define mmSPI_PS_IN_CONTROL 0x01b6
+#define mmSPI_PS_IN_CONTROL_BASE_IDX 1
+#define mmSPI_BARYC_CNTL 0x01b8
+#define mmSPI_BARYC_CNTL_BASE_IDX 1
+#define mmSPI_TMPRING_SIZE 0x01ba
+#define mmSPI_TMPRING_SIZE_BASE_IDX 1
+#define mmSPI_SHADER_POS_FORMAT 0x01c3
+#define mmSPI_SHADER_POS_FORMAT_BASE_IDX 1
+#define mmSPI_SHADER_Z_FORMAT 0x01c4
+#define mmSPI_SHADER_Z_FORMAT_BASE_IDX 1
+#define mmSPI_SHADER_COL_FORMAT 0x01c5
+#define mmSPI_SHADER_COL_FORMAT_BASE_IDX 1
+#define mmSX_PS_DOWNCONVERT 0x01d5
+#define mmSX_PS_DOWNCONVERT_BASE_IDX 1
+#define mmSX_BLEND_OPT_EPSILON 0x01d6
+#define mmSX_BLEND_OPT_EPSILON_BASE_IDX 1
+#define mmSX_BLEND_OPT_CONTROL 0x01d7
+#define mmSX_BLEND_OPT_CONTROL_BASE_IDX 1
+#define mmSX_MRT0_BLEND_OPT 0x01d8
+#define mmSX_MRT0_BLEND_OPT_BASE_IDX 1
+#define mmSX_MRT1_BLEND_OPT 0x01d9
+#define mmSX_MRT1_BLEND_OPT_BASE_IDX 1
+#define mmSX_MRT2_BLEND_OPT 0x01da
+#define mmSX_MRT2_BLEND_OPT_BASE_IDX 1
+#define mmSX_MRT3_BLEND_OPT 0x01db
+#define mmSX_MRT3_BLEND_OPT_BASE_IDX 1
+#define mmSX_MRT4_BLEND_OPT 0x01dc
+#define mmSX_MRT4_BLEND_OPT_BASE_IDX 1
+#define mmSX_MRT5_BLEND_OPT 0x01dd
+#define mmSX_MRT5_BLEND_OPT_BASE_IDX 1
+#define mmSX_MRT6_BLEND_OPT 0x01de
+#define mmSX_MRT6_BLEND_OPT_BASE_IDX 1
+#define mmSX_MRT7_BLEND_OPT 0x01df
+#define mmSX_MRT7_BLEND_OPT_BASE_IDX 1
+#define mmCB_BLEND0_CONTROL 0x01e0
+#define mmCB_BLEND0_CONTROL_BASE_IDX 1
+#define mmCB_BLEND1_CONTROL 0x01e1
+#define mmCB_BLEND1_CONTROL_BASE_IDX 1
+#define mmCB_BLEND2_CONTROL 0x01e2
+#define mmCB_BLEND2_CONTROL_BASE_IDX 1
+#define mmCB_BLEND3_CONTROL 0x01e3
+#define mmCB_BLEND3_CONTROL_BASE_IDX 1
+#define mmCB_BLEND4_CONTROL 0x01e4
+#define mmCB_BLEND4_CONTROL_BASE_IDX 1
+#define mmCB_BLEND5_CONTROL 0x01e5
+#define mmCB_BLEND5_CONTROL_BASE_IDX 1
+#define mmCB_BLEND6_CONTROL 0x01e6
+#define mmCB_BLEND6_CONTROL_BASE_IDX 1
+#define mmCB_BLEND7_CONTROL 0x01e7
+#define mmCB_BLEND7_CONTROL_BASE_IDX 1
+#define mmCB_MRT0_EPITCH 0x01e8
+#define mmCB_MRT0_EPITCH_BASE_IDX 1
+#define mmCB_MRT1_EPITCH 0x01e9
+#define mmCB_MRT1_EPITCH_BASE_IDX 1
+#define mmCB_MRT2_EPITCH 0x01ea
+#define mmCB_MRT2_EPITCH_BASE_IDX 1
+#define mmCB_MRT3_EPITCH 0x01eb
+#define mmCB_MRT3_EPITCH_BASE_IDX 1
+#define mmCB_MRT4_EPITCH 0x01ec
+#define mmCB_MRT4_EPITCH_BASE_IDX 1
+#define mmCB_MRT5_EPITCH 0x01ed
+#define mmCB_MRT5_EPITCH_BASE_IDX 1
+#define mmCB_MRT6_EPITCH 0x01ee
+#define mmCB_MRT6_EPITCH_BASE_IDX 1
+#define mmCB_MRT7_EPITCH 0x01ef
+#define mmCB_MRT7_EPITCH_BASE_IDX 1
+#define mmCS_COPY_STATE 0x01f3
+#define mmCS_COPY_STATE_BASE_IDX 1
+#define mmGFX_COPY_STATE 0x01f4
+#define mmGFX_COPY_STATE_BASE_IDX 1
+#define mmPA_CL_POINT_X_RAD 0x01f5
+#define mmPA_CL_POINT_X_RAD_BASE_IDX 1
+#define mmPA_CL_POINT_Y_RAD 0x01f6
+#define mmPA_CL_POINT_Y_RAD_BASE_IDX 1
+#define mmPA_CL_POINT_SIZE 0x01f7
+#define mmPA_CL_POINT_SIZE_BASE_IDX 1
+#define mmPA_CL_POINT_CULL_RAD 0x01f8
+#define mmPA_CL_POINT_CULL_RAD_BASE_IDX 1
+#define mmVGT_DMA_BASE_HI 0x01f9
+#define mmVGT_DMA_BASE_HI_BASE_IDX 1
+#define mmVGT_DMA_BASE 0x01fa
+#define mmVGT_DMA_BASE_BASE_IDX 1
+#define mmVGT_DRAW_INITIATOR 0x01fc
+#define mmVGT_DRAW_INITIATOR_BASE_IDX 1
+#define mmVGT_IMMED_DATA 0x01fd
+#define mmVGT_IMMED_DATA_BASE_IDX 1
+#define mmVGT_EVENT_ADDRESS_REG 0x01fe
+#define mmVGT_EVENT_ADDRESS_REG_BASE_IDX 1
+#define mmDB_DEPTH_CONTROL 0x0200
+#define mmDB_DEPTH_CONTROL_BASE_IDX 1
+#define mmDB_EQAA 0x0201
+#define mmDB_EQAA_BASE_IDX 1
+#define mmCB_COLOR_CONTROL 0x0202
+#define mmCB_COLOR_CONTROL_BASE_IDX 1
+#define mmDB_SHADER_CONTROL 0x0203
+#define mmDB_SHADER_CONTROL_BASE_IDX 1
+#define mmPA_CL_CLIP_CNTL 0x0204
+#define mmPA_CL_CLIP_CNTL_BASE_IDX 1
+#define mmPA_SU_SC_MODE_CNTL 0x0205
+#define mmPA_SU_SC_MODE_CNTL_BASE_IDX 1
+#define mmPA_CL_VTE_CNTL 0x0206
+#define mmPA_CL_VTE_CNTL_BASE_IDX 1
+#define mmPA_CL_VS_OUT_CNTL 0x0207
+#define mmPA_CL_VS_OUT_CNTL_BASE_IDX 1
+#define mmPA_CL_NANINF_CNTL 0x0208
+#define mmPA_CL_NANINF_CNTL_BASE_IDX 1
+#define mmPA_SU_LINE_STIPPLE_CNTL 0x0209
+#define mmPA_SU_LINE_STIPPLE_CNTL_BASE_IDX 1
+#define mmPA_SU_LINE_STIPPLE_SCALE 0x020a
+#define mmPA_SU_LINE_STIPPLE_SCALE_BASE_IDX 1
+#define mmPA_SU_PRIM_FILTER_CNTL 0x020b
+#define mmPA_SU_PRIM_FILTER_CNTL_BASE_IDX 1
+#define mmPA_SU_SMALL_PRIM_FILTER_CNTL 0x020c
+#define mmPA_SU_SMALL_PRIM_FILTER_CNTL_BASE_IDX 1
+#define mmPA_CL_OBJPRIM_ID_CNTL 0x020d
+#define mmPA_CL_OBJPRIM_ID_CNTL_BASE_IDX 1
+#define mmPA_CL_NGG_CNTL 0x020e
+#define mmPA_CL_NGG_CNTL_BASE_IDX 1
+#define mmPA_SU_OVER_RASTERIZATION_CNTL 0x020f
+#define mmPA_SU_OVER_RASTERIZATION_CNTL_BASE_IDX 1
+#define mmPA_STEREO_CNTL 0x0210
+#define mmPA_STEREO_CNTL_BASE_IDX 1
+#define mmPA_SU_POINT_SIZE 0x0280
+#define mmPA_SU_POINT_SIZE_BASE_IDX 1
+#define mmPA_SU_POINT_MINMAX 0x0281
+#define mmPA_SU_POINT_MINMAX_BASE_IDX 1
+#define mmPA_SU_LINE_CNTL 0x0282
+#define mmPA_SU_LINE_CNTL_BASE_IDX 1
+#define mmPA_SC_LINE_STIPPLE 0x0283
+#define mmPA_SC_LINE_STIPPLE_BASE_IDX 1
+#define mmVGT_OUTPUT_PATH_CNTL 0x0284
+#define mmVGT_OUTPUT_PATH_CNTL_BASE_IDX 1
+#define mmVGT_HOS_CNTL 0x0285
+#define mmVGT_HOS_CNTL_BASE_IDX 1
+#define mmVGT_HOS_MAX_TESS_LEVEL 0x0286
+#define mmVGT_HOS_MAX_TESS_LEVEL_BASE_IDX 1
+#define mmVGT_HOS_MIN_TESS_LEVEL 0x0287
+#define mmVGT_HOS_MIN_TESS_LEVEL_BASE_IDX 1
+#define mmVGT_HOS_REUSE_DEPTH 0x0288
+#define mmVGT_HOS_REUSE_DEPTH_BASE_IDX 1
+#define mmVGT_GROUP_PRIM_TYPE 0x0289
+#define mmVGT_GROUP_PRIM_TYPE_BASE_IDX 1
+#define mmVGT_GROUP_FIRST_DECR 0x028a
+#define mmVGT_GROUP_FIRST_DECR_BASE_IDX 1
+#define mmVGT_GROUP_DECR 0x028b
+#define mmVGT_GROUP_DECR_BASE_IDX 1
+#define mmVGT_GROUP_VECT_0_CNTL 0x028c
+#define mmVGT_GROUP_VECT_0_CNTL_BASE_IDX 1
+#define mmVGT_GROUP_VECT_1_CNTL 0x028d
+#define mmVGT_GROUP_VECT_1_CNTL_BASE_IDX 1
+#define mmVGT_GROUP_VECT_0_FMT_CNTL 0x028e
+#define mmVGT_GROUP_VECT_0_FMT_CNTL_BASE_IDX 1
+#define mmVGT_GROUP_VECT_1_FMT_CNTL 0x028f
+#define mmVGT_GROUP_VECT_1_FMT_CNTL_BASE_IDX 1
+#define mmVGT_GS_MODE 0x0290
+#define mmVGT_GS_MODE_BASE_IDX 1
+#define mmVGT_GS_ONCHIP_CNTL 0x0291
+#define mmVGT_GS_ONCHIP_CNTL_BASE_IDX 1
+#define mmPA_SC_MODE_CNTL_0 0x0292
+#define mmPA_SC_MODE_CNTL_0_BASE_IDX 1
+#define mmPA_SC_MODE_CNTL_1 0x0293
+#define mmPA_SC_MODE_CNTL_1_BASE_IDX 1
+#define mmVGT_ENHANCE 0x0294
+#define mmVGT_ENHANCE_BASE_IDX 1
+#define mmVGT_GS_PER_ES 0x0295
+#define mmVGT_GS_PER_ES_BASE_IDX 1
+#define mmVGT_ES_PER_GS 0x0296
+#define mmVGT_ES_PER_GS_BASE_IDX 1
+#define mmVGT_GS_PER_VS 0x0297
+#define mmVGT_GS_PER_VS_BASE_IDX 1
+#define mmVGT_GSVS_RING_OFFSET_1 0x0298
+#define mmVGT_GSVS_RING_OFFSET_1_BASE_IDX 1
+#define mmVGT_GSVS_RING_OFFSET_2 0x0299
+#define mmVGT_GSVS_RING_OFFSET_2_BASE_IDX 1
+#define mmVGT_GSVS_RING_OFFSET_3 0x029a
+#define mmVGT_GSVS_RING_OFFSET_3_BASE_IDX 1
+#define mmVGT_GS_OUT_PRIM_TYPE 0x029b
+#define mmVGT_GS_OUT_PRIM_TYPE_BASE_IDX 1
+#define mmIA_ENHANCE 0x029c
+#define mmIA_ENHANCE_BASE_IDX 1
+#define mmVGT_DMA_SIZE 0x029d
+#define mmVGT_DMA_SIZE_BASE_IDX 1
+#define mmVGT_DMA_MAX_SIZE 0x029e
+#define mmVGT_DMA_MAX_SIZE_BASE_IDX 1
+#define mmVGT_DMA_INDEX_TYPE 0x029f
+#define mmVGT_DMA_INDEX_TYPE_BASE_IDX 1
+#define mmWD_ENHANCE 0x02a0
+#define mmWD_ENHANCE_BASE_IDX 1
+#define mmVGT_PRIMITIVEID_EN 0x02a1
+#define mmVGT_PRIMITIVEID_EN_BASE_IDX 1
+#define mmVGT_DMA_NUM_INSTANCES 0x02a2
+#define mmVGT_DMA_NUM_INSTANCES_BASE_IDX 1
+#define mmVGT_PRIMITIVEID_RESET 0x02a3
+#define mmVGT_PRIMITIVEID_RESET_BASE_IDX 1
+#define mmVGT_EVENT_INITIATOR 0x02a4
+#define mmVGT_EVENT_INITIATOR_BASE_IDX 1
+#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP 0x02a5
+#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_BASE_IDX 1
+#define mmVGT_DRAW_PAYLOAD_CNTL 0x02a6
+#define mmVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
+#define mmVGT_INSTANCE_STEP_RATE_0 0x02a8
+#define mmVGT_INSTANCE_STEP_RATE_0_BASE_IDX 1
+#define mmVGT_INSTANCE_STEP_RATE_1 0x02a9
+#define mmVGT_INSTANCE_STEP_RATE_1_BASE_IDX 1
+#define mmIA_MULTI_VGT_PARAM_BC 0x02aa
+#define mmIA_MULTI_VGT_PARAM_BC_BASE_IDX 1
+#define mmVGT_ESGS_RING_ITEMSIZE 0x02ab
+#define mmVGT_ESGS_RING_ITEMSIZE_BASE_IDX 1
+#define mmVGT_GSVS_RING_ITEMSIZE 0x02ac
+#define mmVGT_GSVS_RING_ITEMSIZE_BASE_IDX 1
+#define mmVGT_REUSE_OFF 0x02ad
+#define mmVGT_REUSE_OFF_BASE_IDX 1
+#define mmVGT_VTX_CNT_EN 0x02ae
+#define mmVGT_VTX_CNT_EN_BASE_IDX 1
+#define mmDB_HTILE_SURFACE 0x02af
+#define mmDB_HTILE_SURFACE_BASE_IDX 1
+#define mmDB_SRESULTS_COMPARE_STATE0 0x02b0
+#define mmDB_SRESULTS_COMPARE_STATE0_BASE_IDX 1
+#define mmDB_SRESULTS_COMPARE_STATE1 0x02b1
+#define mmDB_SRESULTS_COMPARE_STATE1_BASE_IDX 1
+#define mmDB_PRELOAD_CONTROL 0x02b2
+#define mmDB_PRELOAD_CONTROL_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_SIZE_0 0x02b4
+#define mmVGT_STRMOUT_BUFFER_SIZE_0_BASE_IDX 1
+#define mmVGT_STRMOUT_VTX_STRIDE_0 0x02b5
+#define mmVGT_STRMOUT_VTX_STRIDE_0_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_OFFSET_0 0x02b7
+#define mmVGT_STRMOUT_BUFFER_OFFSET_0_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_SIZE_1 0x02b8
+#define mmVGT_STRMOUT_BUFFER_SIZE_1_BASE_IDX 1
+#define mmVGT_STRMOUT_VTX_STRIDE_1 0x02b9
+#define mmVGT_STRMOUT_VTX_STRIDE_1_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_OFFSET_1 0x02bb
+#define mmVGT_STRMOUT_BUFFER_OFFSET_1_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_SIZE_2 0x02bc
+#define mmVGT_STRMOUT_BUFFER_SIZE_2_BASE_IDX 1
+#define mmVGT_STRMOUT_VTX_STRIDE_2 0x02bd
+#define mmVGT_STRMOUT_VTX_STRIDE_2_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_OFFSET_2 0x02bf
+#define mmVGT_STRMOUT_BUFFER_OFFSET_2_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_SIZE_3 0x02c0
+#define mmVGT_STRMOUT_BUFFER_SIZE_3_BASE_IDX 1
+#define mmVGT_STRMOUT_VTX_STRIDE_3 0x02c1
+#define mmVGT_STRMOUT_VTX_STRIDE_3_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_OFFSET_3 0x02c3
+#define mmVGT_STRMOUT_BUFFER_OFFSET_3_BASE_IDX 1
+#define mmVGT_STRMOUT_DRAW_OPAQUE_OFFSET 0x02ca
+#define mmVGT_STRMOUT_DRAW_OPAQUE_OFFSET_BASE_IDX 1
+#define mmVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE 0x02cb
+#define mmVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE_BASE_IDX 1
+#define mmVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE 0x02cc
+#define mmVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE_BASE_IDX 1
+#define mmVGT_GS_MAX_VERT_OUT 0x02ce
+#define mmVGT_GS_MAX_VERT_OUT_BASE_IDX 1
+#define mmVGT_TESS_DISTRIBUTION 0x02d4
+#define mmVGT_TESS_DISTRIBUTION_BASE_IDX 1
+#define mmVGT_SHADER_STAGES_EN 0x02d5
+#define mmVGT_SHADER_STAGES_EN_BASE_IDX 1
+#define mmVGT_LS_HS_CONFIG 0x02d6
+#define mmVGT_LS_HS_CONFIG_BASE_IDX 1
+#define mmVGT_GS_VERT_ITEMSIZE 0x02d7
+#define mmVGT_GS_VERT_ITEMSIZE_BASE_IDX 1
+#define mmVGT_GS_VERT_ITEMSIZE_1 0x02d8
+#define mmVGT_GS_VERT_ITEMSIZE_1_BASE_IDX 1
+#define mmVGT_GS_VERT_ITEMSIZE_2 0x02d9
+#define mmVGT_GS_VERT_ITEMSIZE_2_BASE_IDX 1
+#define mmVGT_GS_VERT_ITEMSIZE_3 0x02da
+#define mmVGT_GS_VERT_ITEMSIZE_3_BASE_IDX 1
+#define mmVGT_TF_PARAM 0x02db
+#define mmVGT_TF_PARAM_BASE_IDX 1
+#define mmDB_ALPHA_TO_MASK 0x02dc
+#define mmDB_ALPHA_TO_MASK_BASE_IDX 1
+#define mmVGT_DISPATCH_DRAW_INDEX 0x02dd
+#define mmVGT_DISPATCH_DRAW_INDEX_BASE_IDX 1
+#define mmPA_SU_POLY_OFFSET_DB_FMT_CNTL 0x02de
+#define mmPA_SU_POLY_OFFSET_DB_FMT_CNTL_BASE_IDX 1
+#define mmPA_SU_POLY_OFFSET_CLAMP 0x02df
+#define mmPA_SU_POLY_OFFSET_CLAMP_BASE_IDX 1
+#define mmPA_SU_POLY_OFFSET_FRONT_SCALE 0x02e0
+#define mmPA_SU_POLY_OFFSET_FRONT_SCALE_BASE_IDX 1
+#define mmPA_SU_POLY_OFFSET_FRONT_OFFSET 0x02e1
+#define mmPA_SU_POLY_OFFSET_FRONT_OFFSET_BASE_IDX 1
+#define mmPA_SU_POLY_OFFSET_BACK_SCALE 0x02e2
+#define mmPA_SU_POLY_OFFSET_BACK_SCALE_BASE_IDX 1
+#define mmPA_SU_POLY_OFFSET_BACK_OFFSET 0x02e3
+#define mmPA_SU_POLY_OFFSET_BACK_OFFSET_BASE_IDX 1
+#define mmVGT_GS_INSTANCE_CNT 0x02e4
+#define mmVGT_GS_INSTANCE_CNT_BASE_IDX 1
+#define mmVGT_STRMOUT_CONFIG 0x02e5
+#define mmVGT_STRMOUT_CONFIG_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_CONFIG 0x02e6
+#define mmVGT_STRMOUT_BUFFER_CONFIG_BASE_IDX 1
+#define mmVGT_DMA_EVENT_INITIATOR 0x02e7
+#define mmVGT_DMA_EVENT_INITIATOR_BASE_IDX 1
+#define mmPA_SC_CENTROID_PRIORITY_0 0x02f5
+#define mmPA_SC_CENTROID_PRIORITY_0_BASE_IDX 1
+#define mmPA_SC_CENTROID_PRIORITY_1 0x02f6
+#define mmPA_SC_CENTROID_PRIORITY_1_BASE_IDX 1
+#define mmPA_SC_LINE_CNTL 0x02f7
+#define mmPA_SC_LINE_CNTL_BASE_IDX 1
+#define mmPA_SC_AA_CONFIG 0x02f8
+#define mmPA_SC_AA_CONFIG_BASE_IDX 1
+#define mmPA_SU_VTX_CNTL 0x02f9
+#define mmPA_SU_VTX_CNTL_BASE_IDX 1
+#define mmPA_CL_GB_VERT_CLIP_ADJ 0x02fa
+#define mmPA_CL_GB_VERT_CLIP_ADJ_BASE_IDX 1
+#define mmPA_CL_GB_VERT_DISC_ADJ 0x02fb
+#define mmPA_CL_GB_VERT_DISC_ADJ_BASE_IDX 1
+#define mmPA_CL_GB_HORZ_CLIP_ADJ 0x02fc
+#define mmPA_CL_GB_HORZ_CLIP_ADJ_BASE_IDX 1
+#define mmPA_CL_GB_HORZ_DISC_ADJ 0x02fd
+#define mmPA_CL_GB_HORZ_DISC_ADJ_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 0x02fe
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 0x02ff
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 0x0300
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 0x0301
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 0x0302
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 0x0303
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 0x0304
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 0x0305
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 0x0306
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 0x0307
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 0x0308
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 0x0309
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 0x030a
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 0x030b
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 0x030c
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2_BASE_IDX 1
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 0x030d
+#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3_BASE_IDX 1
+#define mmPA_SC_AA_MASK_X0Y0_X1Y0 0x030e
+#define mmPA_SC_AA_MASK_X0Y0_X1Y0_BASE_IDX 1
+#define mmPA_SC_AA_MASK_X0Y1_X1Y1 0x030f
+#define mmPA_SC_AA_MASK_X0Y1_X1Y1_BASE_IDX 1
+#define mmPA_SC_SHADER_CONTROL 0x0310
+#define mmPA_SC_SHADER_CONTROL_BASE_IDX 1
+#define mmPA_SC_BINNER_CNTL_0 0x0311
+#define mmPA_SC_BINNER_CNTL_0_BASE_IDX 1
+#define mmPA_SC_BINNER_CNTL_1 0x0312
+#define mmPA_SC_BINNER_CNTL_1_BASE_IDX 1
+#define mmPA_SC_CONSERVATIVE_RASTERIZATION_CNTL 0x0313
+#define mmPA_SC_CONSERVATIVE_RASTERIZATION_CNTL_BASE_IDX 1
+#define mmPA_SC_NGG_MODE_CNTL 0x0314
+#define mmPA_SC_NGG_MODE_CNTL_BASE_IDX 1
+#define mmVGT_VERTEX_REUSE_BLOCK_CNTL 0x0316
+#define mmVGT_VERTEX_REUSE_BLOCK_CNTL_BASE_IDX 1
+#define mmVGT_OUT_DEALLOC_CNTL 0x0317
+#define mmVGT_OUT_DEALLOC_CNTL_BASE_IDX 1
+#define mmCB_COLOR0_BASE 0x0318
+#define mmCB_COLOR0_BASE_BASE_IDX 1
+#define mmCB_COLOR0_BASE_EXT 0x0319
+#define mmCB_COLOR0_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR0_ATTRIB2 0x031a
+#define mmCB_COLOR0_ATTRIB2_BASE_IDX 1
+#define mmCB_COLOR0_VIEW 0x031b
+#define mmCB_COLOR0_VIEW_BASE_IDX 1
+#define mmCB_COLOR0_INFO 0x031c
+#define mmCB_COLOR0_INFO_BASE_IDX 1
+#define mmCB_COLOR0_ATTRIB 0x031d
+#define mmCB_COLOR0_ATTRIB_BASE_IDX 1
+#define mmCB_COLOR0_DCC_CONTROL 0x031e
+#define mmCB_COLOR0_DCC_CONTROL_BASE_IDX 1
+#define mmCB_COLOR0_CMASK 0x031f
+#define mmCB_COLOR0_CMASK_BASE_IDX 1
+#define mmCB_COLOR0_CMASK_BASE_EXT 0x0320
+#define mmCB_COLOR0_CMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR0_FMASK 0x0321
+#define mmCB_COLOR0_FMASK_BASE_IDX 1
+#define mmCB_COLOR0_FMASK_BASE_EXT 0x0322
+#define mmCB_COLOR0_FMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR0_CLEAR_WORD0 0x0323
+#define mmCB_COLOR0_CLEAR_WORD0_BASE_IDX 1
+#define mmCB_COLOR0_CLEAR_WORD1 0x0324
+#define mmCB_COLOR0_CLEAR_WORD1_BASE_IDX 1
+#define mmCB_COLOR0_DCC_BASE 0x0325
+#define mmCB_COLOR0_DCC_BASE_BASE_IDX 1
+#define mmCB_COLOR0_DCC_BASE_EXT 0x0326
+#define mmCB_COLOR0_DCC_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR1_BASE 0x0327
+#define mmCB_COLOR1_BASE_BASE_IDX 1
+#define mmCB_COLOR1_BASE_EXT 0x0328
+#define mmCB_COLOR1_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR1_ATTRIB2 0x0329
+#define mmCB_COLOR1_ATTRIB2_BASE_IDX 1
+#define mmCB_COLOR1_VIEW 0x032a
+#define mmCB_COLOR1_VIEW_BASE_IDX 1
+#define mmCB_COLOR1_INFO 0x032b
+#define mmCB_COLOR1_INFO_BASE_IDX 1
+#define mmCB_COLOR1_ATTRIB 0x032c
+#define mmCB_COLOR1_ATTRIB_BASE_IDX 1
+#define mmCB_COLOR1_DCC_CONTROL 0x032d
+#define mmCB_COLOR1_DCC_CONTROL_BASE_IDX 1
+#define mmCB_COLOR1_CMASK 0x032e
+#define mmCB_COLOR1_CMASK_BASE_IDX 1
+#define mmCB_COLOR1_CMASK_BASE_EXT 0x032f
+#define mmCB_COLOR1_CMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR1_FMASK 0x0330
+#define mmCB_COLOR1_FMASK_BASE_IDX 1
+#define mmCB_COLOR1_FMASK_BASE_EXT 0x0331
+#define mmCB_COLOR1_FMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR1_CLEAR_WORD0 0x0332
+#define mmCB_COLOR1_CLEAR_WORD0_BASE_IDX 1
+#define mmCB_COLOR1_CLEAR_WORD1 0x0333
+#define mmCB_COLOR1_CLEAR_WORD1_BASE_IDX 1
+#define mmCB_COLOR1_DCC_BASE 0x0334
+#define mmCB_COLOR1_DCC_BASE_BASE_IDX 1
+#define mmCB_COLOR1_DCC_BASE_EXT 0x0335
+#define mmCB_COLOR1_DCC_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR2_BASE 0x0336
+#define mmCB_COLOR2_BASE_BASE_IDX 1
+#define mmCB_COLOR2_BASE_EXT 0x0337
+#define mmCB_COLOR2_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR2_ATTRIB2 0x0338
+#define mmCB_COLOR2_ATTRIB2_BASE_IDX 1
+#define mmCB_COLOR2_VIEW 0x0339
+#define mmCB_COLOR2_VIEW_BASE_IDX 1
+#define mmCB_COLOR2_INFO 0x033a
+#define mmCB_COLOR2_INFO_BASE_IDX 1
+#define mmCB_COLOR2_ATTRIB 0x033b
+#define mmCB_COLOR2_ATTRIB_BASE_IDX 1
+#define mmCB_COLOR2_DCC_CONTROL 0x033c
+#define mmCB_COLOR2_DCC_CONTROL_BASE_IDX 1
+#define mmCB_COLOR2_CMASK 0x033d
+#define mmCB_COLOR2_CMASK_BASE_IDX 1
+#define mmCB_COLOR2_CMASK_BASE_EXT 0x033e
+#define mmCB_COLOR2_CMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR2_FMASK 0x033f
+#define mmCB_COLOR2_FMASK_BASE_IDX 1
+#define mmCB_COLOR2_FMASK_BASE_EXT 0x0340
+#define mmCB_COLOR2_FMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR2_CLEAR_WORD0 0x0341
+#define mmCB_COLOR2_CLEAR_WORD0_BASE_IDX 1
+#define mmCB_COLOR2_CLEAR_WORD1 0x0342
+#define mmCB_COLOR2_CLEAR_WORD1_BASE_IDX 1
+#define mmCB_COLOR2_DCC_BASE 0x0343
+#define mmCB_COLOR2_DCC_BASE_BASE_IDX 1
+#define mmCB_COLOR2_DCC_BASE_EXT 0x0344
+#define mmCB_COLOR2_DCC_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR3_BASE 0x0345
+#define mmCB_COLOR3_BASE_BASE_IDX 1
+#define mmCB_COLOR3_BASE_EXT 0x0346
+#define mmCB_COLOR3_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR3_ATTRIB2 0x0347
+#define mmCB_COLOR3_ATTRIB2_BASE_IDX 1
+#define mmCB_COLOR3_VIEW 0x0348
+#define mmCB_COLOR3_VIEW_BASE_IDX 1
+#define mmCB_COLOR3_INFO 0x0349
+#define mmCB_COLOR3_INFO_BASE_IDX 1
+#define mmCB_COLOR3_ATTRIB 0x034a
+#define mmCB_COLOR3_ATTRIB_BASE_IDX 1
+#define mmCB_COLOR3_DCC_CONTROL 0x034b
+#define mmCB_COLOR3_DCC_CONTROL_BASE_IDX 1
+#define mmCB_COLOR3_CMASK 0x034c
+#define mmCB_COLOR3_CMASK_BASE_IDX 1
+#define mmCB_COLOR3_CMASK_BASE_EXT 0x034d
+#define mmCB_COLOR3_CMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR3_FMASK 0x034e
+#define mmCB_COLOR3_FMASK_BASE_IDX 1
+#define mmCB_COLOR3_FMASK_BASE_EXT 0x034f
+#define mmCB_COLOR3_FMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR3_CLEAR_WORD0 0x0350
+#define mmCB_COLOR3_CLEAR_WORD0_BASE_IDX 1
+#define mmCB_COLOR3_CLEAR_WORD1 0x0351
+#define mmCB_COLOR3_CLEAR_WORD1_BASE_IDX 1
+#define mmCB_COLOR3_DCC_BASE 0x0352
+#define mmCB_COLOR3_DCC_BASE_BASE_IDX 1
+#define mmCB_COLOR3_DCC_BASE_EXT 0x0353
+#define mmCB_COLOR3_DCC_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR4_BASE 0x0354
+#define mmCB_COLOR4_BASE_BASE_IDX 1
+#define mmCB_COLOR4_BASE_EXT 0x0355
+#define mmCB_COLOR4_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR4_ATTRIB2 0x0356
+#define mmCB_COLOR4_ATTRIB2_BASE_IDX 1
+#define mmCB_COLOR4_VIEW 0x0357
+#define mmCB_COLOR4_VIEW_BASE_IDX 1
+#define mmCB_COLOR4_INFO 0x0358
+#define mmCB_COLOR4_INFO_BASE_IDX 1
+#define mmCB_COLOR4_ATTRIB 0x0359
+#define mmCB_COLOR4_ATTRIB_BASE_IDX 1
+#define mmCB_COLOR4_DCC_CONTROL 0x035a
+#define mmCB_COLOR4_DCC_CONTROL_BASE_IDX 1
+#define mmCB_COLOR4_CMASK 0x035b
+#define mmCB_COLOR4_CMASK_BASE_IDX 1
+#define mmCB_COLOR4_CMASK_BASE_EXT 0x035c
+#define mmCB_COLOR4_CMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR4_FMASK 0x035d
+#define mmCB_COLOR4_FMASK_BASE_IDX 1
+#define mmCB_COLOR4_FMASK_BASE_EXT 0x035e
+#define mmCB_COLOR4_FMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR4_CLEAR_WORD0 0x035f
+#define mmCB_COLOR4_CLEAR_WORD0_BASE_IDX 1
+#define mmCB_COLOR4_CLEAR_WORD1 0x0360
+#define mmCB_COLOR4_CLEAR_WORD1_BASE_IDX 1
+#define mmCB_COLOR4_DCC_BASE 0x0361
+#define mmCB_COLOR4_DCC_BASE_BASE_IDX 1
+#define mmCB_COLOR4_DCC_BASE_EXT 0x0362
+#define mmCB_COLOR4_DCC_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR5_BASE 0x0363
+#define mmCB_COLOR5_BASE_BASE_IDX 1
+#define mmCB_COLOR5_BASE_EXT 0x0364
+#define mmCB_COLOR5_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR5_ATTRIB2 0x0365
+#define mmCB_COLOR5_ATTRIB2_BASE_IDX 1
+#define mmCB_COLOR5_VIEW 0x0366
+#define mmCB_COLOR5_VIEW_BASE_IDX 1
+#define mmCB_COLOR5_INFO 0x0367
+#define mmCB_COLOR5_INFO_BASE_IDX 1
+#define mmCB_COLOR5_ATTRIB 0x0368
+#define mmCB_COLOR5_ATTRIB_BASE_IDX 1
+#define mmCB_COLOR5_DCC_CONTROL 0x0369
+#define mmCB_COLOR5_DCC_CONTROL_BASE_IDX 1
+#define mmCB_COLOR5_CMASK 0x036a
+#define mmCB_COLOR5_CMASK_BASE_IDX 1
+#define mmCB_COLOR5_CMASK_BASE_EXT 0x036b
+#define mmCB_COLOR5_CMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR5_FMASK 0x036c
+#define mmCB_COLOR5_FMASK_BASE_IDX 1
+#define mmCB_COLOR5_FMASK_BASE_EXT 0x036d
+#define mmCB_COLOR5_FMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR5_CLEAR_WORD0 0x036e
+#define mmCB_COLOR5_CLEAR_WORD0_BASE_IDX 1
+#define mmCB_COLOR5_CLEAR_WORD1 0x036f
+#define mmCB_COLOR5_CLEAR_WORD1_BASE_IDX 1
+#define mmCB_COLOR5_DCC_BASE 0x0370
+#define mmCB_COLOR5_DCC_BASE_BASE_IDX 1
+#define mmCB_COLOR5_DCC_BASE_EXT 0x0371
+#define mmCB_COLOR5_DCC_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR6_BASE 0x0372
+#define mmCB_COLOR6_BASE_BASE_IDX 1
+#define mmCB_COLOR6_BASE_EXT 0x0373
+#define mmCB_COLOR6_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR6_ATTRIB2 0x0374
+#define mmCB_COLOR6_ATTRIB2_BASE_IDX 1
+#define mmCB_COLOR6_VIEW 0x0375
+#define mmCB_COLOR6_VIEW_BASE_IDX 1
+#define mmCB_COLOR6_INFO 0x0376
+#define mmCB_COLOR6_INFO_BASE_IDX 1
+#define mmCB_COLOR6_ATTRIB 0x0377
+#define mmCB_COLOR6_ATTRIB_BASE_IDX 1
+#define mmCB_COLOR6_DCC_CONTROL 0x0378
+#define mmCB_COLOR6_DCC_CONTROL_BASE_IDX 1
+#define mmCB_COLOR6_CMASK 0x0379
+#define mmCB_COLOR6_CMASK_BASE_IDX 1
+#define mmCB_COLOR6_CMASK_BASE_EXT 0x037a
+#define mmCB_COLOR6_CMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR6_FMASK 0x037b
+#define mmCB_COLOR6_FMASK_BASE_IDX 1
+#define mmCB_COLOR6_FMASK_BASE_EXT 0x037c
+#define mmCB_COLOR6_FMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR6_CLEAR_WORD0 0x037d
+#define mmCB_COLOR6_CLEAR_WORD0_BASE_IDX 1
+#define mmCB_COLOR6_CLEAR_WORD1 0x037e
+#define mmCB_COLOR6_CLEAR_WORD1_BASE_IDX 1
+#define mmCB_COLOR6_DCC_BASE 0x037f
+#define mmCB_COLOR6_DCC_BASE_BASE_IDX 1
+#define mmCB_COLOR6_DCC_BASE_EXT 0x0380
+#define mmCB_COLOR6_DCC_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR7_BASE 0x0381
+#define mmCB_COLOR7_BASE_BASE_IDX 1
+#define mmCB_COLOR7_BASE_EXT 0x0382
+#define mmCB_COLOR7_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR7_ATTRIB2 0x0383
+#define mmCB_COLOR7_ATTRIB2_BASE_IDX 1
+#define mmCB_COLOR7_VIEW 0x0384
+#define mmCB_COLOR7_VIEW_BASE_IDX 1
+#define mmCB_COLOR7_INFO 0x0385
+#define mmCB_COLOR7_INFO_BASE_IDX 1
+#define mmCB_COLOR7_ATTRIB 0x0386
+#define mmCB_COLOR7_ATTRIB_BASE_IDX 1
+#define mmCB_COLOR7_DCC_CONTROL 0x0387
+#define mmCB_COLOR7_DCC_CONTROL_BASE_IDX 1
+#define mmCB_COLOR7_CMASK 0x0388
+#define mmCB_COLOR7_CMASK_BASE_IDX 1
+#define mmCB_COLOR7_CMASK_BASE_EXT 0x0389
+#define mmCB_COLOR7_CMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR7_FMASK 0x038a
+#define mmCB_COLOR7_FMASK_BASE_IDX 1
+#define mmCB_COLOR7_FMASK_BASE_EXT 0x038b
+#define mmCB_COLOR7_FMASK_BASE_EXT_BASE_IDX 1
+#define mmCB_COLOR7_CLEAR_WORD0 0x038c
+#define mmCB_COLOR7_CLEAR_WORD0_BASE_IDX 1
+#define mmCB_COLOR7_CLEAR_WORD1 0x038d
+#define mmCB_COLOR7_CLEAR_WORD1_BASE_IDX 1
+#define mmCB_COLOR7_DCC_BASE 0x038e
+#define mmCB_COLOR7_DCC_BASE_BASE_IDX 1
+#define mmCB_COLOR7_DCC_BASE_EXT 0x038f
+#define mmCB_COLOR7_DCC_BASE_EXT_BASE_IDX 1
+
+
+// addressBlock: gc_gfxudec
+// base address: 0x30000
+#define mmCP_EOP_DONE_ADDR_LO 0x2000
+#define mmCP_EOP_DONE_ADDR_LO_BASE_IDX 1
+#define mmCP_EOP_DONE_ADDR_HI 0x2001
+#define mmCP_EOP_DONE_ADDR_HI_BASE_IDX 1
+#define mmCP_EOP_DONE_DATA_LO 0x2002
+#define mmCP_EOP_DONE_DATA_LO_BASE_IDX 1
+#define mmCP_EOP_DONE_DATA_HI 0x2003
+#define mmCP_EOP_DONE_DATA_HI_BASE_IDX 1
+#define mmCP_EOP_LAST_FENCE_LO 0x2004
+#define mmCP_EOP_LAST_FENCE_LO_BASE_IDX 1
+#define mmCP_EOP_LAST_FENCE_HI 0x2005
+#define mmCP_EOP_LAST_FENCE_HI_BASE_IDX 1
+#define mmCP_STREAM_OUT_ADDR_LO 0x2006
+#define mmCP_STREAM_OUT_ADDR_LO_BASE_IDX 1
+#define mmCP_STREAM_OUT_ADDR_HI 0x2007
+#define mmCP_STREAM_OUT_ADDR_HI_BASE_IDX 1
+#define mmCP_NUM_PRIM_WRITTEN_COUNT0_LO 0x2008
+#define mmCP_NUM_PRIM_WRITTEN_COUNT0_LO_BASE_IDX 1
+#define mmCP_NUM_PRIM_WRITTEN_COUNT0_HI 0x2009
+#define mmCP_NUM_PRIM_WRITTEN_COUNT0_HI_BASE_IDX 1
+#define mmCP_NUM_PRIM_NEEDED_COUNT0_LO 0x200a
+#define mmCP_NUM_PRIM_NEEDED_COUNT0_LO_BASE_IDX 1
+#define mmCP_NUM_PRIM_NEEDED_COUNT0_HI 0x200b
+#define mmCP_NUM_PRIM_NEEDED_COUNT0_HI_BASE_IDX 1
+#define mmCP_NUM_PRIM_WRITTEN_COUNT1_LO 0x200c
+#define mmCP_NUM_PRIM_WRITTEN_COUNT1_LO_BASE_IDX 1
+#define mmCP_NUM_PRIM_WRITTEN_COUNT1_HI 0x200d
+#define mmCP_NUM_PRIM_WRITTEN_COUNT1_HI_BASE_IDX 1
+#define mmCP_NUM_PRIM_NEEDED_COUNT1_LO 0x200e
+#define mmCP_NUM_PRIM_NEEDED_COUNT1_LO_BASE_IDX 1
+#define mmCP_NUM_PRIM_NEEDED_COUNT1_HI 0x200f
+#define mmCP_NUM_PRIM_NEEDED_COUNT1_HI_BASE_IDX 1
+#define mmCP_NUM_PRIM_WRITTEN_COUNT2_LO 0x2010
+#define mmCP_NUM_PRIM_WRITTEN_COUNT2_LO_BASE_IDX 1
+#define mmCP_NUM_PRIM_WRITTEN_COUNT2_HI 0x2011
+#define mmCP_NUM_PRIM_WRITTEN_COUNT2_HI_BASE_IDX 1
+#define mmCP_NUM_PRIM_NEEDED_COUNT2_LO 0x2012
+#define mmCP_NUM_PRIM_NEEDED_COUNT2_LO_BASE_IDX 1
+#define mmCP_NUM_PRIM_NEEDED_COUNT2_HI 0x2013
+#define mmCP_NUM_PRIM_NEEDED_COUNT2_HI_BASE_IDX 1
+#define mmCP_NUM_PRIM_WRITTEN_COUNT3_LO 0x2014
+#define mmCP_NUM_PRIM_WRITTEN_COUNT3_LO_BASE_IDX 1
+#define mmCP_NUM_PRIM_WRITTEN_COUNT3_HI 0x2015
+#define mmCP_NUM_PRIM_WRITTEN_COUNT3_HI_BASE_IDX 1
+#define mmCP_NUM_PRIM_NEEDED_COUNT3_LO 0x2016
+#define mmCP_NUM_PRIM_NEEDED_COUNT3_LO_BASE_IDX 1
+#define mmCP_NUM_PRIM_NEEDED_COUNT3_HI 0x2017
+#define mmCP_NUM_PRIM_NEEDED_COUNT3_HI_BASE_IDX 1
+#define mmCP_PIPE_STATS_ADDR_LO 0x2018
+#define mmCP_PIPE_STATS_ADDR_LO_BASE_IDX 1
+#define mmCP_PIPE_STATS_ADDR_HI 0x2019
+#define mmCP_PIPE_STATS_ADDR_HI_BASE_IDX 1
+#define mmCP_VGT_IAVERT_COUNT_LO 0x201a
+#define mmCP_VGT_IAVERT_COUNT_LO_BASE_IDX 1
+#define mmCP_VGT_IAVERT_COUNT_HI 0x201b
+#define mmCP_VGT_IAVERT_COUNT_HI_BASE_IDX 1
+#define mmCP_VGT_IAPRIM_COUNT_LO 0x201c
+#define mmCP_VGT_IAPRIM_COUNT_LO_BASE_IDX 1
+#define mmCP_VGT_IAPRIM_COUNT_HI 0x201d
+#define mmCP_VGT_IAPRIM_COUNT_HI_BASE_IDX 1
+#define mmCP_VGT_GSPRIM_COUNT_LO 0x201e
+#define mmCP_VGT_GSPRIM_COUNT_LO_BASE_IDX 1
+#define mmCP_VGT_GSPRIM_COUNT_HI 0x201f
+#define mmCP_VGT_GSPRIM_COUNT_HI_BASE_IDX 1
+#define mmCP_VGT_VSINVOC_COUNT_LO 0x2020
+#define mmCP_VGT_VSINVOC_COUNT_LO_BASE_IDX 1
+#define mmCP_VGT_VSINVOC_COUNT_HI 0x2021
+#define mmCP_VGT_VSINVOC_COUNT_HI_BASE_IDX 1
+#define mmCP_VGT_GSINVOC_COUNT_LO 0x2022
+#define mmCP_VGT_GSINVOC_COUNT_LO_BASE_IDX 1
+#define mmCP_VGT_GSINVOC_COUNT_HI 0x2023
+#define mmCP_VGT_GSINVOC_COUNT_HI_BASE_IDX 1
+#define mmCP_VGT_HSINVOC_COUNT_LO 0x2024
+#define mmCP_VGT_HSINVOC_COUNT_LO_BASE_IDX 1
+#define mmCP_VGT_HSINVOC_COUNT_HI 0x2025
+#define mmCP_VGT_HSINVOC_COUNT_HI_BASE_IDX 1
+#define mmCP_VGT_DSINVOC_COUNT_LO 0x2026
+#define mmCP_VGT_DSINVOC_COUNT_LO_BASE_IDX 1
+#define mmCP_VGT_DSINVOC_COUNT_HI 0x2027
+#define mmCP_VGT_DSINVOC_COUNT_HI_BASE_IDX 1
+#define mmCP_PA_CINVOC_COUNT_LO 0x2028
+#define mmCP_PA_CINVOC_COUNT_LO_BASE_IDX 1
+#define mmCP_PA_CINVOC_COUNT_HI 0x2029
+#define mmCP_PA_CINVOC_COUNT_HI_BASE_IDX 1
+#define mmCP_PA_CPRIM_COUNT_LO 0x202a
+#define mmCP_PA_CPRIM_COUNT_LO_BASE_IDX 1
+#define mmCP_PA_CPRIM_COUNT_HI 0x202b
+#define mmCP_PA_CPRIM_COUNT_HI_BASE_IDX 1
+#define mmCP_SC_PSINVOC_COUNT0_LO 0x202c
+#define mmCP_SC_PSINVOC_COUNT0_LO_BASE_IDX 1
+#define mmCP_SC_PSINVOC_COUNT0_HI 0x202d
+#define mmCP_SC_PSINVOC_COUNT0_HI_BASE_IDX 1
+#define mmCP_SC_PSINVOC_COUNT1_LO 0x202e
+#define mmCP_SC_PSINVOC_COUNT1_LO_BASE_IDX 1
+#define mmCP_SC_PSINVOC_COUNT1_HI 0x202f
+#define mmCP_SC_PSINVOC_COUNT1_HI_BASE_IDX 1
+#define mmCP_VGT_CSINVOC_COUNT_LO 0x2030
+#define mmCP_VGT_CSINVOC_COUNT_LO_BASE_IDX 1
+#define mmCP_VGT_CSINVOC_COUNT_HI 0x2031
+#define mmCP_VGT_CSINVOC_COUNT_HI_BASE_IDX 1
+#define mmCP_PIPE_STATS_CONTROL 0x203d
+#define mmCP_PIPE_STATS_CONTROL_BASE_IDX 1
+#define mmCP_STREAM_OUT_CONTROL 0x203e
+#define mmCP_STREAM_OUT_CONTROL_BASE_IDX 1
+#define mmCP_STRMOUT_CNTL 0x203f
+#define mmCP_STRMOUT_CNTL_BASE_IDX 1
+#define mmSCRATCH_REG0 0x2040
+#define mmSCRATCH_REG0_BASE_IDX 1
+#define mmSCRATCH_REG1 0x2041
+#define mmSCRATCH_REG1_BASE_IDX 1
+#define mmSCRATCH_REG2 0x2042
+#define mmSCRATCH_REG2_BASE_IDX 1
+#define mmSCRATCH_REG3 0x2043
+#define mmSCRATCH_REG3_BASE_IDX 1
+#define mmSCRATCH_REG4 0x2044
+#define mmSCRATCH_REG4_BASE_IDX 1
+#define mmSCRATCH_REG5 0x2045
+#define mmSCRATCH_REG5_BASE_IDX 1
+#define mmSCRATCH_REG6 0x2046
+#define mmSCRATCH_REG6_BASE_IDX 1
+#define mmSCRATCH_REG7 0x2047
+#define mmSCRATCH_REG7_BASE_IDX 1
+#define mmCP_APPEND_DATA_HI 0x204c
+#define mmCP_APPEND_DATA_HI_BASE_IDX 1
+#define mmCP_APPEND_LAST_CS_FENCE_HI 0x204d
+#define mmCP_APPEND_LAST_CS_FENCE_HI_BASE_IDX 1
+#define mmCP_APPEND_LAST_PS_FENCE_HI 0x204e
+#define mmCP_APPEND_LAST_PS_FENCE_HI_BASE_IDX 1
+#define mmSCRATCH_UMSK 0x2050
+#define mmSCRATCH_UMSK_BASE_IDX 1
+#define mmSCRATCH_ADDR 0x2051
+#define mmSCRATCH_ADDR_BASE_IDX 1
+#define mmCP_PFP_ATOMIC_PREOP_LO 0x2052
+#define mmCP_PFP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define mmCP_PFP_ATOMIC_PREOP_HI 0x2053
+#define mmCP_PFP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define mmCP_PFP_GDS_ATOMIC0_PREOP_LO 0x2054
+#define mmCP_PFP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define mmCP_PFP_GDS_ATOMIC0_PREOP_HI 0x2055
+#define mmCP_PFP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define mmCP_PFP_GDS_ATOMIC1_PREOP_LO 0x2056
+#define mmCP_PFP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define mmCP_PFP_GDS_ATOMIC1_PREOP_HI 0x2057
+#define mmCP_PFP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define mmCP_APPEND_ADDR_LO 0x2058
+#define mmCP_APPEND_ADDR_LO_BASE_IDX 1
+#define mmCP_APPEND_ADDR_HI 0x2059
+#define mmCP_APPEND_ADDR_HI_BASE_IDX 1
+#define mmCP_APPEND_DATA_LO 0x205a
+#define mmCP_APPEND_DATA_LO_BASE_IDX 1
+#define mmCP_APPEND_LAST_CS_FENCE_LO 0x205b
+#define mmCP_APPEND_LAST_CS_FENCE_LO_BASE_IDX 1
+#define mmCP_APPEND_LAST_PS_FENCE_LO 0x205c
+#define mmCP_APPEND_LAST_PS_FENCE_LO_BASE_IDX 1
+#define mmCP_ATOMIC_PREOP_LO 0x205d
+#define mmCP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define mmCP_ME_ATOMIC_PREOP_LO 0x205d
+#define mmCP_ME_ATOMIC_PREOP_LO_BASE_IDX 1
+#define mmCP_ATOMIC_PREOP_HI 0x205e
+#define mmCP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define mmCP_ME_ATOMIC_PREOP_HI 0x205e
+#define mmCP_ME_ATOMIC_PREOP_HI_BASE_IDX 1
+#define mmCP_GDS_ATOMIC0_PREOP_LO 0x205f
+#define mmCP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define mmCP_ME_GDS_ATOMIC0_PREOP_LO 0x205f
+#define mmCP_ME_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define mmCP_GDS_ATOMIC0_PREOP_HI 0x2060
+#define mmCP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define mmCP_ME_GDS_ATOMIC0_PREOP_HI 0x2060
+#define mmCP_ME_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define mmCP_GDS_ATOMIC1_PREOP_LO 0x2061
+#define mmCP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define mmCP_ME_GDS_ATOMIC1_PREOP_LO 0x2061
+#define mmCP_ME_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define mmCP_GDS_ATOMIC1_PREOP_HI 0x2062
+#define mmCP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define mmCP_ME_GDS_ATOMIC1_PREOP_HI 0x2062
+#define mmCP_ME_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define mmCP_ME_MC_WADDR_LO 0x2069
+#define mmCP_ME_MC_WADDR_LO_BASE_IDX 1
+#define mmCP_ME_MC_WADDR_HI 0x206a
+#define mmCP_ME_MC_WADDR_HI_BASE_IDX 1
+#define mmCP_ME_MC_WDATA_LO 0x206b
+#define mmCP_ME_MC_WDATA_LO_BASE_IDX 1
+#define mmCP_ME_MC_WDATA_HI 0x206c
+#define mmCP_ME_MC_WDATA_HI_BASE_IDX 1
+#define mmCP_ME_MC_RADDR_LO 0x206d
+#define mmCP_ME_MC_RADDR_LO_BASE_IDX 1
+#define mmCP_ME_MC_RADDR_HI 0x206e
+#define mmCP_ME_MC_RADDR_HI_BASE_IDX 1
+#define mmCP_SEM_WAIT_TIMER 0x206f
+#define mmCP_SEM_WAIT_TIMER_BASE_IDX 1
+#define mmCP_SIG_SEM_ADDR_LO 0x2070
+#define mmCP_SIG_SEM_ADDR_LO_BASE_IDX 1
+#define mmCP_SIG_SEM_ADDR_HI 0x2071
+#define mmCP_SIG_SEM_ADDR_HI_BASE_IDX 1
+#define mmCP_WAIT_REG_MEM_TIMEOUT 0x2074
+#define mmCP_WAIT_REG_MEM_TIMEOUT_BASE_IDX 1
+#define mmCP_WAIT_SEM_ADDR_LO 0x2075
+#define mmCP_WAIT_SEM_ADDR_LO_BASE_IDX 1
+#define mmCP_WAIT_SEM_ADDR_HI 0x2076
+#define mmCP_WAIT_SEM_ADDR_HI_BASE_IDX 1
+#define mmCP_DMA_PFP_CONTROL 0x2077
+#define mmCP_DMA_PFP_CONTROL_BASE_IDX 1
+#define mmCP_DMA_ME_CONTROL 0x2078
+#define mmCP_DMA_ME_CONTROL_BASE_IDX 1
+#define mmCP_COHER_BASE_HI 0x2079
+#define mmCP_COHER_BASE_HI_BASE_IDX 1
+#define mmCP_COHER_START_DELAY 0x207b
+#define mmCP_COHER_START_DELAY_BASE_IDX 1
+#define mmCP_COHER_CNTL 0x207c
+#define mmCP_COHER_CNTL_BASE_IDX 1
+#define mmCP_COHER_SIZE 0x207d
+#define mmCP_COHER_SIZE_BASE_IDX 1
+#define mmCP_COHER_BASE 0x207e
+#define mmCP_COHER_BASE_BASE_IDX 1
+#define mmCP_COHER_STATUS 0x207f
+#define mmCP_COHER_STATUS_BASE_IDX 1
+#define mmCP_DMA_ME_SRC_ADDR 0x2080
+#define mmCP_DMA_ME_SRC_ADDR_BASE_IDX 1
+#define mmCP_DMA_ME_SRC_ADDR_HI 0x2081
+#define mmCP_DMA_ME_SRC_ADDR_HI_BASE_IDX 1
+#define mmCP_DMA_ME_DST_ADDR 0x2082
+#define mmCP_DMA_ME_DST_ADDR_BASE_IDX 1
+#define mmCP_DMA_ME_DST_ADDR_HI 0x2083
+#define mmCP_DMA_ME_DST_ADDR_HI_BASE_IDX 1
+#define mmCP_DMA_ME_COMMAND 0x2084
+#define mmCP_DMA_ME_COMMAND_BASE_IDX 1
+#define mmCP_DMA_PFP_SRC_ADDR 0x2085
+#define mmCP_DMA_PFP_SRC_ADDR_BASE_IDX 1
+#define mmCP_DMA_PFP_SRC_ADDR_HI 0x2086
+#define mmCP_DMA_PFP_SRC_ADDR_HI_BASE_IDX 1
+#define mmCP_DMA_PFP_DST_ADDR 0x2087
+#define mmCP_DMA_PFP_DST_ADDR_BASE_IDX 1
+#define mmCP_DMA_PFP_DST_ADDR_HI 0x2088
+#define mmCP_DMA_PFP_DST_ADDR_HI_BASE_IDX 1
+#define mmCP_DMA_PFP_COMMAND 0x2089
+#define mmCP_DMA_PFP_COMMAND_BASE_IDX 1
+#define mmCP_DMA_CNTL 0x208a
+#define mmCP_DMA_CNTL_BASE_IDX 1
+#define mmCP_DMA_READ_TAGS 0x208b
+#define mmCP_DMA_READ_TAGS_BASE_IDX 1
+#define mmCP_COHER_SIZE_HI 0x208c
+#define mmCP_COHER_SIZE_HI_BASE_IDX 1
+#define mmCP_PFP_IB_CONTROL 0x208d
+#define mmCP_PFP_IB_CONTROL_BASE_IDX 1
+#define mmCP_PFP_LOAD_CONTROL 0x208e
+#define mmCP_PFP_LOAD_CONTROL_BASE_IDX 1
+#define mmCP_SCRATCH_INDEX 0x208f
+#define mmCP_SCRATCH_INDEX_BASE_IDX 1
+#define mmCP_SCRATCH_DATA 0x2090
+#define mmCP_SCRATCH_DATA_BASE_IDX 1
+#define mmCP_RB_OFFSET 0x2091
+#define mmCP_RB_OFFSET_BASE_IDX 1
+#define mmCP_IB1_OFFSET 0x2092
+#define mmCP_IB1_OFFSET_BASE_IDX 1
+#define mmCP_IB2_OFFSET 0x2093
+#define mmCP_IB2_OFFSET_BASE_IDX 1
+#define mmCP_IB1_PREAMBLE_BEGIN 0x2094
+#define mmCP_IB1_PREAMBLE_BEGIN_BASE_IDX 1
+#define mmCP_IB1_PREAMBLE_END 0x2095
+#define mmCP_IB1_PREAMBLE_END_BASE_IDX 1
+#define mmCP_IB2_PREAMBLE_BEGIN 0x2096
+#define mmCP_IB2_PREAMBLE_BEGIN_BASE_IDX 1
+#define mmCP_IB2_PREAMBLE_END 0x2097
+#define mmCP_IB2_PREAMBLE_END_BASE_IDX 1
+#define mmCP_CE_IB1_OFFSET 0x2098
+#define mmCP_CE_IB1_OFFSET_BASE_IDX 1
+#define mmCP_CE_IB2_OFFSET 0x2099
+#define mmCP_CE_IB2_OFFSET_BASE_IDX 1
+#define mmCP_CE_COUNTER 0x209a
+#define mmCP_CE_COUNTER_BASE_IDX 1
+#define mmCP_CE_RB_OFFSET 0x209b
+#define mmCP_CE_RB_OFFSET_BASE_IDX 1
+#define mmCP_CE_INIT_CMD_BUFSZ 0x20bd
+#define mmCP_CE_INIT_CMD_BUFSZ_BASE_IDX 1
+#define mmCP_CE_IB1_CMD_BUFSZ 0x20be
+#define mmCP_CE_IB1_CMD_BUFSZ_BASE_IDX 1
+#define mmCP_CE_IB2_CMD_BUFSZ 0x20bf
+#define mmCP_CE_IB2_CMD_BUFSZ_BASE_IDX 1
+#define mmCP_IB1_CMD_BUFSZ 0x20c0
+#define mmCP_IB1_CMD_BUFSZ_BASE_IDX 1
+#define mmCP_IB2_CMD_BUFSZ 0x20c1
+#define mmCP_IB2_CMD_BUFSZ_BASE_IDX 1
+#define mmCP_ST_CMD_BUFSZ 0x20c2
+#define mmCP_ST_CMD_BUFSZ_BASE_IDX 1
+#define mmCP_CE_INIT_BASE_LO 0x20c3
+#define mmCP_CE_INIT_BASE_LO_BASE_IDX 1
+#define mmCP_CE_INIT_BASE_HI 0x20c4
+#define mmCP_CE_INIT_BASE_HI_BASE_IDX 1
+#define mmCP_CE_INIT_BUFSZ 0x20c5
+#define mmCP_CE_INIT_BUFSZ_BASE_IDX 1
+#define mmCP_CE_IB1_BASE_LO 0x20c6
+#define mmCP_CE_IB1_BASE_LO_BASE_IDX 1
+#define mmCP_CE_IB1_BASE_HI 0x20c7
+#define mmCP_CE_IB1_BASE_HI_BASE_IDX 1
+#define mmCP_CE_IB1_BUFSZ 0x20c8
+#define mmCP_CE_IB1_BUFSZ_BASE_IDX 1
+#define mmCP_CE_IB2_BASE_LO 0x20c9
+#define mmCP_CE_IB2_BASE_LO_BASE_IDX 1
+#define mmCP_CE_IB2_BASE_HI 0x20ca
+#define mmCP_CE_IB2_BASE_HI_BASE_IDX 1
+#define mmCP_CE_IB2_BUFSZ 0x20cb
+#define mmCP_CE_IB2_BUFSZ_BASE_IDX 1
+#define mmCP_IB1_BASE_LO 0x20cc
+#define mmCP_IB1_BASE_LO_BASE_IDX 1
+#define mmCP_IB1_BASE_HI 0x20cd
+#define mmCP_IB1_BASE_HI_BASE_IDX 1
+#define mmCP_IB1_BUFSZ 0x20ce
+#define mmCP_IB1_BUFSZ_BASE_IDX 1
+#define mmCP_IB2_BASE_LO 0x20cf
+#define mmCP_IB2_BASE_LO_BASE_IDX 1
+#define mmCP_IB2_BASE_HI 0x20d0
+#define mmCP_IB2_BASE_HI_BASE_IDX 1
+#define mmCP_IB2_BUFSZ 0x20d1
+#define mmCP_IB2_BUFSZ_BASE_IDX 1
+#define mmCP_ST_BASE_LO 0x20d2
+#define mmCP_ST_BASE_LO_BASE_IDX 1
+#define mmCP_ST_BASE_HI 0x20d3
+#define mmCP_ST_BASE_HI_BASE_IDX 1
+#define mmCP_ST_BUFSZ 0x20d4
+#define mmCP_ST_BUFSZ_BASE_IDX 1
+#define mmCP_EOP_DONE_EVENT_CNTL 0x20d5
+#define mmCP_EOP_DONE_EVENT_CNTL_BASE_IDX 1
+#define mmCP_EOP_DONE_DATA_CNTL 0x20d6
+#define mmCP_EOP_DONE_DATA_CNTL_BASE_IDX 1
+#define mmCP_EOP_DONE_CNTX_ID 0x20d7
+#define mmCP_EOP_DONE_CNTX_ID_BASE_IDX 1
+#define mmCP_PFP_COMPLETION_STATUS 0x20ec
+#define mmCP_PFP_COMPLETION_STATUS_BASE_IDX 1
+#define mmCP_CE_COMPLETION_STATUS 0x20ed
+#define mmCP_CE_COMPLETION_STATUS_BASE_IDX 1
+#define mmCP_PRED_NOT_VISIBLE 0x20ee
+#define mmCP_PRED_NOT_VISIBLE_BASE_IDX 1
+#define mmCP_PFP_METADATA_BASE_ADDR 0x20f0
+#define mmCP_PFP_METADATA_BASE_ADDR_BASE_IDX 1
+#define mmCP_PFP_METADATA_BASE_ADDR_HI 0x20f1
+#define mmCP_PFP_METADATA_BASE_ADDR_HI_BASE_IDX 1
+#define mmCP_CE_METADATA_BASE_ADDR 0x20f2
+#define mmCP_CE_METADATA_BASE_ADDR_BASE_IDX 1
+#define mmCP_CE_METADATA_BASE_ADDR_HI 0x20f3
+#define mmCP_CE_METADATA_BASE_ADDR_HI_BASE_IDX 1
+#define mmCP_DRAW_INDX_INDR_ADDR 0x20f4
+#define mmCP_DRAW_INDX_INDR_ADDR_BASE_IDX 1
+#define mmCP_DRAW_INDX_INDR_ADDR_HI 0x20f5
+#define mmCP_DRAW_INDX_INDR_ADDR_HI_BASE_IDX 1
+#define mmCP_DISPATCH_INDR_ADDR 0x20f6
+#define mmCP_DISPATCH_INDR_ADDR_BASE_IDX 1
+#define mmCP_DISPATCH_INDR_ADDR_HI 0x20f7
+#define mmCP_DISPATCH_INDR_ADDR_HI_BASE_IDX 1
+#define mmCP_INDEX_BASE_ADDR 0x20f8
+#define mmCP_INDEX_BASE_ADDR_BASE_IDX 1
+#define mmCP_INDEX_BASE_ADDR_HI 0x20f9
+#define mmCP_INDEX_BASE_ADDR_HI_BASE_IDX 1
+#define mmCP_INDEX_TYPE 0x20fa
+#define mmCP_INDEX_TYPE_BASE_IDX 1
+#define mmCP_GDS_BKUP_ADDR 0x20fb
+#define mmCP_GDS_BKUP_ADDR_BASE_IDX 1
+#define mmCP_GDS_BKUP_ADDR_HI 0x20fc
+#define mmCP_GDS_BKUP_ADDR_HI_BASE_IDX 1
+#define mmCP_SAMPLE_STATUS 0x20fd
+#define mmCP_SAMPLE_STATUS_BASE_IDX 1
+#define mmCP_ME_COHER_CNTL 0x20fe
+#define mmCP_ME_COHER_CNTL_BASE_IDX 1
+#define mmCP_ME_COHER_SIZE 0x20ff
+#define mmCP_ME_COHER_SIZE_BASE_IDX 1
+#define mmCP_ME_COHER_SIZE_HI 0x2100
+#define mmCP_ME_COHER_SIZE_HI_BASE_IDX 1
+#define mmCP_ME_COHER_BASE 0x2101
+#define mmCP_ME_COHER_BASE_BASE_IDX 1
+#define mmCP_ME_COHER_BASE_HI 0x2102
+#define mmCP_ME_COHER_BASE_HI_BASE_IDX 1
+#define mmCP_ME_COHER_STATUS 0x2103
+#define mmCP_ME_COHER_STATUS_BASE_IDX 1
+#define mmRLC_GPM_PERF_COUNT_0 0x2140
+#define mmRLC_GPM_PERF_COUNT_0_BASE_IDX 1
+#define mmRLC_GPM_PERF_COUNT_1 0x2141
+#define mmRLC_GPM_PERF_COUNT_1_BASE_IDX 1
+#define mmGRBM_GFX_INDEX 0x2200
+#define mmGRBM_GFX_INDEX_BASE_IDX 1
+#define mmVGT_GSVS_RING_SIZE 0x2241
+#define mmVGT_GSVS_RING_SIZE_BASE_IDX 1
+#define mmVGT_PRIMITIVE_TYPE 0x2242
+#define mmVGT_PRIMITIVE_TYPE_BASE_IDX 1
+#define mmVGT_INDEX_TYPE 0x2243
+#define mmVGT_INDEX_TYPE_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_0 0x2244
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_0_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_1 0x2245
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_1_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_2 0x2246
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_2_BASE_IDX 1
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_3 0x2247
+#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_3_BASE_IDX 1
+#define mmVGT_MAX_VTX_INDX 0x2248
+#define mmVGT_MAX_VTX_INDX_BASE_IDX 1
+#define mmVGT_MIN_VTX_INDX 0x2249
+#define mmVGT_MIN_VTX_INDX_BASE_IDX 1
+#define mmVGT_INDX_OFFSET 0x224a
+#define mmVGT_INDX_OFFSET_BASE_IDX 1
+#define mmVGT_MULTI_PRIM_IB_RESET_EN 0x224b
+#define mmVGT_MULTI_PRIM_IB_RESET_EN_BASE_IDX 1
+#define mmVGT_NUM_INDICES 0x224c
+#define mmVGT_NUM_INDICES_BASE_IDX 1
+#define mmVGT_NUM_INSTANCES 0x224d
+#define mmVGT_NUM_INSTANCES_BASE_IDX 1
+#define mmVGT_TF_RING_SIZE 0x224e
+#define mmVGT_TF_RING_SIZE_BASE_IDX 1
+#define mmVGT_HS_OFFCHIP_PARAM 0x224f
+#define mmVGT_HS_OFFCHIP_PARAM_BASE_IDX 1
+#define mmVGT_TF_MEMORY_BASE 0x2250
+#define mmVGT_TF_MEMORY_BASE_BASE_IDX 1
+#define mmVGT_TF_MEMORY_BASE_HI 0x2251
+#define mmVGT_TF_MEMORY_BASE_HI_BASE_IDX 1
+#define mmWD_POS_BUF_BASE 0x2252
+#define mmWD_POS_BUF_BASE_BASE_IDX 1
+#define mmWD_POS_BUF_BASE_HI 0x2253
+#define mmWD_POS_BUF_BASE_HI_BASE_IDX 1
+#define mmWD_CNTL_SB_BUF_BASE 0x2254
+#define mmWD_CNTL_SB_BUF_BASE_BASE_IDX 1
+#define mmWD_CNTL_SB_BUF_BASE_HI 0x2255
+#define mmWD_CNTL_SB_BUF_BASE_HI_BASE_IDX 1
+#define mmWD_INDEX_BUF_BASE 0x2256
+#define mmWD_INDEX_BUF_BASE_BASE_IDX 1
+#define mmWD_INDEX_BUF_BASE_HI 0x2257
+#define mmWD_INDEX_BUF_BASE_HI_BASE_IDX 1
+#define mmIA_MULTI_VGT_PARAM 0x2258
+#define mmIA_MULTI_VGT_PARAM_BASE_IDX 1
+#define mmVGT_INSTANCE_BASE_ID 0x225a
+#define mmVGT_INSTANCE_BASE_ID_BASE_IDX 1
+#define mmPA_SU_LINE_STIPPLE_VALUE 0x2280
+#define mmPA_SU_LINE_STIPPLE_VALUE_BASE_IDX 1
+#define mmPA_SC_LINE_STIPPLE_STATE 0x2281
+#define mmPA_SC_LINE_STIPPLE_STATE_BASE_IDX 1
+#define mmPA_SC_SCREEN_EXTENT_MIN_0 0x2284
+#define mmPA_SC_SCREEN_EXTENT_MIN_0_BASE_IDX 1
+#define mmPA_SC_SCREEN_EXTENT_MAX_0 0x2285
+#define mmPA_SC_SCREEN_EXTENT_MAX_0_BASE_IDX 1
+#define mmPA_SC_SCREEN_EXTENT_MIN_1 0x2286
+#define mmPA_SC_SCREEN_EXTENT_MIN_1_BASE_IDX 1
+#define mmPA_SC_SCREEN_EXTENT_MAX_1 0x228b
+#define mmPA_SC_SCREEN_EXTENT_MAX_1_BASE_IDX 1
+#define mmPA_SC_P3D_TRAP_SCREEN_HV_EN 0x22a0
+#define mmPA_SC_P3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define mmPA_SC_P3D_TRAP_SCREEN_H 0x22a1
+#define mmPA_SC_P3D_TRAP_SCREEN_H_BASE_IDX 1
+#define mmPA_SC_P3D_TRAP_SCREEN_V 0x22a2
+#define mmPA_SC_P3D_TRAP_SCREEN_V_BASE_IDX 1
+#define mmPA_SC_P3D_TRAP_SCREEN_OCCURRENCE 0x22a3
+#define mmPA_SC_P3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define mmPA_SC_P3D_TRAP_SCREEN_COUNT 0x22a4
+#define mmPA_SC_P3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define mmPA_SC_HP3D_TRAP_SCREEN_HV_EN 0x22a8
+#define mmPA_SC_HP3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define mmPA_SC_HP3D_TRAP_SCREEN_H 0x22a9
+#define mmPA_SC_HP3D_TRAP_SCREEN_H_BASE_IDX 1
+#define mmPA_SC_HP3D_TRAP_SCREEN_V 0x22aa
+#define mmPA_SC_HP3D_TRAP_SCREEN_V_BASE_IDX 1
+#define mmPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE 0x22ab
+#define mmPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define mmPA_SC_HP3D_TRAP_SCREEN_COUNT 0x22ac
+#define mmPA_SC_HP3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define mmPA_SC_TRAP_SCREEN_HV_EN 0x22b0
+#define mmPA_SC_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define mmPA_SC_TRAP_SCREEN_H 0x22b1
+#define mmPA_SC_TRAP_SCREEN_H_BASE_IDX 1
+#define mmPA_SC_TRAP_SCREEN_V 0x22b2
+#define mmPA_SC_TRAP_SCREEN_V_BASE_IDX 1
+#define mmPA_SC_TRAP_SCREEN_OCCURRENCE 0x22b3
+#define mmPA_SC_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define mmPA_SC_TRAP_SCREEN_COUNT 0x22b4
+#define mmPA_SC_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define mmPA_STATE_STEREO_X 0x22b5
+#define mmPA_STATE_STEREO_X_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_BASE 0x2330
+#define mmSQ_THREAD_TRACE_BASE_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_SIZE 0x2331
+#define mmSQ_THREAD_TRACE_SIZE_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_MASK 0x2332
+#define mmSQ_THREAD_TRACE_MASK_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_TOKEN_MASK 0x2333
+#define mmSQ_THREAD_TRACE_TOKEN_MASK_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_PERF_MASK 0x2334
+#define mmSQ_THREAD_TRACE_PERF_MASK_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_CTRL 0x2335
+#define mmSQ_THREAD_TRACE_CTRL_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_MODE 0x2336
+#define mmSQ_THREAD_TRACE_MODE_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_BASE2 0x2337
+#define mmSQ_THREAD_TRACE_BASE2_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_TOKEN_MASK2 0x2338
+#define mmSQ_THREAD_TRACE_TOKEN_MASK2_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_WPTR 0x2339
+#define mmSQ_THREAD_TRACE_WPTR_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_STATUS 0x233a
+#define mmSQ_THREAD_TRACE_STATUS_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_HIWATER 0x233b
+#define mmSQ_THREAD_TRACE_HIWATER_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_CNTR 0x233c
+#define mmSQ_THREAD_TRACE_CNTR_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_USERDATA_0 0x2340
+#define mmSQ_THREAD_TRACE_USERDATA_0_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_USERDATA_1 0x2341
+#define mmSQ_THREAD_TRACE_USERDATA_1_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_USERDATA_2 0x2342
+#define mmSQ_THREAD_TRACE_USERDATA_2_BASE_IDX 1
+#define mmSQ_THREAD_TRACE_USERDATA_3 0x2343
+#define mmSQ_THREAD_TRACE_USERDATA_3_BASE_IDX 1
+#define mmSQC_CACHES 0x2348
+#define mmSQC_CACHES_BASE_IDX 1
+#define mmSQC_WRITEBACK 0x2349
+#define mmSQC_WRITEBACK_BASE_IDX 1
+#define mmTA_CS_BC_BASE_ADDR 0x2380
+#define mmTA_CS_BC_BASE_ADDR_BASE_IDX 1
+#define mmTA_CS_BC_BASE_ADDR_HI 0x2381
+#define mmTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
+#define mmDB_OCCLUSION_COUNT0_LOW 0x23c0
+#define mmDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
+#define mmDB_OCCLUSION_COUNT0_HI 0x23c1
+#define mmDB_OCCLUSION_COUNT0_HI_BASE_IDX 1
+#define mmDB_OCCLUSION_COUNT1_LOW 0x23c2
+#define mmDB_OCCLUSION_COUNT1_LOW_BASE_IDX 1
+#define mmDB_OCCLUSION_COUNT1_HI 0x23c3
+#define mmDB_OCCLUSION_COUNT1_HI_BASE_IDX 1
+#define mmDB_OCCLUSION_COUNT2_LOW 0x23c4
+#define mmDB_OCCLUSION_COUNT2_LOW_BASE_IDX 1
+#define mmDB_OCCLUSION_COUNT2_HI 0x23c5
+#define mmDB_OCCLUSION_COUNT2_HI_BASE_IDX 1
+#define mmDB_OCCLUSION_COUNT3_LOW 0x23c6
+#define mmDB_OCCLUSION_COUNT3_LOW_BASE_IDX 1
+#define mmDB_OCCLUSION_COUNT3_HI 0x23c7
+#define mmDB_OCCLUSION_COUNT3_HI_BASE_IDX 1
+#define mmDB_ZPASS_COUNT_LOW 0x23fe
+#define mmDB_ZPASS_COUNT_LOW_BASE_IDX 1
+#define mmDB_ZPASS_COUNT_HI 0x23ff
+#define mmDB_ZPASS_COUNT_HI_BASE_IDX 1
+#define mmGDS_RD_ADDR 0x2400
+#define mmGDS_RD_ADDR_BASE_IDX 1
+#define mmGDS_RD_DATA 0x2401
+#define mmGDS_RD_DATA_BASE_IDX 1
+#define mmGDS_RD_BURST_ADDR 0x2402
+#define mmGDS_RD_BURST_ADDR_BASE_IDX 1
+#define mmGDS_RD_BURST_COUNT 0x2403
+#define mmGDS_RD_BURST_COUNT_BASE_IDX 1
+#define mmGDS_RD_BURST_DATA 0x2404
+#define mmGDS_RD_BURST_DATA_BASE_IDX 1
+#define mmGDS_WR_ADDR 0x2405
+#define mmGDS_WR_ADDR_BASE_IDX 1
+#define mmGDS_WR_DATA 0x2406
+#define mmGDS_WR_DATA_BASE_IDX 1
+#define mmGDS_WR_BURST_ADDR 0x2407
+#define mmGDS_WR_BURST_ADDR_BASE_IDX 1
+#define mmGDS_WR_BURST_DATA 0x2408
+#define mmGDS_WR_BURST_DATA_BASE_IDX 1
+#define mmGDS_WRITE_COMPLETE 0x2409
+#define mmGDS_WRITE_COMPLETE_BASE_IDX 1
+#define mmGDS_ATOM_CNTL 0x240a
+#define mmGDS_ATOM_CNTL_BASE_IDX 1
+#define mmGDS_ATOM_COMPLETE 0x240b
+#define mmGDS_ATOM_COMPLETE_BASE_IDX 1
+#define mmGDS_ATOM_BASE 0x240c
+#define mmGDS_ATOM_BASE_BASE_IDX 1
+#define mmGDS_ATOM_SIZE 0x240d
+#define mmGDS_ATOM_SIZE_BASE_IDX 1
+#define mmGDS_ATOM_OFFSET0 0x240e
+#define mmGDS_ATOM_OFFSET0_BASE_IDX 1
+#define mmGDS_ATOM_OFFSET1 0x240f
+#define mmGDS_ATOM_OFFSET1_BASE_IDX 1
+#define mmGDS_ATOM_DST 0x2410
+#define mmGDS_ATOM_DST_BASE_IDX 1
+#define mmGDS_ATOM_OP 0x2411
+#define mmGDS_ATOM_OP_BASE_IDX 1
+#define mmGDS_ATOM_SRC0 0x2412
+#define mmGDS_ATOM_SRC0_BASE_IDX 1
+#define mmGDS_ATOM_SRC0_U 0x2413
+#define mmGDS_ATOM_SRC0_U_BASE_IDX 1
+#define mmGDS_ATOM_SRC1 0x2414
+#define mmGDS_ATOM_SRC1_BASE_IDX 1
+#define mmGDS_ATOM_SRC1_U 0x2415
+#define mmGDS_ATOM_SRC1_U_BASE_IDX 1
+#define mmGDS_ATOM_READ0 0x2416
+#define mmGDS_ATOM_READ0_BASE_IDX 1
+#define mmGDS_ATOM_READ0_U 0x2417
+#define mmGDS_ATOM_READ0_U_BASE_IDX 1
+#define mmGDS_ATOM_READ1 0x2418
+#define mmGDS_ATOM_READ1_BASE_IDX 1
+#define mmGDS_ATOM_READ1_U 0x2419
+#define mmGDS_ATOM_READ1_U_BASE_IDX 1
+#define mmGDS_GWS_RESOURCE_CNTL 0x241a
+#define mmGDS_GWS_RESOURCE_CNTL_BASE_IDX 1
+#define mmGDS_GWS_RESOURCE 0x241b
+#define mmGDS_GWS_RESOURCE_BASE_IDX 1
+#define mmGDS_GWS_RESOURCE_CNT 0x241c
+#define mmGDS_GWS_RESOURCE_CNT_BASE_IDX 1
+#define mmGDS_OA_CNTL 0x241d
+#define mmGDS_OA_CNTL_BASE_IDX 1
+#define mmGDS_OA_COUNTER 0x241e
+#define mmGDS_OA_COUNTER_BASE_IDX 1
+#define mmGDS_OA_ADDRESS 0x241f
+#define mmGDS_OA_ADDRESS_BASE_IDX 1
+#define mmGDS_OA_INCDEC 0x2420
+#define mmGDS_OA_INCDEC_BASE_IDX 1
+#define mmGDS_OA_RING_SIZE 0x2421
+#define mmGDS_OA_RING_SIZE_BASE_IDX 1
+#define mmSPI_CONFIG_CNTL 0x2440
+#define mmSPI_CONFIG_CNTL_BASE_IDX 1
+#define mmSPI_CONFIG_CNTL_1 0x2441
+#define mmSPI_CONFIG_CNTL_1_BASE_IDX 1
+#define mmSPI_CONFIG_CNTL_2 0x2442
+#define mmSPI_CONFIG_CNTL_2_BASE_IDX 1
+#define mmSPI_WAVE_LIMIT_CNTL 0x2443
+#define mmSPI_WAVE_LIMIT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_perfddec
+// base address: 0x34000
+#define mmCPG_PERFCOUNTER1_LO 0x3000
+#define mmCPG_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmCPG_PERFCOUNTER1_HI 0x3001
+#define mmCPG_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmCPG_PERFCOUNTER0_LO 0x3002
+#define mmCPG_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmCPG_PERFCOUNTER0_HI 0x3003
+#define mmCPG_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmCPC_PERFCOUNTER1_LO 0x3004
+#define mmCPC_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmCPC_PERFCOUNTER1_HI 0x3005
+#define mmCPC_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmCPC_PERFCOUNTER0_LO 0x3006
+#define mmCPC_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmCPC_PERFCOUNTER0_HI 0x3007
+#define mmCPC_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmCPF_PERFCOUNTER1_LO 0x3008
+#define mmCPF_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmCPF_PERFCOUNTER1_HI 0x3009
+#define mmCPF_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmCPF_PERFCOUNTER0_LO 0x300a
+#define mmCPF_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmCPF_PERFCOUNTER0_HI 0x300b
+#define mmCPF_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmCPF_LATENCY_STATS_DATA 0x300c
+#define mmCPF_LATENCY_STATS_DATA_BASE_IDX 1
+#define mmCPG_LATENCY_STATS_DATA 0x300d
+#define mmCPG_LATENCY_STATS_DATA_BASE_IDX 1
+#define mmCPC_LATENCY_STATS_DATA 0x300e
+#define mmCPC_LATENCY_STATS_DATA_BASE_IDX 1
+#define mmGRBM_PERFCOUNTER0_LO 0x3040
+#define mmGRBM_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmGRBM_PERFCOUNTER0_HI 0x3041
+#define mmGRBM_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmGRBM_PERFCOUNTER1_LO 0x3043
+#define mmGRBM_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmGRBM_PERFCOUNTER1_HI 0x3044
+#define mmGRBM_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmGRBM_SE0_PERFCOUNTER_LO 0x3045
+#define mmGRBM_SE0_PERFCOUNTER_LO_BASE_IDX 1
+#define mmGRBM_SE0_PERFCOUNTER_HI 0x3046
+#define mmGRBM_SE0_PERFCOUNTER_HI_BASE_IDX 1
+#define mmGRBM_SE1_PERFCOUNTER_LO 0x3047
+#define mmGRBM_SE1_PERFCOUNTER_LO_BASE_IDX 1
+#define mmGRBM_SE1_PERFCOUNTER_HI 0x3048
+#define mmGRBM_SE1_PERFCOUNTER_HI_BASE_IDX 1
+#define mmGRBM_SE2_PERFCOUNTER_LO 0x3049
+#define mmGRBM_SE2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmGRBM_SE2_PERFCOUNTER_HI 0x304a
+#define mmGRBM_SE2_PERFCOUNTER_HI_BASE_IDX 1
+#define mmGRBM_SE3_PERFCOUNTER_LO 0x304b
+#define mmGRBM_SE3_PERFCOUNTER_LO_BASE_IDX 1
+#define mmGRBM_SE3_PERFCOUNTER_HI 0x304c
+#define mmGRBM_SE3_PERFCOUNTER_HI_BASE_IDX 1
+#define mmWD_PERFCOUNTER0_LO 0x3080
+#define mmWD_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmWD_PERFCOUNTER0_HI 0x3081
+#define mmWD_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmWD_PERFCOUNTER1_LO 0x3082
+#define mmWD_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmWD_PERFCOUNTER1_HI 0x3083
+#define mmWD_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmWD_PERFCOUNTER2_LO 0x3084
+#define mmWD_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmWD_PERFCOUNTER2_HI 0x3085
+#define mmWD_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmWD_PERFCOUNTER3_LO 0x3086
+#define mmWD_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmWD_PERFCOUNTER3_HI 0x3087
+#define mmWD_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmIA_PERFCOUNTER0_LO 0x3088
+#define mmIA_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmIA_PERFCOUNTER0_HI 0x3089
+#define mmIA_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmIA_PERFCOUNTER1_LO 0x308a
+#define mmIA_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmIA_PERFCOUNTER1_HI 0x308b
+#define mmIA_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmIA_PERFCOUNTER2_LO 0x308c
+#define mmIA_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmIA_PERFCOUNTER2_HI 0x308d
+#define mmIA_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmIA_PERFCOUNTER3_LO 0x308e
+#define mmIA_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmIA_PERFCOUNTER3_HI 0x308f
+#define mmIA_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmVGT_PERFCOUNTER0_LO 0x3090
+#define mmVGT_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmVGT_PERFCOUNTER0_HI 0x3091
+#define mmVGT_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmVGT_PERFCOUNTER1_LO 0x3092
+#define mmVGT_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmVGT_PERFCOUNTER1_HI 0x3093
+#define mmVGT_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmVGT_PERFCOUNTER2_LO 0x3094
+#define mmVGT_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmVGT_PERFCOUNTER2_HI 0x3095
+#define mmVGT_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmVGT_PERFCOUNTER3_LO 0x3096
+#define mmVGT_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmVGT_PERFCOUNTER3_HI 0x3097
+#define mmVGT_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER0_LO 0x3100
+#define mmPA_SU_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER0_HI 0x3101
+#define mmPA_SU_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER1_LO 0x3102
+#define mmPA_SU_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER1_HI 0x3103
+#define mmPA_SU_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER2_LO 0x3104
+#define mmPA_SU_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER2_HI 0x3105
+#define mmPA_SU_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER3_LO 0x3106
+#define mmPA_SU_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER3_HI 0x3107
+#define mmPA_SU_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER0_LO 0x3140
+#define mmPA_SC_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER0_HI 0x3141
+#define mmPA_SC_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER1_LO 0x3142
+#define mmPA_SC_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER1_HI 0x3143
+#define mmPA_SC_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER2_LO 0x3144
+#define mmPA_SC_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER2_HI 0x3145
+#define mmPA_SC_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER3_LO 0x3146
+#define mmPA_SC_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER3_HI 0x3147
+#define mmPA_SC_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER4_LO 0x3148
+#define mmPA_SC_PERFCOUNTER4_LO_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER4_HI 0x3149
+#define mmPA_SC_PERFCOUNTER4_HI_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER5_LO 0x314a
+#define mmPA_SC_PERFCOUNTER5_LO_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER5_HI 0x314b
+#define mmPA_SC_PERFCOUNTER5_HI_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER6_LO 0x314c
+#define mmPA_SC_PERFCOUNTER6_LO_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER6_HI 0x314d
+#define mmPA_SC_PERFCOUNTER6_HI_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER7_LO 0x314e
+#define mmPA_SC_PERFCOUNTER7_LO_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER7_HI 0x314f
+#define mmPA_SC_PERFCOUNTER7_HI_BASE_IDX 1
+#define mmSPI_PERFCOUNTER0_HI 0x3180
+#define mmSPI_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmSPI_PERFCOUNTER0_LO 0x3181
+#define mmSPI_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmSPI_PERFCOUNTER1_HI 0x3182
+#define mmSPI_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmSPI_PERFCOUNTER1_LO 0x3183
+#define mmSPI_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmSPI_PERFCOUNTER2_HI 0x3184
+#define mmSPI_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmSPI_PERFCOUNTER2_LO 0x3185
+#define mmSPI_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmSPI_PERFCOUNTER3_HI 0x3186
+#define mmSPI_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmSPI_PERFCOUNTER3_LO 0x3187
+#define mmSPI_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmSPI_PERFCOUNTER4_HI 0x3188
+#define mmSPI_PERFCOUNTER4_HI_BASE_IDX 1
+#define mmSPI_PERFCOUNTER4_LO 0x3189
+#define mmSPI_PERFCOUNTER4_LO_BASE_IDX 1
+#define mmSPI_PERFCOUNTER5_HI 0x318a
+#define mmSPI_PERFCOUNTER5_HI_BASE_IDX 1
+#define mmSPI_PERFCOUNTER5_LO 0x318b
+#define mmSPI_PERFCOUNTER5_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER0_LO 0x31c0
+#define mmSQ_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER0_HI 0x31c1
+#define mmSQ_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER1_LO 0x31c2
+#define mmSQ_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER1_HI 0x31c3
+#define mmSQ_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER2_LO 0x31c4
+#define mmSQ_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER2_HI 0x31c5
+#define mmSQ_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER3_LO 0x31c6
+#define mmSQ_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER3_HI 0x31c7
+#define mmSQ_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER4_LO 0x31c8
+#define mmSQ_PERFCOUNTER4_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER4_HI 0x31c9
+#define mmSQ_PERFCOUNTER4_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER5_LO 0x31ca
+#define mmSQ_PERFCOUNTER5_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER5_HI 0x31cb
+#define mmSQ_PERFCOUNTER5_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER6_LO 0x31cc
+#define mmSQ_PERFCOUNTER6_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER6_HI 0x31cd
+#define mmSQ_PERFCOUNTER6_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER7_LO 0x31ce
+#define mmSQ_PERFCOUNTER7_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER7_HI 0x31cf
+#define mmSQ_PERFCOUNTER7_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER8_LO 0x31d0
+#define mmSQ_PERFCOUNTER8_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER8_HI 0x31d1
+#define mmSQ_PERFCOUNTER8_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER9_LO 0x31d2
+#define mmSQ_PERFCOUNTER9_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER9_HI 0x31d3
+#define mmSQ_PERFCOUNTER9_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER10_LO 0x31d4
+#define mmSQ_PERFCOUNTER10_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER10_HI 0x31d5
+#define mmSQ_PERFCOUNTER10_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER11_LO 0x31d6
+#define mmSQ_PERFCOUNTER11_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER11_HI 0x31d7
+#define mmSQ_PERFCOUNTER11_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER12_LO 0x31d8
+#define mmSQ_PERFCOUNTER12_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER12_HI 0x31d9
+#define mmSQ_PERFCOUNTER12_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER13_LO 0x31da
+#define mmSQ_PERFCOUNTER13_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER13_HI 0x31db
+#define mmSQ_PERFCOUNTER13_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER14_LO 0x31dc
+#define mmSQ_PERFCOUNTER14_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER14_HI 0x31dd
+#define mmSQ_PERFCOUNTER14_HI_BASE_IDX 1
+#define mmSQ_PERFCOUNTER15_LO 0x31de
+#define mmSQ_PERFCOUNTER15_LO_BASE_IDX 1
+#define mmSQ_PERFCOUNTER15_HI 0x31df
+#define mmSQ_PERFCOUNTER15_HI_BASE_IDX 1
+#define mmSX_PERFCOUNTER0_LO 0x3240
+#define mmSX_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmSX_PERFCOUNTER0_HI 0x3241
+#define mmSX_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmSX_PERFCOUNTER1_LO 0x3242
+#define mmSX_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmSX_PERFCOUNTER1_HI 0x3243
+#define mmSX_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmSX_PERFCOUNTER2_LO 0x3244
+#define mmSX_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmSX_PERFCOUNTER2_HI 0x3245
+#define mmSX_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmSX_PERFCOUNTER3_LO 0x3246
+#define mmSX_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmSX_PERFCOUNTER3_HI 0x3247
+#define mmSX_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmGDS_PERFCOUNTER0_LO 0x3280
+#define mmGDS_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmGDS_PERFCOUNTER0_HI 0x3281
+#define mmGDS_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmGDS_PERFCOUNTER1_LO 0x3282
+#define mmGDS_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmGDS_PERFCOUNTER1_HI 0x3283
+#define mmGDS_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmGDS_PERFCOUNTER2_LO 0x3284
+#define mmGDS_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmGDS_PERFCOUNTER2_HI 0x3285
+#define mmGDS_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmGDS_PERFCOUNTER3_LO 0x3286
+#define mmGDS_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmGDS_PERFCOUNTER3_HI 0x3287
+#define mmGDS_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmTA_PERFCOUNTER0_LO 0x32c0
+#define mmTA_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmTA_PERFCOUNTER0_HI 0x32c1
+#define mmTA_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmTA_PERFCOUNTER1_LO 0x32c2
+#define mmTA_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmTA_PERFCOUNTER1_HI 0x32c3
+#define mmTA_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmTD_PERFCOUNTER0_LO 0x3300
+#define mmTD_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmTD_PERFCOUNTER0_HI 0x3301
+#define mmTD_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmTD_PERFCOUNTER1_LO 0x3302
+#define mmTD_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmTD_PERFCOUNTER1_HI 0x3303
+#define mmTD_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmTCP_PERFCOUNTER0_LO 0x3340
+#define mmTCP_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmTCP_PERFCOUNTER0_HI 0x3341
+#define mmTCP_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmTCP_PERFCOUNTER1_LO 0x3342
+#define mmTCP_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmTCP_PERFCOUNTER1_HI 0x3343
+#define mmTCP_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmTCP_PERFCOUNTER2_LO 0x3344
+#define mmTCP_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmTCP_PERFCOUNTER2_HI 0x3345
+#define mmTCP_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmTCP_PERFCOUNTER3_LO 0x3346
+#define mmTCP_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmTCP_PERFCOUNTER3_HI 0x3347
+#define mmTCP_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmTCC_PERFCOUNTER0_LO 0x3380
+#define mmTCC_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmTCC_PERFCOUNTER0_HI 0x3381
+#define mmTCC_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmTCC_PERFCOUNTER1_LO 0x3382
+#define mmTCC_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmTCC_PERFCOUNTER1_HI 0x3383
+#define mmTCC_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmTCC_PERFCOUNTER2_LO 0x3384
+#define mmTCC_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmTCC_PERFCOUNTER2_HI 0x3385
+#define mmTCC_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmTCC_PERFCOUNTER3_LO 0x3386
+#define mmTCC_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmTCC_PERFCOUNTER3_HI 0x3387
+#define mmTCC_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmTCA_PERFCOUNTER0_LO 0x3390
+#define mmTCA_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmTCA_PERFCOUNTER0_HI 0x3391
+#define mmTCA_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmTCA_PERFCOUNTER1_LO 0x3392
+#define mmTCA_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmTCA_PERFCOUNTER1_HI 0x3393
+#define mmTCA_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmTCA_PERFCOUNTER2_LO 0x3394
+#define mmTCA_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmTCA_PERFCOUNTER2_HI 0x3395
+#define mmTCA_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmTCA_PERFCOUNTER3_LO 0x3396
+#define mmTCA_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmTCA_PERFCOUNTER3_HI 0x3397
+#define mmTCA_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmCB_PERFCOUNTER0_LO 0x3406
+#define mmCB_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmCB_PERFCOUNTER0_HI 0x3407
+#define mmCB_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmCB_PERFCOUNTER1_LO 0x3408
+#define mmCB_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmCB_PERFCOUNTER1_HI 0x3409
+#define mmCB_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmCB_PERFCOUNTER2_LO 0x340a
+#define mmCB_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmCB_PERFCOUNTER2_HI 0x340b
+#define mmCB_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmCB_PERFCOUNTER3_LO 0x340c
+#define mmCB_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmCB_PERFCOUNTER3_HI 0x340d
+#define mmCB_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmDB_PERFCOUNTER0_LO 0x3440
+#define mmDB_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmDB_PERFCOUNTER0_HI 0x3441
+#define mmDB_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmDB_PERFCOUNTER1_LO 0x3442
+#define mmDB_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmDB_PERFCOUNTER1_HI 0x3443
+#define mmDB_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmDB_PERFCOUNTER2_LO 0x3444
+#define mmDB_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmDB_PERFCOUNTER2_HI 0x3445
+#define mmDB_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmDB_PERFCOUNTER3_LO 0x3446
+#define mmDB_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmDB_PERFCOUNTER3_HI 0x3447
+#define mmDB_PERFCOUNTER3_HI_BASE_IDX 1
+#define mmRLC_PERFCOUNTER0_LO 0x3480
+#define mmRLC_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmRLC_PERFCOUNTER0_HI 0x3481
+#define mmRLC_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmRLC_PERFCOUNTER1_LO 0x3482
+#define mmRLC_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmRLC_PERFCOUNTER1_HI 0x3483
+#define mmRLC_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmRMI_PERFCOUNTER0_LO 0x34c0
+#define mmRMI_PERFCOUNTER0_LO_BASE_IDX 1
+#define mmRMI_PERFCOUNTER0_HI 0x34c1
+#define mmRMI_PERFCOUNTER0_HI_BASE_IDX 1
+#define mmRMI_PERFCOUNTER1_LO 0x34c2
+#define mmRMI_PERFCOUNTER1_LO_BASE_IDX 1
+#define mmRMI_PERFCOUNTER1_HI 0x34c3
+#define mmRMI_PERFCOUNTER1_HI_BASE_IDX 1
+#define mmRMI_PERFCOUNTER2_LO 0x34c4
+#define mmRMI_PERFCOUNTER2_LO_BASE_IDX 1
+#define mmRMI_PERFCOUNTER2_HI 0x34c5
+#define mmRMI_PERFCOUNTER2_HI_BASE_IDX 1
+#define mmRMI_PERFCOUNTER3_LO 0x34c6
+#define mmRMI_PERFCOUNTER3_LO_BASE_IDX 1
+#define mmRMI_PERFCOUNTER3_HI 0x34c7
+#define mmRMI_PERFCOUNTER3_HI_BASE_IDX 1
+
+
+// addressBlock: gc_utcl2_atcl2pfcntrdec
+// base address: 0x35400
+#define mmATC_L2_PERFCOUNTER_LO 0x3500
+#define mmATC_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmATC_L2_PERFCOUNTER_HI 0x3501
+#define mmATC_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_utcl2_vml2prdec
+// base address: 0x35420
+#define mmMC_VM_L2_PERFCOUNTER_LO 0x3508
+#define mmMC_VM_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmMC_VM_L2_PERFCOUNTER_HI 0x3509
+#define mmMC_VM_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_perfsdec
+// base address: 0x36000
+#define mmCPG_PERFCOUNTER1_SELECT 0x3800
+#define mmCPG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmCPG_PERFCOUNTER0_SELECT1 0x3801
+#define mmCPG_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmCPG_PERFCOUNTER0_SELECT 0x3802
+#define mmCPG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmCPC_PERFCOUNTER1_SELECT 0x3803
+#define mmCPC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmCPC_PERFCOUNTER0_SELECT1 0x3804
+#define mmCPC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmCPF_PERFCOUNTER1_SELECT 0x3805
+#define mmCPF_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmCPF_PERFCOUNTER0_SELECT1 0x3806
+#define mmCPF_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmCPF_PERFCOUNTER0_SELECT 0x3807
+#define mmCPF_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmCP_PERFMON_CNTL 0x3808
+#define mmCP_PERFMON_CNTL_BASE_IDX 1
+#define mmCPC_PERFCOUNTER0_SELECT 0x3809
+#define mmCPC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmCPF_TC_PERF_COUNTER_WINDOW_SELECT 0x380a
+#define mmCPF_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define mmCPG_TC_PERF_COUNTER_WINDOW_SELECT 0x380b
+#define mmCPG_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define mmCPF_LATENCY_STATS_SELECT 0x380c
+#define mmCPF_LATENCY_STATS_SELECT_BASE_IDX 1
+#define mmCPG_LATENCY_STATS_SELECT 0x380d
+#define mmCPG_LATENCY_STATS_SELECT_BASE_IDX 1
+#define mmCPC_LATENCY_STATS_SELECT 0x380e
+#define mmCPC_LATENCY_STATS_SELECT_BASE_IDX 1
+#define mmCP_DRAW_OBJECT 0x3810
+#define mmCP_DRAW_OBJECT_BASE_IDX 1
+#define mmCP_DRAW_OBJECT_COUNTER 0x3811
+#define mmCP_DRAW_OBJECT_COUNTER_BASE_IDX 1
+#define mmCP_DRAW_WINDOW_MASK_HI 0x3812
+#define mmCP_DRAW_WINDOW_MASK_HI_BASE_IDX 1
+#define mmCP_DRAW_WINDOW_HI 0x3813
+#define mmCP_DRAW_WINDOW_HI_BASE_IDX 1
+#define mmCP_DRAW_WINDOW_LO 0x3814
+#define mmCP_DRAW_WINDOW_LO_BASE_IDX 1
+#define mmCP_DRAW_WINDOW_CNTL 0x3815
+#define mmCP_DRAW_WINDOW_CNTL_BASE_IDX 1
+#define mmGRBM_PERFCOUNTER0_SELECT 0x3840
+#define mmGRBM_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmGRBM_PERFCOUNTER1_SELECT 0x3841
+#define mmGRBM_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmGRBM_SE0_PERFCOUNTER_SELECT 0x3842
+#define mmGRBM_SE0_PERFCOUNTER_SELECT_BASE_IDX 1
+#define mmGRBM_SE1_PERFCOUNTER_SELECT 0x3843
+#define mmGRBM_SE1_PERFCOUNTER_SELECT_BASE_IDX 1
+#define mmGRBM_SE2_PERFCOUNTER_SELECT 0x3844
+#define mmGRBM_SE2_PERFCOUNTER_SELECT_BASE_IDX 1
+#define mmGRBM_SE3_PERFCOUNTER_SELECT 0x3845
+#define mmGRBM_SE3_PERFCOUNTER_SELECT_BASE_IDX 1
+#define mmWD_PERFCOUNTER0_SELECT 0x3880
+#define mmWD_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmWD_PERFCOUNTER1_SELECT 0x3881
+#define mmWD_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmWD_PERFCOUNTER2_SELECT 0x3882
+#define mmWD_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmWD_PERFCOUNTER3_SELECT 0x3883
+#define mmWD_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmIA_PERFCOUNTER0_SELECT 0x3884
+#define mmIA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmIA_PERFCOUNTER1_SELECT 0x3885
+#define mmIA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmIA_PERFCOUNTER2_SELECT 0x3886
+#define mmIA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmIA_PERFCOUNTER3_SELECT 0x3887
+#define mmIA_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmIA_PERFCOUNTER0_SELECT1 0x3888
+#define mmIA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmVGT_PERFCOUNTER0_SELECT 0x388c
+#define mmVGT_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmVGT_PERFCOUNTER1_SELECT 0x388d
+#define mmVGT_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmVGT_PERFCOUNTER2_SELECT 0x388e
+#define mmVGT_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmVGT_PERFCOUNTER3_SELECT 0x388f
+#define mmVGT_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmVGT_PERFCOUNTER0_SELECT1 0x3890
+#define mmVGT_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmVGT_PERFCOUNTER1_SELECT1 0x3891
+#define mmVGT_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define mmVGT_PERFCOUNTER_SEID_MASK 0x3894
+#define mmVGT_PERFCOUNTER_SEID_MASK_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER0_SELECT 0x3900
+#define mmPA_SU_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER0_SELECT1 0x3901
+#define mmPA_SU_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER1_SELECT 0x3902
+#define mmPA_SU_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER1_SELECT1 0x3903
+#define mmPA_SU_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER2_SELECT 0x3904
+#define mmPA_SU_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmPA_SU_PERFCOUNTER3_SELECT 0x3905
+#define mmPA_SU_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER0_SELECT 0x3940
+#define mmPA_SC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER0_SELECT1 0x3941
+#define mmPA_SC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER1_SELECT 0x3942
+#define mmPA_SC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER2_SELECT 0x3943
+#define mmPA_SC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER3_SELECT 0x3944
+#define mmPA_SC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER4_SELECT 0x3945
+#define mmPA_SC_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER5_SELECT 0x3946
+#define mmPA_SC_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER6_SELECT 0x3947
+#define mmPA_SC_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define mmPA_SC_PERFCOUNTER7_SELECT 0x3948
+#define mmPA_SC_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define mmSPI_PERFCOUNTER0_SELECT 0x3980
+#define mmSPI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmSPI_PERFCOUNTER1_SELECT 0x3981
+#define mmSPI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmSPI_PERFCOUNTER2_SELECT 0x3982
+#define mmSPI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmSPI_PERFCOUNTER3_SELECT 0x3983
+#define mmSPI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmSPI_PERFCOUNTER0_SELECT1 0x3984
+#define mmSPI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmSPI_PERFCOUNTER1_SELECT1 0x3985
+#define mmSPI_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define mmSPI_PERFCOUNTER2_SELECT1 0x3986
+#define mmSPI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define mmSPI_PERFCOUNTER3_SELECT1 0x3987
+#define mmSPI_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define mmSPI_PERFCOUNTER4_SELECT 0x3988
+#define mmSPI_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define mmSPI_PERFCOUNTER5_SELECT 0x3989
+#define mmSPI_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define mmSPI_PERFCOUNTER_BINS 0x398a
+#define mmSPI_PERFCOUNTER_BINS_BASE_IDX 1
+#define mmSQ_PERFCOUNTER0_SELECT 0x39c0
+#define mmSQ_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER1_SELECT 0x39c1
+#define mmSQ_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER2_SELECT 0x39c2
+#define mmSQ_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER3_SELECT 0x39c3
+#define mmSQ_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER4_SELECT 0x39c4
+#define mmSQ_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER5_SELECT 0x39c5
+#define mmSQ_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER6_SELECT 0x39c6
+#define mmSQ_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER7_SELECT 0x39c7
+#define mmSQ_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER8_SELECT 0x39c8
+#define mmSQ_PERFCOUNTER8_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER9_SELECT 0x39c9
+#define mmSQ_PERFCOUNTER9_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER10_SELECT 0x39ca
+#define mmSQ_PERFCOUNTER10_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER11_SELECT 0x39cb
+#define mmSQ_PERFCOUNTER11_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER12_SELECT 0x39cc
+#define mmSQ_PERFCOUNTER12_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER13_SELECT 0x39cd
+#define mmSQ_PERFCOUNTER13_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER14_SELECT 0x39ce
+#define mmSQ_PERFCOUNTER14_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER15_SELECT 0x39cf
+#define mmSQ_PERFCOUNTER15_SELECT_BASE_IDX 1
+#define mmSQ_PERFCOUNTER_CTRL 0x39e0
+#define mmSQ_PERFCOUNTER_CTRL_BASE_IDX 1
+#define mmSQ_PERFCOUNTER_MASK 0x39e1
+#define mmSQ_PERFCOUNTER_MASK_BASE_IDX 1
+#define mmSQ_PERFCOUNTER_CTRL2 0x39e2
+#define mmSQ_PERFCOUNTER_CTRL2_BASE_IDX 1
+#define mmSX_PERFCOUNTER0_SELECT 0x3a40
+#define mmSX_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmSX_PERFCOUNTER1_SELECT 0x3a41
+#define mmSX_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmSX_PERFCOUNTER2_SELECT 0x3a42
+#define mmSX_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmSX_PERFCOUNTER3_SELECT 0x3a43
+#define mmSX_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmSX_PERFCOUNTER0_SELECT1 0x3a44
+#define mmSX_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmSX_PERFCOUNTER1_SELECT1 0x3a45
+#define mmSX_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define mmGDS_PERFCOUNTER0_SELECT 0x3a80
+#define mmGDS_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmGDS_PERFCOUNTER1_SELECT 0x3a81
+#define mmGDS_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmGDS_PERFCOUNTER2_SELECT 0x3a82
+#define mmGDS_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmGDS_PERFCOUNTER3_SELECT 0x3a83
+#define mmGDS_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmGDS_PERFCOUNTER0_SELECT1 0x3a84
+#define mmGDS_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmTA_PERFCOUNTER0_SELECT 0x3ac0
+#define mmTA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmTA_PERFCOUNTER0_SELECT1 0x3ac1
+#define mmTA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmTA_PERFCOUNTER1_SELECT 0x3ac2
+#define mmTA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmTD_PERFCOUNTER0_SELECT 0x3b00
+#define mmTD_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmTD_PERFCOUNTER0_SELECT1 0x3b01
+#define mmTD_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmTD_PERFCOUNTER1_SELECT 0x3b02
+#define mmTD_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmTCP_PERFCOUNTER0_SELECT 0x3b40
+#define mmTCP_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmTCP_PERFCOUNTER0_SELECT1 0x3b41
+#define mmTCP_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmTCP_PERFCOUNTER1_SELECT 0x3b42
+#define mmTCP_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmTCP_PERFCOUNTER1_SELECT1 0x3b43
+#define mmTCP_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define mmTCP_PERFCOUNTER2_SELECT 0x3b44
+#define mmTCP_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmTCP_PERFCOUNTER3_SELECT 0x3b45
+#define mmTCP_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmTCC_PERFCOUNTER0_SELECT 0x3b80
+#define mmTCC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmTCC_PERFCOUNTER0_SELECT1 0x3b81
+#define mmTCC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmTCC_PERFCOUNTER1_SELECT 0x3b82
+#define mmTCC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmTCC_PERFCOUNTER1_SELECT1 0x3b83
+#define mmTCC_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define mmTCC_PERFCOUNTER2_SELECT 0x3b84
+#define mmTCC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmTCC_PERFCOUNTER3_SELECT 0x3b85
+#define mmTCC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmTCA_PERFCOUNTER0_SELECT 0x3b90
+#define mmTCA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmTCA_PERFCOUNTER0_SELECT1 0x3b91
+#define mmTCA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmTCA_PERFCOUNTER1_SELECT 0x3b92
+#define mmTCA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmTCA_PERFCOUNTER1_SELECT1 0x3b93
+#define mmTCA_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define mmTCA_PERFCOUNTER2_SELECT 0x3b94
+#define mmTCA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmTCA_PERFCOUNTER3_SELECT 0x3b95
+#define mmTCA_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmCB_PERFCOUNTER_FILTER 0x3c00
+#define mmCB_PERFCOUNTER_FILTER_BASE_IDX 1
+#define mmCB_PERFCOUNTER0_SELECT 0x3c01
+#define mmCB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmCB_PERFCOUNTER0_SELECT1 0x3c02
+#define mmCB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmCB_PERFCOUNTER1_SELECT 0x3c03
+#define mmCB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmCB_PERFCOUNTER2_SELECT 0x3c04
+#define mmCB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmCB_PERFCOUNTER3_SELECT 0x3c05
+#define mmCB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmDB_PERFCOUNTER0_SELECT 0x3c40
+#define mmDB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmDB_PERFCOUNTER0_SELECT1 0x3c41
+#define mmDB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmDB_PERFCOUNTER1_SELECT 0x3c42
+#define mmDB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmDB_PERFCOUNTER1_SELECT1 0x3c43
+#define mmDB_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define mmDB_PERFCOUNTER2_SELECT 0x3c44
+#define mmDB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmDB_PERFCOUNTER3_SELECT 0x3c46
+#define mmDB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmRLC_SPM_PERFMON_CNTL 0x3c80
+#define mmRLC_SPM_PERFMON_CNTL_BASE_IDX 1
+#define mmRLC_SPM_PERFMON_RING_BASE_LO 0x3c81
+#define mmRLC_SPM_PERFMON_RING_BASE_LO_BASE_IDX 1
+#define mmRLC_SPM_PERFMON_RING_BASE_HI 0x3c82
+#define mmRLC_SPM_PERFMON_RING_BASE_HI_BASE_IDX 1
+#define mmRLC_SPM_PERFMON_RING_SIZE 0x3c83
+#define mmRLC_SPM_PERFMON_RING_SIZE_BASE_IDX 1
+#define mmRLC_SPM_PERFMON_SEGMENT_SIZE 0x3c84
+#define mmRLC_SPM_PERFMON_SEGMENT_SIZE_BASE_IDX 1
+#define mmRLC_SPM_SE_MUXSEL_ADDR 0x3c85
+#define mmRLC_SPM_SE_MUXSEL_ADDR_BASE_IDX 1
+#define mmRLC_SPM_SE_MUXSEL_DATA 0x3c86
+#define mmRLC_SPM_SE_MUXSEL_DATA_BASE_IDX 1
+#define mmRLC_SPM_CPG_PERFMON_SAMPLE_DELAY 0x3c87
+#define mmRLC_SPM_CPG_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_CPC_PERFMON_SAMPLE_DELAY 0x3c88
+#define mmRLC_SPM_CPC_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_CPF_PERFMON_SAMPLE_DELAY 0x3c89
+#define mmRLC_SPM_CPF_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_CB_PERFMON_SAMPLE_DELAY 0x3c8a
+#define mmRLC_SPM_CB_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_DB_PERFMON_SAMPLE_DELAY 0x3c8b
+#define mmRLC_SPM_DB_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_PA_PERFMON_SAMPLE_DELAY 0x3c8c
+#define mmRLC_SPM_PA_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_GDS_PERFMON_SAMPLE_DELAY 0x3c8d
+#define mmRLC_SPM_GDS_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_IA_PERFMON_SAMPLE_DELAY 0x3c8e
+#define mmRLC_SPM_IA_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_SC_PERFMON_SAMPLE_DELAY 0x3c90
+#define mmRLC_SPM_SC_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_TCC_PERFMON_SAMPLE_DELAY 0x3c91
+#define mmRLC_SPM_TCC_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_TCA_PERFMON_SAMPLE_DELAY 0x3c92
+#define mmRLC_SPM_TCA_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_TCP_PERFMON_SAMPLE_DELAY 0x3c93
+#define mmRLC_SPM_TCP_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_TA_PERFMON_SAMPLE_DELAY 0x3c94
+#define mmRLC_SPM_TA_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_TD_PERFMON_SAMPLE_DELAY 0x3c95
+#define mmRLC_SPM_TD_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_VGT_PERFMON_SAMPLE_DELAY 0x3c96
+#define mmRLC_SPM_VGT_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_SPI_PERFMON_SAMPLE_DELAY 0x3c97
+#define mmRLC_SPM_SPI_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_SQG_PERFMON_SAMPLE_DELAY 0x3c98
+#define mmRLC_SPM_SQG_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_SX_PERFMON_SAMPLE_DELAY 0x3c9a
+#define mmRLC_SPM_SX_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_GLOBAL_MUXSEL_ADDR 0x3c9b
+#define mmRLC_SPM_GLOBAL_MUXSEL_ADDR_BASE_IDX 1
+#define mmRLC_SPM_GLOBAL_MUXSEL_DATA 0x3c9c
+#define mmRLC_SPM_GLOBAL_MUXSEL_DATA_BASE_IDX 1
+#define mmRLC_SPM_RING_RDPTR 0x3c9d
+#define mmRLC_SPM_RING_RDPTR_BASE_IDX 1
+#define mmRLC_SPM_SEGMENT_THRESHOLD 0x3c9e
+#define mmRLC_SPM_SEGMENT_THRESHOLD_BASE_IDX 1
+#define mmRLC_SPM_RMI_PERFMON_SAMPLE_DELAY 0x3ca3
+#define mmRLC_SPM_RMI_PERFMON_SAMPLE_DELAY_BASE_IDX 1
+#define mmRLC_SPM_PERFMON_SAMPLE_DELAY_MAX 0x3ca4
+#define mmRLC_SPM_PERFMON_SAMPLE_DELAY_MAX_BASE_IDX 1
+#define mmRLC_PERFMON_CLK_CNTL_UCODE 0x3cbe
+#define mmRLC_PERFMON_CLK_CNTL_UCODE_BASE_IDX 1
+#define mmRLC_PERFMON_CLK_CNTL 0x3cbf
+#define mmRLC_PERFMON_CLK_CNTL_BASE_IDX 1
+#define mmRLC_PERFMON_CNTL 0x3cc0
+#define mmRLC_PERFMON_CNTL_BASE_IDX 1
+#define mmRLC_PERFCOUNTER0_SELECT 0x3cc1
+#define mmRLC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmRLC_PERFCOUNTER1_SELECT 0x3cc2
+#define mmRLC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmRLC_GPU_IOV_PERF_CNT_CNTL 0x3cc3
+#define mmRLC_GPU_IOV_PERF_CNT_CNTL_BASE_IDX 1
+#define mmRLC_GPU_IOV_PERF_CNT_WR_ADDR 0x3cc4
+#define mmRLC_GPU_IOV_PERF_CNT_WR_ADDR_BASE_IDX 1
+#define mmRLC_GPU_IOV_PERF_CNT_WR_DATA 0x3cc5
+#define mmRLC_GPU_IOV_PERF_CNT_WR_DATA_BASE_IDX 1
+#define mmRLC_GPU_IOV_PERF_CNT_RD_ADDR 0x3cc6
+#define mmRLC_GPU_IOV_PERF_CNT_RD_ADDR_BASE_IDX 1
+#define mmRLC_GPU_IOV_PERF_CNT_RD_DATA 0x3cc7
+#define mmRLC_GPU_IOV_PERF_CNT_RD_DATA_BASE_IDX 1
+#define mmRMI_PERFCOUNTER0_SELECT 0x3d00
+#define mmRMI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define mmRMI_PERFCOUNTER0_SELECT1 0x3d01
+#define mmRMI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define mmRMI_PERFCOUNTER1_SELECT 0x3d02
+#define mmRMI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define mmRMI_PERFCOUNTER2_SELECT 0x3d03
+#define mmRMI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define mmRMI_PERFCOUNTER2_SELECT1 0x3d04
+#define mmRMI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define mmRMI_PERFCOUNTER3_SELECT 0x3d05
+#define mmRMI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define mmRMI_PERF_COUNTER_CNTL 0x3d06
+#define mmRMI_PERF_COUNTER_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_utcl2_atcl2pfcntldec
+// base address: 0x37500
+#define mmATC_L2_PERFCOUNTER0_CFG 0x3d40
+#define mmATC_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmATC_L2_PERFCOUNTER1_CFG 0x3d41
+#define mmATC_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmATC_L2_PERFCOUNTER_RSLT_CNTL 0x3d42
+#define mmATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_utcl2_vml2pldec
+// base address: 0x37530
+#define mmMC_VM_L2_PERFCOUNTER0_CFG 0x3d4c
+#define mmMC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define mmMC_VM_L2_PERFCOUNTER1_CFG 0x3d4d
+#define mmMC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define mmMC_VM_L2_PERFCOUNTER2_CFG 0x3d4e
+#define mmMC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define mmMC_VM_L2_PERFCOUNTER3_CFG 0x3d4f
+#define mmMC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define mmMC_VM_L2_PERFCOUNTER4_CFG 0x3d50
+#define mmMC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 1
+#define mmMC_VM_L2_PERFCOUNTER5_CFG 0x3d51
+#define mmMC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 1
+#define mmMC_VM_L2_PERFCOUNTER6_CFG 0x3d52
+#define mmMC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 1
+#define mmMC_VM_L2_PERFCOUNTER7_CFG 0x3d53
+#define mmMC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 1
+#define mmMC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x3d54
+#define mmMC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_rlcpdec
+// base address: 0x3b000
+#define mmRLC_CNTL 0x4c00
+#define mmRLC_CNTL_BASE_IDX 1
+#define mmRLC_STAT 0x4c04
+#define mmRLC_STAT_BASE_IDX 1
+#define mmRLC_SAFE_MODE 0x4c05
+#define mmRLC_SAFE_MODE_BASE_IDX 1
+#define mmRLC_MEM_SLP_CNTL 0x4c06
+#define mmRLC_MEM_SLP_CNTL_BASE_IDX 1
+#define mmSMU_RLC_RESPONSE 0x4c07
+#define mmSMU_RLC_RESPONSE_BASE_IDX 1
+#define mmRLC_RLCV_SAFE_MODE 0x4c08
+#define mmRLC_RLCV_SAFE_MODE_BASE_IDX 1
+#define mmRLC_SMU_SAFE_MODE 0x4c09
+#define mmRLC_SMU_SAFE_MODE_BASE_IDX 1
+#define mmRLC_RLCV_COMMAND 0x4c0a
+#define mmRLC_RLCV_COMMAND_BASE_IDX 1
+#define mmRLC_REFCLOCK_TIMESTAMP_LSB 0x4c0c
+#define mmRLC_REFCLOCK_TIMESTAMP_LSB_BASE_IDX 1
+#define mmRLC_REFCLOCK_TIMESTAMP_MSB 0x4c0d
+#define mmRLC_REFCLOCK_TIMESTAMP_MSB_BASE_IDX 1
+#define mmRLC_GPM_TIMER_INT_0 0x4c0e
+#define mmRLC_GPM_TIMER_INT_0_BASE_IDX 1
+#define mmRLC_GPM_TIMER_INT_1 0x4c0f
+#define mmRLC_GPM_TIMER_INT_1_BASE_IDX 1
+#define mmRLC_GPM_TIMER_INT_2 0x4c10
+#define mmRLC_GPM_TIMER_INT_2_BASE_IDX 1
+#define mmRLC_GPM_TIMER_CTRL 0x4c11
+#define mmRLC_GPM_TIMER_CTRL_BASE_IDX 1
+#define mmRLC_LB_CNTR_MAX 0x4c12
+#define mmRLC_LB_CNTR_MAX_BASE_IDX 1
+#define mmRLC_GPM_TIMER_STAT 0x4c13
+#define mmRLC_GPM_TIMER_STAT_BASE_IDX 1
+#define mmRLC_GPM_TIMER_INT_3 0x4c15
+#define mmRLC_GPM_TIMER_INT_3_BASE_IDX 1
+#define mmRLC_SERDES_WR_NONCU_MASTER_MASK_1 0x4c16
+#define mmRLC_SERDES_WR_NONCU_MASTER_MASK_1_BASE_IDX 1
+#define mmRLC_SERDES_NONCU_MASTER_BUSY_1 0x4c17
+#define mmRLC_SERDES_NONCU_MASTER_BUSY_1_BASE_IDX 1
+#define mmRLC_INT_STAT 0x4c18
+#define mmRLC_INT_STAT_BASE_IDX 1
+#define mmRLC_LB_CNTL 0x4c19
+#define mmRLC_LB_CNTL_BASE_IDX 1
+#define mmRLC_MGCG_CTRL 0x4c1a
+#define mmRLC_MGCG_CTRL_BASE_IDX 1
+#define mmRLC_LB_CNTR_INIT 0x4c1b
+#define mmRLC_LB_CNTR_INIT_BASE_IDX 1
+#define mmRLC_LOAD_BALANCE_CNTR 0x4c1c
+#define mmRLC_LOAD_BALANCE_CNTR_BASE_IDX 1
+#define mmRLC_JUMP_TABLE_RESTORE 0x4c1e
+#define mmRLC_JUMP_TABLE_RESTORE_BASE_IDX 1
+#define mmRLC_PG_DELAY_2 0x4c1f
+#define mmRLC_PG_DELAY_2_BASE_IDX 1
+#define mmRLC_GPU_CLOCK_COUNT_LSB 0x4c24
+#define mmRLC_GPU_CLOCK_COUNT_LSB_BASE_IDX 1
+#define mmRLC_GPU_CLOCK_COUNT_MSB 0x4c25
+#define mmRLC_GPU_CLOCK_COUNT_MSB_BASE_IDX 1
+#define mmRLC_CAPTURE_GPU_CLOCK_COUNT 0x4c26
+#define mmRLC_CAPTURE_GPU_CLOCK_COUNT_BASE_IDX 1
+#define mmRLC_UCODE_CNTL 0x4c27
+#define mmRLC_UCODE_CNTL_BASE_IDX 1
+#define mmRLC_GPM_THREAD_RESET 0x4c28
+#define mmRLC_GPM_THREAD_RESET_BASE_IDX 1
+#define mmRLC_GPM_CP_DMA_COMPLETE_T0 0x4c29
+#define mmRLC_GPM_CP_DMA_COMPLETE_T0_BASE_IDX 1
+#define mmRLC_GPM_CP_DMA_COMPLETE_T1 0x4c2a
+#define mmRLC_GPM_CP_DMA_COMPLETE_T1_BASE_IDX 1
+#define mmRLC_FIREWALL_VIOLATION 0x4c2b
+#define mmRLC_FIREWALL_VIOLATION_BASE_IDX 1
+#define mmRLC_CLK_COUNT_GFXCLK_LSB 0x4c30
+#define mmRLC_CLK_COUNT_GFXCLK_LSB_BASE_IDX 1
+#define mmRLC_CLK_COUNT_GFXCLK_MSB 0x4c31
+#define mmRLC_CLK_COUNT_GFXCLK_MSB_BASE_IDX 1
+#define mmRLC_CLK_COUNT_REFCLK_LSB 0x4c32
+#define mmRLC_CLK_COUNT_REFCLK_LSB_BASE_IDX 1
+#define mmRLC_CLK_COUNT_REFCLK_MSB 0x4c33
+#define mmRLC_CLK_COUNT_REFCLK_MSB_BASE_IDX 1
+#define mmRLC_CLK_COUNT_CTRL 0x4c34
+#define mmRLC_CLK_COUNT_CTRL_BASE_IDX 1
+#define mmRLC_CLK_COUNT_STAT 0x4c35
+#define mmRLC_CLK_COUNT_STAT_BASE_IDX 1
+#define mmRLC_GPM_STAT 0x4c40
+#define mmRLC_GPM_STAT_BASE_IDX 1
+#define mmRLC_GPU_CLOCK_32_RES_SEL 0x4c41
+#define mmRLC_GPU_CLOCK_32_RES_SEL_BASE_IDX 1
+#define mmRLC_GPU_CLOCK_32 0x4c42
+#define mmRLC_GPU_CLOCK_32_BASE_IDX 1
+#define mmRLC_PG_CNTL 0x4c43
+#define mmRLC_PG_CNTL_BASE_IDX 1
+#define mmRLC_GPM_THREAD_PRIORITY 0x4c44
+#define mmRLC_GPM_THREAD_PRIORITY_BASE_IDX 1
+#define mmRLC_GPM_THREAD_ENABLE 0x4c45
+#define mmRLC_GPM_THREAD_ENABLE_BASE_IDX 1
+#define mmRLC_CGTT_MGCG_OVERRIDE 0x4c48
+#define mmRLC_CGTT_MGCG_OVERRIDE_BASE_IDX 1
+#define mmRLC_CGCG_CGLS_CTRL 0x4c49
+#define mmRLC_CGCG_CGLS_CTRL_BASE_IDX 1
+#define mmRLC_CGCG_RAMP_CTRL 0x4c4a
+#define mmRLC_CGCG_RAMP_CTRL_BASE_IDX 1
+#define mmRLC_DYN_PG_STATUS 0x4c4b
+#define mmRLC_DYN_PG_STATUS_BASE_IDX 1
+#define mmRLC_DYN_PG_REQUEST 0x4c4c
+#define mmRLC_DYN_PG_REQUEST_BASE_IDX 1
+#define mmRLC_PG_DELAY 0x4c4d
+#define mmRLC_PG_DELAY_BASE_IDX 1
+#define mmRLC_CU_STATUS 0x4c4e
+#define mmRLC_CU_STATUS_BASE_IDX 1
+#define mmRLC_LB_INIT_CU_MASK 0x4c4f
+#define mmRLC_LB_INIT_CU_MASK_BASE_IDX 1
+#define mmRLC_LB_ALWAYS_ACTIVE_CU_MASK 0x4c50
+#define mmRLC_LB_ALWAYS_ACTIVE_CU_MASK_BASE_IDX 1
+#define mmRLC_LB_PARAMS 0x4c51
+#define mmRLC_LB_PARAMS_BASE_IDX 1
+#define mmRLC_THREAD1_DELAY 0x4c52
+#define mmRLC_THREAD1_DELAY_BASE_IDX 1
+#define mmRLC_PG_ALWAYS_ON_CU_MASK 0x4c53
+#define mmRLC_PG_ALWAYS_ON_CU_MASK_BASE_IDX 1
+#define mmRLC_MAX_PG_CU 0x4c54
+#define mmRLC_MAX_PG_CU_BASE_IDX 1
+#define mmRLC_AUTO_PG_CTRL 0x4c55
+#define mmRLC_AUTO_PG_CTRL_BASE_IDX 1
+#define mmRLC_SMU_GRBM_REG_SAVE_CTRL 0x4c56
+#define mmRLC_SMU_GRBM_REG_SAVE_CTRL_BASE_IDX 1
+#define mmRLC_SERDES_RD_PENDING 0x4c58
+#define mmRLC_SERDES_RD_PENDING_BASE_IDX 1
+#define mmRLC_SERDES_RD_MASTER_INDEX 0x4c59
+#define mmRLC_SERDES_RD_MASTER_INDEX_BASE_IDX 1
+#define mmRLC_SERDES_RD_DATA_0 0x4c5a
+#define mmRLC_SERDES_RD_DATA_0_BASE_IDX 1
+#define mmRLC_SERDES_RD_DATA_1 0x4c5b
+#define mmRLC_SERDES_RD_DATA_1_BASE_IDX 1
+#define mmRLC_SERDES_RD_DATA_2 0x4c5c
+#define mmRLC_SERDES_RD_DATA_2_BASE_IDX 1
+#define mmRLC_SERDES_WR_CU_MASTER_MASK 0x4c5d
+#define mmRLC_SERDES_WR_CU_MASTER_MASK_BASE_IDX 1
+#define mmRLC_SERDES_WR_NONCU_MASTER_MASK 0x4c5e
+#define mmRLC_SERDES_WR_NONCU_MASTER_MASK_BASE_IDX 1
+#define mmRLC_SERDES_WR_CTRL 0x4c5f
+#define mmRLC_SERDES_WR_CTRL_BASE_IDX 1
+#define mmRLC_SERDES_WR_DATA 0x4c60
+#define mmRLC_SERDES_WR_DATA_BASE_IDX 1
+#define mmRLC_SERDES_CU_MASTER_BUSY 0x4c61
+#define mmRLC_SERDES_CU_MASTER_BUSY_BASE_IDX 1
+#define mmRLC_SERDES_NONCU_MASTER_BUSY 0x4c62
+#define mmRLC_SERDES_NONCU_MASTER_BUSY_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_0 0x4c63
+#define mmRLC_GPM_GENERAL_0_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_1 0x4c64
+#define mmRLC_GPM_GENERAL_1_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_2 0x4c65
+#define mmRLC_GPM_GENERAL_2_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_3 0x4c66
+#define mmRLC_GPM_GENERAL_3_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_4 0x4c67
+#define mmRLC_GPM_GENERAL_4_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_5 0x4c68
+#define mmRLC_GPM_GENERAL_5_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_6 0x4c69
+#define mmRLC_GPM_GENERAL_6_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_7 0x4c6a
+#define mmRLC_GPM_GENERAL_7_BASE_IDX 1
+#define mmRLC_GPM_SCRATCH_ADDR 0x4c6c
+#define mmRLC_GPM_SCRATCH_ADDR_BASE_IDX 1
+#define mmRLC_GPM_SCRATCH_DATA 0x4c6d
+#define mmRLC_GPM_SCRATCH_DATA_BASE_IDX 1
+#define mmRLC_STATIC_PG_STATUS 0x4c6e
+#define mmRLC_STATIC_PG_STATUS_BASE_IDX 1
+#define mmRLC_SPM_MC_CNTL 0x4c71
+#define mmRLC_SPM_MC_CNTL_BASE_IDX 1
+#define mmRLC_SPM_INT_CNTL 0x4c72
+#define mmRLC_SPM_INT_CNTL_BASE_IDX 1
+#define mmRLC_SPM_INT_STATUS 0x4c73
+#define mmRLC_SPM_INT_STATUS_BASE_IDX 1
+#define mmRLC_SMU_MESSAGE 0x4c76
+#define mmRLC_SMU_MESSAGE_BASE_IDX 1
+#define mmRLC_GPM_LOG_SIZE 0x4c77
+#define mmRLC_GPM_LOG_SIZE_BASE_IDX 1
+#define mmRLC_PG_DELAY_3 0x4c78
+#define mmRLC_PG_DELAY_3_BASE_IDX 1
+#define mmRLC_GPR_REG1 0x4c79
+#define mmRLC_GPR_REG1_BASE_IDX 1
+#define mmRLC_GPR_REG2 0x4c7a
+#define mmRLC_GPR_REG2_BASE_IDX 1
+#define mmRLC_GPM_LOG_CONT 0x4c7b
+#define mmRLC_GPM_LOG_CONT_BASE_IDX 1
+#define mmRLC_GPM_INT_DISABLE_TH0 0x4c7c
+#define mmRLC_GPM_INT_DISABLE_TH0_BASE_IDX 1
+#define mmRLC_GPM_INT_FORCE_TH0 0x4c7e
+#define mmRLC_GPM_INT_FORCE_TH0_BASE_IDX 1
+#define mmRLC_GPM_INT_FORCE_TH1 0x4c7f
+#define mmRLC_GPM_INT_FORCE_TH1_BASE_IDX 1
+#define mmRLC_SRM_CNTL 0x4c80
+#define mmRLC_SRM_CNTL_BASE_IDX 1
+#define mmRLC_SRM_ARAM_ADDR 0x4c83
+#define mmRLC_SRM_ARAM_ADDR_BASE_IDX 1
+#define mmRLC_SRM_ARAM_DATA 0x4c84
+#define mmRLC_SRM_ARAM_DATA_BASE_IDX 1
+#define mmRLC_SRM_DRAM_ADDR 0x4c85
+#define mmRLC_SRM_DRAM_ADDR_BASE_IDX 1
+#define mmRLC_SRM_DRAM_DATA 0x4c86
+#define mmRLC_SRM_DRAM_DATA_BASE_IDX 1
+#define mmRLC_SRM_GPM_COMMAND 0x4c87
+#define mmRLC_SRM_GPM_COMMAND_BASE_IDX 1
+#define mmRLC_SRM_GPM_COMMAND_STATUS 0x4c88
+#define mmRLC_SRM_GPM_COMMAND_STATUS_BASE_IDX 1
+#define mmRLC_SRM_RLCV_COMMAND 0x4c89
+#define mmRLC_SRM_RLCV_COMMAND_BASE_IDX 1
+#define mmRLC_SRM_RLCV_COMMAND_STATUS 0x4c8a
+#define mmRLC_SRM_RLCV_COMMAND_STATUS_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_ADDR_0 0x4c8b
+#define mmRLC_SRM_INDEX_CNTL_ADDR_0_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_ADDR_1 0x4c8c
+#define mmRLC_SRM_INDEX_CNTL_ADDR_1_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_ADDR_2 0x4c8d
+#define mmRLC_SRM_INDEX_CNTL_ADDR_2_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_ADDR_3 0x4c8e
+#define mmRLC_SRM_INDEX_CNTL_ADDR_3_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_ADDR_4 0x4c8f
+#define mmRLC_SRM_INDEX_CNTL_ADDR_4_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_ADDR_5 0x4c90
+#define mmRLC_SRM_INDEX_CNTL_ADDR_5_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_ADDR_6 0x4c91
+#define mmRLC_SRM_INDEX_CNTL_ADDR_6_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_ADDR_7 0x4c92
+#define mmRLC_SRM_INDEX_CNTL_ADDR_7_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_DATA_0 0x4c93
+#define mmRLC_SRM_INDEX_CNTL_DATA_0_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_DATA_1 0x4c94
+#define mmRLC_SRM_INDEX_CNTL_DATA_1_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_DATA_2 0x4c95
+#define mmRLC_SRM_INDEX_CNTL_DATA_2_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_DATA_3 0x4c96
+#define mmRLC_SRM_INDEX_CNTL_DATA_3_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_DATA_4 0x4c97
+#define mmRLC_SRM_INDEX_CNTL_DATA_4_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_DATA_5 0x4c98
+#define mmRLC_SRM_INDEX_CNTL_DATA_5_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_DATA_6 0x4c99
+#define mmRLC_SRM_INDEX_CNTL_DATA_6_BASE_IDX 1
+#define mmRLC_SRM_INDEX_CNTL_DATA_7 0x4c9a
+#define mmRLC_SRM_INDEX_CNTL_DATA_7_BASE_IDX 1
+#define mmRLC_SRM_STAT 0x4c9b
+#define mmRLC_SRM_STAT_BASE_IDX 1
+#define mmRLC_SRM_GPM_ABORT 0x4c9c
+#define mmRLC_SRM_GPM_ABORT_BASE_IDX 1
+#define mmRLC_CSIB_ADDR_LO 0x4ca2
+#define mmRLC_CSIB_ADDR_LO_BASE_IDX 1
+#define mmRLC_CSIB_ADDR_HI 0x4ca3
+#define mmRLC_CSIB_ADDR_HI_BASE_IDX 1
+#define mmRLC_CSIB_LENGTH 0x4ca4
+#define mmRLC_CSIB_LENGTH_BASE_IDX 1
+#define mmRLC_SMU_COMMAND 0x4ca9
+#define mmRLC_SMU_COMMAND_BASE_IDX 1
+#define mmRLC_CP_SCHEDULERS 0x4caa
+#define mmRLC_CP_SCHEDULERS_BASE_IDX 1
+#define mmRLC_SMU_ARGUMENT_1 0x4cab
+#define mmRLC_SMU_ARGUMENT_1_BASE_IDX 1
+#define mmRLC_SMU_ARGUMENT_2 0x4cac
+#define mmRLC_SMU_ARGUMENT_2_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_8 0x4cad
+#define mmRLC_GPM_GENERAL_8_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_9 0x4cae
+#define mmRLC_GPM_GENERAL_9_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_10 0x4caf
+#define mmRLC_GPM_GENERAL_10_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_11 0x4cb0
+#define mmRLC_GPM_GENERAL_11_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_12 0x4cb1
+#define mmRLC_GPM_GENERAL_12_BASE_IDX 1
+#define mmRLC_GPM_UTCL1_CNTL_0 0x4cb2
+#define mmRLC_GPM_UTCL1_CNTL_0_BASE_IDX 1
+#define mmRLC_GPM_UTCL1_CNTL_1 0x4cb3
+#define mmRLC_GPM_UTCL1_CNTL_1_BASE_IDX 1
+#define mmRLC_GPM_UTCL1_CNTL_2 0x4cb4
+#define mmRLC_GPM_UTCL1_CNTL_2_BASE_IDX 1
+#define mmRLC_SPM_UTCL1_CNTL 0x4cb5
+#define mmRLC_SPM_UTCL1_CNTL_BASE_IDX 1
+#define mmRLC_UTCL1_STATUS_2 0x4cb6
+#define mmRLC_UTCL1_STATUS_2_BASE_IDX 1
+#define mmRLC_LB_THR_CONFIG_2 0x4cb8
+#define mmRLC_LB_THR_CONFIG_2_BASE_IDX 1
+#define mmRLC_LB_THR_CONFIG_3 0x4cb9
+#define mmRLC_LB_THR_CONFIG_3_BASE_IDX 1
+#define mmRLC_LB_THR_CONFIG_4 0x4cba
+#define mmRLC_LB_THR_CONFIG_4_BASE_IDX 1
+#define mmRLC_SPM_UTCL1_ERROR_1 0x4cbc
+#define mmRLC_SPM_UTCL1_ERROR_1_BASE_IDX 1
+#define mmRLC_SPM_UTCL1_ERROR_2 0x4cbd
+#define mmRLC_SPM_UTCL1_ERROR_2_BASE_IDX 1
+#define mmRLC_GPM_UTCL1_TH0_ERROR_1 0x4cbe
+#define mmRLC_GPM_UTCL1_TH0_ERROR_1_BASE_IDX 1
+#define mmRLC_LB_THR_CONFIG_1 0x4cbf
+#define mmRLC_LB_THR_CONFIG_1_BASE_IDX 1
+#define mmRLC_GPM_UTCL1_TH0_ERROR_2 0x4cc0
+#define mmRLC_GPM_UTCL1_TH0_ERROR_2_BASE_IDX 1
+#define mmRLC_GPM_UTCL1_TH1_ERROR_1 0x4cc1
+#define mmRLC_GPM_UTCL1_TH1_ERROR_1_BASE_IDX 1
+#define mmRLC_GPM_UTCL1_TH1_ERROR_2 0x4cc2
+#define mmRLC_GPM_UTCL1_TH1_ERROR_2_BASE_IDX 1
+#define mmRLC_GPM_UTCL1_TH2_ERROR_1 0x4cc3
+#define mmRLC_GPM_UTCL1_TH2_ERROR_1_BASE_IDX 1
+#define mmRLC_GPM_UTCL1_TH2_ERROR_2 0x4cc4
+#define mmRLC_GPM_UTCL1_TH2_ERROR_2_BASE_IDX 1
+#define mmRLC_CGCG_CGLS_CTRL_3D 0x4cc5
+#define mmRLC_CGCG_CGLS_CTRL_3D_BASE_IDX 1
+#define mmRLC_CGCG_RAMP_CTRL_3D 0x4cc6
+#define mmRLC_CGCG_RAMP_CTRL_3D_BASE_IDX 1
+#define mmRLC_SEMAPHORE_0 0x4cc7
+#define mmRLC_SEMAPHORE_0_BASE_IDX 1
+#define mmRLC_SEMAPHORE_1 0x4cc8
+#define mmRLC_SEMAPHORE_1_BASE_IDX 1
+#define mmRLC_CP_EOF_INT 0x4cca
+#define mmRLC_CP_EOF_INT_BASE_IDX 1
+#define mmRLC_CP_EOF_INT_CNT 0x4ccb
+#define mmRLC_CP_EOF_INT_CNT_BASE_IDX 1
+#define mmRLC_SPARE_INT 0x4ccc
+#define mmRLC_SPARE_INT_BASE_IDX 1
+#define mmRLC_PREWALKER_UTCL1_CNTL 0x4ccd
+#define mmRLC_PREWALKER_UTCL1_CNTL_BASE_IDX 1
+#define mmRLC_PREWALKER_UTCL1_TRIG 0x4cce
+#define mmRLC_PREWALKER_UTCL1_TRIG_BASE_IDX 1
+#define mmRLC_PREWALKER_UTCL1_ADDR_LSB 0x4ccf
+#define mmRLC_PREWALKER_UTCL1_ADDR_LSB_BASE_IDX 1
+#define mmRLC_PREWALKER_UTCL1_ADDR_MSB 0x4cd0
+#define mmRLC_PREWALKER_UTCL1_ADDR_MSB_BASE_IDX 1
+#define mmRLC_PREWALKER_UTCL1_SIZE_LSB 0x4cd1
+#define mmRLC_PREWALKER_UTCL1_SIZE_LSB_BASE_IDX 1
+#define mmRLC_PREWALKER_UTCL1_SIZE_MSB 0x4cd2
+#define mmRLC_PREWALKER_UTCL1_SIZE_MSB_BASE_IDX 1
+#define mmRLC_DSM_TRIG 0x4cd3
+#define mmRLC_DSM_TRIG_BASE_IDX 1
+#define mmRLC_UTCL1_STATUS 0x4cd4
+#define mmRLC_UTCL1_STATUS_BASE_IDX 1
+#define mmRLC_R2I_CNTL_0 0x4cd5
+#define mmRLC_R2I_CNTL_0_BASE_IDX 1
+#define mmRLC_R2I_CNTL_1 0x4cd6
+#define mmRLC_R2I_CNTL_1_BASE_IDX 1
+#define mmRLC_R2I_CNTL_2 0x4cd7
+#define mmRLC_R2I_CNTL_2_BASE_IDX 1
+#define mmRLC_R2I_CNTL_3 0x4cd8
+#define mmRLC_R2I_CNTL_3_BASE_IDX 1
+#define mmRLC_UTCL2_CNTL 0x4cd9
+#define mmRLC_UTCL2_CNTL_BASE_IDX 1
+#define mmRLC_LBPW_CU_STAT 0x4cda
+#define mmRLC_LBPW_CU_STAT_BASE_IDX 1
+#define mmRLC_DS_CNTL 0x4cdb
+#define mmRLC_DS_CNTL_BASE_IDX 1
+#define mmRLC_GPM_INT_STAT_TH0 0x4cdc
+#define mmRLC_GPM_INT_STAT_TH0_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_13 0x4cdd
+#define mmRLC_GPM_GENERAL_13_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_14 0x4cde
+#define mmRLC_GPM_GENERAL_14_BASE_IDX 1
+#define mmRLC_GPM_GENERAL_15 0x4cdf
+#define mmRLC_GPM_GENERAL_15_BASE_IDX 1
+#define mmRLC_SPARE_INT_1 0x4ce0
+#define mmRLC_SPARE_INT_1_BASE_IDX 1
+#define mmRLC_RLCV_SPARE_INT_1 0x4ce1
+#define mmRLC_RLCV_SPARE_INT_1_BASE_IDX 1
+#define mmRLC_SEMAPHORE_2 0x4ce3
+#define mmRLC_SEMAPHORE_2_BASE_IDX 1
+#define mmRLC_SEMAPHORE_3 0x4ce4
+#define mmRLC_SEMAPHORE_3_BASE_IDX 1
+#define mmRLC_SMU_ARGUMENT_3 0x4ce5
+#define mmRLC_SMU_ARGUMENT_3_BASE_IDX 1
+#define mmRLC_SMU_ARGUMENT_4 0x4ce6
+#define mmRLC_SMU_ARGUMENT_4_BASE_IDX 1
+#define mmRLC_GPU_CLOCK_COUNT_LSB_1 0x4ce8
+#define mmRLC_GPU_CLOCK_COUNT_LSB_1_BASE_IDX 1
+#define mmRLC_GPU_CLOCK_COUNT_MSB_1 0x4ce9
+#define mmRLC_GPU_CLOCK_COUNT_MSB_1_BASE_IDX 1
+#define mmRLC_CAPTURE_GPU_CLOCK_COUNT_1 0x4cea
+#define mmRLC_CAPTURE_GPU_CLOCK_COUNT_1_BASE_IDX 1
+#define mmRLC_GPU_CLOCK_COUNT_LSB_2 0x4ceb
+#define mmRLC_GPU_CLOCK_COUNT_LSB_2_BASE_IDX 1
+#define mmRLC_GPU_CLOCK_COUNT_MSB_2 0x4cec
+#define mmRLC_GPU_CLOCK_COUNT_MSB_2_BASE_IDX 1
+#define mmRLC_CAPTURE_GPU_CLOCK_COUNT_2 0x4cef
+#define mmRLC_CAPTURE_GPU_CLOCK_COUNT_2_BASE_IDX 1
+#define mmRLC_CPG_STAT_INVAL 0x4d09
+#define mmRLC_CPG_STAT_INVAL_BASE_IDX 1
+#define mmRLC_RLCV_SPARE_INT 0x4f30
+#define mmRLC_RLCV_SPARE_INT_BASE_IDX 1
+#define mmRLC_SMU_CLK_REQ 0x4f97
+#define mmRLC_SMU_CLK_REQ_BASE_IDX 1
+
+
+// addressBlock: gc_pwrdec
+// base address: 0x3c000
+#define mmCGTS_SM_CTRL_REG 0x5000
+#define mmCGTS_SM_CTRL_REG_BASE_IDX 1
+#define mmCGTS_RD_CTRL_REG 0x5001
+#define mmCGTS_RD_CTRL_REG_BASE_IDX 1
+#define mmCGTS_RD_REG 0x5002
+#define mmCGTS_RD_REG_BASE_IDX 1
+#define mmCGTS_TCC_DISABLE 0x5003
+#define mmCGTS_TCC_DISABLE_BASE_IDX 1
+#define mmCGTS_USER_TCC_DISABLE 0x5004
+#define mmCGTS_USER_TCC_DISABLE_BASE_IDX 1
+#define mmCGTS_CU0_SP0_CTRL_REG 0x5008
+#define mmCGTS_CU0_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU0_LDS_SQ_CTRL_REG 0x5009
+#define mmCGTS_CU0_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU0_TA_SQC_CTRL_REG 0x500a
+#define mmCGTS_CU0_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU0_SP1_CTRL_REG 0x500b
+#define mmCGTS_CU0_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU0_TD_TCP_CTRL_REG 0x500c
+#define mmCGTS_CU0_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU1_SP0_CTRL_REG 0x500d
+#define mmCGTS_CU1_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU1_LDS_SQ_CTRL_REG 0x500e
+#define mmCGTS_CU1_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU1_TA_SQC_CTRL_REG 0x500f
+#define mmCGTS_CU1_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU1_SP1_CTRL_REG 0x5010
+#define mmCGTS_CU1_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU1_TD_TCP_CTRL_REG 0x5011
+#define mmCGTS_CU1_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU2_SP0_CTRL_REG 0x5012
+#define mmCGTS_CU2_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU2_LDS_SQ_CTRL_REG 0x5013
+#define mmCGTS_CU2_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU2_TA_SQC_CTRL_REG 0x5014
+#define mmCGTS_CU2_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU2_SP1_CTRL_REG 0x5015
+#define mmCGTS_CU2_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU2_TD_TCP_CTRL_REG 0x5016
+#define mmCGTS_CU2_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU3_SP0_CTRL_REG 0x5017
+#define mmCGTS_CU3_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU3_LDS_SQ_CTRL_REG 0x5018
+#define mmCGTS_CU3_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU3_TA_SQC_CTRL_REG 0x5019
+#define mmCGTS_CU3_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU3_SP1_CTRL_REG 0x501a
+#define mmCGTS_CU3_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU3_TD_TCP_CTRL_REG 0x501b
+#define mmCGTS_CU3_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU4_SP0_CTRL_REG 0x501c
+#define mmCGTS_CU4_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU4_LDS_SQ_CTRL_REG 0x501d
+#define mmCGTS_CU4_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU4_TA_SQC_CTRL_REG 0x501e
+#define mmCGTS_CU4_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU4_SP1_CTRL_REG 0x501f
+#define mmCGTS_CU4_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU4_TD_TCP_CTRL_REG 0x5020
+#define mmCGTS_CU4_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU5_SP0_CTRL_REG 0x5021
+#define mmCGTS_CU5_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU5_LDS_SQ_CTRL_REG 0x5022
+#define mmCGTS_CU5_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU5_TA_SQC_CTRL_REG 0x5023
+#define mmCGTS_CU5_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU5_SP1_CTRL_REG 0x5024
+#define mmCGTS_CU5_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU5_TD_TCP_CTRL_REG 0x5025
+#define mmCGTS_CU5_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU6_SP0_CTRL_REG 0x5026
+#define mmCGTS_CU6_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU6_LDS_SQ_CTRL_REG 0x5027
+#define mmCGTS_CU6_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU6_TA_SQC_CTRL_REG 0x5028
+#define mmCGTS_CU6_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU6_SP1_CTRL_REG 0x5029
+#define mmCGTS_CU6_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU6_TD_TCP_CTRL_REG 0x502a
+#define mmCGTS_CU6_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU7_SP0_CTRL_REG 0x502b
+#define mmCGTS_CU7_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU7_LDS_SQ_CTRL_REG 0x502c
+#define mmCGTS_CU7_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU7_TA_SQC_CTRL_REG 0x502d
+#define mmCGTS_CU7_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU7_SP1_CTRL_REG 0x502e
+#define mmCGTS_CU7_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU7_TD_TCP_CTRL_REG 0x502f
+#define mmCGTS_CU7_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU8_SP0_CTRL_REG 0x5030
+#define mmCGTS_CU8_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU8_LDS_SQ_CTRL_REG 0x5031
+#define mmCGTS_CU8_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU8_TA_SQC_CTRL_REG 0x5032
+#define mmCGTS_CU8_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU8_SP1_CTRL_REG 0x5033
+#define mmCGTS_CU8_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU8_TD_TCP_CTRL_REG 0x5034
+#define mmCGTS_CU8_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU9_SP0_CTRL_REG 0x5035
+#define mmCGTS_CU9_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU9_LDS_SQ_CTRL_REG 0x5036
+#define mmCGTS_CU9_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU9_TA_SQC_CTRL_REG 0x5037
+#define mmCGTS_CU9_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU9_SP1_CTRL_REG 0x5038
+#define mmCGTS_CU9_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU9_TD_TCP_CTRL_REG 0x5039
+#define mmCGTS_CU9_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU10_SP0_CTRL_REG 0x503a
+#define mmCGTS_CU10_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU10_LDS_SQ_CTRL_REG 0x503b
+#define mmCGTS_CU10_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU10_TA_SQC_CTRL_REG 0x503c
+#define mmCGTS_CU10_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU10_SP1_CTRL_REG 0x503d
+#define mmCGTS_CU10_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU10_TD_TCP_CTRL_REG 0x503e
+#define mmCGTS_CU10_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU11_SP0_CTRL_REG 0x503f
+#define mmCGTS_CU11_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU11_LDS_SQ_CTRL_REG 0x5040
+#define mmCGTS_CU11_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU11_TA_SQC_CTRL_REG 0x5041
+#define mmCGTS_CU11_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU11_SP1_CTRL_REG 0x5042
+#define mmCGTS_CU11_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU11_TD_TCP_CTRL_REG 0x5043
+#define mmCGTS_CU11_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU12_SP0_CTRL_REG 0x5044
+#define mmCGTS_CU12_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU12_LDS_SQ_CTRL_REG 0x5045
+#define mmCGTS_CU12_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU12_TA_SQC_CTRL_REG 0x5046
+#define mmCGTS_CU12_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU12_SP1_CTRL_REG 0x5047
+#define mmCGTS_CU12_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU12_TD_TCP_CTRL_REG 0x5048
+#define mmCGTS_CU12_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU13_SP0_CTRL_REG 0x5049
+#define mmCGTS_CU13_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU13_LDS_SQ_CTRL_REG 0x504a
+#define mmCGTS_CU13_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU13_TA_SQC_CTRL_REG 0x504b
+#define mmCGTS_CU13_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU13_SP1_CTRL_REG 0x504c
+#define mmCGTS_CU13_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU13_TD_TCP_CTRL_REG 0x504d
+#define mmCGTS_CU13_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU14_SP0_CTRL_REG 0x504e
+#define mmCGTS_CU14_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU14_LDS_SQ_CTRL_REG 0x504f
+#define mmCGTS_CU14_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU14_TA_SQC_CTRL_REG 0x5050
+#define mmCGTS_CU14_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU14_SP1_CTRL_REG 0x5051
+#define mmCGTS_CU14_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU14_TD_TCP_CTRL_REG 0x5052
+#define mmCGTS_CU14_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU15_SP0_CTRL_REG 0x5053
+#define mmCGTS_CU15_SP0_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU15_LDS_SQ_CTRL_REG 0x5054
+#define mmCGTS_CU15_LDS_SQ_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU15_TA_SQC_CTRL_REG 0x5055
+#define mmCGTS_CU15_TA_SQC_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU15_SP1_CTRL_REG 0x5056
+#define mmCGTS_CU15_SP1_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU15_TD_TCP_CTRL_REG 0x5057
+#define mmCGTS_CU15_TD_TCP_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU0_TCPI_CTRL_REG 0x5058
+#define mmCGTS_CU0_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU1_TCPI_CTRL_REG 0x5059
+#define mmCGTS_CU1_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU2_TCPI_CTRL_REG 0x505a
+#define mmCGTS_CU2_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU3_TCPI_CTRL_REG 0x505b
+#define mmCGTS_CU3_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU4_TCPI_CTRL_REG 0x505c
+#define mmCGTS_CU4_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU5_TCPI_CTRL_REG 0x505d
+#define mmCGTS_CU5_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU6_TCPI_CTRL_REG 0x505e
+#define mmCGTS_CU6_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU7_TCPI_CTRL_REG 0x505f
+#define mmCGTS_CU7_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU8_TCPI_CTRL_REG 0x5060
+#define mmCGTS_CU8_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU9_TCPI_CTRL_REG 0x5061
+#define mmCGTS_CU9_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU10_TCPI_CTRL_REG 0x5062
+#define mmCGTS_CU10_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU11_TCPI_CTRL_REG 0x5063
+#define mmCGTS_CU11_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU12_TCPI_CTRL_REG 0x5064
+#define mmCGTS_CU12_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU13_TCPI_CTRL_REG 0x5065
+#define mmCGTS_CU13_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU14_TCPI_CTRL_REG 0x5066
+#define mmCGTS_CU14_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTS_CU15_TCPI_CTRL_REG 0x5067
+#define mmCGTS_CU15_TCPI_CTRL_REG_BASE_IDX 1
+#define mmCGTT_SPI_PS_CLK_CTRL 0x507d
+#define mmCGTT_SPI_PS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPIS_CLK_CTRL 0x507e
+#define mmCGTT_SPIS_CLK_CTRL_BASE_IDX 1
+#define mmCGTX_SPI_DEBUG_CLK_CTRL 0x507f
+#define mmCGTX_SPI_DEBUG_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPI_CLK_CTRL 0x5080
+#define mmCGTT_SPI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PC_CLK_CTRL 0x5081
+#define mmCGTT_PC_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_BCI_CLK_CTRL 0x5082
+#define mmCGTT_BCI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_VGT_CLK_CTRL 0x5084
+#define mmCGTT_VGT_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_IA_CLK_CTRL 0x5085
+#define mmCGTT_IA_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_WD_CLK_CTRL 0x5086
+#define mmCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PA_CLK_CTRL 0x5088
+#define mmCGTT_PA_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL0 0x5089
+#define mmCGTT_SC_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL1 0x508a
+#define mmCGTT_SC_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL2 0x508b
+#define mmCGTT_SC_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_SQ_CLK_CTRL 0x508c
+#define mmCGTT_SQ_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SQG_CLK_CTRL 0x508d
+#define mmCGTT_SQG_CLK_CTRL_BASE_IDX 1
+#define mmSQ_ALU_CLK_CTRL 0x508e
+#define mmSQ_ALU_CLK_CTRL_BASE_IDX 1
+#define mmSQ_TEX_CLK_CTRL 0x508f
+#define mmSQ_TEX_CLK_CTRL_BASE_IDX 1
+#define mmSQ_LDS_CLK_CTRL 0x5090
+#define mmSQ_LDS_CLK_CTRL_BASE_IDX 1
+#define mmSQ_POWER_THROTTLE 0x5091
+#define mmSQ_POWER_THROTTLE_BASE_IDX 1
+#define mmSQ_POWER_THROTTLE2 0x5092
+#define mmSQ_POWER_THROTTLE2_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL0 0x5094
+#define mmCGTT_SX_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL1 0x5095
+#define mmCGTT_SX_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL2 0x5096
+#define mmCGTT_SX_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL3 0x5097
+#define mmCGTT_SX_CLK_CTRL3_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL4 0x5098
+#define mmCGTT_SX_CLK_CTRL4_BASE_IDX 1
+#define mmTD_CGTT_CTRL 0x509c
+#define mmTD_CGTT_CTRL_BASE_IDX 1
+#define mmTA_CGTT_CTRL 0x509d
+#define mmTA_CGTT_CTRL_BASE_IDX 1
+#define mmCGTT_TCPI_CLK_CTRL 0x509e
+#define mmCGTT_TCPI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_TCI_CLK_CTRL 0x509f
+#define mmCGTT_TCI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_GDS_CLK_CTRL 0x50a0
+#define mmCGTT_GDS_CLK_CTRL_BASE_IDX 1
+#define mmDB_CGTT_CLK_CTRL_0 0x50a4
+#define mmDB_CGTT_CLK_CTRL_0_BASE_IDX 1
+#define mmCB_CGTT_SCLK_CTRL 0x50a8
+#define mmCB_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmTCC_CGTT_SCLK_CTRL 0x50ac
+#define mmTCC_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmTCA_CGTT_SCLK_CTRL 0x50ad
+#define mmTCA_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmCGTT_CP_CLK_CTRL 0x50b0
+#define mmCGTT_CP_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_CPF_CLK_CTRL 0x50b1
+#define mmCGTT_CPF_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_CPC_CLK_CTRL 0x50b2
+#define mmCGTT_CPC_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_RLC_CLK_CTRL 0x50b5
+#define mmCGTT_RLC_CLK_CTRL_BASE_IDX 1
+#define mmRLC_GFX_RM_CNTL 0x50b6
+#define mmRLC_GFX_RM_CNTL_BASE_IDX 1
+#define mmRMI_CGTT_SCLK_CTRL 0x50c0
+#define mmRMI_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmCGTT_TCPF_CLK_CTRL 0x50c1
+#define mmCGTT_TCPF_CLK_CTRL_BASE_IDX 1
+#define mmSE_CAC_CGTT_CLK_CTRL 0x50d0
+#define mmSE_CAC_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGC_CAC_CGTT_CLK_CTRL 0x50d8
+#define mmGC_CAC_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGRBM_CGTT_CLK_CNTL 0x50e0
+#define mmGRBM_CGTT_CLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_ea_pwrdec
+// base address: 0x3c000
+#define mmGCEA_CGTT_CLK_CTRL 0x50c4
+#define mmGCEA_CGTT_CLK_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_utcl2_vmsharedhvdec
+// base address: 0x3ea00
+#define mmMC_VM_FB_SIZE_OFFSET_VF0 0x5a80
+#define mmMC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF1 0x5a81
+#define mmMC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF2 0x5a82
+#define mmMC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF3 0x5a83
+#define mmMC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF4 0x5a84
+#define mmMC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF5 0x5a85
+#define mmMC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF6 0x5a86
+#define mmMC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF7 0x5a87
+#define mmMC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF8 0x5a88
+#define mmMC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF9 0x5a89
+#define mmMC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF10 0x5a8a
+#define mmMC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF11 0x5a8b
+#define mmMC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF12 0x5a8c
+#define mmMC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF13 0x5a8d
+#define mmMC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF14 0x5a8e
+#define mmMC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 1
+#define mmMC_VM_FB_SIZE_OFFSET_VF15 0x5a8f
+#define mmMC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 1
+#define mmVM_IOMMU_MMIO_CNTRL_1 0x5a90
+#define mmVM_IOMMU_MMIO_CNTRL_1_BASE_IDX 1
+#define mmMC_VM_MARC_BASE_LO_0 0x5a91
+#define mmMC_VM_MARC_BASE_LO_0_BASE_IDX 1
+#define mmMC_VM_MARC_BASE_LO_1 0x5a92
+#define mmMC_VM_MARC_BASE_LO_1_BASE_IDX 1
+#define mmMC_VM_MARC_BASE_LO_2 0x5a93
+#define mmMC_VM_MARC_BASE_LO_2_BASE_IDX 1
+#define mmMC_VM_MARC_BASE_LO_3 0x5a94
+#define mmMC_VM_MARC_BASE_LO_3_BASE_IDX 1
+#define mmMC_VM_MARC_BASE_HI_0 0x5a95
+#define mmMC_VM_MARC_BASE_HI_0_BASE_IDX 1
+#define mmMC_VM_MARC_BASE_HI_1 0x5a96
+#define mmMC_VM_MARC_BASE_HI_1_BASE_IDX 1
+#define mmMC_VM_MARC_BASE_HI_2 0x5a97
+#define mmMC_VM_MARC_BASE_HI_2_BASE_IDX 1
+#define mmMC_VM_MARC_BASE_HI_3 0x5a98
+#define mmMC_VM_MARC_BASE_HI_3_BASE_IDX 1
+#define mmMC_VM_MARC_RELOC_LO_0 0x5a99
+#define mmMC_VM_MARC_RELOC_LO_0_BASE_IDX 1
+#define mmMC_VM_MARC_RELOC_LO_1 0x5a9a
+#define mmMC_VM_MARC_RELOC_LO_1_BASE_IDX 1
+#define mmMC_VM_MARC_RELOC_LO_2 0x5a9b
+#define mmMC_VM_MARC_RELOC_LO_2_BASE_IDX 1
+#define mmMC_VM_MARC_RELOC_LO_3 0x5a9c
+#define mmMC_VM_MARC_RELOC_LO_3_BASE_IDX 1
+#define mmMC_VM_MARC_RELOC_HI_0 0x5a9d
+#define mmMC_VM_MARC_RELOC_HI_0_BASE_IDX 1
+#define mmMC_VM_MARC_RELOC_HI_1 0x5a9e
+#define mmMC_VM_MARC_RELOC_HI_1_BASE_IDX 1
+#define mmMC_VM_MARC_RELOC_HI_2 0x5a9f
+#define mmMC_VM_MARC_RELOC_HI_2_BASE_IDX 1
+#define mmMC_VM_MARC_RELOC_HI_3 0x5aa0
+#define mmMC_VM_MARC_RELOC_HI_3_BASE_IDX 1
+#define mmMC_VM_MARC_LEN_LO_0 0x5aa1
+#define mmMC_VM_MARC_LEN_LO_0_BASE_IDX 1
+#define mmMC_VM_MARC_LEN_LO_1 0x5aa2
+#define mmMC_VM_MARC_LEN_LO_1_BASE_IDX 1
+#define mmMC_VM_MARC_LEN_LO_2 0x5aa3
+#define mmMC_VM_MARC_LEN_LO_2_BASE_IDX 1
+#define mmMC_VM_MARC_LEN_LO_3 0x5aa4
+#define mmMC_VM_MARC_LEN_LO_3_BASE_IDX 1
+#define mmMC_VM_MARC_LEN_HI_0 0x5aa5
+#define mmMC_VM_MARC_LEN_HI_0_BASE_IDX 1
+#define mmMC_VM_MARC_LEN_HI_1 0x5aa6
+#define mmMC_VM_MARC_LEN_HI_1_BASE_IDX 1
+#define mmMC_VM_MARC_LEN_HI_2 0x5aa7
+#define mmMC_VM_MARC_LEN_HI_2_BASE_IDX 1
+#define mmMC_VM_MARC_LEN_HI_3 0x5aa8
+#define mmMC_VM_MARC_LEN_HI_3_BASE_IDX 1
+#define mmVM_IOMMU_CONTROL_REGISTER 0x5aa9
+#define mmVM_IOMMU_CONTROL_REGISTER_BASE_IDX 1
+#define mmVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x5aaa
+#define mmVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL 0x5aab
+#define mmVM_PCIE_ATS_CNTL_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_0 0x5aac
+#define mmVM_PCIE_ATS_CNTL_VF_0_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_1 0x5aad
+#define mmVM_PCIE_ATS_CNTL_VF_1_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_2 0x5aae
+#define mmVM_PCIE_ATS_CNTL_VF_2_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_3 0x5aaf
+#define mmVM_PCIE_ATS_CNTL_VF_3_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_4 0x5ab0
+#define mmVM_PCIE_ATS_CNTL_VF_4_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_5 0x5ab1
+#define mmVM_PCIE_ATS_CNTL_VF_5_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_6 0x5ab2
+#define mmVM_PCIE_ATS_CNTL_VF_6_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_7 0x5ab3
+#define mmVM_PCIE_ATS_CNTL_VF_7_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_8 0x5ab4
+#define mmVM_PCIE_ATS_CNTL_VF_8_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_9 0x5ab5
+#define mmVM_PCIE_ATS_CNTL_VF_9_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_10 0x5ab6
+#define mmVM_PCIE_ATS_CNTL_VF_10_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_11 0x5ab7
+#define mmVM_PCIE_ATS_CNTL_VF_11_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_12 0x5ab8
+#define mmVM_PCIE_ATS_CNTL_VF_12_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_13 0x5ab9
+#define mmVM_PCIE_ATS_CNTL_VF_13_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_14 0x5aba
+#define mmVM_PCIE_ATS_CNTL_VF_14_BASE_IDX 1
+#define mmVM_PCIE_ATS_CNTL_VF_15 0x5abb
+#define mmVM_PCIE_ATS_CNTL_VF_15_BASE_IDX 1
+#define mmUTCL2_CGTT_CLK_CTRL 0x5abc
+#define mmUTCL2_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmMC_SHARED_ACTIVE_FCN_ID 0x5abd
+#define mmMC_SHARED_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmMC_VM_XGMI_GPUIOV_ENABLE 0x5abe
+#define mmMC_VM_XGMI_GPUIOV_ENABLE_BASE_IDX 1
+
+
+// addressBlock: gc_hypdec
+// base address: 0x3e000
+#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
+#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
+#define mmCP_PFP_UCODE_ADDR 0x5814
+#define mmCP_PFP_UCODE_ADDR_BASE_IDX 1
+#define mmCP_HYP_PFP_UCODE_DATA 0x5815
+#define mmCP_HYP_PFP_UCODE_DATA_BASE_IDX 1
+#define mmCP_PFP_UCODE_DATA 0x5815
+#define mmCP_PFP_UCODE_DATA_BASE_IDX 1
+#define mmCP_HYP_ME_UCODE_ADDR 0x5816
+#define mmCP_HYP_ME_UCODE_ADDR_BASE_IDX 1
+#define mmCP_ME_RAM_RADDR 0x5816
+#define mmCP_ME_RAM_RADDR_BASE_IDX 1
+#define mmCP_ME_RAM_WADDR 0x5816
+#define mmCP_ME_RAM_WADDR_BASE_IDX 1
+#define mmCP_HYP_ME_UCODE_DATA 0x5817
+#define mmCP_HYP_ME_UCODE_DATA_BASE_IDX 1
+#define mmCP_ME_RAM_DATA 0x5817
+#define mmCP_ME_RAM_DATA_BASE_IDX 1
+#define mmCP_CE_UCODE_ADDR 0x5818
+#define mmCP_CE_UCODE_ADDR_BASE_IDX 1
+#define mmCP_HYP_CE_UCODE_ADDR 0x5818
+#define mmCP_HYP_CE_UCODE_ADDR_BASE_IDX 1
+#define mmCP_CE_UCODE_DATA 0x5819
+#define mmCP_CE_UCODE_DATA_BASE_IDX 1
+#define mmCP_HYP_CE_UCODE_DATA 0x5819
+#define mmCP_HYP_CE_UCODE_DATA_BASE_IDX 1
+#define mmCP_HYP_MEC1_UCODE_ADDR 0x581a
+#define mmCP_HYP_MEC1_UCODE_ADDR_BASE_IDX 1
+#define mmCP_MEC_ME1_UCODE_ADDR 0x581a
+#define mmCP_MEC_ME1_UCODE_ADDR_BASE_IDX 1
+#define mmCP_HYP_MEC1_UCODE_DATA 0x581b
+#define mmCP_HYP_MEC1_UCODE_DATA_BASE_IDX 1
+#define mmCP_MEC_ME1_UCODE_DATA 0x581b
+#define mmCP_MEC_ME1_UCODE_DATA_BASE_IDX 1
+#define mmCP_HYP_MEC2_UCODE_ADDR 0x581c
+#define mmCP_HYP_MEC2_UCODE_ADDR_BASE_IDX 1
+#define mmCP_MEC_ME2_UCODE_ADDR 0x581c
+#define mmCP_MEC_ME2_UCODE_ADDR_BASE_IDX 1
+#define mmCP_HYP_MEC2_UCODE_DATA 0x581d
+#define mmCP_HYP_MEC2_UCODE_DATA_BASE_IDX 1
+#define mmCP_MEC_ME2_UCODE_DATA 0x581d
+#define mmCP_MEC_ME2_UCODE_DATA_BASE_IDX 1
+#define mmCP_HYP_PFP_UCODE_CHKSUM 0x581e
+#define mmCP_HYP_PFP_UCODE_CHKSUM_BASE_IDX 1
+#define mmCP_HYP_CE_UCODE_CHKSUM 0x581f
+#define mmCP_HYP_CE_UCODE_CHKSUM_BASE_IDX 1
+#define mmCP_HYP_ME_UCODE_CHKSUM 0x5820
+#define mmCP_HYP_ME_UCODE_CHKSUM_BASE_IDX 1
+#define mmCP_HYP_MEC_ME1_UCODE_CHKSUM 0x5821
+#define mmCP_HYP_MEC_ME1_UCODE_CHKSUM_BASE_IDX 1
+#define mmCP_HYP_MEC_ME2_UCODE_CHKSUM 0x5822
+#define mmCP_HYP_MEC_ME2_UCODE_CHKSUM_BASE_IDX 1
+#define mmRLC_GPM_UCODE_ADDR 0x583c
+#define mmRLC_GPM_UCODE_ADDR_BASE_IDX 1
+#define mmRLC_GPM_UCODE_DATA 0x583d
+#define mmRLC_GPM_UCODE_DATA_BASE_IDX 1
+#define mmGRBM_GFX_INDEX_SR_SELECT 0x5a00
+#define mmGRBM_GFX_INDEX_SR_SELECT_BASE_IDX 1
+#define mmGRBM_GFX_INDEX_SR_DATA 0x5a01
+#define mmGRBM_GFX_INDEX_SR_DATA_BASE_IDX 1
+#define mmGRBM_GFX_CNTL_SR_SELECT 0x5a02
+#define mmGRBM_GFX_CNTL_SR_SELECT_BASE_IDX 1
+#define mmGRBM_GFX_CNTL_SR_DATA 0x5a03
+#define mmGRBM_GFX_CNTL_SR_DATA_BASE_IDX 1
+#define mmGRBM_CAM_INDEX 0x5a04
+#define mmGRBM_CAM_INDEX_BASE_IDX 1
+#define mmGRBM_HYP_CAM_INDEX 0x5a04
+#define mmGRBM_HYP_CAM_INDEX_BASE_IDX 1
+#define mmGRBM_CAM_DATA 0x5a05
+#define mmGRBM_CAM_DATA_BASE_IDX 1
+#define mmGRBM_HYP_CAM_DATA 0x5a05
+#define mmGRBM_HYP_CAM_DATA_BASE_IDX 1
+#define mmRLC_GPU_IOV_VF_ENABLE 0x5b00
+#define mmRLC_GPU_IOV_VF_ENABLE_BASE_IDX 1
+#define mmRLC_GPU_IOV_CFG_REG6 0x5b06
+#define mmRLC_GPU_IOV_CFG_REG6_BASE_IDX 1
+#define mmRLC_GPU_IOV_CFG_REG8 0x5b20
+#define mmRLC_GPU_IOV_CFG_REG8_BASE_IDX 1
+#define mmRLC_RLCV_TIMER_INT_0 0x5b25
+#define mmRLC_RLCV_TIMER_INT_0_BASE_IDX 1
+#define mmRLC_RLCV_TIMER_CTRL 0x5b26
+#define mmRLC_RLCV_TIMER_CTRL_BASE_IDX 1
+#define mmRLC_RLCV_TIMER_STAT 0x5b27
+#define mmRLC_RLCV_TIMER_STAT_BASE_IDX 1
+#define mmRLC_GPU_IOV_VF_DOORBELL_STATUS 0x5b2a
+#define mmRLC_GPU_IOV_VF_DOORBELL_STATUS_BASE_IDX 1
+#define mmRLC_GPU_IOV_VF_DOORBELL_STATUS_SET 0x5b2b
+#define mmRLC_GPU_IOV_VF_DOORBELL_STATUS_SET_BASE_IDX 1
+#define mmRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR 0x5b2c
+#define mmRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR_BASE_IDX 1
+#define mmRLC_GPU_IOV_VF_MASK 0x5b2d
+#define mmRLC_GPU_IOV_VF_MASK_BASE_IDX 1
+#define mmRLC_HYP_SEMAPHORE_0 0x5b2e
+#define mmRLC_HYP_SEMAPHORE_0_BASE_IDX 1
+#define mmRLC_HYP_SEMAPHORE_1 0x5b2f
+#define mmRLC_HYP_SEMAPHORE_1_BASE_IDX 1
+#define mmRLC_CLK_CNTL 0x5b31
+#define mmRLC_CLK_CNTL_BASE_IDX 1
+#define mmRLC_GPU_IOV_SCH_BLOCK 0x5b34
+#define mmRLC_GPU_IOV_SCH_BLOCK_BASE_IDX 1
+#define mmRLC_GPU_IOV_CFG_REG1 0x5b35
+#define mmRLC_GPU_IOV_CFG_REG1_BASE_IDX 1
+#define mmRLC_GPU_IOV_CFG_REG2 0x5b36
+#define mmRLC_GPU_IOV_CFG_REG2_BASE_IDX 1
+#define mmRLC_GPU_IOV_VM_BUSY_STATUS 0x5b37
+#define mmRLC_GPU_IOV_VM_BUSY_STATUS_BASE_IDX 1
+#define mmRLC_GPU_IOV_SCH_0 0x5b38
+#define mmRLC_GPU_IOV_SCH_0_BASE_IDX 1
+#define mmRLC_GPU_IOV_ACTIVE_FCN_ID 0x5b39
+#define mmRLC_GPU_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define mmRLC_GPU_IOV_SCH_3 0x5b3a
+#define mmRLC_GPU_IOV_SCH_3_BASE_IDX 1
+#define mmRLC_GPU_IOV_SCH_1 0x5b3b
+#define mmRLC_GPU_IOV_SCH_1_BASE_IDX 1
+#define mmRLC_GPU_IOV_SCH_2 0x5b3c
+#define mmRLC_GPU_IOV_SCH_2_BASE_IDX 1
+#define mmRLC_GPU_IOV_INT_STAT 0x5b3f
+#define mmRLC_GPU_IOV_INT_STAT_BASE_IDX 1
+#define mmRLC_RLCV_TIMER_INT_1 0x5b40
+#define mmRLC_RLCV_TIMER_INT_1_BASE_IDX 1
+#define mmRLC_GPU_IOV_UCODE_ADDR 0x5b42
+#define mmRLC_GPU_IOV_UCODE_ADDR_BASE_IDX 1
+#define mmRLC_GPU_IOV_UCODE_DATA 0x5b43
+#define mmRLC_GPU_IOV_UCODE_DATA_BASE_IDX 1
+#define mmRLC_GPU_IOV_SCRATCH_ADDR 0x5b44
+#define mmRLC_GPU_IOV_SCRATCH_ADDR_BASE_IDX 1
+#define mmRLC_GPU_IOV_SCRATCH_DATA 0x5b45
+#define mmRLC_GPU_IOV_SCRATCH_DATA_BASE_IDX 1
+#define mmRLC_GPU_IOV_F32_CNTL 0x5b46
+#define mmRLC_GPU_IOV_F32_CNTL_BASE_IDX 1
+#define mmRLC_GPU_IOV_F32_RESET 0x5b47
+#define mmRLC_GPU_IOV_F32_RESET_BASE_IDX 1
+#define mmRLC_GPU_IOV_SDMA0_STATUS 0x5b48
+#define mmRLC_GPU_IOV_SDMA0_STATUS_BASE_IDX 1
+#define mmRLC_GPU_IOV_SDMA1_STATUS 0x5b49
+#define mmRLC_GPU_IOV_SDMA1_STATUS_BASE_IDX 1
+#define mmRLC_GPU_IOV_SMU_RESPONSE 0x5b4a
+#define mmRLC_GPU_IOV_SMU_RESPONSE_BASE_IDX 1
+#define mmRLC_GPU_IOV_VIRT_RESET_REQ 0x5b4c
+#define mmRLC_GPU_IOV_VIRT_RESET_REQ_BASE_IDX 1
+#define mmRLC_GPU_IOV_RLC_RESPONSE 0x5b4d
+#define mmRLC_GPU_IOV_RLC_RESPONSE_BASE_IDX 1
+#define mmRLC_GPU_IOV_INT_DISABLE 0x5b4e
+#define mmRLC_GPU_IOV_INT_DISABLE_BASE_IDX 1
+#define mmRLC_GPU_IOV_INT_FORCE 0x5b4f
+#define mmRLC_GPU_IOV_INT_FORCE_BASE_IDX 1
+#define mmRLC_GPU_IOV_SDMA0_BUSY_STATUS 0x5b50
+#define mmRLC_GPU_IOV_SDMA0_BUSY_STATUS_BASE_IDX 1
+#define mmRLC_GPU_IOV_SDMA1_BUSY_STATUS 0x5b51
+#define mmRLC_GPU_IOV_SDMA1_BUSY_STATUS_BASE_IDX 1
+#define mmRLC_HYP_SEMAPHORE_2 0x5b52
+#define mmRLC_HYP_SEMAPHORE_2_BASE_IDX 1
+#define mmRLC_HYP_SEMAPHORE_3 0x5b53
+#define mmRLC_HYP_SEMAPHORE_3_BASE_IDX 1
+
+
+// addressBlock: gccacind
+// base address: 0x0
+#define ixGC_CAC_CNTL 0x0000
+#define ixGC_CAC_OVR_SEL 0x0001
+#define ixGC_CAC_OVR_VAL 0x0002
+#define ixGC_CAC_WEIGHT_BCI_0 0x0003
+#define ixGC_CAC_WEIGHT_CB_0 0x0004
+#define ixGC_CAC_WEIGHT_CB_1 0x0005
+#define ixGC_CAC_WEIGHT_CP_0 0x0008
+#define ixGC_CAC_WEIGHT_CP_1 0x0009
+#define ixGC_CAC_WEIGHT_DB_0 0x000a
+#define ixGC_CAC_WEIGHT_DB_1 0x000b
+#define ixGC_CAC_WEIGHT_GDS_0 0x000e
+#define ixGC_CAC_WEIGHT_GDS_1 0x000f
+#define ixGC_CAC_WEIGHT_IA_0 0x0010
+#define ixGC_CAC_WEIGHT_LDS_0 0x0011
+#define ixGC_CAC_WEIGHT_LDS_1 0x0012
+#define ixGC_CAC_WEIGHT_PA_0 0x0013
+#define ixGC_CAC_WEIGHT_PC_0 0x0014
+#define ixGC_CAC_WEIGHT_SC_0 0x0015
+#define ixGC_CAC_WEIGHT_SPI_0 0x0016
+#define ixGC_CAC_WEIGHT_SPI_1 0x0017
+#define ixGC_CAC_WEIGHT_SPI_2 0x0018
+#define ixGC_CAC_WEIGHT_SQ_0 0x001a
+#define ixGC_CAC_WEIGHT_SQ_1 0x001b
+#define ixGC_CAC_WEIGHT_SQ_2 0x001c
+#define ixGC_CAC_WEIGHT_SQ_3 0x001d
+#define ixGC_CAC_WEIGHT_SQ_4 0x001e
+#define ixGC_CAC_WEIGHT_SX_0 0x001f
+#define ixGC_CAC_WEIGHT_SXRB_0 0x0020
+#define ixGC_CAC_WEIGHT_TA_0 0x0021
+#define ixGC_CAC_WEIGHT_TCC_0 0x0022
+#define ixGC_CAC_WEIGHT_TCC_1 0x0023
+#define ixGC_CAC_WEIGHT_TCC_2 0x0024
+#define ixGC_CAC_WEIGHT_TCP_0 0x0025
+#define ixGC_CAC_WEIGHT_TCP_1 0x0026
+#define ixGC_CAC_WEIGHT_TCP_2 0x0027
+#define ixGC_CAC_WEIGHT_TD_0 0x0028
+#define ixGC_CAC_WEIGHT_TD_1 0x0029
+#define ixGC_CAC_WEIGHT_TD_2 0x002a
+#define ixGC_CAC_WEIGHT_VGT_0 0x002b
+#define ixGC_CAC_WEIGHT_VGT_1 0x002c
+#define ixGC_CAC_WEIGHT_WD_0 0x002d
+#define ixGC_CAC_WEIGHT_CU_0 0x0032
+#define ixGC_CAC_ACC_BCI0 0x0042
+#define ixGC_CAC_ACC_CB0 0x0043
+#define ixGC_CAC_ACC_CB1 0x0044
+#define ixGC_CAC_ACC_CB2 0x0045
+#define ixGC_CAC_ACC_CB3 0x0046
+#define ixGC_CAC_ACC_CP0 0x004b
+#define ixGC_CAC_ACC_CP1 0x004c
+#define ixGC_CAC_ACC_CP2 0x004d
+#define ixGC_CAC_ACC_DB0 0x004e
+#define ixGC_CAC_ACC_DB1 0x004f
+#define ixGC_CAC_ACC_DB2 0x0050
+#define ixGC_CAC_ACC_DB3 0x0051
+#define ixGC_CAC_ACC_GDS0 0x0056
+#define ixGC_CAC_ACC_GDS1 0x0057
+#define ixGC_CAC_ACC_GDS2 0x0058
+#define ixGC_CAC_ACC_GDS3 0x0059
+#define ixGC_CAC_ACC_IA0 0x005a
+#define ixGC_CAC_ACC_LDS0 0x005b
+#define ixGC_CAC_ACC_LDS1 0x005c
+#define ixGC_CAC_ACC_LDS2 0x005d
+#define ixGC_CAC_ACC_LDS3 0x005e
+#define ixGC_CAC_ACC_PA0 0x005f
+#define ixGC_CAC_ACC_PA1 0x0060
+#define ixGC_CAC_ACC_PC0 0x0061
+#define ixGC_CAC_ACC_SC0 0x0062
+#define ixGC_CAC_ACC_SPI0 0x0063
+#define ixGC_CAC_ACC_SPI1 0x0064
+#define ixGC_CAC_ACC_SPI2 0x0065
+#define ixGC_CAC_ACC_SPI3 0x0066
+#define ixGC_CAC_ACC_SPI4 0x0067
+#define ixGC_CAC_ACC_SPI5 0x0068
+#define ixGC_CAC_WEIGHT_UTCL2_ATCL2_0 0x006f
+#define ixGC_CAC_ACC_EA0 0x0070
+#define ixGC_CAC_ACC_EA1 0x0071
+#define ixGC_CAC_ACC_EA2 0x0072
+#define ixGC_CAC_ACC_EA3 0x0073
+#define ixGC_CAC_ACC_UTCL2_ATCL20 0x0074
+#define ixGC_CAC_OVRD_EA 0x0075
+#define ixGC_CAC_OVRD_UTCL2_ATCL2 0x0076
+#define ixGC_CAC_WEIGHT_EA_0 0x0077
+#define ixGC_CAC_WEIGHT_EA_1 0x0078
+#define ixGC_CAC_WEIGHT_RMI_0 0x0079
+#define ixGC_CAC_ACC_RMI0 0x007a
+#define ixGC_CAC_OVRD_RMI 0x007b
+#define ixGC_CAC_WEIGHT_UTCL2_ATCL2_1 0x007c
+#define ixGC_CAC_ACC_UTCL2_ATCL21 0x007d
+#define ixGC_CAC_ACC_UTCL2_ATCL22 0x007e
+#define ixGC_CAC_ACC_UTCL2_ATCL23 0x007f
+#define ixGC_CAC_ACC_EA4 0x0080
+#define ixGC_CAC_ACC_EA5 0x0081
+#define ixGC_CAC_WEIGHT_EA_2 0x0082
+#define ixGC_CAC_ACC_SQ0_LOWER 0x0089
+#define ixGC_CAC_ACC_SQ0_UPPER 0x008a
+#define ixGC_CAC_ACC_SQ1_LOWER 0x008b
+#define ixGC_CAC_ACC_SQ1_UPPER 0x008c
+#define ixGC_CAC_ACC_SQ2_LOWER 0x008d
+#define ixGC_CAC_ACC_SQ2_UPPER 0x008e
+#define ixGC_CAC_ACC_SQ3_LOWER 0x008f
+#define ixGC_CAC_ACC_SQ3_UPPER 0x0090
+#define ixGC_CAC_ACC_SQ4_LOWER 0x0091
+#define ixGC_CAC_ACC_SQ4_UPPER 0x0092
+#define ixGC_CAC_ACC_SQ5_LOWER 0x0093
+#define ixGC_CAC_ACC_SQ5_UPPER 0x0094
+#define ixGC_CAC_ACC_SQ6_LOWER 0x0095
+#define ixGC_CAC_ACC_SQ6_UPPER 0x0096
+#define ixGC_CAC_ACC_SQ7_LOWER 0x0097
+#define ixGC_CAC_ACC_SQ7_UPPER 0x0098
+#define ixGC_CAC_ACC_SQ8_LOWER 0x0099
+#define ixGC_CAC_ACC_SQ8_UPPER 0x009a
+#define ixGC_CAC_ACC_SX0 0x009b
+#define ixGC_CAC_ACC_SXRB0 0x009c
+#define ixGC_CAC_ACC_SXRB1 0x009d
+#define ixGC_CAC_ACC_TA0 0x009e
+#define ixGC_CAC_ACC_TCC0 0x009f
+#define ixGC_CAC_ACC_TCC1 0x00a0
+#define ixGC_CAC_ACC_TCC2 0x00a1
+#define ixGC_CAC_ACC_TCC3 0x00a2
+#define ixGC_CAC_ACC_TCC4 0x00a3
+#define ixGC_CAC_ACC_TCP0 0x00a4
+#define ixGC_CAC_ACC_TCP1 0x00a5
+#define ixGC_CAC_ACC_TCP2 0x00a6
+#define ixGC_CAC_ACC_TCP3 0x00a7
+#define ixGC_CAC_ACC_TCP4 0x00a8
+#define ixGC_CAC_ACC_TD0 0x00a9
+#define ixGC_CAC_ACC_TD1 0x00aa
+#define ixGC_CAC_ACC_TD2 0x00ab
+#define ixGC_CAC_ACC_TD3 0x00ac
+#define ixGC_CAC_ACC_TD4 0x00ad
+#define ixGC_CAC_ACC_TD5 0x00ae
+#define ixGC_CAC_ACC_VGT0 0x00af
+#define ixGC_CAC_ACC_VGT1 0x00b0
+#define ixGC_CAC_ACC_VGT2 0x00b1
+#define ixGC_CAC_ACC_WD0 0x00b2
+#define ixGC_CAC_ACC_CU0 0x00ba
+#define ixGC_CAC_ACC_CU1 0x00bb
+#define ixGC_CAC_ACC_CU2 0x00bc
+#define ixGC_CAC_ACC_CU3 0x00bd
+#define ixGC_CAC_ACC_CU4 0x00be
+#define ixGC_CAC_OVRD_BCI 0x00da
+#define ixGC_CAC_OVRD_CB 0x00db
+#define ixGC_CAC_OVRD_CP 0x00dd
+#define ixGC_CAC_OVRD_DB 0x00de
+#define ixGC_CAC_OVRD_GDS 0x00e0
+#define ixGC_CAC_OVRD_IA 0x00e1
+#define ixGC_CAC_OVRD_LDS 0x00e2
+#define ixGC_CAC_OVRD_PA 0x00e3
+#define ixGC_CAC_OVRD_PC 0x00e4
+#define ixGC_CAC_OVRD_SC 0x00e5
+#define ixGC_CAC_OVRD_SPI 0x00e6
+#define ixGC_CAC_OVRD_CU 0x00e7
+#define ixGC_CAC_OVRD_SQ 0x00e8
+#define ixGC_CAC_OVRD_SX 0x00e9
+#define ixGC_CAC_OVRD_SXRB 0x00ea
+#define ixGC_CAC_OVRD_TA 0x00eb
+#define ixGC_CAC_OVRD_TCC 0x00ec
+#define ixGC_CAC_OVRD_TCP 0x00ed
+#define ixGC_CAC_OVRD_TD 0x00ee
+#define ixGC_CAC_OVRD_VGT 0x00ef
+#define ixGC_CAC_OVRD_WD 0x00f0
+#define ixGC_CAC_ACC_BCI1 0x00ff
+#define ixGC_CAC_WEIGHT_UTCL2_ATCL2_2 0x0100
+#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_0 0x0101
+#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_1 0x0102
+#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_2 0x0103
+#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_3 0x0104
+#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_4 0x0105
+#define ixGC_CAC_WEIGHT_UTCL2_VML2_0 0x0106
+#define ixGC_CAC_WEIGHT_UTCL2_VML2_1 0x0107
+#define ixGC_CAC_WEIGHT_UTCL2_VML2_2 0x0108
+#define ixGC_CAC_ACC_UTCL2_ATCL24 0x0109
+#define ixGC_CAC_ACC_UTCL2_ROUTER0 0x010a
+#define ixGC_CAC_ACC_UTCL2_ROUTER1 0x010b
+#define ixGC_CAC_ACC_UTCL2_ROUTER2 0x010c
+#define ixGC_CAC_ACC_UTCL2_ROUTER3 0x010d
+#define ixGC_CAC_ACC_UTCL2_ROUTER4 0x010e
+#define ixGC_CAC_ACC_UTCL2_ROUTER5 0x010f
+#define ixGC_CAC_ACC_UTCL2_ROUTER6 0x0110
+#define ixGC_CAC_ACC_UTCL2_ROUTER7 0x0111
+#define ixGC_CAC_ACC_UTCL2_ROUTER8 0x0112
+#define ixGC_CAC_ACC_UTCL2_ROUTER9 0x0113
+#define ixGC_CAC_ACC_UTCL2_VML20 0x0114
+#define ixGC_CAC_ACC_UTCL2_VML21 0x0115
+#define ixGC_CAC_ACC_UTCL2_VML22 0x0116
+#define ixGC_CAC_ACC_UTCL2_VML23 0x0117
+#define ixGC_CAC_ACC_UTCL2_VML24 0x0118
+#define ixGC_CAC_OVRD_UTCL2_ROUTER 0x0119
+#define ixGC_CAC_OVRD_UTCL2_VML2 0x011a
+#define ixGC_CAC_WEIGHT_UTCL2_WALKER_0 0x011b
+#define ixGC_CAC_WEIGHT_UTCL2_WALKER_1 0x011c
+#define ixGC_CAC_WEIGHT_UTCL2_WALKER_2 0x011d
+#define ixGC_CAC_ACC_UTCL2_WALKER0 0x011e
+#define ixGC_CAC_ACC_UTCL2_WALKER1 0x011f
+#define ixGC_CAC_ACC_UTCL2_WALKER2 0x0120
+#define ixGC_CAC_ACC_UTCL2_WALKER3 0x0121
+#define ixGC_CAC_ACC_UTCL2_WALKER4 0x0122
+#define ixGC_CAC_OVRD_UTCL2_WALKER 0x0123
+#define ixPCC_STALL_PATTERN_1_2 0x0134
+#define ixPCC_STALL_PATTERN_3_4 0x0135
+#define ixPCC_STALL_PATTERN_5_6 0x0136
+#define ixPCC_STALL_PATTERN_7 0x0137
+#define ixPCC_THROT_REINCR_FIRST_PATN_1_8 0x0138
+#define ixPCC_THROT_REINCR_FIRST_PATN_9_16 0x0139
+#define ixPCC_THROT_REINCR_FIRST_PATN_17_20 0x0140
+#define ixPCC_THROT_DECR_FIRST_PATN_1_4 0x0141
+#define ixPCC_THROT_DECR_FIRST_PATN_5_7 0x0142
+
+
+// addressBlock: secacind
+// base address: 0x0
+#define ixSE_CAC_CNTL 0x0000
+#define ixSE_CAC_OVR_SEL 0x0001
+#define ixSE_CAC_OVR_VAL 0x0002
+
+
+// addressBlock: sqind
+// base address: 0x0
+#define ixSQ_WAVE_MODE 0x0011
+#define ixSQ_WAVE_STATUS 0x0012
+#define ixSQ_WAVE_TRAPSTS 0x0013
+#define ixSQ_WAVE_HW_ID 0x0014
+#define ixSQ_WAVE_GPR_ALLOC 0x0015
+#define ixSQ_WAVE_LDS_ALLOC 0x0016
+#define ixSQ_WAVE_IB_STS 0x0017
+#define ixSQ_WAVE_PC_LO 0x0018
+#define ixSQ_WAVE_PC_HI 0x0019
+#define ixSQ_WAVE_INST_DW0 0x001a
+#define ixSQ_WAVE_INST_DW1 0x001b
+#define ixSQ_WAVE_IB_DBG0 0x001c
+#define ixSQ_WAVE_IB_DBG1 0x001d
+#define ixSQ_WAVE_FLUSH_IB 0x001e
+#define ixSQ_WAVE_TTMP0 0x026c
+#define ixSQ_WAVE_TTMP1 0x026d
+#define ixSQ_WAVE_TTMP2 0x026e
+#define ixSQ_WAVE_TTMP3 0x026f
+#define ixSQ_WAVE_TTMP4 0x0270
+#define ixSQ_WAVE_TTMP5 0x0271
+#define ixSQ_WAVE_TTMP6 0x0272
+#define ixSQ_WAVE_TTMP7 0x0273
+#define ixSQ_WAVE_TTMP8 0x0274
+#define ixSQ_WAVE_TTMP9 0x0275
+#define ixSQ_WAVE_TTMP10 0x0276
+#define ixSQ_WAVE_TTMP11 0x0277
+#define ixSQ_WAVE_TTMP12 0x0278
+#define ixSQ_WAVE_TTMP13 0x0279
+#define ixSQ_WAVE_TTMP14 0x027a
+#define ixSQ_WAVE_TTMP15 0x027b
+#define ixSQ_WAVE_M0 0x027c
+#define ixSQ_WAVE_EXEC_LO 0x027e
+#define ixSQ_WAVE_EXEC_HI 0x027f
+#define ixSQ_INTERRUPT_WORD_AUTO_CTXID 0x20c0
+#define ixSQ_INTERRUPT_WORD_AUTO_HI 0x20c0
+#define ixSQ_INTERRUPT_WORD_AUTO_LO 0x20c0
+#define ixSQ_INTERRUPT_WORD_CMN_CTXID 0x20c0
+#define ixSQ_INTERRUPT_WORD_CMN_HI 0x20c0
+#define ixSQ_INTERRUPT_WORD_WAVE_CTXID 0x20c0
+#define ixSQ_INTERRUPT_WORD_WAVE_HI 0x20c0
+#define ixSQ_INTERRUPT_WORD_WAVE_LO 0x20c0
+
+
+// addressBlock: didtind
+// base address: 0x0
+#define ixDIDT_SQ_CTRL0 0x0000
+#define ixDIDT_SQ_CTRL2 0x0002
+#define ixDIDT_SQ_STALL_CTRL 0x0004
+#define ixDIDT_SQ_TUNING_CTRL 0x0005
+#define ixDIDT_SQ_STALL_AUTO_RELEASE_CTRL 0x0006
+#define ixDIDT_SQ_CTRL3 0x0007
+#define ixDIDT_SQ_STALL_PATTERN_1_2 0x0008
+#define ixDIDT_SQ_STALL_PATTERN_3_4 0x0009
+#define ixDIDT_SQ_STALL_PATTERN_5_6 0x000a
+#define ixDIDT_SQ_STALL_PATTERN_7 0x000b
+#define ixDIDT_SQ_MPD_SCALE_FACTOR 0x000c
+#define ixDIDT_SQ_THROTTLE_CNTL0 0x000d
+#define ixDIDT_SQ_THROTTLE_CNTL1 0x000e
+#define ixDIDT_SQ_THROTTLE_CNTL_STATUS 0x000f
+#define ixDIDT_SQ_WEIGHT0_3 0x0010
+#define ixDIDT_SQ_WEIGHT4_7 0x0011
+#define ixDIDT_SQ_WEIGHT8_11 0x0012
+#define ixDIDT_SQ_EDC_CTRL 0x0013
+#define ixDIDT_SQ_THROTTLE_CTRL 0x0014
+#define ixDIDT_SQ_EDC_STALL_PATTERN_1_2 0x0015
+#define ixDIDT_SQ_EDC_STALL_PATTERN_3_4 0x0016
+#define ixDIDT_SQ_EDC_STALL_PATTERN_5_6 0x0017
+#define ixDIDT_SQ_EDC_STALL_PATTERN_7 0x0018
+#define ixDIDT_SQ_EDC_STALL_DELAY_1 0x001a
+#define ixDIDT_SQ_EDC_STALL_DELAY_2 0x001b
+#define ixDIDT_DB_CTRL0 0x0020
+#define ixDIDT_DB_CTRL2 0x0022
+#define ixDIDT_DB_STALL_CTRL 0x0024
+#define ixDIDT_DB_TUNING_CTRL 0x0025
+#define ixDIDT_DB_STALL_AUTO_RELEASE_CTRL 0x0026
+#define ixDIDT_DB_CTRL3 0x0027
+#define ixDIDT_DB_STALL_PATTERN_1_2 0x0028
+#define ixDIDT_DB_STALL_PATTERN_3_4 0x0029
+#define ixDIDT_DB_STALL_PATTERN_5_6 0x002a
+#define ixDIDT_DB_STALL_PATTERN_7 0x002b
+#define ixDIDT_DB_MPD_SCALE_FACTOR 0x002c
+#define ixDIDT_DB_THROTTLE_CNTL0 0x002d
+#define ixDIDT_DB_THROTTLE_CNTL1 0x002e
+#define ixDIDT_DB_THROTTLE_CNTL_STATUS 0x002f
+#define ixDIDT_DB_WEIGHT0_3 0x0030
+#define ixDIDT_DB_WEIGHT4_7 0x0031
+#define ixDIDT_DB_WEIGHT8_11 0x0032
+#define ixDIDT_DB_EDC_CTRL 0x0033
+#define ixDIDT_DB_THROTTLE_CTRL 0x0034
+#define ixDIDT_DB_EDC_STALL_PATTERN_1_2 0x0035
+#define ixDIDT_DB_EDC_STALL_PATTERN_3_4 0x0036
+#define ixDIDT_DB_EDC_STALL_PATTERN_5_6 0x0037
+#define ixDIDT_DB_EDC_STALL_PATTERN_7 0x0038
+#define ixDIDT_DB_EDC_STALL_DELAY_1 0x003a
+#define ixDIDT_TD_CTRL0 0x0040
+#define ixDIDT_TD_CTRL2 0x0042
+#define ixDIDT_TD_STALL_CTRL 0x0044
+#define ixDIDT_TD_TUNING_CTRL 0x0045
+#define ixDIDT_TD_STALL_AUTO_RELEASE_CTRL 0x0046
+#define ixDIDT_TD_CTRL3 0x0047
+#define ixDIDT_TD_STALL_PATTERN_1_2 0x0048
+#define ixDIDT_TD_STALL_PATTERN_3_4 0x0049
+#define ixDIDT_TD_STALL_PATTERN_5_6 0x004a
+#define ixDIDT_TD_STALL_PATTERN_7 0x004b
+#define ixDIDT_TD_MPD_SCALE_FACTOR 0x004c
+#define ixDIDT_TD_THROTTLE_CNTL0 0x004d
+#define ixDIDT_TD_THROTTLE_CNTL1 0x004e
+#define ixDIDT_TD_THROTTLE_CNTL_STATUS 0x004f
+#define ixDIDT_TD_WEIGHT0_3 0x0050
+#define ixDIDT_TD_WEIGHT4_7 0x0051
+#define ixDIDT_TD_WEIGHT8_11 0x0052
+#define ixDIDT_TD_EDC_CTRL 0x0053
+#define ixDIDT_TD_THROTTLE_CTRL 0x0054
+#define ixDIDT_TD_EDC_STALL_PATTERN_1_2 0x0055
+#define ixDIDT_TD_EDC_STALL_PATTERN_3_4 0x0056
+#define ixDIDT_TD_EDC_STALL_PATTERN_5_6 0x0057
+#define ixDIDT_TD_EDC_STALL_PATTERN_7 0x0058
+#define ixDIDT_TD_EDC_STALL_DELAY_1 0x005a
+#define ixDIDT_TD_EDC_STALL_DELAY_2 0x005b
+#define ixDIDT_TCP_CTRL0 0x0060
+#define ixDIDT_TCP_CTRL2 0x0062
+#define ixDIDT_TCP_STALL_CTRL 0x0064
+#define ixDIDT_TCP_TUNING_CTRL 0x0065
+#define ixDIDT_TCP_STALL_AUTO_RELEASE_CTRL 0x0066
+#define ixDIDT_TCP_CTRL3 0x0067
+#define ixDIDT_TCP_STALL_PATTERN_1_2 0x0068
+#define ixDIDT_TCP_STALL_PATTERN_3_4 0x0069
+#define ixDIDT_TCP_STALL_PATTERN_5_6 0x006a
+#define ixDIDT_TCP_STALL_PATTERN_7 0x006b
+#define ixDIDT_TCP_MPD_SCALE_FACTOR 0x006c
+#define ixDIDT_TCP_THROTTLE_CNTL0 0x006d
+#define ixDIDT_TCP_THROTTLE_CNTL1 0x006e
+#define ixDIDT_TCP_THROTTLE_CNTL_STATUS 0x006f
+#define ixDIDT_TCP_WEIGHT0_3 0x0070
+#define ixDIDT_TCP_WEIGHT4_7 0x0071
+#define ixDIDT_TCP_WEIGHT8_11 0x0072
+#define ixDIDT_TCP_EDC_CTRL 0x0073
+#define ixDIDT_TCP_THROTTLE_CTRL 0x0074
+#define ixDIDT_TCP_EDC_STALL_PATTERN_1_2 0x0075
+#define ixDIDT_TCP_EDC_STALL_PATTERN_3_4 0x0076
+#define ixDIDT_TCP_EDC_STALL_PATTERN_5_6 0x0077
+#define ixDIDT_TCP_EDC_STALL_PATTERN_7 0x0078
+#define ixDIDT_TCP_EDC_STALL_DELAY_1 0x007a
+#define ixDIDT_TCP_EDC_STALL_DELAY_2 0x007b
+#define ixDIDT_SQ_STALL_EVENT_COUNTER 0x00a0
+#define ixDIDT_DB_STALL_EVENT_COUNTER 0x00a1
+#define ixDIDT_TD_STALL_EVENT_COUNTER 0x00a2
+#define ixDIDT_TCP_STALL_EVENT_COUNTER 0x00a3
+#define ixDIDT_DBR_STALL_EVENT_COUNTER 0x00a4
+#define ixDIDT_SQ_CTRL1 0x00b0
+#define ixDIDT_SQ_EDC_THRESHOLD 0x00b1
+#define ixDIDT_DB_CTRL1 0x00b2
+#define ixDIDT_DB_EDC_THRESHOLD 0x00b3
+#define ixDIDT_TD_CTRL1 0x00b4
+#define ixDIDT_TD_EDC_THRESHOLD 0x00b5
+#define ixDIDT_TCP_CTRL1 0x00b6
+#define ixDIDT_TCP_EDC_THRESHOLD 0x00b7
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h
new file mode 100644
index 0000000..6626fc2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h
@@ -0,0 +1,31160 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _gc_9_2_1_SH_MASK_HEADER
+#define _gc_9_2_1_SH_MASK_HEADER
+
+
+// addressBlock: gc_grbmdec
+//GRBM_CNTL
+#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x0
+#define GRBM_CNTL__REPORT_LAST_RDERR__SHIFT 0x1f
+#define GRBM_CNTL__READ_TIMEOUT_MASK 0x000000FFL
+#define GRBM_CNTL__REPORT_LAST_RDERR_MASK 0x80000000L
+//GRBM_SKEW_CNTL
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x0
+#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x6
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x0000003FL
+#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0x00000FC0L
+//GRBM_STATUS2
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x4
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x6
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING__SHIFT 0xa
+#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING__SHIFT 0xb
+#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING__SHIFT 0xc
+#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING__SHIFT 0xd
+#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0xe
+#define GRBM_STATUS2__UTCL2_BUSY__SHIFT 0xf
+#define GRBM_STATUS2__EA_BUSY__SHIFT 0x10
+#define GRBM_STATUS2__RMI_BUSY__SHIFT 0x11
+#define GRBM_STATUS2__UTCL2_RQ_PENDING__SHIFT 0x12
+#define GRBM_STATUS2__CPF_RQ_PENDING__SHIFT 0x13
+#define GRBM_STATUS2__EA_LINK_BUSY__SHIFT 0x14
+#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x18
+#define GRBM_STATUS2__TC_BUSY__SHIFT 0x19
+#define GRBM_STATUS2__TCC_CC_RESIDENT__SHIFT 0x1a
+#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x1c
+#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x1e
+#define GRBM_STATUS2__CPAXI_BUSY__SHIFT 0x1f
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x00000010L
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS2__ME2PIPE0_RQ_PENDING_MASK 0x00000400L
+#define GRBM_STATUS2__ME2PIPE1_RQ_PENDING_MASK 0x00000800L
+#define GRBM_STATUS2__ME2PIPE2_RQ_PENDING_MASK 0x00001000L
+#define GRBM_STATUS2__ME2PIPE3_RQ_PENDING_MASK 0x00002000L
+#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x00004000L
+#define GRBM_STATUS2__UTCL2_BUSY_MASK 0x00008000L
+#define GRBM_STATUS2__EA_BUSY_MASK 0x00010000L
+#define GRBM_STATUS2__RMI_BUSY_MASK 0x00020000L
+#define GRBM_STATUS2__UTCL2_RQ_PENDING_MASK 0x00040000L
+#define GRBM_STATUS2__CPF_RQ_PENDING_MASK 0x00080000L
+#define GRBM_STATUS2__EA_LINK_BUSY_MASK 0x00100000L
+#define GRBM_STATUS2__RLC_BUSY_MASK 0x01000000L
+#define GRBM_STATUS2__TC_BUSY_MASK 0x02000000L
+#define GRBM_STATUS2__TCC_CC_RESIDENT_MASK 0x04000000L
+#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000L
+#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000L
+#define GRBM_STATUS2__CPAXI_BUSY_MASK 0x80000000L
+//GRBM_PWR_CNTL
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE__SHIFT 0x0
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE__SHIFT 0x2
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE__SHIFT 0x4
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE__SHIFT 0x6
+#define GRBM_PWR_CNTL__GFX_REQ_EN__SHIFT 0xe
+#define GRBM_PWR_CNTL__ALL_REQ_EN__SHIFT 0xf
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE_MASK 0x00000003L
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE_MASK 0x0000000CL
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE_MASK 0x00000030L
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE_MASK 0x000000C0L
+#define GRBM_PWR_CNTL__GFX_REQ_EN_MASK 0x00004000L
+#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
+//GRBM_STATUS
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS__DB_CLEAN__SHIFT 0xc
+#define GRBM_STATUS__CB_CLEAN__SHIFT 0xd
+#define GRBM_STATUS__TA_BUSY__SHIFT 0xe
+#define GRBM_STATUS__GDS_BUSY__SHIFT 0xf
+#define GRBM_STATUS__WD_BUSY_NO_DMA__SHIFT 0x10
+#define GRBM_STATUS__VGT_BUSY__SHIFT 0x11
+#define GRBM_STATUS__IA_BUSY_NO_DMA__SHIFT 0x12
+#define GRBM_STATUS__IA_BUSY__SHIFT 0x13
+#define GRBM_STATUS__SX_BUSY__SHIFT 0x14
+#define GRBM_STATUS__WD_BUSY__SHIFT 0x15
+#define GRBM_STATUS__SPI_BUSY__SHIFT 0x16
+#define GRBM_STATUS__BCI_BUSY__SHIFT 0x17
+#define GRBM_STATUS__SC_BUSY__SHIFT 0x18
+#define GRBM_STATUS__PA_BUSY__SHIFT 0x19
+#define GRBM_STATUS__DB_BUSY__SHIFT 0x1a
+#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x1c
+#define GRBM_STATUS__CP_BUSY__SHIFT 0x1d
+#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS__DB_CLEAN_MASK 0x00001000L
+#define GRBM_STATUS__CB_CLEAN_MASK 0x00002000L
+#define GRBM_STATUS__TA_BUSY_MASK 0x00004000L
+#define GRBM_STATUS__GDS_BUSY_MASK 0x00008000L
+#define GRBM_STATUS__WD_BUSY_NO_DMA_MASK 0x00010000L
+#define GRBM_STATUS__VGT_BUSY_MASK 0x00020000L
+#define GRBM_STATUS__IA_BUSY_NO_DMA_MASK 0x00040000L
+#define GRBM_STATUS__IA_BUSY_MASK 0x00080000L
+#define GRBM_STATUS__SX_BUSY_MASK 0x00100000L
+#define GRBM_STATUS__WD_BUSY_MASK 0x00200000L
+#define GRBM_STATUS__SPI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS__BCI_BUSY_MASK 0x00800000L
+#define GRBM_STATUS__SC_BUSY_MASK 0x01000000L
+#define GRBM_STATUS__PA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS__DB_BUSY_MASK 0x04000000L
+#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000L
+#define GRBM_STATUS__CP_BUSY_MASK 0x20000000L
+#define GRBM_STATUS__CB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
+//GRBM_STATUS_SE0
+#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE0__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE0__VGT_BUSY__SHIFT 0x17
+#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE0__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE0__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS_SE1
+#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE1__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE1__VGT_BUSY__SHIFT 0x17
+#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE1__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE1__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000L
+//GRBM_SOFT_RESET
+#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x0
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x2
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x10
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x11
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x12
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x13
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC__SHIFT 0x14
+#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI__SHIFT 0x15
+#define GRBM_SOFT_RESET__SOFT_RESET_EA__SHIFT 0x16
+#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x00000001L
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x00010000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x00020000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x00040000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x00080000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC_MASK 0x00100000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI_MASK 0x00200000L
+#define GRBM_SOFT_RESET__SOFT_RESET_EA_MASK 0x00400000L
+//GRBM_GFX_CLKEN_CNTL
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x0
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x8
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000FL
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001F00L
+//GRBM_WAIT_IDLE_CLOCKS
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x0
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0x000000FFL
+//GRBM_STATUS_SE2
+#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE2__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE2__VGT_BUSY__SHIFT 0x17
+#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE2__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE2__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS_SE3
+#define GRBM_STATUS_SE3__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE3__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE3__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE3__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE3__VGT_BUSY__SHIFT 0x17
+#define GRBM_STATUS_SE3__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE3__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE3__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE3__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE3__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE3__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE3__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE3__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE3__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE3__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE3__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE3__VGT_BUSY_MASK 0x00800000L
+#define GRBM_STATUS_SE3__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE3__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE3__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE3__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE3__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE3__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE3__CB_BUSY_MASK 0x80000000L
+//GRBM_READ_ERROR
+#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x2
+#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x14
+#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x16
+#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x1f
+#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x0003FFFCL
+#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x00300000L
+#define GRBM_READ_ERROR__READ_MEID_MASK 0x00C00000L
+#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
+//GRBM_READ_ERROR2
+#define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT 0x10
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x15
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x16
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x17
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x18
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x19
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x1a
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x1b
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x1c
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x1d
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
+#define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK 0x00010000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x00200000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x00400000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x00800000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x01000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x02000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x04000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x08000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000L
+//GRBM_INT_CNTL
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x0
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x13
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x00000001L
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x00080000L
+//GRBM_TRAP_OP
+#define GRBM_TRAP_OP__RW__SHIFT 0x0
+#define GRBM_TRAP_OP__RW_MASK 0x00000001L
+//GRBM_TRAP_ADDR
+#define GRBM_TRAP_ADDR__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_ADDR_MSK
+#define GRBM_TRAP_ADDR_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR_MSK__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_WD
+#define GRBM_TRAP_WD__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD__DATA_MASK 0xFFFFFFFFL
+//GRBM_TRAP_WD_MSK
+#define GRBM_TRAP_WD_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD_MSK__DATA_MASK 0xFFFFFFFFL
+//GRBM_DSM_BYPASS
+#define GRBM_DSM_BYPASS__BYPASS_BITS__SHIFT 0x0
+#define GRBM_DSM_BYPASS__BYPASS_EN__SHIFT 0x2
+#define GRBM_DSM_BYPASS__BYPASS_BITS_MASK 0x00000003L
+#define GRBM_DSM_BYPASS__BYPASS_EN_MASK 0x00000004L
+//GRBM_WRITE_ERROR
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC__SHIFT 0x0
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU__SHIFT 0x1
+#define GRBM_WRITE_ERROR__WRITE_SSRCID__SHIFT 0x2
+#define GRBM_WRITE_ERROR__WRITE_VFID__SHIFT 0x5
+#define GRBM_WRITE_ERROR__WRITE_VF__SHIFT 0xc
+#define GRBM_WRITE_ERROR__WRITE_VMID__SHIFT 0xd
+#define GRBM_WRITE_ERROR__WRITE_PIPEID__SHIFT 0x14
+#define GRBM_WRITE_ERROR__WRITE_MEID__SHIFT 0x16
+#define GRBM_WRITE_ERROR__WRITE_ERROR__SHIFT 0x1f
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC_MASK 0x00000001L
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU_MASK 0x00000002L
+#define GRBM_WRITE_ERROR__WRITE_SSRCID_MASK 0x0000001CL
+#define GRBM_WRITE_ERROR__WRITE_VFID_MASK 0x000001E0L
+#define GRBM_WRITE_ERROR__WRITE_VF_MASK 0x00001000L
+#define GRBM_WRITE_ERROR__WRITE_VMID_MASK 0x0001E000L
+#define GRBM_WRITE_ERROR__WRITE_PIPEID_MASK 0x00300000L
+#define GRBM_WRITE_ERROR__WRITE_MEID_MASK 0x00C00000L
+#define GRBM_WRITE_ERROR__WRITE_ERROR_MASK 0x80000000L
+//GRBM_IOV_ERROR
+#define GRBM_IOV_ERROR__IOV_ADDR__SHIFT 0x2
+#define GRBM_IOV_ERROR__IOV_VFID__SHIFT 0x14
+#define GRBM_IOV_ERROR__IOV_VF__SHIFT 0x1a
+#define GRBM_IOV_ERROR__IOV_OP__SHIFT 0x1b
+#define GRBM_IOV_ERROR__IOV_ERROR__SHIFT 0x1f
+#define GRBM_IOV_ERROR__IOV_ADDR_MASK 0x000FFFFCL
+#define GRBM_IOV_ERROR__IOV_VFID_MASK 0x03F00000L
+#define GRBM_IOV_ERROR__IOV_VF_MASK 0x04000000L
+#define GRBM_IOV_ERROR__IOV_OP_MASK 0x08000000L
+#define GRBM_IOV_ERROR__IOV_ERROR_MASK 0x80000000L
+//GRBM_CHIP_REVISION
+#define GRBM_CHIP_REVISION__CHIP_REVISION__SHIFT 0x0
+#define GRBM_CHIP_REVISION__CHIP_REVISION_MASK 0x000000FFL
+//GRBM_GFX_CNTL
+#define GRBM_GFX_CNTL__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL__QUEUEID_MASK 0x00000700L
+//GRBM_RSMU_CFG
+#define GRBM_RSMU_CFG__APERTURE_ID__SHIFT 0x0
+#define GRBM_RSMU_CFG__QOS__SHIFT 0xc
+#define GRBM_RSMU_CFG__POSTED_WR__SHIFT 0x10
+#define GRBM_RSMU_CFG__DEBUG_MASK__SHIFT 0x11
+#define GRBM_RSMU_CFG__APERTURE_ID_MASK 0x00000FFFL
+#define GRBM_RSMU_CFG__QOS_MASK 0x0000F000L
+#define GRBM_RSMU_CFG__POSTED_WR_MASK 0x00010000L
+#define GRBM_RSMU_CFG__DEBUG_MASK_MASK 0x00020000L
+//GRBM_IH_CREDIT
+#define GRBM_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define GRBM_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
+#define GRBM_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define GRBM_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
+//GRBM_PWR_CNTL2
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT__SHIFT 0x10
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT__SHIFT 0x14
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT_MASK 0x00010000L
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT_MASK 0x00100000L
+//GRBM_UTCL2_INVAL_RANGE_START
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA_MASK 0x0003FFFFL
+//GRBM_UTCL2_INVAL_RANGE_END
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA_MASK 0x0003FFFFL
+//GRBM_RSMU_READ_ERROR
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS__SHIFT 0x2
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF__SHIFT 0x14
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID__SHIFT 0x15
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE__SHIFT 0x1b
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR__SHIFT 0x1f
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS_MASK 0x000FFFFCL
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF_MASK 0x00100000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID_MASK 0x07E00000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE_MASK 0x08000000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_MASK 0x80000000L
+//GRBM_CHICKEN_BITS
+#define GRBM_CHICKEN_BITS__DISABLE_CP_VMID_RESET_REQ__SHIFT 0x0
+#define GRBM_CHICKEN_BITS__DISABLE_CP_VMID_RESET_REQ_MASK 0x00000001L
+//GRBM_FENCE_RANGE0
+#define GRBM_FENCE_RANGE0__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE0__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE0__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE0__END_MASK 0xFFFF0000L
+//GRBM_FENCE_RANGE1
+#define GRBM_FENCE_RANGE1__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE1__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE1__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE1__END_MASK 0xFFFF0000L
+//GRBM_NOWHERE
+#define GRBM_NOWHERE__DATA__SHIFT 0x0
+#define GRBM_NOWHERE__DATA_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG1
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG2
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG3
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG4
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG5
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG6
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG7
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_cpdec
+//CP_CPC_STATUS
+#define CP_CPC_STATUS__MEC1_BUSY__SHIFT 0x0
+#define CP_CPC_STATUS__MEC2_BUSY__SHIFT 0x1
+#define CP_CPC_STATUS__DC0_BUSY__SHIFT 0x2
+#define CP_CPC_STATUS__DC1_BUSY__SHIFT 0x3
+#define CP_CPC_STATUS__RCIU1_BUSY__SHIFT 0x4
+#define CP_CPC_STATUS__RCIU2_BUSY__SHIFT 0x5
+#define CP_CPC_STATUS__ROQ1_BUSY__SHIFT 0x6
+#define CP_CPC_STATUS__ROQ2_BUSY__SHIFT 0x7
+#define CP_CPC_STATUS__TCIU_BUSY__SHIFT 0xa
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY__SHIFT 0xb
+#define CP_CPC_STATUS__QU_BUSY__SHIFT 0xc
+#define CP_CPC_STATUS__UTCL2IU_BUSY__SHIFT 0xd
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY__SHIFT 0xe
+#define CP_CPC_STATUS__CPG_CPC_BUSY__SHIFT 0x1d
+#define CP_CPC_STATUS__CPF_CPC_BUSY__SHIFT 0x1e
+#define CP_CPC_STATUS__CPC_BUSY__SHIFT 0x1f
+#define CP_CPC_STATUS__MEC1_BUSY_MASK 0x00000001L
+#define CP_CPC_STATUS__MEC2_BUSY_MASK 0x00000002L
+#define CP_CPC_STATUS__DC0_BUSY_MASK 0x00000004L
+#define CP_CPC_STATUS__DC1_BUSY_MASK 0x00000008L
+#define CP_CPC_STATUS__RCIU1_BUSY_MASK 0x00000010L
+#define CP_CPC_STATUS__RCIU2_BUSY_MASK 0x00000020L
+#define CP_CPC_STATUS__ROQ1_BUSY_MASK 0x00000040L
+#define CP_CPC_STATUS__ROQ2_BUSY_MASK 0x00000080L
+#define CP_CPC_STATUS__TCIU_BUSY_MASK 0x00000400L
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY_MASK 0x00000800L
+#define CP_CPC_STATUS__QU_BUSY_MASK 0x00001000L
+#define CP_CPC_STATUS__UTCL2IU_BUSY_MASK 0x00002000L
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY_MASK 0x00004000L
+#define CP_CPC_STATUS__CPG_CPC_BUSY_MASK 0x20000000L
+#define CP_CPC_STATUS__CPF_CPC_BUSY_MASK 0x40000000L
+#define CP_CPC_STATUS__CPC_BUSY_MASK 0x80000000L
+//CP_CPC_BUSY_STAT
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY__SHIFT 0x0
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY__SHIFT 0x1
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY__SHIFT 0x2
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY__SHIFT 0x3
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY__SHIFT 0x4
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY__SHIFT 0x5
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY__SHIFT 0x6
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY__SHIFT 0x7
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY__SHIFT 0x9
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY__SHIFT 0xa
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY__SHIFT 0xb
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY__SHIFT 0xc
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY__SHIFT 0xd
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY__SHIFT 0x10
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY__SHIFT 0x11
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY__SHIFT 0x12
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY__SHIFT 0x13
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY__SHIFT 0x14
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY__SHIFT 0x15
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY__SHIFT 0x16
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY__SHIFT 0x17
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY__SHIFT 0x18
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY__SHIFT 0x19
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY__SHIFT 0x1a
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY__SHIFT 0x1b
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY__SHIFT 0x1c
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY__SHIFT 0x1d
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY_MASK 0x00000001L
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPOHRE_BUSY_MASK 0x00000002L
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY_MASK 0x00000004L
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY_MASK 0x00000008L
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY_MASK 0x00000010L
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY_MASK 0x00000020L
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY_MASK 0x00000040L
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY_MASK 0x00000080L
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY_MASK 0x00000100L
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY_MASK 0x00000200L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY_MASK 0x00000400L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY_MASK 0x00000800L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY_MASK 0x00001000L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY_MASK 0x00002000L
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY_MASK 0x00010000L
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPOHRE_BUSY_MASK 0x00020000L
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY_MASK 0x00040000L
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY_MASK 0x00080000L
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY_MASK 0x00100000L
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY_MASK 0x00200000L
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY_MASK 0x00400000L
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY_MASK 0x00800000L
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY_MASK 0x01000000L
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY_MASK 0x02000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY_MASK 0x04000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY_MASK 0x08000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY_MASK 0x10000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY_MASK 0x20000000L
+//CP_CPC_STALLED_STAT1
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL__SHIFT 0x3
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION__SHIFT 0x4
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL__SHIFT 0x6
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET__SHIFT 0x8
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU__SHIFT 0x9
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ__SHIFT 0xa
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA__SHIFT 0xd
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET__SHIFT 0x10
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU__SHIFT 0x11
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ__SHIFT 0x12
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA__SHIFT 0x15
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x16
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x17
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS__SHIFT 0x18
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL_MASK 0x00000008L
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION_MASK 0x00000010L
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL_MASK 0x00000040L
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET_MASK 0x00000100L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_MASK 0x00000200L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ_MASK 0x00000400L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA_MASK 0x00002000L
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET_MASK 0x00010000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_MASK 0x00020000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ_MASK 0x00040000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA_MASK 0x00200000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00400000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00800000L
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS_MASK 0x01000000L
+//CP_CPF_STATUS
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY__SHIFT 0x0
+#define CP_CPF_STATUS__CSF_BUSY__SHIFT 0x1
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY__SHIFT 0x4
+#define CP_CPF_STATUS__ROQ_RING_BUSY__SHIFT 0x5
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY__SHIFT 0x6
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY__SHIFT 0x7
+#define CP_CPF_STATUS__ROQ_STATE_BUSY__SHIFT 0x8
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY__SHIFT 0x9
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_CPF_STATUS__SEMAPHORE_BUSY__SHIFT 0xc
+#define CP_CPF_STATUS__INTERRUPT_BUSY__SHIFT 0xd
+#define CP_CPF_STATUS__TCIU_BUSY__SHIFT 0xe
+#define CP_CPF_STATUS__HQD_BUSY__SHIFT 0xf
+#define CP_CPF_STATUS__PRT_BUSY__SHIFT 0x10
+#define CP_CPF_STATUS__UTCL2IU_BUSY__SHIFT 0x11
+#define CP_CPF_STATUS__CPF_GFX_BUSY__SHIFT 0x1a
+#define CP_CPF_STATUS__CPF_CMP_BUSY__SHIFT 0x1b
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY__SHIFT 0x1c
+#define CP_CPF_STATUS__CPC_CPF_BUSY__SHIFT 0x1e
+#define CP_CPF_STATUS__CPF_BUSY__SHIFT 0x1f
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY_MASK 0x00000001L
+#define CP_CPF_STATUS__CSF_BUSY_MASK 0x00000002L
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY_MASK 0x00000010L
+#define CP_CPF_STATUS__ROQ_RING_BUSY_MASK 0x00000020L
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY_MASK 0x00000040L
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY_MASK 0x00000080L
+#define CP_CPF_STATUS__ROQ_STATE_BUSY_MASK 0x00000100L
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY_MASK 0x00000200L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_CPF_STATUS__SEMAPHORE_BUSY_MASK 0x00001000L
+#define CP_CPF_STATUS__INTERRUPT_BUSY_MASK 0x00002000L
+#define CP_CPF_STATUS__TCIU_BUSY_MASK 0x00004000L
+#define CP_CPF_STATUS__HQD_BUSY_MASK 0x00008000L
+#define CP_CPF_STATUS__PRT_BUSY_MASK 0x00010000L
+#define CP_CPF_STATUS__UTCL2IU_BUSY_MASK 0x00020000L
+#define CP_CPF_STATUS__CPF_GFX_BUSY_MASK 0x04000000L
+#define CP_CPF_STATUS__CPF_CMP_BUSY_MASK 0x08000000L
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY_MASK 0x30000000L
+#define CP_CPF_STATUS__CPC_CPF_BUSY_MASK 0x40000000L
+#define CP_CPF_STATUS__CPF_BUSY_MASK 0x80000000L
+//CP_CPF_BUSY_STAT
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY__SHIFT 0x1
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY__SHIFT 0x2
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY__SHIFT 0x3
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY__SHIFT 0x4
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY__SHIFT 0x5
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY__SHIFT 0x6
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY__SHIFT 0x7
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY__SHIFT 0x8
+#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS__SHIFT 0x9
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY__SHIFT 0xb
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY__SHIFT 0xc
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY__SHIFT 0xd
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY__SHIFT 0xf
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY__SHIFT 0x10
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY__SHIFT 0x11
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY__SHIFT 0x12
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY__SHIFT 0x13
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY__SHIFT 0x14
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY__SHIFT 0x15
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY__SHIFT 0x17
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY__SHIFT 0x19
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY__SHIFT 0x1a
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY__SHIFT 0x1b
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY__SHIFT 0x1c
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY__SHIFT 0x1d
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY__SHIFT 0x1e
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY__SHIFT 0x1f
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY_MASK 0x00000002L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY_MASK 0x00000004L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY_MASK 0x00000008L
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY_MASK 0x00000010L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY_MASK 0x00000020L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY_MASK 0x00000040L
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY_MASK 0x00000080L
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY_MASK 0x00000100L
+#define CP_CPF_BUSY_STAT__OUTSTANDING_READ_TAGS_MASK 0x00000200L
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY_MASK 0x00000800L
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY_MASK 0x00001000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY_MASK 0x00002000L
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY_MASK 0x00008000L
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY_MASK 0x00010000L
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY_MASK 0x00020000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY_MASK 0x00080000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY_MASK 0x00100000L
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY_MASK 0x00200000L
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY_MASK 0x02000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY_MASK 0x04000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY_MASK 0x08000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY_MASK 0x10000000L
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY_MASK 0x20000000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY_MASK 0x40000000L
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY_MASK 0x80000000L
+//CP_CPF_STALLED_STAT1
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA__SHIFT 0x0
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA__SHIFT 0x1
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA__SHIFT 0x2
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA__SHIFT 0x3
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE__SHIFT 0x5
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x6
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x7
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x8
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS__SHIFT 0x9
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS__SHIFT 0xa
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE__SHIFT 0xb
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA_MASK 0x00000001L
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA_MASK 0x00000002L
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA_MASK 0x00000004L
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA_MASK 0x00000008L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE_MASK 0x00000020L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000040L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00000080L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00000100L
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS_MASK 0x00000200L
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS_MASK 0x00000400L
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE_MASK 0x00000800L
+//CP_CPC_GRBM_FREE_COUNT
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+//CP_MEC_CNTL
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET__SHIFT 0x10
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET__SHIFT 0x11
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET__SHIFT 0x12
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET__SHIFT 0x13
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET__SHIFT 0x14
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET__SHIFT 0x15
+#define CP_MEC_CNTL__MEC_ME2_HALT__SHIFT 0x1c
+#define CP_MEC_CNTL__MEC_ME2_STEP__SHIFT 0x1d
+#define CP_MEC_CNTL__MEC_ME1_HALT__SHIFT 0x1e
+#define CP_MEC_CNTL__MEC_ME1_STEP__SHIFT 0x1f
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK 0x00010000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK 0x00020000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK 0x00040000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK 0x00080000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK 0x00100000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK 0x00200000L
+#define CP_MEC_CNTL__MEC_ME2_HALT_MASK 0x10000000L
+#define CP_MEC_CNTL__MEC_ME2_STEP_MASK 0x20000000L
+#define CP_MEC_CNTL__MEC_ME1_HALT_MASK 0x40000000L
+#define CP_MEC_CNTL__MEC_ME1_STEP_MASK 0x80000000L
+//CP_MEC_ME1_HEADER_DUMP
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_HEADER_DUMP
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_CPC_SCRATCH_INDEX
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+//CP_CPC_SCRATCH_DATA
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_CPF_GRBM_FREE_COUNT
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x00000007L
+//CP_CPC_HALT_HYST_COUNT
+#define CP_CPC_HALT_HYST_COUNT__COUNT__SHIFT 0x0
+#define CP_CPC_HALT_HYST_COUNT__COUNT_MASK 0x0000000FL
+//CP_CE_COMPARE_COUNT
+#define CP_CE_COMPARE_COUNT__COMPARE_COUNT__SHIFT 0x0
+#define CP_CE_COMPARE_COUNT__COMPARE_COUNT_MASK 0xFFFFFFFFL
+//CP_CE_DE_COUNT
+#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
+#define CP_CE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xFFFFFFFFL
+//CP_DE_CE_COUNT
+#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT__SHIFT 0x0
+#define CP_DE_CE_COUNT__CONST_ENGINE_COUNT_MASK 0xFFFFFFFFL
+//CP_DE_LAST_INVAL_COUNT
+#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT__SHIFT 0x0
+#define CP_DE_LAST_INVAL_COUNT__LAST_INVAL_COUNT_MASK 0xFFFFFFFFL
+//CP_DE_DE_COUNT
+#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT__SHIFT 0x0
+#define CP_DE_DE_COUNT__DRAW_ENGINE_COUNT_MASK 0xFFFFFFFFL
+//CP_STALLED_STAT3
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x2
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x3
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x4
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x5
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x6
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x7
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0xa
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0xc
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0xd
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0xe
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0xf
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x10
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x11
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE__SHIFT 0x12
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x13
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS__SHIFT 0x14
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x00000004L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x00000008L
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x00000010L
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x00000020L
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x00000080L
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x00000400L
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x00001000L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x00002000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x00004000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x00008000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM_MASK 0x00010000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00020000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE_MASK 0x00040000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS_MASK 0x00080000L
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS_MASK 0x00100000L
+//CP_STALLED_STAT1
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV__SHIFT 0x2
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV__SHIFT 0x4
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0xa
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0xc
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0xd
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA__SHIFT 0xe
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0xf
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x17
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x18
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x19
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x1a
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x1b
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x1c
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x1d
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_MASK 0x00000010L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x00000400L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x00001000L
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00002000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA_MASK 0x00004000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x00008000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x00800000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x01000000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x02000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x04000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x08000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000L
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x20000000L
+//CP_STALLED_STAT2
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x2
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x4
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x5
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x8
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x9
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0xa
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0xb
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0xc
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0xd
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0xe
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0xf
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x10
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x11
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x12
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x13
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x14
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE__SHIFT 0x15
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM__SHIFT 0x16
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x17
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x18
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x19
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x1a
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x1b
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x1c
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x1d
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x1e
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x1f
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x00000010L
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x00000020L
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x00000100L
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x00000200L
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x00000400L
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x00000800L
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x00001000L
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x00002000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x00004000L
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x00008000L
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00010000L
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00020000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x00040000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x00080000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00100000L
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_SC_EOP_DONE_MASK 0x00200000L
+#define CP_STALLED_STAT2__EOPD_FIFO_NEEDS_WR_CONFIRM_MASK 0x00400000L
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x00800000L
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x01000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x02000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x04000000L
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x08000000L
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000L
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000L
+//CP_BUSY_STAT
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x6
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x7
+#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x8
+#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x9
+#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0xa
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0xc
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0xd
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0xe
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0xf
+#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x11
+#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x12
+#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x13
+#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x14
+#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x15
+#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x16
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x00000040L
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x00000080L
+#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x00000100L
+#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x00000200L
+#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x00000400L
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x00001000L
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x00002000L
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x00004000L
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x00008000L
+#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x00020000L
+#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x00040000L
+#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x00080000L
+#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x00100000L
+#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x00200000L
+#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x00400000L
+//CP_STAT
+#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x9
+#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0xc
+#define CP_STAT__DC_BUSY__SHIFT 0xd
+#define CP_STAT__UTCL2IU_BUSY__SHIFT 0xe
+#define CP_STAT__PFP_BUSY__SHIFT 0xf
+#define CP_STAT__MEQ_BUSY__SHIFT 0x10
+#define CP_STAT__ME_BUSY__SHIFT 0x11
+#define CP_STAT__QUERY_BUSY__SHIFT 0x12
+#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x13
+#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x14
+#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x15
+#define CP_STAT__DMA_BUSY__SHIFT 0x16
+#define CP_STAT__RCIU_BUSY__SHIFT 0x17
+#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x18
+#define CP_STAT__CE_BUSY__SHIFT 0x1a
+#define CP_STAT__TCIU_BUSY__SHIFT 0x1b
+#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x1c
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x1d
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x1e
+#define CP_STAT__CP_BUSY__SHIFT 0x1f
+#define CP_STAT__ROQ_RING_BUSY_MASK 0x00000200L
+#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_STAT__ROQ_STATE_BUSY_MASK 0x00001000L
+#define CP_STAT__DC_BUSY_MASK 0x00002000L
+#define CP_STAT__UTCL2IU_BUSY_MASK 0x00004000L
+#define CP_STAT__PFP_BUSY_MASK 0x00008000L
+#define CP_STAT__MEQ_BUSY_MASK 0x00010000L
+#define CP_STAT__ME_BUSY_MASK 0x00020000L
+#define CP_STAT__QUERY_BUSY_MASK 0x00040000L
+#define CP_STAT__SEMAPHORE_BUSY_MASK 0x00080000L
+#define CP_STAT__INTERRUPT_BUSY_MASK 0x00100000L
+#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x00200000L
+#define CP_STAT__DMA_BUSY_MASK 0x00400000L
+#define CP_STAT__RCIU_BUSY_MASK 0x00800000L
+#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x01000000L
+#define CP_STAT__CE_BUSY_MASK 0x04000000L
+#define CP_STAT__TCIU_BUSY_MASK 0x08000000L
+#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000L
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000L
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000L
+#define CP_STAT__CP_BUSY_MASK 0x80000000L
+//CP_ME_HEADER_DUMP
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x0
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_PFP_HEADER_DUMP
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x0
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_GRBM_FREE_COUNT
+#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x8
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x10
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x00003F00L
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x003F0000L
+//CP_CE_HEADER_DUMP
+#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP__SHIFT 0x0
+#define CP_CE_HEADER_DUMP__CE_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_PFP_INSTR_PNTR
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_ME_INSTR_PNTR
+#define CP_ME_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_ME_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_CE_INSTR_PNTR
+#define CP_CE_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_CE_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC1_INSTR_PNTR
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC2_INSTR_PNTR
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_CSF_STAT
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x8
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x0001FF00L
+//CP_ME_CNTL
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x6
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x8
+#define CP_ME_CNTL__CE_PIPE0_RESET__SHIFT 0x10
+#define CP_ME_CNTL__CE_PIPE1_RESET__SHIFT 0x11
+#define CP_ME_CNTL__PFP_PIPE0_RESET__SHIFT 0x12
+#define CP_ME_CNTL__PFP_PIPE1_RESET__SHIFT 0x13
+#define CP_ME_CNTL__ME_PIPE0_RESET__SHIFT 0x14
+#define CP_ME_CNTL__ME_PIPE1_RESET__SHIFT 0x15
+#define CP_ME_CNTL__CE_HALT__SHIFT 0x18
+#define CP_ME_CNTL__CE_STEP__SHIFT 0x19
+#define CP_ME_CNTL__PFP_HALT__SHIFT 0x1a
+#define CP_ME_CNTL__PFP_STEP__SHIFT 0x1b
+#define CP_ME_CNTL__ME_HALT__SHIFT 0x1c
+#define CP_ME_CNTL__ME_STEP__SHIFT 0x1d
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x00000040L
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x00000100L
+#define CP_ME_CNTL__CE_PIPE0_RESET_MASK 0x00010000L
+#define CP_ME_CNTL__CE_PIPE1_RESET_MASK 0x00020000L
+#define CP_ME_CNTL__PFP_PIPE0_RESET_MASK 0x00040000L
+#define CP_ME_CNTL__PFP_PIPE1_RESET_MASK 0x00080000L
+#define CP_ME_CNTL__ME_PIPE0_RESET_MASK 0x00100000L
+#define CP_ME_CNTL__ME_PIPE1_RESET_MASK 0x00200000L
+#define CP_ME_CNTL__CE_HALT_MASK 0x01000000L
+#define CP_ME_CNTL__CE_STEP_MASK 0x02000000L
+#define CP_ME_CNTL__PFP_HALT_MASK 0x04000000L
+#define CP_ME_CNTL__PFP_STEP_MASK 0x08000000L
+#define CP_ME_CNTL__ME_HALT_MASK 0x10000000L
+#define CP_ME_CNTL__ME_STEP_MASK 0x20000000L
+//CP_CNTX_STAT
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x0
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x8
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x14
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x1c
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0x000000FFL
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x00000700L
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0x0FF00000L
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000L
+//CP_ME_PREEMPTION
+#define CP_ME_PREEMPTION__OBSOLETE__SHIFT 0x0
+#define CP_ME_PREEMPTION__OBSOLETE_MASK 0x00000001L
+//CP_ROQ_THRESHOLDS
+#define CP_ROQ_THRESHOLDS__IB1_START__SHIFT 0x0
+#define CP_ROQ_THRESHOLDS__IB2_START__SHIFT 0x8
+#define CP_ROQ_THRESHOLDS__IB1_START_MASK 0x000000FFL
+#define CP_ROQ_THRESHOLDS__IB2_START_MASK 0x0000FF00L
+//CP_MEQ_STQ_THRESHOLD
+#define CP_MEQ_STQ_THRESHOLD__STQ_START__SHIFT 0x0
+#define CP_MEQ_STQ_THRESHOLD__STQ_START_MASK 0x000000FFL
+//CP_RB2_RPTR
+#define CP_RB2_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB2_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB1_RPTR
+#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB1_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB0_RPTR
+#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB0_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_RPTR
+#define CP_RB_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_WPTR_DELAY
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x0
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x1c
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0x0FFFFFFFL
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xF0000000L
+//CP_RB_WPTR_POLL_CNTL
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x0
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0x0000FFFFL
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//CP_ROQ1_THRESHOLDS
+#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x0
+#define CP_ROQ1_THRESHOLDS__RB2_START__SHIFT 0x8
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0x10
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x18
+#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0x000000FFL
+#define CP_ROQ1_THRESHOLDS__RB2_START_MASK 0x0000FF00L
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0x00FF0000L
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0xFF000000L
+//CP_ROQ2_THRESHOLDS
+#define CP_ROQ2_THRESHOLDS__R2_IB1_START__SHIFT 0x0
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x8
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0x10
+#define CP_ROQ2_THRESHOLDS__R2_IB2_START__SHIFT 0x18
+#define CP_ROQ2_THRESHOLDS__R2_IB1_START_MASK 0x000000FFL
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0x0000FF00L
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0x00FF0000L
+#define CP_ROQ2_THRESHOLDS__R2_IB2_START_MASK 0xFF000000L
+//CP_STQ_THRESHOLDS
+#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x0
+#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x8
+#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x10
+#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0x000000FFL
+#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0x0000FF00L
+#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0x00FF0000L
+//CP_QUEUE_THRESHOLDS
+#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT 0x0
+#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT 0x8
+#define CP_QUEUE_THRESHOLDS__ROQ_IB1_START_MASK 0x0000003FL
+#define CP_QUEUE_THRESHOLDS__ROQ_IB2_START_MASK 0x00003F00L
+//CP_MEQ_THRESHOLDS
+#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x0
+#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x8
+#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0x000000FFL
+#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0x0000FF00L
+//CP_ROQ_AVAIL
+#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x0
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x10
+#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x000007FFL
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x07FF0000L
+//CP_STQ_AVAIL
+#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x0
+#define CP_STQ_AVAIL__STQ_CNT_MASK 0x000001FFL
+//CP_ROQ2_AVAIL
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x0
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x000007FFL
+//CP_MEQ_AVAIL
+#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x0
+#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x000003FFL
+//CP_CMD_INDEX
+#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x0
+#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0xc
+#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x10
+#define CP_CMD_INDEX__CMD_INDEX_MASK 0x000007FFL
+#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x00003000L
+#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x00070000L
+//CP_CMD_DATA
+#define CP_CMD_DATA__CMD_DATA__SHIFT 0x0
+#define CP_CMD_DATA__CMD_DATA_MASK 0xFFFFFFFFL
+//CP_ROQ_RB_STAT
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x0
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x10
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x000003FFL
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x03FF0000L
+//CP_ROQ_IB1_STAT
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x0
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x10
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x000003FFL
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x03FF0000L
+//CP_ROQ_IB2_STAT
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x0
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x10
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x000003FFL
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x03FF0000L
+//CP_STQ_STAT
+#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x0
+#define CP_STQ_STAT__STQ_RPTR_MASK 0x000003FFL
+//CP_STQ_WR_STAT
+#define CP_STQ_WR_STAT__STQ_WPTR__SHIFT 0x0
+#define CP_STQ_WR_STAT__STQ_WPTR_MASK 0x000003FFL
+//CP_MEQ_STAT
+#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x0
+#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x10
+#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x000003FFL
+#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x03FF0000L
+//CP_CEQ1_AVAIL
+#define CP_CEQ1_AVAIL__CEQ_CNT_RING__SHIFT 0x0
+#define CP_CEQ1_AVAIL__CEQ_CNT_IB1__SHIFT 0x10
+#define CP_CEQ1_AVAIL__CEQ_CNT_RING_MASK 0x000007FFL
+#define CP_CEQ1_AVAIL__CEQ_CNT_IB1_MASK 0x07FF0000L
+//CP_CEQ2_AVAIL
+#define CP_CEQ2_AVAIL__CEQ_CNT_IB2__SHIFT 0x0
+#define CP_CEQ2_AVAIL__CEQ_CNT_IB2_MASK 0x000007FFL
+//CP_CE_ROQ_RB_STAT
+#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY__SHIFT 0x0
+#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY__SHIFT 0x10
+#define CP_CE_ROQ_RB_STAT__CEQ_RPTR_PRIMARY_MASK 0x000003FFL
+#define CP_CE_ROQ_RB_STAT__CEQ_WPTR_PRIMARY_MASK 0x03FF0000L
+//CP_CE_ROQ_IB1_STAT
+#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1__SHIFT 0x0
+#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1__SHIFT 0x10
+#define CP_CE_ROQ_IB1_STAT__CEQ_RPTR_INDIRECT1_MASK 0x000003FFL
+#define CP_CE_ROQ_IB1_STAT__CEQ_WPTR_INDIRECT1_MASK 0x03FF0000L
+//CP_CE_ROQ_IB2_STAT
+#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2__SHIFT 0x0
+#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2__SHIFT 0x10
+#define CP_CE_ROQ_IB2_STAT__CEQ_RPTR_INDIRECT2_MASK 0x000003FFL
+#define CP_CE_ROQ_IB2_STAT__CEQ_WPTR_INDIRECT2_MASK 0x03FF0000L
+
+
+// addressBlock: gc_padec
+//VGT_VTX_VECT_EJECT_REG
+#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT__SHIFT 0x0
+#define VGT_VTX_VECT_EJECT_REG__PRIM_COUNT_MASK 0x0000007FL
+//VGT_DMA_DATA_FIFO_DEPTH
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH__SHIFT 0x9
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x000001FFL
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA2DRAW_FIFO_DEPTH_MASK 0x0007FE00L
+//VGT_DMA_REQ_FIFO_DEPTH
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_DRAW_INIT_FIFO_DEPTH
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_LAST_COPY_STATE
+#define VGT_LAST_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
+#define VGT_LAST_COPY_STATE__DST_STATE_ID__SHIFT 0x10
+#define VGT_LAST_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+#define VGT_LAST_COPY_STATE__DST_STATE_ID_MASK 0x00070000L
+//VGT_CACHE_INVALIDATION
+#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT 0x0
+#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT__SHIFT 0x4
+#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER__SHIFT 0x5
+#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT 0x6
+#define VGT_CACHE_INVALIDATION__USE_GS_DONE__SHIFT 0x9
+#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD__SHIFT 0xb
+#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN__SHIFT 0xc
+#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH__SHIFT 0xd
+#define VGT_CACHE_INVALIDATION__ES_LIMIT__SHIFT 0x10
+#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG__SHIFT 0x15
+#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_1__SHIFT 0x16
+#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_2__SHIFT 0x19
+#define VGT_CACHE_INVALIDATION__EN_WAVE_MERGE__SHIFT 0x1c
+#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_EOI__SHIFT 0x1d
+#define VGT_CACHE_INVALIDATION__CACHE_INVALIDATION_MASK 0x00000003L
+#define VGT_CACHE_INVALIDATION__DIS_INSTANCING_OPT_MASK 0x00000010L
+#define VGT_CACHE_INVALIDATION__VS_NO_EXTRA_BUFFER_MASK 0x00000020L
+#define VGT_CACHE_INVALIDATION__AUTO_INVLD_EN_MASK 0x000000C0L
+#define VGT_CACHE_INVALIDATION__USE_GS_DONE_MASK 0x00000200L
+#define VGT_CACHE_INVALIDATION__DIS_RANGE_FULL_INVLD_MASK 0x00000800L
+#define VGT_CACHE_INVALIDATION__GS_LATE_ALLOC_EN_MASK 0x00001000L
+#define VGT_CACHE_INVALIDATION__STREAMOUT_FULL_FLUSH_MASK 0x00002000L
+#define VGT_CACHE_INVALIDATION__ES_LIMIT_MASK 0x001F0000L
+#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_MASK 0x00200000L
+#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_1_MASK 0x01C00000L
+#define VGT_CACHE_INVALIDATION__OPT_FLOW_CNTL_2_MASK 0x0E000000L
+#define VGT_CACHE_INVALIDATION__EN_WAVE_MERGE_MASK 0x10000000L
+#define VGT_CACHE_INVALIDATION__ENABLE_PING_PONG_EOI_MASK 0x20000000L
+//VGT_STRMOUT_DELAY
+#define VGT_STRMOUT_DELAY__SKIP_DELAY__SHIFT 0x0
+#define VGT_STRMOUT_DELAY__SE0_WD_DELAY__SHIFT 0x8
+#define VGT_STRMOUT_DELAY__SE1_WD_DELAY__SHIFT 0xb
+#define VGT_STRMOUT_DELAY__SE2_WD_DELAY__SHIFT 0xe
+#define VGT_STRMOUT_DELAY__SE3_WD_DELAY__SHIFT 0x11
+#define VGT_STRMOUT_DELAY__SKIP_DELAY_MASK 0x000000FFL
+#define VGT_STRMOUT_DELAY__SE0_WD_DELAY_MASK 0x00000700L
+#define VGT_STRMOUT_DELAY__SE1_WD_DELAY_MASK 0x00003800L
+#define VGT_STRMOUT_DELAY__SE2_WD_DELAY_MASK 0x0001C000L
+#define VGT_STRMOUT_DELAY__SE3_WD_DELAY_MASK 0x000E0000L
+//VGT_FIFO_DEPTHS
+#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH__SHIFT 0x0
+#define VGT_FIFO_DEPTHS__RESERVED_0__SHIFT 0x7
+#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH__SHIFT 0x8
+#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH__SHIFT 0x16
+#define VGT_FIFO_DEPTHS__VS_DEALLOC_TBL_DEPTH_MASK 0x0000007FL
+#define VGT_FIFO_DEPTHS__RESERVED_0_MASK 0x00000080L
+#define VGT_FIFO_DEPTHS__CLIPP_FIFO_DEPTH_MASK 0x003FFF00L
+#define VGT_FIFO_DEPTHS__HSINPUT_FIFO_DEPTH_MASK 0x0FC00000L
+//VGT_GS_VERTEX_REUSE
+#define VGT_GS_VERTEX_REUSE__VERT_REUSE__SHIFT 0x0
+#define VGT_GS_VERTEX_REUSE__VERT_REUSE_MASK 0x0000001FL
+//VGT_MC_LAT_CNTL
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x0
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x0000000FL
+//IA_CNTL_STATUS
+#define IA_CNTL_STATUS__IA_BUSY__SHIFT 0x0
+#define IA_CNTL_STATUS__IA_DMA_BUSY__SHIFT 0x1
+#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY__SHIFT 0x2
+#define IA_CNTL_STATUS__IA_GRP_BUSY__SHIFT 0x3
+#define IA_CNTL_STATUS__IA_ADC_BUSY__SHIFT 0x4
+#define IA_CNTL_STATUS__IA_BUSY_MASK 0x00000001L
+#define IA_CNTL_STATUS__IA_DMA_BUSY_MASK 0x00000002L
+#define IA_CNTL_STATUS__IA_DMA_REQ_BUSY_MASK 0x00000004L
+#define IA_CNTL_STATUS__IA_GRP_BUSY_MASK 0x00000008L
+#define IA_CNTL_STATUS__IA_ADC_BUSY_MASK 0x00000010L
+//VGT_CNTL_STATUS
+#define VGT_CNTL_STATUS__VGT_BUSY__SHIFT 0x0
+#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY__SHIFT 0x1
+#define VGT_CNTL_STATUS__VGT_OUT_BUSY__SHIFT 0x2
+#define VGT_CNTL_STATUS__VGT_PT_BUSY__SHIFT 0x3
+#define VGT_CNTL_STATUS__VGT_TE_BUSY__SHIFT 0x4
+#define VGT_CNTL_STATUS__VGT_VR_BUSY__SHIFT 0x5
+#define VGT_CNTL_STATUS__VGT_PI_BUSY__SHIFT 0x6
+#define VGT_CNTL_STATUS__VGT_GS_BUSY__SHIFT 0x7
+#define VGT_CNTL_STATUS__VGT_HS_BUSY__SHIFT 0x8
+#define VGT_CNTL_STATUS__VGT_TE11_BUSY__SHIFT 0x9
+#define VGT_CNTL_STATUS__VGT_PRIMGEN_BUSY__SHIFT 0xa
+#define VGT_CNTL_STATUS__VGT_BUSY_MASK 0x00000001L
+#define VGT_CNTL_STATUS__VGT_OUT_INDX_BUSY_MASK 0x00000002L
+#define VGT_CNTL_STATUS__VGT_OUT_BUSY_MASK 0x00000004L
+#define VGT_CNTL_STATUS__VGT_PT_BUSY_MASK 0x00000008L
+#define VGT_CNTL_STATUS__VGT_TE_BUSY_MASK 0x00000010L
+#define VGT_CNTL_STATUS__VGT_VR_BUSY_MASK 0x00000020L
+#define VGT_CNTL_STATUS__VGT_PI_BUSY_MASK 0x00000040L
+#define VGT_CNTL_STATUS__VGT_GS_BUSY_MASK 0x00000080L
+#define VGT_CNTL_STATUS__VGT_HS_BUSY_MASK 0x00000100L
+#define VGT_CNTL_STATUS__VGT_TE11_BUSY_MASK 0x00000200L
+#define VGT_CNTL_STATUS__VGT_PRIMGEN_BUSY_MASK 0x00000400L
+//WD_CNTL_STATUS
+#define WD_CNTL_STATUS__WD_BUSY__SHIFT 0x0
+#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY__SHIFT 0x1
+#define WD_CNTL_STATUS__WD_SPL_DI_BUSY__SHIFT 0x2
+#define WD_CNTL_STATUS__WD_ADC_BUSY__SHIFT 0x3
+#define WD_CNTL_STATUS__WD_BUSY_MASK 0x00000001L
+#define WD_CNTL_STATUS__WD_SPL_DMA_BUSY_MASK 0x00000002L
+#define WD_CNTL_STATUS__WD_SPL_DI_BUSY_MASK 0x00000004L
+#define WD_CNTL_STATUS__WD_ADC_BUSY_MASK 0x00000008L
+//CC_GC_PRIM_CONFIG
+#define CC_GC_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
+#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
+#define CC_GC_PRIM_CONFIG__INACTIVE_IA_MASK 0x00030000L
+#define CC_GC_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0x0F000000L
+//GC_USER_PRIM_CONFIG
+#define GC_USER_PRIM_CONFIG__INACTIVE_IA__SHIFT 0x10
+#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA__SHIFT 0x18
+#define GC_USER_PRIM_CONFIG__INACTIVE_IA_MASK 0x00030000L
+#define GC_USER_PRIM_CONFIG__INACTIVE_VGT_PA_MASK 0x0F000000L
+//WD_QOS
+#define WD_QOS__DRAW_STALL__SHIFT 0x0
+#define WD_QOS__DRAW_STALL_MASK 0x00000001L
+//WD_UTCL1_CNTL
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define WD_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define WD_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define WD_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define WD_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define WD_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define WD_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define WD_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define WD_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define WD_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define WD_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define WD_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define WD_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+//WD_UTCL1_STATUS
+#define WD_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define WD_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define WD_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define WD_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define WD_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define WD_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define WD_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define WD_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//IA_UTCL1_CNTL
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define IA_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define IA_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define IA_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define IA_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define IA_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define IA_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define IA_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define IA_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define IA_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define IA_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define IA_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define IA_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+//IA_UTCL1_STATUS
+#define IA_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define IA_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define IA_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define IA_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define IA_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define IA_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define IA_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define IA_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//VGT_SYS_CONFIG
+#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x0
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x1
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x7
+#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x00000001L
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x0000007EL
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x00000080L
+//VGT_VS_MAX_WAVE_ID
+#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define VGT_VS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+//VGT_GS_MAX_WAVE_ID
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+//GFX_PIPE_CONTROL
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT__SHIFT 0x0
+#define GFX_PIPE_CONTROL__RESERVED__SHIFT 0xd
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN__SHIFT 0x10
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT_MASK 0x00001FFFL
+#define GFX_PIPE_CONTROL__RESERVED_MASK 0x0000E000L
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN_MASK 0x00010000L
+//CC_GC_SHADER_ARRAY_CONFIG
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xFFFF0000L
+//GC_USER_SHADER_ARRAY_CONFIG
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT 0x10
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK 0xFFFF0000L
+//VGT_DMA_PRIMITIVE_TYPE
+#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
+#define VGT_DMA_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
+//VGT_DMA_CONTROL
+#define VGT_DMA_CONTROL__PRIMGROUP_SIZE__SHIFT 0x0
+#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP__SHIFT 0x11
+#define VGT_DMA_CONTROL__SWITCH_ON_EOI__SHIFT 0x13
+#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP__SHIFT 0x14
+#define VGT_DMA_CONTROL__EN_INST_OPT_BASIC__SHIFT 0x15
+#define VGT_DMA_CONTROL__EN_INST_OPT_ADV__SHIFT 0x16
+#define VGT_DMA_CONTROL__HW_USE_ONLY__SHIFT 0x17
+#define VGT_DMA_CONTROL__PRIMGROUP_SIZE_MASK 0x0000FFFFL
+#define VGT_DMA_CONTROL__IA_SWITCH_ON_EOP_MASK 0x00020000L
+#define VGT_DMA_CONTROL__SWITCH_ON_EOI_MASK 0x00080000L
+#define VGT_DMA_CONTROL__WD_SWITCH_ON_EOP_MASK 0x00100000L
+#define VGT_DMA_CONTROL__EN_INST_OPT_BASIC_MASK 0x00200000L
+#define VGT_DMA_CONTROL__EN_INST_OPT_ADV_MASK 0x00400000L
+#define VGT_DMA_CONTROL__HW_USE_ONLY_MASK 0x00800000L
+//VGT_DMA_LS_HS_CONFIG
+#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
+#define VGT_DMA_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
+//WD_BUF_RESOURCE_1
+#define WD_BUF_RESOURCE_1__POS_BUF_SIZE__SHIFT 0x0
+#define WD_BUF_RESOURCE_1__INDEX_BUF_SIZE__SHIFT 0x10
+#define WD_BUF_RESOURCE_1__POS_BUF_SIZE_MASK 0x0000FFFFL
+#define WD_BUF_RESOURCE_1__INDEX_BUF_SIZE_MASK 0xFFFF0000L
+//WD_BUF_RESOURCE_2
+#define WD_BUF_RESOURCE_2__PARAM_BUF_SIZE__SHIFT 0x0
+#define WD_BUF_RESOURCE_2__ADDR_MODE__SHIFT 0xf
+#define WD_BUF_RESOURCE_2__CNTL_SB_BUF_SIZE__SHIFT 0x10
+#define WD_BUF_RESOURCE_2__PARAM_BUF_SIZE_MASK 0x00001FFFL
+#define WD_BUF_RESOURCE_2__ADDR_MODE_MASK 0x00008000L
+#define WD_BUF_RESOURCE_2__CNTL_SB_BUF_SIZE_MASK 0xFFFF0000L
+//PA_CL_CNTL_STATUS
+#define PA_CL_CNTL_STATUS__UTC_FAULT_DETECTED__SHIFT 0x0
+#define PA_CL_CNTL_STATUS__UTC_RETRY_DETECTED__SHIFT 0x1
+#define PA_CL_CNTL_STATUS__UTC_PRT_DETECTED__SHIFT 0x2
+#define PA_CL_CNTL_STATUS__UTC_FAULT_DETECTED_MASK 0x00000001L
+#define PA_CL_CNTL_STATUS__UTC_RETRY_DETECTED_MASK 0x00000002L
+#define PA_CL_CNTL_STATUS__UTC_PRT_DETECTED_MASK 0x00000004L
+//PA_CL_ENHANCE
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x0
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x1
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x3
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x4
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL__SHIFT 0x5
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET__SHIFT 0x6
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS__SHIFT 0x7
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC__SHIFT 0x8
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION__SHIFT 0x9
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER__SHIFT 0xb
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH__SHIFT 0xc
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH__SHIFT 0xe
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE__SHIFT 0x11
+#define PA_CL_ENHANCE__OUTPUT_SWITCH_TO_LEGACY_EVENT__SHIFT 0x12
+#define PA_CL_ENHANCE__NO_SWITCH_TO_LEGACY_AFTER_VMID_RESET__SHIFT 0x13
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE__SHIFT 0x14
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE__SHIFT 0x15
+#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x1c
+#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x1d
+#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x1e
+#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x1f
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x00000001L
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x00000006L
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x00000008L
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x00000010L
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL_MASK 0x00000020L
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET_MASK 0x00000040L
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS_MASK 0x00000080L
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC_MASK 0x00000100L
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION_MASK 0x00000600L
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER_MASK 0x00000800L
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH_MASK 0x00003000L
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH_MASK 0x0001C000L
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE_MASK 0x00020000L
+#define PA_CL_ENHANCE__OUTPUT_SWITCH_TO_LEGACY_EVENT_MASK 0x00040000L
+#define PA_CL_ENHANCE__NO_SWITCH_TO_LEGACY_AFTER_VMID_RESET_MASK 0x00080000L
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE_MASK 0x00100000L
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE_MASK 0x00200000L
+#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000L
+#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000L
+#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000L
+#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000L
+//PA_CL_RESET_DEBUG
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE__SHIFT 0x0
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE_MASK 0x00000001L
+//PA_SU_CNTL_STATUS
+#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x1f
+#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000L
+//PA_SC_FIFO_DEPTH_CNTL
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x0
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x000003FFL
+//PA_SC_P3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_HP3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_TRAP_SCREEN_HV_LOCK
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_FORCE_EOV_MAX_CNTS
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x0
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x10
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0x0000FFFFL
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xFFFF0000L
+//PA_SC_BINNER_EVENT_CNTL_0
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_1
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_1__ZPASS_DONE__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_GS_OUTPUT__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_1__ZPASS_DONE_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_GS_OUTPUT_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_2
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_CONTEXT_DONE__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_SX_TS__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_CONTEXT_DONE_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_SX_TS_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_3
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_3__SX_CB_RAT_ACK_REQUEST__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FLUSH__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_LEGACY_PIPELINE__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_63__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_3__SX_CB_RAT_ACK_REQUEST_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FLUSH_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_LEGACY_PIPELINE_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_63_MASK 0xC0000000L
+//PA_SC_BINNER_TIMEOUT_COUNTER
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_BINNER_PERF_CNTL_0
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x14
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x17
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000003FFL
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000FFC00L
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x00700000L
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x03800000L
+//PA_SC_BINNER_PERF_CNTL_1
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x5
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x0000001FL
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x000003E0L
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD_MASK 0x03FFFC00L
+//PA_SC_BINNER_PERF_CNTL_2
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD__SHIFT 0xb
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD_MASK 0x000007FFL
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD_MASK 0x003FF800L
+//PA_SC_BINNER_PERF_CNTL_3
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_ENHANCE_2
+#define PA_SC_ENHANCE_2__RESERVED_0__SHIFT 0x0
+#define PA_SC_ENHANCE_2__RESERVED_1__SHIFT 0x1
+#define PA_SC_ENHANCE_2__RESERVED_2__SHIFT 0x2
+#define PA_SC_ENHANCE_2__RESERVED_3__SHIFT 0x3
+#define PA_SC_ENHANCE_2__RESERVED_4__SHIFT 0x4
+#define PA_SC_ENHANCE_2__RESERVED_5__SHIFT 0x5
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_COMPOUND_INDEX_EN__SHIFT 0x6
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PIPELINE_PRIMID__SHIFT 0x7
+#define PA_SC_ENHANCE_2__RSVD__SHIFT 0x8
+#define PA_SC_ENHANCE_2__RESERVED_0_MASK 0x00000001L
+#define PA_SC_ENHANCE_2__RESERVED_1_MASK 0x00000002L
+#define PA_SC_ENHANCE_2__RESERVED_2_MASK 0x00000004L
+#define PA_SC_ENHANCE_2__RESERVED_3_MASK 0x00000008L
+#define PA_SC_ENHANCE_2__RESERVED_4_MASK 0x00000010L
+#define PA_SC_ENHANCE_2__RESERVED_5_MASK 0x00000020L
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_COMPOUND_INDEX_EN_MASK 0x00000040L
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PIPELINE_PRIMID_MASK 0x00000080L
+#define PA_SC_ENHANCE_2__RSVD_MASK 0xFFFFFF00L
+//PA_SC_FIFO_SIZE
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0xf
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x15
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x00007FC0L
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x001F8000L
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xFFE00000L
+//PA_SC_IF_FIFO_SIZE
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0xc
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x12
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0x00000FC0L
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x0003F000L
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0x00FC0000L
+//PA_SC_PKR_WAVE_TABLE_CNTL
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE__SHIFT 0x0
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE_MASK 0x0000003FL
+//PA_UTCL1_CNTL1
+#define PA_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define PA_UTCL1_CNTL1__GPUVM_64K_DEFAULT__SHIFT 0x1
+#define PA_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define PA_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define PA_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define PA_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define PA_UTCL1_CNTL1__SPARE__SHIFT 0x10
+#define PA_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define PA_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define PA_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define PA_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
+#define PA_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define PA_UTCL1_CNTL1__INVALIDATE_ALL_VMID__SHIFT 0x19
+#define PA_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define PA_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define PA_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define PA_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define PA_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define PA_UTCL1_CNTL1__GPUVM_64K_DEFAULT_MASK 0x00000002L
+#define PA_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define PA_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define PA_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define PA_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define PA_UTCL1_CNTL1__SPARE_MASK 0x00010000L
+#define PA_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define PA_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define PA_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define PA_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
+#define PA_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define PA_UTCL1_CNTL1__INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define PA_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define PA_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define PA_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define PA_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//PA_UTCL1_CNTL2
+#define PA_UTCL1_CNTL2__SPARE1__SHIFT 0x0
+#define PA_UTCL1_CNTL2__SPARE2__SHIFT 0x8
+#define PA_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define PA_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define PA_UTCL1_CNTL2__SPARE3__SHIFT 0xb
+#define PA_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define PA_UTCL1_CNTL2__ENABLE_SHOOTDOWN_OPT__SHIFT 0xd
+#define PA_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define PA_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define PA_UTCL1_CNTL2__SPARE4__SHIFT 0x10
+#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define PA_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
+#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define PA_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
+#define PA_UTCL1_CNTL2__SPARE5__SHIFT 0x19
+#define PA_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define PA_UTCL1_CNTL2__RESERVED__SHIFT 0x1b
+#define PA_UTCL1_CNTL2__SPARE1_MASK 0x000000FFL
+#define PA_UTCL1_CNTL2__SPARE2_MASK 0x00000100L
+#define PA_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define PA_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define PA_UTCL1_CNTL2__SPARE3_MASK 0x00000800L
+#define PA_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define PA_UTCL1_CNTL2__ENABLE_SHOOTDOWN_OPT_MASK 0x00002000L
+#define PA_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define PA_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define PA_UTCL1_CNTL2__SPARE4_MASK 0x00030000L
+#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define PA_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
+#define PA_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define PA_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
+#define PA_UTCL1_CNTL2__SPARE5_MASK 0x02000000L
+#define PA_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define PA_UTCL1_CNTL2__RESERVED_MASK 0xF8000000L
+//PA_SIDEBAND_REQUEST_DELAYS
+#define PA_SIDEBAND_REQUEST_DELAYS__RETRY_DELAY__SHIFT 0x0
+#define PA_SIDEBAND_REQUEST_DELAYS__INITIAL_DELAY__SHIFT 0x10
+#define PA_SIDEBAND_REQUEST_DELAYS__RETRY_DELAY_MASK 0x0000FFFFL
+#define PA_SIDEBAND_REQUEST_DELAYS__INITIAL_DELAY_MASK 0xFFFF0000L
+//PA_SC_ENHANCE
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x0
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x1
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x2
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x3
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x4
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x5
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x6
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x7
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0x8
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0x9
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0xa
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0xb
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0xc
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0xd
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0xe
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0xf
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x10
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x11
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x12
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x13
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x14
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x15
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x16
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO__SHIFT 0x17
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING__SHIFT 0x19
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET__SHIFT 0x1a
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET__SHIFT 0x1b
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE__SHIFT 0x1c
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING__SHIFT 0x1d
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x00000001L
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x00000002L
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x00000004L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x00000008L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x00000010L
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x00000020L
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x00000040L
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x00000080L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x00000100L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x00000200L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x00000400L
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x00000800L
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x00001000L
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x00002000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x00004000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x00008000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x00010000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x00020000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x00040000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x00080000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x00100000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x00200000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x00400000L
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO_MASK 0x00800000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING_MASK 0x02000000L
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET_MASK 0x04000000L
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET_MASK 0x08000000L
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE_MASK 0x10000000L
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING_MASK 0x20000000L
+//PA_SC_ENHANCE_1
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE__SHIFT 0x0
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE__SHIFT 0x1
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING__SHIFT 0x3
+#define PA_SC_ENHANCE_1__BYPASS_PBB__SHIFT 0x4
+#define PA_SC_ENHANCE_1__ECO_SPARE0__SHIFT 0x5
+#define PA_SC_ENHANCE_1__ECO_SPARE1__SHIFT 0x6
+#define PA_SC_ENHANCE_1__ECO_SPARE2__SHIFT 0x7
+#define PA_SC_ENHANCE_1__ECO_SPARE3__SHIFT 0x8
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB__SHIFT 0x9
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT__SHIFT 0xa
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM__SHIFT 0xb
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_XY_UNPACK__SHIFT 0xc
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_GRAD_FDCE_ENHANCE__SHIFT 0xd
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE__SHIFT 0xe
+#define PA_SC_ENHANCE_1__DISABLE_SC_PIPELINE_RESET_LEGACY_MODE_TRANSITION__SHIFT 0xf
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE__SHIFT 0x10
+#define PA_SC_ENHANCE_1__ALLOW_SCALE_LINE_WIDTH_PAD_WITH_BINNING__SHIFT 0x11
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION__SHIFT 0x12
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS__SHIFT 0x13
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION__SHIFT 0x14
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION__SHIFT 0x15
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION__SHIFT 0x16
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG__SHIFT 0x17
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER__SHIFT 0x19
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1a
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE__SHIFT 0x1b
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX__SHIFT 0x1c
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1__SHIFT 0x1d
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI__SHIFT 0x1e
+#define PA_SC_ENHANCE_1__RSVD__SHIFT 0x1f
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE_MASK 0x00000001L
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_MASK 0x00000006L
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING_MASK 0x00000008L
+#define PA_SC_ENHANCE_1__BYPASS_PBB_MASK 0x00000010L
+#define PA_SC_ENHANCE_1__ECO_SPARE0_MASK 0x00000020L
+#define PA_SC_ENHANCE_1__ECO_SPARE1_MASK 0x00000040L
+#define PA_SC_ENHANCE_1__ECO_SPARE2_MASK 0x00000080L
+#define PA_SC_ENHANCE_1__ECO_SPARE3_MASK 0x00000100L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB_MASK 0x00000200L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT_MASK 0x00000400L
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM_MASK 0x00000800L
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_XY_UNPACK_MASK 0x00001000L
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_GRAD_FDCE_ENHANCE_MASK 0x00002000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE_MASK 0x00004000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PIPELINE_RESET_LEGACY_MODE_TRANSITION_MASK 0x00008000L
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE_MASK 0x00010000L
+#define PA_SC_ENHANCE_1__ALLOW_SCALE_LINE_WIDTH_PAD_WITH_BINNING_MASK 0x00020000L
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION_MASK 0x00040000L
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS_MASK 0x00080000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION_MASK 0x00100000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION_MASK 0x00200000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION_MASK 0x00400000L
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG_MASK 0x00800000L
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER_MASK 0x02000000L
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION_MASK 0x04000000L
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE_MASK 0x08000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_MASK 0x10000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1_MASK 0x20000000L
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI_MASK 0x40000000L
+#define PA_SC_ENHANCE_1__RSVD_MASK 0x80000000L
+//PA_SC_DSM_CNTL
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0__SHIFT 0x0
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1__SHIFT 0x1
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0_MASK 0x00000001L
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1_MASK 0x00000002L
+//PA_SC_TILE_STEERING_CREST_OVERRIDE
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT__SHIFT 0x1
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT__SHIFT 0x5
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK 0x00000006L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT_MASK 0x00000060L
+
+
+// addressBlock: gc_sqdec
+//SQ_CONFIG
+#define SQ_CONFIG__UNUSED__SHIFT 0x0
+#define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7
+#define SQ_CONFIG__DEBUG_EN__SHIFT 0x8
+#define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT 0x9
+#define SQ_CONFIG__DEBUG_ONE_INST_CLAUSE__SHIFT 0xa
+#define SQ_CONFIG__OVERRIDE_LDS_IDX_BUSY__SHIFT 0xb
+#define SQ_CONFIG__EARLY_TA_DONE_DISABLE__SHIFT 0xc
+#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE__SHIFT 0xd
+#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE__SHIFT 0xe
+#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE__SHIFT 0xf
+#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE__SHIFT 0x10
+#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE__SHIFT 0x11
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS__SHIFT 0x12
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS__SHIFT 0x13
+#define SQ_CONFIG__REPLAY_SLEEP_CNT__SHIFT 0x15
+#define SQ_CONFIG__DISABLE_SP_VGPR_WRITE_SKIP__SHIFT 0x1c
+#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d
+#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e
+#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f
+#define SQ_CONFIG__UNUSED_MASK 0x0000007FL
+#define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L
+#define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L
+#define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK 0x00000200L
+#define SQ_CONFIG__DEBUG_ONE_INST_CLAUSE_MASK 0x00000400L
+#define SQ_CONFIG__OVERRIDE_LDS_IDX_BUSY_MASK 0x00000800L
+#define SQ_CONFIG__EARLY_TA_DONE_DISABLE_MASK 0x00001000L
+#define SQ_CONFIG__DUA_FLAT_LOCK_ENABLE_MASK 0x00002000L
+#define SQ_CONFIG__DUA_LDS_BYPASS_DISABLE_MASK 0x00004000L
+#define SQ_CONFIG__DUA_FLAT_LDS_PINGPONG_DISABLE_MASK 0x00008000L
+#define SQ_CONFIG__DISABLE_VMEM_SOFT_CLAUSE_MASK 0x00010000L
+#define SQ_CONFIG__DISABLE_SMEM_SOFT_CLAUSE_MASK 0x00020000L
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_VS_MASK 0x00040000L
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_VS_MASK 0x00180000L
+#define SQ_CONFIG__REPLAY_SLEEP_CNT_MASK 0x0FE00000L
+#define SQ_CONFIG__DISABLE_SP_VGPR_WRITE_SKIP_MASK 0x10000000L
+#define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING_MASK 0x20000000L
+#define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE_MASK 0x40000000L
+#define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE_MASK 0x80000000L
+//SQC_CONFIG
+#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x0
+#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x2
+#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x4
+#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x6
+#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x7
+#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x8
+#define SQC_CONFIG__IDENTITY_HASH_BANK__SHIFT 0x9
+#define SQC_CONFIG__IDENTITY_HASH_SET__SHIFT 0xa
+#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0xb
+#define SQC_CONFIG__EVICT_LRU__SHIFT 0xc
+#define SQC_CONFIG__FORCE_2_BANK__SHIFT 0xe
+#define SQC_CONFIG__FORCE_1_BANK__SHIFT 0xf
+#define SQC_CONFIG__LS_DISABLE_CLOCKS__SHIFT 0x10
+#define SQC_CONFIG__INST_PRF_COUNT__SHIFT 0x18
+#define SQC_CONFIG__INST_PRF_FILTER_DIS__SHIFT 0x1a
+#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x00000003L
+#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0x0000000CL
+#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x00000030L
+#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x00000040L
+#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x00000080L
+#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x00000100L
+#define SQC_CONFIG__IDENTITY_HASH_BANK_MASK 0x00000200L
+#define SQC_CONFIG__IDENTITY_HASH_SET_MASK 0x00000400L
+#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x00000800L
+#define SQC_CONFIG__EVICT_LRU_MASK 0x00003000L
+#define SQC_CONFIG__FORCE_2_BANK_MASK 0x00004000L
+#define SQC_CONFIG__FORCE_1_BANK_MASK 0x00008000L
+#define SQC_CONFIG__LS_DISABLE_CLOCKS_MASK 0x00FF0000L
+#define SQC_CONFIG__INST_PRF_COUNT_MASK 0x03000000L
+#define SQC_CONFIG__INST_PRF_FILTER_DIS_MASK 0x04000000L
+//LDS_CONFIG
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING__SHIFT 0x0
+#define LDS_CONFIG__DISABLE_RAM_CLOCK_GATING__SHIFT 0x2
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING_MASK 0x00000001L
+#define LDS_CONFIG__DISABLE_RAM_CLOCK_GATING_MASK 0x00000004L
+//SQ_RANDOM_WAVE_PRI
+#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x0
+#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x7
+#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0xa
+#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x0000007FL
+#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x00000380L
+#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x007FFC00L
+//SQ_REG_CREDITS
+#define SQ_REG_CREDITS__SRBM_CREDITS__SHIFT 0x0
+#define SQ_REG_CREDITS__CMD_CREDITS__SHIFT 0x8
+#define SQ_REG_CREDITS__REG_BUSY__SHIFT 0x1c
+#define SQ_REG_CREDITS__SRBM_OVERFLOW__SHIFT 0x1d
+#define SQ_REG_CREDITS__IMMED_OVERFLOW__SHIFT 0x1e
+#define SQ_REG_CREDITS__CMD_OVERFLOW__SHIFT 0x1f
+#define SQ_REG_CREDITS__SRBM_CREDITS_MASK 0x0000003FL
+#define SQ_REG_CREDITS__CMD_CREDITS_MASK 0x00000F00L
+#define SQ_REG_CREDITS__REG_BUSY_MASK 0x10000000L
+#define SQ_REG_CREDITS__SRBM_OVERFLOW_MASK 0x20000000L
+#define SQ_REG_CREDITS__IMMED_OVERFLOW_MASK 0x40000000L
+#define SQ_REG_CREDITS__CMD_OVERFLOW_MASK 0x80000000L
+//SQ_FIFO_SIZES
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x0
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x8
+#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE__SHIFT 0x10
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x12
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0x0000000FL
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0x00000F00L
+#define SQ_FIFO_SIZES__EXPORT_BUF_SIZE_MASK 0x00030000L
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0x000C0000L
+//SQ_DSM_CNTL
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0__SHIFT 0x0
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1__SHIFT 0x1
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0__SHIFT 0x2
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1__SHIFT 0x3
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0__SHIFT 0x8
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1__SHIFT 0x9
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE__SHIFT 0xa
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0__SHIFT 0x10
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1__SHIFT 0x11
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01__SHIFT 0x12
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2__SHIFT 0x13
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3__SHIFT 0x14
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23__SHIFT 0x15
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0__SHIFT 0x18
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1__SHIFT 0x19
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0_MASK 0x00000001L
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1_MASK 0x00000002L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0_MASK 0x00000004L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1_MASK 0x00000008L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0_MASK 0x00000100L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1_MASK 0x00000200L
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE_MASK 0x00000400L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0_MASK 0x00010000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1_MASK 0x00020000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01_MASK 0x00040000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2_MASK 0x00080000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3_MASK 0x00100000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23_MASK 0x00200000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0_MASK 0x01000000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1_MASK 0x02000000L
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQ_DSM_CNTL2
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY__SHIFT 0xe
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY__SHIFT 0x14
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY__SHIFT 0x1a
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY_MASK 0x000FC000L
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY_MASK 0x03F00000L
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY_MASK 0xFC000000L
+//SQ_RUNTIME_CONFIG
+#define SQ_RUNTIME_CONFIG__ENABLE_TEX_ARB_OLDEST__SHIFT 0x0
+#define SQ_RUNTIME_CONFIG__ENABLE_TEX_ARB_OLDEST_MASK 0x00000001L
+//SH_MEM_BASES
+#define SH_MEM_BASES__PRIVATE_BASE__SHIFT 0x0
+#define SH_MEM_BASES__SHARED_BASE__SHIFT 0x10
+#define SH_MEM_BASES__PRIVATE_BASE_MASK 0x0000FFFFL
+#define SH_MEM_BASES__SHARED_BASE_MASK 0xFFFF0000L
+//SH_MEM_CONFIG
+#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
+#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x3
+#define SH_MEM_CONFIG__RETRY_DISABLE__SHIFT 0xc
+#define SH_MEM_CONFIG__PRIVATE_NV__SHIFT 0xd
+#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
+#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x00000018L
+#define SH_MEM_CONFIG__RETRY_DISABLE_MASK 0x00001000L
+#define SH_MEM_CONFIG__PRIVATE_NV_MASK 0x00002000L
+//CC_GC_SHADER_RATE_CONFIG
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
+#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+#define CC_GC_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
+#define CC_GC_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x00000010L
+//GC_USER_SHADER_RATE_CONFIG
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE__SHIFT 0x3
+#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS__SHIFT 0x4
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+#define GC_USER_SHADER_RATE_CONFIG__SQC_BALANCE_DISABLE_MASK 0x00000008L
+#define GC_USER_SHADER_RATE_CONFIG__HALF_LDS_MASK 0x00000010L
+//SQ_INTERRUPT_AUTO_MASK
+#define SQ_INTERRUPT_AUTO_MASK__MASK__SHIFT 0x0
+#define SQ_INTERRUPT_AUTO_MASK__MASK_MASK 0x00FFFFFFL
+//SQ_INTERRUPT_MSG_CTRL
+#define SQ_INTERRUPT_MSG_CTRL__STALL__SHIFT 0x0
+#define SQ_INTERRUPT_MSG_CTRL__STALL_MASK 0x00000001L
+//SQ_UTCL1_CNTL1
+#define SQ_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define SQ_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define SQ_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define SQ_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define SQ_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define SQ_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define SQ_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
+#define SQ_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define SQ_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL__SHIFT 0x19
+#define SQ_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define SQ_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define SQ_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define SQ_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define SQ_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define SQ_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define SQ_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define SQ_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define SQ_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define SQ_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define SQ_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
+#define SQ_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define SQ_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
+#define SQ_UTCL1_CNTL1__REG_INVALIDATE_ALL_MASK 0x02000000L
+#define SQ_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define SQ_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define SQ_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define SQ_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//SQ_UTCL1_CNTL2
+#define SQ_UTCL1_CNTL2__SPARE__SHIFT 0x0
+#define SQ_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
+#define SQ_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define SQ_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define SQ_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define SQ_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define SQ_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define SQ_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define SQ_UTCL1_CNTL2__RETRY_TIMER__SHIFT 0x10
+#define SQ_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define SQ_UTCL1_CNTL2__PREFETCH_PAGE__SHIFT 0x1c
+#define SQ_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
+#define SQ_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
+#define SQ_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define SQ_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define SQ_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define SQ_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define SQ_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define SQ_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define SQ_UTCL1_CNTL2__RETRY_TIMER_MASK 0x007F0000L
+#define SQ_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define SQ_UTCL1_CNTL2__PREFETCH_PAGE_MASK 0xF0000000L
+//SQ_UTCL1_STATUS
+#define SQ_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define SQ_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define SQ_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define SQ_UTCL1_STATUS__RESERVED__SHIFT 0x3
+#define SQ_UTCL1_STATUS__UNUSED__SHIFT 0x10
+#define SQ_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define SQ_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define SQ_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define SQ_UTCL1_STATUS__RESERVED_MASK 0x0000FFF8L
+#define SQ_UTCL1_STATUS__UNUSED_MASK 0xFFFF0000L
+//SQ_SHADER_TBA_LO
+#define SQ_SHADER_TBA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TBA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TBA_HI
+#define SQ_SHADER_TBA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TBA_HI__ADDR_HI_MASK 0x000000FFL
+//SQ_SHADER_TMA_LO
+#define SQ_SHADER_TMA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TMA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TMA_HI
+#define SQ_SHADER_TMA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TMA_HI__ADDR_HI_MASK 0x000000FFL
+//SQC_DSM_CNTL
+#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SQC_DSM_CNTL__INST_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define SQC_DSM_CNTL__DATA_CU0_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define SQC_DSM_CNTL__DATA_CU0_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define SQC_DSM_CNTL__DATA_CU1_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define SQC_DSM_CNTL__DATA_CU1_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define SQC_DSM_CNTL__DATA_CU2_WRITE_DATA_BUF_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define SQC_DSM_CNTL__DATA_CU2_UTCL1_LFIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//SQC_DSM_CNTLA
+#define SQC_DSM_CNTLA__INST_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SQC_DSM_CNTLA__INST_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define SQC_DSM_CNTLA__INST_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define SQC_DSM_CNTLA__INST_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define SQC_DSM_CNTLA__INST_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define SQC_DSM_CNTLA__INST_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define SQC_DSM_CNTLA__DATA_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define SQC_DSM_CNTLA__DATA_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define SQC_DSM_CNTLA__DATA_HIT_FIFO_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define SQC_DSM_CNTLA__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define SQC_DSM_CNTLA__DATA_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define SQC_DSM_CNTLA__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define SQC_DSM_CNTLA__DATA_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define SQC_DSM_CNTLA__DATA_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQC_DSM_CNTLA__INST_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SQC_DSM_CNTLA__INST_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define SQC_DSM_CNTLA__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define SQC_DSM_CNTLA__INST_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define SQC_DSM_CNTLA__INST_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define SQC_DSM_CNTLA__INST_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define SQC_DSM_CNTLA__INST_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define SQC_DSM_CNTLA__DATA_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define SQC_DSM_CNTLA__DATA_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define SQC_DSM_CNTLA__DATA_HIT_FIFO_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define SQC_DSM_CNTLA__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define SQC_DSM_CNTLA__DATA_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define SQC_DSM_CNTLA__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define SQC_DSM_CNTLA__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define SQC_DSM_CNTLA__DATA_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define SQC_DSM_CNTLA__DATA_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQC_DSM_CNTLB
+#define SQC_DSM_CNTLB__INST_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SQC_DSM_CNTLB__INST_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define SQC_DSM_CNTLB__INST_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define SQC_DSM_CNTLB__INST_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define SQC_DSM_CNTLB__INST_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define SQC_DSM_CNTLB__INST_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define SQC_DSM_CNTLB__DATA_TAG_RAM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define SQC_DSM_CNTLB__DATA_TAG_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define SQC_DSM_CNTLB__DATA_HIT_FIFO_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define SQC_DSM_CNTLB__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define SQC_DSM_CNTLB__DATA_MISS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define SQC_DSM_CNTLB__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define SQC_DSM_CNTLB__DATA_BANK_RAM_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define SQC_DSM_CNTLB__DATA_BANK_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQC_DSM_CNTLB__INST_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SQC_DSM_CNTLB__INST_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define SQC_DSM_CNTLB__INST_UTCL1_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define SQC_DSM_CNTLB__INST_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define SQC_DSM_CNTLB__INST_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define SQC_DSM_CNTLB__INST_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define SQC_DSM_CNTLB__INST_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define SQC_DSM_CNTLB__DATA_TAG_RAM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define SQC_DSM_CNTLB__DATA_TAG_RAM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define SQC_DSM_CNTLB__DATA_HIT_FIFO_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define SQC_DSM_CNTLB__DATA_HIT_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define SQC_DSM_CNTLB__DATA_MISS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define SQC_DSM_CNTLB__DATA_MISS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define SQC_DSM_CNTLB__DATA_DIRTY_BIT_RAM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define SQC_DSM_CNTLB__DATA_BANK_RAM_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define SQC_DSM_CNTLB__DATA_BANK_RAM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQC_DSM_CNTL2
+#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0xe
+#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_SELECT_INJECT_DELAY__SHIFT 0x11
+#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_SELECT_INJECT_DELAY__SHIFT 0x14
+#define SQC_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQC_DSM_CNTL2__INST_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQC_DSM_CNTL2__DATA_CU0_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQC_DSM_CNTL2__DATA_CU0_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQC_DSM_CNTL2__DATA_CU1_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define SQC_DSM_CNTL2__DATA_CU1_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define SQC_DSM_CNTL2__DATA_CU2_WRITE_DATA_BUF_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define SQC_DSM_CNTL2__DATA_CU2_UTCL1_LFIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define SQC_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//SQC_DSM_CNTL2A
+#define SQC_DSM_CNTL2A__INST_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQC_DSM_CNTL2A__INST_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQC_DSM_CNTL2A__INST_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQC_DSM_CNTL2A__INST_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQC_DSM_CNTL2A__INST_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQC_DSM_CNTL2A__INST_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQC_DSM_CNTL2A__DATA_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define SQC_DSM_CNTL2A__DATA_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_SELECT_INJECT_DELAY__SHIFT 0x11
+#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x14
+#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define SQC_DSM_CNTL2A__DATA_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define SQC_DSM_CNTL2A__DATA_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define SQC_DSM_CNTL2A__INST_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQC_DSM_CNTL2A__INST_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQC_DSM_CNTL2A__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQC_DSM_CNTL2A__INST_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQC_DSM_CNTL2A__INST_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQC_DSM_CNTL2A__INST_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQC_DSM_CNTL2A__INST_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQC_DSM_CNTL2A__DATA_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define SQC_DSM_CNTL2A__DATA_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define SQC_DSM_CNTL2A__DATA_HIT_FIFO_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define SQC_DSM_CNTL2A__DATA_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define SQC_DSM_CNTL2A__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define SQC_DSM_CNTL2A__DATA_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define SQC_DSM_CNTL2A__DATA_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
+//SQC_DSM_CNTL2B
+#define SQC_DSM_CNTL2B__INST_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQC_DSM_CNTL2B__INST_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQC_DSM_CNTL2B__INST_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQC_DSM_CNTL2B__INST_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQC_DSM_CNTL2B__INST_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQC_DSM_CNTL2B__INST_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQC_DSM_CNTL2B__DATA_TAG_RAM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define SQC_DSM_CNTL2B__DATA_TAG_RAM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_SELECT_INJECT_DELAY__SHIFT 0x11
+#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x14
+#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define SQC_DSM_CNTL2B__DATA_BANK_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define SQC_DSM_CNTL2B__DATA_BANK_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define SQC_DSM_CNTL2B__INST_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQC_DSM_CNTL2B__INST_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQC_DSM_CNTL2B__INST_UTCL1_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQC_DSM_CNTL2B__INST_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQC_DSM_CNTL2B__INST_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQC_DSM_CNTL2B__INST_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQC_DSM_CNTL2B__INST_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQC_DSM_CNTL2B__DATA_TAG_RAM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define SQC_DSM_CNTL2B__DATA_TAG_RAM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define SQC_DSM_CNTL2B__DATA_HIT_FIFO_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define SQC_DSM_CNTL2B__DATA_MISS_FIFO_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define SQC_DSM_CNTL2B__DATA_DIRTY_BIT_RAM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define SQC_DSM_CNTL2B__DATA_BANK_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define SQC_DSM_CNTL2B__DATA_BANK_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
+//SQ_REG_TIMESTAMP
+#define SQ_REG_TIMESTAMP__TIMESTAMP__SHIFT 0x0
+#define SQ_REG_TIMESTAMP__TIMESTAMP_MASK 0x000000FFL
+//SQ_CMD_TIMESTAMP
+#define SQ_CMD_TIMESTAMP__TIMESTAMP__SHIFT 0x0
+#define SQ_CMD_TIMESTAMP__TIMESTAMP_MASK 0x000000FFL
+//SQ_IND_INDEX
+#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x0
+#define SQ_IND_INDEX__SIMD_ID__SHIFT 0x4
+#define SQ_IND_INDEX__THREAD_ID__SHIFT 0x6
+#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0xc
+#define SQ_IND_INDEX__FORCE_READ__SHIFT 0xd
+#define SQ_IND_INDEX__READ_TIMEOUT__SHIFT 0xe
+#define SQ_IND_INDEX__UNINDEXED__SHIFT 0xf
+#define SQ_IND_INDEX__INDEX__SHIFT 0x10
+#define SQ_IND_INDEX__WAVE_ID_MASK 0x0000000FL
+#define SQ_IND_INDEX__SIMD_ID_MASK 0x00000030L
+#define SQ_IND_INDEX__THREAD_ID_MASK 0x00000FC0L
+#define SQ_IND_INDEX__AUTO_INCR_MASK 0x00001000L
+#define SQ_IND_INDEX__FORCE_READ_MASK 0x00002000L
+#define SQ_IND_INDEX__READ_TIMEOUT_MASK 0x00004000L
+#define SQ_IND_INDEX__UNINDEXED_MASK 0x00008000L
+#define SQ_IND_INDEX__INDEX_MASK 0xFFFF0000L
+//SQ_IND_DATA
+#define SQ_IND_DATA__DATA__SHIFT 0x0
+#define SQ_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//SQ_CMD
+#define SQ_CMD__CMD__SHIFT 0x0
+#define SQ_CMD__MODE__SHIFT 0x4
+#define SQ_CMD__CHECK_VMID__SHIFT 0x7
+#define SQ_CMD__DATA__SHIFT 0x8
+#define SQ_CMD__WAVE_ID__SHIFT 0x10
+#define SQ_CMD__SIMD_ID__SHIFT 0x14
+#define SQ_CMD__QUEUE_ID__SHIFT 0x18
+#define SQ_CMD__VM_ID__SHIFT 0x1c
+#define SQ_CMD__CMD_MASK 0x00000007L
+#define SQ_CMD__MODE_MASK 0x00000070L
+#define SQ_CMD__CHECK_VMID_MASK 0x00000080L
+#define SQ_CMD__DATA_MASK 0x00000F00L
+#define SQ_CMD__WAVE_ID_MASK 0x000F0000L
+#define SQ_CMD__SIMD_ID_MASK 0x00300000L
+#define SQ_CMD__QUEUE_ID_MASK 0x07000000L
+#define SQ_CMD__VM_ID_MASK 0xF0000000L
+//SQ_TIME_HI
+#define SQ_TIME_HI__TIME__SHIFT 0x0
+#define SQ_TIME_HI__TIME_MASK 0xFFFFFFFFL
+//SQ_TIME_LO
+#define SQ_TIME_LO__TIME__SHIFT 0x0
+#define SQ_TIME_LO__TIME_MASK 0xFFFFFFFFL
+//SQ_DS_0
+#define SQ_DS_0__OFFSET0__SHIFT 0x0
+#define SQ_DS_0__OFFSET1__SHIFT 0x8
+#define SQ_DS_0__GDS__SHIFT 0x10
+#define SQ_DS_0__OP__SHIFT 0x11
+#define SQ_DS_0__ENCODING__SHIFT 0x1a
+#define SQ_DS_0__OFFSET0_MASK 0x000000FFL
+#define SQ_DS_0__OFFSET1_MASK 0x0000FF00L
+#define SQ_DS_0__GDS_MASK 0x00010000L
+#define SQ_DS_0__OP_MASK 0x01FE0000L
+#define SQ_DS_0__ENCODING_MASK 0xFC000000L
+//SQ_DS_1
+#define SQ_DS_1__ADDR__SHIFT 0x0
+#define SQ_DS_1__DATA0__SHIFT 0x8
+#define SQ_DS_1__DATA1__SHIFT 0x10
+#define SQ_DS_1__VDST__SHIFT 0x18
+#define SQ_DS_1__ADDR_MASK 0x000000FFL
+#define SQ_DS_1__DATA0_MASK 0x0000FF00L
+#define SQ_DS_1__DATA1_MASK 0x00FF0000L
+#define SQ_DS_1__VDST_MASK 0xFF000000L
+//SQ_EXP_0
+#define SQ_EXP_0__EN__SHIFT 0x0
+#define SQ_EXP_0__TGT__SHIFT 0x4
+#define SQ_EXP_0__COMPR__SHIFT 0xa
+#define SQ_EXP_0__DONE__SHIFT 0xb
+#define SQ_EXP_0__VM__SHIFT 0xc
+#define SQ_EXP_0__ENCODING__SHIFT 0x1a
+#define SQ_EXP_0__EN_MASK 0x0000000FL
+#define SQ_EXP_0__TGT_MASK 0x000003F0L
+#define SQ_EXP_0__COMPR_MASK 0x00000400L
+#define SQ_EXP_0__DONE_MASK 0x00000800L
+#define SQ_EXP_0__VM_MASK 0x00001000L
+#define SQ_EXP_0__ENCODING_MASK 0xFC000000L
+//SQ_EXP_1
+#define SQ_EXP_1__VSRC0__SHIFT 0x0
+#define SQ_EXP_1__VSRC1__SHIFT 0x8
+#define SQ_EXP_1__VSRC2__SHIFT 0x10
+#define SQ_EXP_1__VSRC3__SHIFT 0x18
+#define SQ_EXP_1__VSRC0_MASK 0x000000FFL
+#define SQ_EXP_1__VSRC1_MASK 0x0000FF00L
+#define SQ_EXP_1__VSRC2_MASK 0x00FF0000L
+#define SQ_EXP_1__VSRC3_MASK 0xFF000000L
+//SQ_FLAT_0
+#define SQ_FLAT_0__OFFSET__SHIFT 0x0
+#define SQ_FLAT_0__LDS__SHIFT 0xd
+#define SQ_FLAT_0__SEG__SHIFT 0xe
+#define SQ_FLAT_0__GLC__SHIFT 0x10
+#define SQ_FLAT_0__SLC__SHIFT 0x11
+#define SQ_FLAT_0__OP__SHIFT 0x12
+#define SQ_FLAT_0__ENCODING__SHIFT 0x1a
+#define SQ_FLAT_0__OFFSET_MASK 0x00000FFFL
+#define SQ_FLAT_0__LDS_MASK 0x00002000L
+#define SQ_FLAT_0__SEG_MASK 0x0000C000L
+#define SQ_FLAT_0__GLC_MASK 0x00010000L
+#define SQ_FLAT_0__SLC_MASK 0x00020000L
+#define SQ_FLAT_0__OP_MASK 0x01FC0000L
+#define SQ_FLAT_0__ENCODING_MASK 0xFC000000L
+//SQ_FLAT_1
+#define SQ_FLAT_1__ADDR__SHIFT 0x0
+#define SQ_FLAT_1__DATA__SHIFT 0x8
+#define SQ_FLAT_1__SADDR__SHIFT 0x10
+#define SQ_FLAT_1__NV__SHIFT 0x17
+#define SQ_FLAT_1__VDST__SHIFT 0x18
+#define SQ_FLAT_1__ADDR_MASK 0x000000FFL
+#define SQ_FLAT_1__DATA_MASK 0x0000FF00L
+#define SQ_FLAT_1__SADDR_MASK 0x007F0000L
+#define SQ_FLAT_1__NV_MASK 0x00800000L
+#define SQ_FLAT_1__VDST_MASK 0xFF000000L
+//SQ_GLBL_0
+#define SQ_GLBL_0__OFFSET__SHIFT 0x0
+#define SQ_GLBL_0__LDS__SHIFT 0xd
+#define SQ_GLBL_0__SEG__SHIFT 0xe
+#define SQ_GLBL_0__GLC__SHIFT 0x10
+#define SQ_GLBL_0__SLC__SHIFT 0x11
+#define SQ_GLBL_0__OP__SHIFT 0x12
+#define SQ_GLBL_0__ENCODING__SHIFT 0x1a
+#define SQ_GLBL_0__OFFSET_MASK 0x00001FFFL
+#define SQ_GLBL_0__LDS_MASK 0x00002000L
+#define SQ_GLBL_0__SEG_MASK 0x0000C000L
+#define SQ_GLBL_0__GLC_MASK 0x00010000L
+#define SQ_GLBL_0__SLC_MASK 0x00020000L
+#define SQ_GLBL_0__OP_MASK 0x01FC0000L
+#define SQ_GLBL_0__ENCODING_MASK 0xFC000000L
+//SQ_GLBL_1
+#define SQ_GLBL_1__ADDR__SHIFT 0x0
+#define SQ_GLBL_1__DATA__SHIFT 0x8
+#define SQ_GLBL_1__SADDR__SHIFT 0x10
+#define SQ_GLBL_1__NV__SHIFT 0x17
+#define SQ_GLBL_1__VDST__SHIFT 0x18
+#define SQ_GLBL_1__ADDR_MASK 0x000000FFL
+#define SQ_GLBL_1__DATA_MASK 0x0000FF00L
+#define SQ_GLBL_1__SADDR_MASK 0x007F0000L
+#define SQ_GLBL_1__NV_MASK 0x00800000L
+#define SQ_GLBL_1__VDST_MASK 0xFF000000L
+//SQ_INST
+#define SQ_INST__ENCODING__SHIFT 0x0
+#define SQ_INST__ENCODING_MASK 0xFFFFFFFFL
+//SQ_MIMG_0
+#define SQ_MIMG_0__OPM__SHIFT 0x0
+#define SQ_MIMG_0__DMASK__SHIFT 0x8
+#define SQ_MIMG_0__UNORM__SHIFT 0xc
+#define SQ_MIMG_0__GLC__SHIFT 0xd
+#define SQ_MIMG_0__DA__SHIFT 0xe
+#define SQ_MIMG_0__A16__SHIFT 0xf
+#define SQ_MIMG_0__TFE__SHIFT 0x10
+#define SQ_MIMG_0__LWE__SHIFT 0x11
+#define SQ_MIMG_0__OP__SHIFT 0x12
+#define SQ_MIMG_0__SLC__SHIFT 0x19
+#define SQ_MIMG_0__ENCODING__SHIFT 0x1a
+#define SQ_MIMG_0__OPM_MASK 0x00000001L
+#define SQ_MIMG_0__DMASK_MASK 0x00000F00L
+#define SQ_MIMG_0__UNORM_MASK 0x00001000L
+#define SQ_MIMG_0__GLC_MASK 0x00002000L
+#define SQ_MIMG_0__DA_MASK 0x00004000L
+#define SQ_MIMG_0__A16_MASK 0x00008000L
+#define SQ_MIMG_0__TFE_MASK 0x00010000L
+#define SQ_MIMG_0__LWE_MASK 0x00020000L
+#define SQ_MIMG_0__OP_MASK 0x01FC0000L
+#define SQ_MIMG_0__SLC_MASK 0x02000000L
+#define SQ_MIMG_0__ENCODING_MASK 0xFC000000L
+//SQ_MIMG_1
+#define SQ_MIMG_1__VADDR__SHIFT 0x0
+#define SQ_MIMG_1__VDATA__SHIFT 0x8
+#define SQ_MIMG_1__SRSRC__SHIFT 0x10
+#define SQ_MIMG_1__SSAMP__SHIFT 0x15
+#define SQ_MIMG_1__D16__SHIFT 0x1f
+#define SQ_MIMG_1__VADDR_MASK 0x000000FFL
+#define SQ_MIMG_1__VDATA_MASK 0x0000FF00L
+#define SQ_MIMG_1__SRSRC_MASK 0x001F0000L
+#define SQ_MIMG_1__SSAMP_MASK 0x03E00000L
+#define SQ_MIMG_1__D16_MASK 0x80000000L
+//SQ_MTBUF_0
+#define SQ_MTBUF_0__OFFSET__SHIFT 0x0
+#define SQ_MTBUF_0__OFFEN__SHIFT 0xc
+#define SQ_MTBUF_0__IDXEN__SHIFT 0xd
+#define SQ_MTBUF_0__GLC__SHIFT 0xe
+#define SQ_MTBUF_0__OP__SHIFT 0xf
+#define SQ_MTBUF_0__DFMT__SHIFT 0x13
+#define SQ_MTBUF_0__NFMT__SHIFT 0x17
+#define SQ_MTBUF_0__ENCODING__SHIFT 0x1a
+#define SQ_MTBUF_0__OFFSET_MASK 0x00000FFFL
+#define SQ_MTBUF_0__OFFEN_MASK 0x00001000L
+#define SQ_MTBUF_0__IDXEN_MASK 0x00002000L
+#define SQ_MTBUF_0__GLC_MASK 0x00004000L
+#define SQ_MTBUF_0__OP_MASK 0x00078000L
+#define SQ_MTBUF_0__DFMT_MASK 0x00780000L
+#define SQ_MTBUF_0__NFMT_MASK 0x03800000L
+#define SQ_MTBUF_0__ENCODING_MASK 0xFC000000L
+//SQ_MTBUF_1
+#define SQ_MTBUF_1__VADDR__SHIFT 0x0
+#define SQ_MTBUF_1__VDATA__SHIFT 0x8
+#define SQ_MTBUF_1__SRSRC__SHIFT 0x10
+#define SQ_MTBUF_1__SLC__SHIFT 0x16
+#define SQ_MTBUF_1__TFE__SHIFT 0x17
+#define SQ_MTBUF_1__SOFFSET__SHIFT 0x18
+#define SQ_MTBUF_1__VADDR_MASK 0x000000FFL
+#define SQ_MTBUF_1__VDATA_MASK 0x0000FF00L
+#define SQ_MTBUF_1__SRSRC_MASK 0x001F0000L
+#define SQ_MTBUF_1__SLC_MASK 0x00400000L
+#define SQ_MTBUF_1__TFE_MASK 0x00800000L
+#define SQ_MTBUF_1__SOFFSET_MASK 0xFF000000L
+//SQ_MUBUF_0
+#define SQ_MUBUF_0__OFFSET__SHIFT 0x0
+#define SQ_MUBUF_0__OFFEN__SHIFT 0xc
+#define SQ_MUBUF_0__IDXEN__SHIFT 0xd
+#define SQ_MUBUF_0__GLC__SHIFT 0xe
+#define SQ_MUBUF_0__LDS__SHIFT 0x10
+#define SQ_MUBUF_0__SLC__SHIFT 0x11
+#define SQ_MUBUF_0__OP__SHIFT 0x12
+#define SQ_MUBUF_0__ENCODING__SHIFT 0x1a
+#define SQ_MUBUF_0__OFFSET_MASK 0x00000FFFL
+#define SQ_MUBUF_0__OFFEN_MASK 0x00001000L
+#define SQ_MUBUF_0__IDXEN_MASK 0x00002000L
+#define SQ_MUBUF_0__GLC_MASK 0x00004000L
+#define SQ_MUBUF_0__LDS_MASK 0x00010000L
+#define SQ_MUBUF_0__SLC_MASK 0x00020000L
+#define SQ_MUBUF_0__OP_MASK 0x01FC0000L
+#define SQ_MUBUF_0__ENCODING_MASK 0xFC000000L
+//SQ_MUBUF_1
+#define SQ_MUBUF_1__VADDR__SHIFT 0x0
+#define SQ_MUBUF_1__VDATA__SHIFT 0x8
+#define SQ_MUBUF_1__SRSRC__SHIFT 0x10
+#define SQ_MUBUF_1__TFE__SHIFT 0x17
+#define SQ_MUBUF_1__SOFFSET__SHIFT 0x18
+#define SQ_MUBUF_1__VADDR_MASK 0x000000FFL
+#define SQ_MUBUF_1__VDATA_MASK 0x0000FF00L
+#define SQ_MUBUF_1__SRSRC_MASK 0x001F0000L
+#define SQ_MUBUF_1__TFE_MASK 0x00800000L
+#define SQ_MUBUF_1__SOFFSET_MASK 0xFF000000L
+//SQ_SCRATCH_0
+#define SQ_SCRATCH_0__OFFSET__SHIFT 0x0
+#define SQ_SCRATCH_0__LDS__SHIFT 0xd
+#define SQ_SCRATCH_0__SEG__SHIFT 0xe
+#define SQ_SCRATCH_0__GLC__SHIFT 0x10
+#define SQ_SCRATCH_0__SLC__SHIFT 0x11
+#define SQ_SCRATCH_0__OP__SHIFT 0x12
+#define SQ_SCRATCH_0__ENCODING__SHIFT 0x1a
+#define SQ_SCRATCH_0__OFFSET_MASK 0x00001FFFL
+#define SQ_SCRATCH_0__LDS_MASK 0x00002000L
+#define SQ_SCRATCH_0__SEG_MASK 0x0000C000L
+#define SQ_SCRATCH_0__GLC_MASK 0x00010000L
+#define SQ_SCRATCH_0__SLC_MASK 0x00020000L
+#define SQ_SCRATCH_0__OP_MASK 0x01FC0000L
+#define SQ_SCRATCH_0__ENCODING_MASK 0xFC000000L
+//SQ_SCRATCH_1
+#define SQ_SCRATCH_1__ADDR__SHIFT 0x0
+#define SQ_SCRATCH_1__DATA__SHIFT 0x8
+#define SQ_SCRATCH_1__SADDR__SHIFT 0x10
+#define SQ_SCRATCH_1__NV__SHIFT 0x17
+#define SQ_SCRATCH_1__VDST__SHIFT 0x18
+#define SQ_SCRATCH_1__ADDR_MASK 0x000000FFL
+#define SQ_SCRATCH_1__DATA_MASK 0x0000FF00L
+#define SQ_SCRATCH_1__SADDR_MASK 0x007F0000L
+#define SQ_SCRATCH_1__NV_MASK 0x00800000L
+#define SQ_SCRATCH_1__VDST_MASK 0xFF000000L
+//SQ_SMEM_0
+#define SQ_SMEM_0__SBASE__SHIFT 0x0
+#define SQ_SMEM_0__SDATA__SHIFT 0x6
+#define SQ_SMEM_0__SOFFSET_EN__SHIFT 0xe
+#define SQ_SMEM_0__NV__SHIFT 0xf
+#define SQ_SMEM_0__GLC__SHIFT 0x10
+#define SQ_SMEM_0__IMM__SHIFT 0x11
+#define SQ_SMEM_0__OP__SHIFT 0x12
+#define SQ_SMEM_0__ENCODING__SHIFT 0x1a
+#define SQ_SMEM_0__SBASE_MASK 0x0000003FL
+#define SQ_SMEM_0__SDATA_MASK 0x00001FC0L
+#define SQ_SMEM_0__SOFFSET_EN_MASK 0x00004000L
+#define SQ_SMEM_0__NV_MASK 0x00008000L
+#define SQ_SMEM_0__GLC_MASK 0x00010000L
+#define SQ_SMEM_0__IMM_MASK 0x00020000L
+#define SQ_SMEM_0__OP_MASK 0x03FC0000L
+#define SQ_SMEM_0__ENCODING_MASK 0xFC000000L
+//SQ_SMEM_1
+#define SQ_SMEM_1__OFFSET__SHIFT 0x0
+#define SQ_SMEM_1__SOFFSET__SHIFT 0x19
+#define SQ_SMEM_1__OFFSET_MASK 0x001FFFFFL
+#define SQ_SMEM_1__SOFFSET_MASK 0xFE000000L
+//SQ_SOP1
+#define SQ_SOP1__SSRC0__SHIFT 0x0
+#define SQ_SOP1__OP__SHIFT 0x8
+#define SQ_SOP1__SDST__SHIFT 0x10
+#define SQ_SOP1__ENCODING__SHIFT 0x17
+#define SQ_SOP1__SSRC0_MASK 0x000000FFL
+#define SQ_SOP1__OP_MASK 0x0000FF00L
+#define SQ_SOP1__SDST_MASK 0x007F0000L
+#define SQ_SOP1__ENCODING_MASK 0xFF800000L
+//SQ_SOP2
+#define SQ_SOP2__SSRC0__SHIFT 0x0
+#define SQ_SOP2__SSRC1__SHIFT 0x8
+#define SQ_SOP2__SDST__SHIFT 0x10
+#define SQ_SOP2__OP__SHIFT 0x17
+#define SQ_SOP2__ENCODING__SHIFT 0x1e
+#define SQ_SOP2__SSRC0_MASK 0x000000FFL
+#define SQ_SOP2__SSRC1_MASK 0x0000FF00L
+#define SQ_SOP2__SDST_MASK 0x007F0000L
+#define SQ_SOP2__OP_MASK 0x3F800000L
+#define SQ_SOP2__ENCODING_MASK 0xC0000000L
+//SQ_SOPC
+#define SQ_SOPC__SSRC0__SHIFT 0x0
+#define SQ_SOPC__SSRC1__SHIFT 0x8
+#define SQ_SOPC__OP__SHIFT 0x10
+#define SQ_SOPC__ENCODING__SHIFT 0x17
+#define SQ_SOPC__SSRC0_MASK 0x000000FFL
+#define SQ_SOPC__SSRC1_MASK 0x0000FF00L
+#define SQ_SOPC__OP_MASK 0x007F0000L
+#define SQ_SOPC__ENCODING_MASK 0xFF800000L
+//SQ_SOPK
+#define SQ_SOPK__SIMM16__SHIFT 0x0
+#define SQ_SOPK__SDST__SHIFT 0x10
+#define SQ_SOPK__OP__SHIFT 0x17
+#define SQ_SOPK__ENCODING__SHIFT 0x1c
+#define SQ_SOPK__SIMM16_MASK 0x0000FFFFL
+#define SQ_SOPK__SDST_MASK 0x007F0000L
+#define SQ_SOPK__OP_MASK 0x0F800000L
+#define SQ_SOPK__ENCODING_MASK 0xF0000000L
+//SQ_SOPP
+#define SQ_SOPP__SIMM16__SHIFT 0x0
+#define SQ_SOPP__OP__SHIFT 0x10
+#define SQ_SOPP__ENCODING__SHIFT 0x17
+#define SQ_SOPP__SIMM16_MASK 0x0000FFFFL
+#define SQ_SOPP__OP_MASK 0x007F0000L
+#define SQ_SOPP__ENCODING_MASK 0xFF800000L
+//SQ_VINTRP
+#define SQ_VINTRP__VSRC__SHIFT 0x0
+#define SQ_VINTRP__ATTRCHAN__SHIFT 0x8
+#define SQ_VINTRP__ATTR__SHIFT 0xa
+#define SQ_VINTRP__OP__SHIFT 0x10
+#define SQ_VINTRP__VDST__SHIFT 0x12
+#define SQ_VINTRP__ENCODING__SHIFT 0x1a
+#define SQ_VINTRP__VSRC_MASK 0x000000FFL
+#define SQ_VINTRP__ATTRCHAN_MASK 0x00000300L
+#define SQ_VINTRP__ATTR_MASK 0x0000FC00L
+#define SQ_VINTRP__OP_MASK 0x00030000L
+#define SQ_VINTRP__VDST_MASK 0x03FC0000L
+#define SQ_VINTRP__ENCODING_MASK 0xFC000000L
+//SQ_VOP1
+#define SQ_VOP1__SRC0__SHIFT 0x0
+#define SQ_VOP1__OP__SHIFT 0x9
+#define SQ_VOP1__VDST__SHIFT 0x11
+#define SQ_VOP1__ENCODING__SHIFT 0x19
+#define SQ_VOP1__SRC0_MASK 0x000001FFL
+#define SQ_VOP1__OP_MASK 0x0001FE00L
+#define SQ_VOP1__VDST_MASK 0x01FE0000L
+#define SQ_VOP1__ENCODING_MASK 0xFE000000L
+//SQ_VOP2
+#define SQ_VOP2__SRC0__SHIFT 0x0
+#define SQ_VOP2__VSRC1__SHIFT 0x9
+#define SQ_VOP2__VDST__SHIFT 0x11
+#define SQ_VOP2__OP__SHIFT 0x19
+#define SQ_VOP2__ENCODING__SHIFT 0x1f
+#define SQ_VOP2__SRC0_MASK 0x000001FFL
+#define SQ_VOP2__VSRC1_MASK 0x0001FE00L
+#define SQ_VOP2__VDST_MASK 0x01FE0000L
+#define SQ_VOP2__OP_MASK 0x7E000000L
+#define SQ_VOP2__ENCODING_MASK 0x80000000L
+//SQ_VOP3P_0
+#define SQ_VOP3P_0__VDST__SHIFT 0x0
+#define SQ_VOP3P_0__NEG_HI__SHIFT 0x8
+#define SQ_VOP3P_0__OP_SEL__SHIFT 0xb
+#define SQ_VOP3P_0__OP_SEL_HI_2__SHIFT 0xe
+#define SQ_VOP3P_0__CLAMP__SHIFT 0xf
+#define SQ_VOP3P_0__OP__SHIFT 0x10
+#define SQ_VOP3P_0__ENCODING__SHIFT 0x17
+#define SQ_VOP3P_0__VDST_MASK 0x000000FFL
+#define SQ_VOP3P_0__NEG_HI_MASK 0x00000700L
+#define SQ_VOP3P_0__OP_SEL_MASK 0x00003800L
+#define SQ_VOP3P_0__OP_SEL_HI_2_MASK 0x00004000L
+#define SQ_VOP3P_0__CLAMP_MASK 0x00008000L
+#define SQ_VOP3P_0__OP_MASK 0x007F0000L
+#define SQ_VOP3P_0__ENCODING_MASK 0xFF800000L
+//SQ_VOP3P_1
+#define SQ_VOP3P_1__SRC0__SHIFT 0x0
+#define SQ_VOP3P_1__SRC1__SHIFT 0x9
+#define SQ_VOP3P_1__SRC2__SHIFT 0x12
+#define SQ_VOP3P_1__OP_SEL_HI__SHIFT 0x1b
+#define SQ_VOP3P_1__NEG__SHIFT 0x1d
+#define SQ_VOP3P_1__SRC0_MASK 0x000001FFL
+#define SQ_VOP3P_1__SRC1_MASK 0x0003FE00L
+#define SQ_VOP3P_1__SRC2_MASK 0x07FC0000L
+#define SQ_VOP3P_1__OP_SEL_HI_MASK 0x18000000L
+#define SQ_VOP3P_1__NEG_MASK 0xE0000000L
+//SQ_VOP3_0
+#define SQ_VOP3_0__VDST__SHIFT 0x0
+#define SQ_VOP3_0__ABS__SHIFT 0x8
+#define SQ_VOP3_0__OP_SEL__SHIFT 0xb
+#define SQ_VOP3_0__CLAMP__SHIFT 0xf
+#define SQ_VOP3_0__OP__SHIFT 0x10
+#define SQ_VOP3_0__ENCODING__SHIFT 0x1a
+#define SQ_VOP3_0__VDST_MASK 0x000000FFL
+#define SQ_VOP3_0__ABS_MASK 0x00000700L
+#define SQ_VOP3_0__OP_SEL_MASK 0x00007800L
+#define SQ_VOP3_0__CLAMP_MASK 0x00008000L
+#define SQ_VOP3_0__OP_MASK 0x03FF0000L
+#define SQ_VOP3_0__ENCODING_MASK 0xFC000000L
+//SQ_VOP3_0_SDST_ENC
+#define SQ_VOP3_0_SDST_ENC__VDST__SHIFT 0x0
+#define SQ_VOP3_0_SDST_ENC__SDST__SHIFT 0x8
+#define SQ_VOP3_0_SDST_ENC__CLAMP__SHIFT 0xf
+#define SQ_VOP3_0_SDST_ENC__OP__SHIFT 0x10
+#define SQ_VOP3_0_SDST_ENC__ENCODING__SHIFT 0x1a
+#define SQ_VOP3_0_SDST_ENC__VDST_MASK 0x000000FFL
+#define SQ_VOP3_0_SDST_ENC__SDST_MASK 0x00007F00L
+#define SQ_VOP3_0_SDST_ENC__CLAMP_MASK 0x00008000L
+#define SQ_VOP3_0_SDST_ENC__OP_MASK 0x03FF0000L
+#define SQ_VOP3_0_SDST_ENC__ENCODING_MASK 0xFC000000L
+//SQ_VOP3_1
+#define SQ_VOP3_1__SRC0__SHIFT 0x0
+#define SQ_VOP3_1__SRC1__SHIFT 0x9
+#define SQ_VOP3_1__SRC2__SHIFT 0x12
+#define SQ_VOP3_1__OMOD__SHIFT 0x1b
+#define SQ_VOP3_1__NEG__SHIFT 0x1d
+#define SQ_VOP3_1__SRC0_MASK 0x000001FFL
+#define SQ_VOP3_1__SRC1_MASK 0x0003FE00L
+#define SQ_VOP3_1__SRC2_MASK 0x07FC0000L
+#define SQ_VOP3_1__OMOD_MASK 0x18000000L
+#define SQ_VOP3_1__NEG_MASK 0xE0000000L
+//SQ_VOPC
+#define SQ_VOPC__SRC0__SHIFT 0x0
+#define SQ_VOPC__VSRC1__SHIFT 0x9
+#define SQ_VOPC__OP__SHIFT 0x11
+#define SQ_VOPC__ENCODING__SHIFT 0x19
+#define SQ_VOPC__SRC0_MASK 0x000001FFL
+#define SQ_VOPC__VSRC1_MASK 0x0001FE00L
+#define SQ_VOPC__OP_MASK 0x01FE0000L
+#define SQ_VOPC__ENCODING_MASK 0xFE000000L
+//SQ_VOP_DPP
+#define SQ_VOP_DPP__SRC0__SHIFT 0x0
+#define SQ_VOP_DPP__DPP_CTRL__SHIFT 0x8
+#define SQ_VOP_DPP__BOUND_CTRL__SHIFT 0x13
+#define SQ_VOP_DPP__SRC0_NEG__SHIFT 0x14
+#define SQ_VOP_DPP__SRC0_ABS__SHIFT 0x15
+#define SQ_VOP_DPP__SRC1_NEG__SHIFT 0x16
+#define SQ_VOP_DPP__SRC1_ABS__SHIFT 0x17
+#define SQ_VOP_DPP__BANK_MASK__SHIFT 0x18
+#define SQ_VOP_DPP__ROW_MASK__SHIFT 0x1c
+#define SQ_VOP_DPP__SRC0_MASK 0x000000FFL
+#define SQ_VOP_DPP__DPP_CTRL_MASK 0x0001FF00L
+#define SQ_VOP_DPP__BOUND_CTRL_MASK 0x00080000L
+#define SQ_VOP_DPP__SRC0_NEG_MASK 0x00100000L
+#define SQ_VOP_DPP__SRC0_ABS_MASK 0x00200000L
+#define SQ_VOP_DPP__SRC1_NEG_MASK 0x00400000L
+#define SQ_VOP_DPP__SRC1_ABS_MASK 0x00800000L
+#define SQ_VOP_DPP__BANK_MASK_MASK 0x0F000000L
+#define SQ_VOP_DPP__ROW_MASK_MASK 0xF0000000L
+//SQ_VOP_SDWA
+#define SQ_VOP_SDWA__SRC0__SHIFT 0x0
+#define SQ_VOP_SDWA__DST_SEL__SHIFT 0x8
+#define SQ_VOP_SDWA__DST_UNUSED__SHIFT 0xb
+#define SQ_VOP_SDWA__CLAMP__SHIFT 0xd
+#define SQ_VOP_SDWA__OMOD__SHIFT 0xe
+#define SQ_VOP_SDWA__SRC0_SEL__SHIFT 0x10
+#define SQ_VOP_SDWA__SRC0_SEXT__SHIFT 0x13
+#define SQ_VOP_SDWA__SRC0_NEG__SHIFT 0x14
+#define SQ_VOP_SDWA__SRC0_ABS__SHIFT 0x15
+#define SQ_VOP_SDWA__S0__SHIFT 0x17
+#define SQ_VOP_SDWA__SRC1_SEL__SHIFT 0x18
+#define SQ_VOP_SDWA__SRC1_SEXT__SHIFT 0x1b
+#define SQ_VOP_SDWA__SRC1_NEG__SHIFT 0x1c
+#define SQ_VOP_SDWA__SRC1_ABS__SHIFT 0x1d
+#define SQ_VOP_SDWA__S1__SHIFT 0x1f
+#define SQ_VOP_SDWA__SRC0_MASK 0x000000FFL
+#define SQ_VOP_SDWA__DST_SEL_MASK 0x00000700L
+#define SQ_VOP_SDWA__DST_UNUSED_MASK 0x00001800L
+#define SQ_VOP_SDWA__CLAMP_MASK 0x00002000L
+#define SQ_VOP_SDWA__OMOD_MASK 0x0000C000L
+#define SQ_VOP_SDWA__SRC0_SEL_MASK 0x00070000L
+#define SQ_VOP_SDWA__SRC0_SEXT_MASK 0x00080000L
+#define SQ_VOP_SDWA__SRC0_NEG_MASK 0x00100000L
+#define SQ_VOP_SDWA__SRC0_ABS_MASK 0x00200000L
+#define SQ_VOP_SDWA__S0_MASK 0x00800000L
+#define SQ_VOP_SDWA__SRC1_SEL_MASK 0x07000000L
+#define SQ_VOP_SDWA__SRC1_SEXT_MASK 0x08000000L
+#define SQ_VOP_SDWA__SRC1_NEG_MASK 0x10000000L
+#define SQ_VOP_SDWA__SRC1_ABS_MASK 0x20000000L
+#define SQ_VOP_SDWA__S1_MASK 0x80000000L
+//SQ_VOP_SDWA_SDST_ENC
+#define SQ_VOP_SDWA_SDST_ENC__SRC0__SHIFT 0x0
+#define SQ_VOP_SDWA_SDST_ENC__SDST__SHIFT 0x8
+#define SQ_VOP_SDWA_SDST_ENC__SD__SHIFT 0xf
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEL__SHIFT 0x10
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEXT__SHIFT 0x13
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_NEG__SHIFT 0x14
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_ABS__SHIFT 0x15
+#define SQ_VOP_SDWA_SDST_ENC__S0__SHIFT 0x17
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEL__SHIFT 0x18
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEXT__SHIFT 0x1b
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_NEG__SHIFT 0x1c
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_ABS__SHIFT 0x1d
+#define SQ_VOP_SDWA_SDST_ENC__S1__SHIFT 0x1f
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_MASK 0x000000FFL
+#define SQ_VOP_SDWA_SDST_ENC__SDST_MASK 0x00007F00L
+#define SQ_VOP_SDWA_SDST_ENC__SD_MASK 0x00008000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEL_MASK 0x00070000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_SEXT_MASK 0x00080000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_NEG_MASK 0x00100000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC0_ABS_MASK 0x00200000L
+#define SQ_VOP_SDWA_SDST_ENC__S0_MASK 0x00800000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEL_MASK 0x07000000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_SEXT_MASK 0x08000000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_NEG_MASK 0x10000000L
+#define SQ_VOP_SDWA_SDST_ENC__SRC1_ABS_MASK 0x20000000L
+#define SQ_VOP_SDWA_SDST_ENC__S1_MASK 0x80000000L
+//SQ_LB_CTR_CTRL
+#define SQ_LB_CTR_CTRL__START__SHIFT 0x0
+#define SQ_LB_CTR_CTRL__LOAD__SHIFT 0x1
+#define SQ_LB_CTR_CTRL__CLEAR__SHIFT 0x2
+#define SQ_LB_CTR_CTRL__START_MASK 0x00000001L
+#define SQ_LB_CTR_CTRL__LOAD_MASK 0x00000002L
+#define SQ_LB_CTR_CTRL__CLEAR_MASK 0x00000004L
+//SQ_LB_DATA0
+#define SQ_LB_DATA0__DATA__SHIFT 0x0
+#define SQ_LB_DATA0__DATA_MASK 0xFFFFFFFFL
+//SQ_LB_DATA1
+#define SQ_LB_DATA1__DATA__SHIFT 0x0
+#define SQ_LB_DATA1__DATA_MASK 0xFFFFFFFFL
+//SQ_LB_DATA2
+#define SQ_LB_DATA2__DATA__SHIFT 0x0
+#define SQ_LB_DATA2__DATA_MASK 0xFFFFFFFFL
+//SQ_LB_DATA3
+#define SQ_LB_DATA3__DATA__SHIFT 0x0
+#define SQ_LB_DATA3__DATA_MASK 0xFFFFFFFFL
+//SQ_LB_CTR_SEL
+#define SQ_LB_CTR_SEL__SEL0__SHIFT 0x0
+#define SQ_LB_CTR_SEL__SEL1__SHIFT 0x4
+#define SQ_LB_CTR_SEL__SEL2__SHIFT 0x8
+#define SQ_LB_CTR_SEL__SEL3__SHIFT 0xc
+#define SQ_LB_CTR_SEL__SEL0_MASK 0x0000000FL
+#define SQ_LB_CTR_SEL__SEL1_MASK 0x000000F0L
+#define SQ_LB_CTR_SEL__SEL2_MASK 0x00000F00L
+#define SQ_LB_CTR_SEL__SEL3_MASK 0x0000F000L
+//SQ_LB_CTR0_CU
+#define SQ_LB_CTR0_CU__SH0_MASK__SHIFT 0x0
+#define SQ_LB_CTR0_CU__SH1_MASK__SHIFT 0x10
+#define SQ_LB_CTR0_CU__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_LB_CTR0_CU__SH1_MASK_MASK 0xFFFF0000L
+//SQ_LB_CTR1_CU
+#define SQ_LB_CTR1_CU__SH0_MASK__SHIFT 0x0
+#define SQ_LB_CTR1_CU__SH1_MASK__SHIFT 0x10
+#define SQ_LB_CTR1_CU__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_LB_CTR1_CU__SH1_MASK_MASK 0xFFFF0000L
+//SQ_LB_CTR2_CU
+#define SQ_LB_CTR2_CU__SH0_MASK__SHIFT 0x0
+#define SQ_LB_CTR2_CU__SH1_MASK__SHIFT 0x10
+#define SQ_LB_CTR2_CU__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_LB_CTR2_CU__SH1_MASK_MASK 0xFFFF0000L
+//SQ_LB_CTR3_CU
+#define SQ_LB_CTR3_CU__SH0_MASK__SHIFT 0x0
+#define SQ_LB_CTR3_CU__SH1_MASK__SHIFT 0x10
+#define SQ_LB_CTR3_CU__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_LB_CTR3_CU__SH1_MASK_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_CMN
+#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_CMN__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_CMN__TIME_DELTA_MASK 0x0010L
+//SQ_THREAD_TRACE_WORD_EVENT
+#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_EVENT__STAGE__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_EVENT__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_EVENT__TIME_DELTA_MASK 0x0010L
+#define SQ_THREAD_TRACE_WORD_EVENT__SH_ID_MASK 0x0020L
+#define SQ_THREAD_TRACE_WORD_EVENT__STAGE_MASK 0x01C0L
+#define SQ_THREAD_TRACE_WORD_EVENT__EVENT_TYPE_MASK 0xFC00L
+//SQ_THREAD_TRACE_WORD_INST
+#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID__SHIFT 0x9
+#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE__SHIFT 0xb
+#define SQ_THREAD_TRACE_WORD_INST__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_INST__TIME_DELTA_MASK 0x0010L
+#define SQ_THREAD_TRACE_WORD_INST__WAVE_ID_MASK 0x01E0L
+#define SQ_THREAD_TRACE_WORD_INST__SIMD_ID_MASK 0x0600L
+#define SQ_THREAD_TRACE_WORD_INST__INST_TYPE_MASK 0xF800L
+//SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID__SHIFT 0x9
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TRAP_ERROR__SHIFT 0xf
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__WAVE_ID_MASK 0x000001E0L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__SIMD_ID_MASK 0x00000600L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__TRAP_ERROR_MASK 0x00008000L
+#define SQ_THREAD_TRACE_WORD_INST_PC_1_OF_2__PC_LO_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__CU_ID_MASK 0x000003C0L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__WAVE_ID_MASK 0x00003C00L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__SIMD_ID_MASK 0x0000C000L
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2__DATA_LO_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_ISSUE
+#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST0__SHIFT 0x8
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST1__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST2__SHIFT 0xc
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST3__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST4__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST5__SHIFT 0x12
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST6__SHIFT 0x14
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST7__SHIFT 0x16
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST8__SHIFT 0x18
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST9__SHIFT 0x1a
+#define SQ_THREAD_TRACE_WORD_ISSUE__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_ISSUE__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_ISSUE__SIMD_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST0_MASK 0x00000300L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST1_MASK 0x00000C00L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST2_MASK 0x00003000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST3_MASK 0x0000C000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST4_MASK 0x00030000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST5_MASK 0x000C0000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST6_MASK 0x00300000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST7_MASK 0x00C00000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST8_MASK 0x03000000L
+#define SQ_THREAD_TRACE_WORD_ISSUE__INST9_MASK 0x0C000000L
+//SQ_THREAD_TRACE_WORD_MISC
+#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_MISC__SH_ID__SHIFT 0xc
+#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE__SHIFT 0xd
+#define SQ_THREAD_TRACE_WORD_MISC__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_MISC__TIME_DELTA_MASK 0x0FF0L
+#define SQ_THREAD_TRACE_WORD_MISC__SH_ID_MASK 0x1000L
+#define SQ_THREAD_TRACE_WORD_MISC__MISC_TOKEN_TYPE_MASK 0xE000L
+//SQ_THREAD_TRACE_WORD_PERF_1_OF_2
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0__SHIFT 0xc
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO__SHIFT 0x19
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CU_ID_MASK 0x000003C0L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR_BANK_MASK 0x00000C00L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR0_MASK 0x01FFF000L
+#define SQ_THREAD_TRACE_WORD_PERF_1_OF_2__CNTR1_LO_MASK 0xFE000000L
+//SQ_THREAD_TRACE_WORD_REG_1_OF_2
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID__SHIFT 0x7
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV__SHIFT 0x9
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP__SHIFT 0xf
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__PIPE_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__ME_ID_MASK 0x00000180L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_DROPPED_PREV_MASK 0x00000200L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_TYPE_MASK 0x00001C00L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_PRIV_MASK 0x00004000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_OP_MASK 0x00008000L
+#define SQ_THREAD_TRACE_WORD_REG_1_OF_2__REG_ADDR_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_REG_2_OF_2
+#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_REG_2_OF_2__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID__SHIFT 0x7
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR__SHIFT 0x9
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__PIPE_ID_MASK 0x00000060L
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__ME_ID_MASK 0x00000180L
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__REG_ADDR_MASK 0x0000FE00L
+#define SQ_THREAD_TRACE_WORD_REG_CS_1_OF_2__DATA_LO_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2
+#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_REG_CS_2_OF_2__DATA_HI_MASK 0x0000FFFFL
+//SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2__TIME_LO_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_WORD_WAVE
+#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_WAVE__TOKEN_TYPE_MASK 0x000FL
+#define SQ_THREAD_TRACE_WORD_WAVE__TIME_DELTA_MASK 0x0010L
+#define SQ_THREAD_TRACE_WORD_WAVE__SH_ID_MASK 0x0020L
+#define SQ_THREAD_TRACE_WORD_WAVE__CU_ID_MASK 0x03C0L
+#define SQ_THREAD_TRACE_WORD_WAVE__WAVE_ID_MASK 0x3C00L
+#define SQ_THREAD_TRACE_WORD_WAVE__SIMD_ID_MASK 0xC000L
+//SQ_THREAD_TRACE_WORD_WAVE_START
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA__SHIFT 0x4
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID__SHIFT 0x5
+#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID__SHIFT 0xa
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID__SHIFT 0xe
+#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER__SHIFT 0x10
+#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED__SHIFT 0x15
+#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT__SHIFT 0x16
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID__SHIFT 0x1d
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TOKEN_TYPE_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TIME_DELTA_MASK 0x00000010L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SH_ID_MASK 0x00000020L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__CU_ID_MASK 0x000003C0L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__WAVE_ID_MASK 0x00003C00L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__SIMD_ID_MASK 0x0000C000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__DISPATCHER_MASK 0x001F0000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__VS_NO_ALLOC_OR_GROUPED_MASK 0x00200000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__COUNT_MASK 0x1FC00000L
+#define SQ_THREAD_TRACE_WORD_WAVE_START__TG_ID_MASK 0xE0000000L
+//SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2
+#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST_PC_2_OF_2__PC_HI_MASK 0x00FFFFFFL
+//SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2__DATA_HI_MASK 0xFFFFL
+//SQ_THREAD_TRACE_WORD_PERF_2_OF_2
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2__SHIFT 0x6
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3__SHIFT 0x13
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR1_HI_MASK 0x0000003FL
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR2_MASK 0x0007FFC0L
+#define SQ_THREAD_TRACE_WORD_PERF_2_OF_2__CNTR3_MASK 0xFFF80000L
+//SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2__TIME_HI_MASK 0xFFFFFFFFL
+//SQ_WREXEC_EXEC_HI
+#define SQ_WREXEC_EXEC_HI__ADDR_HI__SHIFT 0x0
+#define SQ_WREXEC_EXEC_HI__FIRST_WAVE__SHIFT 0x1a
+#define SQ_WREXEC_EXEC_HI__ATC__SHIFT 0x1b
+#define SQ_WREXEC_EXEC_HI__MTYPE__SHIFT 0x1c
+#define SQ_WREXEC_EXEC_HI__MSB__SHIFT 0x1f
+#define SQ_WREXEC_EXEC_HI__ADDR_HI_MASK 0x0000FFFFL
+#define SQ_WREXEC_EXEC_HI__FIRST_WAVE_MASK 0x04000000L
+#define SQ_WREXEC_EXEC_HI__ATC_MASK 0x08000000L
+#define SQ_WREXEC_EXEC_HI__MTYPE_MASK 0x70000000L
+#define SQ_WREXEC_EXEC_HI__MSB_MASK 0x80000000L
+//SQ_WREXEC_EXEC_LO
+#define SQ_WREXEC_EXEC_LO__ADDR_LO__SHIFT 0x0
+#define SQ_WREXEC_EXEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_BUF_RSRC_WORD0
+#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
+#define SQ_BUF_RSRC_WORD0__BASE_ADDRESS_MASK 0xFFFFFFFFL
+//SQ_BUF_RSRC_WORD1
+#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
+#define SQ_BUF_RSRC_WORD1__STRIDE__SHIFT 0x10
+#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE__SHIFT 0x1e
+#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE__SHIFT 0x1f
+#define SQ_BUF_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x0000FFFFL
+#define SQ_BUF_RSRC_WORD1__STRIDE_MASK 0x3FFF0000L
+#define SQ_BUF_RSRC_WORD1__CACHE_SWIZZLE_MASK 0x40000000L
+#define SQ_BUF_RSRC_WORD1__SWIZZLE_ENABLE_MASK 0x80000000L
+//SQ_BUF_RSRC_WORD2
+#define SQ_BUF_RSRC_WORD2__NUM_RECORDS__SHIFT 0x0
+#define SQ_BUF_RSRC_WORD2__NUM_RECORDS_MASK 0xFFFFFFFFL
+//SQ_BUF_RSRC_WORD3
+#define SQ_BUF_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
+#define SQ_BUF_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
+#define SQ_BUF_RSRC_WORD3__NUM_FORMAT__SHIFT 0xc
+#define SQ_BUF_RSRC_WORD3__DATA_FORMAT__SHIFT 0xf
+#define SQ_BUF_RSRC_WORD3__USER_VM_ENABLE__SHIFT 0x13
+#define SQ_BUF_RSRC_WORD3__USER_VM_MODE__SHIFT 0x14
+#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE__SHIFT 0x15
+#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE__SHIFT 0x17
+#define SQ_BUF_RSRC_WORD3__NV__SHIFT 0x1b
+#define SQ_BUF_RSRC_WORD3__TYPE__SHIFT 0x1e
+#define SQ_BUF_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_Z_MASK 0x000001C0L
+#define SQ_BUF_RSRC_WORD3__DST_SEL_W_MASK 0x00000E00L
+#define SQ_BUF_RSRC_WORD3__NUM_FORMAT_MASK 0x00007000L
+#define SQ_BUF_RSRC_WORD3__DATA_FORMAT_MASK 0x00078000L
+#define SQ_BUF_RSRC_WORD3__USER_VM_ENABLE_MASK 0x00080000L
+#define SQ_BUF_RSRC_WORD3__USER_VM_MODE_MASK 0x00100000L
+#define SQ_BUF_RSRC_WORD3__INDEX_STRIDE_MASK 0x00600000L
+#define SQ_BUF_RSRC_WORD3__ADD_TID_ENABLE_MASK 0x00800000L
+#define SQ_BUF_RSRC_WORD3__NV_MASK 0x08000000L
+#define SQ_BUF_RSRC_WORD3__TYPE_MASK 0xC0000000L
+//SQ_IMG_RSRC_WORD0
+#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD0__BASE_ADDRESS_MASK 0xFFFFFFFFL
+//SQ_IMG_RSRC_WORD1
+#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD1__MIN_LOD__SHIFT 0x8
+#define SQ_IMG_RSRC_WORD1__DATA_FORMAT__SHIFT 0x14
+#define SQ_IMG_RSRC_WORD1__NUM_FORMAT__SHIFT 0x1a
+#define SQ_IMG_RSRC_WORD1__NV__SHIFT 0x1e
+#define SQ_IMG_RSRC_WORD1__META_DIRECT__SHIFT 0x1f
+#define SQ_IMG_RSRC_WORD1__BASE_ADDRESS_HI_MASK 0x000000FFL
+#define SQ_IMG_RSRC_WORD1__MIN_LOD_MASK 0x000FFF00L
+#define SQ_IMG_RSRC_WORD1__DATA_FORMAT_MASK 0x03F00000L
+#define SQ_IMG_RSRC_WORD1__NUM_FORMAT_MASK 0x3C000000L
+#define SQ_IMG_RSRC_WORD1__NV_MASK 0x40000000L
+#define SQ_IMG_RSRC_WORD1__META_DIRECT_MASK 0x80000000L
+//SQ_IMG_RSRC_WORD2
+#define SQ_IMG_RSRC_WORD2__WIDTH__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD2__HEIGHT__SHIFT 0xe
+#define SQ_IMG_RSRC_WORD2__PERF_MOD__SHIFT 0x1c
+#define SQ_IMG_RSRC_WORD2__WIDTH_MASK 0x00003FFFL
+#define SQ_IMG_RSRC_WORD2__HEIGHT_MASK 0x0FFFC000L
+#define SQ_IMG_RSRC_WORD2__PERF_MOD_MASK 0x70000000L
+//SQ_IMG_RSRC_WORD3
+#define SQ_IMG_RSRC_WORD3__DST_SEL_X__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Y__SHIFT 0x3
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Z__SHIFT 0x6
+#define SQ_IMG_RSRC_WORD3__DST_SEL_W__SHIFT 0x9
+#define SQ_IMG_RSRC_WORD3__BASE_LEVEL__SHIFT 0xc
+#define SQ_IMG_RSRC_WORD3__LAST_LEVEL__SHIFT 0x10
+#define SQ_IMG_RSRC_WORD3__SW_MODE__SHIFT 0x14
+#define SQ_IMG_RSRC_WORD3__TYPE__SHIFT 0x1c
+#define SQ_IMG_RSRC_WORD3__DST_SEL_X_MASK 0x00000007L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Y_MASK 0x00000038L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_Z_MASK 0x000001C0L
+#define SQ_IMG_RSRC_WORD3__DST_SEL_W_MASK 0x00000E00L
+#define SQ_IMG_RSRC_WORD3__BASE_LEVEL_MASK 0x0000F000L
+#define SQ_IMG_RSRC_WORD3__LAST_LEVEL_MASK 0x000F0000L
+#define SQ_IMG_RSRC_WORD3__SW_MODE_MASK 0x01F00000L
+#define SQ_IMG_RSRC_WORD3__TYPE_MASK 0xF0000000L
+//SQ_IMG_RSRC_WORD4
+#define SQ_IMG_RSRC_WORD4__DEPTH__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD4__PITCH__SHIFT 0xd
+#define SQ_IMG_RSRC_WORD4__BC_SWIZZLE__SHIFT 0x1d
+#define SQ_IMG_RSRC_WORD4__DEPTH_MASK 0x00001FFFL
+#define SQ_IMG_RSRC_WORD4__PITCH_MASK 0x1FFFE000L
+#define SQ_IMG_RSRC_WORD4__BC_SWIZZLE_MASK 0xE0000000L
+//SQ_IMG_RSRC_WORD5
+#define SQ_IMG_RSRC_WORD5__BASE_ARRAY__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD5__ARRAY_PITCH__SHIFT 0xd
+#define SQ_IMG_RSRC_WORD5__META_DATA_ADDRESS__SHIFT 0x11
+#define SQ_IMG_RSRC_WORD5__META_LINEAR__SHIFT 0x19
+#define SQ_IMG_RSRC_WORD5__META_PIPE_ALIGNED__SHIFT 0x1a
+#define SQ_IMG_RSRC_WORD5__META_RB_ALIGNED__SHIFT 0x1b
+#define SQ_IMG_RSRC_WORD5__MAX_MIP__SHIFT 0x1c
+#define SQ_IMG_RSRC_WORD5__BASE_ARRAY_MASK 0x00001FFFL
+#define SQ_IMG_RSRC_WORD5__ARRAY_PITCH_MASK 0x0001E000L
+#define SQ_IMG_RSRC_WORD5__META_DATA_ADDRESS_MASK 0x01FE0000L
+#define SQ_IMG_RSRC_WORD5__META_LINEAR_MASK 0x02000000L
+#define SQ_IMG_RSRC_WORD5__META_PIPE_ALIGNED_MASK 0x04000000L
+#define SQ_IMG_RSRC_WORD5__META_RB_ALIGNED_MASK 0x08000000L
+#define SQ_IMG_RSRC_WORD5__MAX_MIP_MASK 0xF0000000L
+//SQ_IMG_RSRC_WORD6
+#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID__SHIFT 0xc
+#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN__SHIFT 0x14
+#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN__SHIFT 0x15
+#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB__SHIFT 0x16
+#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM__SHIFT 0x17
+#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS__SHIFT 0x18
+#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS__SHIFT 0x1c
+#define SQ_IMG_RSRC_WORD6__MIN_LOD_WARN_MASK 0x00000FFFL
+#define SQ_IMG_RSRC_WORD6__COUNTER_BANK_ID_MASK 0x000FF000L
+#define SQ_IMG_RSRC_WORD6__LOD_HDW_CNT_EN_MASK 0x00100000L
+#define SQ_IMG_RSRC_WORD6__COMPRESSION_EN_MASK 0x00200000L
+#define SQ_IMG_RSRC_WORD6__ALPHA_IS_ON_MSB_MASK 0x00400000L
+#define SQ_IMG_RSRC_WORD6__COLOR_TRANSFORM_MASK 0x00800000L
+#define SQ_IMG_RSRC_WORD6__LOST_ALPHA_BITS_MASK 0x0F000000L
+#define SQ_IMG_RSRC_WORD6__LOST_COLOR_BITS_MASK 0xF0000000L
+//SQ_IMG_RSRC_WORD7
+#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS__SHIFT 0x0
+#define SQ_IMG_RSRC_WORD7__META_DATA_ADDRESS_MASK 0xFFFFFFFFL
+//SQ_IMG_SAMP_WORD0
+#define SQ_IMG_SAMP_WORD0__CLAMP_X__SHIFT 0x0
+#define SQ_IMG_SAMP_WORD0__CLAMP_Y__SHIFT 0x3
+#define SQ_IMG_SAMP_WORD0__CLAMP_Z__SHIFT 0x6
+#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO__SHIFT 0x9
+#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC__SHIFT 0xc
+#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED__SHIFT 0xf
+#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD__SHIFT 0x10
+#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC__SHIFT 0x13
+#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA__SHIFT 0x14
+#define SQ_IMG_SAMP_WORD0__ANISO_BIAS__SHIFT 0x15
+#define SQ_IMG_SAMP_WORD0__TRUNC_COORD__SHIFT 0x1b
+#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP__SHIFT 0x1c
+#define SQ_IMG_SAMP_WORD0__FILTER_MODE__SHIFT 0x1d
+#define SQ_IMG_SAMP_WORD0__COMPAT_MODE__SHIFT 0x1f
+#define SQ_IMG_SAMP_WORD0__CLAMP_X_MASK 0x00000007L
+#define SQ_IMG_SAMP_WORD0__CLAMP_Y_MASK 0x00000038L
+#define SQ_IMG_SAMP_WORD0__CLAMP_Z_MASK 0x000001C0L
+#define SQ_IMG_SAMP_WORD0__MAX_ANISO_RATIO_MASK 0x00000E00L
+#define SQ_IMG_SAMP_WORD0__DEPTH_COMPARE_FUNC_MASK 0x00007000L
+#define SQ_IMG_SAMP_WORD0__FORCE_UNNORMALIZED_MASK 0x00008000L
+#define SQ_IMG_SAMP_WORD0__ANISO_THRESHOLD_MASK 0x00070000L
+#define SQ_IMG_SAMP_WORD0__MC_COORD_TRUNC_MASK 0x00080000L
+#define SQ_IMG_SAMP_WORD0__FORCE_DEGAMMA_MASK 0x00100000L
+#define SQ_IMG_SAMP_WORD0__ANISO_BIAS_MASK 0x07E00000L
+#define SQ_IMG_SAMP_WORD0__TRUNC_COORD_MASK 0x08000000L
+#define SQ_IMG_SAMP_WORD0__DISABLE_CUBE_WRAP_MASK 0x10000000L
+#define SQ_IMG_SAMP_WORD0__FILTER_MODE_MASK 0x60000000L
+#define SQ_IMG_SAMP_WORD0__COMPAT_MODE_MASK 0x80000000L
+//SQ_IMG_SAMP_WORD1
+#define SQ_IMG_SAMP_WORD1__MIN_LOD__SHIFT 0x0
+#define SQ_IMG_SAMP_WORD1__MAX_LOD__SHIFT 0xc
+#define SQ_IMG_SAMP_WORD1__PERF_MIP__SHIFT 0x18
+#define SQ_IMG_SAMP_WORD1__PERF_Z__SHIFT 0x1c
+#define SQ_IMG_SAMP_WORD1__MIN_LOD_MASK 0x00000FFFL
+#define SQ_IMG_SAMP_WORD1__MAX_LOD_MASK 0x00FFF000L
+#define SQ_IMG_SAMP_WORD1__PERF_MIP_MASK 0x0F000000L
+#define SQ_IMG_SAMP_WORD1__PERF_Z_MASK 0xF0000000L
+//SQ_IMG_SAMP_WORD2
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS__SHIFT 0x0
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC__SHIFT 0xe
+#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER__SHIFT 0x14
+#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER__SHIFT 0x16
+#define SQ_IMG_SAMP_WORD2__Z_FILTER__SHIFT 0x18
+#define SQ_IMG_SAMP_WORD2__MIP_FILTER__SHIFT 0x1a
+#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP__SHIFT 0x1c
+#define SQ_IMG_SAMP_WORD2__BLEND_ZERO_PRT__SHIFT 0x1d
+#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX__SHIFT 0x1e
+#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE__SHIFT 0x1f
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_MASK 0x00003FFFL
+#define SQ_IMG_SAMP_WORD2__LOD_BIAS_SEC_MASK 0x000FC000L
+#define SQ_IMG_SAMP_WORD2__XY_MAG_FILTER_MASK 0x00300000L
+#define SQ_IMG_SAMP_WORD2__XY_MIN_FILTER_MASK 0x00C00000L
+#define SQ_IMG_SAMP_WORD2__Z_FILTER_MASK 0x03000000L
+#define SQ_IMG_SAMP_WORD2__MIP_FILTER_MASK 0x0C000000L
+#define SQ_IMG_SAMP_WORD2__MIP_POINT_PRECLAMP_MASK 0x10000000L
+#define SQ_IMG_SAMP_WORD2__BLEND_ZERO_PRT_MASK 0x20000000L
+#define SQ_IMG_SAMP_WORD2__FILTER_PREC_FIX_MASK 0x40000000L
+#define SQ_IMG_SAMP_WORD2__ANISO_OVERRIDE_MASK 0x80000000L
+//SQ_IMG_SAMP_WORD3
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR__SHIFT 0x0
+#define SQ_IMG_SAMP_WORD3__SKIP_DEGAMMA__SHIFT 0xc
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE__SHIFT 0x1e
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_PTR_MASK 0x00000FFFL
+#define SQ_IMG_SAMP_WORD3__SKIP_DEGAMMA_MASK 0x00001000L
+#define SQ_IMG_SAMP_WORD3__BORDER_COLOR_TYPE_MASK 0xC0000000L
+//SQ_FLAT_SCRATCH_WORD0
+#define SQ_FLAT_SCRATCH_WORD0__SIZE__SHIFT 0x0
+#define SQ_FLAT_SCRATCH_WORD0__SIZE_MASK 0x0007FFFFL
+//SQ_FLAT_SCRATCH_WORD1
+#define SQ_FLAT_SCRATCH_WORD1__OFFSET__SHIFT 0x0
+#define SQ_FLAT_SCRATCH_WORD1__OFFSET_MASK 0x00FFFFFFL
+//SQ_M0_GPR_IDX_WORD
+#define SQ_M0_GPR_IDX_WORD__INDEX__SHIFT 0x0
+#define SQ_M0_GPR_IDX_WORD__VSRC0_REL__SHIFT 0xc
+#define SQ_M0_GPR_IDX_WORD__VSRC1_REL__SHIFT 0xd
+#define SQ_M0_GPR_IDX_WORD__VSRC2_REL__SHIFT 0xe
+#define SQ_M0_GPR_IDX_WORD__VDST_REL__SHIFT 0xf
+#define SQ_M0_GPR_IDX_WORD__INDEX_MASK 0x000000FFL
+#define SQ_M0_GPR_IDX_WORD__VSRC0_REL_MASK 0x00001000L
+#define SQ_M0_GPR_IDX_WORD__VSRC1_REL_MASK 0x00002000L
+#define SQ_M0_GPR_IDX_WORD__VSRC2_REL_MASK 0x00004000L
+#define SQ_M0_GPR_IDX_WORD__VDST_REL_MASK 0x00008000L
+//SQC_ICACHE_UTCL1_CNTL1
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define SQC_ICACHE_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define SQC_ICACHE_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define SQC_ICACHE_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
+#define SQC_ICACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define SQC_ICACHE_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define SQC_ICACHE_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define SQC_ICACHE_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define SQC_ICACHE_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define SQC_ICACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
+#define SQC_ICACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
+#define SQC_ICACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define SQC_ICACHE_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define SQC_ICACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//SQC_ICACHE_UTCL1_CNTL2
+#define SQC_ICACHE_UTCL1_CNTL2__SPARE__SHIFT 0x0
+#define SQC_ICACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
+#define SQC_ICACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define SQC_ICACHE_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define SQC_ICACHE_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define SQC_ICACHE_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define SQC_ICACHE_UTCL1_CNTL2__ARB_BURST_MODE__SHIFT 0x10
+#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
+#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define SQC_ICACHE_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
+#define SQC_ICACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
+#define SQC_ICACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define SQC_ICACHE_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define SQC_ICACHE_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define SQC_ICACHE_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define SQC_ICACHE_UTCL1_CNTL2__ARB_BURST_MODE_MASK 0x00030000L
+#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
+#define SQC_ICACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define SQC_ICACHE_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
+#define SQC_ICACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+//SQC_DCACHE_UTCL1_CNTL1
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define SQC_DCACHE_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define SQC_DCACHE_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define SQC_DCACHE_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID__SHIFT 0x13
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID__SHIFT 0x17
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE__SHIFT 0x18
+#define SQC_DCACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define SQC_DCACHE_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define SQC_DCACHE_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define SQC_DCACHE_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define SQC_DCACHE_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define SQC_DCACHE_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_VMID_MASK 0x00780000L
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_ALL_VMID_MASK 0x00800000L
+#define SQC_DCACHE_UTCL1_CNTL1__REG_INVALIDATE_TOGGLE_MASK 0x01000000L
+#define SQC_DCACHE_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define SQC_DCACHE_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define SQC_DCACHE_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//SQC_DCACHE_UTCL1_CNTL2
+#define SQC_DCACHE_UTCL1_CNTL2__SPARE__SHIFT 0x0
+#define SQC_DCACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE__SHIFT 0x8
+#define SQC_DCACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define SQC_DCACHE_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define SQC_DCACHE_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define SQC_DCACHE_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define SQC_DCACHE_UTCL1_CNTL2__ARB_BURST_MODE__SHIFT 0x10
+#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR__SHIFT 0x13
+#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_VMID__SHIFT 0x15
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define SQC_DCACHE_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
+#define SQC_DCACHE_UTCL1_CNTL2__LFIFO_SCAN_DISABLE_MASK 0x00000100L
+#define SQC_DCACHE_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define SQC_DCACHE_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define SQC_DCACHE_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define SQC_DCACHE_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define SQC_DCACHE_UTCL1_CNTL2__ARB_BURST_MODE_MASK 0x00030000L
+#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_RD_WR_MASK 0x00080000L
+#define SQC_DCACHE_UTCL1_CNTL2__ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define SQC_DCACHE_UTCL1_CNTL2__PERF_EVENT_VMID_MASK 0x01E00000L
+#define SQC_DCACHE_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+//SQC_ICACHE_UTCL1_STATUS
+#define SQC_ICACHE_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define SQC_ICACHE_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define SQC_ICACHE_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define SQC_ICACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define SQC_ICACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define SQC_ICACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+//SQC_DCACHE_UTCL1_STATUS
+#define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+
+
+// addressBlock: gc_shsdec
+//SX_DEBUG_1
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT__SHIFT 0x0
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x8
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x9
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0xa
+#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT__SHIFT 0xb
+#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT__SHIFT 0xc
+#define SX_DEBUG_1__DISABLE_SX_DB_FGCG__SHIFT 0xd
+#define SX_DEBUG_1__DEBUG_DATA__SHIFT 0xe
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT_MASK 0x0000007FL
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x00000100L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS_MASK 0x00000200L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x00000400L
+#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT_MASK 0x00000800L
+#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT_MASK 0x00001000L
+#define SX_DEBUG_1__DISABLE_SX_DB_FGCG_MASK 0x00002000L
+#define SX_DEBUG_1__DEBUG_DATA_MASK 0xFFFFC000L
+//SPI_PS_MAX_WAVE_ID
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID__SHIFT 0x10
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID_MASK 0x03FF0000L
+//SPI_START_PHASE
+#define SPI_START_PHASE__VGPR_START_PHASE__SHIFT 0x0
+#define SPI_START_PHASE__SGPR_START_PHASE__SHIFT 0x2
+#define SPI_START_PHASE__WAVE_START_PHASE__SHIFT 0x4
+#define SPI_START_PHASE__VGPR_START_PHASE_MASK 0x00000003L
+#define SPI_START_PHASE__SGPR_START_PHASE_MASK 0x0000000CL
+#define SPI_START_PHASE__WAVE_START_PHASE_MASK 0x00000030L
+//SPI_GFX_CNTL
+#define SPI_GFX_CNTL__RESET_COUNTS__SHIFT 0x0
+#define SPI_GFX_CNTL__RESET_COUNTS_MASK 0x00000001L
+//SPI_DSM_CNTL
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SPI_DSM_CNTL__UNUSED__SHIFT 0x3
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define SPI_DSM_CNTL__UNUSED_MASK 0xFFFFFFF8L
+//SPI_DSM_CNTL2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY__SHIFT 0x4
+#define SPI_DSM_CNTL2__UNUSED__SHIFT 0xa
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY_MASK 0x000003F0L
+#define SPI_DSM_CNTL2__UNUSED_MASK 0xFFFFFC00L
+//SPI_DEBUG_BUSY
+#define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x0
+#define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x1
+#define SPI_DEBUG_BUSY__VS_BUSY__SHIFT 0x2
+#define SPI_DEBUG_BUSY__PS0_BUSY__SHIFT 0x3
+#define SPI_DEBUG_BUSY__PS1_BUSY__SHIFT 0x4
+#define SPI_DEBUG_BUSY__CSG_BUSY__SHIFT 0x5
+#define SPI_DEBUG_BUSY__CS0_BUSY__SHIFT 0x6
+#define SPI_DEBUG_BUSY__CS1_BUSY__SHIFT 0x7
+#define SPI_DEBUG_BUSY__CS2_BUSY__SHIFT 0x8
+#define SPI_DEBUG_BUSY__CS3_BUSY__SHIFT 0x9
+#define SPI_DEBUG_BUSY__CS4_BUSY__SHIFT 0xa
+#define SPI_DEBUG_BUSY__CS5_BUSY__SHIFT 0xb
+#define SPI_DEBUG_BUSY__CS6_BUSY__SHIFT 0xc
+#define SPI_DEBUG_BUSY__CS7_BUSY__SHIFT 0xd
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY__SHIFT 0xe
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY__SHIFT 0xf
+#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY__SHIFT 0x10
+#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY__SHIFT 0x11
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY__SHIFT 0x12
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY__SHIFT 0x13
+#define SPI_DEBUG_BUSY__GRBM_BUSY__SHIFT 0x14
+#define SPI_DEBUG_BUSY__SPIS_BUSY__SHIFT 0x15
+#define SPI_DEBUG_BUSY__HS_BUSY_MASK 0x00000001L
+#define SPI_DEBUG_BUSY__GS_BUSY_MASK 0x00000002L
+#define SPI_DEBUG_BUSY__VS_BUSY_MASK 0x00000004L
+#define SPI_DEBUG_BUSY__PS0_BUSY_MASK 0x00000008L
+#define SPI_DEBUG_BUSY__PS1_BUSY_MASK 0x00000010L
+#define SPI_DEBUG_BUSY__CSG_BUSY_MASK 0x00000020L
+#define SPI_DEBUG_BUSY__CS0_BUSY_MASK 0x00000040L
+#define SPI_DEBUG_BUSY__CS1_BUSY_MASK 0x00000080L
+#define SPI_DEBUG_BUSY__CS2_BUSY_MASK 0x00000100L
+#define SPI_DEBUG_BUSY__CS3_BUSY_MASK 0x00000200L
+#define SPI_DEBUG_BUSY__CS4_BUSY_MASK 0x00000400L
+#define SPI_DEBUG_BUSY__CS5_BUSY_MASK 0x00000800L
+#define SPI_DEBUG_BUSY__CS6_BUSY_MASK 0x00001000L
+#define SPI_DEBUG_BUSY__CS7_BUSY_MASK 0x00002000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY_MASK 0x00004000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY_MASK 0x00008000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC0_BUSY_MASK 0x00010000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC1_BUSY_MASK 0x00020000L
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY_MASK 0x00040000L
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY_MASK 0x00080000L
+#define SPI_DEBUG_BUSY__GRBM_BUSY_MASK 0x00100000L
+#define SPI_DEBUG_BUSY__SPIS_BUSY_MASK 0x00200000L
+//SPI_CONFIG_PS_CU_EN
+#define SPI_CONFIG_PS_CU_EN__ENABLE__SHIFT 0x0
+#define SPI_CONFIG_PS_CU_EN__PKR0_CU_EN__SHIFT 0x1
+#define SPI_CONFIG_PS_CU_EN__PKR1_CU_EN__SHIFT 0x10
+#define SPI_CONFIG_PS_CU_EN__ENABLE_MASK 0x00000001L
+#define SPI_CONFIG_PS_CU_EN__PKR0_CU_EN_MASK 0x0000FFFEL
+#define SPI_CONFIG_PS_CU_EN__PKR1_CU_EN_MASK 0xFFFF0000L
+//SPI_WF_LIFETIME_CNTL
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD__SHIFT 0x0
+#define SPI_WF_LIFETIME_CNTL__EN__SHIFT 0x4
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD_MASK 0x0000000FL
+#define SPI_WF_LIFETIME_CNTL__EN_MASK 0x00000010L
+//SPI_WF_LIFETIME_LIMIT_0
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_1
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_2
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_3
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_4
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_5
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_6
+#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_6__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_6__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_7
+#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_7__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_7__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_8
+#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_8__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_8__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_9
+#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_9__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_9__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_0
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_1
+#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_1__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_1__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_1__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_2
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_3
+#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_3__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_3__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_3__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_4
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_5
+#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_5__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_5__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_5__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_6
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_7
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_8
+#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_8__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_8__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_8__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_9
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_10
+#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_10__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_10__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_10__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_11
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_12
+#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_12__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_12__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_12__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_13
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_14
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_15
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_16
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_17
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_18
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_19
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_20
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT_MASK 0x80000000L
+//SPI_LB_CTR_CTRL
+#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x0
+#define SPI_LB_CTR_CTRL__WAVES_SELECT__SHIFT 0x1
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ__SHIFT 0x3
+#define SPI_LB_CTR_CTRL__RESET_COUNTS__SHIFT 0x4
+#define SPI_LB_CTR_CTRL__LOAD_MASK 0x00000001L
+#define SPI_LB_CTR_CTRL__WAVES_SELECT_MASK 0x00000006L
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ_MASK 0x00000008L
+#define SPI_LB_CTR_CTRL__RESET_COUNTS_MASK 0x00000010L
+//SPI_LB_CU_MASK
+#define SPI_LB_CU_MASK__CU_MASK__SHIFT 0x0
+#define SPI_LB_CU_MASK__CU_MASK_MASK 0xFFFFL
+//SPI_LB_DATA_REG
+#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x0
+#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xFFFFFFFFL
+//SPI_PG_ENABLE_STATIC_CU_MASK
+#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK__SHIFT 0x0
+#define SPI_PG_ENABLE_STATIC_CU_MASK__CU_MASK_MASK 0xFFFFL
+//SPI_GDS_CREDITS
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x0
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x8
+#define SPI_GDS_CREDITS__UNUSED__SHIFT 0x10
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0x000000FFL
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0x0000FF00L
+#define SPI_GDS_CREDITS__UNUSED_MASK 0xFFFF0000L
+//SPI_SX_EXPORT_BUFFER_SIZES
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x0
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x10
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xFFFF0000L
+//SPI_SX_SCOREBOARD_BUFFER_SIZES
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x0
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x10
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xFFFF0000L
+//SPI_CSQ_WF_ACTIVE_STATUS
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE_MASK 0xFFFFFFFFL
+//SPI_CSQ_WF_ACTIVE_COUNT_0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_1
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_2
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_3
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_4
+#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_4__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_4__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_4__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_5
+#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_5__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_5__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_5__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_6
+#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_6__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_6__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_6__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_7
+#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_7__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_7__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_7__EVENTS_MASK 0x07FF0000L
+//SPI_LB_DATA_WAVES
+#define SPI_LB_DATA_WAVES__COUNT0__SHIFT 0x0
+#define SPI_LB_DATA_WAVES__COUNT1__SHIFT 0x10
+#define SPI_LB_DATA_WAVES__COUNT0_MASK 0x0000FFFFL
+#define SPI_LB_DATA_WAVES__COUNT1_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERCU_WAVE_HSGS
+#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_HS__SHIFT 0x0
+#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_GS__SHIFT 0x10
+#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_HS_MASK 0x0000FFFFL
+#define SPI_LB_DATA_PERCU_WAVE_HSGS__CU_USED_GS_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERCU_WAVE_VSPS
+#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_VS__SHIFT 0x0
+#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_PS__SHIFT 0x10
+#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_VS_MASK 0x0000FFFFL
+#define SPI_LB_DATA_PERCU_WAVE_VSPS__CU_USED_PS_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERCU_WAVE_CS
+#define SPI_LB_DATA_PERCU_WAVE_CS__ACTIVE__SHIFT 0x0
+#define SPI_LB_DATA_PERCU_WAVE_CS__ACTIVE_MASK 0xFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_LO
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_HI
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_PSMA_LO
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSMA_HI
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_GPR_MIN
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+//SPI_P1_TRAP_SCREEN_PSBA_LO
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSBA_HI
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_PSMA_LO
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSMA_HI
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_GPR_MIN
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+
+
+// addressBlock: gc_tpdec
+//TD_CNTL
+#define TD_CNTL__SYNC_PHASE_SH__SHIFT 0x0
+#define TD_CNTL__SYNC_PHASE_VC_SMX__SHIFT 0x4
+#define TD_CNTL__PAD_STALL_EN__SHIFT 0x8
+#define TD_CNTL__EXTEND_LDS_STALL__SHIFT 0x9
+#define TD_CNTL__LDS_STALL_PHASE_ADJUST__SHIFT 0xb
+#define TD_CNTL__PRECISION_COMPATIBILITY__SHIFT 0xf
+#define TD_CNTL__GATHER4_FLOAT_MODE__SHIFT 0x10
+#define TD_CNTL__LD_FLOAT_MODE__SHIFT 0x12
+#define TD_CNTL__GATHER4_DX9_MODE__SHIFT 0x13
+#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x14
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO__SHIFT 0x15
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT__SHIFT 0x17
+#define TD_CNTL__DISABLE_MM_QNAN_COMPARE_RESULT__SHIFT 0x18
+#define TD_CNTL__SYNC_PHASE_SH_MASK 0x00000003L
+#define TD_CNTL__SYNC_PHASE_VC_SMX_MASK 0x00000030L
+#define TD_CNTL__PAD_STALL_EN_MASK 0x00000100L
+#define TD_CNTL__EXTEND_LDS_STALL_MASK 0x00000600L
+#define TD_CNTL__LDS_STALL_PHASE_ADJUST_MASK 0x00001800L
+#define TD_CNTL__PRECISION_COMPATIBILITY_MASK 0x00008000L
+#define TD_CNTL__GATHER4_FLOAT_MODE_MASK 0x00010000L
+#define TD_CNTL__LD_FLOAT_MODE_MASK 0x00040000L
+#define TD_CNTL__GATHER4_DX9_MODE_MASK 0x00080000L
+#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x00100000L
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO_MASK 0x00200000L
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT_MASK 0x00800000L
+#define TD_CNTL__DISABLE_MM_QNAN_COMPARE_RESULT_MASK 0x01000000L
+//TD_STATUS
+#define TD_STATUS__BUSY__SHIFT 0x1f
+#define TD_STATUS__BUSY_MASK 0x80000000L
+//TD_DSM_CNTL
+#define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define TD_DSM_CNTL__TD_SS_FIFO_HI_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define TD_DSM_CNTL__TD_SS_FIFO_HI_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define TD_DSM_CNTL__TD_CS_FIFO_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define TD_DSM_CNTL__TD_CS_FIFO_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define TD_DSM_CNTL__TD_SS_FIFO_HI_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define TD_DSM_CNTL__TD_SS_FIFO_HI_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define TD_DSM_CNTL__TD_CS_FIFO_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define TD_DSM_CNTL__TD_CS_FIFO_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+//TD_DSM_CNTL2
+#define TD_DSM_CNTL2__TD_SS_FIFO_LO_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TD_DSM_CNTL2__TD_SS_FIFO_LO_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TD_DSM_CNTL2__TD_SS_FIFO_HI_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TD_DSM_CNTL2__TD_SS_FIFO_HI_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TD_DSM_CNTL2__TD_CS_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define TD_DSM_CNTL2__TD_CS_FIFO_SELECT_INJECT_DELAY__SHIFT 0x8
+#define TD_DSM_CNTL2__TD_INJECT_DELAY__SHIFT 0x1a
+#define TD_DSM_CNTL2__TD_SS_FIFO_LO_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TD_DSM_CNTL2__TD_SS_FIFO_LO_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TD_DSM_CNTL2__TD_SS_FIFO_HI_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TD_DSM_CNTL2__TD_SS_FIFO_HI_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TD_DSM_CNTL2__TD_CS_FIFO_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define TD_DSM_CNTL2__TD_CS_FIFO_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define TD_DSM_CNTL2__TD_INJECT_DELAY_MASK 0xFC000000L
+//TD_SCRATCH
+#define TD_SCRATCH__SCRATCH__SHIFT 0x0
+#define TD_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+//TA_CNTL
+#define TA_CNTL__FX_XNACK_CREDIT__SHIFT 0x0
+#define TA_CNTL__SQ_XNACK_CREDIT__SHIFT 0x9
+#define TA_CNTL__TC_DATA_CREDIT__SHIFT 0xd
+#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x10
+#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x16
+#define TA_CNTL__FX_XNACK_CREDIT_MASK 0x0000007FL
+#define TA_CNTL__SQ_XNACK_CREDIT_MASK 0x00001E00L
+#define TA_CNTL__TC_DATA_CREDIT_MASK 0x0000E000L
+#define TA_CNTL__ALIGNER_CREDIT_MASK 0x001F0000L
+#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xFFC00000L
+//TA_CNTL_AUX
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N__SHIFT 0x0
+#define TA_CNTL_AUX__RESERVED__SHIFT 0x1
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE__SHIFT 0x5
+#define TA_CNTL_AUX__GATHERH_DST_SEL__SHIFT 0x6
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE__SHIFT 0x7
+#define TA_CNTL_AUX__NONIMG_ANISO_BYPASS__SHIFT 0x9
+#define TA_CNTL_AUX__ANISO_HALF_THRESH__SHIFT 0xa
+#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS__SHIFT 0xc
+#define TA_CNTL_AUX__ANISO_STEP_ORDER__SHIFT 0xd
+#define TA_CNTL_AUX__ANISO_STEP__SHIFT 0xe
+#define TA_CNTL_AUX__MINMAG_UNNORM__SHIFT 0xf
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE__SHIFT 0x10
+#define TA_CNTL_AUX__ANISO_RATIO_LUT__SHIFT 0x11
+#define TA_CNTL_AUX__ANISO_TAP__SHIFT 0x12
+#define TA_CNTL_AUX__ANISO_MIP_ADJ_MODE__SHIFT 0x13
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE__SHIFT 0x14
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE__SHIFT 0x15
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE__SHIFT 0x16
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE__SHIFT 0x17
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE__SHIFT 0x18
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE__SHIFT 0x19
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE__SHIFT 0x1a
+#define TA_CNTL_AUX__DISABLE_DWORD_X2_COALESCE__SHIFT 0x1b
+#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP__SHIFT 0x1c
+#define TA_CNTL_AUX__TRUNC_SMALL_NEG__SHIFT 0x1d
+#define TA_CNTL_AUX__ARRAY_ROUND_MODE__SHIFT 0x1e
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N_MASK 0x00000001L
+#define TA_CNTL_AUX__RESERVED_MASK 0x0000000EL
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE_MASK 0x00000020L
+#define TA_CNTL_AUX__GATHERH_DST_SEL_MASK 0x00000040L
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE_MASK 0x00000080L
+#define TA_CNTL_AUX__NONIMG_ANISO_BYPASS_MASK 0x00000200L
+#define TA_CNTL_AUX__ANISO_HALF_THRESH_MASK 0x00000C00L
+#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS_MASK 0x00001000L
+#define TA_CNTL_AUX__ANISO_STEP_ORDER_MASK 0x00002000L
+#define TA_CNTL_AUX__ANISO_STEP_MASK 0x00004000L
+#define TA_CNTL_AUX__MINMAG_UNNORM_MASK 0x00008000L
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE_MASK 0x00010000L
+#define TA_CNTL_AUX__ANISO_RATIO_LUT_MASK 0x00020000L
+#define TA_CNTL_AUX__ANISO_TAP_MASK 0x00040000L
+#define TA_CNTL_AUX__ANISO_MIP_ADJ_MODE_MASK 0x00080000L
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE_MASK 0x00100000L
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE_MASK 0x00200000L
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE_MASK 0x00400000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE_MASK 0x00800000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE_MASK 0x01000000L
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE_MASK 0x02000000L
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE_MASK 0x04000000L
+#define TA_CNTL_AUX__DISABLE_DWORD_X2_COALESCE_MASK 0x08000000L
+#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP_MASK 0x10000000L
+#define TA_CNTL_AUX__TRUNC_SMALL_NEG_MASK 0x20000000L
+#define TA_CNTL_AUX__ARRAY_ROUND_MODE_MASK 0xC0000000L
+//TA_RESERVED_010C
+#define TA_RESERVED_010C__Unused__SHIFT 0x0
+#define TA_RESERVED_010C__Unused_MASK 0xFFFFFFFFL
+//TA_STATUS
+#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
+#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
+#define TA_STATUS__FG_SFIFO_EMPTYB__SHIFT 0xe
+#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x10
+#define TA_STATUS__FL_LFIFO_EMPTYB__SHIFT 0x11
+#define TA_STATUS__FL_SFIFO_EMPTYB__SHIFT 0x12
+#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x14
+#define TA_STATUS__FA_LFIFO_EMPTYB__SHIFT 0x15
+#define TA_STATUS__FA_SFIFO_EMPTYB__SHIFT 0x16
+#define TA_STATUS__IN_BUSY__SHIFT 0x18
+#define TA_STATUS__FG_BUSY__SHIFT 0x19
+#define TA_STATUS__LA_BUSY__SHIFT 0x1a
+#define TA_STATUS__FL_BUSY__SHIFT 0x1b
+#define TA_STATUS__TA_BUSY__SHIFT 0x1c
+#define TA_STATUS__FA_BUSY__SHIFT 0x1d
+#define TA_STATUS__AL_BUSY__SHIFT 0x1e
+#define TA_STATUS__BUSY__SHIFT 0x1f
+#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x00001000L
+#define TA_STATUS__FG_LFIFO_EMPTYB_MASK 0x00002000L
+#define TA_STATUS__FG_SFIFO_EMPTYB_MASK 0x00004000L
+#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x00010000L
+#define TA_STATUS__FL_LFIFO_EMPTYB_MASK 0x00020000L
+#define TA_STATUS__FL_SFIFO_EMPTYB_MASK 0x00040000L
+#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x00100000L
+#define TA_STATUS__FA_LFIFO_EMPTYB_MASK 0x00200000L
+#define TA_STATUS__FA_SFIFO_EMPTYB_MASK 0x00400000L
+#define TA_STATUS__IN_BUSY_MASK 0x01000000L
+#define TA_STATUS__FG_BUSY_MASK 0x02000000L
+#define TA_STATUS__LA_BUSY_MASK 0x04000000L
+#define TA_STATUS__FL_BUSY_MASK 0x08000000L
+#define TA_STATUS__TA_BUSY_MASK 0x10000000L
+#define TA_STATUS__FA_BUSY_MASK 0x20000000L
+#define TA_STATUS__AL_BUSY_MASK 0x40000000L
+#define TA_STATUS__BUSY_MASK 0x80000000L
+//TA_SCRATCH
+#define TA_SCRATCH__SCRATCH__SHIFT 0x0
+#define TA_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+
+
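(Aside, not part of the patch itself: every register in these generated headers follows the same pattern, one <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pair per bitfield. A minimal standalone sketch of how such pairs are consumed, assuming a hypothetical TA_STATUS readback value and a local GET_FIELD() helper rather than the driver's own accessor macros:)

#include <stdint.h>
#include <stdio.h>

/* Shift/mask pairs copied verbatim from the gc_tpdec block above. */
#define TA_STATUS__TA_BUSY__SHIFT 0x1c
#define TA_STATUS__TA_BUSY_MASK   0x10000000L
#define TA_STATUS__BUSY__SHIFT    0x1f
#define TA_STATUS__BUSY_MASK      0x80000000L

/* Extract a field: keep only the field's bits, then shift them down to bit 0. */
#define GET_FIELD(val, mask, shift) (((val) & (mask)) >> (shift))

int main(void)
{
	uint32_t ta_status = 0x90000000;	/* hypothetical register readback */

	printf("TA_BUSY = %lu\n",
	       (unsigned long)GET_FIELD(ta_status, TA_STATUS__TA_BUSY_MASK,
					TA_STATUS__TA_BUSY__SHIFT));
	printf("BUSY    = %lu\n",
	       (unsigned long)GET_FIELD(ta_status, TA_STATUS__BUSY_MASK,
					TA_STATUS__BUSY__SHIFT));
	return 0;
}

(Setting a field is the inverse: clear the field's bits with ~MASK, then OR in the new value shifted left by SHIFT and clamped by MASK.)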
+// addressBlock: gc_gdsdec
+//GDS_CONFIG
+#define GDS_CONFIG__SH0_GPR_PHASE_SEL__SHIFT 0x1
+#define GDS_CONFIG__SH1_GPR_PHASE_SEL__SHIFT 0x3
+#define GDS_CONFIG__SH2_GPR_PHASE_SEL__SHIFT 0x5
+#define GDS_CONFIG__SH3_GPR_PHASE_SEL__SHIFT 0x7
+#define GDS_CONFIG__SH0_GPR_PHASE_SEL_MASK 0x00000006L
+#define GDS_CONFIG__SH1_GPR_PHASE_SEL_MASK 0x00000018L
+#define GDS_CONFIG__SH2_GPR_PHASE_SEL_MASK 0x00000060L
+#define GDS_CONFIG__SH3_GPR_PHASE_SEL_MASK 0x00000180L
+//GDS_CNTL_STATUS
+#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x0
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x1
+#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x2
+#define GDS_CNTL_STATUS__DS_BANK_CONFLICT__SHIFT 0x3
+#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT__SHIFT 0x4
+#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x5
+#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x6
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY__SHIFT 0x7
+#define GDS_CNTL_STATUS__DS_BUSY__SHIFT 0x8
+#define GDS_CNTL_STATUS__GWS_BUSY__SHIFT 0x9
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY__SHIFT 0xa
+#define GDS_CNTL_STATUS__CREDIT_BUSY0__SHIFT 0xb
+#define GDS_CNTL_STATUS__CREDIT_BUSY1__SHIFT 0xc
+#define GDS_CNTL_STATUS__CREDIT_BUSY2__SHIFT 0xd
+#define GDS_CNTL_STATUS__CREDIT_BUSY3__SHIFT 0xe
+#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x00000001L
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x00000002L
+#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x00000004L
+#define GDS_CNTL_STATUS__DS_BANK_CONFLICT_MASK 0x00000008L
+#define GDS_CNTL_STATUS__DS_ADDR_CONFLICT_MASK 0x00000010L
+#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x00000020L
+#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x00000040L
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY_MASK 0x00000080L
+#define GDS_CNTL_STATUS__DS_BUSY_MASK 0x00000100L
+#define GDS_CNTL_STATUS__GWS_BUSY_MASK 0x00000200L
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY_MASK 0x00000400L
+#define GDS_CNTL_STATUS__CREDIT_BUSY0_MASK 0x00000800L
+#define GDS_CNTL_STATUS__CREDIT_BUSY1_MASK 0x00001000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY2_MASK 0x00002000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY3_MASK 0x00004000L
+//GDS_ENHANCE2
+#define GDS_ENHANCE2__MISC__SHIFT 0x0
+#define GDS_ENHANCE2__UNUSED__SHIFT 0x10
+#define GDS_ENHANCE2__MISC_MASK 0x0000FFFFL
+#define GDS_ENHANCE2__UNUSED_MASK 0xFFFF0000L
+//GDS_PROTECTION_FAULT
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_PROTECTION_FAULT__GRBM__SHIFT 0x2
+#define GDS_PROTECTION_FAULT__SH_ID__SHIFT 0x3
+#define GDS_PROTECTION_FAULT__CU_ID__SHIFT 0x6
+#define GDS_PROTECTION_FAULT__SIMD_ID__SHIFT 0xa
+#define GDS_PROTECTION_FAULT__WAVE_ID__SHIFT 0xc
+#define GDS_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_PROTECTION_FAULT__GRBM_MASK 0x00000004L
+#define GDS_PROTECTION_FAULT__SH_ID_MASK 0x00000038L
+#define GDS_PROTECTION_FAULT__CU_ID_MASK 0x000003C0L
+#define GDS_PROTECTION_FAULT__SIMD_ID_MASK 0x00000C00L
+#define GDS_PROTECTION_FAULT__WAVE_ID_MASK 0x0000F000L
+#define GDS_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
+//GDS_VM_PROTECTION_FAULT
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_VM_PROTECTION_FAULT__GWS__SHIFT 0x2
+#define GDS_VM_PROTECTION_FAULT__OA__SHIFT 0x3
+#define GDS_VM_PROTECTION_FAULT__GRBM__SHIFT 0x4
+#define GDS_VM_PROTECTION_FAULT__VMID__SHIFT 0x8
+#define GDS_VM_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_VM_PROTECTION_FAULT__GWS_MASK 0x00000004L
+#define GDS_VM_PROTECTION_FAULT__OA_MASK 0x00000008L
+#define GDS_VM_PROTECTION_FAULT__GRBM_MASK 0x00000010L
+#define GDS_VM_PROTECTION_FAULT__VMID_MASK 0x00000F00L
+#define GDS_VM_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
+//GDS_DSM_CNTL
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0__SHIFT 0x0
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1__SHIFT 0x1
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0__SHIFT 0x3
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1__SHIFT 0x4
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0__SHIFT 0x6
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1__SHIFT 0x7
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0__SHIFT 0x9
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1__SHIFT 0xa
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0__SHIFT 0xc
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1__SHIFT 0xd
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GDS_DSM_CNTL__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0_MASK 0x00000001L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1_MASK 0x00000002L
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0_MASK 0x00000008L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1_MASK 0x00000010L
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0_MASK 0x00000040L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1_MASK 0x00000080L
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0_MASK 0x00000200L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1_MASK 0x00000400L
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0_MASK 0x00001000L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1_MASK 0x00002000L
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GDS_DSM_CNTL__UNUSED_MASK 0xFFFF8000L
+//GDS_DSM_CNTL2
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GDS_DSM_CNTL2__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY__SHIFT 0x1a
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GDS_DSM_CNTL2__UNUSED_MASK 0x03FF8000L
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY_MASK 0xFC000000L
+//GDS_WD_GDS_CSB
+#define GDS_WD_GDS_CSB__COUNTER__SHIFT 0x0
+#define GDS_WD_GDS_CSB__UNUSED__SHIFT 0xd
+#define GDS_WD_GDS_CSB__COUNTER_MASK 0x00001FFFL
+#define GDS_WD_GDS_CSB__UNUSED_MASK 0xFFFFE000L
+
+
+// addressBlock: gc_rbdec
+//DB_DEBUG
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x0
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x1
+#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x2
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x3
+#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x4
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x6
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x7
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x8
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0xa
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0xc
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0xe
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0xf
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x10
+#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x11
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x12
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x13
+#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x15
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x16
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x17
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x18
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x1c
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x1d
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x1e
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x1f
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x00000001L
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x00000002L
+#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x00000004L
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x00000008L
+#define DB_DEBUG__FORCE_Z_MODE_MASK 0x00000030L
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x00000040L
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x00000080L
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x00000300L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0x00000C00L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x00003000L
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x00004000L
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x00008000L
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x00010000L
+#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x00020000L
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x00040000L
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x00180000L
+#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x00200000L
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x00400000L
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x00800000L
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0x0F000000L
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000L
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000L
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000L
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000L
+//DB_DEBUG2
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x0
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x1
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x2
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x3
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x4
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL__SHIFT 0x5
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ__SHIFT 0x6
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL__SHIFT 0x7
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE__SHIFT 0x8
+#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x9
+#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER__SHIFT 0xe
+#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING__SHIFT 0xf
+#define DB_DEBUG2__RESERVED__SHIFT 0x10
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x11
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x12
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x13
+#define DB_DEBUG2__DISABLE_VR_OBJ_PRIM_ID__SHIFT 0x1a
+#define DB_DEBUG2__DISABLE_VR_PS_INVOKE__SHIFT 0x1b
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x1c
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x1d
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x1e
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x1f
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x00000001L
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x00000002L
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x00000004L
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x00000008L
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x00000010L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_MASK 0x00000020L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ_MASK 0x00000040L
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL_MASK 0x00000080L
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE_MASK 0x00000100L
+#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x00003E00L
+#define DB_DEBUG2__DISABLE_TILE_COVERED_FOR_PS_ITER_MASK 0x00004000L
+#define DB_DEBUG2__ENABLE_SUBTILE_GROUPING_MASK 0x00008000L
+#define DB_DEBUG2__RESERVED_MASK 0x00010000L
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x00020000L
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x00040000L
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x00080000L
+#define DB_DEBUG2__DISABLE_VR_OBJ_PRIM_ID_MASK 0x04000000L
+#define DB_DEBUG2__DISABLE_VR_PS_INVOKE_MASK 0x08000000L
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000L
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000L
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x40000000L
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000L
+//DB_DEBUG3
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION__SHIFT 0x0
+#define DB_DEBUG3__ROUND_ZRANGE_CORRECTION__SHIFT 0x1
+#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x2
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x3
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x4
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x5
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x6
+#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS__SHIFT 0x7
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x8
+#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT__SHIFT 0x9
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0xa
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0xb
+#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING__SHIFT 0xc
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0xd
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0xe
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0xf
+#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION__SHIFT 0x10
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x11
+#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING__SHIFT 0x12
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x13
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x14
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x15
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x16
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x17
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x18
+#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x19
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x1a
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x1b
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x1c
+#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND__SHIFT 0x1d
+#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE__SHIFT 0x1e
+#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK__SHIFT 0x1f
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION_MASK 0x00000001L
+#define DB_DEBUG3__ROUND_ZRANGE_CORRECTION_MASK 0x00000002L
+#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x00000004L
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x00000008L
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x00000010L
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x00000020L
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x00000040L
+#define DB_DEBUG3__DISABLE_TCP_CAM_BYPASS_MASK 0x00000080L
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x00000100L
+#define DB_DEBUG3__DISABLE_REDUNDANT_PLANE_FLUSHES_OPT_MASK 0x00000200L
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x00000400L
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x00000800L
+#define DB_DEBUG3__DISABLE_OP_Z_DATA_FORWARDING_MASK 0x00001000L
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x00002000L
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x00004000L
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x00008000L
+#define DB_DEBUG3__ALLOW_RF2P_RW_COLLISION_MASK 0x00010000L
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x00020000L
+#define DB_DEBUG3__DISABLE_OP_S_DATA_FORWARDING_MASK 0x00040000L
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x00080000L
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x00100000L
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x00200000L
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x00400000L
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x00800000L
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x01000000L
+#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x02000000L
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x04000000L
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x08000000L
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000L
+#define DB_DEBUG3__DONT_DELETE_CONTEXT_SUSPEND_MASK 0x20000000L
+#define DB_DEBUG3__DISABLE_4XAA_2P_DELAYED_WRITE_MASK 0x40000000L
+#define DB_DEBUG3__DISABLE_4XAA_2P_INTERLEAVED_PMASK_MASK 0x80000000L
+//DB_DEBUG4
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x0
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x1
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x2
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x3
+#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF__SHIFT 0x4
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION__SHIFT 0x5
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE__SHIFT 0x6
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN__SHIFT 0x7
+#define DB_DEBUG4__DFSM_CONVERT_PASSTHROUGH_TO_BYPASS__SHIFT 0x8
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR__SHIFT 0x9
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR__SHIFT 0xa
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR__SHIFT 0xb
+#define DB_DEBUG4__DISABLE_SEPARATE_DFSM_CLK__SHIFT 0xc
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP__SHIFT 0xd
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION__SHIFT 0xe
+#define DB_DEBUG4__DISABLE_TS_WRITE_L0__SHIFT 0xf
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE__SHIFT 0x10
+#define DB_DEBUG4__DISABLE_HIZ_Q1_TS_COLLISION_DETECT__SHIFT 0x11
+#define DB_DEBUG4__DISABLE_HIZ_Q2_TS_COLLISION_DETECT__SHIFT 0x12
+#define DB_DEBUG4__DB_EXTRA_DEBUG4__SHIFT 0x13
+#define DB_DEBUG4__DISABLE_8PPC_OBJPRIMID_WHEN_NO_SHADER_EXPORTS__SHIFT 0x1e
+#define DB_DEBUG4__FULL_TILE_CACHE_EVICT_ON_HALF_FULL__SHIFT 0x1f
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x00000001L
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x00000002L
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x00000004L
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x00000008L
+#define DB_DEBUG4__DISABLE_4XAA_2P_ZD_HOLDOFF_MASK 0x00000010L
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION_MASK 0x00000020L
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE_MASK 0x00000040L
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN_MASK 0x00000080L
+#define DB_DEBUG4__DFSM_CONVERT_PASSTHROUGH_TO_BYPASS_MASK 0x00000100L
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR_MASK 0x00000200L
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR_MASK 0x00000400L
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR_MASK 0x00000800L
+#define DB_DEBUG4__DISABLE_SEPARATE_DFSM_CLK_MASK 0x00001000L
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP_MASK 0x00002000L
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION_MASK 0x00004000L
+#define DB_DEBUG4__DISABLE_TS_WRITE_L0_MASK 0x00008000L
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE_MASK 0x00010000L
+#define DB_DEBUG4__DISABLE_HIZ_Q1_TS_COLLISION_DETECT_MASK 0x00020000L
+#define DB_DEBUG4__DISABLE_HIZ_Q2_TS_COLLISION_DETECT_MASK 0x00040000L
+#define DB_DEBUG4__DB_EXTRA_DEBUG4_MASK 0x3FF80000L
+#define DB_DEBUG4__DISABLE_8PPC_OBJPRIMID_WHEN_NO_SHADER_EXPORTS_MASK 0x40000000L
+#define DB_DEBUG4__FULL_TILE_CACHE_EVICT_ON_HALF_FULL_MASK 0x80000000L
+//DB_CREDIT_LIMIT
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x0
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x5
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0xa
+#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS__SHIFT 0x18
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x0000001FL
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x000003E0L
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x00001C00L
+#define DB_CREDIT_LIMIT__DB_CB_TILE_CREDITS_MASK 0x7F000000L
+//DB_WATERMARKS
+#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x0
+#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x5
+#define DB_WATERMARKS__FORCE_SUMMARIZE__SHIFT 0xb
+#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0xf
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x14
+#define DB_WATERMARKS__AUTO_FLUSH_HTILE__SHIFT 0x1e
+#define DB_WATERMARKS__AUTO_FLUSH_QUAD__SHIFT 0x1f
+#define DB_WATERMARKS__DEPTH_FREE_MASK 0x0000001FL
+#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x000007E0L
+#define DB_WATERMARKS__FORCE_SUMMARIZE_MASK 0x00007800L
+#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0x000F8000L
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0x0FF00000L
+#define DB_WATERMARKS__AUTO_FLUSH_HTILE_MASK 0x40000000L
+#define DB_WATERMARKS__AUTO_FLUSH_QUAD_MASK 0x80000000L
+//DB_SUBTILE_CONTROL
+#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x0
+#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x2
+#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x4
+#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x6
+#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x8
+#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0xa
+#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0xc
+#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0xe
+#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x10
+#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x12
+#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x00000003L
+#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0x0000000CL
+#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x00000030L
+#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0x000000C0L
+#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x00000300L
+#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0x00000C00L
+#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x00003000L
+#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0x0000C000L
+#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x00030000L
+#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0x000C0000L
+//DB_FREE_CACHELINES
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x0
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x7
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0xe
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x14
+#define DB_FREE_CACHELINES__QUAD_READ_REQS__SHIFT 0x18
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x0000007FL
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x00003F80L
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x000FC000L
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0x00F00000L
+#define DB_FREE_CACHELINES__QUAD_READ_REQS_MASK 0xFF000000L
+//DB_FIFO_DEPTH1
+#define DB_FIFO_DEPTH1__DB_RMI_RDREQ_CREDITS__SHIFT 0x0
+#define DB_FIFO_DEPTH1__DB_RMI_WRREQ_CREDITS__SHIFT 0x5
+#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0xa
+#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x15
+#define DB_FIFO_DEPTH1__DB_RMI_RDREQ_CREDITS_MASK 0x0000001FL
+#define DB_FIFO_DEPTH1__DB_RMI_WRREQ_CREDITS_MASK 0x000003E0L
+#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0x0000FC00L
+#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0x001F0000L
+#define DB_FIFO_DEPTH1__LTILE_PROBE_FIFO_DEPTH_MASK 0x1FE00000L
+//DB_FIFO_DEPTH2
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0xf
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x19
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x00007F00L
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x01FF8000L
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xFE000000L
+//DB_EXCEPTION_CONTROL
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE__SHIFT 0x0
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE__SHIFT 0x1
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE__SHIFT 0x2
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE_MASK 0x00000004L
+//DB_RING_CONTROL
+#define DB_RING_CONTROL__COUNTER_CONTROL__SHIFT 0x0
+#define DB_RING_CONTROL__COUNTER_CONTROL_MASK 0x00000003L
+//DB_MEM_ARB_WATERMARKS
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK__SHIFT 0x0
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK__SHIFT 0x8
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK__SHIFT 0x10
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK__SHIFT 0x18
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK_MASK 0x00000007L
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK_MASK 0x00000700L
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK_MASK 0x00070000L
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK_MASK 0x07000000L
+//DB_RMI_CACHE_POLICY
+#define DB_RMI_CACHE_POLICY__Z_RD__SHIFT 0x0
+#define DB_RMI_CACHE_POLICY__S_RD__SHIFT 0x1
+#define DB_RMI_CACHE_POLICY__HTILE_RD__SHIFT 0x2
+#define DB_RMI_CACHE_POLICY__Z_WR__SHIFT 0x8
+#define DB_RMI_CACHE_POLICY__S_WR__SHIFT 0x9
+#define DB_RMI_CACHE_POLICY__HTILE_WR__SHIFT 0xa
+#define DB_RMI_CACHE_POLICY__ZPCPSD_WR__SHIFT 0xb
+#define DB_RMI_CACHE_POLICY__CC_RD__SHIFT 0x10
+#define DB_RMI_CACHE_POLICY__FMASK_RD__SHIFT 0x11
+#define DB_RMI_CACHE_POLICY__CMASK_RD__SHIFT 0x12
+#define DB_RMI_CACHE_POLICY__DCC_RD__SHIFT 0x13
+#define DB_RMI_CACHE_POLICY__CC_WR__SHIFT 0x18
+#define DB_RMI_CACHE_POLICY__FMASK_WR__SHIFT 0x19
+#define DB_RMI_CACHE_POLICY__CMASK_WR__SHIFT 0x1a
+#define DB_RMI_CACHE_POLICY__DCC_WR__SHIFT 0x1b
+#define DB_RMI_CACHE_POLICY__Z_RD_MASK 0x00000001L
+#define DB_RMI_CACHE_POLICY__S_RD_MASK 0x00000002L
+#define DB_RMI_CACHE_POLICY__HTILE_RD_MASK 0x00000004L
+#define DB_RMI_CACHE_POLICY__Z_WR_MASK 0x00000100L
+#define DB_RMI_CACHE_POLICY__S_WR_MASK 0x00000200L
+#define DB_RMI_CACHE_POLICY__HTILE_WR_MASK 0x00000400L
+#define DB_RMI_CACHE_POLICY__ZPCPSD_WR_MASK 0x00000800L
+#define DB_RMI_CACHE_POLICY__CC_RD_MASK 0x00010000L
+#define DB_RMI_CACHE_POLICY__FMASK_RD_MASK 0x00020000L
+#define DB_RMI_CACHE_POLICY__CMASK_RD_MASK 0x00040000L
+#define DB_RMI_CACHE_POLICY__DCC_RD_MASK 0x00080000L
+#define DB_RMI_CACHE_POLICY__CC_WR_MASK 0x01000000L
+#define DB_RMI_CACHE_POLICY__FMASK_WR_MASK 0x02000000L
+#define DB_RMI_CACHE_POLICY__CMASK_WR_MASK 0x04000000L
+#define DB_RMI_CACHE_POLICY__DCC_WR_MASK 0x08000000L
+//DB_DFSM_CONFIG
+#define DB_DFSM_CONFIG__BYPASS_DFSM__SHIFT 0x0
+#define DB_DFSM_CONFIG__DISABLE_PUNCHOUT__SHIFT 0x1
+#define DB_DFSM_CONFIG__DISABLE_POPS__SHIFT 0x2
+#define DB_DFSM_CONFIG__FORCE_FLUSH__SHIFT 0x3
+#define DB_DFSM_CONFIG__MIDDLE_PIPE_MAX_DEPTH__SHIFT 0x8
+#define DB_DFSM_CONFIG__BYPASS_DFSM_MASK 0x00000001L
+#define DB_DFSM_CONFIG__DISABLE_PUNCHOUT_MASK 0x00000002L
+#define DB_DFSM_CONFIG__DISABLE_POPS_MASK 0x00000004L
+#define DB_DFSM_CONFIG__FORCE_FLUSH_MASK 0x00000008L
+#define DB_DFSM_CONFIG__MIDDLE_PIPE_MAX_DEPTH_MASK 0x00007F00L
+//DB_DFSM_WATERMARK
+#define DB_DFSM_WATERMARK__DFSM_HIGH_WATERMARK__SHIFT 0x0
+#define DB_DFSM_WATERMARK__POPS_HIGH_WATERMARK__SHIFT 0x10
+#define DB_DFSM_WATERMARK__DFSM_HIGH_WATERMARK_MASK 0x0000FFFFL
+#define DB_DFSM_WATERMARK__POPS_HIGH_WATERMARK_MASK 0xFFFF0000L
+//DB_DFSM_TILES_IN_FLIGHT
+#define DB_DFSM_TILES_IN_FLIGHT__HIGH_WATERMARK__SHIFT 0x0
+#define DB_DFSM_TILES_IN_FLIGHT__HARD_LIMIT__SHIFT 0x10
+#define DB_DFSM_TILES_IN_FLIGHT__HIGH_WATERMARK_MASK 0x0000FFFFL
+#define DB_DFSM_TILES_IN_FLIGHT__HARD_LIMIT_MASK 0xFFFF0000L
+//DB_DFSM_PRIMS_IN_FLIGHT
+#define DB_DFSM_PRIMS_IN_FLIGHT__HIGH_WATERMARK__SHIFT 0x0
+#define DB_DFSM_PRIMS_IN_FLIGHT__HARD_LIMIT__SHIFT 0x10
+#define DB_DFSM_PRIMS_IN_FLIGHT__HIGH_WATERMARK_MASK 0x0000FFFFL
+#define DB_DFSM_PRIMS_IN_FLIGHT__HARD_LIMIT_MASK 0xFFFF0000L
+//DB_DFSM_WATCHDOG
+#define DB_DFSM_WATCHDOG__TIMER_TARGET__SHIFT 0x0
+#define DB_DFSM_WATCHDOG__TIMER_TARGET_MASK 0xFFFFFFFFL
+//DB_DFSM_FLUSH_ENABLE
+#define DB_DFSM_FLUSH_ENABLE__PRIMARY_EVENTS__SHIFT 0x0
+#define DB_DFSM_FLUSH_ENABLE__AUX_FORCE_PASSTHRU__SHIFT 0x18
+#define DB_DFSM_FLUSH_ENABLE__AUX_EVENTS__SHIFT 0x1c
+#define DB_DFSM_FLUSH_ENABLE__PRIMARY_EVENTS_MASK 0x000003FFL
+#define DB_DFSM_FLUSH_ENABLE__AUX_FORCE_PASSTHRU_MASK 0x0F000000L
+#define DB_DFSM_FLUSH_ENABLE__AUX_EVENTS_MASK 0xF0000000L
+//DB_DFSM_FLUSH_AUX_EVENT
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_A__SHIFT 0x0
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_B__SHIFT 0x8
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_C__SHIFT 0x10
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_D__SHIFT 0x18
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_A_MASK 0x000000FFL
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_B_MASK 0x0000FF00L
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_C_MASK 0x00FF0000L
+#define DB_DFSM_FLUSH_AUX_EVENT__EVENT_D_MASK 0xFF000000L
+//CC_RB_REDUNDANCY
+#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//CC_RB_BACKEND_DISABLE
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00FF0000L
+//GB_ADDR_CONFIG
+#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE__SHIFT 0x10
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG__NUM_GPUS__SHIFT 0x15
+#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE__SHIFT 0x18
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG__ROW_SIZE__SHIFT 0x1c
+#define GB_ADDR_CONFIG__NUM_LOWER_PIPES__SHIFT 0x1e
+#define GB_ADDR_CONFIG__SE_ENABLE__SHIFT 0x1f
+#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define GB_ADDR_CONFIG__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG__NUM_GPUS_MASK 0x00E00000L
+#define GB_ADDR_CONFIG__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+#define GB_ADDR_CONFIG__ROW_SIZE_MASK 0x30000000L
+#define GB_ADDR_CONFIG__NUM_LOWER_PIPES_MASK 0x40000000L
+#define GB_ADDR_CONFIG__SE_ENABLE_MASK 0x80000000L
+//GB_BACKEND_MAP
+#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x0
+#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xFFFFFFFFL
+//GB_GPU_ID
+#define GB_GPU_ID__GPU_ID__SHIFT 0x0
+#define GB_GPU_ID__GPU_ID_MASK 0x0000000FL
+//CC_RB_DAISY_CHAIN
+#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x0
+#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x4
+#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x8
+#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0xc
+#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x10
+#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x14
+#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x18
+#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x1c
+#define CC_RB_DAISY_CHAIN__RB_0_MASK 0x0000000FL
+#define CC_RB_DAISY_CHAIN__RB_1_MASK 0x000000F0L
+#define CC_RB_DAISY_CHAIN__RB_2_MASK 0x00000F00L
+#define CC_RB_DAISY_CHAIN__RB_3_MASK 0x0000F000L
+#define CC_RB_DAISY_CHAIN__RB_4_MASK 0x000F0000L
+#define CC_RB_DAISY_CHAIN__RB_5_MASK 0x00F00000L
+#define CC_RB_DAISY_CHAIN__RB_6_MASK 0x0F000000L
+#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xF0000000L
+//GB_ADDR_CONFIG_READ
+#define GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define GB_ADDR_CONFIG_READ__SHADER_ENGINE_TILE_SIZE__SHIFT 0x10
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG_READ__NUM_GPUS__SHIFT 0x15
+#define GB_ADDR_CONFIG_READ__MULTI_GPU_TILE_SIZE__SHIFT 0x18
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG_READ__ROW_SIZE__SHIFT 0x1c
+#define GB_ADDR_CONFIG_READ__NUM_LOWER_PIPES__SHIFT 0x1e
+#define GB_ADDR_CONFIG_READ__SE_ENABLE__SHIFT 0x1f
+#define GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define GB_ADDR_CONFIG_READ__SHADER_ENGINE_TILE_SIZE_MASK 0x00070000L
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG_READ__NUM_GPUS_MASK 0x00E00000L
+#define GB_ADDR_CONFIG_READ__MULTI_GPU_TILE_SIZE_MASK 0x03000000L
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+#define GB_ADDR_CONFIG_READ__ROW_SIZE_MASK 0x30000000L
+#define GB_ADDR_CONFIG_READ__NUM_LOWER_PIPES_MASK 0x40000000L
+#define GB_ADDR_CONFIG_READ__SE_ENABLE_MASK 0x80000000L
+//GB_TILE_MODE0
+#define GB_TILE_MODE0__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE0__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE0__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE0__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE0__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE0__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE0__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE0__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE1
+#define GB_TILE_MODE1__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE1__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE1__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE1__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE1__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE1__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE1__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE1__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE1__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE2
+#define GB_TILE_MODE2__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE2__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE2__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE2__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE2__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE2__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE2__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE2__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE2__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE3
+#define GB_TILE_MODE3__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE3__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE3__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE3__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE3__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE3__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE3__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE3__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE3__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE4
+#define GB_TILE_MODE4__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE4__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE4__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE4__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE4__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE4__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE4__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE4__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE4__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE5
+#define GB_TILE_MODE5__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE5__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE5__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE5__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE5__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE5__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE5__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE5__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE5__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE6
+#define GB_TILE_MODE6__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE6__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE6__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE6__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE6__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE6__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE6__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE6__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE6__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE7
+#define GB_TILE_MODE7__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE7__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE7__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE7__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE7__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE7__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE7__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE7__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE7__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE8
+#define GB_TILE_MODE8__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE8__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE8__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE8__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE8__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE8__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE8__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE8__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE8__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE9
+#define GB_TILE_MODE9__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE9__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE9__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE9__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE9__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE9__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE9__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE9__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE9__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE10
+#define GB_TILE_MODE10__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE10__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE10__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE10__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE10__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE10__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE10__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE10__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE10__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE11
+#define GB_TILE_MODE11__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE11__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE11__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE11__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE11__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE11__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE11__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE11__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE11__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE12
+#define GB_TILE_MODE12__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE12__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE12__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE12__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE12__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE12__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE12__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE12__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE12__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE13
+#define GB_TILE_MODE13__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE13__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE13__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE13__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE13__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE13__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE13__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE13__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE13__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE14
+#define GB_TILE_MODE14__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE14__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE14__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE14__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE14__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE14__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE14__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE14__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE14__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE15
+#define GB_TILE_MODE15__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE15__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE15__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE15__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE15__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE15__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE15__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE15__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE15__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE16
+#define GB_TILE_MODE16__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE16__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE16__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE16__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE16__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE16__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE16__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE16__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE16__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE17
+#define GB_TILE_MODE17__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE17__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE17__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE17__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE17__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE17__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE17__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE17__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE17__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE18
+#define GB_TILE_MODE18__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE18__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE18__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE18__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE18__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE18__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE18__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE18__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE18__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE19
+#define GB_TILE_MODE19__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE19__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE19__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE19__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE19__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE19__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE19__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE19__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE19__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE20
+#define GB_TILE_MODE20__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE20__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE20__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE20__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE20__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE20__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE20__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE20__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE20__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE21
+#define GB_TILE_MODE21__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE21__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE21__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE21__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE21__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE21__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE21__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE21__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE21__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE22
+#define GB_TILE_MODE22__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE22__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE22__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE22__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE22__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE22__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE22__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE22__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE22__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE23
+#define GB_TILE_MODE23__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE23__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE23__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE23__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE23__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE23__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE23__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE23__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE23__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE24
+#define GB_TILE_MODE24__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE24__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE24__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE24__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE24__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE24__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE24__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE24__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE24__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE25
+#define GB_TILE_MODE25__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE25__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE25__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE25__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE25__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE25__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE25__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE25__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE25__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE26
+#define GB_TILE_MODE26__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE26__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE26__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE26__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE26__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE26__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE26__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE26__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE26__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE27
+#define GB_TILE_MODE27__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE27__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE27__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE27__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE27__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE27__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE27__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE27__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE27__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE28
+#define GB_TILE_MODE28__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE28__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE28__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE28__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE28__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE28__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE28__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE28__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE28__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE29
+#define GB_TILE_MODE29__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE29__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE29__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE29__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE29__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE29__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE29__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE29__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE29__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE30
+#define GB_TILE_MODE30__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE30__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE30__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE30__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE30__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE30__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE30__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE30__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE30__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_TILE_MODE31
+#define GB_TILE_MODE31__ARRAY_MODE__SHIFT 0x2
+#define GB_TILE_MODE31__PIPE_CONFIG__SHIFT 0x6
+#define GB_TILE_MODE31__TILE_SPLIT__SHIFT 0xb
+#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW__SHIFT 0x16
+#define GB_TILE_MODE31__SAMPLE_SPLIT__SHIFT 0x19
+#define GB_TILE_MODE31__ARRAY_MODE_MASK 0x0000003CL
+#define GB_TILE_MODE31__PIPE_CONFIG_MASK 0x000007C0L
+#define GB_TILE_MODE31__TILE_SPLIT_MASK 0x00003800L
+#define GB_TILE_MODE31__MICRO_TILE_MODE_NEW_MASK 0x01C00000L
+#define GB_TILE_MODE31__SAMPLE_SPLIT_MASK 0x06000000L
+//GB_MACROTILE_MODE0
+#define GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE0__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE0__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE0__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE0__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE0__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE1
+#define GB_MACROTILE_MODE1__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE1__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE1__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE1__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE1__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE1__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE1__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE2
+#define GB_MACROTILE_MODE2__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE2__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE2__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE2__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE2__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE2__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE2__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE3
+#define GB_MACROTILE_MODE3__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE3__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE3__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE3__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE3__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE3__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE3__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE4
+#define GB_MACROTILE_MODE4__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE4__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE4__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE4__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE4__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE4__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE4__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE5
+#define GB_MACROTILE_MODE5__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE5__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE5__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE5__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE5__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE5__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE5__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE6
+#define GB_MACROTILE_MODE6__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE6__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE6__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE6__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE6__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE6__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE6__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE7
+#define GB_MACROTILE_MODE7__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE7__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE7__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE7__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE7__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE7__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE7__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE8
+#define GB_MACROTILE_MODE8__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE8__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE8__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE8__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE8__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE8__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE8__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE9
+#define GB_MACROTILE_MODE9__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE9__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE9__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE9__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE9__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE9__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE9__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE10
+#define GB_MACROTILE_MODE10__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE10__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE10__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE10__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE10__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE10__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE10__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE11
+#define GB_MACROTILE_MODE11__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE11__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE11__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE11__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE11__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE11__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE11__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE12
+#define GB_MACROTILE_MODE12__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE12__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE12__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE12__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE12__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE12__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE12__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE13
+#define GB_MACROTILE_MODE13__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE13__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE13__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE13__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE13__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE13__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE13__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE14
+#define GB_MACROTILE_MODE14__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE14__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE14__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE14__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE14__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE14__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE14__NUM_BANKS_MASK 0x000000C0L
+//GB_MACROTILE_MODE15
+#define GB_MACROTILE_MODE15__BANK_WIDTH__SHIFT 0x0
+#define GB_MACROTILE_MODE15__BANK_HEIGHT__SHIFT 0x2
+#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT__SHIFT 0x4
+#define GB_MACROTILE_MODE15__NUM_BANKS__SHIFT 0x6
+#define GB_MACROTILE_MODE15__BANK_WIDTH_MASK 0x00000003L
+#define GB_MACROTILE_MODE15__BANK_HEIGHT_MASK 0x0000000CL
+#define GB_MACROTILE_MODE15__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define GB_MACROTILE_MODE15__NUM_BANKS_MASK 0x000000C0L
+//CB_HW_CONTROL
+#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT__SHIFT 0x0
+#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT__SHIFT 0x6
+#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT__SHIFT 0xc
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x10
+#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING__SHIFT 0x12
+#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x13
+#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE__SHIFT 0x14
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x15
+#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK__SHIFT 0x16
+#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG__SHIFT 0x17
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x18
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x19
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x1a
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x1b
+#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT__SHIFT 0x1c
+#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT__SHIFT 0x1d
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
+#define CB_HW_CONTROL__CM_CACHE_EVICT_POINT_MASK 0x0000000FL
+#define CB_HW_CONTROL__FC_CACHE_EVICT_POINT_MASK 0x000003C0L
+#define CB_HW_CONTROL__CC_CACHE_EVICT_POINT_MASK 0x0000F000L
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00010000L
+#define CB_HW_CONTROL__DISABLE_INTNORM_LE11BPC_CLAMPING_MASK 0x00040000L
+#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x00080000L
+#define CB_HW_CONTROL__FORCE_ALWAYS_TOGGLE_MASK 0x00100000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x00200000L
+#define CB_HW_CONTROL__DISABLE_FULL_WRITE_MASK_MASK 0x00400000L
+#define CB_HW_CONTROL__DISABLE_RESOLVE_OPT_FOR_SINGLE_FRAG_MASK 0x00800000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x01000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x02000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x04000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x08000000L
+#define CB_HW_CONTROL__PRIORITIZE_FC_WR_OVER_FC_RD_ON_CMASK_CONFLICT_MASK 0x10000000L
+#define CB_HW_CONTROL__PRIORITIZE_FC_EVICT_OVER_FOP_RD_ON_BANK_CONFLICT_MASK 0x20000000L
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000L
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000L
+//CB_HW_CONTROL_1
+#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS__SHIFT 0x0
+#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS__SHIFT 0x5
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0xb
+#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH__SHIFT 0x11
+#define CB_HW_CONTROL_1__RMI_CREDITS__SHIFT 0x1a
+#define CB_HW_CONTROL_1__CM_CACHE_NUM_TAGS_MASK 0x0000001FL
+#define CB_HW_CONTROL_1__FC_CACHE_NUM_TAGS_MASK 0x000007E0L
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x0001F800L
+#define CB_HW_CONTROL_1__CM_TILE_FIFO_DEPTH_MASK 0x03FE0000L
+#define CB_HW_CONTROL_1__RMI_CREDITS_MASK 0xFC000000L
+//CB_HW_CONTROL_2
+#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH__SHIFT 0x0
+#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH__SHIFT 0x8
+#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH__SHIFT 0xf
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8__SHIFT 0x18
+#define CB_HW_CONTROL_2__CHICKEN_BITS__SHIFT 0x1c
+#define CB_HW_CONTROL_2__CC_EVEN_ODD_FIFO_DEPTH_MASK 0x000000FFL
+#define CB_HW_CONTROL_2__FC_RDLAT_TILE_FIFO_DEPTH_MASK 0x00007F00L
+#define CB_HW_CONTROL_2__FC_RDLAT_QUAD_FIFO_DEPTH_MASK 0x007F8000L
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8_MASK 0x0F000000L
+#define CB_HW_CONTROL_2__CHICKEN_BITS_MASK 0xF0000000L
+//CB_HW_CONTROL_3
+#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL__SHIFT 0x0
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED__SHIFT 0x1
+#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT__SHIFT 0x2
+#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP__SHIFT 0x3
+#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR__SHIFT 0x4
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM__SHIFT 0x5
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_KEY_MOD__SHIFT 0x6
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING__SHIFT 0x7
+#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION__SHIFT 0x8
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS__SHIFT 0x9
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS__SHIFT 0xa
+#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION__SHIFT 0xb
+#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967__SHIFT 0xc
+#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657__SHIFT 0xd
+#define CB_HW_CONTROL_3__DISABLE_OC_FIXES_OF_BUG_522542__SHIFT 0xe
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH__SHIFT 0xf
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH__SHIFT 0x10
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC__SHIFT 0x11
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_FC__SHIFT 0x12
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC__SHIFT 0x13
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CM__SHIFT 0x14
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC__SHIFT 0x15
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_FC__SHIFT 0x16
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC__SHIFT 0x17
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM__SHIFT 0x18
+#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT__SHIFT 0x19
+#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING__SHIFT 0x1a
+#define CB_HW_CONTROL_3__DISABLE_DUALSRC_WITH_OBJPRIMID_FIX__SHIFT 0x1b
+#define CB_HW_CONTROL_3__COLOR_CACHE_PREFETCH_NUM_CLS__SHIFT 0x1c
+#define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x00000001L
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED_MASK 0x00000002L
+#define CB_HW_CONTROL_3__DISABLE_FAST_CLEAR_FETCH_OPT_MASK 0x00000004L
+#define CB_HW_CONTROL_3__DISABLE_QUAD_MARKER_DROP_STOP_MASK 0x00000008L
+#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_CAM_CLR_MASK 0x00000010L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM_MASK 0x00000020L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_KEY_MOD_MASK 0x00000040L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING_MASK 0x00000080L
+#define CB_HW_CONTROL_3__DISABLE_OVERWRITE_COMBINER_TARGET_MASK_VALIDATION_MASK 0x00000100L
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS_MASK 0x00000200L
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS_MASK 0x00000400L
+#define CB_HW_CONTROL_3__DISABLE_CMASK_LAST_QUAD_INSERTION_MASK 0x00000800L
+#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_511967_MASK 0x00001000L
+#define CB_HW_CONTROL_3__DISABLE_ROP3_FIXES_OF_BUG_520657_MASK 0x00002000L
+#define CB_HW_CONTROL_3__DISABLE_OC_FIXES_OF_BUG_522542_MASK 0x00004000L
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH_MASK 0x00008000L
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH_MASK 0x00010000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC_MASK 0x00020000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_FC_MASK 0x00040000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC_MASK 0x00080000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CM_MASK 0x00100000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC_MASK 0x00200000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_FC_MASK 0x00400000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC_MASK 0x00800000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM_MASK 0x01000000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT_MASK 0x02000000L
+#define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING_MASK 0x04000000L
+#define CB_HW_CONTROL_3__DISABLE_DUALSRC_WITH_OBJPRIMID_FIX_MASK 0x08000000L
+#define CB_HW_CONTROL_3__COLOR_CACHE_PREFETCH_NUM_CLS_MASK 0x30000000L
+//CB_HW_MEM_ARBITER_RD
+#define CB_HW_MEM_ARBITER_RD__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_FC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CM__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS__SHIFT 0x14
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE__SHIFT 0x17
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT__SHIFT 0x1a
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x1d
+#define CB_HW_MEM_ARBITER_RD__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_FC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CM_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS_MASK 0x000C0000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS_MASK 0x00300000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS_MASK 0x00400000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE_MASK 0x03800000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT_MASK 0x1C000000L
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS_MASK 0x20000000L
+//CB_HW_MEM_ARBITER_WR
+#define CB_HW_MEM_ARBITER_WR__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_FC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CM__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS__SHIFT 0x14
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE__SHIFT 0x17
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT__SHIFT 0x1a
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x1d
+#define CB_HW_MEM_ARBITER_WR__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_FC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CM_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS_MASK 0x000C0000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS_MASK 0x00300000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK_MASK 0x00400000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE_MASK 0x03800000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT_MASK 0x1C000000L
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS_MASK 0x20000000L
+//CB_DCC_CONFIG
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH__SHIFT 0x0
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE__SHIFT 0x5
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE__SHIFT 0x6
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE__SHIFT 0x7
+#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH__SHIFT 0x8
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH__SHIFT 0x10
+#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT__SHIFT 0x18
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS__SHIFT 0x1c
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DEPTH_MASK 0x0000001FL
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_DISABLE_MASK 0x00000020L
+#define CB_DCC_CONFIG__OVERWRITE_COMBINER_CC_POP_DISABLE_MASK 0x00000040L
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE_MASK 0x00000080L
+#define CB_DCC_CONFIG__FC_RDLAT_KEYID_FIFO_DEPTH_MASK 0x0000FF00L
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH_MASK 0x007F0000L
+#define CB_DCC_CONFIG__DCC_CACHE_EVICT_POINT_MASK 0x0F000000L
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS_MASK 0xF0000000L
+//GC_USER_RB_REDUNDANCY
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//GC_USER_RB_BACKEND_DISABLE
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x10
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0x00FF0000L
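GC_USER_RB_BACKEND_DISABLE exposes a disable bitmap in bits 23:16. A hedged, self-contained sketch of reading it follows, assuming one bit per render backend, which is how comparable harvest fields are commonly interpreted; the helper name is illustrative and not part of this patch.

#include <stdint.h>

/* Count render backends flagged as disabled in GC_USER_RB_BACKEND_DISABLE. */
static inline unsigned int count_user_disabled_rbs(uint32_t gc_user_rb_backend_disable)
{
	uint32_t field = (gc_user_rb_backend_disable &
			  GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK) >>
			 GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
	unsigned int count = 0;

	/* Portable popcount over the extracted 8-bit field. */
	while (field) {
		count += field & 1;
		field >>= 1;
	}
	return count;
}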
+
+
+// addressBlock: gc_ea_gceadec2
+//GCEA_PERFCOUNTER_RSLT_CNTL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//GCEA_DSM_CNTL
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//GCEA_DSM_CNTLA
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//GCEA_DSM_CNTLB
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+//GCEA_DSM_CNTL2
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define GCEA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define GCEA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//GCEA_DSM_CNTL2A
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//GCEA_DSM_CNTL2B
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+//GCEA_TCC_XBR_CREDITS
+#define GCEA_TCC_XBR_CREDITS__DRAM_RD_LIMIT__SHIFT 0x0
+#define GCEA_TCC_XBR_CREDITS__DRAM_RD_RESERVE__SHIFT 0x6
+#define GCEA_TCC_XBR_CREDITS__IO_RD_LIMIT__SHIFT 0x8
+#define GCEA_TCC_XBR_CREDITS__IO_RD_RESERVE__SHIFT 0xe
+#define GCEA_TCC_XBR_CREDITS__DRAM_WR_LIMIT__SHIFT 0x10
+#define GCEA_TCC_XBR_CREDITS__DRAM_WR_RESERVE__SHIFT 0x16
+#define GCEA_TCC_XBR_CREDITS__IO_WR_LIMIT__SHIFT 0x18
+#define GCEA_TCC_XBR_CREDITS__IO_WR_RESERVE__SHIFT 0x1e
+#define GCEA_TCC_XBR_CREDITS__DRAM_RD_LIMIT_MASK 0x0000003FL
+#define GCEA_TCC_XBR_CREDITS__DRAM_RD_RESERVE_MASK 0x000000C0L
+#define GCEA_TCC_XBR_CREDITS__IO_RD_LIMIT_MASK 0x00003F00L
+#define GCEA_TCC_XBR_CREDITS__IO_RD_RESERVE_MASK 0x0000C000L
+#define GCEA_TCC_XBR_CREDITS__DRAM_WR_LIMIT_MASK 0x003F0000L
+#define GCEA_TCC_XBR_CREDITS__DRAM_WR_RESERVE_MASK 0x00C00000L
+#define GCEA_TCC_XBR_CREDITS__IO_WR_LIMIT_MASK 0x3F000000L
+#define GCEA_TCC_XBR_CREDITS__IO_WR_RESERVE_MASK 0xC0000000L
+//GCEA_TCC_XBR_MAXBURST
+#define GCEA_TCC_XBR_MAXBURST__DRAM_RD__SHIFT 0x0
+#define GCEA_TCC_XBR_MAXBURST__IO_RD__SHIFT 0x4
+#define GCEA_TCC_XBR_MAXBURST__DRAM_WR__SHIFT 0x8
+#define GCEA_TCC_XBR_MAXBURST__IO_WR__SHIFT 0xc
+#define GCEA_TCC_XBR_MAXBURST__DRAM_RD_MASK 0x0000000FL
+#define GCEA_TCC_XBR_MAXBURST__IO_RD_MASK 0x000000F0L
+#define GCEA_TCC_XBR_MAXBURST__DRAM_WR_MASK 0x00000F00L
+#define GCEA_TCC_XBR_MAXBURST__IO_WR_MASK 0x0000F000L
+//GCEA_PROBE_CNTL
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY__SHIFT 0x0
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE__SHIFT 0x5
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY_MASK 0x0000001FL
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE_MASK 0x00000020L
+//GCEA_PROBE_MAP
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTTCC__SHIFT 0x0
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTTCC__SHIFT 0x1
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTTCC__SHIFT 0x2
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTTCC__SHIFT 0x3
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTTCC__SHIFT 0x4
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTTCC__SHIFT 0x5
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTTCC__SHIFT 0x6
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTTCC__SHIFT 0x7
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTTCC__SHIFT 0x8
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTTCC__SHIFT 0x9
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTTCC__SHIFT 0xa
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTTCC__SHIFT 0xb
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTTCC__SHIFT 0xc
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTTCC__SHIFT 0xd
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTTCC__SHIFT 0xe
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTTCC__SHIFT 0xf
+#define GCEA_PROBE_MAP__INTLV_SIZE__SHIFT 0x10
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTTCC_MASK 0x00000001L
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTTCC_MASK 0x00000002L
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTTCC_MASK 0x00000004L
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTTCC_MASK 0x00000008L
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTTCC_MASK 0x00000010L
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTTCC_MASK 0x00000020L
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTTCC_MASK 0x00000040L
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTTCC_MASK 0x00000080L
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTTCC_MASK 0x00000100L
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTTCC_MASK 0x00000200L
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTTCC_MASK 0x00000400L
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTTCC_MASK 0x00000800L
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTTCC_MASK 0x00001000L
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTTCC_MASK 0x00002000L
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTTCC_MASK 0x00004000L
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTTCC_MASK 0x00008000L
+#define GCEA_PROBE_MAP__INTLV_SIZE_MASK 0x00030000L
+//GCEA_ERR_STATUS
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GCEA_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//GCEA_MISC2
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+//GCEA_DRAM_BANK_ARB
+#define GCEA_DRAM_BANK_ARB__AGEBASED_BANKARB__SHIFT 0x0
+#define GCEA_DRAM_BANK_ARB__BANK_STAY_AWAY_CYCLIM__SHIFT 0x1
+#define GCEA_DRAM_BANK_ARB__BANK_STAY_AWAY_REQLIM__SHIFT 0x8
+#define GCEA_DRAM_BANK_ARB__BANK_STAY_AWAY_STALLMODE__SHIFT 0xe
+#define GCEA_DRAM_BANK_ARB__AGEBASED_BANKARB_MASK 0x00000001L
+#define GCEA_DRAM_BANK_ARB__BANK_STAY_AWAY_CYCLIM_MASK 0x000000FEL
+#define GCEA_DRAM_BANK_ARB__BANK_STAY_AWAY_REQLIM_MASK 0x00003F00L
+#define GCEA_DRAM_BANK_ARB__BANK_STAY_AWAY_STALLMODE_MASK 0x00004000L
+//GCEA_SDP_BACKDOOR_CMDCREDITS0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_CMDCREDITS1
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS1
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_MISCCREDITS
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED__SHIFT 0x8
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED__SHIFT 0x10
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED__SHIFT 0x17
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED_MASK 0x000000FFL
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED_MASK 0x0000FF00L
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED_MASK 0x007F0000L
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED_MASK 0x3F800000L
+//GCEA_SDP_ENABLE
+#define GCEA_SDP_ENABLE__ENABLE__SHIFT 0x0
+#define GCEA_SDP_ENABLE__ENABLE_MASK 0x00000001L
+
+
+// addressBlock: gc_rmi_rmidec
+//RMI_GENERAL_CNTL
+#define RMI_GENERAL_CNTL__BURST_DISABLE__SHIFT 0x0
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE__SHIFT 0x1
+#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG__SHIFT 0x11
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN__SHIFT 0x13
+#define RMI_GENERAL_CNTL__RB1_HARVEST_EN__SHIFT 0x14
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE__SHIFT 0x15
+#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_UPDATE__SHIFT 0x19
+#define RMI_GENERAL_CNTL__SKID_FIFO_0_OVERFLOW_ERROR_MASK__SHIFT 0x1a
+#define RMI_GENERAL_CNTL__SKID_FIFO_0_UNDERFLOW_ERROR_MASK__SHIFT 0x1b
+#define RMI_GENERAL_CNTL__SKID_FIFO_1_OVERFLOW_ERROR_MASK__SHIFT 0x1c
+#define RMI_GENERAL_CNTL__SKID_FIFO_1_UNDERFLOW_ERROR_MASK__SHIFT 0x1d
+#define RMI_GENERAL_CNTL__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK__SHIFT 0x1e
+#define RMI_GENERAL_CNTL__BURST_DISABLE_MASK 0x00000001L
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE_MASK 0x0001FFFEL
+#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_MASK 0x00060000L
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN_MASK 0x00080000L
+#define RMI_GENERAL_CNTL__RB1_HARVEST_EN_MASK 0x00100000L
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE_MASK 0x01E00000L
+#define RMI_GENERAL_CNTL__XBAR_MUX_CONFIG_UPDATE_MASK 0x02000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_0_OVERFLOW_ERROR_MASK_MASK 0x04000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_0_UNDERFLOW_ERROR_MASK_MASK 0x08000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_1_OVERFLOW_ERROR_MASK_MASK 0x10000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_1_UNDERFLOW_ERROR_MASK_MASK 0x20000000L
+#define RMI_GENERAL_CNTL__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK_MASK 0x40000000L
+//RMI_GENERAL_CNTL1
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE__SHIFT 0x0
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE__SHIFT 0x4
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE__SHIFT 0x6
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK__SHIFT 0x8
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE__SHIFT 0x9
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE__SHIFT 0xa
+#define RMI_GENERAL_CNTL1__UTCL1_PROBE0_RR_ARB_BURST_HINT_EN__SHIFT 0xb
+#define RMI_GENERAL_CNTL1__UTCL1_PROBE1_RR_ARB_BURST_HINT_EN__SHIFT 0xc
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE_MASK 0x0000000FL
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE_MASK 0x00000030L
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE_MASK 0x000000C0L
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK_MASK 0x00000100L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE_MASK 0x00000200L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_MASK 0x00000400L
+#define RMI_GENERAL_CNTL1__UTCL1_PROBE0_RR_ARB_BURST_HINT_EN_MASK 0x00000800L
+#define RMI_GENERAL_CNTL1__UTCL1_PROBE1_RR_ARB_BURST_HINT_EN_MASK 0x00001000L
+//RMI_GENERAL_STATUS
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED__SHIFT 0x0
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR__SHIFT 0x1
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR__SHIFT 0x2
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR__SHIFT 0x3
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR__SHIFT 0x4
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY__SHIFT 0x5
+#define RMI_GENERAL_STATUS__RMI_UTCL1_BUSY__SHIFT 0x6
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY__SHIFT 0x7
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY__SHIFT 0x8
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY__SHIFT 0x9
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY__SHIFT 0xa
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xb
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xc
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY__SHIFT 0xd
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY__SHIFT 0xe
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY__SHIFT 0xf
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_1_BUSY__SHIFT 0x10
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_1_BUSY__SHIFT 0x11
+#define RMI_GENERAL_STATUS__UTC_PROBE1_BUSY__SHIFT 0x12
+#define RMI_GENERAL_STATUS__UTC_PROBE0_BUSY__SHIFT 0x13
+#define RMI_GENERAL_STATUS__RMI_XNACK_BUSY__SHIFT 0x14
+#define RMI_GENERAL_STATUS__XNACK_FIFO_NUM_USED__SHIFT 0x15
+#define RMI_GENERAL_STATUS__XNACK_FIFO_EMPTY__SHIFT 0x1d
+#define RMI_GENERAL_STATUS__XNACK_FIFO_FULL__SHIFT 0x1e
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR__SHIFT 0x1f
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED_MASK 0x00000001L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR_MASK 0x00000002L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR_MASK 0x00000004L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR_MASK 0x00000008L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR_MASK 0x00000010L
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY_MASK 0x00000020L
+#define RMI_GENERAL_STATUS__RMI_UTCL1_BUSY_MASK 0x00000040L
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY_MASK 0x00000080L
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY_MASK 0x00000100L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY_MASK 0x00000200L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY_MASK 0x00000400L
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00000800L
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00001000L
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY_MASK 0x00002000L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY_MASK 0x00004000L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY_MASK 0x00008000L
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_1_BUSY_MASK 0x00010000L
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_1_BUSY_MASK 0x00020000L
+#define RMI_GENERAL_STATUS__UTC_PROBE1_BUSY_MASK 0x00040000L
+#define RMI_GENERAL_STATUS__UTC_PROBE0_BUSY_MASK 0x00080000L
+#define RMI_GENERAL_STATUS__RMI_XNACK_BUSY_MASK 0x00100000L
+#define RMI_GENERAL_STATUS__XNACK_FIFO_NUM_USED_MASK 0x1FE00000L
+#define RMI_GENERAL_STATUS__XNACK_FIFO_EMPTY_MASK 0x20000000L
+#define RMI_GENERAL_STATUS__XNACK_FIFO_FULL_MASK 0x40000000L
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK 0x80000000L
+//RMI_SUBBLOCK_STATUS0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0__SHIFT 0x7
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0__SHIFT 0x8
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1__SHIFT 0x10
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1__SHIFT 0x11
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT__SHIFT 0x12
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0_MASK 0x0000007FL
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0_MASK 0x00000080L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0_MASK 0x00000100L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1_MASK 0x0000FE00L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1_MASK 0x00010000L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1_MASK 0x00020000L
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT_MASK 0x0FFC0000L
+//RMI_SUBBLOCK_STATUS1
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT__SHIFT 0x14
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE_MASK 0x000FFC00L
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT_MASK 0x3FF00000L
+//RMI_SUBBLOCK_STATUS2
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED_MASK 0x000001FFL
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED_MASK 0x0003FE00L
+//RMI_SUBBLOCK_STATUS3
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL_MASK 0x000FFC00L
+//RMI_XBAR_CONFIG
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE__SHIFT 0x0
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE__SHIFT 0x2
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_CONFIG__ARBITER_DIS__SHIFT 0x7
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ__SHIFT 0x8
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE__SHIFT 0xc
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0__SHIFT 0xd
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB1__SHIFT 0xe
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE_MASK 0x00000003L
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE_MASK 0x0000003CL
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE_MASK 0x00000040L
+#define RMI_XBAR_CONFIG__ARBITER_DIS_MASK 0x00000080L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_MASK 0x00000F00L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE_MASK 0x00001000L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0_MASK 0x00002000L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB1_MASK 0x00004000L
+//RMI_PROBE_POP_LOGIC_CNTL
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH__SHIFT 0x0
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS__SHIFT 0x7
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2__SHIFT 0x8
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH__SHIFT 0xa
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS__SHIFT 0x11
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH_MASK 0x0000007FL
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS_MASK 0x00000080L
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2_MASK 0x00000300L
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH_MASK 0x0001FC00L
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS_MASK 0x00020000L
+//RMI_UTC_XNACK_N_MISC_CNTL
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC__SHIFT 0x0
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE__SHIFT 0xc
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE__SHIFT 0xd
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC_MASK 0x000000FFL
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE_MASK 0x00000F00L
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE_MASK 0x00001000L
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE_MASK 0x00002000L
+//RMI_DEMUX_CNTL
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL__SHIFT 0x0
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x1
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x4
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x6
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE__SHIFT 0xe
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL__SHIFT 0x10
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x11
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x14
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x16
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE__SHIFT 0x1e
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_MASK 0x00000001L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000002L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_OVERRIDE_MASK 0x00000030L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE_MASK 0x00003FC0L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_MASK 0x0000C000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_MASK 0x00010000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00020000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00300000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE_MASK 0x3FC00000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_MASK 0xC0000000L
+//RMI_UTCL1_CNTL1
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define RMI_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define RMI_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define RMI_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define RMI_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define RMI_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define RMI_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define RMI_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define RMI_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define RMI_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define RMI_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//RMI_UTCL1_CNTL2
+#define RMI_UTCL1_CNTL2__UTC_SPARE__SHIFT 0x0
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define RMI_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE__SHIFT 0x10
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR__SHIFT 0x13
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID__SHIFT 0x15
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ__SHIFT 0x19
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define RMI_UTCL1_CNTL2__UTC_SPARE_MASK 0x000000FFL
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define RMI_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE_MASK 0x00030000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR_MASK 0x00080000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID_MASK 0x01E00000L
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ_MASK 0x02000000L
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+//RMI_TCIW_FORMATTER0_CNTL
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_OVERRIDE__SHIFT 0x0
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_TIME_OUT_WINDOW__SHIFT 0x1
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA__SHIFT 0x13
+#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_UPDATE__SHIFT 0x1b
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_REQ_SAFE_MODE__SHIFT 0x1c
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_OVERRIDE_MASK 0x00000001L
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_TIME_OUT_WINDOW_MASK 0x000001FEL
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_MASK 0x07F80000L
+#define RMI_TCIW_FORMATTER0_CNTL__SKID_FIFO_0_FREE_SPACE_DELTA_UPDATE_MASK 0x08000000L
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_REQ_SAFE_MODE_MASK 0x10000000L
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER0_CNTL__WR_COMBINE0_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA_MASK 0x80000000L
+//RMI_TCIW_FORMATTER1_CNTL
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE__SHIFT 0x0
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW__SHIFT 0x1
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA__SHIFT 0x13
+#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_UPDATE__SHIFT 0x1b
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_REQ_SAFE_MODE__SHIFT 0x1c
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE_MASK 0x00000001L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW_MASK 0x000001FEL
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_MASK 0x07F80000L
+#define RMI_TCIW_FORMATTER1_CNTL__SKID_FIFO_1_FREE_SPACE_DELTA_UPDATE_MASK 0x08000000L
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_REQ_SAFE_MODE_MASK 0x10000000L
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA_MASK 0x80000000L
+//RMI_SCOREBOARD_CNTL
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH__SHIFT 0x0
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0__SHIFT 0x1
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH__SHIFT 0x2
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1__SHIFT 0x3
+#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB1__SHIFT 0x4
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN__SHIFT 0x5
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE__SHIFT 0x6
+#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB0__SHIFT 0x7
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_EN__SHIFT 0x8
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE__SHIFT 0x9
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH_MASK 0x00000001L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0_MASK 0x00000002L
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH_MASK 0x00000004L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1_MASK 0x00000008L
+#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB1_MASK 0x00000010L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN_MASK 0x00000020L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE_MASK 0x00000040L
+#define RMI_SCOREBOARD_CNTL__TIME_STAMP_FLUSH_RB0_MASK 0x00000080L
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_EN_MASK 0x00000100L
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE_MASK 0x001FFE00L
+//RMI_SCOREBOARD_STATUS0
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG__SHIFT 0x1
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID__SHIFT 0x2
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE__SHIFT 0x12
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE__SHIFT 0x13
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE__SHIFT 0x14
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE__SHIFT 0x15
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID_MASK 0x00000001L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG_MASK 0x00000002L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID_MASK 0x0003FFFCL
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE_MASK 0x00040000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE_MASK 0x00080000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE_MASK 0x00100000L
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE_MASK 0x00200000L
+//RMI_SCOREBOARD_STATUS1
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED__SHIFT 0xe
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1__SHIFT 0xf
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0_MASK 0x00002000L
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED_MASK 0x00004000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1_MASK 0x07FF8000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0_MASK 0x40000000L
+//RMI_SCOREBOARD_STATUS2
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1__SHIFT 0x19
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1__SHIFT 0x1a
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1__SHIFT 0x1f
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1_MASK 0x01FFE000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1_MASK 0x02000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1_MASK 0x04000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0_MASK 0x40000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1_MASK 0x80000000L
+//RMI_XBAR_ARBITER_CONFIG
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x2
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL__SHIFT 0x3
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x4
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE__SHIFT 0x10
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x12
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL__SHIFT 0x13
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x14
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x16
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x18
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_MASK 0x00000003L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00000004L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_MASK 0x00000008L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000010L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE_MASK 0x000000C0L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE_MASK 0x0000FF00L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_MASK 0x00030000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00040000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_MASK 0x00080000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00100000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00C00000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE_MASK 0xFF000000L
+//RMI_XBAR_ARBITER_CONFIG_1
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_RD__SHIFT 0x10
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_WR__SHIFT 0x18
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD_MASK 0x000000FFL
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR_MASK 0x0000FF00L
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_RD_MASK 0x00FF0000L
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB1_WR_MASK 0xFF000000L
+//RMI_CLOCK_CNTRL
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK__SHIFT 0x0
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK__SHIFT 0x5
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK__SHIFT 0xa
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK__SHIFT 0xf
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_BUSY_MASK__SHIFT 0x14
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_WAKEUP_MASK__SHIFT 0x19
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK_MASK 0x0000001FL
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK_MASK 0x000003E0L
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK_MASK 0x00007C00L
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK_MASK 0x000F8000L
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_BUSY_MASK_MASK 0x01F00000L
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB1_WAKEUP_MASK_MASK 0x3E000000L
+//RMI_UTCL1_STATUS
+#define RMI_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RMI_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RMI_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RMI_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RMI_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RMI_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+//RMI_SPARE
+#define RMI_SPARE__RMI_ARBITER_STALL_TIMER_ENABLED_ALLOW_STREAMING__SHIFT 0x0
+#define RMI_SPARE__SPARE_BIT_1__SHIFT 0x1
+#define RMI_SPARE__SPARE_BIT_2__SHIFT 0x2
+#define RMI_SPARE__SPARE_BIT_3__SHIFT 0x3
+#define RMI_SPARE__SPARE_BIT_4__SHIFT 0x4
+#define RMI_SPARE__SPARE_BIT_5__SHIFT 0x5
+#define RMI_SPARE__SPARE_BIT_6__SHIFT 0x6
+#define RMI_SPARE__SPARE_BIT_7__SHIFT 0x7
+#define RMI_SPARE__SPARE_BIT_8_0__SHIFT 0x8
+#define RMI_SPARE__SPARE_BIT_16_0__SHIFT 0x10
+#define RMI_SPARE__RMI_ARBITER_STALL_TIMER_ENABLED_ALLOW_STREAMING_MASK 0x00000001L
+#define RMI_SPARE__SPARE_BIT_1_MASK 0x00000002L
+#define RMI_SPARE__SPARE_BIT_2_MASK 0x00000004L
+#define RMI_SPARE__SPARE_BIT_3_MASK 0x00000008L
+#define RMI_SPARE__SPARE_BIT_4_MASK 0x00000010L
+#define RMI_SPARE__SPARE_BIT_5_MASK 0x00000020L
+#define RMI_SPARE__SPARE_BIT_6_MASK 0x00000040L
+#define RMI_SPARE__SPARE_BIT_7_MASK 0x00000080L
+#define RMI_SPARE__SPARE_BIT_8_0_MASK 0x0000FF00L
+#define RMI_SPARE__SPARE_BIT_16_0_MASK 0xFFFF0000L
+//RMI_SPARE_1
+#define RMI_SPARE_1__SPARE_BIT_8__SHIFT 0x0
+#define RMI_SPARE_1__SPARE_BIT_9__SHIFT 0x1
+#define RMI_SPARE_1__SPARE_BIT_10__SHIFT 0x2
+#define RMI_SPARE_1__SPARE_BIT_11__SHIFT 0x3
+#define RMI_SPARE_1__SPARE_BIT_12__SHIFT 0x4
+#define RMI_SPARE_1__SPARE_BIT_13__SHIFT 0x5
+#define RMI_SPARE_1__SPARE_BIT_14__SHIFT 0x6
+#define RMI_SPARE_1__SPARE_BIT_15__SHIFT 0x7
+#define RMI_SPARE_1__SPARE_BIT_8_1__SHIFT 0x8
+#define RMI_SPARE_1__SPARE_BIT_16_1__SHIFT 0x10
+#define RMI_SPARE_1__SPARE_BIT_8_MASK 0x00000001L
+#define RMI_SPARE_1__SPARE_BIT_9_MASK 0x00000002L
+#define RMI_SPARE_1__SPARE_BIT_10_MASK 0x00000004L
+#define RMI_SPARE_1__SPARE_BIT_11_MASK 0x00000008L
+#define RMI_SPARE_1__SPARE_BIT_12_MASK 0x00000010L
+#define RMI_SPARE_1__SPARE_BIT_13_MASK 0x00000020L
+#define RMI_SPARE_1__SPARE_BIT_14_MASK 0x00000040L
+#define RMI_SPARE_1__SPARE_BIT_15_MASK 0x00000080L
+#define RMI_SPARE_1__SPARE_BIT_8_1_MASK 0x0000FF00L
+#define RMI_SPARE_1__SPARE_BIT_16_1_MASK 0xFFFF0000L
+//RMI_SPARE_2
+#define RMI_SPARE_2__SPARE_BIT_16__SHIFT 0x0
+#define RMI_SPARE_2__SPARE_BIT_17__SHIFT 0x1
+#define RMI_SPARE_2__SPARE_BIT_18__SHIFT 0x2
+#define RMI_SPARE_2__SPARE_BIT_19__SHIFT 0x3
+#define RMI_SPARE_2__SPARE_BIT_20__SHIFT 0x4
+#define RMI_SPARE_2__SPARE_BIT_21__SHIFT 0x5
+#define RMI_SPARE_2__SPARE_BIT_22__SHIFT 0x6
+#define RMI_SPARE_2__SPARE_BIT_23__SHIFT 0x7
+#define RMI_SPARE_2__SPARE_BIT_4_0__SHIFT 0x8
+#define RMI_SPARE_2__SPARE_BIT_4_1__SHIFT 0xc
+#define RMI_SPARE_2__SPARE_BIT_8_2__SHIFT 0x10
+#define RMI_SPARE_2__SPARE_BIT_8_3__SHIFT 0x18
+#define RMI_SPARE_2__SPARE_BIT_16_MASK 0x00000001L
+#define RMI_SPARE_2__SPARE_BIT_17_MASK 0x00000002L
+#define RMI_SPARE_2__SPARE_BIT_18_MASK 0x00000004L
+#define RMI_SPARE_2__SPARE_BIT_19_MASK 0x00000008L
+#define RMI_SPARE_2__SPARE_BIT_20_MASK 0x00000010L
+#define RMI_SPARE_2__SPARE_BIT_21_MASK 0x00000020L
+#define RMI_SPARE_2__SPARE_BIT_22_MASK 0x00000040L
+#define RMI_SPARE_2__SPARE_BIT_23_MASK 0x00000080L
+#define RMI_SPARE_2__SPARE_BIT_4_0_MASK 0x00000F00L
+#define RMI_SPARE_2__SPARE_BIT_4_1_MASK 0x0000F000L
+#define RMI_SPARE_2__SPARE_BIT_8_2_MASK 0x00FF0000L
+#define RMI_SPARE_2__SPARE_BIT_8_3_MASK 0xFF000000L
+
+
+// addressBlock: gc_utcl2_atcl2dec
+//ATC_L2_CNTL
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
+#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x8
+#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
+#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00000700L
+#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+//ATC_L2_CNTL2
+#define ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x8
+#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0x9
+#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xc
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0xf
+#define ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000100L
+#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00000E00L
+#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00007000L
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x001F8000L
+//ATC_L2_CACHE_DATA0
+#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
+#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
+#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
+#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17
+#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
+#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
+#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL
+#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L
+//ATC_L2_CACHE_DATA1
+#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
+#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//ATC_L2_CACHE_DATA2
+#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//ATC_L2_CNTL3
+#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x0
+#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3
+#define ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x9
+#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x00000007L
+#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x000001F8L
+#define ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x00000E00L
+//ATC_L2_STATUS
+#define ATC_L2_STATUS__BUSY__SHIFT 0x0
+#define ATC_L2_STATUS__PARITY_ERROR_INFO__SHIFT 0x1
+#define ATC_L2_STATUS__BUSY_MASK 0x00000001L
+#define ATC_L2_STATUS__PARITY_ERROR_INFO_MASK 0x3FFFFFFEL
+//ATC_L2_STATUS2
+#define ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
+#define ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
+#define ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
+#define ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
+//ATC_L2_MISC_CG
+#define ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
+#define ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
+#define ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
+#define ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
+#define ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
+#define ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+//ATC_L2_MEM_POWER_LS
+#define ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//ATC_L2_CGTT_CLK_CTRL
+#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+
+
+// addressBlock: gc_utcl2_vml2pfdec
+//VM_L2_CNTL
+#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
+#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
+#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
+#define VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
+#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
+//VM_L2_CNTL2
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
+#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
+#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
+#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
+//VM_L2_CNTL3
+#define VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
+#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
+#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
+#define VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
+//VM_L2_STATUS
+#define VM_L2_STATUS__L2_BUSY__SHIFT 0x0
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
+#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
+#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
+#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
+#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
+#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
+#define VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
+#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
+#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
+#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
+#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
+#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
+//VM_DUMMY_PAGE_FAULT_CNTL
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
+//VM_DUMMY_PAGE_FAULT_ADDR_LO32
+#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_DUMMY_PAGE_FAULT_ADDR_HI32
+#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_PROTECTION_FAULT_CNTL
+#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
+#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
+#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
+#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
+#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
+#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
+#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
+#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
+#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
+#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
+#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
+#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
+#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
+#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
+#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
+#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
+//VM_L2_PROTECTION_FAULT_CNTL2
+#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
+#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
+#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
+#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
+//VM_L2_PROTECTION_FAULT_MM_CNTL3
+#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_MM_CNTL4
+#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_STATUS
+#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
+#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
+#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
+#define VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
+#define VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
+#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
+#define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
+#define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
+#define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
+#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
+#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
+#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
+#define VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
+#define VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
+#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+//VM_L2_PROTECTION_FAULT_ADDR_LO32
+#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_ADDR_HI32
+#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
+//VM_L2_CNTL4
+#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
+#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
+#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
+#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
+#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
+#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
+#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
+#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
+#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
+#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
+#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
+#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
+//VM_L2_MM_GROUP_RT_CLASSES
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
+//VM_L2_BANK_SELECT_RESERVED_CID
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VM_L2_BANK_SELECT_RESERVED_CID2
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VM_L2_CACHE_PARITY_CNTL
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
+//VM_L2_CGTT_CLK_CTRL
+#define VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+
+
+// addressBlock: gc_utcl2_vml2vcdec
+//VM_CONTEXT0_CNTL
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT1_CNTL
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT2_CNTL
+#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT3_CNTL
+#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT4_CNTL
+#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT5_CNTL
+#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT6_CNTL
+#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT7_CNTL
+#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT8_CNTL
+#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT9_CNTL
+#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT10_CNTL
+#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT11_CNTL
+#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT12_CNTL
+#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT13_CNTL
+#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT14_CNTL
+#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT15_CNTL
+#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXTS_DISABLE
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+//VM_INVALIDATE_ENG0_SEM
+#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG1_SEM
+#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG2_SEM
+#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG3_SEM
+#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG4_SEM
+#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG5_SEM
+#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG6_SEM
+#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG7_SEM
+#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG8_SEM
+#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG9_SEM
+#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG10_SEM
+#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG11_SEM
+#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG12_SEM
+#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG13_SEM
+#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG14_SEM
+#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG15_SEM
+#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG16_SEM
+#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG17_SEM
+#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG0_REQ
+#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG1_REQ
+#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG2_REQ
+#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG3_REQ
+#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG4_REQ
+#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG5_REQ
+#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG6_REQ
+#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG7_REQ
+#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG8_REQ
+#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG9_REQ
+#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG10_REQ
+#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG11_REQ
+#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG12_REQ
+#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG13_REQ
+#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG14_REQ
+#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG15_REQ
+#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG16_REQ
+#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG17_REQ
+#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG0_ACK
+#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG1_ACK
+#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG2_ACK
+#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG3_ACK
+#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG4_ACK
+#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG5_ACK
+#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG6_ACK
+#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG7_ACK
+#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG8_ACK
+#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG9_ACK
+#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG10_ACK
+#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG11_ACK
+#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG12_ACK
+#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG13_ACK
+#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG14_ACK
+#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG15_ACK
+#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG16_ACK
+#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG17_ACK
+#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG0_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG0_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG1_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG1_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG2_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG2_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG3_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG3_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG4_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG4_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG5_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG5_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG6_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG6_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG7_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG7_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG8_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG8_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG9_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG9_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG10_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG10_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG11_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG11_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG12_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG12_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG13_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG13_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG14_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG14_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG15_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG15_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG16_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG16_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG17_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG17_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+
+
+// addressBlock: gc_utcl2_vmsharedpfdec
+//MC_VM_NB_MMIOBASE
+#define MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//MC_VM_NB_MMIOLIMIT
+#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//MC_VM_NB_PCI_CTRL
+#define MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
+#define MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
+//MC_VM_NB_PCI_ARB
+#define MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
+#define MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
+//MC_VM_NB_TOP_OF_DRAM_SLOT1
+#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
+#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
+//MC_VM_NB_LOWER_TOP_OF_DRAM2
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//MC_VM_NB_UPPER_TOP_OF_DRAM2
+#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
+//MC_VM_FB_OFFSET
+#define MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
+//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
+//MC_VM_STEERING
+#define MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
+#define MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
+//MC_SHARED_VIRT_RESET_REQ
+#define MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//MC_MEM_POWER_LS
+#define MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//MC_VM_CACHEABLE_DRAM_ADDRESS_START
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//MC_VM_CACHEABLE_DRAM_ADDRESS_END
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//MC_VM_APT_CNTL
+#define MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
+#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
+#define MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
+#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
+//MC_VM_LOCAL_HBM_ADDRESS_START
+#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//MC_VM_LOCAL_HBM_ADDRESS_END
+#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
+#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x3
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000038L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
+
+
+// addressBlock: gc_utcl2_vmsharedvcdec
+//MC_VM_FB_LOCATION_BASE
+#define MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//MC_VM_FB_LOCATION_TOP
+#define MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//MC_VM_AGP_TOP
+#define MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//MC_VM_AGP_BOT
+#define MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//MC_VM_AGP_BASE
+#define MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//MC_VM_SYSTEM_APERTURE_LOW_ADDR
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//MC_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//MC_VM_MX_L1_TLB_CNTL
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
+#define MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
+#define MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L
+#define MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L
+
+
+// addressBlock: gc_ea_gceadec
+//GCEA_DRAM_RD_CLI2GRP_MAP0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_CLI2GRP_MAP1
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP1
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_GRP2VC_MAP
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_WR_GRP2VC_MAP
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_RD_LAZY
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+//GCEA_DRAM_WR_LAZY
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+//GCEA_DRAM_RD_CAM_CNTL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+//GCEA_DRAM_WR_CAM_CNTL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+//GCEA_DRAM_PAGE_BURST
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_AGE
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_WR_PRI_AGE
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_RD_PRI_QUEUING
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_QUEUING
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_FIXED
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_FIXED
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_URGENCY
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_WR_PRI_URGENCY
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI1
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI2
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI3
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI1
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI2
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI3
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_ADDRNORM_BASE_ADDR0
+#define GCEA_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define GCEA_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x4
+#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x8
+#define GCEA_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define GCEA_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define GCEA_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x000000F0L
+#define GCEA_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000700L
+#define GCEA_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//GCEA_ADDRNORM_LIMIT_ADDR0
+#define GCEA_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES__SHIFT 0xa
+#define GCEA_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define GCEA_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define GCEA_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES_MASK 0x00000C00L
+#define GCEA_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//GCEA_ADDRNORM_BASE_ADDR1
+#define GCEA_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define GCEA_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x4
+#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x8
+#define GCEA_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define GCEA_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define GCEA_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x000000F0L
+#define GCEA_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000700L
+#define GCEA_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//GCEA_ADDRNORM_LIMIT_ADDR1
+#define GCEA_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES__SHIFT 0xa
+#define GCEA_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define GCEA_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define GCEA_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES_MASK 0x00000C00L
+#define GCEA_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//GCEA_ADDRNORM_OFFSET_ADDR1
+#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define GCEA_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//GCEA_ADDRNORMDRAM_HOLE_CNTL
+#define GCEA_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define GCEA_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define GCEA_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define GCEA_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//GCEA_ADDRNORMDRAM_TRICHANNEL_CFG
+#define GCEA_ADDRNORMDRAM_TRICHANNEL_CFG__LOG2_ADDR64K_SPACE__SHIFT 0x0
+#define GCEA_ADDRNORMDRAM_TRICHANNEL_CFG__LOG2_ADDR64K_SPACE_MASK 0x0000003FL
+//GCEA_ADDRDEC_BANK_CFG
+#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x5
+#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xa
+#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xd
+#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x10
+#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x11
+#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000001FL
+#define GCEA_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x000003E0L
+#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00001C00L
+#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x0000E000L
+#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00010000L
+#define GCEA_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00020000L
+//GCEA_ADDRDEC_MISC_CFG
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN3__SHIFT 0x3
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN4__SHIFT 0x4
+#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN3_MASK 0x00000008L
+#define GCEA_ADDRDEC_MISC_CFG__VCM_EN4_MASK 0x00000010L
+#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define GCEA_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define GCEA_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define GCEA_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define GCEA_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//GCEA_ADDRDECDRAM_ADDR_HASH_BANK0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//GCEA_ADDRDECDRAM_ADDR_HASH_BANK1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//GCEA_ADDRDECDRAM_ADDR_HASH_BANK2
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//GCEA_ADDRDECDRAM_ADDR_HASH_BANK3
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//GCEA_ADDRDECDRAM_ADDR_HASH_BANK4
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define GCEA_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//GCEA_ADDRDECDRAM_ADDR_HASH_PC
+#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define GCEA_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//GCEA_ADDRDECDRAM_ADDR_HASH_PC2
+#define GCEA_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000001FL
+//GCEA_ADDRDECDRAM_ADDR_HASH_CS0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define GCEA_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDECDRAM_ADDR_HASH_CS1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define GCEA_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDECDRAM_HARVEST_ENABLE
+#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define GCEA_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+//GCEA_ADDRDEC0_BASE_ADDR_CS0
+#define GCEA_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_BASE_ADDR_CS1
+#define GCEA_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_BASE_ADDR_CS2
+#define GCEA_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_BASE_ADDR_CS3
+#define GCEA_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_BASE_ADDR_SECCS0
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_BASE_ADDR_SECCS1
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_BASE_ADDR_SECCS2
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_BASE_ADDR_SECCS3
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_ADDR_MASK_CS01
+#define GCEA_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define GCEA_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_ADDR_MASK_CS23
+#define GCEA_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define GCEA_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_ADDR_MASK_SECCS01
+#define GCEA_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define GCEA_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_ADDR_MASK_SECCS23
+#define GCEA_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define GCEA_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC0_ADDR_CFG_CS01
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//GCEA_ADDRDEC0_ADDR_CFG_CS23
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define GCEA_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//GCEA_ADDRDEC0_ADDR_SEL_CS01
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//GCEA_ADDRDEC0_ADDR_SEL_CS23
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define GCEA_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//GCEA_ADDRDEC0_COL_SEL_LO_CS01
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//GCEA_ADDRDEC0_COL_SEL_LO_CS23
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define GCEA_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//GCEA_ADDRDEC0_COL_SEL_HI_CS01
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//GCEA_ADDRDEC0_COL_SEL_HI_CS23
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define GCEA_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//GCEA_ADDRDEC0_RM_SEL_CS01
+#define GCEA_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define GCEA_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define GCEA_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define GCEA_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define GCEA_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define GCEA_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//GCEA_ADDRDEC0_RM_SEL_CS23
+#define GCEA_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define GCEA_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define GCEA_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define GCEA_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define GCEA_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define GCEA_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//GCEA_ADDRDEC0_RM_SEL_SECCS01
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define GCEA_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//GCEA_ADDRDEC0_RM_SEL_SECCS23
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define GCEA_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//GCEA_ADDRDEC1_BASE_ADDR_CS0
+#define GCEA_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_BASE_ADDR_CS1
+#define GCEA_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_BASE_ADDR_CS2
+#define GCEA_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_BASE_ADDR_CS3
+#define GCEA_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_BASE_ADDR_SECCS0
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_BASE_ADDR_SECCS1
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_BASE_ADDR_SECCS2
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_BASE_ADDR_SECCS3
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define GCEA_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_ADDR_MASK_CS01
+#define GCEA_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define GCEA_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_ADDR_MASK_CS23
+#define GCEA_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define GCEA_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_ADDR_MASK_SECCS01
+#define GCEA_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define GCEA_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_ADDR_MASK_SECCS23
+#define GCEA_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define GCEA_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//GCEA_ADDRDEC1_ADDR_CFG_CS01
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//GCEA_ADDRDEC1_ADDR_CFG_CS23
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define GCEA_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//GCEA_ADDRDEC1_ADDR_SEL_CS01
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//GCEA_ADDRDEC1_ADDR_SEL_CS23
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define GCEA_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//GCEA_ADDRDEC1_COL_SEL_LO_CS01
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//GCEA_ADDRDEC1_COL_SEL_LO_CS23
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define GCEA_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//GCEA_ADDRDEC1_COL_SEL_HI_CS01
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//GCEA_ADDRDEC1_COL_SEL_HI_CS23
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define GCEA_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//GCEA_ADDRDEC1_RM_SEL_CS01
+#define GCEA_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define GCEA_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define GCEA_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define GCEA_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define GCEA_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define GCEA_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//GCEA_ADDRDEC1_RM_SEL_CS23
+#define GCEA_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define GCEA_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define GCEA_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define GCEA_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define GCEA_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define GCEA_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//GCEA_ADDRDEC1_RM_SEL_SECCS01
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define GCEA_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//GCEA_ADDRDEC1_RM_SEL_SECCS23
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define GCEA_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//GCEA_IO_RD_CLI2GRP_MAP0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_CLI2GRP_MAP1
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP1
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_COMBINE_FLUSH
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+//GCEA_IO_WR_COMBINE_FLUSH
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+//GCEA_IO_GROUP_BURST
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_AGE
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_WR_PRI_AGE
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_RD_PRI_QUEUING
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_QUEUING
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_FIXED
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_FIXED
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_URGENCY
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_WR_PRI_URGENCY
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_RD_PRI_URGENCY_MASK
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_WR_PRI_URGENCY_MASK
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_RD_PRI_QUANT_PRI1
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI2
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI3
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI1
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI2
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI3
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_SDP_ARB_DRAM
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+//GCEA_SDP_ARB_FINAL
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+//GCEA_SDP_DRAM_PRIORITY
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_IO_PRIORITY
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_CREDITS
+#define GCEA_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS__SHIFT 0x18
+#define GCEA_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_TAG_RESERVE0
+#define GCEA_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//GCEA_SDP_TAG_RESERVE1
+#define GCEA_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//GCEA_SDP_VCC_RESERVE0
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_VCC_RESERVE1
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_VCD_RESERVE0
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_VCD_RESERVE1
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_REQ_CNTL
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x4
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000010L
+//GCEA_MISC
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define GCEA_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define GCEA_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//GCEA_LATENCY_SAMPLING
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//GCEA_PERFCOUNTER_LO
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER_HI
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//GCEA_PERFCOUNTER0_CFG
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCEA_PERFCOUNTER1_CFG
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+
+
+// addressBlock: gc_tcdec
+//TCP_INVALIDATE
+#define TCP_INVALIDATE__START__SHIFT 0x0
+#define TCP_INVALIDATE__START_MASK 0x00000001L
+//TCP_STATUS
+#define TCP_STATUS__TCP_BUSY__SHIFT 0x0
+#define TCP_STATUS__INPUT_BUSY__SHIFT 0x1
+#define TCP_STATUS__ADRS_BUSY__SHIFT 0x2
+#define TCP_STATUS__TAGRAMS_BUSY__SHIFT 0x3
+#define TCP_STATUS__CNTRL_BUSY__SHIFT 0x4
+#define TCP_STATUS__LFIFO_BUSY__SHIFT 0x5
+#define TCP_STATUS__READ_BUSY__SHIFT 0x6
+#define TCP_STATUS__FORMAT_BUSY__SHIFT 0x7
+#define TCP_STATUS__VM_BUSY__SHIFT 0x8
+#define TCP_STATUS__TCP_BUSY_MASK 0x00000001L
+#define TCP_STATUS__INPUT_BUSY_MASK 0x00000002L
+#define TCP_STATUS__ADRS_BUSY_MASK 0x00000004L
+#define TCP_STATUS__TAGRAMS_BUSY_MASK 0x00000008L
+#define TCP_STATUS__CNTRL_BUSY_MASK 0x00000010L
+#define TCP_STATUS__LFIFO_BUSY_MASK 0x00000020L
+#define TCP_STATUS__READ_BUSY_MASK 0x00000040L
+#define TCP_STATUS__FORMAT_BUSY_MASK 0x00000080L
+#define TCP_STATUS__VM_BUSY_MASK 0x00000100L
+//TCP_CNTL
+#define TCP_CNTL__FORCE_HIT__SHIFT 0x0
+#define TCP_CNTL__FORCE_MISS__SHIFT 0x1
+#define TCP_CNTL__L1_SIZE__SHIFT 0x2
+#define TCP_CNTL__FLAT_BUF_HASH_ENABLE__SHIFT 0x4
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x5
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0xf
+#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT__SHIFT 0x16
+#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x1c
+#define TCP_CNTL__INV_ALL_VMIDS__SHIFT 0x1d
+#define TCP_CNTL__ASTC_VE_MSB_TOLERANT__SHIFT 0x1e
+#define TCP_CNTL__FORCE_HIT_MASK 0x00000001L
+#define TCP_CNTL__FORCE_MISS_MASK 0x00000002L
+#define TCP_CNTL__L1_SIZE_MASK 0x0000000CL
+#define TCP_CNTL__FLAT_BUF_HASH_ENABLE_MASK 0x00000010L
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x00000020L
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x001F8000L
+#define TCP_CNTL__FORCE_EOW_TAGRAM_CNT_MASK 0x0FC00000L
+#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000L
+#define TCP_CNTL__INV_ALL_VMIDS_MASK 0x20000000L
+#define TCP_CNTL__ASTC_VE_MSB_TOLERANT_MASK 0x40000000L
+//TCP_CHAN_STEER_LO
+#define TCP_CHAN_STEER_LO__CHAN0__SHIFT 0x0
+#define TCP_CHAN_STEER_LO__CHAN1__SHIFT 0x4
+#define TCP_CHAN_STEER_LO__CHAN2__SHIFT 0x8
+#define TCP_CHAN_STEER_LO__CHAN3__SHIFT 0xc
+#define TCP_CHAN_STEER_LO__CHAN4__SHIFT 0x10
+#define TCP_CHAN_STEER_LO__CHAN5__SHIFT 0x14
+#define TCP_CHAN_STEER_LO__CHAN6__SHIFT 0x18
+#define TCP_CHAN_STEER_LO__CHAN7__SHIFT 0x1c
+#define TCP_CHAN_STEER_LO__CHAN0_MASK 0x0000000FL
+#define TCP_CHAN_STEER_LO__CHAN1_MASK 0x000000F0L
+#define TCP_CHAN_STEER_LO__CHAN2_MASK 0x00000F00L
+#define TCP_CHAN_STEER_LO__CHAN3_MASK 0x0000F000L
+#define TCP_CHAN_STEER_LO__CHAN4_MASK 0x000F0000L
+#define TCP_CHAN_STEER_LO__CHAN5_MASK 0x00F00000L
+#define TCP_CHAN_STEER_LO__CHAN6_MASK 0x0F000000L
+#define TCP_CHAN_STEER_LO__CHAN7_MASK 0xF0000000L
+//TCP_CHAN_STEER_HI
+#define TCP_CHAN_STEER_HI__CHAN8__SHIFT 0x0
+#define TCP_CHAN_STEER_HI__CHAN9__SHIFT 0x4
+#define TCP_CHAN_STEER_HI__CHANA__SHIFT 0x8
+#define TCP_CHAN_STEER_HI__CHANB__SHIFT 0xc
+#define TCP_CHAN_STEER_HI__CHANC__SHIFT 0x10
+#define TCP_CHAN_STEER_HI__CHAND__SHIFT 0x14
+#define TCP_CHAN_STEER_HI__CHANE__SHIFT 0x18
+#define TCP_CHAN_STEER_HI__CHANF__SHIFT 0x1c
+#define TCP_CHAN_STEER_HI__CHAN8_MASK 0x0000000FL
+#define TCP_CHAN_STEER_HI__CHAN9_MASK 0x000000F0L
+#define TCP_CHAN_STEER_HI__CHANA_MASK 0x00000F00L
+#define TCP_CHAN_STEER_HI__CHANB_MASK 0x0000F000L
+#define TCP_CHAN_STEER_HI__CHANC_MASK 0x000F0000L
+#define TCP_CHAN_STEER_HI__CHAND_MASK 0x00F00000L
+#define TCP_CHAN_STEER_HI__CHANE_MASK 0x0F000000L
+#define TCP_CHAN_STEER_HI__CHANF_MASK 0xF0000000L
+//TCP_ADDR_CONFIG
+#define TCP_ADDR_CONFIG__NUM_TCC_BANKS__SHIFT 0x0
+#define TCP_ADDR_CONFIG__NUM_BANKS__SHIFT 0x4
+#define TCP_ADDR_CONFIG__COLHI_WIDTH__SHIFT 0x6
+#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI__SHIFT 0x9
+#define TCP_ADDR_CONFIG__NUM_TCC_BANKS_MASK 0x0000000FL
+#define TCP_ADDR_CONFIG__NUM_BANKS_MASK 0x00000030L
+#define TCP_ADDR_CONFIG__COLHI_WIDTH_MASK 0x000001C0L
+#define TCP_ADDR_CONFIG__RB_SPLIT_COLHI_MASK 0x00000200L
+//TCP_CREDIT
+#define TCP_CREDIT__LFIFO_CREDIT__SHIFT 0x0
+#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
+#define TCP_CREDIT__TD_CREDIT__SHIFT 0x1d
+#define TCP_CREDIT__LFIFO_CREDIT_MASK 0x000003FFL
+#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x007F0000L
+#define TCP_CREDIT__TD_CREDIT_MASK 0xE0000000L
+//TCP_BUFFER_ADDR_HASH_CNTL
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS__SHIFT 0x0
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS__SHIFT 0x8
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT__SHIFT 0x10
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT__SHIFT 0x18
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_BITS_MASK 0x00000007L
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_BITS_MASK 0x00000700L
+#define TCP_BUFFER_ADDR_HASH_CNTL__CHANNEL_XOR_COUNT_MASK 0x00070000L
+#define TCP_BUFFER_ADDR_HASH_CNTL__BANK_XOR_COUNT_MASK 0x07000000L
+//TC_CFG_L1_LOAD_POLICY0
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_0__SHIFT 0x0
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_1__SHIFT 0x2
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_2__SHIFT 0x4
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_3__SHIFT 0x6
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_4__SHIFT 0x8
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_5__SHIFT 0xa
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_6__SHIFT 0xc
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_7__SHIFT 0xe
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_8__SHIFT 0x10
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_9__SHIFT 0x12
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_10__SHIFT 0x14
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_11__SHIFT 0x16
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_12__SHIFT 0x18
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_0_MASK 0x00000003L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_1_MASK 0x0000000CL
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_2_MASK 0x00000030L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_3_MASK 0x000000C0L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_4_MASK 0x00000300L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_5_MASK 0x00000C00L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_6_MASK 0x00003000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_7_MASK 0x0000C000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_8_MASK 0x00030000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_9_MASK 0x000C0000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_10_MASK 0x00300000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_11_MASK 0x00C00000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_12_MASK 0x03000000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_13_MASK 0x0C000000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_14_MASK 0x30000000L
+#define TC_CFG_L1_LOAD_POLICY0__POLICY_15_MASK 0xC0000000L
+//TC_CFG_L1_LOAD_POLICY1
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_16__SHIFT 0x0
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_17__SHIFT 0x2
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_18__SHIFT 0x4
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_19__SHIFT 0x6
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_20__SHIFT 0x8
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_21__SHIFT 0xa
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_22__SHIFT 0xc
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_23__SHIFT 0xe
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_24__SHIFT 0x10
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_25__SHIFT 0x12
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_26__SHIFT 0x14
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_27__SHIFT 0x16
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_28__SHIFT 0x18
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_16_MASK 0x00000003L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_17_MASK 0x0000000CL
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_18_MASK 0x00000030L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_19_MASK 0x000000C0L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_20_MASK 0x00000300L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_21_MASK 0x00000C00L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_22_MASK 0x00003000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_23_MASK 0x0000C000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_24_MASK 0x00030000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_25_MASK 0x000C0000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_26_MASK 0x00300000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_27_MASK 0x00C00000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_28_MASK 0x03000000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_29_MASK 0x0C000000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_30_MASK 0x30000000L
+#define TC_CFG_L1_LOAD_POLICY1__POLICY_31_MASK 0xC0000000L
+//TC_CFG_L1_STORE_POLICY
+#define TC_CFG_L1_STORE_POLICY__POLICY_0__SHIFT 0x0
+#define TC_CFG_L1_STORE_POLICY__POLICY_1__SHIFT 0x1
+#define TC_CFG_L1_STORE_POLICY__POLICY_2__SHIFT 0x2
+#define TC_CFG_L1_STORE_POLICY__POLICY_3__SHIFT 0x3
+#define TC_CFG_L1_STORE_POLICY__POLICY_4__SHIFT 0x4
+#define TC_CFG_L1_STORE_POLICY__POLICY_5__SHIFT 0x5
+#define TC_CFG_L1_STORE_POLICY__POLICY_6__SHIFT 0x6
+#define TC_CFG_L1_STORE_POLICY__POLICY_7__SHIFT 0x7
+#define TC_CFG_L1_STORE_POLICY__POLICY_8__SHIFT 0x8
+#define TC_CFG_L1_STORE_POLICY__POLICY_9__SHIFT 0x9
+#define TC_CFG_L1_STORE_POLICY__POLICY_10__SHIFT 0xa
+#define TC_CFG_L1_STORE_POLICY__POLICY_11__SHIFT 0xb
+#define TC_CFG_L1_STORE_POLICY__POLICY_12__SHIFT 0xc
+#define TC_CFG_L1_STORE_POLICY__POLICY_13__SHIFT 0xd
+#define TC_CFG_L1_STORE_POLICY__POLICY_14__SHIFT 0xe
+#define TC_CFG_L1_STORE_POLICY__POLICY_15__SHIFT 0xf
+#define TC_CFG_L1_STORE_POLICY__POLICY_16__SHIFT 0x10
+#define TC_CFG_L1_STORE_POLICY__POLICY_17__SHIFT 0x11
+#define TC_CFG_L1_STORE_POLICY__POLICY_18__SHIFT 0x12
+#define TC_CFG_L1_STORE_POLICY__POLICY_19__SHIFT 0x13
+#define TC_CFG_L1_STORE_POLICY__POLICY_20__SHIFT 0x14
+#define TC_CFG_L1_STORE_POLICY__POLICY_21__SHIFT 0x15
+#define TC_CFG_L1_STORE_POLICY__POLICY_22__SHIFT 0x16
+#define TC_CFG_L1_STORE_POLICY__POLICY_23__SHIFT 0x17
+#define TC_CFG_L1_STORE_POLICY__POLICY_24__SHIFT 0x18
+#define TC_CFG_L1_STORE_POLICY__POLICY_25__SHIFT 0x19
+#define TC_CFG_L1_STORE_POLICY__POLICY_26__SHIFT 0x1a
+#define TC_CFG_L1_STORE_POLICY__POLICY_27__SHIFT 0x1b
+#define TC_CFG_L1_STORE_POLICY__POLICY_28__SHIFT 0x1c
+#define TC_CFG_L1_STORE_POLICY__POLICY_29__SHIFT 0x1d
+#define TC_CFG_L1_STORE_POLICY__POLICY_30__SHIFT 0x1e
+#define TC_CFG_L1_STORE_POLICY__POLICY_31__SHIFT 0x1f
+#define TC_CFG_L1_STORE_POLICY__POLICY_0_MASK 0x00000001L
+#define TC_CFG_L1_STORE_POLICY__POLICY_1_MASK 0x00000002L
+#define TC_CFG_L1_STORE_POLICY__POLICY_2_MASK 0x00000004L
+#define TC_CFG_L1_STORE_POLICY__POLICY_3_MASK 0x00000008L
+#define TC_CFG_L1_STORE_POLICY__POLICY_4_MASK 0x00000010L
+#define TC_CFG_L1_STORE_POLICY__POLICY_5_MASK 0x00000020L
+#define TC_CFG_L1_STORE_POLICY__POLICY_6_MASK 0x00000040L
+#define TC_CFG_L1_STORE_POLICY__POLICY_7_MASK 0x00000080L
+#define TC_CFG_L1_STORE_POLICY__POLICY_8_MASK 0x00000100L
+#define TC_CFG_L1_STORE_POLICY__POLICY_9_MASK 0x00000200L
+#define TC_CFG_L1_STORE_POLICY__POLICY_10_MASK 0x00000400L
+#define TC_CFG_L1_STORE_POLICY__POLICY_11_MASK 0x00000800L
+#define TC_CFG_L1_STORE_POLICY__POLICY_12_MASK 0x00001000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_13_MASK 0x00002000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_14_MASK 0x00004000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_15_MASK 0x00008000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_16_MASK 0x00010000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_17_MASK 0x00020000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_18_MASK 0x00040000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_19_MASK 0x00080000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_20_MASK 0x00100000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_21_MASK 0x00200000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_22_MASK 0x00400000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_23_MASK 0x00800000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_24_MASK 0x01000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_25_MASK 0x02000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_26_MASK 0x04000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_27_MASK 0x08000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_28_MASK 0x10000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_29_MASK 0x20000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_30_MASK 0x40000000L
+#define TC_CFG_L1_STORE_POLICY__POLICY_31_MASK 0x80000000L
+//TC_CFG_L2_LOAD_POLICY0
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_0__SHIFT 0x0
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_1__SHIFT 0x2
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_2__SHIFT 0x4
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_3__SHIFT 0x6
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_4__SHIFT 0x8
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_5__SHIFT 0xa
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_6__SHIFT 0xc
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_7__SHIFT 0xe
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_8__SHIFT 0x10
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_9__SHIFT 0x12
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_10__SHIFT 0x14
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_11__SHIFT 0x16
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_12__SHIFT 0x18
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_13__SHIFT 0x1a
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_14__SHIFT 0x1c
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_15__SHIFT 0x1e
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_0_MASK 0x00000003L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_1_MASK 0x0000000CL
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_2_MASK 0x00000030L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_3_MASK 0x000000C0L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_4_MASK 0x00000300L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_5_MASK 0x00000C00L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_6_MASK 0x00003000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_7_MASK 0x0000C000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_8_MASK 0x00030000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_9_MASK 0x000C0000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_10_MASK 0x00300000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_11_MASK 0x00C00000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_12_MASK 0x03000000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_13_MASK 0x0C000000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_14_MASK 0x30000000L
+#define TC_CFG_L2_LOAD_POLICY0__POLICY_15_MASK 0xC0000000L
+//TC_CFG_L2_LOAD_POLICY1
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_16__SHIFT 0x0
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_17__SHIFT 0x2
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_18__SHIFT 0x4
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_19__SHIFT 0x6
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_20__SHIFT 0x8
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_21__SHIFT 0xa
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_22__SHIFT 0xc
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_23__SHIFT 0xe
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_24__SHIFT 0x10
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_25__SHIFT 0x12
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_26__SHIFT 0x14
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_27__SHIFT 0x16
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_28__SHIFT 0x18
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_29__SHIFT 0x1a
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_30__SHIFT 0x1c
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_31__SHIFT 0x1e
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_16_MASK 0x00000003L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_17_MASK 0x0000000CL
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_18_MASK 0x00000030L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_19_MASK 0x000000C0L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_20_MASK 0x00000300L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_21_MASK 0x00000C00L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_22_MASK 0x00003000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_23_MASK 0x0000C000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_24_MASK 0x00030000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_25_MASK 0x000C0000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_26_MASK 0x00300000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_27_MASK 0x00C00000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_28_MASK 0x03000000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_29_MASK 0x0C000000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_30_MASK 0x30000000L
+#define TC_CFG_L2_LOAD_POLICY1__POLICY_31_MASK 0xC0000000L
+//TC_CFG_L2_STORE_POLICY0
+#define TC_CFG_L2_STORE_POLICY0__POLICY_0__SHIFT 0x0
+#define TC_CFG_L2_STORE_POLICY0__POLICY_1__SHIFT 0x2
+#define TC_CFG_L2_STORE_POLICY0__POLICY_2__SHIFT 0x4
+#define TC_CFG_L2_STORE_POLICY0__POLICY_3__SHIFT 0x6
+#define TC_CFG_L2_STORE_POLICY0__POLICY_4__SHIFT 0x8
+#define TC_CFG_L2_STORE_POLICY0__POLICY_5__SHIFT 0xa
+#define TC_CFG_L2_STORE_POLICY0__POLICY_6__SHIFT 0xc
+#define TC_CFG_L2_STORE_POLICY0__POLICY_7__SHIFT 0xe
+#define TC_CFG_L2_STORE_POLICY0__POLICY_8__SHIFT 0x10
+#define TC_CFG_L2_STORE_POLICY0__POLICY_9__SHIFT 0x12
+#define TC_CFG_L2_STORE_POLICY0__POLICY_10__SHIFT 0x14
+#define TC_CFG_L2_STORE_POLICY0__POLICY_11__SHIFT 0x16
+#define TC_CFG_L2_STORE_POLICY0__POLICY_12__SHIFT 0x18
+#define TC_CFG_L2_STORE_POLICY0__POLICY_13__SHIFT 0x1a
+#define TC_CFG_L2_STORE_POLICY0__POLICY_14__SHIFT 0x1c
+#define TC_CFG_L2_STORE_POLICY0__POLICY_15__SHIFT 0x1e
+#define TC_CFG_L2_STORE_POLICY0__POLICY_0_MASK 0x00000003L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_1_MASK 0x0000000CL
+#define TC_CFG_L2_STORE_POLICY0__POLICY_2_MASK 0x00000030L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_3_MASK 0x000000C0L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_4_MASK 0x00000300L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_5_MASK 0x00000C00L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_6_MASK 0x00003000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_7_MASK 0x0000C000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_8_MASK 0x00030000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_9_MASK 0x000C0000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_10_MASK 0x00300000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_11_MASK 0x00C00000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_12_MASK 0x03000000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_13_MASK 0x0C000000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_14_MASK 0x30000000L
+#define TC_CFG_L2_STORE_POLICY0__POLICY_15_MASK 0xC0000000L
+//TC_CFG_L2_STORE_POLICY1
+#define TC_CFG_L2_STORE_POLICY1__POLICY_16__SHIFT 0x0
+#define TC_CFG_L2_STORE_POLICY1__POLICY_17__SHIFT 0x2
+#define TC_CFG_L2_STORE_POLICY1__POLICY_18__SHIFT 0x4
+#define TC_CFG_L2_STORE_POLICY1__POLICY_19__SHIFT 0x6
+#define TC_CFG_L2_STORE_POLICY1__POLICY_20__SHIFT 0x8
+#define TC_CFG_L2_STORE_POLICY1__POLICY_21__SHIFT 0xa
+#define TC_CFG_L2_STORE_POLICY1__POLICY_22__SHIFT 0xc
+#define TC_CFG_L2_STORE_POLICY1__POLICY_23__SHIFT 0xe
+#define TC_CFG_L2_STORE_POLICY1__POLICY_24__SHIFT 0x10
+#define TC_CFG_L2_STORE_POLICY1__POLICY_25__SHIFT 0x12
+#define TC_CFG_L2_STORE_POLICY1__POLICY_26__SHIFT 0x14
+#define TC_CFG_L2_STORE_POLICY1__POLICY_27__SHIFT 0x16
+#define TC_CFG_L2_STORE_POLICY1__POLICY_28__SHIFT 0x18
+#define TC_CFG_L2_STORE_POLICY1__POLICY_29__SHIFT 0x1a
+#define TC_CFG_L2_STORE_POLICY1__POLICY_30__SHIFT 0x1c
+#define TC_CFG_L2_STORE_POLICY1__POLICY_31__SHIFT 0x1e
+#define TC_CFG_L2_STORE_POLICY1__POLICY_16_MASK 0x00000003L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_17_MASK 0x0000000CL
+#define TC_CFG_L2_STORE_POLICY1__POLICY_18_MASK 0x00000030L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_19_MASK 0x000000C0L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_20_MASK 0x00000300L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_21_MASK 0x00000C00L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_22_MASK 0x00003000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_23_MASK 0x0000C000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_24_MASK 0x00030000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_25_MASK 0x000C0000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_26_MASK 0x00300000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_27_MASK 0x00C00000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_28_MASK 0x03000000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_29_MASK 0x0C000000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_30_MASK 0x30000000L
+#define TC_CFG_L2_STORE_POLICY1__POLICY_31_MASK 0xC0000000L
+//TC_CFG_L2_ATOMIC_POLICY
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0__SHIFT 0x0
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1__SHIFT 0x2
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2__SHIFT 0x4
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3__SHIFT 0x6
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4__SHIFT 0x8
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5__SHIFT 0xa
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6__SHIFT 0xc
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7__SHIFT 0xe
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8__SHIFT 0x10
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9__SHIFT 0x12
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10__SHIFT 0x14
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11__SHIFT 0x16
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12__SHIFT 0x18
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13__SHIFT 0x1a
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14__SHIFT 0x1c
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15__SHIFT 0x1e
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_0_MASK 0x00000003L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_1_MASK 0x0000000CL
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_2_MASK 0x00000030L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_3_MASK 0x000000C0L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_4_MASK 0x00000300L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_5_MASK 0x00000C00L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_6_MASK 0x00003000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_7_MASK 0x0000C000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_8_MASK 0x00030000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_9_MASK 0x000C0000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_10_MASK 0x00300000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_11_MASK 0x00C00000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_12_MASK 0x03000000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_13_MASK 0x0C000000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_14_MASK 0x30000000L
+#define TC_CFG_L2_ATOMIC_POLICY__POLICY_15_MASK 0xC0000000L
+//TC_CFG_L1_VOLATILE
+#define TC_CFG_L1_VOLATILE__VOL__SHIFT 0x0
+#define TC_CFG_L1_VOLATILE__VOL_MASK 0x0000000FL
+//TC_CFG_L2_VOLATILE
+#define TC_CFG_L2_VOLATILE__VOL__SHIFT 0x0
+#define TC_CFG_L2_VOLATILE__VOL_MASK 0x0000000FL
+//TCI_STATUS
+#define TCI_STATUS__TCI_BUSY__SHIFT 0x0
+#define TCI_STATUS__TCI_BUSY_MASK 0x00000001L
+//TCI_CNTL_1
+#define TCI_CNTL_1__WBINVL1_NUM_CYCLES__SHIFT 0x0
+#define TCI_CNTL_1__REQ_FIFO_DEPTH__SHIFT 0x10
+#define TCI_CNTL_1__WDATA_RAM_DEPTH__SHIFT 0x18
+#define TCI_CNTL_1__WBINVL1_NUM_CYCLES_MASK 0x0000FFFFL
+#define TCI_CNTL_1__REQ_FIFO_DEPTH_MASK 0x00FF0000L
+#define TCI_CNTL_1__WDATA_RAM_DEPTH_MASK 0xFF000000L
+//TCI_CNTL_2
+#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2__SHIFT 0x0
+#define TCI_CNTL_2__TCA_MAX_CREDIT__SHIFT 0x1
+#define TCI_CNTL_2__L1_INVAL_ON_WBINVL2_MASK 0x00000001L
+#define TCI_CNTL_2__TCA_MAX_CREDIT_MASK 0x000001FEL
+//TCC_CTRL
+#define TCC_CTRL__CACHE_SIZE__SHIFT 0x0
+#define TCC_CTRL__RATE__SHIFT 0x2
+#define TCC_CTRL__WRITEBACK_MARGIN__SHIFT 0x4
+#define TCC_CTRL__METADATA_LATENCY_FIFO_SIZE__SHIFT 0x8
+#define TCC_CTRL__SRC_FIFO_SIZE__SHIFT 0xc
+#define TCC_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x10
+#define TCC_CTRL__LINEAR_SET_HASH__SHIFT 0x15
+#define TCC_CTRL__MDC_SIZE__SHIFT 0x18
+#define TCC_CTRL__MDC_SECTOR_SIZE__SHIFT 0x1a
+#define TCC_CTRL__MDC_SIDEBAND_FIFO_SIZE__SHIFT 0x1c
+#define TCC_CTRL__CACHE_SIZE_MASK 0x00000003L
+#define TCC_CTRL__RATE_MASK 0x0000000CL
+#define TCC_CTRL__WRITEBACK_MARGIN_MASK 0x000000F0L
+#define TCC_CTRL__METADATA_LATENCY_FIFO_SIZE_MASK 0x00000F00L
+#define TCC_CTRL__SRC_FIFO_SIZE_MASK 0x0000F000L
+#define TCC_CTRL__LATENCY_FIFO_SIZE_MASK 0x000F0000L
+#define TCC_CTRL__LINEAR_SET_HASH_MASK 0x00200000L
+#define TCC_CTRL__MDC_SIZE_MASK 0x03000000L
+#define TCC_CTRL__MDC_SECTOR_SIZE_MASK 0x0C000000L
+#define TCC_CTRL__MDC_SIDEBAND_FIFO_SIZE_MASK 0xF0000000L
+//TCC_CTRL2
+#define TCC_CTRL2__PROBE_FIFO_SIZE__SHIFT 0x0
+#define TCC_CTRL2__PROBE_FIFO_SIZE_MASK 0x0000000FL
+//TCC_REDUNDANCY
+#define TCC_REDUNDANCY__MC_SEL0__SHIFT 0x0
+#define TCC_REDUNDANCY__MC_SEL1__SHIFT 0x1
+#define TCC_REDUNDANCY__MC_SEL0_MASK 0x00000001L
+#define TCC_REDUNDANCY__MC_SEL1_MASK 0x00000002L
+//TCC_EXE_DISABLE
+#define TCC_EXE_DISABLE__EXE_DISABLE__SHIFT 0x1
+#define TCC_EXE_DISABLE__EXE_DISABLE_MASK 0x00000002L
+//TCC_DSM_CNTL
+#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_DATA_SEL__SHIFT 0x6
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_DATA_SEL__SHIFT 0x9
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_DATA_SEL__SHIFT 0xc
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_DATA_SEL__SHIFT 0xf
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
+#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_DATA_SEL__SHIFT 0x12
+#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
+#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_DATA_SEL__SHIFT 0x15
+#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_SINGLE_WRITE__SHIFT 0x17
+#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_DATA_SEL__SHIFT 0x18
+#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_SINGLE_WRITE__SHIFT 0x1a
+#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_DATA_SEL__SHIFT 0x1b
+#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_SINGLE_WRITE__SHIFT 0x1d
+#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCC_DSM_CNTL__CACHE_DATA_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_0_1_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_DATA_SEL_MASK 0x000000C0L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_0_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_DATA_SEL_MASK 0x00000600L
+#define TCC_DSM_CNTL__CACHE_DATA_BANK_1_1_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_DATA_SEL_MASK 0x00003000L
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_0_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_DATA_SEL_MASK 0x00018000L
+#define TCC_DSM_CNTL__CACHE_DIRTY_BANK_1_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
+#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_DATA_SEL_MASK 0x000C0000L
+#define TCC_DSM_CNTL__HIGH_RATE_TAG_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
+#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_DATA_SEL_MASK 0x00600000L
+#define TCC_DSM_CNTL__LOW_RATE_TAG_IRRITATOR_SINGLE_WRITE_MASK 0x00800000L
+#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_DATA_SEL_MASK 0x03000000L
+#define TCC_DSM_CNTL__IN_USE_DEC_IRRITATOR_SINGLE_WRITE_MASK 0x04000000L
+#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_DATA_SEL_MASK 0x18000000L
+#define TCC_DSM_CNTL__IN_USE_TRANSFER_IRRITATOR_SINGLE_WRITE_MASK 0x20000000L
+//TCC_DSM_CNTLA
+#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_DATA_SEL__SHIFT 0x6
+#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_SINGLE_WRITE__SHIFT 0x8
+#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_DATA_SEL__SHIFT 0x9
+#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_SINGLE_WRITE__SHIFT 0xb
+#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL__SHIFT 0xc
+#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0xe
+#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL__SHIFT 0xf
+#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x11
+#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x12
+#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x14
+#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_DATA_SEL__SHIFT 0x15
+#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_SINGLE_WRITE__SHIFT 0x17
+#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_DATA_SEL__SHIFT 0x18
+#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_SINGLE_WRITE__SHIFT 0x1a
+#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_DATA_SEL__SHIFT 0x1b
+#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_SINGLE_WRITE__SHIFT 0x1d
+#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCC_DSM_CNTLA__SRC_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define TCC_DSM_CNTLA__UC_ATOMIC_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_DATA_SEL_MASK 0x000000C0L
+#define TCC_DSM_CNTLA__WRITE_RETURN_IRRITATOR_SINGLE_WRITE_MASK 0x00000100L
+#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_DATA_SEL_MASK 0x00000600L
+#define TCC_DSM_CNTLA__WRITE_CACHE_READ_IRRITATOR_SINGLE_WRITE_MASK 0x00000800L
+#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL_MASK 0x00003000L
+#define TCC_DSM_CNTLA__SRC_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00004000L
+#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_DATA_SEL_MASK 0x00018000L
+#define TCC_DSM_CNTLA__LATENCY_FIFO_NEXT_RAM_IRRITATOR_SINGLE_WRITE_MASK 0x00020000L
+#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_DATA_SEL_MASK 0x000C0000L
+#define TCC_DSM_CNTLA__CACHE_TAG_PROBE_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00100000L
+#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_DATA_SEL_MASK 0x00600000L
+#define TCC_DSM_CNTLA__LATENCY_FIFO_IRRITATOR_SINGLE_WRITE_MASK 0x00800000L
+#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_DATA_SEL_MASK 0x03000000L
+#define TCC_DSM_CNTLA__RETURN_DATA_IRRITATOR_SINGLE_WRITE_MASK 0x04000000L
+#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_DATA_SEL_MASK 0x18000000L
+#define TCC_DSM_CNTLA__RETURN_CONTROL_IRRITATOR_SINGLE_WRITE_MASK 0x20000000L
+//TCC_DSM_CNTL2
+#define TCC_DSM_CNTL2__CACHE_DATA_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCC_DSM_CNTL2__CACHE_DATA_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_SELECT_INJECT_DELAY__SHIFT 0x8
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_SELECT_INJECT_DELAY__SHIFT 0xb
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_SELECT_INJECT_DELAY__SHIFT 0xe
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_SELECT_INJECT_DELAY__SHIFT 0x11
+#define TCC_DSM_CNTL2__HIGH_RATE_TAG_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define TCC_DSM_CNTL2__HIGH_RATE_TAG_SELECT_INJECT_DELAY__SHIFT 0x14
+#define TCC_DSM_CNTL2__LOW_RATE_TAG_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define TCC_DSM_CNTL2__LOW_RATE_TAG_SELECT_INJECT_DELAY__SHIFT 0x17
+#define TCC_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define TCC_DSM_CNTL2__CACHE_DATA_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCC_DSM_CNTL2__CACHE_DATA_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_0_1_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_0_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define TCC_DSM_CNTL2__CACHE_DATA_BANK_1_1_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_0_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define TCC_DSM_CNTL2__CACHE_DIRTY_BANK_1_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define TCC_DSM_CNTL2__HIGH_RATE_TAG_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define TCC_DSM_CNTL2__HIGH_RATE_TAG_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define TCC_DSM_CNTL2__LOW_RATE_TAG_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define TCC_DSM_CNTL2__LOW_RATE_TAG_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define TCC_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//TCC_DSM_CNTL2A
+#define TCC_DSM_CNTL2A__IN_USE_DEC_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCC_DSM_CNTL2A__IN_USE_DEC_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TCC_DSM_CNTL2A__RETURN_DATA_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define TCC_DSM_CNTL2A__RETURN_DATA_SELECT_INJECT_DELAY__SHIFT 0x8
+#define TCC_DSM_CNTL2A__RETURN_CONTROL_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define TCC_DSM_CNTL2A__RETURN_CONTROL_SELECT_INJECT_DELAY__SHIFT 0xb
+#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_SELECT_INJECT_DELAY__SHIFT 0xe
+#define TCC_DSM_CNTL2A__WRITE_RETURN_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define TCC_DSM_CNTL2A__WRITE_RETURN_SELECT_INJECT_DELAY__SHIFT 0x11
+#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_SELECT_INJECT_DELAY__SHIFT 0x14
+#define TCC_DSM_CNTL2A__SRC_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define TCC_DSM_CNTL2A__SRC_FIFO_SELECT_INJECT_DELAY__SHIFT 0x17
+#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x1b
+#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_SELECT_INJECT_DELAY__SHIFT 0x1d
+#define TCC_DSM_CNTL2A__IN_USE_DEC_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCC_DSM_CNTL2A__IN_USE_DEC_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TCC_DSM_CNTL2A__IN_USE_TRANSFER_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TCC_DSM_CNTL2A__RETURN_DATA_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define TCC_DSM_CNTL2A__RETURN_DATA_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define TCC_DSM_CNTL2A__RETURN_CONTROL_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define TCC_DSM_CNTL2A__RETURN_CONTROL_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define TCC_DSM_CNTL2A__UC_ATOMIC_FIFO_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define TCC_DSM_CNTL2A__WRITE_RETURN_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define TCC_DSM_CNTL2A__WRITE_RETURN_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define TCC_DSM_CNTL2A__WRITE_CACHE_READ_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define TCC_DSM_CNTL2A__SRC_FIFO_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define TCC_DSM_CNTL2A__SRC_FIFO_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define TCC_DSM_CNTL2A__SRC_FIFO_NEXT_RAM_SELECT_INJECT_DELAY_MASK 0x04000000L
+#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_ENABLE_ERROR_INJECT_MASK 0x18000000L
+#define TCC_DSM_CNTL2A__CACHE_TAG_PROBE_FIFO_SELECT_INJECT_DELAY_MASK 0x20000000L
+//TCC_DSM_CNTL2B
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TCC_DSM_CNTL2B__LATENCY_FIFO_NEXT_RAM_SELECT_INJECT_DELAY_MASK 0x00000020L
+//TCC_WBINVL2
+#define TCC_WBINVL2__DONE__SHIFT 0x4
+#define TCC_WBINVL2__DONE_MASK 0x00000010L
+//TCC_SOFT_RESET
+#define TCC_SOFT_RESET__HALT_FOR_RESET__SHIFT 0x0
+#define TCC_SOFT_RESET__HALT_FOR_RESET_MASK 0x00000001L
+//TCA_CTRL
+#define TCA_CTRL__HOLE_TIMEOUT__SHIFT 0x0
+#define TCA_CTRL__RB_STILL_4_PHASE__SHIFT 0x4
+#define TCA_CTRL__RB_AS_TCI__SHIFT 0x5
+#define TCA_CTRL__DISABLE_UTCL2_PRIORITY__SHIFT 0x6
+#define TCA_CTRL__DISABLE_RB_ONLY_TCA_ARBITER__SHIFT 0x7
+#define TCA_CTRL__HOLE_TIMEOUT_MASK 0x0000000FL
+#define TCA_CTRL__RB_STILL_4_PHASE_MASK 0x00000010L
+#define TCA_CTRL__RB_AS_TCI_MASK 0x00000020L
+#define TCA_CTRL__DISABLE_UTCL2_PRIORITY_MASK 0x00000040L
+#define TCA_CTRL__DISABLE_RB_ONLY_TCA_ARBITER_MASK 0x00000080L
+//TCA_BURST_MASK
+#define TCA_BURST_MASK__ADDR_MASK__SHIFT 0x0
+#define TCA_BURST_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
+//TCA_BURST_CTRL
+#define TCA_BURST_CTRL__MAX_BURST__SHIFT 0x0
+#define TCA_BURST_CTRL__RB_DISABLE__SHIFT 0x3
+#define TCA_BURST_CTRL__TCP_DISABLE__SHIFT 0x4
+#define TCA_BURST_CTRL__SQC_DISABLE__SHIFT 0x5
+#define TCA_BURST_CTRL__CPF_DISABLE__SHIFT 0x6
+#define TCA_BURST_CTRL__CPG_DISABLE__SHIFT 0x7
+#define TCA_BURST_CTRL__IA_DISABLE__SHIFT 0x8
+#define TCA_BURST_CTRL__WD_DISABLE__SHIFT 0x9
+#define TCA_BURST_CTRL__SQG_DISABLE__SHIFT 0xa
+#define TCA_BURST_CTRL__UTCL2_DISABLE__SHIFT 0xb
+#define TCA_BURST_CTRL__TPI_DISABLE__SHIFT 0xc
+#define TCA_BURST_CTRL__RLC_DISABLE__SHIFT 0xd
+#define TCA_BURST_CTRL__PA_DISABLE__SHIFT 0xe
+#define TCA_BURST_CTRL__MAX_BURST_MASK 0x00000007L
+#define TCA_BURST_CTRL__RB_DISABLE_MASK 0x00000008L
+#define TCA_BURST_CTRL__TCP_DISABLE_MASK 0x00000010L
+#define TCA_BURST_CTRL__SQC_DISABLE_MASK 0x00000020L
+#define TCA_BURST_CTRL__CPF_DISABLE_MASK 0x00000040L
+#define TCA_BURST_CTRL__CPG_DISABLE_MASK 0x00000080L
+#define TCA_BURST_CTRL__IA_DISABLE_MASK 0x00000100L
+#define TCA_BURST_CTRL__WD_DISABLE_MASK 0x00000200L
+#define TCA_BURST_CTRL__SQG_DISABLE_MASK 0x00000400L
+#define TCA_BURST_CTRL__UTCL2_DISABLE_MASK 0x00000800L
+#define TCA_BURST_CTRL__TPI_DISABLE_MASK 0x00001000L
+#define TCA_BURST_CTRL__RLC_DISABLE_MASK 0x00002000L
+#define TCA_BURST_CTRL__PA_DISABLE_MASK 0x00004000L
+//TCA_DSM_CNTL
+#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_DATA_SEL__SHIFT 0x0
+#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
+#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_DATA_SEL__SHIFT 0x3
+#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_SINGLE_WRITE__SHIFT 0x5
+#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_DATA_SEL_MASK 0x00000003L
+#define TCA_DSM_CNTL__HOLE_FIFO_SED_IRRITATOR_SINGLE_WRITE_MASK 0x00000004L
+#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_DATA_SEL_MASK 0x00000018L
+#define TCA_DSM_CNTL__REQ_FIFO_SED_IRRITATOR_SINGLE_WRITE_MASK 0x00000020L
+//TCA_DSM_CNTL2
+#define TCA_DSM_CNTL2__HOLE_FIFO_SED_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define TCA_DSM_CNTL2__HOLE_FIFO_SED_SELECT_INJECT_DELAY__SHIFT 0x2
+#define TCA_DSM_CNTL2__REQ_FIFO_SED_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define TCA_DSM_CNTL2__REQ_FIFO_SED_SELECT_INJECT_DELAY__SHIFT 0x5
+#define TCA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define TCA_DSM_CNTL2__HOLE_FIFO_SED_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define TCA_DSM_CNTL2__HOLE_FIFO_SED_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define TCA_DSM_CNTL2__REQ_FIFO_SED_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define TCA_DSM_CNTL2__REQ_FIFO_SED_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define TCA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+
+
+// addressBlock: gc_shdec
+//SPI_SHADER_PGM_RSRC3_PS
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_PS__SIMD_DISABLE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_PS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
+#define SPI_SHADER_PGM_RSRC3_PS__SIMD_DISABLE_MASK 0x3C000000L
+//SPI_SHADER_PGM_LO_PS
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_PS
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_PS
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL_MASK 0x20000000L
+//SPI_SHADER_PGM_RSRC2_PS
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC2_PS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0x0000FF00L
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x01FF0000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC2_PS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_USER_DATA_PS_0
+#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_1
+#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_2
+#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_3
+#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_4
+#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_5
+#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_6
+#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_7
+#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_8
+#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_9
+#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_10
+#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_11
+#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_12
+#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_13
+#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_14
+#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_15
+#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_16
+#define SPI_SHADER_USER_DATA_PS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_17
+#define SPI_SHADER_USER_DATA_PS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_18
+#define SPI_SHADER_USER_DATA_PS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_19
+#define SPI_SHADER_USER_DATA_PS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_20
+#define SPI_SHADER_USER_DATA_PS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_21
+#define SPI_SHADER_USER_DATA_PS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_22
+#define SPI_SHADER_USER_DATA_PS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_23
+#define SPI_SHADER_USER_DATA_PS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_24
+#define SPI_SHADER_USER_DATA_PS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_25
+#define SPI_SHADER_USER_DATA_PS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_26
+#define SPI_SHADER_USER_DATA_PS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_27
+#define SPI_SHADER_USER_DATA_PS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_28
+#define SPI_SHADER_USER_DATA_PS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_29
+#define SPI_SHADER_USER_DATA_PS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_30
+#define SPI_SHADER_USER_DATA_PS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_31
+#define SPI_SHADER_USER_DATA_PS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC3_VS
+#define SPI_SHADER_PGM_RSRC3_VS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_VS__SIMD_DISABLE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC3_VS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_VS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_VS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
+#define SPI_SHADER_PGM_RSRC3_VS__SIMD_DISABLE_MASK 0x3C000000L
+//SPI_SHADER_LATE_ALLOC_VS
+#define SPI_SHADER_LATE_ALLOC_VS__LIMIT__SHIFT 0x0
+#define SPI_SHADER_LATE_ALLOC_VS__LIMIT_MASK 0x0000003FL
+//SPI_SHADER_PGM_LO_VS
+#define SPI_SHADER_PGM_LO_VS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_VS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_VS
+#define SPI_SHADER_PGM_HI_VS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_VS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_VS
+#define SPI_SHADER_PGM_RSRC1_VS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_VS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_VS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC1_VS__FP16_OVFL__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC1_VS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_VS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_VS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_VS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_VS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_VS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_VS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_VS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_VS__VGPR_COMP_CNT_MASK 0x03000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CU_GROUP_ENABLE_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_VS__CDBG_USER_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC1_VS__FP16_OVFL_MASK 0x80000000L
+//SPI_SHADER_PGM_RSRC2_VS
+#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN__SHIFT 0x9
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN__SHIFT 0xb
+#define SPI_SHADER_PGM_RSRC2_VS__SO_EN__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN__SHIFT 0xd
+#define SPI_SHADER_PGM_RSRC2_VS__PC_BASE_EN__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC2_VS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_VS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE0_EN_MASK 0x00000100L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE1_EN_MASK 0x00000200L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE2_EN_MASK 0x00000400L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_BASE3_EN_MASK 0x00000800L
+#define SPI_SHADER_PGM_RSRC2_VS__SO_EN_MASK 0x00001000L
+#define SPI_SHADER_PGM_RSRC2_VS__EXCP_EN_MASK 0x003FE000L
+#define SPI_SHADER_PGM_RSRC2_VS__PC_BASE_EN_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC2_VS__DISPATCH_DRAW_EN_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC2_VS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_VS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_USER_DATA_VS_0
+#define SPI_SHADER_USER_DATA_VS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_1
+#define SPI_SHADER_USER_DATA_VS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_2
+#define SPI_SHADER_USER_DATA_VS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_3
+#define SPI_SHADER_USER_DATA_VS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_4
+#define SPI_SHADER_USER_DATA_VS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_5
+#define SPI_SHADER_USER_DATA_VS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_6
+#define SPI_SHADER_USER_DATA_VS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_7
+#define SPI_SHADER_USER_DATA_VS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_8
+#define SPI_SHADER_USER_DATA_VS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_9
+#define SPI_SHADER_USER_DATA_VS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_10
+#define SPI_SHADER_USER_DATA_VS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_11
+#define SPI_SHADER_USER_DATA_VS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_12
+#define SPI_SHADER_USER_DATA_VS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_13
+#define SPI_SHADER_USER_DATA_VS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_14
+#define SPI_SHADER_USER_DATA_VS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_15
+#define SPI_SHADER_USER_DATA_VS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_16
+#define SPI_SHADER_USER_DATA_VS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_17
+#define SPI_SHADER_USER_DATA_VS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_18
+#define SPI_SHADER_USER_DATA_VS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_19
+#define SPI_SHADER_USER_DATA_VS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_20
+#define SPI_SHADER_USER_DATA_VS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_21
+#define SPI_SHADER_USER_DATA_VS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_22
+#define SPI_SHADER_USER_DATA_VS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_23
+#define SPI_SHADER_USER_DATA_VS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_24
+#define SPI_SHADER_USER_DATA_VS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_25
+#define SPI_SHADER_USER_DATA_VS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_26
+#define SPI_SHADER_USER_DATA_VS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_27
+#define SPI_SHADER_USER_DATA_VS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_28
+#define SPI_SHADER_USER_DATA_VS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_29
+#define SPI_SHADER_USER_DATA_VS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_30
+#define SPI_SHADER_USER_DATA_VS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_VS_31
+#define SPI_SHADER_USER_DATA_VS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_VS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC2_GS_VS
+#define SPI_SHADER_PGM_RSRC2_GS_VS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_GS_VS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_GS_VS__EXCP_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_GS_VS__VGPR_COMP_CNT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_GS_VS__OC_LDS_EN__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_GS_VS__LDS_SIZE__SHIFT 0x13
+#define SPI_SHADER_PGM_RSRC2_GS_VS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_GS_VS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_GS_VS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__EXCP_EN_MASK 0x0000FF80L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__VGPR_COMP_CNT_MASK 0x00030000L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__OC_LDS_EN_MASK 0x00040000L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__LDS_SIZE_MASK 0x07F80000L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_GS_VS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_PGM_RSRC4_GS
+#define SPI_SHADER_PGM_RSRC4_GS__GROUP_FIFO_DEPTH__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC4_GS__GROUP_FIFO_DEPTH_MASK 0x0000007FL
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS_MASK 0x00003F80L
+//SPI_SHADER_USER_DATA_ADDR_LO_GS
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_GS
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_ES
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_ES
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_GS
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_GS__SIMD_DISABLE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
+#define SPI_SHADER_PGM_RSRC3_GS__SIMD_DISABLE_MASK 0x3C000000L
+//SPI_SHADER_PGM_LO_GS
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_GS
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_GS
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT_MASK 0x60000000L
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL_MASK 0x80000000L
+//SPI_SHADER_PGM_RSRC2_GS
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE__SHIFT 0x13
+#define SPI_SHADER_PGM_RSRC2_GS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0x0000FF80L
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT_MASK 0x00030000L
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN_MASK 0x00040000L
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE_MASK 0x07F80000L
+#define SPI_SHADER_PGM_RSRC2_GS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_USER_DATA_ES_0
+#define SPI_SHADER_USER_DATA_ES_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_1
+#define SPI_SHADER_USER_DATA_ES_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_2
+#define SPI_SHADER_USER_DATA_ES_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_3
+#define SPI_SHADER_USER_DATA_ES_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_4
+#define SPI_SHADER_USER_DATA_ES_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_5
+#define SPI_SHADER_USER_DATA_ES_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_6
+#define SPI_SHADER_USER_DATA_ES_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_7
+#define SPI_SHADER_USER_DATA_ES_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_8
+#define SPI_SHADER_USER_DATA_ES_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_9
+#define SPI_SHADER_USER_DATA_ES_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_10
+#define SPI_SHADER_USER_DATA_ES_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_11
+#define SPI_SHADER_USER_DATA_ES_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_12
+#define SPI_SHADER_USER_DATA_ES_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_13
+#define SPI_SHADER_USER_DATA_ES_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_14
+#define SPI_SHADER_USER_DATA_ES_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_15
+#define SPI_SHADER_USER_DATA_ES_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_16
+#define SPI_SHADER_USER_DATA_ES_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_17
+#define SPI_SHADER_USER_DATA_ES_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_18
+#define SPI_SHADER_USER_DATA_ES_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_19
+#define SPI_SHADER_USER_DATA_ES_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_20
+#define SPI_SHADER_USER_DATA_ES_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_21
+#define SPI_SHADER_USER_DATA_ES_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_22
+#define SPI_SHADER_USER_DATA_ES_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_23
+#define SPI_SHADER_USER_DATA_ES_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_24
+#define SPI_SHADER_USER_DATA_ES_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_25
+#define SPI_SHADER_USER_DATA_ES_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_26
+#define SPI_SHADER_USER_DATA_ES_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_27
+#define SPI_SHADER_USER_DATA_ES_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_28
+#define SPI_SHADER_USER_DATA_ES_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_29
+#define SPI_SHADER_USER_DATA_ES_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_30
+#define SPI_SHADER_USER_DATA_ES_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ES_31
+#define SPI_SHADER_USER_DATA_ES_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ES_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC4_HS
+#define SPI_SHADER_PGM_RSRC4_HS__GROUP_FIFO_DEPTH__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_HS__GROUP_FIFO_DEPTH_MASK 0x0000007FL
+//SPI_SHADER_USER_DATA_ADDR_LO_HS
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_HS
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_LS
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_LS
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_HS
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC3_HS__SIMD_DISABLE__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC3_HS__SIMD_DISABLE_MASK 0x00003C00L
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN_MASK 0xFFFF0000L
+//SPI_SHADER_PGM_LO_HS
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_HS
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_HS
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT_MASK 0x30000000L
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL_MASK 0x40000000L
+//SPI_SHADER_PGM_RSRC2_HS
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_HS__SKIP_USGPR0__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x0000FF80L
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE_MASK 0x01FF0000L
+#define SPI_SHADER_PGM_RSRC2_HS__SKIP_USGPR0_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB_MASK 0x10000000L
+//SPI_SHADER_USER_DATA_LS_0
+#define SPI_SHADER_USER_DATA_LS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_1
+#define SPI_SHADER_USER_DATA_LS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_2
+#define SPI_SHADER_USER_DATA_LS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_3
+#define SPI_SHADER_USER_DATA_LS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_4
+#define SPI_SHADER_USER_DATA_LS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_5
+#define SPI_SHADER_USER_DATA_LS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_6
+#define SPI_SHADER_USER_DATA_LS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_7
+#define SPI_SHADER_USER_DATA_LS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_8
+#define SPI_SHADER_USER_DATA_LS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_9
+#define SPI_SHADER_USER_DATA_LS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_10
+#define SPI_SHADER_USER_DATA_LS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_11
+#define SPI_SHADER_USER_DATA_LS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_12
+#define SPI_SHADER_USER_DATA_LS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_13
+#define SPI_SHADER_USER_DATA_LS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_14
+#define SPI_SHADER_USER_DATA_LS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_15
+#define SPI_SHADER_USER_DATA_LS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_16
+#define SPI_SHADER_USER_DATA_LS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_17
+#define SPI_SHADER_USER_DATA_LS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_18
+#define SPI_SHADER_USER_DATA_LS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_19
+#define SPI_SHADER_USER_DATA_LS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_20
+#define SPI_SHADER_USER_DATA_LS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_21
+#define SPI_SHADER_USER_DATA_LS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_22
+#define SPI_SHADER_USER_DATA_LS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_23
+#define SPI_SHADER_USER_DATA_LS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_24
+#define SPI_SHADER_USER_DATA_LS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_25
+#define SPI_SHADER_USER_DATA_LS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_26
+#define SPI_SHADER_USER_DATA_LS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_27
+#define SPI_SHADER_USER_DATA_LS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_28
+#define SPI_SHADER_USER_DATA_LS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_29
+#define SPI_SHADER_USER_DATA_LS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_30
+#define SPI_SHADER_USER_DATA_LS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_LS_31
+#define SPI_SHADER_USER_DATA_LS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_LS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_0
+#define SPI_SHADER_USER_DATA_COMMON_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_1
+#define SPI_SHADER_USER_DATA_COMMON_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_2
+#define SPI_SHADER_USER_DATA_COMMON_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_3
+#define SPI_SHADER_USER_DATA_COMMON_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_4
+#define SPI_SHADER_USER_DATA_COMMON_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_5
+#define SPI_SHADER_USER_DATA_COMMON_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_6
+#define SPI_SHADER_USER_DATA_COMMON_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_7
+#define SPI_SHADER_USER_DATA_COMMON_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_8
+#define SPI_SHADER_USER_DATA_COMMON_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_9
+#define SPI_SHADER_USER_DATA_COMMON_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_10
+#define SPI_SHADER_USER_DATA_COMMON_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_11
+#define SPI_SHADER_USER_DATA_COMMON_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_12
+#define SPI_SHADER_USER_DATA_COMMON_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_13
+#define SPI_SHADER_USER_DATA_COMMON_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_14
+#define SPI_SHADER_USER_DATA_COMMON_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_15
+#define SPI_SHADER_USER_DATA_COMMON_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_16
+#define SPI_SHADER_USER_DATA_COMMON_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_17
+#define SPI_SHADER_USER_DATA_COMMON_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_18
+#define SPI_SHADER_USER_DATA_COMMON_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_19
+#define SPI_SHADER_USER_DATA_COMMON_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_20
+#define SPI_SHADER_USER_DATA_COMMON_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_21
+#define SPI_SHADER_USER_DATA_COMMON_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_22
+#define SPI_SHADER_USER_DATA_COMMON_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_23
+#define SPI_SHADER_USER_DATA_COMMON_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_24
+#define SPI_SHADER_USER_DATA_COMMON_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_25
+#define SPI_SHADER_USER_DATA_COMMON_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_26
+#define SPI_SHADER_USER_DATA_COMMON_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_27
+#define SPI_SHADER_USER_DATA_COMMON_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_28
+#define SPI_SHADER_USER_DATA_COMMON_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_29
+#define SPI_SHADER_USER_DATA_COMMON_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_30
+#define SPI_SHADER_USER_DATA_COMMON_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_COMMON_31
+#define SPI_SHADER_USER_DATA_COMMON_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_COMMON_31__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_INITIATOR
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x0
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x1
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x2
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x3
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x4
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x5
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x6
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0xa
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0xb
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED__SHIFT 0xc
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0xe
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x00000001L
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x00000002L
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x00000004L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x00000008L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x00000010L
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x00000020L
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x00000040L
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x00000400L
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x00000800L
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED_MASK 0x00001000L
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x00004000L
+//COMPUTE_DIM_X
+#define COMPUTE_DIM_X__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_X__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Y
+#define COMPUTE_DIM_Y__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Y__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Z
+#define COMPUTE_DIM_Z__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Z__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_START_X
+#define COMPUTE_START_X__START__SHIFT 0x0
+#define COMPUTE_START_X__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Y
+#define COMPUTE_START_Y__START__SHIFT 0x0
+#define COMPUTE_START_Y__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Z
+#define COMPUTE_START_Z__START__SHIFT 0x0
+#define COMPUTE_START_Z__START_MASK 0xFFFFFFFFL
+//COMPUTE_NUM_THREAD_X
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Y
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Z
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_PIPELINESTAT_ENABLE
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE__SHIFT 0x0
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE_MASK 0x00000001L
+//COMPUTE_PERFCOUNT_ENABLE
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE__SHIFT 0x0
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE_MASK 0x00000001L
+//COMPUTE_PGM_LO
+#define COMPUTE_PGM_LO__DATA__SHIFT 0x0
+#define COMPUTE_PGM_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_PGM_HI
+#define COMPUTE_PGM_HI__DATA__SHIFT 0x0
+#define COMPUTE_PGM_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_PKT_ADDR_LO
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_PKT_ADDR_HI
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_LO
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_HI
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
+//COMPUTE_PGM_RSRC1
+#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x0
+#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x6
+#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0xa
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0xc
+#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x14
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x15
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE__SHIFT 0x16
+#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x17
+#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x18
+#define COMPUTE_PGM_RSRC1__CDBG_USER__SHIFT 0x19
+#define COMPUTE_PGM_RSRC1__FP16_OVFL__SHIFT 0x1a
+#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x0000003FL
+#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x000003C0L
+#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0x00000C00L
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0x000FF000L
+#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x00100000L
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x00200000L
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE_MASK 0x00400000L
+#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x00800000L
+#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x01000000L
+#define COMPUTE_PGM_RSRC1__CDBG_USER_MASK 0x02000000L
+#define COMPUTE_PGM_RSRC1__FP16_OVFL_MASK 0x04000000L
+//COMPUTE_PGM_RSRC2
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x0
+#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x1
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x6
+#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x7
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x8
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x9
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0xa
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0xb
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0xd
+#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0xf
+#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x18
+#define COMPUTE_PGM_RSRC2__SKIP_USGPR0__SHIFT 0x1f
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x00000001L
+#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x0000003EL
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x00000040L
+#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x00000080L
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x00000100L
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x00000200L
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x00001800L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x00006000L
+#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0x00FF8000L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7F000000L
+#define COMPUTE_PGM_RSRC2__SKIP_USGPR0_MASK 0x80000000L
+//COMPUTE_VMID
+#define COMPUTE_VMID__DATA__SHIFT 0x0
+#define COMPUTE_VMID__DATA_MASK 0x0000000FL
+//COMPUTE_RESOURCE_LIMITS
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x0
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0xc
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x10
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x16
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x17
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x18
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DISABLE__SHIFT 0x1b
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x000003FFL
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0x0000F000L
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x003F0000L
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x00400000L
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x00800000L
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x07000000L
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DISABLE_MASK 0x78000000L
+//COMPUTE_STATIC_THREAD_MGMT_SE0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SH1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE1
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SH1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_TMPRING_SIZE
+#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x01FFF000L
+//COMPUTE_STATIC_THREAD_MGMT_SE2
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SH1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE3
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SH1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_RESTART_X
+#define COMPUTE_RESTART_X__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_X__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Y
+#define COMPUTE_RESTART_Y__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Y__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Z
+#define COMPUTE_RESTART_Z__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Z__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_THREAD_TRACE_ENABLE
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE__SHIFT 0x0
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE_MASK 0x00000001L
+//COMPUTE_MISC_RESERVED
+#define COMPUTE_MISC_RESERVED__SEND_SEID__SHIFT 0x0
+#define COMPUTE_MISC_RESERVED__RESERVED2__SHIFT 0x2
+#define COMPUTE_MISC_RESERVED__RESERVED3__SHIFT 0x3
+#define COMPUTE_MISC_RESERVED__RESERVED4__SHIFT 0x4
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE__SHIFT 0x5
+#define COMPUTE_MISC_RESERVED__SEND_SEID_MASK 0x00000003L
+#define COMPUTE_MISC_RESERVED__RESERVED2_MASK 0x00000004L
+#define COMPUTE_MISC_RESERVED__RESERVED3_MASK 0x00000008L
+#define COMPUTE_MISC_RESERVED__RESERVED4_MASK 0x00000010L
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE_MASK 0x0001FFE0L
+//COMPUTE_DISPATCH_ID
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID__SHIFT 0x0
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID_MASK 0xFFFFFFFFL
+//COMPUTE_THREADGROUP_ID
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID__SHIFT 0x0
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID_MASK 0xFFFFFFFFL
+//COMPUTE_RELAUNCH
+#define COMPUTE_RELAUNCH__PAYLOAD__SHIFT 0x0
+#define COMPUTE_RELAUNCH__IS_EVENT__SHIFT 0x1e
+#define COMPUTE_RELAUNCH__IS_STATE__SHIFT 0x1f
+#define COMPUTE_RELAUNCH__PAYLOAD_MASK 0x3FFFFFFFL
+#define COMPUTE_RELAUNCH__IS_EVENT_MASK 0x40000000L
+#define COMPUTE_RELAUNCH__IS_STATE_MASK 0x80000000L
+//COMPUTE_WAVE_RESTORE_ADDR_LO
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR_MASK 0xFFFFFFFFL
+//COMPUTE_WAVE_RESTORE_ADDR_HI
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR_MASK 0xFFFFL
+//COMPUTE_SHADER_CHKSUM
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM__SHIFT 0x0
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_0
+#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_0__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_1
+#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_1__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_2
+#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_2__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_3
+#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_3__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_4
+#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_4__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_5
+#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_5__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_6
+#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_6__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_7
+#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_7__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_8
+#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_8__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_9
+#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_9__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_10
+#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_10__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_11
+#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_11__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_12
+#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_12__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_13
+#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_13__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_14
+#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_14__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_15
+#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_15__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_END
+#define COMPUTE_DISPATCH_END__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_END__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_NOWHERE
+#define COMPUTE_NOWHERE__DATA__SHIFT 0x0
+#define COMPUTE_NOWHERE__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_cppdec
+//CP_DFY_CNTL
+#define CP_DFY_CNTL__POLICY__SHIFT 0x0
+#define CP_DFY_CNTL__MTYPE__SHIFT 0x2
+#define CP_DFY_CNTL__TPI_SDP_SEL__SHIFT 0x1a
+#define CP_DFY_CNTL__LFSR_RESET__SHIFT 0x1c
+#define CP_DFY_CNTL__MODE__SHIFT 0x1d
+#define CP_DFY_CNTL__ENABLE__SHIFT 0x1f
+#define CP_DFY_CNTL__POLICY_MASK 0x00000001L
+#define CP_DFY_CNTL__MTYPE_MASK 0x0000000CL
+#define CP_DFY_CNTL__TPI_SDP_SEL_MASK 0x04000000L
+#define CP_DFY_CNTL__LFSR_RESET_MASK 0x10000000L
+#define CP_DFY_CNTL__MODE_MASK 0x60000000L
+#define CP_DFY_CNTL__ENABLE_MASK 0x80000000L
+//CP_DFY_STAT
+#define CP_DFY_STAT__BURST_COUNT__SHIFT 0x0
+#define CP_DFY_STAT__TAGS_PENDING__SHIFT 0x10
+#define CP_DFY_STAT__BUSY__SHIFT 0x1f
+#define CP_DFY_STAT__BURST_COUNT_MASK 0x0000FFFFL
+#define CP_DFY_STAT__TAGS_PENDING_MASK 0x07FF0000L
+#define CP_DFY_STAT__BUSY_MASK 0x80000000L
+//CP_DFY_ADDR_HI
+#define CP_DFY_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DFY_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//CP_DFY_ADDR_LO
+#define CP_DFY_ADDR_LO__ADDR_LO__SHIFT 0x5
+#define CP_DFY_ADDR_LO__ADDR_LO_MASK 0xFFFFFFE0L
+//CP_DFY_DATA_0
+#define CP_DFY_DATA_0__DATA__SHIFT 0x0
+#define CP_DFY_DATA_0__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_1
+#define CP_DFY_DATA_1__DATA__SHIFT 0x0
+#define CP_DFY_DATA_1__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_2
+#define CP_DFY_DATA_2__DATA__SHIFT 0x0
+#define CP_DFY_DATA_2__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_3
+#define CP_DFY_DATA_3__DATA__SHIFT 0x0
+#define CP_DFY_DATA_3__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_4
+#define CP_DFY_DATA_4__DATA__SHIFT 0x0
+#define CP_DFY_DATA_4__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_5
+#define CP_DFY_DATA_5__DATA__SHIFT 0x0
+#define CP_DFY_DATA_5__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_6
+#define CP_DFY_DATA_6__DATA__SHIFT 0x0
+#define CP_DFY_DATA_6__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_7
+#define CP_DFY_DATA_7__DATA__SHIFT 0x0
+#define CP_DFY_DATA_7__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_8
+#define CP_DFY_DATA_8__DATA__SHIFT 0x0
+#define CP_DFY_DATA_8__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_9
+#define CP_DFY_DATA_9__DATA__SHIFT 0x0
+#define CP_DFY_DATA_9__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_10
+#define CP_DFY_DATA_10__DATA__SHIFT 0x0
+#define CP_DFY_DATA_10__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_11
+#define CP_DFY_DATA_11__DATA__SHIFT 0x0
+#define CP_DFY_DATA_11__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_12
+#define CP_DFY_DATA_12__DATA__SHIFT 0x0
+#define CP_DFY_DATA_12__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_13
+#define CP_DFY_DATA_13__DATA__SHIFT 0x0
+#define CP_DFY_DATA_13__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_14
+#define CP_DFY_DATA_14__DATA__SHIFT 0x0
+#define CP_DFY_DATA_14__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_15
+#define CP_DFY_DATA_15__DATA__SHIFT 0x0
+#define CP_DFY_DATA_15__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_CMD
+#define CP_DFY_CMD__OFFSET__SHIFT 0x0
+#define CP_DFY_CMD__SIZE__SHIFT 0x10
+#define CP_DFY_CMD__OFFSET_MASK 0x000001FFL
+#define CP_DFY_CMD__SIZE_MASK 0xFFFF0000L
+//CP_EOPQ_WAIT_TIME
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME__SHIFT 0x0
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT__SHIFT 0xa
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME_MASK 0x000003FFL
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT_MASK 0x0003FC00L
+//CP_CPC_MGCG_SYNC_CNTL
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD__SHIFT 0x0
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD__SHIFT 0x8
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD_MASK 0x000000FFL
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD_MASK 0x0000FF00L
+//CPC_INT_INFO
+#define CPC_INT_INFO__ADDR_HI__SHIFT 0x0
+#define CPC_INT_INFO__TYPE__SHIFT 0x10
+#define CPC_INT_INFO__VMID__SHIFT 0x14
+#define CPC_INT_INFO__QUEUE_ID__SHIFT 0x1c
+#define CPC_INT_INFO__ADDR_HI_MASK 0x0000FFFFL
+#define CPC_INT_INFO__TYPE_MASK 0x00010000L
+#define CPC_INT_INFO__VMID_MASK 0x00F00000L
+#define CPC_INT_INFO__QUEUE_ID_MASK 0x70000000L
+//CP_VIRT_STATUS
+#define CP_VIRT_STATUS__VIRT_STATUS__SHIFT 0x0
+#define CP_VIRT_STATUS__VIRT_STATUS_MASK 0xFFFFFFFFL
+//CPC_INT_ADDR
+#define CPC_INT_ADDR__ADDR__SHIFT 0x0
+#define CPC_INT_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CPC_INT_PASID
+#define CPC_INT_PASID__PASID__SHIFT 0x0
+#define CPC_INT_PASID__PASID_MASK 0x0000FFFFL
+//CP_GFX_ERROR
+#define CP_GFX_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_GFX_ERROR__RSVD1_ERROR__SHIFT 0x5
+#define CP_GFX_ERROR__RSVD2_ERROR__SHIFT 0x6
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR__SHIFT 0x7
+#define CP_GFX_ERROR__QU_STRM_UTCL1_ERROR__SHIFT 0x8
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR__SHIFT 0x9
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR__SHIFT 0xa
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR__SHIFT 0xb
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR__SHIFT 0xc
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR__SHIFT 0xd
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR__SHIFT 0xe
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR__SHIFT 0xf
+#define CP_GFX_ERROR__CE_DMA_UTCL1_ERROR__SHIFT 0x10
+#define CP_GFX_ERROR__PFP_VGTDMA_UTCL1_ERROR__SHIFT 0x11
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0x12
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x13
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR__SHIFT 0x14
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR__SHIFT 0x15
+#define CP_GFX_ERROR__CE_TC_UTCL1_ERROR__SHIFT 0x16
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR__SHIFT 0x17
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR__SHIFT 0x18
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR__SHIFT 0x19
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR__SHIFT 0x1a
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR__SHIFT 0x1b
+#define CP_GFX_ERROR__C1_FETCHER_UTCL1_ERROR__SHIFT 0x1c
+#define CP_GFX_ERROR__C2_FETCHER_UTCL1_ERROR__SHIFT 0x1d
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR__SHIFT 0x1e
+#define CP_GFX_ERROR__CE_INIT_UTCL1_ERROR__SHIFT 0x1f
+#define CP_GFX_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_GFX_ERROR__RSVD1_ERROR_MASK 0x00000020L
+#define CP_GFX_ERROR__RSVD2_ERROR_MASK 0x00000040L
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR_MASK 0x00000080L
+#define CP_GFX_ERROR__QU_STRM_UTCL1_ERROR_MASK 0x00000100L
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR_MASK 0x00000200L
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR_MASK 0x00000400L
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR_MASK 0x00001000L
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR_MASK 0x00002000L
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR_MASK 0x00004000L
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR_MASK 0x00008000L
+#define CP_GFX_ERROR__CE_DMA_UTCL1_ERROR_MASK 0x00010000L
+#define CP_GFX_ERROR__PFP_VGTDMA_UTCL1_ERROR_MASK 0x00020000L
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00040000L
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00080000L
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR_MASK 0x00100000L
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR_MASK 0x00200000L
+#define CP_GFX_ERROR__CE_TC_UTCL1_ERROR_MASK 0x00400000L
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR_MASK 0x00800000L
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR_MASK 0x01000000L
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR_MASK 0x02000000L
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR_MASK 0x04000000L
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR_MASK 0x08000000L
+#define CP_GFX_ERROR__C1_FETCHER_UTCL1_ERROR_MASK 0x10000000L
+#define CP_GFX_ERROR__C2_FETCHER_UTCL1_ERROR_MASK 0x20000000L
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR_MASK 0x40000000L
+#define CP_GFX_ERROR__CE_INIT_UTCL1_ERROR_MASK 0x80000000L
+//CPG_UTCL1_CNTL
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPG_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPG_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define CPG_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPG_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPG_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPG_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPG_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define CPG_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPG_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPG_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPC_UTCL1_CNTL
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPC_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPC_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define CPC_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPC_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPC_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPC_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPC_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define CPC_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPC_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPC_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPF_UTCL1_CNTL
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPF_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPF_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define CPF_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPF_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPF_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE__SHIFT 0x1f
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPF_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPF_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define CPF_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPF_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPF_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE_MASK 0x80000000L
+//CP_AQL_SMM_STATUS
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM__SHIFT 0x0
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM_MASK 0xFFFFFFFFL
+//CP_RB0_BASE
+#define CP_RB0_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB0_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB_BASE
+#define CP_RB_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB0_CNTL
+#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB0_CNTL__BUF_SWAP__SHIFT 0x11
+#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB0_CNTL__BUF_SWAP_MASK 0x00060000L
+#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x01000000L
+#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_CNTL
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB_CNTL__CACHE_POLICY_MASK 0x01000000L
+#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_RPTR_WR
+#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x0
+#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0x000FFFFFL
+//CP_RB0_RPTR_ADDR
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB_RPTR_ADDR
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB0_RPTR_ADDR_HI
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_RPTR_ADDR_HI
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB0_BUFSZ_MASK
+#define CP_RB0_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB0_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_RB_BUFSZ_MASK
+#define CP_RB_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_RB_WPTR_POLL_ADDR_LO
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x2
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_RB_WPTR_POLL_ADDR_HI
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x0
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0x0000FFFFL
+//CP_INT_CNTL
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_DEVICE_ID
+#define CP_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define CP_DEVICE_ID__DEVICE_ID_MASK 0x000000FFL
+//CP_ME0_PIPE_PRIORITY_CNTS
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_RING_PRIORITY_CNTS
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME0_PIPE0_PRIORITY
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING0_PRIORITY
+#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME0_PIPE1_PRIORITY
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING1_PRIORITY
+#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME0_PIPE2_PRIORITY
+#define CP_ME0_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING2_PRIORITY
+#define CP_RING2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_FATAL_ERROR
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR__SHIFT 0x0
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR__SHIFT 0x1
+#define CP_FATAL_ERROR__GFX_HALT_PROC__SHIFT 0x2
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR__SHIFT 0x3
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN__SHIFT 0x4
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR_MASK 0x00000001L
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR_MASK 0x00000002L
+#define CP_FATAL_ERROR__GFX_HALT_PROC_MASK 0x00000004L
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR_MASK 0x00000008L
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN_MASK 0x00000010L
+//CP_RB_VMID
+#define CP_RB_VMID__RB0_VMID__SHIFT 0x0
+#define CP_RB_VMID__RB1_VMID__SHIFT 0x8
+#define CP_RB_VMID__RB2_VMID__SHIFT 0x10
+#define CP_RB_VMID__RB0_VMID_MASK 0x0000000FL
+#define CP_RB_VMID__RB1_VMID_MASK 0x00000F00L
+#define CP_RB_VMID__RB2_VMID_MASK 0x000F0000L
+//CP_ME0_PIPE0_VMID
+#define CP_ME0_PIPE0_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE0_VMID__VMID_MASK 0x0000000FL
+//CP_ME0_PIPE1_VMID
+#define CP_ME0_PIPE1_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE1_VMID__VMID_MASK 0x0000000FL
+//CP_RB0_WPTR
+#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR
+#define CP_RB_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB0_WPTR_HI
+#define CP_RB0_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR_HI
+#define CP_RB_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR
+#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR_HI
+#define CP_RB1_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB2_WPTR
+#define CP_RB2_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB2_WPTR__RB_WPTR_MASK 0x000FFFFFL
+//CP_RB_DOORBELL_CONTROL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_RANGE_LOWER
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x0FFFFFFCL
+//CP_RB_DOORBELL_RANGE_UPPER
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x0FFFFFFCL
+//CP_MEC_DOORBELL_RANGE_LOWER
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x0FFFFFFCL
+//CP_MEC_DOORBELL_RANGE_UPPER
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x0FFFFFFCL
+//CPG_UTCL1_ERROR
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CPC_UTCL1_ERROR
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CP_RB1_BASE
+#define CP_RB1_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB1_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB1_CNTL
+#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x01000000L
+#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB1_RPTR_ADDR
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB1_RPTR_ADDR_HI
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB2_BASE
+#define CP_RB2_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB2_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB2_CNTL
+#define CP_RB2_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB2_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB2_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB2_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB2_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB2_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB2_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB2_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB2_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB2_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB2_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB2_CNTL__CACHE_POLICY_MASK 0x01000000L
+#define CP_RB2_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB2_RPTR_ADDR
+#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB2_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB2_RPTR_ADDR_HI
+#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB2_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB0_ACTIVE
+#define CP_RB0_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB0_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB_ACTIVE
+#define CP_RB_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_INT_CNTL_RING0
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_CNTL_RING1
+#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING1__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING1__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL_RING1__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING1__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING1__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_CNTL_RING2
+#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING2__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING2__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL_RING2__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING2__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING2__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING2__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL_RING2__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING2__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING2__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL_RING2__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING2__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING2__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING2__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING2__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING2__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING2__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS_RING0
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING0__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING0__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_INT_STATUS_RING1
+#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING1__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING1__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING1__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING1__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS_RING1__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING1__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING1__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_INT_STATUS_RING2
+#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING2__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING2__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS_RING2__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING2__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING2__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING2__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS_RING2__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING2__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING2__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS_RING2__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING2__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING2__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING2__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING2__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING2__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING2__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING2__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_PWR_CNTL
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0__SHIFT 0x0
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1__SHIFT 0x1
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0__SHIFT 0x8
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1__SHIFT 0x9
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2__SHIFT 0xa
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3__SHIFT 0xb
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0__SHIFT 0x10
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1__SHIFT 0x11
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2__SHIFT 0x12
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3__SHIFT 0x13
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0_MASK 0x00000001L
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1_MASK 0x00000002L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0_MASK 0x00000100L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1_MASK 0x00000200L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2_MASK 0x00000400L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3_MASK 0x00000800L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0_MASK 0x00010000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1_MASK 0x00020000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2_MASK 0x00040000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3_MASK 0x00080000L
+//CP_MEM_SLP_CNTL
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN__SHIFT 0x0
+#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN__SHIFT 0x1
+#define CP_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
+#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY__SHIFT 0x8
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY__SHIFT 0x10
+#define CP_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK 0x00000001L
+#define CP_MEM_SLP_CNTL__CP_MEM_DS_EN_MASK 0x00000002L
+#define CP_MEM_SLP_CNTL__RESERVED_MASK 0x0000007CL
+#define CP_MEM_SLP_CNTL__CP_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_ON_DELAY_MASK 0x0000FF00L
+#define CP_MEM_SLP_CNTL__CP_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
+#define CP_MEM_SLP_CNTL__RESERVED1_MASK 0xFF000000L
+//CP_ECC_FIRSTOCCURRENCE
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT__SHIFT 0x4
+#define CP_ECC_FIRSTOCCURRENCE__ME__SHIFT 0x8
+#define CP_ECC_FIRSTOCCURRENCE__PIPE__SHIFT 0xa
+#define CP_ECC_FIRSTOCCURRENCE__QUEUE__SHIFT 0xc
+#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x10
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT_MASK 0x000000F0L
+#define CP_ECC_FIRSTOCCURRENCE__ME_MASK 0x00000300L
+#define CP_ECC_FIRSTOCCURRENCE__PIPE_MASK 0x00000C00L
+#define CP_ECC_FIRSTOCCURRENCE__QUEUE_MASK 0x00007000L
+#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0x000F0000L
+//CP_ECC_FIRSTOCCURRENCE_RING0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_ECC_FIRSTOCCURRENCE_RING1
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_ECC_FIRSTOCCURRENCE_RING2
+#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING2__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_PQ_WPTR_POLL_CNTL
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT__SHIFT 0x1d
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE__SHIFT 0x1e
+#define CP_PQ_WPTR_POLL_CNTL__EN__SHIFT 0x1f
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD_MASK 0x000000FFL
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT_MASK 0x20000000L
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE_MASK 0x40000000L
+#define CP_PQ_WPTR_POLL_CNTL__EN_MASK 0x80000000L
+//CP_PQ_WPTR_POLL_CNTL1
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK_MASK 0xFFFFFFFFL
+//CP_ME1_PIPE0_INT_CNTL
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_CNTL
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_CNTL
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_CNTL
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_CNTL
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_CNTL
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_CNTL
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_CNTL
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE0_INT_STATUS
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_STATUS
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_STATUS
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_STATUS
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_STATUS
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_STATUS
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_STATUS
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_STATUS
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE_PRIORITY_CNTS
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME1_PIPE0_PRIORITY
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE1_PRIORITY
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE2_PRIORITY
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE3_PRIORITY
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE_PRIORITY_CNTS
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME2_PIPE0_PRIORITY
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE1_PRIORITY
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE2_PRIORITY
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE3_PRIORITY
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_CE_PRGRM_CNTR_START
+#define CP_CE_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_CE_PRGRM_CNTR_START__IP_START_MASK 0x000007FFL
+//CP_PFP_PRGRM_CNTR_START
+#define CP_PFP_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_PFP_PRGRM_CNTR_START__IP_START_MASK 0x00001FFFL
+//CP_ME_PRGRM_CNTR_START
+#define CP_ME_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_ME_PRGRM_CNTR_START__IP_START_MASK 0x00000FFFL
+//CP_MEC1_PRGRM_CNTR_START
+#define CP_MEC1_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC1_PRGRM_CNTR_START__IP_START_MASK 0x0000FFFFL
+//CP_MEC2_PRGRM_CNTR_START
+#define CP_MEC2_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC2_PRGRM_CNTR_START__IP_START_MASK 0x0000FFFFL
+//CP_CE_INTR_ROUTINE_START
+#define CP_CE_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_CE_INTR_ROUTINE_START__IR_START_MASK 0x000007FFL
+//CP_PFP_INTR_ROUTINE_START
+#define CP_PFP_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_PFP_INTR_ROUTINE_START__IR_START_MASK 0x00001FFFL
+//CP_ME_INTR_ROUTINE_START
+#define CP_ME_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_ME_INTR_ROUTINE_START__IR_START_MASK 0x00000FFFL
+//CP_MEC1_INTR_ROUTINE_START
+#define CP_MEC1_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC1_INTR_ROUTINE_START__IR_START_MASK 0x0000FFFFL
+//CP_MEC2_INTR_ROUTINE_START
+#define CP_MEC2_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC2_INTR_ROUTINE_START__IR_START_MASK 0x0000FFFFL
+//CP_CONTEXT_CNTL
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX__SHIFT 0x0
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX__SHIFT 0x4
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX__SHIFT 0x10
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX__SHIFT 0x14
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_WD_CNTX_MASK 0x00000007L
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX_MASK 0x00000070L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_WD_CNTX_MASK 0x00070000L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX_MASK 0x00700000L
+//CP_MAX_CONTEXT
+#define CP_MAX_CONTEXT__MAX_CONTEXT__SHIFT 0x0
+#define CP_MAX_CONTEXT__MAX_CONTEXT_MASK 0x00000007L
+//CP_IQ_WAIT_TIME1
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD__SHIFT 0x0
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD__SHIFT 0x8
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD__SHIFT 0x10
+#define CP_IQ_WAIT_TIME1__GWS__SHIFT 0x18
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME1__GWS_MASK 0xFF000000L
+//CP_IQ_WAIT_TIME2
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP__SHIFT 0x0
+#define CP_IQ_WAIT_TIME2__SCH_WAVE__SHIFT 0x8
+#define CP_IQ_WAIT_TIME2__SEM_REARM__SHIFT 0x10
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY__SHIFT 0x18
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME2__SCH_WAVE_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME2__SEM_REARM_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY_MASK 0xFF000000L
+//CP_RB0_BASE_HI
+#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_RB1_BASE_HI
+#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_VMID_RESET
+#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x0
+#define CP_VMID_RESET__RESET_REQUEST_MASK 0x0000FFFFL
+//CPC_INT_CNTL
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CPC_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CPC_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CPC_INT_STATUS
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CPC_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CPC_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_VMID_PREEMPT
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x0
+#define CP_VMID_PREEMPT__VIRT_COMMAND__SHIFT 0x10
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0x0000FFFFL
+#define CP_VMID_PREEMPT__VIRT_COMMAND_MASK 0x000F0000L
+//CPC_INT_CNTX_ID
+#define CPC_INT_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CPC_INT_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_PQ_STATUS
+#define CP_PQ_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_PQ_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_PQ_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+//CP_CPC_IC_BASE_LO
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_CPC_IC_BASE_HI
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_CPC_IC_BASE_CNTL
+#define CP_CPC_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_CPC_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY_MASK 0x01000000L
+//CP_CPC_IC_OP_CNTL
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_MEC1_F32_INT_DIS
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+//CP_MEC2_F32_INT_DIS
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+//CP_VMID_STATUS
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS__SHIFT 0x0
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0x0000FFFFL
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_cppdec2
+//CP_RB_DOORBELL_CONTROL_SCH_0
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_0__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_1
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_1__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_2
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_2__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_3
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_3__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_4
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_4__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_5
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_5__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_6
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_6__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CONTROL_SCH_7
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL_SCH_7__DOORBELL_HIT_MASK 0x80000000L
+//CP_RB_DOORBELL_CLEAR
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE__SHIFT 0x0
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR__SHIFT 0x8
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR__SHIFT 0x9
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR__SHIFT 0xa
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR__SHIFT 0xb
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR__SHIFT 0xc
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR__SHIFT 0xd
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE_MASK 0x00000007L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR_MASK 0x00000100L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR_MASK 0x00000200L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR_MASK 0x00000400L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR_MASK 0x00000800L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR_MASK 0x00001000L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR_MASK 0x00002000L
+//CP_GFX_MQD_CONTROL
+#define CP_GFX_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY_MASK 0x01000000L
+//CP_GFX_MQD_BASE_ADDR
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_GFX_MQD_BASE_ADDR_HI
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_STATUS
+#define CP_RB_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_RB_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_RB_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_RB_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+//CPG_UTCL1_STATUS
+#define CPG_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPG_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPG_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPG_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPG_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPG_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPC_UTCL1_STATUS
+#define CPC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPF_UTCL1_STATUS
+#define CPF_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPF_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPF_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPF_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPF_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPF_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CP_SD_CNTL
+#define CP_SD_CNTL__CPF_EN__SHIFT 0x0
+#define CP_SD_CNTL__CPG_EN__SHIFT 0x1
+#define CP_SD_CNTL__CPC_EN__SHIFT 0x2
+#define CP_SD_CNTL__RLC_EN__SHIFT 0x3
+#define CP_SD_CNTL__SPI_EN__SHIFT 0x4
+#define CP_SD_CNTL__WD_EN__SHIFT 0x5
+#define CP_SD_CNTL__IA_EN__SHIFT 0x6
+#define CP_SD_CNTL__PA_EN__SHIFT 0x7
+#define CP_SD_CNTL__RMI_EN__SHIFT 0x8
+#define CP_SD_CNTL__EA_EN__SHIFT 0x9
+#define CP_SD_CNTL__CPF_EN_MASK 0x00000001L
+#define CP_SD_CNTL__CPG_EN_MASK 0x00000002L
+#define CP_SD_CNTL__CPC_EN_MASK 0x00000004L
+#define CP_SD_CNTL__RLC_EN_MASK 0x00000008L
+#define CP_SD_CNTL__SPI_EN_MASK 0x00000010L
+#define CP_SD_CNTL__WD_EN_MASK 0x00000020L
+#define CP_SD_CNTL__IA_EN_MASK 0x00000040L
+#define CP_SD_CNTL__PA_EN_MASK 0x00000080L
+#define CP_SD_CNTL__RMI_EN_MASK 0x00000100L
+#define CP_SD_CNTL__EA_EN_MASK 0x00000200L
+//CP_SOFT_RESET_CNTL
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET__SHIFT 0x0
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET__SHIFT 0x1
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET__SHIFT 0x2
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET__SHIFT 0x3
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET__SHIFT 0x4
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET__SHIFT 0x5
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET__SHIFT 0x6
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET_MASK 0x00000001L
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET_MASK 0x00000002L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET_MASK 0x00000004L
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET_MASK 0x00000008L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET_MASK 0x00000010L
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET_MASK 0x00000020L
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET_MASK 0x00000040L
+//CP_CPC_GFX_CNTL
+#define CP_CPC_GFX_CNTL__QUEUEID__SHIFT 0x0
+#define CP_CPC_GFX_CNTL__PIPEID__SHIFT 0x3
+#define CP_CPC_GFX_CNTL__MEID__SHIFT 0x5
+#define CP_CPC_GFX_CNTL__VALID__SHIFT 0x7
+#define CP_CPC_GFX_CNTL__QUEUEID_MASK 0x00000007L
+#define CP_CPC_GFX_CNTL__PIPEID_MASK 0x00000018L
+#define CP_CPC_GFX_CNTL__MEID_MASK 0x00000060L
+#define CP_CPC_GFX_CNTL__VALID_MASK 0x00000080L
+
+
+// addressBlock: gc_spipdec
+//SPI_ARB_PRIORITY
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x0
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x3
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x6
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x9
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0xc
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0xe
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x10
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x12
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x00000007L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x00000038L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x000001C0L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0x00000E00L
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x00003000L
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0x0000C000L
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x00030000L
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0x000C0000L
+//SPI_ARB_CYCLES_0
+#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xFFFF0000L
+//SPI_ARB_CYCLES_1
+#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xFFFF0000L
+//SPI_CDBG_SYS_GFX
+#define SPI_CDBG_SYS_GFX__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_GFX__VS_EN__SHIFT 0x1
+#define SPI_CDBG_SYS_GFX__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_GFX__ES_EN__SHIFT 0x3
+#define SPI_CDBG_SYS_GFX__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_GFX__LS_EN__SHIFT 0x5
+#define SPI_CDBG_SYS_GFX__CS_EN__SHIFT 0x6
+#define SPI_CDBG_SYS_GFX__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_GFX__VS_EN_MASK 0x0002L
+#define SPI_CDBG_SYS_GFX__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_GFX__ES_EN_MASK 0x0008L
+#define SPI_CDBG_SYS_GFX__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_GFX__LS_EN_MASK 0x0020L
+#define SPI_CDBG_SYS_GFX__CS_EN_MASK 0x0040L
+//SPI_CDBG_SYS_HP3D
+#define SPI_CDBG_SYS_HP3D__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_HP3D__VS_EN__SHIFT 0x1
+#define SPI_CDBG_SYS_HP3D__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_HP3D__ES_EN__SHIFT 0x3
+#define SPI_CDBG_SYS_HP3D__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_HP3D__LS_EN__SHIFT 0x5
+#define SPI_CDBG_SYS_HP3D__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_HP3D__VS_EN_MASK 0x0002L
+#define SPI_CDBG_SYS_HP3D__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_HP3D__ES_EN_MASK 0x0008L
+#define SPI_CDBG_SYS_HP3D__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_HP3D__LS_EN_MASK 0x0020L
+//SPI_CDBG_SYS_CS0
+#define SPI_CDBG_SYS_CS0__PIPE0__SHIFT 0x0
+#define SPI_CDBG_SYS_CS0__PIPE1__SHIFT 0x8
+#define SPI_CDBG_SYS_CS0__PIPE2__SHIFT 0x10
+#define SPI_CDBG_SYS_CS0__PIPE3__SHIFT 0x18
+#define SPI_CDBG_SYS_CS0__PIPE0_MASK 0x000000FFL
+#define SPI_CDBG_SYS_CS0__PIPE1_MASK 0x0000FF00L
+#define SPI_CDBG_SYS_CS0__PIPE2_MASK 0x00FF0000L
+#define SPI_CDBG_SYS_CS0__PIPE3_MASK 0xFF000000L
+//SPI_CDBG_SYS_CS1
+#define SPI_CDBG_SYS_CS1__PIPE0__SHIFT 0x0
+#define SPI_CDBG_SYS_CS1__PIPE1__SHIFT 0x8
+#define SPI_CDBG_SYS_CS1__PIPE2__SHIFT 0x10
+#define SPI_CDBG_SYS_CS1__PIPE3__SHIFT 0x18
+#define SPI_CDBG_SYS_CS1__PIPE0_MASK 0x000000FFL
+#define SPI_CDBG_SYS_CS1__PIPE1_MASK 0x0000FF00L
+#define SPI_CDBG_SYS_CS1__PIPE2_MASK 0x00FF0000L
+#define SPI_CDBG_SYS_CS1__PIPE3_MASK 0xFF000000L
+//SPI_WCL_PIPE_PERCENT_GFX
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE__SHIFT 0x7
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE__SHIFT 0x11
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_GFX__LS_GRP_VALUE_MASK 0x00000F80L
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_GFX__ES_GRP_VALUE_MASK 0x003E0000L
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_HP3D
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_CS0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS1
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS2
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS3
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS4
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS5
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS6
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS7
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7FL
+//SPI_GDBG_WAVE_CNTL
+#define SPI_GDBG_WAVE_CNTL__STALL_RA__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL__STALL_VMID__SHIFT 0x1
+#define SPI_GDBG_WAVE_CNTL__STALL_RA_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL__STALL_VMID_MASK 0x0001FFFEL
+//SPI_GDBG_TRAP_CONFIG
+#define SPI_GDBG_TRAP_CONFIG__ME_SEL__SHIFT 0x0
+#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL__SHIFT 0x2
+#define SPI_GDBG_TRAP_CONFIG__QUEUE_SEL__SHIFT 0x4
+#define SPI_GDBG_TRAP_CONFIG__ME_MATCH__SHIFT 0x7
+#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH__SHIFT 0x8
+#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH__SHIFT 0x9
+#define SPI_GDBG_TRAP_CONFIG__TRAP_EN__SHIFT 0xf
+#define SPI_GDBG_TRAP_CONFIG__VMID_SEL__SHIFT 0x10
+#define SPI_GDBG_TRAP_CONFIG__ME_SEL_MASK 0x00000003L
+#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL_MASK 0x0000000CL
+#define SPI_GDBG_TRAP_CONFIG__QUEUE_SEL_MASK 0x00000070L
+#define SPI_GDBG_TRAP_CONFIG__ME_MATCH_MASK 0x00000080L
+#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH_MASK 0x00000100L
+#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH_MASK 0x00000200L
+#define SPI_GDBG_TRAP_CONFIG__TRAP_EN_MASK 0x00008000L
+#define SPI_GDBG_TRAP_CONFIG__VMID_SEL_MASK 0xFFFF0000L
+//SPI_GDBG_TRAP_MASK
+#define SPI_GDBG_TRAP_MASK__EXCP_EN__SHIFT 0x0
+#define SPI_GDBG_TRAP_MASK__REPLACE__SHIFT 0x9
+#define SPI_GDBG_TRAP_MASK__EXCP_EN_MASK 0x01FFL
+#define SPI_GDBG_TRAP_MASK__REPLACE_MASK 0x0200L
+//SPI_GDBG_WAVE_CNTL2
+#define SPI_GDBG_WAVE_CNTL2__VMID_MASK__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL2__MODE__SHIFT 0x10
+#define SPI_GDBG_WAVE_CNTL2__VMID_MASK_MASK 0x0000FFFFL
+#define SPI_GDBG_WAVE_CNTL2__MODE_MASK 0x00030000L
+//SPI_GDBG_WAVE_CNTL3
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL3__STALL_VS__SHIFT 0x1
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS__SHIFT 0x2
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS__SHIFT 0x3
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG__SHIFT 0x4
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0__SHIFT 0x5
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1__SHIFT 0x6
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2__SHIFT 0x7
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3__SHIFT 0x8
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4__SHIFT 0x9
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5__SHIFT 0xa
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6__SHIFT 0xb
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7__SHIFT 0xc
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION__SHIFT 0xd
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT__SHIFT 0x1c
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL3__STALL_VS_MASK 0x00000002L
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS_MASK 0x00000004L
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS_MASK 0x00000008L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG_MASK 0x00000010L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0_MASK 0x00000020L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1_MASK 0x00000040L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2_MASK 0x00000080L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3_MASK 0x00000100L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4_MASK 0x00000200L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5_MASK 0x00000400L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6_MASK 0x00000800L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7_MASK 0x00001000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION_MASK 0x0FFFE000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT_MASK 0x10000000L
+//SPI_GDBG_TRAP_DATA0
+#define SPI_GDBG_TRAP_DATA0__DATA__SHIFT 0x0
+#define SPI_GDBG_TRAP_DATA0__DATA_MASK 0xFFFFFFFFL
+//SPI_GDBG_TRAP_DATA1
+#define SPI_GDBG_TRAP_DATA1__DATA__SHIFT 0x0
+#define SPI_GDBG_TRAP_DATA1__DATA_MASK 0xFFFFFFFFL
+//SPI_COMPUTE_QUEUE_RESET
+#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
+#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L
+//SPI_RESOURCE_RESERVE_CU_0
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_0__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_0__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_1
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_1__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_1__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_2
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_2__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_2__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_3
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_3__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_3__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_4
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_4__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_4__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_5
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_5__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_5__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_6
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_6__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_6__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_7
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_7__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_7__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_8
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_8__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_8__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_9
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_9__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_9__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_EN_CU_0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_0__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_1__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_2
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_2__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_3
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_3__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_4
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_4__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_5
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_5__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_6
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_6__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_7
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_7__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_8
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_8__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_9
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_9__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_CU_10
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_10__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_10__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_11
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_11__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_11__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_EN_CU_10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_10__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_11
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_11__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_CU_12
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_12__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_12__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_13
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_13__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_13__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_14
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_14__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_14__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_15
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_15__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_15__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_EN_CU_12
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_12__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_13
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_13__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_14
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_14__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_RESOURCE_RESERVE_EN_CU_15
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY__SHIFT 0x18
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK_MASK 0x00FF0000L
+#define SPI_RESOURCE_RESERVE_EN_CU_15__RESERVE_SPACE_ONLY_MASK 0x01000000L
+//SPI_COMPUTE_WF_CTX_SAVE
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE__SHIFT 0x0
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN__SHIFT 0x1
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN__SHIFT 0x2
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY__SHIFT 0x1e
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY__SHIFT 0x1f
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE_MASK 0x00000001L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN_MASK 0x00000002L
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN_MASK 0x00000004L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY_MASK 0x40000000L
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY_MASK 0x80000000L
+//SPI_ARB_CNTL_0
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT__SHIFT 0x0
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT__SHIFT 0x4
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT__SHIFT 0x8
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT_MASK 0x0000000FL
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT_MASK 0x000000F0L
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT_MASK 0x00000F00L
+
+
+// addressBlock: gc_cpphqddec
+//CP_HQD_GFX_CONTROL
+#define CP_HQD_GFX_CONTROL__MESSAGE__SHIFT 0x0
+#define CP_HQD_GFX_CONTROL__MISC__SHIFT 0x4
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT 0xf
+#define CP_HQD_GFX_CONTROL__MESSAGE_MASK 0x0000000FL
+#define CP_HQD_GFX_CONTROL__MISC_MASK 0x00007FF0L
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN_MASK 0x00008000L
+//CP_HQD_GFX_STATUS
+#define CP_HQD_GFX_STATUS__STATUS__SHIFT 0x0
+#define CP_HQD_GFX_STATUS__STATUS_MASK 0x0000FFFFL
+//CP_HPD_ROQ_OFFSETS
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET_MASK 0x003F0000L
+//CP_HPD_STATUS0
+#define CP_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
+#define CP_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
+#define CP_HPD_STATUS0__FETCHING_MQD__SHIFT 0x10
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB__SHIFT 0x11
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ__SHIFT 0x12
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
+#define CP_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
+#define CP_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
+#define CP_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
+#define CP_HPD_STATUS0__FETCHING_MQD_MASK 0x00010000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB_MASK 0x00020000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ_MASK 0x00040000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
+//CP_HPD_UTCL1_CNTL
+#define CP_HPD_UTCL1_CNTL__SELECT__SHIFT 0x0
+#define CP_HPD_UTCL1_CNTL__SELECT_MASK 0x0000000FL
+//CP_HPD_UTCL1_ERROR
+#define CP_HPD_UTCL1_ERROR__ADDR_HI__SHIFT 0x0
+#define CP_HPD_UTCL1_ERROR__TYPE__SHIFT 0x10
+#define CP_HPD_UTCL1_ERROR__VMID__SHIFT 0x14
+#define CP_HPD_UTCL1_ERROR__ADDR_HI_MASK 0x0000FFFFL
+#define CP_HPD_UTCL1_ERROR__TYPE_MASK 0x00010000L
+#define CP_HPD_UTCL1_ERROR__VMID_MASK 0x00F00000L
+//CP_HPD_UTCL1_ERROR_ADDR
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR__SHIFT 0xc
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR_MASK 0xFFFFF000L
+//CP_MQD_BASE_ADDR
+#define CP_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_MQD_BASE_ADDR_HI
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_ACTIVE
+#define CP_HQD_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_HQD_ACTIVE__BUSY_GATE__SHIFT 0x1
+#define CP_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
+#define CP_HQD_ACTIVE__BUSY_GATE_MASK 0x00000002L
+//CP_HQD_VMID
+#define CP_HQD_VMID__VMID__SHIFT 0x0
+#define CP_HQD_VMID__IB_VMID__SHIFT 0x8
+#define CP_HQD_VMID__VQID__SHIFT 0x10
+#define CP_HQD_VMID__VMID_MASK 0x0000000FL
+#define CP_HQD_VMID__IB_VMID_MASK 0x00000F00L
+#define CP_HQD_VMID__VQID_MASK 0x03FF0000L
+//CP_HQD_PERSISTENT_STATE
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ__SHIFT 0x0
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT 0x8
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN__SHIFT 0x15
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN__SHIFT 0x16
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN__SHIFT 0x17
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN__SHIFT 0x18
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN__SHIFT 0x19
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN__SHIFT 0x1a
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN__SHIFT 0x1b
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE__SHIFT 0x1c
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES__SHIFT 0x1d
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT 0x1e
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE__SHIFT 0x1f
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK 0x00000001L
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE_MASK 0x0003FF00L
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN_MASK 0x00200000L
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN_MASK 0x00400000L
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN_MASK 0x00800000L
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN_MASK 0x01000000L
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN_MASK 0x02000000L
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN_MASK 0x04000000L
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN_MASK 0x08000000L
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE_MASK 0x10000000L
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES_MASK 0x20000000L
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE_MASK 0x40000000L
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE_MASK 0x80000000L
+//CP_HQD_PIPE_PRIORITY
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY__SHIFT 0x0
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY_MASK 0x00000003L
+//CP_HQD_QUEUE_PRIORITY
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
+//CP_HQD_QUANTUM
+#define CP_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
+#define CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x4
+#define CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
+#define CP_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
+#define CP_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000010L
+#define CP_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x00003F00L
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
+//CP_HQD_PQ_BASE
+#define CP_HQD_PQ_BASE__ADDR__SHIFT 0x0
+#define CP_HQD_PQ_BASE__ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_BASE_HI
+#define CP_HQD_PQ_BASE_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_BASE_HI__ADDR_HI_MASK 0x000000FFL
+//CP_HQD_PQ_RPTR
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_RPTR_REPORT_ADDR
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR__SHIFT 0x2
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_PQ_RPTR_REPORT_ADDR_HI
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_WPTR_POLL_ADDR
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR__SHIFT 0x3
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR_MASK 0xFFFFFFF8L
+//CP_HQD_PQ_WPTR_POLL_ADDR_HI
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_DOORBELL_CONTROL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT 0x0
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE__SHIFT 0x1c
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT__SHIFT 0x1d
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE_MASK 0x00000001L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK 0x10000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT_MASK 0x20000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_HQD_PQ_CONTROL
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE__SHIFT 0x0
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY__SHIFT 0x6
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY__SHIFT 0x7
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT 0x8
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT 0xe
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY__SHIFT 0xf
+#define CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT 0x10
+#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT 0x11
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT 0x19
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR__SHIFT 0x1b
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH__SHIFT 0x1c
+#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP__SHIFT 0x1d
+#define CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT 0x1e
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT 0x1f
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK 0x0000003FL
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY_MASK 0x00000040L
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY_MASK 0x00000080L
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK 0x00003F00L
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN_MASK 0x00004000L
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY_MASK 0x00008000L
+#define CP_HQD_PQ_CONTROL__WPP_CLAMP_EN_MASK 0x00010000L
+#define CP_HQD_PQ_CONTROL__ENDIAN_SWAP_MASK 0x00060000L
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY_MASK 0x01000000L
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR_MASK 0x06000000L
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK 0x08000000L
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK 0x10000000L
+#define CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK 0x20000000L
+#define CP_HQD_PQ_CONTROL__PRIV_STATE_MASK 0x40000000L
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK 0x80000000L
+//CP_HQD_IB_BASE_ADDR
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR__SHIFT 0x2
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_IB_BASE_ADDR_HI
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_IB_RPTR
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET_MASK 0x000FFFFFL
+//CP_HQD_IB_CONTROL
+#define CP_HQD_IB_CONTROL__IB_SIZE__SHIFT 0x0
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IB_CONTROL__PROCESSING_IB__SHIFT 0x1f
+#define CP_HQD_IB_CONTROL__IB_SIZE_MASK 0x000FFFFFL
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY_MASK 0x01000000L
+#define CP_HQD_IB_CONTROL__PROCESSING_IB_MASK 0x80000000L
+//CP_HQD_IQ_TIMER
+#define CP_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
+#define CP_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE__SHIFT 0x10
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
+#define CP_HQD_IQ_TIMER__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IQ_TIMER__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x19
+#define CP_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN__SHIFT 0x1d
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ__SHIFT 0x1e
+#define CP_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
+#define CP_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
+#define CP_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE_MASK 0x003F0000L
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
+#define CP_HQD_IQ_TIMER__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IQ_TIMER__CACHE_POLICY_MASK 0x01000000L
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x02000000L
+#define CP_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN_MASK 0x20000000L
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ_MASK 0x40000000L
+#define CP_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
+//CP_HQD_IQ_RPTR
+#define CP_HQD_IQ_RPTR__OFFSET__SHIFT 0x0
+#define CP_HQD_IQ_RPTR__OFFSET_MASK 0x0000003FL
+//CP_HQD_DEQUEUE_REQUEST
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT__SHIFT 0x8
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x00000007L
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT_MASK 0x00000100L
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
+//CP_HQD_DMA_OFFLOAD
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+//CP_HQD_OFFLOAD
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
+//CP_HQD_SEMA_CMD
+#define CP_HQD_SEMA_CMD__RETRY__SHIFT 0x0
+#define CP_HQD_SEMA_CMD__RESULT__SHIFT 0x1
+#define CP_HQD_SEMA_CMD__RETRY_MASK 0x00000001L
+#define CP_HQD_SEMA_CMD__RESULT_MASK 0x00000006L
+//CP_HQD_MSG_TYPE
+#define CP_HQD_MSG_TYPE__ACTION__SHIFT 0x0
+#define CP_HQD_MSG_TYPE__SAVE_STATE__SHIFT 0x4
+#define CP_HQD_MSG_TYPE__ACTION_MASK 0x00000007L
+#define CP_HQD_MSG_TYPE__SAVE_STATE_MASK 0x00000070L
+//CP_HQD_ATOMIC0_PREOP_LO
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC0_PREOP_HI
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_LO
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_HI
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER0
+#define CP_HQD_HQ_SCHEDULER0__SCHEDULER__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER0__SCHEDULER_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_STATUS0
+#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS__SHIFT 0x0
+#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT__SHIFT 0x2
+#define CP_HQD_HQ_STATUS0__RSV_6_4__SHIFT 0x4
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT__SHIFT 0x7
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY__SHIFT 0x8
+#define CP_HQD_HQ_STATUS0__PG_ACTIVATED__SHIFT 0x9
+#define CP_HQD_HQ_STATUS0__RSVR_29_10__SHIFT 0xa
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN__SHIFT 0x1f
+#define CP_HQD_HQ_STATUS0__DEQUEUE_STATUS_MASK 0x00000003L
+#define CP_HQD_HQ_STATUS0__DEQUEUE_RETRY_CNT_MASK 0x0000000CL
+#define CP_HQD_HQ_STATUS0__RSV_6_4_MASK 0x00000070L
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT_MASK 0x00000080L
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY_MASK 0x00000100L
+#define CP_HQD_HQ_STATUS0__PG_ACTIVATED_MASK 0x00000200L
+#define CP_HQD_HQ_STATUS0__RSVR_29_10_MASK 0x3FFFFC00L
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN_MASK 0x80000000L
+//CP_HQD_HQ_CONTROL0
+#define CP_HQD_HQ_CONTROL0__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL0__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER1
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER_MASK 0xFFFFFFFFL
+//CP_MQD_CONTROL
+#define CP_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+#define CP_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
+#define CP_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
+#define CP_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
+#define CP_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_MQD_CONTROL__CACHE_POLICY_MASK 0x01000000L
+//CP_HQD_HQ_STATUS1
+#define CP_HQD_HQ_STATUS1__STATUS__SHIFT 0x0
+#define CP_HQD_HQ_STATUS1__STATUS_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_CONTROL1
+#define CP_HQD_HQ_CONTROL1__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL1__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR_HI
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x000000FFL
+//CP_HQD_EOP_CONTROL
+#define CP_HQD_EOP_CONTROL__EOP_SIZE__SHIFT 0x0
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP__SHIFT 0x8
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN__SHIFT 0xc
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB__SHIFT 0xd
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN__SHIFT 0xe
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER__SHIFT 0x15
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN__SHIFT 0x16
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT__SHIFT 0x1d
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM__SHIFT 0x1f
+#define CP_HQD_EOP_CONTROL__EOP_SIZE_MASK 0x0000003FL
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP_MASK 0x00000100L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN_MASK 0x00001000L
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB_MASK 0x00002000L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN_MASK 0x00004000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_MASK 0x00200000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN_MASK 0x00400000L
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY_MASK 0x01000000L
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT_MASK 0x60000000L
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM_MASK 0x80000000L
+//CP_HQD_EOP_RPTR
+#define CP_HQD_EOP_RPTR__RPTR__SHIFT 0x0
+#define CP_HQD_EOP_RPTR__RESET_FETCHER__SHIFT 0x1c
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND__SHIFT 0x1d
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR__SHIFT 0x1e
+#define CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT 0x1f
+#define CP_HQD_EOP_RPTR__RPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_RPTR__RESET_FETCHER_MASK 0x10000000L
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND_MASK 0x20000000L
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR_MASK 0x40000000L
+#define CP_HQD_EOP_RPTR__INIT_FETCHER_MASK 0x80000000L
+//CP_HQD_EOP_WPTR
+#define CP_HQD_EOP_WPTR__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR__EOP_EMPTY__SHIFT 0xf
+#define CP_HQD_EOP_WPTR__EOP_AVAIL__SHIFT 0x10
+#define CP_HQD_EOP_WPTR__WPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_WPTR__EOP_EMPTY_MASK 0x00008000L
+#define CP_HQD_EOP_WPTR__EOP_AVAIL_MASK 0x1FFF0000L
+//CP_HQD_EOP_EVENTS
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT__SHIFT 0x0
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND__SHIFT 0x10
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT_MASK 0x00000FFFL
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND_MASK 0x00010000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_LO
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_HI
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_CTX_SAVE_CONTROL
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY_MASK 0x00000008L
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
+//CP_HQD_CNTL_STACK_OFFSET
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET_MASK 0x00007FFCL
+//CP_HQD_CNTL_STACK_SIZE
+#define CP_HQD_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CNTL_STACK_SIZE__SIZE_MASK 0x00007000L
+//CP_HQD_WG_STATE_OFFSET
+#define CP_HQD_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_WG_STATE_OFFSET__OFFSET_MASK 0x01FFFFFCL
+//CP_HQD_CTX_SAVE_SIZE
+#define CP_HQD_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_SIZE__SIZE_MASK 0x01FFF000L
+//CP_HQD_GDS_RESOURCE_STATE
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED__SHIFT 0x0
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED__SHIFT 0x1
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE__SHIFT 0x4
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR__SHIFT 0xc
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED_MASK 0x00000001L
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED_MASK 0x00000002L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE_MASK 0x000003F0L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR_MASK 0x0003F000L
+//CP_HQD_ERROR
+#define CP_HQD_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_HQD_ERROR__AQL_ERROR__SHIFT 0x5
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR__SHIFT 0x8
+#define CP_HQD_ERROR__IB_UTCL1_ERROR__SHIFT 0x9
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR__SHIFT 0xa
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR__SHIFT 0xb
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR__SHIFT 0xc
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR__SHIFT 0xd
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR__SHIFT 0xe
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0xf
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x10
+#define CP_HQD_ERROR__SR_UTCL1_ERROR__SHIFT 0x11
+#define CP_HQD_ERROR__QU_UTCL1_ERROR__SHIFT 0x12
+#define CP_HQD_ERROR__TC_UTCL1_ERROR__SHIFT 0x13
+#define CP_HQD_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_HQD_ERROR__AQL_ERROR_MASK 0x00000020L
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR_MASK 0x00000100L
+#define CP_HQD_ERROR__IB_UTCL1_ERROR_MASK 0x00000200L
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR_MASK 0x00000400L
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR_MASK 0x00001000L
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR_MASK 0x00002000L
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR_MASK 0x00004000L
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00008000L
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00010000L
+#define CP_HQD_ERROR__SR_UTCL1_ERROR_MASK 0x00020000L
+#define CP_HQD_ERROR__QU_UTCL1_ERROR_MASK 0x00040000L
+#define CP_HQD_ERROR__TC_UTCL1_ERROR_MASK 0x00080000L
+//CP_HQD_EOP_WPTR_MEM
+#define CP_HQD_EOP_WPTR_MEM__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR_MEM__WPTR_MASK 0x00001FFFL
+//CP_HQD_AQL_CONTROL
+#define CP_HQD_AQL_CONTROL__CONTROL0__SHIFT 0x0
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN__SHIFT 0xf
+#define CP_HQD_AQL_CONTROL__CONTROL1__SHIFT 0x10
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN__SHIFT 0x1f
+#define CP_HQD_AQL_CONTROL__CONTROL0_MASK 0x00007FFFL
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN_MASK 0x00008000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_MASK 0x7FFF0000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN_MASK 0x80000000L
+//CP_HQD_PQ_WPTR_LO
+#define CP_HQD_PQ_WPTR_LO__OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_LO__OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_WPTR_HI
+#define CP_HQD_PQ_WPTR_HI__DATA__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_HI__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_didtdec
+//DIDT_IND_INDEX
+#define DIDT_IND_INDEX__DIDT_IND_INDEX__SHIFT 0x0
+#define DIDT_IND_INDEX__DIDT_IND_INDEX_MASK 0xFFFFFFFFL
+//DIDT_IND_DATA
+#define DIDT_IND_DATA__DIDT_IND_DATA__SHIFT 0x0
+#define DIDT_IND_DATA__DIDT_IND_DATA_MASK 0xFFFFFFFFL
+//DIDT_INDEX_AUTO_INCR_EN
+#define DIDT_INDEX_AUTO_INCR_EN__DIDT_INDEX_AUTO_INCR_EN__SHIFT 0x0
+#define DIDT_INDEX_AUTO_INCR_EN__DIDT_INDEX_AUTO_INCR_EN_MASK 0x00000001L
+
+
+// addressBlock: gc_gccacdec
+//GC_CAC_CTRL_1
+#define GC_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
+#define GC_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x18
+#define GC_CAC_CTRL_1__CAC_WINDOW_MASK 0x00FFFFFFL
+#define GC_CAC_CTRL_1__TDP_WINDOW_MASK 0xFF000000L
+//GC_CAC_CTRL_2
+#define GC_CAC_CTRL_2__CAC_ENABLE__SHIFT 0x0
+#define GC_CAC_CTRL_2__CAC_SOFT_CTRL_ENABLE__SHIFT 0x1
+#define GC_CAC_CTRL_2__GC_LCAC_ENABLE__SHIFT 0x2
+#define GC_CAC_CTRL_2__SE_LCAC_ENABLE__SHIFT 0x3
+#define GC_CAC_CTRL_2__CAC_ENABLE_MASK 0x00000001L
+#define GC_CAC_CTRL_2__CAC_SOFT_CTRL_ENABLE_MASK 0x00000002L
+#define GC_CAC_CTRL_2__GC_LCAC_ENABLE_MASK 0x00000004L
+#define GC_CAC_CTRL_2__SE_LCAC_ENABLE_MASK 0x00000008L
+//GC_CAC_INDEX_AUTO_INCR_EN
+#define GC_CAC_INDEX_AUTO_INCR_EN__GC_CAC_INDEX_AUTO_INCR_EN__SHIFT 0x0
+#define GC_CAC_INDEX_AUTO_INCR_EN__GC_CAC_INDEX_AUTO_INCR_EN_MASK 0x00000001L
+//GC_CAC_AGGR_LOWER
+#define GC_CAC_AGGR_LOWER__AGGR_31_0__SHIFT 0x0
+#define GC_CAC_AGGR_LOWER__AGGR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_AGGR_UPPER
+#define GC_CAC_AGGR_UPPER__AGGR_63_32__SHIFT 0x0
+#define GC_CAC_AGGR_UPPER__AGGR_63_32_MASK 0xFFFFFFFFL
+//PCC_PERF_COUNTER
+#define PCC_PERF_COUNTER__PCC_PERF_COUNTER__SHIFT 0x0
+#define PCC_PERF_COUNTER__PCC_PERF_COUNTER_MASK 0xFFFFFFFFL
+//GC_CAC_SOFT_CTRL
+#define GC_CAC_SOFT_CTRL__SOFT_SNAP__SHIFT 0x0
+#define GC_CAC_SOFT_CTRL__SOFT_SNAP_MASK 0x00000001L
+//GC_DIDT_CTRL0
+#define GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
+#define GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT 0x1
+#define GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT 0x3
+#define GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
+#define GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x5
+#define GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
+#define GC_DIDT_CTRL0__PHASE_OFFSET_MASK 0x00000006L
+#define GC_DIDT_CTRL0__DIDT_SW_RST_MASK 0x00000008L
+#define GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
+#define GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001E0L
+//GC_DIDT_CTRL1
+#define GC_DIDT_CTRL1__MIN_POWER__SHIFT 0x0
+#define GC_DIDT_CTRL1__MAX_POWER__SHIFT 0x10
+#define GC_DIDT_CTRL1__MIN_POWER_MASK 0x0000FFFFL
+#define GC_DIDT_CTRL1__MAX_POWER_MASK 0xFFFF0000L
+//GC_DIDT_CTRL2
+#define GC_DIDT_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
+#define GC_DIDT_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
+#define GC_DIDT_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
+#define GC_DIDT_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
+#define GC_DIDT_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
+#define GC_DIDT_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
+//GC_DIDT_WEIGHT
+#define GC_DIDT_WEIGHT__SQ_WEIGHT__SHIFT 0x0
+#define GC_DIDT_WEIGHT__DB_WEIGHT__SHIFT 0x8
+#define GC_DIDT_WEIGHT__TD_WEIGHT__SHIFT 0x10
+#define GC_DIDT_WEIGHT__TCP_WEIGHT__SHIFT 0x18
+#define GC_DIDT_WEIGHT__SQ_WEIGHT_MASK 0x000000FFL
+#define GC_DIDT_WEIGHT__DB_WEIGHT_MASK 0x0000FF00L
+#define GC_DIDT_WEIGHT__TD_WEIGHT_MASK 0x00FF0000L
+#define GC_DIDT_WEIGHT__TCP_WEIGHT_MASK 0xFF000000L
+//GC_EDC_CTRL
+#define GC_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define GC_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x9
+#define GC_EDC_CTRL__GC_EDC_ONLY_MODE__SHIFT 0xb
+#define GC_EDC_CTRL__EDC_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0xc
+#define GC_EDC_CTRL__PCC_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0x10
+#define GC_EDC_CTRL__RELEASE_STEP_INTERVAL__SHIFT 0x14
+#define GC_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define GC_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define GC_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
+#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00000200L
+#define GC_EDC_CTRL__GC_EDC_ONLY_MODE_MASK 0x00000800L
+#define GC_EDC_CTRL__EDC_THROTTLE_PATTERN_BIT_NUMS_MASK 0x0000F000L
+#define GC_EDC_CTRL__PCC_THROTTLE_PATTERN_BIT_NUMS_MASK 0x000F0000L
+#define GC_EDC_CTRL__RELEASE_STEP_INTERVAL_MASK 0x3FF00000L
+//GC_EDC_THRESHOLD
+#define GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//GC_DIDT_DROOP_CTRL
+#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT 0x0
+#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT 0x1
+#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT 0xf
+#define GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT 0x13
+#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT 0x1f
+#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK 0x00000001L
+#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK 0x00007FFEL
+#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK 0x00078000L
+#define GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK 0x00080000L
+#define GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK 0x80000000L
+//GC_DIDT_DROOP_CTRL1
+#define GC_DIDT_DROOP_CTRL1__DIDT_DROOP_LEVEL_RELEASE_EN__SHIFT 0x0
+#define GC_DIDT_DROOP_CTRL1__DIDT_DROOP_DELTA_THRESHOLD__SHIFT 0x1
+#define GC_DIDT_DROOP_CTRL1__DIDT_DROOP_LEVEL_RELEASE_EN_MASK 0x00000001L
+#define GC_DIDT_DROOP_CTRL1__DIDT_DROOP_DELTA_THRESHOLD_MASK 0x00007FFEL
+//GC_EDC_DROOP_CTRL
+#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT 0x0
+#define GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT 0x1
+#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT 0xf
+#define GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT 0x14
+#define GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT 0x15
+#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK 0x00000001L
+#define GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK 0x00007FFEL
+#define GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK 0x000F8000L
+#define GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK 0x00100000L
+#define GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK 0x00200000L
+//GC_THROTTLE_CTRL
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_SW_RST__SHIFT 0x0
+#define GC_THROTTLE_CTRL__PCC_STALL_EN__SHIFT 0x2
+#define GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT 0x3
+#define GC_THROTTLE_CTRL__PCC_OVERRIDE__SHIFT 0x7
+#define GC_THROTTLE_CTRL__PCC_PERF_COUNTER_EN__SHIFT 0x9
+#define GC_THROTTLE_CTRL__PCC_THROT_INCR_STEP_INTERVAL__SHIFT 0xa
+#define GC_THROTTLE_CTRL__PCC_FIXED_PATTERN_MIN__SHIFT 0x14
+#define GC_THROTTLE_CTRL__PCC_FIXED_PATTERN_MAX__SHIFT 0x19
+#define GC_THROTTLE_CTRL__INST_THROT_INCR__SHIFT 0x1e
+#define GC_THROTTLE_CTRL__INST_THROT_DECR__SHIFT 0x1f
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_SW_RST_MASK 0x00000001L
+#define GC_THROTTLE_CTRL__PCC_STALL_EN_MASK 0x00000004L
+#define GC_THROTTLE_CTRL__PATTERN_MODE_MASK 0x00000008L
+#define GC_THROTTLE_CTRL__PCC_OVERRIDE_MASK 0x00000080L
+#define GC_THROTTLE_CTRL__PCC_PERF_COUNTER_EN_MASK 0x00000200L
+#define GC_THROTTLE_CTRL__PCC_THROT_INCR_STEP_INTERVAL_MASK 0x000FFC00L
+#define GC_THROTTLE_CTRL__PCC_FIXED_PATTERN_MIN_MASK 0x01F00000L
+#define GC_THROTTLE_CTRL__PCC_FIXED_PATTERN_MAX_MASK 0x3E000000L
+#define GC_THROTTLE_CTRL__INST_THROT_INCR_MASK 0x40000000L
+#define GC_THROTTLE_CTRL__INST_THROT_DECR_MASK 0x80000000L
+//GC_CAC_IND_INDEX
+#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR__SHIFT 0x0
+#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR_MASK 0xFFFFFFFFL
+//GC_CAC_IND_DATA
+#define GC_CAC_IND_DATA__GC_CAC_IND_DATA__SHIFT 0x0
+#define GC_CAC_IND_DATA__GC_CAC_IND_DATA_MASK 0xFFFFFFFFL
+//SE_CAC_IND_INDEX
+#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR__SHIFT 0x0
+#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR_MASK 0xFFFFFFFFL
+//SE_CAC_IND_DATA
+#define SE_CAC_IND_DATA__SE_CAC_IND_DATA__SHIFT 0x0
+#define SE_CAC_IND_DATA__SE_CAC_IND_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_tcpdec
+//TCP_WATCH0_ADDR_H
+#define TCP_WATCH0_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH0_ADDR_L
+#define TCP_WATCH0_ADDR_L__ADDR__SHIFT 0x6
+#define TCP_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//TCP_WATCH0_CNTL
+#define TCP_WATCH0_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH0_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH0_CNTL__ATC__SHIFT 0x1c
+#define TCP_WATCH0_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH0_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH0_CNTL__MASK_MASK 0x00FFFFFFL
+#define TCP_WATCH0_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH0_CNTL__ATC_MASK 0x10000000L
+#define TCP_WATCH0_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH0_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH1_ADDR_H
+#define TCP_WATCH1_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH1_ADDR_L
+#define TCP_WATCH1_ADDR_L__ADDR__SHIFT 0x6
+#define TCP_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//TCP_WATCH1_CNTL
+#define TCP_WATCH1_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH1_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH1_CNTL__ATC__SHIFT 0x1c
+#define TCP_WATCH1_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH1_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH1_CNTL__MASK_MASK 0x00FFFFFFL
+#define TCP_WATCH1_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH1_CNTL__ATC_MASK 0x10000000L
+#define TCP_WATCH1_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH1_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH2_ADDR_H
+#define TCP_WATCH2_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH2_ADDR_L
+#define TCP_WATCH2_ADDR_L__ADDR__SHIFT 0x6
+#define TCP_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//TCP_WATCH2_CNTL
+#define TCP_WATCH2_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH2_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH2_CNTL__ATC__SHIFT 0x1c
+#define TCP_WATCH2_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH2_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH2_CNTL__MASK_MASK 0x00FFFFFFL
+#define TCP_WATCH2_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH2_CNTL__ATC_MASK 0x10000000L
+#define TCP_WATCH2_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH2_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH3_ADDR_H
+#define TCP_WATCH3_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH3_ADDR_L
+#define TCP_WATCH3_ADDR_L__ADDR__SHIFT 0x6
+#define TCP_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//TCP_WATCH3_CNTL
+#define TCP_WATCH3_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH3_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH3_CNTL__ATC__SHIFT 0x1c
+#define TCP_WATCH3_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH3_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH3_CNTL__MASK_MASK 0x00FFFFFFL
+#define TCP_WATCH3_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH3_CNTL__ATC_MASK 0x10000000L
+#define TCP_WATCH3_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH3_CNTL__VALID_MASK 0x80000000L
+//TCP_GATCL1_CNTL
+#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID__SHIFT 0x19
+#define TCP_GATCL1_CNTL__FORCE_MISS__SHIFT 0x1a
+#define TCP_GATCL1_CNTL__FORCE_IN_ORDER__SHIFT 0x1b
+#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define TCP_GATCL1_CNTL__INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define TCP_GATCL1_CNTL__FORCE_MISS_MASK 0x04000000L
+#define TCP_GATCL1_CNTL__FORCE_IN_ORDER_MASK 0x08000000L
+#define TCP_GATCL1_CNTL__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define TCP_GATCL1_CNTL__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//TCP_GATCL1_DSM_CNTL
+#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0__SHIFT 0x0
+#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1__SHIFT 0x1
+#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A__SHIFT 0x2
+#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A0_MASK 0x00000001L
+#define TCP_GATCL1_DSM_CNTL__SEL_DSM_TCP_GATCL1_IRRITATOR_DATA_A1_MASK 0x00000002L
+#define TCP_GATCL1_DSM_CNTL__TCP_GATCL1_ENABLE_SINGLE_WRITE_A_MASK 0x00000004L
+//TCP_CNTL2
+#define TCP_CNTL2__LS_DISABLE_CLOCKS__SHIFT 0x0
+#define TCP_CNTL2__TCPF_FMT_MGCG_DISABLE__SHIFT 0x8
+#define TCP_CNTL2__TCPF_LATENCY_BYPASS_DISABLE__SHIFT 0x9
+#define TCP_CNTL2__LS_DISABLE_CLOCKS_MASK 0x000000FFL
+#define TCP_CNTL2__TCPF_FMT_MGCG_DISABLE_MASK 0x00000100L
+#define TCP_CNTL2__TCPF_LATENCY_BYPASS_DISABLE_MASK 0x00000200L
+//TCP_UTCL1_CNTL1
+#define TCP_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define TCP_UTCL1_CNTL1__GPUVM_64K_DEFAULT__SHIFT 0x1
+#define TCP_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define TCP_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define TCP_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define TCP_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define TCP_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define TCP_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
+#define TCP_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define TCP_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define TCP_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define TCP_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define TCP_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define TCP_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define TCP_UTCL1_CNTL1__GPUVM_64K_DEFAULT_MASK 0x00000002L
+#define TCP_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define TCP_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define TCP_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define TCP_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define TCP_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define TCP_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
+#define TCP_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define TCP_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define TCP_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define TCP_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define TCP_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//TCP_UTCL1_CNTL2
+#define TCP_UTCL1_CNTL2__SPARE__SHIFT 0x0
+#define TCP_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define TCP_UTCL1_CNTL2__ANY_LINE_VALID__SHIFT 0xa
+#define TCP_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define TCP_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define TCP_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define TCP_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define TCP_UTCL1_CNTL2__SPARE_MASK 0x000000FFL
+#define TCP_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define TCP_UTCL1_CNTL2__ANY_LINE_VALID_MASK 0x00000400L
+#define TCP_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define TCP_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define TCP_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define TCP_UTCL1_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+//TCP_UTCL1_STATUS
+#define TCP_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define TCP_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define TCP_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define TCP_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define TCP_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define TCP_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+//TCP_PERFCOUNTER_FILTER
+#define TCP_PERFCOUNTER_FILTER__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT__SHIFT 0xb
+#define TCP_PERFCOUNTER_FILTER__SW_MODE__SHIFT 0xf
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES__SHIFT 0x14
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE__SHIFT 0x16
+#define TCP_PERFCOUNTER_FILTER__GLC__SHIFT 0x19
+#define TCP_PERFCOUNTER_FILTER__SLC__SHIFT 0x1a
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE__SHIFT 0x1b
+#define TCP_PERFCOUNTER_FILTER__ADDR_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER_FILTER__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER__DIM_MASK 0x0000001CL
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT_MASK 0x000007E0L
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT_MASK 0x00007800L
+#define TCP_PERFCOUNTER_FILTER__SW_MODE_MASK 0x000F8000L
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES_MASK 0x00300000L
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE_MASK 0x01C00000L
+#define TCP_PERFCOUNTER_FILTER__GLC_MASK 0x02000000L
+#define TCP_PERFCOUNTER_FILTER__SLC_MASK 0x04000000L
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE_MASK 0x08000000L
+#define TCP_PERFCOUNTER_FILTER__ADDR_MODE_MASK 0x70000000L
+//TCP_PERFCOUNTER_FILTER_EN
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER_EN__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT__SHIFT 0x3
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT__SHIFT 0x4
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES__SHIFT 0x6
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE__SHIFT 0x7
+#define TCP_PERFCOUNTER_FILTER_EN__GLC__SHIFT 0x8
+#define TCP_PERFCOUNTER_FILTER_EN__SLC__SHIFT 0x9
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE__SHIFT 0xa
+#define TCP_PERFCOUNTER_FILTER_EN__ADDR_MODE__SHIFT 0xb
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER_EN__DIM_MASK 0x00000004L
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT_MASK 0x00000008L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT_MASK 0x00000010L
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE_MASK 0x00000020L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES_MASK 0x00000040L
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE_MASK 0x00000080L
+#define TCP_PERFCOUNTER_FILTER_EN__GLC_MASK 0x00000100L
+#define TCP_PERFCOUNTER_FILTER_EN__SLC_MASK 0x00000200L
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE_MASK 0x00000400L
+#define TCP_PERFCOUNTER_FILTER_EN__ADDR_MODE_MASK 0x00000800L
+
+
+// addressBlock: gc_gdspdec
+//GDS_VMID0_BASE
+#define GDS_VMID0_BASE__BASE__SHIFT 0x0
+#define GDS_VMID0_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID0_SIZE
+#define GDS_VMID0_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID0_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID1_BASE
+#define GDS_VMID1_BASE__BASE__SHIFT 0x0
+#define GDS_VMID1_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID1_SIZE
+#define GDS_VMID1_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID1_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID2_BASE
+#define GDS_VMID2_BASE__BASE__SHIFT 0x0
+#define GDS_VMID2_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID2_SIZE
+#define GDS_VMID2_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID2_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID3_BASE
+#define GDS_VMID3_BASE__BASE__SHIFT 0x0
+#define GDS_VMID3_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID3_SIZE
+#define GDS_VMID3_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID3_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID4_BASE
+#define GDS_VMID4_BASE__BASE__SHIFT 0x0
+#define GDS_VMID4_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID4_SIZE
+#define GDS_VMID4_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID4_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID5_BASE
+#define GDS_VMID5_BASE__BASE__SHIFT 0x0
+#define GDS_VMID5_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID5_SIZE
+#define GDS_VMID5_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID5_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID6_BASE
+#define GDS_VMID6_BASE__BASE__SHIFT 0x0
+#define GDS_VMID6_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID6_SIZE
+#define GDS_VMID6_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID6_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID7_BASE
+#define GDS_VMID7_BASE__BASE__SHIFT 0x0
+#define GDS_VMID7_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID7_SIZE
+#define GDS_VMID7_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID7_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID8_BASE
+#define GDS_VMID8_BASE__BASE__SHIFT 0x0
+#define GDS_VMID8_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID8_SIZE
+#define GDS_VMID8_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID8_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID9_BASE
+#define GDS_VMID9_BASE__BASE__SHIFT 0x0
+#define GDS_VMID9_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID9_SIZE
+#define GDS_VMID9_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID9_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID10_BASE
+#define GDS_VMID10_BASE__BASE__SHIFT 0x0
+#define GDS_VMID10_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID10_SIZE
+#define GDS_VMID10_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID10_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID11_BASE
+#define GDS_VMID11_BASE__BASE__SHIFT 0x0
+#define GDS_VMID11_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID11_SIZE
+#define GDS_VMID11_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID11_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID12_BASE
+#define GDS_VMID12_BASE__BASE__SHIFT 0x0
+#define GDS_VMID12_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID12_SIZE
+#define GDS_VMID12_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID12_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID13_BASE
+#define GDS_VMID13_BASE__BASE__SHIFT 0x0
+#define GDS_VMID13_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID13_SIZE
+#define GDS_VMID13_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID13_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID14_BASE
+#define GDS_VMID14_BASE__BASE__SHIFT 0x0
+#define GDS_VMID14_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID14_SIZE
+#define GDS_VMID14_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID14_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_VMID15_BASE
+#define GDS_VMID15_BASE__BASE__SHIFT 0x0
+#define GDS_VMID15_BASE__BASE_MASK 0x0000FFFFL
+//GDS_VMID15_SIZE
+#define GDS_VMID15_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID15_SIZE__SIZE_MASK 0x0001FFFFL
+//GDS_GWS_VMID0
+#define GDS_GWS_VMID0__BASE__SHIFT 0x0
+#define GDS_GWS_VMID0__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID0__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID0__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID1
+#define GDS_GWS_VMID1__BASE__SHIFT 0x0
+#define GDS_GWS_VMID1__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID1__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID1__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID2
+#define GDS_GWS_VMID2__BASE__SHIFT 0x0
+#define GDS_GWS_VMID2__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID2__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID2__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID3
+#define GDS_GWS_VMID3__BASE__SHIFT 0x0
+#define GDS_GWS_VMID3__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID3__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID3__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID4
+#define GDS_GWS_VMID4__BASE__SHIFT 0x0
+#define GDS_GWS_VMID4__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID4__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID4__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID5
+#define GDS_GWS_VMID5__BASE__SHIFT 0x0
+#define GDS_GWS_VMID5__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID5__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID5__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID6
+#define GDS_GWS_VMID6__BASE__SHIFT 0x0
+#define GDS_GWS_VMID6__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID6__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID6__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID7
+#define GDS_GWS_VMID7__BASE__SHIFT 0x0
+#define GDS_GWS_VMID7__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID7__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID7__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID8
+#define GDS_GWS_VMID8__BASE__SHIFT 0x0
+#define GDS_GWS_VMID8__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID8__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID8__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID9
+#define GDS_GWS_VMID9__BASE__SHIFT 0x0
+#define GDS_GWS_VMID9__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID9__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID9__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID10
+#define GDS_GWS_VMID10__BASE__SHIFT 0x0
+#define GDS_GWS_VMID10__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID10__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID10__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID11
+#define GDS_GWS_VMID11__BASE__SHIFT 0x0
+#define GDS_GWS_VMID11__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID11__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID11__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID12
+#define GDS_GWS_VMID12__BASE__SHIFT 0x0
+#define GDS_GWS_VMID12__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID12__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID12__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID13
+#define GDS_GWS_VMID13__BASE__SHIFT 0x0
+#define GDS_GWS_VMID13__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID13__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID13__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID14
+#define GDS_GWS_VMID14__BASE__SHIFT 0x0
+#define GDS_GWS_VMID14__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID14__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID14__SIZE_MASK 0x007F0000L
+//GDS_GWS_VMID15
+#define GDS_GWS_VMID15__BASE__SHIFT 0x0
+#define GDS_GWS_VMID15__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID15__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID15__SIZE_MASK 0x007F0000L
+//GDS_OA_VMID0
+#define GDS_OA_VMID0__MASK__SHIFT 0x0
+#define GDS_OA_VMID0__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID0__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID0__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID1
+#define GDS_OA_VMID1__MASK__SHIFT 0x0
+#define GDS_OA_VMID1__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID1__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID1__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID2
+#define GDS_OA_VMID2__MASK__SHIFT 0x0
+#define GDS_OA_VMID2__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID2__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID2__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID3
+#define GDS_OA_VMID3__MASK__SHIFT 0x0
+#define GDS_OA_VMID3__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID3__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID3__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID4
+#define GDS_OA_VMID4__MASK__SHIFT 0x0
+#define GDS_OA_VMID4__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID4__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID4__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID5
+#define GDS_OA_VMID5__MASK__SHIFT 0x0
+#define GDS_OA_VMID5__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID5__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID5__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID6
+#define GDS_OA_VMID6__MASK__SHIFT 0x0
+#define GDS_OA_VMID6__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID6__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID6__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID7
+#define GDS_OA_VMID7__MASK__SHIFT 0x0
+#define GDS_OA_VMID7__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID7__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID7__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID8
+#define GDS_OA_VMID8__MASK__SHIFT 0x0
+#define GDS_OA_VMID8__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID8__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID8__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID9
+#define GDS_OA_VMID9__MASK__SHIFT 0x0
+#define GDS_OA_VMID9__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID9__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID9__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID10
+#define GDS_OA_VMID10__MASK__SHIFT 0x0
+#define GDS_OA_VMID10__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID10__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID10__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID11
+#define GDS_OA_VMID11__MASK__SHIFT 0x0
+#define GDS_OA_VMID11__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID11__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID11__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID12
+#define GDS_OA_VMID12__MASK__SHIFT 0x0
+#define GDS_OA_VMID12__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID12__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID12__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID13
+#define GDS_OA_VMID13__MASK__SHIFT 0x0
+#define GDS_OA_VMID13__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID13__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID13__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID14
+#define GDS_OA_VMID14__MASK__SHIFT 0x0
+#define GDS_OA_VMID14__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID14__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID14__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID15
+#define GDS_OA_VMID15__MASK__SHIFT 0x0
+#define GDS_OA_VMID15__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID15__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID15__UNUSED_MASK 0xFFFF0000L
+//GDS_GWS_RESET0
+#define GDS_GWS_RESET0__RESOURCE0_RESET__SHIFT 0x0
+#define GDS_GWS_RESET0__RESOURCE1_RESET__SHIFT 0x1
+#define GDS_GWS_RESET0__RESOURCE2_RESET__SHIFT 0x2
+#define GDS_GWS_RESET0__RESOURCE3_RESET__SHIFT 0x3
+#define GDS_GWS_RESET0__RESOURCE4_RESET__SHIFT 0x4
+#define GDS_GWS_RESET0__RESOURCE5_RESET__SHIFT 0x5
+#define GDS_GWS_RESET0__RESOURCE6_RESET__SHIFT 0x6
+#define GDS_GWS_RESET0__RESOURCE7_RESET__SHIFT 0x7
+#define GDS_GWS_RESET0__RESOURCE8_RESET__SHIFT 0x8
+#define GDS_GWS_RESET0__RESOURCE9_RESET__SHIFT 0x9
+#define GDS_GWS_RESET0__RESOURCE10_RESET__SHIFT 0xa
+#define GDS_GWS_RESET0__RESOURCE11_RESET__SHIFT 0xb
+#define GDS_GWS_RESET0__RESOURCE12_RESET__SHIFT 0xc
+#define GDS_GWS_RESET0__RESOURCE13_RESET__SHIFT 0xd
+#define GDS_GWS_RESET0__RESOURCE14_RESET__SHIFT 0xe
+#define GDS_GWS_RESET0__RESOURCE15_RESET__SHIFT 0xf
+#define GDS_GWS_RESET0__RESOURCE16_RESET__SHIFT 0x10
+#define GDS_GWS_RESET0__RESOURCE17_RESET__SHIFT 0x11
+#define GDS_GWS_RESET0__RESOURCE18_RESET__SHIFT 0x12
+#define GDS_GWS_RESET0__RESOURCE19_RESET__SHIFT 0x13
+#define GDS_GWS_RESET0__RESOURCE20_RESET__SHIFT 0x14
+#define GDS_GWS_RESET0__RESOURCE21_RESET__SHIFT 0x15
+#define GDS_GWS_RESET0__RESOURCE22_RESET__SHIFT 0x16
+#define GDS_GWS_RESET0__RESOURCE23_RESET__SHIFT 0x17
+#define GDS_GWS_RESET0__RESOURCE24_RESET__SHIFT 0x18
+#define GDS_GWS_RESET0__RESOURCE25_RESET__SHIFT 0x19
+#define GDS_GWS_RESET0__RESOURCE26_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET0__RESOURCE27_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET0__RESOURCE28_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET0__RESOURCE29_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET0__RESOURCE30_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET0__RESOURCE31_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET0__RESOURCE0_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET0__RESOURCE1_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET0__RESOURCE2_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET0__RESOURCE3_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET0__RESOURCE4_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET0__RESOURCE5_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET0__RESOURCE6_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET0__RESOURCE7_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET0__RESOURCE8_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET0__RESOURCE9_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET0__RESOURCE10_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET0__RESOURCE11_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET0__RESOURCE12_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET0__RESOURCE13_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET0__RESOURCE14_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET0__RESOURCE15_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET0__RESOURCE16_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET0__RESOURCE17_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET0__RESOURCE18_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET0__RESOURCE19_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET0__RESOURCE20_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET0__RESOURCE21_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET0__RESOURCE22_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET0__RESOURCE23_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET0__RESOURCE24_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET0__RESOURCE25_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET0__RESOURCE26_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET0__RESOURCE27_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET0__RESOURCE28_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET0__RESOURCE29_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET0__RESOURCE30_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET0__RESOURCE31_RESET_MASK 0x80000000L
+//GDS_GWS_RESET1
+#define GDS_GWS_RESET1__RESOURCE32_RESET__SHIFT 0x0
+#define GDS_GWS_RESET1__RESOURCE33_RESET__SHIFT 0x1
+#define GDS_GWS_RESET1__RESOURCE34_RESET__SHIFT 0x2
+#define GDS_GWS_RESET1__RESOURCE35_RESET__SHIFT 0x3
+#define GDS_GWS_RESET1__RESOURCE36_RESET__SHIFT 0x4
+#define GDS_GWS_RESET1__RESOURCE37_RESET__SHIFT 0x5
+#define GDS_GWS_RESET1__RESOURCE38_RESET__SHIFT 0x6
+#define GDS_GWS_RESET1__RESOURCE39_RESET__SHIFT 0x7
+#define GDS_GWS_RESET1__RESOURCE40_RESET__SHIFT 0x8
+#define GDS_GWS_RESET1__RESOURCE41_RESET__SHIFT 0x9
+#define GDS_GWS_RESET1__RESOURCE42_RESET__SHIFT 0xa
+#define GDS_GWS_RESET1__RESOURCE43_RESET__SHIFT 0xb
+#define GDS_GWS_RESET1__RESOURCE44_RESET__SHIFT 0xc
+#define GDS_GWS_RESET1__RESOURCE45_RESET__SHIFT 0xd
+#define GDS_GWS_RESET1__RESOURCE46_RESET__SHIFT 0xe
+#define GDS_GWS_RESET1__RESOURCE47_RESET__SHIFT 0xf
+#define GDS_GWS_RESET1__RESOURCE48_RESET__SHIFT 0x10
+#define GDS_GWS_RESET1__RESOURCE49_RESET__SHIFT 0x11
+#define GDS_GWS_RESET1__RESOURCE50_RESET__SHIFT 0x12
+#define GDS_GWS_RESET1__RESOURCE51_RESET__SHIFT 0x13
+#define GDS_GWS_RESET1__RESOURCE52_RESET__SHIFT 0x14
+#define GDS_GWS_RESET1__RESOURCE53_RESET__SHIFT 0x15
+#define GDS_GWS_RESET1__RESOURCE54_RESET__SHIFT 0x16
+#define GDS_GWS_RESET1__RESOURCE55_RESET__SHIFT 0x17
+#define GDS_GWS_RESET1__RESOURCE56_RESET__SHIFT 0x18
+#define GDS_GWS_RESET1__RESOURCE57_RESET__SHIFT 0x19
+#define GDS_GWS_RESET1__RESOURCE58_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET1__RESOURCE59_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET1__RESOURCE60_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET1__RESOURCE61_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET1__RESOURCE62_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET1__RESOURCE63_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET1__RESOURCE32_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET1__RESOURCE33_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET1__RESOURCE34_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET1__RESOURCE35_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET1__RESOURCE36_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET1__RESOURCE37_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET1__RESOURCE38_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET1__RESOURCE39_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET1__RESOURCE40_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET1__RESOURCE41_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET1__RESOURCE42_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET1__RESOURCE43_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET1__RESOURCE44_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET1__RESOURCE45_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET1__RESOURCE46_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET1__RESOURCE47_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET1__RESOURCE48_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET1__RESOURCE49_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET1__RESOURCE50_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET1__RESOURCE51_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET1__RESOURCE52_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET1__RESOURCE53_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET1__RESOURCE54_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET1__RESOURCE55_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET1__RESOURCE56_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET1__RESOURCE57_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET1__RESOURCE58_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET1__RESOURCE59_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET1__RESOURCE60_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET1__RESOURCE61_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET1__RESOURCE62_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET1__RESOURCE63_RESET_MASK 0x80000000L
+//GDS_GWS_RESOURCE_RESET
+#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x0
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x8
+#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x00000001L
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0x0000FF00L
+//GDS_COMPUTE_MAX_WAVE_ID
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+//GDS_OA_RESET_MASK
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET__SHIFT 0x0
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET__SHIFT 0x1
+#define GDS_OA_RESET_MASK__ME0_CS_RESET__SHIFT 0x2
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET__SHIFT 0x3
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET__SHIFT 0x4
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET__SHIFT 0x5
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET__SHIFT 0x6
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET__SHIFT 0x7
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET__SHIFT 0x8
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET__SHIFT 0x9
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET__SHIFT 0xa
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET__SHIFT 0xb
+#define GDS_OA_RESET_MASK__UNUSED1__SHIFT 0xc
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET_MASK 0x00000001L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET_MASK 0x00000002L
+#define GDS_OA_RESET_MASK__ME0_CS_RESET_MASK 0x00000004L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET_MASK 0x00000008L
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET_MASK 0x00000010L
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET_MASK 0x00000020L
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET_MASK 0x00000040L
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET_MASK 0x00000080L
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET_MASK 0x00000100L
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET_MASK 0x00000200L
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET_MASK 0x00000400L
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET_MASK 0x00000800L
+#define GDS_OA_RESET_MASK__UNUSED1_MASK 0xFFFFF000L
+//GDS_OA_RESET
+#define GDS_OA_RESET__RESET__SHIFT 0x0
+#define GDS_OA_RESET__PIPE_ID__SHIFT 0x8
+#define GDS_OA_RESET__RESET_MASK 0x00000001L
+#define GDS_OA_RESET__PIPE_ID_MASK 0x0000FF00L
+//GDS_ENHANCE
+#define GDS_ENHANCE__MISC__SHIFT 0x0
+#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x10
+#define GDS_ENHANCE__CGPG_RESTORE__SHIFT 0x11
+#define GDS_ENHANCE__RD_BUF_TAG_MISS__SHIFT 0x12
+#define GDS_ENHANCE__GDSA_PC_CGTS_DIS__SHIFT 0x13
+#define GDS_ENHANCE__GDSO_PC_CGTS_DIS__SHIFT 0x14
+#define GDS_ENHANCE__WD_GDS_CSB_OVERRIDE__SHIFT 0x15
+#define GDS_ENHANCE__GDS_CLK_ENHANCE_DIS__SHIFT 0x16
+#define GDS_ENHANCE__DS_MEM_CLK_GATE_DIS__SHIFT 0x17
+#define GDS_ENHANCE__UNUSED__SHIFT 0x18
+#define GDS_ENHANCE__MISC_MASK 0x0000FFFFL
+#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x00010000L
+#define GDS_ENHANCE__CGPG_RESTORE_MASK 0x00020000L
+#define GDS_ENHANCE__RD_BUF_TAG_MISS_MASK 0x00040000L
+#define GDS_ENHANCE__GDSA_PC_CGTS_DIS_MASK 0x00080000L
+#define GDS_ENHANCE__GDSO_PC_CGTS_DIS_MASK 0x00100000L
+#define GDS_ENHANCE__WD_GDS_CSB_OVERRIDE_MASK 0x00200000L
+#define GDS_ENHANCE__GDS_CLK_ENHANCE_DIS_MASK 0x00400000L
+#define GDS_ENHANCE__DS_MEM_CLK_GATE_DIS_MASK 0x00800000L
+#define GDS_ENHANCE__UNUSED_MASK 0xFF000000L
+//GDS_OA_CGPG_RESTORE
+#define GDS_OA_CGPG_RESTORE__VMID__SHIFT 0x0
+#define GDS_OA_CGPG_RESTORE__MEID__SHIFT 0x8
+#define GDS_OA_CGPG_RESTORE__PIPEID__SHIFT 0xc
+#define GDS_OA_CGPG_RESTORE__QUEUEID__SHIFT 0x10
+#define GDS_OA_CGPG_RESTORE__UNUSED__SHIFT 0x14
+#define GDS_OA_CGPG_RESTORE__VMID_MASK 0x000000FFL
+#define GDS_OA_CGPG_RESTORE__MEID_MASK 0x00000F00L
+#define GDS_OA_CGPG_RESTORE__PIPEID_MASK 0x0000F000L
+#define GDS_OA_CGPG_RESTORE__QUEUEID_MASK 0x000F0000L
+#define GDS_OA_CGPG_RESTORE__UNUSED_MASK 0xFFF00000L
+//GDS_CS_CTXSW_STATUS
+#define GDS_CS_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_CS_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_CS_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_CS_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_CS_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_CS_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_CS_CTXSW_CNT0
+#define GDS_CS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT1
+#define GDS_CS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT2
+#define GDS_CS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT3
+#define GDS_CS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_GFX_CTXSW_STATUS
+#define GDS_GFX_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_GFX_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_GFX_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_GFX_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_GFX_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_GFX_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_VS_CTXSW_CNT0
+#define GDS_VS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_VS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_VS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_VS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_VS_CTXSW_CNT1
+#define GDS_VS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_VS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_VS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_VS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_VS_CTXSW_CNT2
+#define GDS_VS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_VS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_VS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_VS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_VS_CTXSW_CNT3
+#define GDS_VS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_VS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_VS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_VS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS0_CTXSW_CNT0
+#define GDS_PS0_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS0_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS0_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS0_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS0_CTXSW_CNT1
+#define GDS_PS0_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS0_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS0_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS0_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS0_CTXSW_CNT2
+#define GDS_PS0_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS0_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS0_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS0_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS0_CTXSW_CNT3
+#define GDS_PS0_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS0_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS0_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS0_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS1_CTXSW_CNT0
+#define GDS_PS1_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS1_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS1_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS1_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS1_CTXSW_CNT1
+#define GDS_PS1_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS1_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS1_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS1_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS1_CTXSW_CNT2
+#define GDS_PS1_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS1_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS1_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS1_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS1_CTXSW_CNT3
+#define GDS_PS1_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS1_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS1_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS1_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS2_CTXSW_CNT0
+#define GDS_PS2_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS2_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS2_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS2_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS2_CTXSW_CNT1
+#define GDS_PS2_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS2_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS2_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS2_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS2_CTXSW_CNT2
+#define GDS_PS2_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS2_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS2_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS2_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS2_CTXSW_CNT3
+#define GDS_PS2_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS2_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS2_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS2_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS3_CTXSW_CNT0
+#define GDS_PS3_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS3_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS3_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS3_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS3_CTXSW_CNT1
+#define GDS_PS3_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS3_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS3_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS3_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS3_CTXSW_CNT2
+#define GDS_PS3_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS3_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS3_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS3_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS3_CTXSW_CNT3
+#define GDS_PS3_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS3_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS3_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS3_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS4_CTXSW_CNT0
+#define GDS_PS4_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS4_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS4_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS4_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS4_CTXSW_CNT1
+#define GDS_PS4_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS4_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS4_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS4_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS4_CTXSW_CNT2
+#define GDS_PS4_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS4_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS4_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS4_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS4_CTXSW_CNT3
+#define GDS_PS4_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS4_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS4_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS4_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS5_CTXSW_CNT0
+#define GDS_PS5_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS5_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS5_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS5_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS5_CTXSW_CNT1
+#define GDS_PS5_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS5_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS5_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS5_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS5_CTXSW_CNT2
+#define GDS_PS5_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS5_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS5_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS5_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS5_CTXSW_CNT3
+#define GDS_PS5_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS5_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS5_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS5_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS6_CTXSW_CNT0
+#define GDS_PS6_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS6_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS6_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS6_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS6_CTXSW_CNT1
+#define GDS_PS6_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS6_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS6_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS6_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS6_CTXSW_CNT2
+#define GDS_PS6_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS6_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS6_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS6_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS6_CTXSW_CNT3
+#define GDS_PS6_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS6_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS6_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS6_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS7_CTXSW_CNT0
+#define GDS_PS7_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS7_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS7_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS7_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS7_CTXSW_CNT1
+#define GDS_PS7_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS7_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS7_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS7_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS7_CTXSW_CNT2
+#define GDS_PS7_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS7_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS7_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS7_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS7_CTXSW_CNT3
+#define GDS_PS7_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS7_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS7_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS7_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT0
+#define GDS_GS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT1
+#define GDS_GS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT2
+#define GDS_GS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT3
+#define GDS_GS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_rasdec
+//RAS_SIGNATURE_CONTROL
+#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x0
+#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x00000001L
+//RAS_SIGNATURE_MASK
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x0
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE0
+#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE1
+#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE2
+#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE3
+#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_DB_SIGNATURE0
+#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_PA_SIGNATURE0
+#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_VGT_SIGNATURE0
+#define RAS_VGT_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_VGT_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SQ_SIGNATURE0
+#define RAS_SQ_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SQ_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE0
+#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE1
+#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE2
+#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE3
+#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE4
+#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE5
+#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE6
+#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE7
+#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_IA_SIGNATURE0
+#define RAS_IA_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_IA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_IA_SIGNATURE1
+#define RAS_IA_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_IA_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE0
+#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE1
+#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_TA_SIGNATURE0
+#define RAS_TA_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_TA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_TD_SIGNATURE0
+#define RAS_TD_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_TD_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_CB_SIGNATURE0
+#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE0
+#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE1
+#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_TA_SIGNATURE1
+#define RAS_TA_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_TA_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gfxdec0
+//DB_RENDER_CONTROL
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x0
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x1
+#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x2
+#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x3
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x4
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x5
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x6
+#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x7
+#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x8
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE__SHIFT 0xc
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x00000001L
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x00000002L
+#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x00000004L
+#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x00000008L
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x00000010L
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x00000020L
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x00000040L
+#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x00000080L
+#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0x00000F00L
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE_MASK 0x00001000L
+//DB_COUNT_CONTROL
+#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE__SHIFT 0x0
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x1
+#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x4
+#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x8
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0xc
+#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x10
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x14
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x18
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x1c
+#define DB_COUNT_CONTROL__ZPASS_INCREMENT_DISABLE_MASK 0x00000001L
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x00000002L
+#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x00000070L
+#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0x00000F00L
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0x0000F000L
+#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0x000F0000L
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0x00F00000L
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x0F000000L
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xF0000000L
+//DB_DEPTH_VIEW
+#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x0
+#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0xd
+#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x18
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x19
+#define DB_DEPTH_VIEW__MIPID__SHIFT 0x1a
+#define DB_DEPTH_VIEW__SLICE_START_MASK 0x000007FFL
+#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x01000000L
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x02000000L
+#define DB_DEPTH_VIEW__MIPID_MASK 0x3C000000L
+//DB_RENDER_OVERRIDE
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x0
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x2
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x4
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x6
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x7
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x8
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x9
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0xa
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0xb
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0xc
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0xd
+#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT__SHIFT 0xf
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x10
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x11
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x12
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x13
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x15
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x1a
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x1b
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x1c
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x1d
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x1e
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x1f
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0x0000000CL
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x00000030L
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x00001000L
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x00006000L
+#define DB_RENDER_OVERRIDE__FORCE_QC_SMASK_CONFLICT_MASK 0x00008000L
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x00010000L
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x00020000L
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x00040000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x00180000L
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x03E00000L
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x04000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x08000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000L
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000L
+//DB_RENDER_OVERRIDE2
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x0
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x2
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x5
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x6
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x7
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x8
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x9
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0xa
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0xb
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0xc
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0xf
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x12
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x15
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x00000020L
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x00007000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x00038000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x001C0000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x00200000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L
+//DB_HTILE_DATA_BASE
+#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
+#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_HTILE_DATA_BASE_HI
+#define DB_HTILE_DATA_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_HTILE_DATA_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_DEPTH_SIZE
+#define DB_DEPTH_SIZE__X_MAX__SHIFT 0x0
+#define DB_DEPTH_SIZE__Y_MAX__SHIFT 0x10
+#define DB_DEPTH_SIZE__X_MAX_MASK 0x00003FFFL
+#define DB_DEPTH_SIZE__Y_MAX_MASK 0x3FFF0000L
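Every register in this header follows the same convention: a REG__FIELD__SHIFT constant giving the field's bit offset and a matching REG__FIELD_MASK constant covering its bits. A minimal sketch of how such shift/mask pairs are typically consumed is shown below; the FIELD_GET/FIELD_SET helpers and db_depth_size_pack() are illustrative only and are not part of this patch (the amdgpu driver provides its own accessors of this shape).

/* Illustrative accessors built on the REG__FIELD__SHIFT / REG__FIELD_MASK
 * naming convention used throughout this header. Not part of this patch. */
#define FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define FIELD_SET(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Example: pack a depth surface extent into a DB_DEPTH_SIZE value using the
 * X_MAX/Y_MAX fields defined just above. */
static inline unsigned int db_depth_size_pack(unsigned int x_max, unsigned int y_max)
{
	unsigned int v = 0;

	v = FIELD_SET(v, DB_DEPTH_SIZE, X_MAX, x_max);
	v = FIELD_SET(v, DB_DEPTH_SIZE, Y_MAX, y_max);
	return v;
}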
+//DB_DEPTH_BOUNDS_MIN
+#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xFFFFFFFFL
+//DB_DEPTH_BOUNDS_MAX
+#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xFFFFFFFFL
+//DB_STENCIL_CLEAR
+#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x0
+#define DB_STENCIL_CLEAR__CLEAR_MASK 0x000000FFL
+//DB_DEPTH_CLEAR
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x0
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xFFFFFFFFL
+//PA_SC_SCREEN_SCISSOR_TL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_SCISSOR_BR
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xFFFF0000L
+//DB_Z_INFO
+#define DB_Z_INFO__FORMAT__SHIFT 0x0
+#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x2
+#define DB_Z_INFO__SW_MODE__SHIFT 0x4
+#define DB_Z_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_Z_INFO__FAULT_BEHAVIOR__SHIFT 0xd
+#define DB_Z_INFO__ITERATE_FLUSH__SHIFT 0xf
+#define DB_Z_INFO__MAXMIP__SHIFT 0x10
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES__SHIFT 0x17
+#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_Z_INFO__READ_SIZE__SHIFT 0x1c
+#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x1d
+#define DB_Z_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
+#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x1f
+#define DB_Z_INFO__FORMAT_MASK 0x00000003L
+#define DB_Z_INFO__NUM_SAMPLES_MASK 0x0000000CL
+#define DB_Z_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_Z_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_Z_INFO__FAULT_BEHAVIOR_MASK 0x00006000L
+#define DB_Z_INFO__ITERATE_FLUSH_MASK 0x00008000L
+#define DB_Z_INFO__MAXMIP_MASK 0x000F0000L
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES_MASK 0x07800000L
+#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_Z_INFO__READ_SIZE_MASK 0x10000000L
+#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000L
+#define DB_Z_INFO__CLEAR_DISALLOWED_MASK 0x40000000L
+#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000L
+//DB_STENCIL_INFO
+#define DB_STENCIL_INFO__FORMAT__SHIFT 0x0
+#define DB_STENCIL_INFO__SW_MODE__SHIFT 0x4
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR__SHIFT 0xd
+#define DB_STENCIL_INFO__ITERATE_FLUSH__SHIFT 0xf
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x1d
+#define DB_STENCIL_INFO__CLEAR_DISALLOWED__SHIFT 0x1e
+#define DB_STENCIL_INFO__FORMAT_MASK 0x00000001L
+#define DB_STENCIL_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR_MASK 0x00006000L
+#define DB_STENCIL_INFO__ITERATE_FLUSH_MASK 0x00008000L
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000L
+#define DB_STENCIL_INFO__CLEAR_DISALLOWED_MASK 0x40000000L
+//DB_Z_READ_BASE
+#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_Z_READ_BASE_HI
+#define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_READ_BASE
+#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_READ_BASE_HI
+#define DB_STENCIL_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_Z_WRITE_BASE
+#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_Z_WRITE_BASE_HI
+#define DB_Z_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_WRITE_BASE
+#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_WRITE_BASE_HI
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_DFSM_CONTROL
+#define DB_DFSM_CONTROL__PUNCHOUT_MODE__SHIFT 0x0
+#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP__SHIFT 0x2
+#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW__SHIFT 0x3
+#define DB_DFSM_CONTROL__PUNCHOUT_MODE_MASK 0x00000003L
+#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP_MASK 0x00000004L
+#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW_MASK 0x00000008L
+//DB_Z_INFO2
+#define DB_Z_INFO2__EPITCH__SHIFT 0x0
+#define DB_Z_INFO2__EPITCH_MASK 0x0000FFFFL
+//DB_STENCIL_INFO2
+#define DB_STENCIL_INFO2__EPITCH__SHIFT 0x0
+#define DB_STENCIL_INFO2__EPITCH_MASK 0x0000FFFFL
+//TA_BC_BASE_ADDR
+#define TA_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
+#define TA_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
+//TA_BC_BASE_ADDR_HI
+#define TA_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
+#define TA_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_1
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_2
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_3
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_2
+#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_3
+#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_WINDOW_OFFSET
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x0
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x10
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0x0000FFFFL
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xFFFF0000L
+//PA_SC_WINDOW_SCISSOR_TL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_WINDOW_SCISSOR_BR
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_RULE
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x0
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0x0000FFFFL
+//PA_SC_CLIPRECT_0_TL
+#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_0_BR
+#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_TL
+#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_BR
+#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_TL
+#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_BR
+#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_TL
+#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_BR
+#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_EDGERULE
+#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x0
+#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x4
+#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x8
+#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0xc
+#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x12
+#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x18
+#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x1c
+#define PA_SC_EDGERULE__ER_TRI_MASK 0x0000000FL
+#define PA_SC_EDGERULE__ER_POINT_MASK 0x000000F0L
+#define PA_SC_EDGERULE__ER_RECT_MASK 0x00000F00L
+#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x0003F000L
+#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0x00FC0000L
+#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0x0F000000L
+#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xF0000000L
+//PA_SU_HARDWARE_SCREEN_OFFSET
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x0
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x10
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x000001FFL
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x01FF0000L
+//CB_TARGET_MASK
+#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x0
+#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x4
+#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x8
+#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0xc
+#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x10
+#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x14
+#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x18
+#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x1c
+#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0x0000000FL
+#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0x000000F0L
+#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0x00000F00L
+#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0x0000F000L
+#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0x000F0000L
+#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0x00F00000L
+#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0x0F000000L
+#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xF0000000L
+//CB_SHADER_MASK
+#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x0
+#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x4
+#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x8
+#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0xc
+#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x10
+#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x14
+#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x18
+#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x1c
+#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0x0000000FL
+#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0x000000F0L
+#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0x00000F00L
+#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0x0000F000L
+#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0x000F0000L
+#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0x00F00000L
+#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0x0F000000L
+#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xF0000000L
+//PA_SC_GENERIC_SCISSOR_TL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_GENERIC_SCISSOR_BR
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//COHER_DEST_BASE_0
+#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_1
+#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_SCISSOR_0_TL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_0_BR
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_1_TL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_1_BR
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_2_TL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_2_BR
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_3_TL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_3_BR
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_4_TL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_4_BR
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_5_TL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_5_BR
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_6_TL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_6_BR
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_7_TL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_7_BR
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_8_TL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_8_BR
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_9_TL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_9_BR
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_10_TL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_10_BR
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_11_TL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_11_BR
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_12_TL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_12_BR
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_13_TL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_13_BR
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_14_TL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_14_BR
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_15_TL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_15_BR
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_ZMIN_0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_1
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_1
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_2
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_2
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_3
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_3
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_4
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_4
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_5
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_5
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_6
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_6
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_7
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_7
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_8
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_8
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_9
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_9
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_10
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_10
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_11
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_11
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_12
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_12
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_13
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_13
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_14
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_14
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_15
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_15
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_RASTER_CONFIG
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x4
+#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x6
+#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x7
+#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x8
+#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0xa
+#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0xc
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0xe
+#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x10
+#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x12
+#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x14
+#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x18
+#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x1a
+#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x1d
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0x0000000CL
+#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x00000030L
+#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x00000040L
+#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x00000080L
+#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x00000300L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0x00000C00L
+#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x00003000L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0x0000C000L
+#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x00030000L
+#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0x000C0000L
+#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x00300000L
+#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x03000000L
+#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0x1C000000L
+#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0xE0000000L
+//PA_SC_RASTER_CONFIG_1
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x5
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0x0000001CL
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x000000E0L
+//PA_SC_SCREEN_EXTENT_CONTROL
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x2
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x00000003L
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE_MASK 0x0000000CL
+//PA_SC_TILE_STEERING_OVERRIDE
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE__SHIFT 0x1
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE__SHIFT 0x5
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE_MASK 0x00000006L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE_MASK 0x00000060L
+//CP_PERFMON_CNTX_CNTL
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
+//CP_PIPEID
+#define CP_PIPEID__PIPE_ID__SHIFT 0x0
+#define CP_PIPEID__PIPE_ID_MASK 0x00000003L
+//CP_RINGID
+#define CP_RINGID__RINGID__SHIFT 0x0
+#define CP_RINGID__RINGID_MASK 0x00000003L
+//CP_VMID
+#define CP_VMID__VMID__SHIFT 0x0
+#define CP_VMID__VMID_MASK 0x0000000FL
+//PA_SC_RIGHT_VERT_GRID
+#define PA_SC_RIGHT_VERT_GRID__LEFT_QTR__SHIFT 0x0
+#define PA_SC_RIGHT_VERT_GRID__LEFT_HALF__SHIFT 0x8
+#define PA_SC_RIGHT_VERT_GRID__RIGHT_HALF__SHIFT 0x10
+#define PA_SC_RIGHT_VERT_GRID__RIGHT_QTR__SHIFT 0x18
+#define PA_SC_RIGHT_VERT_GRID__LEFT_QTR_MASK 0x000000FFL
+#define PA_SC_RIGHT_VERT_GRID__LEFT_HALF_MASK 0x0000FF00L
+#define PA_SC_RIGHT_VERT_GRID__RIGHT_HALF_MASK 0x00FF0000L
+#define PA_SC_RIGHT_VERT_GRID__RIGHT_QTR_MASK 0xFF000000L
+//PA_SC_LEFT_VERT_GRID
+#define PA_SC_LEFT_VERT_GRID__LEFT_QTR__SHIFT 0x0
+#define PA_SC_LEFT_VERT_GRID__LEFT_HALF__SHIFT 0x8
+#define PA_SC_LEFT_VERT_GRID__RIGHT_HALF__SHIFT 0x10
+#define PA_SC_LEFT_VERT_GRID__RIGHT_QTR__SHIFT 0x18
+#define PA_SC_LEFT_VERT_GRID__LEFT_QTR_MASK 0x000000FFL
+#define PA_SC_LEFT_VERT_GRID__LEFT_HALF_MASK 0x0000FF00L
+#define PA_SC_LEFT_VERT_GRID__RIGHT_HALF_MASK 0x00FF0000L
+#define PA_SC_LEFT_VERT_GRID__RIGHT_QTR_MASK 0xFF000000L
+//PA_SC_HORIZ_GRID
+#define PA_SC_HORIZ_GRID__TOP_QTR__SHIFT 0x0
+#define PA_SC_HORIZ_GRID__TOP_HALF__SHIFT 0x8
+#define PA_SC_HORIZ_GRID__BOT_HALF__SHIFT 0x10
+#define PA_SC_HORIZ_GRID__BOT_QTR__SHIFT 0x18
+#define PA_SC_HORIZ_GRID__TOP_QTR_MASK 0x000000FFL
+#define PA_SC_HORIZ_GRID__TOP_HALF_MASK 0x0000FF00L
+#define PA_SC_HORIZ_GRID__BOT_HALF_MASK 0x00FF0000L
+#define PA_SC_HORIZ_GRID__BOT_QTR_MASK 0xFF000000L
+//VGT_MULTI_PRIM_IB_RESET_INDX
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
+//CB_BLEND_RED
+#define CB_BLEND_RED__BLEND_RED__SHIFT 0x0
+#define CB_BLEND_RED__BLEND_RED_MASK 0xFFFFFFFFL
+//CB_BLEND_GREEN
+#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x0
+#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xFFFFFFFFL
+//CB_BLEND_BLUE
+#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x0
+#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xFFFFFFFFL
+//CB_BLEND_ALPHA
+#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x0
+#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xFFFFFFFFL
+//CB_DCC_CONTROL
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE__SHIFT 0x1
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK__SHIFT 0x2
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01__SHIFT 0x8
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE__SHIFT 0x9
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0xa
+#define CB_DCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01__SHIFT 0xc
+#define CB_DCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE__SHIFT 0xd
+#define CB_DCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG__SHIFT 0xe
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_MRT_SHARING_DISABLE_MASK 0x00000002L
+#define CB_DCC_CONTROL__OVERWRITE_COMBINER_WATERMARK_MASK 0x0000007CL
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01_MASK 0x00000100L
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE_MASK 0x00000200L
+#define CB_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00000400L
+#define CB_DCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01_MASK 0x00001000L
+#define CB_DCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE_MASK 0x00002000L
+#define CB_DCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG_MASK 0x00004000L
+//DB_STENCIL_CONTROL
+#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x0
+#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x4
+#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x8
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0xc
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x10
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x14
+#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0x0000000FL
+#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0x000000F0L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0x00000F00L
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0x0000F000L
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0x000F0000L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0x00F00000L
+//DB_STENCILREFMASK
+#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x0
+#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x8
+#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x10
+#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x18
+#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0x000000FFL
+#define DB_STENCILREFMASK__STENCILMASK_MASK 0x0000FF00L
+#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0x00FF0000L
+#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xFF000000L
+//DB_STENCILREFMASK_BF
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x0
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x8
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x10
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x18
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0x000000FFL
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0x0000FF00L
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0x00FF0000L
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xFF000000L
+//PA_CL_VPORT_XSCALE
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_1
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_1
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_1
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_1
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_1
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_1
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_2
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_2
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_2
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_2
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_2
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_2
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_3
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_3
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_3
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_3
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_3
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_3
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_4
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_4
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_4
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_4
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_4
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_4
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_5
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_5
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_5
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_5
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_5
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_5
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_6
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_6
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_6
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_6
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_6
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_6
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_7
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_7
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_7
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_7
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_7
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_7
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_8
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_8
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_8
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_8
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_8
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_8
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_9
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_9
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_9
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_9
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_9
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_9
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_10
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_10
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_10
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_10
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_10
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_10
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_11
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_11
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_11
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_11
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_11
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_11
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_12
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_12
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_12
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_12
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_12
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_12
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_13
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_13
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_13
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_13
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_13
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_13
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_14
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_14
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_14
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_14
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_14
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_14
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_15
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_15
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_15
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_15
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_15
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_15
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_X
+#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Y
+#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Z
+#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_W
+#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_X
+#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Y
+#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Z
+#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_W
+#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_X
+#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Y
+#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Z
+#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_W
+#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_X
+#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Y
+#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Z
+#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_W
+#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_X
+#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Y
+#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Z
+#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_W
+#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_X
+#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Y
+#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Z
+#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_W
+#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_PROG_NEAR_CLIP_Z
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//SPI_PS_INPUT_CNTL_0
+#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_0__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_0__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_1
+#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_1__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_1__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_2
+#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_2__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_2__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_3
+#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_3__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_3__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_4
+#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_4__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_4__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_5
+#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_5__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_5__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_6
+#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_6__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_6__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_7
+#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_7__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_7__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_8
+#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_8__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_8__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_9
+#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_9__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_9__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_10
+#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_10__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_10__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_11
+#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_11__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_11__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_12
+#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_12__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_12__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_13
+#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_13__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_13__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_14
+#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_14__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_14__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_15
+#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_15__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_15__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_16
+#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_16__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_16__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_17
+#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_17__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_17__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_18
+#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_18__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_18__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_19
+#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_19__CYL_WRAP__SHIFT 0xd
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_19__CYL_WRAP_MASK 0x0001E000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_20
+#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_21
+#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_22
+#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_23
+#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_24
+#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_25
+#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_26
+#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_27
+#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_28
+#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_29
+#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_30
+#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_31
+#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID_MASK 0x02000000L
+//SPI_VS_OUT_CONFIG
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x1
+#define SPI_VS_OUT_CONFIG__VS_HALF_PACK__SHIFT 0x6
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x0000003EL
+#define SPI_VS_OUT_CONFIG__VS_HALF_PACK_MASK 0x00000040L
+//SPI_PS_INPUT_ENA
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x00008000L
+//SPI_PS_INPUT_ADDR
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x00008000L
+//SPI_INTERP_CONTROL_0
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x0
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x1
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x2
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x5
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x8
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0xb
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0xe
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x00000001L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x00000002L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x0000001CL
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0x000000E0L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x00000700L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x00003800L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x00004000L
+//SPI_PS_IN_CONTROL
+#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x0
+#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x6
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN__SHIFT 0x7
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC__SHIFT 0x8
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0xe
+#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x0000003FL
+#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x00000040L
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN_MASK 0x00000080L
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC_MASK 0x00000100L
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x00004000L
+//SPI_BARYC_CNTL
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x0
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x4
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x8
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0xc
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x10
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x14
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x18
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x00000001L
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x00000010L
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x00000100L
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x00001000L
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x00030000L
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x00100000L
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x01000000L
+//SPI_TMPRING_SIZE
+#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define SPI_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x01FFF000L
+//SPI_SHADER_POS_FORMAT
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0x0000F000L
+//SPI_SHADER_Z_FORMAT
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0x0000000FL
+//SPI_SHADER_COL_FORMAT
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x10
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x14
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x18
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x1c
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0x0000F000L
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0x000F0000L
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0x00F00000L
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0x0F000000L
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xF0000000L
+//SX_PS_DOWNCONVERT
+#define SX_PS_DOWNCONVERT__MRT0__SHIFT 0x0
+#define SX_PS_DOWNCONVERT__MRT1__SHIFT 0x4
+#define SX_PS_DOWNCONVERT__MRT2__SHIFT 0x8
+#define SX_PS_DOWNCONVERT__MRT3__SHIFT 0xc
+#define SX_PS_DOWNCONVERT__MRT4__SHIFT 0x10
+#define SX_PS_DOWNCONVERT__MRT5__SHIFT 0x14
+#define SX_PS_DOWNCONVERT__MRT6__SHIFT 0x18
+#define SX_PS_DOWNCONVERT__MRT7__SHIFT 0x1c
+#define SX_PS_DOWNCONVERT__MRT0_MASK 0x0000000FL
+#define SX_PS_DOWNCONVERT__MRT1_MASK 0x000000F0L
+#define SX_PS_DOWNCONVERT__MRT2_MASK 0x00000F00L
+#define SX_PS_DOWNCONVERT__MRT3_MASK 0x0000F000L
+#define SX_PS_DOWNCONVERT__MRT4_MASK 0x000F0000L
+#define SX_PS_DOWNCONVERT__MRT5_MASK 0x00F00000L
+#define SX_PS_DOWNCONVERT__MRT6_MASK 0x0F000000L
+#define SX_PS_DOWNCONVERT__MRT7_MASK 0xF0000000L
+//SX_BLEND_OPT_EPSILON
+#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON__SHIFT 0x0
+#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON__SHIFT 0x4
+#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON__SHIFT 0x8
+#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON__SHIFT 0xc
+#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON__SHIFT 0x10
+#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON__SHIFT 0x14
+#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON__SHIFT 0x18
+#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON__SHIFT 0x1c
+#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON_MASK 0x0000000FL
+#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON_MASK 0x000000F0L
+#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON_MASK 0x00000F00L
+#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON_MASK 0x0000F000L
+#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON_MASK 0x000F0000L
+#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON_MASK 0x00F00000L
+#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON_MASK 0x0F000000L
+#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON_MASK 0xF0000000L
+//SX_BLEND_OPT_CONTROL
+#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE__SHIFT 0x0
+#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE__SHIFT 0x1
+#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE__SHIFT 0x4
+#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE__SHIFT 0x5
+#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE__SHIFT 0x8
+#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE__SHIFT 0x9
+#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE__SHIFT 0xc
+#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE__SHIFT 0xd
+#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE__SHIFT 0x10
+#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE__SHIFT 0x11
+#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE__SHIFT 0x14
+#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE__SHIFT 0x15
+#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE__SHIFT 0x18
+#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE__SHIFT 0x19
+#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE__SHIFT 0x1c
+#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE__SHIFT 0x1d
+#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE__SHIFT 0x1f
+#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE_MASK 0x00000001L
+#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE_MASK 0x00000002L
+#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE_MASK 0x00000010L
+#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE_MASK 0x00000020L
+#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE_MASK 0x00000100L
+#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE_MASK 0x00000200L
+#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE_MASK 0x00001000L
+#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE_MASK 0x00002000L
+#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE_MASK 0x00010000L
+#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE_MASK 0x00020000L
+#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE_MASK 0x00100000L
+#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE_MASK 0x00200000L
+#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE_MASK 0x01000000L
+#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE_MASK 0x02000000L
+#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE_MASK 0x10000000L
+#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE_MASK 0x20000000L
+#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE_MASK 0x80000000L
+//SX_MRT0_BLEND_OPT
+#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT1_BLEND_OPT
+#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT2_BLEND_OPT
+#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT3_BLEND_OPT
+#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT4_BLEND_OPT
+#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT5_BLEND_OPT
+#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT6_BLEND_OPT
+#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT7_BLEND_OPT
+#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//CB_BLEND0_CONTROL
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND1_CONTROL
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND2_CONTROL
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND3_CONTROL
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND4_CONTROL
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND5_CONTROL
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND6_CONTROL
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND7_CONTROL
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_MRT0_EPITCH
+#define CB_MRT0_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT0_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT1_EPITCH
+#define CB_MRT1_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT1_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT2_EPITCH
+#define CB_MRT2_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT2_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT3_EPITCH
+#define CB_MRT3_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT3_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT4_EPITCH
+#define CB_MRT4_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT4_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT5_EPITCH
+#define CB_MRT5_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT5_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT6_EPITCH
+#define CB_MRT6_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT6_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CB_MRT7_EPITCH
+#define CB_MRT7_EPITCH__EPITCH__SHIFT 0x0
+#define CB_MRT7_EPITCH__EPITCH_MASK 0x0000FFFFL
+//CS_COPY_STATE
+#define CS_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
+#define CS_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+//GFX_COPY_STATE
+#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
+#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+//PA_CL_POINT_X_RAD
+#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_Y_RAD
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_SIZE
+#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_CULL_RAD
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//VGT_DMA_BASE_HI
+#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0x0000FFFFL
+//VGT_DMA_BASE
+#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE__BASE_ADDR_MASK 0xFFFFFFFFL
+//VGT_DRAW_INITIATOR
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x0
+#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x2
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x4
+#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x5
+#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x6
+#define VGT_DRAW_INITIATOR__UNROLLED_INST__SHIFT 0x7
+#define VGT_DRAW_INITIATOR__GRBM_SKEW_NO_DEC__SHIFT 0x8
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX__SHIFT 0x1d
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x00000003L
+#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0x0000000CL
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x00000010L
+#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x00000020L
+#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x00000040L
+#define VGT_DRAW_INITIATOR__UNROLLED_INST_MASK 0x00000080L
+#define VGT_DRAW_INITIATOR__GRBM_SKEW_NO_DEC_MASK 0x00000100L
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX_MASK 0xE0000000L
+//VGT_IMMED_DATA
+#define VGT_IMMED_DATA__DATA__SHIFT 0x0
+#define VGT_IMMED_DATA__DATA_MASK 0xFFFFFFFFL
+//VGT_EVENT_ADDRESS_REG
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x0
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0x0FFFFFFFL
+//DB_DEPTH_CONTROL
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x0
+#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x1
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x2
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x3
+#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x4
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x7
+#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x8
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x14
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x1e
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x1f
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x00000001L
+#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x00000002L
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x00000004L
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x00000008L
+#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x00000070L
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x00000080L
+#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x00000700L
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x00700000L
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000L
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000L
+//DB_EQAA
+#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x0
+#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x4
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x8
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0xc
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x10
+#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x11
+#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x12
+#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x13
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x14
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x15
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x18
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x1b
+#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x00000007L
+#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x00000070L
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x00000700L
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x00007000L
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x00010000L
+#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x00020000L
+#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x00040000L
+#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x00080000L
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x00100000L
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x00200000L
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x07000000L
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x08000000L
+//CB_COLOR_CONTROL
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD__SHIFT 0x0
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x3
+#define CB_COLOR_CONTROL__MODE__SHIFT 0x4
+#define CB_COLOR_CONTROL__ROP3__SHIFT 0x10
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD_MASK 0x00000001L
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x00000008L
+#define CB_COLOR_CONTROL__MODE_MASK 0x00000070L
+#define CB_COLOR_CONTROL__ROP3_MASK 0x00FF0000L
+//DB_SHADER_CONTROL
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x0
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x1
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x2
+#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x4
+#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x6
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x7
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x8
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x9
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0xa
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0xb
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0xc
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0xd
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE__SHIFT 0xf
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER__SHIFT 0x10
+#define DB_SHADER_CONTROL__EXEC_IF_OVERLAPPED__SHIFT 0x11
+#define DB_SHADER_CONTROL__POPS_OVERLAP_NUM_SAMPLES__SHIFT 0x14
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x00000001L
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x00000002L
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x00000004L
+#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x00000030L
+#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x00000040L
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x00000080L
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x00000100L
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x00000200L
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x00000400L
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x00000800L
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x00001000L
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x00006000L
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE_MASK 0x00008000L
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER_MASK 0x00010000L
+#define DB_SHADER_CONTROL__EXEC_IF_OVERLAPPED_MASK 0x00020000L
+#define DB_SHADER_CONTROL__POPS_OVERLAP_NUM_SAMPLES_MASK 0x00700000L
+//PA_CL_CLIP_CNTL
+#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x0
+#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x1
+#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x2
+#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x3
+#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x4
+#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x5
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0xd
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0xe
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x10
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x11
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x12
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x13
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x14
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x15
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x16
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x18
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x19
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x1a
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x1b
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA__SHIFT 0x1c
+#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x00000001L
+#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x00000002L
+#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x00000004L
+#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x00000008L
+#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x00000010L
+#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x00000020L
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x00002000L
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0x0000C000L
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x00010000L
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x00020000L
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x00040000L
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x00080000L
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x00100000L
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x00200000L
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x00400000L
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x01000000L
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x02000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x04000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x08000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA_MASK 0x10000000L
+//PA_SU_SC_MODE_CNTL
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x0
+#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x1
+#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x2
+#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x3
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x5
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x8
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0xb
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0xc
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0xd
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x10
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x13
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x14
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x15
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF__SHIFT 0x16
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION__SHIFT 0x17
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x00000001L
+#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x00000002L
+#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x00000004L
+#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x00000018L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0x000000E0L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x00000700L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x00000800L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x00001000L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x00002000L
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x00010000L
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x00080000L
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x00100000L
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x00200000L
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF_MASK 0x00400000L
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION_MASK 0x00800000L
+//PA_CL_VTE_CNTL
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x0
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x1
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x2
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x3
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x4
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x5
+#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x8
+#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x9
+#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0xa
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0xb
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x00000001L
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x00000002L
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x00000004L
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x00000008L
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x00000010L
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x00000020L
+#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100L
+#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200L
+#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x00000400L
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x00000800L
+//PA_CL_VS_OUT_CNTL
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x0
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x1
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x2
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x3
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x4
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x5
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x6
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x7
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x8
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x9
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0xa
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0xb
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0xc
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0xd
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0xe
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0xf
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x10
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x11
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x12
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x13
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x14
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x15
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x16
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x17
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
+#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x19
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1a
+#define PA_CL_VS_OUT_CNTL__USE_VTX_SHD_OBJPRIM_ID__SHIFT 0x1b
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x00000002L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x00000004L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x00000008L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x00000010L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x00000020L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x00000040L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x00000080L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x00000100L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x00000200L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x00000400L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x00000800L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x00001000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x00002000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x00004000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x00008000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x00010000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x00020000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x00040000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x00080000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x00100000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x00200000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x00400000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x00800000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x02000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x04000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_SHD_OBJPRIM_ID_MASK 0x08000000L
+//PA_CL_NANINF_CNTL
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x0
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x1
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x2
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x3
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x4
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x5
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x6
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x7
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x8
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x9
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0xa
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0xb
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0xc
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0xd
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0xe
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x14
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x00000001L
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x00000002L
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x00000004L
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x00000008L
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x00000010L
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x00000020L
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x00000040L
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x00000080L
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x00000100L
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x00000200L
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x00000400L
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x00000800L
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x00001000L
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x00002000L
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x00004000L
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x00100000L
+//PA_SU_LINE_STIPPLE_CNTL
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x2
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x3
+#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST__SHIFT 0x4
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x00000003L
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x00000004L
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x00000008L
+#define PA_SU_LINE_STIPPLE_CNTL__DIAMOND_ADJUST_MASK 0x00000010L
+//PA_SU_LINE_STIPPLE_SCALE
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xFFFFFFFFL
+//PA_SU_PRIM_FILTER_CNTL
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x0
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x4
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x5
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x6
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x7
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x8
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x1e
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x1f
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000001L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x00000010L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x00000020L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x00000040L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x00000080L
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0x0000FF00L
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000L
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000L
+//PA_SU_SMALL_PRIM_FILTER_CNTL
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE__SHIFT 0x0
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE__SHIFT 0x6
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE_MASK 0x00000040L
+//PA_CL_OBJPRIM_ID_CNTL
+#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL__SHIFT 0x0
+#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID__SHIFT 0x1
+#define PA_CL_OBJPRIM_ID_CNTL__EN_32BIT_OBJPRIMID__SHIFT 0x2
+#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL_MASK 0x00000001L
+#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID_MASK 0x00000002L
+#define PA_CL_OBJPRIM_ID_CNTL__EN_32BIT_OBJPRIMID_MASK 0x00000004L
+//PA_CL_NGG_CNTL
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF__SHIFT 0x0
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA__SHIFT 0x1
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF_MASK 0x00000001L
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA_MASK 0x00000002L
+//PA_SU_OVER_RASTERIZATION_CNTL
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES__SHIFT 0x0
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES__SHIFT 0x1
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS__SHIFT 0x2
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES__SHIFT 0x3
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW__SHIFT 0x4
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES_MASK 0x00000001L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES_MASK 0x00000002L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS_MASK 0x00000004L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES_MASK 0x00000008L
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW_MASK 0x00000010L
+//PA_STEREO_CNTL
+#define PA_STEREO_CNTL__EN_STEREO__SHIFT 0x0
+#define PA_STEREO_CNTL__STEREO_MODE__SHIFT 0x1
+#define PA_STEREO_CNTL__RT_SLICE_MODE__SHIFT 0x5
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET__SHIFT 0x8
+#define PA_STEREO_CNTL__VP_ID_MODE__SHIFT 0xa
+#define PA_STEREO_CNTL__VP_ID_OFFSET__SHIFT 0xd
+#define PA_STEREO_CNTL__EN_STEREO_MASK 0x00000001L
+#define PA_STEREO_CNTL__STEREO_MODE_MASK 0x0000001EL
+#define PA_STEREO_CNTL__RT_SLICE_MODE_MASK 0x000000E0L
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET_MASK 0x00000300L
+#define PA_STEREO_CNTL__VP_ID_MODE_MASK 0x00001C00L
+#define PA_STEREO_CNTL__VP_ID_OFFSET_MASK 0x0001E000L
+//PA_SU_POINT_SIZE
+#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
+#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
+#define PA_SU_POINT_SIZE__HEIGHT_MASK 0x0000FFFFL
+#define PA_SU_POINT_SIZE__WIDTH_MASK 0xFFFF0000L
+//PA_SU_POINT_MINMAX
+#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x0
+#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x10
+#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000FFFFL
+#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xFFFF0000L
+//PA_SU_LINE_CNTL
+#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x0
+#define PA_SU_LINE_CNTL__WIDTH_MASK 0x0000FFFFL
+//PA_SC_LINE_STIPPLE
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x10
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x1c
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x1d
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0x0000FFFFL
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0x00FF0000L
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000L
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000L
+//VGT_OUTPUT_PATH_CNTL
+#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT__SHIFT 0x0
+#define VGT_OUTPUT_PATH_CNTL__PATH_SELECT_MASK 0x00000007L
+//VGT_HOS_CNTL
+#define VGT_HOS_CNTL__TESS_MODE__SHIFT 0x0
+#define VGT_HOS_CNTL__TESS_MODE_MASK 0x00000003L
+//VGT_HOS_MAX_TESS_LEVEL
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x0
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xFFFFFFFFL
+//VGT_HOS_MIN_TESS_LEVEL
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x0
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xFFFFFFFFL
+//VGT_HOS_REUSE_DEPTH
+#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH__SHIFT 0x0
+#define VGT_HOS_REUSE_DEPTH__REUSE_DEPTH_MASK 0x000000FFL
+//VGT_GROUP_PRIM_TYPE
+#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE__SHIFT 0x0
+#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER__SHIFT 0xe
+#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS__SHIFT 0xf
+#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER__SHIFT 0x10
+#define VGT_GROUP_PRIM_TYPE__PRIM_TYPE_MASK 0x0000001FL
+#define VGT_GROUP_PRIM_TYPE__RETAIN_ORDER_MASK 0x00004000L
+#define VGT_GROUP_PRIM_TYPE__RETAIN_QUADS_MASK 0x00008000L
+#define VGT_GROUP_PRIM_TYPE__PRIM_ORDER_MASK 0x00070000L
+//VGT_GROUP_FIRST_DECR
+#define VGT_GROUP_FIRST_DECR__FIRST_DECR__SHIFT 0x0
+#define VGT_GROUP_FIRST_DECR__FIRST_DECR_MASK 0x0000000FL
+//VGT_GROUP_DECR
+#define VGT_GROUP_DECR__DECR__SHIFT 0x0
+#define VGT_GROUP_DECR__DECR_MASK 0x0000000FL
+//VGT_GROUP_VECT_0_CNTL
+#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN__SHIFT 0x0
+#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN__SHIFT 0x1
+#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN__SHIFT 0x2
+#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN__SHIFT 0x3
+#define VGT_GROUP_VECT_0_CNTL__STRIDE__SHIFT 0x8
+#define VGT_GROUP_VECT_0_CNTL__SHIFT__SHIFT 0x10
+#define VGT_GROUP_VECT_0_CNTL__COMP_X_EN_MASK 0x00000001L
+#define VGT_GROUP_VECT_0_CNTL__COMP_Y_EN_MASK 0x00000002L
+#define VGT_GROUP_VECT_0_CNTL__COMP_Z_EN_MASK 0x00000004L
+#define VGT_GROUP_VECT_0_CNTL__COMP_W_EN_MASK 0x00000008L
+#define VGT_GROUP_VECT_0_CNTL__STRIDE_MASK 0x0000FF00L
+#define VGT_GROUP_VECT_0_CNTL__SHIFT_MASK 0x00FF0000L
+//VGT_GROUP_VECT_1_CNTL
+#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN__SHIFT 0x0
+#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN__SHIFT 0x1
+#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN__SHIFT 0x2
+#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN__SHIFT 0x3
+#define VGT_GROUP_VECT_1_CNTL__STRIDE__SHIFT 0x8
+#define VGT_GROUP_VECT_1_CNTL__SHIFT__SHIFT 0x10
+#define VGT_GROUP_VECT_1_CNTL__COMP_X_EN_MASK 0x00000001L
+#define VGT_GROUP_VECT_1_CNTL__COMP_Y_EN_MASK 0x00000002L
+#define VGT_GROUP_VECT_1_CNTL__COMP_Z_EN_MASK 0x00000004L
+#define VGT_GROUP_VECT_1_CNTL__COMP_W_EN_MASK 0x00000008L
+#define VGT_GROUP_VECT_1_CNTL__STRIDE_MASK 0x0000FF00L
+#define VGT_GROUP_VECT_1_CNTL__SHIFT_MASK 0x00FF0000L
+//VGT_GROUP_VECT_0_FMT_CNTL
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV__SHIFT 0x0
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET__SHIFT 0x4
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV__SHIFT 0x8
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET__SHIFT 0xc
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV__SHIFT 0x10
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET__SHIFT 0x14
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV__SHIFT 0x18
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET__SHIFT 0x1c
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_CONV_MASK 0x0000000FL
+#define VGT_GROUP_VECT_0_FMT_CNTL__X_OFFSET_MASK 0x000000F0L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_CONV_MASK 0x00000F00L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Y_OFFSET_MASK 0x0000F000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_CONV_MASK 0x000F0000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__Z_OFFSET_MASK 0x00F00000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_CONV_MASK 0x0F000000L
+#define VGT_GROUP_VECT_0_FMT_CNTL__W_OFFSET_MASK 0xF0000000L
+//VGT_GROUP_VECT_1_FMT_CNTL
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV__SHIFT 0x0
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET__SHIFT 0x4
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV__SHIFT 0x8
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET__SHIFT 0xc
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV__SHIFT 0x10
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET__SHIFT 0x14
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV__SHIFT 0x18
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET__SHIFT 0x1c
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_CONV_MASK 0x0000000FL
+#define VGT_GROUP_VECT_1_FMT_CNTL__X_OFFSET_MASK 0x000000F0L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_CONV_MASK 0x00000F00L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Y_OFFSET_MASK 0x0000F000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_CONV_MASK 0x000F0000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__Z_OFFSET_MASK 0x00F00000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_CONV_MASK 0x0F000000L
+#define VGT_GROUP_VECT_1_FMT_CNTL__W_OFFSET_MASK 0xF0000000L
+//VGT_GS_MODE
+#define VGT_GS_MODE__MODE__SHIFT 0x0
+#define VGT_GS_MODE__RESERVED_0__SHIFT 0x3
+#define VGT_GS_MODE__CUT_MODE__SHIFT 0x4
+#define VGT_GS_MODE__RESERVED_1__SHIFT 0x6
+#define VGT_GS_MODE__GS_C_PACK_EN__SHIFT 0xb
+#define VGT_GS_MODE__RESERVED_2__SHIFT 0xc
+#define VGT_GS_MODE__ES_PASSTHRU__SHIFT 0xd
+#define VGT_GS_MODE__RESERVED_3__SHIFT 0xe
+#define VGT_GS_MODE__RESERVED_4__SHIFT 0xf
+#define VGT_GS_MODE__RESERVED_5__SHIFT 0x10
+#define VGT_GS_MODE__PARTIAL_THD_AT_EOI__SHIFT 0x11
+#define VGT_GS_MODE__SUPPRESS_CUTS__SHIFT 0x12
+#define VGT_GS_MODE__ES_WRITE_OPTIMIZE__SHIFT 0x13
+#define VGT_GS_MODE__GS_WRITE_OPTIMIZE__SHIFT 0x14
+#define VGT_GS_MODE__ONCHIP__SHIFT 0x15
+#define VGT_GS_MODE__MODE_MASK 0x00000007L
+#define VGT_GS_MODE__RESERVED_0_MASK 0x00000008L
+#define VGT_GS_MODE__CUT_MODE_MASK 0x00000030L
+#define VGT_GS_MODE__RESERVED_1_MASK 0x000007C0L
+#define VGT_GS_MODE__GS_C_PACK_EN_MASK 0x00000800L
+#define VGT_GS_MODE__RESERVED_2_MASK 0x00001000L
+#define VGT_GS_MODE__ES_PASSTHRU_MASK 0x00002000L
+#define VGT_GS_MODE__RESERVED_3_MASK 0x00004000L
+#define VGT_GS_MODE__RESERVED_4_MASK 0x00008000L
+#define VGT_GS_MODE__RESERVED_5_MASK 0x00010000L
+#define VGT_GS_MODE__PARTIAL_THD_AT_EOI_MASK 0x00020000L
+#define VGT_GS_MODE__SUPPRESS_CUTS_MASK 0x00040000L
+#define VGT_GS_MODE__ES_WRITE_OPTIMIZE_MASK 0x00080000L
+#define VGT_GS_MODE__GS_WRITE_OPTIMIZE_MASK 0x00100000L
+#define VGT_GS_MODE__ONCHIP_MASK 0x00600000L
+//VGT_GS_ONCHIP_CNTL
+#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP__SHIFT 0x0
+#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP__SHIFT 0xb
+#define VGT_GS_ONCHIP_CNTL__GS_INST_PRIMS_IN_SUBGRP__SHIFT 0x16
+#define VGT_GS_ONCHIP_CNTL__ES_VERTS_PER_SUBGRP_MASK 0x000007FFL
+#define VGT_GS_ONCHIP_CNTL__GS_PRIMS_PER_SUBGRP_MASK 0x003FF800L
+#define VGT_GS_ONCHIP_CNTL__GS_INST_PRIMS_IN_SUBGRP_MASK 0xFFC00000L
+//PA_SC_MODE_CNTL_0
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x1
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x2
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x3
+#define PA_SC_MODE_CNTL_0__SCALE_LINE_WIDTH_PAD__SHIFT 0x4
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE__SHIFT 0x5
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB__SHIFT 0x6
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_0__SCALE_LINE_WIDTH_PAD_MASK 0x00000010L
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE_MASK 0x00000020L
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB_MASK 0x00000040L
+//PA_SC_MODE_CNTL_1
+#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x1
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x2
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x3
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x4
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x7
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x8
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x9
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0xa
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0xb
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0xc
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0xd
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0xe
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0xf
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x10
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x11
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x12
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x13
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x14
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x18
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x19
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x1a
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x1b
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x1c
+#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x00000070L
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x00000080L
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x00000100L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x00000200L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x00000400L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x00000800L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x00001000L
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x00002000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x00004000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x00008000L
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x00010000L
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x00020000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x00040000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x00080000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0x00F00000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x01000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x02000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x04000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x08000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000L
+//VGT_ENHANCE
+#define VGT_ENHANCE__MISC__SHIFT 0x0
+#define VGT_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_GS_PER_ES
+#define VGT_GS_PER_ES__GS_PER_ES__SHIFT 0x0
+#define VGT_GS_PER_ES__GS_PER_ES_MASK 0x000007FFL
+//VGT_ES_PER_GS
+#define VGT_ES_PER_GS__ES_PER_GS__SHIFT 0x0
+#define VGT_ES_PER_GS__ES_PER_GS_MASK 0x000007FFL
+//VGT_GS_PER_VS
+#define VGT_GS_PER_VS__GS_PER_VS__SHIFT 0x0
+#define VGT_GS_PER_VS__GS_PER_VS_MASK 0x0000000FL
+//VGT_GSVS_RING_OFFSET_1
+#define VGT_GSVS_RING_OFFSET_1__OFFSET__SHIFT 0x0
+#define VGT_GSVS_RING_OFFSET_1__OFFSET_MASK 0x00007FFFL
+//VGT_GSVS_RING_OFFSET_2
+#define VGT_GSVS_RING_OFFSET_2__OFFSET__SHIFT 0x0
+#define VGT_GSVS_RING_OFFSET_2__OFFSET_MASK 0x00007FFFL
+//VGT_GSVS_RING_OFFSET_3
+#define VGT_GSVS_RING_OFFSET_3__OFFSET__SHIFT 0x0
+#define VGT_GSVS_RING_OFFSET_3__OFFSET_MASK 0x00007FFFL
+//VGT_GS_OUT_PRIM_TYPE
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x0
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1__SHIFT 0x8
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2__SHIFT 0x10
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3__SHIFT 0x16
+#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM__SHIFT 0x1f
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x0000003FL
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_1_MASK 0x00003F00L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_2_MASK 0x003F0000L
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_3_MASK 0x0FC00000L
+#define VGT_GS_OUT_PRIM_TYPE__UNIQUE_TYPE_PER_STREAM_MASK 0x80000000L
+//IA_ENHANCE
+#define IA_ENHANCE__MISC__SHIFT 0x0
+#define IA_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_DMA_SIZE
+#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x0
+#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_DMA_MAX_SIZE
+#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x0
+#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xFFFFFFFFL
+//VGT_DMA_INDEX_TYPE
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x2
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x4
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x6
+#define VGT_DMA_INDEX_TYPE__PRIMGEN_EN__SHIFT 0x8
+#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x9
+#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0xa
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0x0000000CL
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x00000030L
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x00000040L
+#define VGT_DMA_INDEX_TYPE__PRIMGEN_EN_MASK 0x00000100L
+#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x00000200L
+#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x00000400L
+//WD_ENHANCE
+#define WD_ENHANCE__MISC__SHIFT 0x0
+#define WD_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_EN
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x0
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x1
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE__SHIFT 0x2
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x00000001L
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x00000002L
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE_MASK 0x00000004L
+//VGT_DMA_NUM_INSTANCES
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_RESET
+#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x0
+#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xFFFFFFFFL
+//VGT_EVENT_INITIATOR
+#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
+#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
+#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
+#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
+//VGT_GS_MAX_PRIMS_PER_SUBGROUP
+#define VGT_GS_MAX_PRIMS_PER_SUBGROUP__MAX_PRIMS_PER_SUBGROUP__SHIFT 0x0
+#define VGT_GS_MAX_PRIMS_PER_SUBGROUP__MAX_PRIMS_PER_SUBGROUP_MASK 0x0000FFFFL
+//VGT_DRAW_PAYLOAD_CNTL
+#define VGT_DRAW_PAYLOAD_CNTL__OBJPRIM_ID_EN__SHIFT 0x0
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX__SHIFT 0x1
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID__SHIFT 0x2
+#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN__SHIFT 0x3
+#define VGT_DRAW_PAYLOAD_CNTL__OBJPRIM_ID_EN_MASK 0x00000001L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID_MASK 0x00000004L
+#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN_MASK 0x00000008L
+//VGT_INSTANCE_STEP_RATE_0
+#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x0
+#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xFFFFFFFFL
+//VGT_INSTANCE_STEP_RATE_1
+#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE__SHIFT 0x0
+#define VGT_INSTANCE_STEP_RATE_1__STEP_RATE_MASK 0xFFFFFFFFL
+//IA_MULTI_VGT_PARAM_BC
+//VGT_ESGS_RING_ITEMSIZE
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
+//VGT_GSVS_RING_ITEMSIZE
+#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
+#define VGT_GSVS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
+//VGT_REUSE_OFF
+#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x0
+#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x00000001L
+//VGT_VTX_CNT_EN
+#define VGT_VTX_CNT_EN__VTX_CNT_EN__SHIFT 0x0
+#define VGT_VTX_CNT_EN__VTX_CNT_EN_MASK 0x00000001L
+//DB_HTILE_SURFACE
+#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x1
+#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN__SHIFT 0x2
+#define DB_HTILE_SURFACE__PRELOAD__SHIFT 0x3
+#define DB_HTILE_SURFACE__PREFETCH_WIDTH__SHIFT 0x4
+#define DB_HTILE_SURFACE__PREFETCH_HEIGHT__SHIFT 0xa
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
+#define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12
+#define DB_HTILE_SURFACE__RB_ALIGNED__SHIFT 0x13
+#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
+#define DB_HTILE_SURFACE__HTILE_USES_PRELOAD_WIN_MASK 0x00000004L
+#define DB_HTILE_SURFACE__PRELOAD_MASK 0x00000008L
+#define DB_HTILE_SURFACE__PREFETCH_WIDTH_MASK 0x000003F0L
+#define DB_HTILE_SURFACE__PREFETCH_HEIGHT_MASK 0x0000FC00L
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
+#define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L
+#define DB_HTILE_SURFACE__RB_ALIGNED_MASK 0x00080000L
+//DB_SRESULTS_COMPARE_STATE0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x01000000L
+//DB_SRESULTS_COMPARE_STATE1
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x01000000L
+//DB_PRELOAD_CONTROL
+#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x0
+#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x8
+#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x10
+#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x18
+#define DB_PRELOAD_CONTROL__START_X_MASK 0x000000FFL
+#define DB_PRELOAD_CONTROL__START_Y_MASK 0x0000FF00L
+#define DB_PRELOAD_CONTROL__MAX_X_MASK 0x00FF0000L
+#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xFF000000L
+//VGT_STRMOUT_BUFFER_SIZE_0
+#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_SIZE_0__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_VTX_STRIDE_0
+#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_VTX_STRIDE_0__STRIDE_MASK 0x000003FFL
+//VGT_STRMOUT_BUFFER_OFFSET_0
+#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_OFFSET_0__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_SIZE_1
+#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_SIZE_1__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_VTX_STRIDE_1
+#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_VTX_STRIDE_1__STRIDE_MASK 0x000003FFL
+//VGT_STRMOUT_BUFFER_OFFSET_1
+#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_OFFSET_1__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_SIZE_2
+#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_SIZE_2__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_VTX_STRIDE_2
+#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_VTX_STRIDE_2__STRIDE_MASK 0x000003FFL
+//VGT_STRMOUT_BUFFER_OFFSET_2
+#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_OFFSET_2__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_SIZE_3
+#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_SIZE_3__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_VTX_STRIDE_3
+#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_VTX_STRIDE_3__STRIDE_MASK 0x000003FFL
+//VGT_STRMOUT_BUFFER_OFFSET_3
+#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_OFFSET_3__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x000001FFL
+//VGT_GS_MAX_VERT_OUT
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x0
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x000007FFL
+//VGT_TESS_DISTRIBUTION
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE__SHIFT 0x0
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT__SHIFT 0x1d
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE_MASK 0x000000FFL
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI_MASK 0x0000FF00L
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0x00FF0000L
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0x1F000000L
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT_MASK 0xE0000000L
+//VGT_SHADER_STAGES_EN
+#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x0
+#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x2
+#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x3
+#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x5
+#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x6
+#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN__SHIFT 0x9
+#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0__SHIFT 0xa
+#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1__SHIFT 0xb
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN__SHIFT 0xc
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN__SHIFT 0xd
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE__SHIFT 0xe
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE__SHIFT 0xf
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH__SHIFT 0x13
+#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x00000003L
+#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x00000004L
+#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x00000018L
+#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x00000020L
+#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0x000000C0L
+#define VGT_SHADER_STAGES_EN__DISPATCH_DRAW_EN_MASK 0x00000200L
+#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_0_MASK 0x00000400L
+#define VGT_SHADER_STAGES_EN__DIS_DEALLOC_ACCUM_1_MASK 0x00000800L
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN_MASK 0x00001000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN_MASK 0x00002000L
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE_MASK 0x00004000L
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE_MASK 0x00078000L
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH_MASK 0x00180000L
+//VGT_LS_HS_CONFIG
+#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x0
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0xe
+#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0x000000FFL
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0x000FC000L
+//VGT_GS_VERT_ITEMSIZE
+#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE__SHIFT 0x0
+#define VGT_GS_VERT_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
+//VGT_GS_VERT_ITEMSIZE_1
+#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE__SHIFT 0x0
+#define VGT_GS_VERT_ITEMSIZE_1__ITEMSIZE_MASK 0x00007FFFL
+//VGT_GS_VERT_ITEMSIZE_2
+#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE__SHIFT 0x0
+#define VGT_GS_VERT_ITEMSIZE_2__ITEMSIZE_MASK 0x00007FFFL
+//VGT_GS_VERT_ITEMSIZE_3
+#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE__SHIFT 0x0
+#define VGT_GS_VERT_ITEMSIZE_3__ITEMSIZE_MASK 0x00007FFFL
+//VGT_TF_PARAM
+#define VGT_TF_PARAM__TYPE__SHIFT 0x0
+#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x2
+#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x5
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x8
+#define VGT_TF_PARAM__DEPRECATED__SHIFT 0x9
+#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0xe
+#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0xf
+#define VGT_TF_PARAM__DISTRIBUTION_MODE__SHIFT 0x11
+#define VGT_TF_PARAM__TYPE_MASK 0x00000003L
+#define VGT_TF_PARAM__PARTITIONING_MASK 0x0000001CL
+#define VGT_TF_PARAM__TOPOLOGY_MASK 0x000000E0L
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x00000100L
+#define VGT_TF_PARAM__DEPRECATED_MASK 0x00000200L
+#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x00004000L
+#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x00008000L
+#define VGT_TF_PARAM__DISTRIBUTION_MODE_MASK 0x00060000L
+//DB_ALPHA_TO_MASK
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x0
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x8
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0xa
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0xc
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0xe
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x10
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x00000001L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x00000300L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0x00000C00L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x00003000L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0x0000C000L
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x00010000L
+//VGT_DISPATCH_DRAW_INDEX
+#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX__SHIFT 0x0
+#define VGT_DISPATCH_DRAW_INDEX__MATCH_INDEX_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_DB_FMT_CNTL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x8
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0x000000FFL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x00000100L
+//PA_SU_POLY_OFFSET_CLAMP
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_SCALE
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_OFFSET
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_SCALE
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_OFFSET
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_GS_INSTANCE_CNT
+#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x0
+#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x2
+#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x00000001L
+#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x000001FCL
+//VGT_STRMOUT_CONFIG
+#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN__SHIFT 0x0
+#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN__SHIFT 0x1
+#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN__SHIFT 0x2
+#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN__SHIFT 0x3
+#define VGT_STRMOUT_CONFIG__RAST_STREAM__SHIFT 0x4
+#define VGT_STRMOUT_CONFIG__EN_PRIMS_NEEDED_CNT__SHIFT 0x7
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK__SHIFT 0x8
+#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK__SHIFT 0x1f
+#define VGT_STRMOUT_CONFIG__STREAMOUT_0_EN_MASK 0x00000001L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_1_EN_MASK 0x00000002L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_2_EN_MASK 0x00000004L
+#define VGT_STRMOUT_CONFIG__STREAMOUT_3_EN_MASK 0x00000008L
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK 0x00000070L
+#define VGT_STRMOUT_CONFIG__EN_PRIMS_NEEDED_CNT_MASK 0x00000080L
+#define VGT_STRMOUT_CONFIG__RAST_STREAM_MASK_MASK 0x00000F00L
+#define VGT_STRMOUT_CONFIG__USE_RAST_STREAM_MASK_MASK 0x80000000L
+//VGT_STRMOUT_BUFFER_CONFIG
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN__SHIFT 0x4
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN__SHIFT 0x8
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN__SHIFT 0xc
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_0_BUFFER_EN_MASK 0x0000000FL
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_1_BUFFER_EN_MASK 0x000000F0L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_2_BUFFER_EN_MASK 0x00000F00L
+#define VGT_STRMOUT_BUFFER_CONFIG__STREAM_3_BUFFER_EN_MASK 0x0000F000L
+//VGT_DMA_EVENT_INITIATOR
+#define VGT_DMA_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
+#define VGT_DMA_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
+#define VGT_DMA_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
+#define VGT_DMA_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
+#define VGT_DMA_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
+#define VGT_DMA_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
+//PA_SC_CENTROID_PRIORITY_0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xF0000000L
+//PA_SC_CENTROID_PRIORITY_1
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xF0000000L
+//PA_SC_LINE_CNTL
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x9
+#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0xa
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0xb
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0xc
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION__SHIFT 0xd
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x00000200L
+#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x00000400L
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x00000800L
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x00001000L
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION_MASK 0x00002000L
+//PA_SC_AA_CONFIG
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x0
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x4
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0xd
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x14
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x18
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT__SHIFT 0x1a
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x00000007L
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x00000010L
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x0001E000L
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x00700000L
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x03000000L
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT_MASK 0x0C000000L
+//PA_SU_VTX_CNTL
+#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x0
+#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x1
+#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x3
+#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x00000001L
+#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x00000006L
+#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x00000038L
+//PA_CL_GB_VERT_CLIP_ADJ
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_VERT_DISC_ADJ
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_CLIP_ADJ
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_DISC_ADJ
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xF0000000L
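As the nibble-wide masks above show, each PA_SC_AA_SAMPLE_LOCS_PIXEL_* word packs eight 4-bit fields, i.e. four X/Y sample-location pairs per register. A hedged sketch of packing such a word with these macros; the helper is illustrative and not part of this patch:

/* Illustrative only: pack four 4-bit X/Y pairs into one
 * PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 word using the masks above. */
static u32 example_pack_sample_locs_x0y0_0(const u8 x[4], const u8 y[4])
{
	u32 v = 0;

	v |= ((u32)x[0] << PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT) &
	     PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK;
	v |= ((u32)y[0] << PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT) &
	     PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK;
	v |= ((u32)x[1] << PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT) &
	     PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK;
	v |= ((u32)y[1] << PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT) &
	     PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK;
	/* S2/S3 follow the same pattern at shifts 0x10 through 0x1c. */
	return v;
}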
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_MASK_X0Y0_X1Y0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xFFFF0000L
+//PA_SC_AA_MASK_X0Y1_X1Y1
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xFFFF0000L
+//PA_SC_SHADER_CONTROL
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES__SHIFT 0x0
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID__SHIFT 0x2
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION__SHIFT 0x3
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES_MASK 0x00000003L
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID_MASK 0x00000004L
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION_MASK 0x00000008L
+//PA_SC_BINNER_CNTL_0
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X__SHIFT 0x2
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y__SHIFT 0x3
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND__SHIFT 0x4
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND__SHIFT 0x7
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN__SHIFT 0xa
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM__SHIFT 0x12
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH__SHIFT 0x13
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION__SHIFT 0x1b
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1c
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE_MASK 0x00000003L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_MASK 0x00000004L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_MASK 0x00000008L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND_MASK 0x00000070L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND_MASK 0x00000380L
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM_MASK 0x00040000L
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH_MASK 0x07F80000L
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION_MASK 0x08000000L
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION_MASK 0x10000000L
+//PA_SC_BINNER_CNTL_1
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH__SHIFT 0x10
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT_MASK 0x0000FFFFL
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH_MASK 0xFFFF0000L
+//PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE__SHIFT 0x0
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT__SHIFT 0x1
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE__SHIFT 0x5
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT__SHIFT 0x6
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE__SHIFT 0xa
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT__SHIFT 0xb
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET__SHIFT 0xc
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL__SHIFT 0xd
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL__SHIFT 0xe
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE__SHIFT 0xf
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE__SHIFT 0x10
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x12
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x13
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE__SHIFT 0x14
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE__SHIFT 0x15
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE__SHIFT 0x16
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE__SHIFT 0x17
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE__SHIFT 0x18
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE_MASK 0x00000001L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT_MASK 0x0000001EL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE_MASK 0x00000020L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT_MASK 0x000003C0L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE_MASK 0x00000400L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT_MASK 0x00000800L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET_MASK 0x00001000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL_MASK 0x00002000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL_MASK 0x00004000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE_MASK 0x00008000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE_MASK 0x00030000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00040000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00080000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE_MASK 0x00100000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE_MASK 0x00200000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE_MASK 0x00400000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE_MASK 0x00800000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE_MASK 0x01000000L
+//PA_SC_NGG_MODE_CNTL
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE__SHIFT 0x0
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE_MASK 0x000007FFL
+//VGT_VERTEX_REUSE_BLOCK_CNTL
+#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH__SHIFT 0x0
+#define VGT_VERTEX_REUSE_BLOCK_CNTL__VTX_REUSE_DEPTH_MASK 0x000000FFL
+//VGT_OUT_DEALLOC_CNTL
+#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST__SHIFT 0x0
+#define VGT_OUT_DEALLOC_CNTL__DEALLOC_DIST_MASK 0x0000007FL
+//CB_COLOR0_BASE
+#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_BASE_EXT
+#define CB_COLOR0_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_ATTRIB2
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR0_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR0_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR0_VIEW
+#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR0_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR0_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR0_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR0_INFO
+#define CB_COLOR0_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR0_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR0_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR0_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR0_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR0_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR0_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR0_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR0_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR0_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR0_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR0_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR0_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR0_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR0_ATTRIB
+#define CB_COLOR0_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR0_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR0_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR0_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR0_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR0_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR0_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR0_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR0_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR0_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR0_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR0_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR0_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR0_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR0_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR0_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR0_DCC_CONTROL
+#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR0_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR0_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR0_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR0_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR0_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR0_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR0_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR0_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR0_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR0_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR0_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR0_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR0_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR0_CMASK
+#define CB_COLOR0_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_CMASK_BASE_EXT
+#define CB_COLOR0_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_FMASK
+#define CB_COLOR0_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_FMASK_BASE_EXT
+#define CB_COLOR0_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_CLEAR_WORD0
+#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR0_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR0_CLEAR_WORD1
+#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR0_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR0_DCC_BASE
+#define CB_COLOR0_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_DCC_BASE_EXT
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_BASE
+#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_BASE_EXT
+#define CB_COLOR1_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_ATTRIB2
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR1_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR1_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR1_VIEW
+#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR1_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR1_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR1_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR1_INFO
+#define CB_COLOR1_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR1_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR1_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR1_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR1_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR1_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR1_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR1_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR1_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR1_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR1_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR1_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR1_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR1_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR1_ATTRIB
+#define CB_COLOR1_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR1_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR1_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR1_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR1_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR1_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR1_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR1_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR1_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR1_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR1_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR1_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR1_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR1_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR1_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR1_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR1_DCC_CONTROL
+#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR1_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR1_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR1_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR1_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR1_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR1_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR1_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR1_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR1_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR1_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR1_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR1_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR1_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR1_CMASK
+#define CB_COLOR1_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_CMASK_BASE_EXT
+#define CB_COLOR1_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_FMASK
+#define CB_COLOR1_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_FMASK_BASE_EXT
+#define CB_COLOR1_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_CLEAR_WORD0
+#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR1_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR1_CLEAR_WORD1
+#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR1_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR1_DCC_BASE
+#define CB_COLOR1_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_DCC_BASE_EXT
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_BASE
+#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_BASE_EXT
+#define CB_COLOR2_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_ATTRIB2
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR2_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR2_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR2_VIEW
+#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR2_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR2_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR2_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR2_INFO
+#define CB_COLOR2_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR2_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR2_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR2_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR2_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR2_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR2_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR2_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR2_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR2_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR2_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR2_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR2_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR2_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR2_ATTRIB
+#define CB_COLOR2_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR2_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR2_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR2_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR2_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR2_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR2_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR2_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR2_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR2_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR2_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR2_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR2_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR2_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR2_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR2_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR2_DCC_CONTROL
+#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR2_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR2_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR2_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR2_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR2_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR2_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR2_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR2_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR2_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR2_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR2_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR2_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR2_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR2_CMASK
+#define CB_COLOR2_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_CMASK_BASE_EXT
+#define CB_COLOR2_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_FMASK
+#define CB_COLOR2_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_FMASK_BASE_EXT
+#define CB_COLOR2_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_CLEAR_WORD0
+#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR2_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR2_CLEAR_WORD1
+#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR2_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR2_DCC_BASE
+#define CB_COLOR2_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_DCC_BASE_EXT
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_BASE
+#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_BASE_EXT
+#define CB_COLOR3_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_ATTRIB2
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR3_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR3_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR3_VIEW
+#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR3_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR3_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR3_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR3_INFO
+#define CB_COLOR3_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR3_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR3_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR3_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR3_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR3_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR3_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR3_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR3_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR3_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR3_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR3_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR3_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR3_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR3_ATTRIB
+#define CB_COLOR3_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR3_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR3_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR3_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR3_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR3_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR3_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR3_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR3_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR3_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR3_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR3_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR3_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR3_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR3_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR3_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR3_DCC_CONTROL
+#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR3_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR3_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR3_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR3_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR3_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR3_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR3_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR3_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR3_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR3_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR3_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR3_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR3_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR3_CMASK
+#define CB_COLOR3_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_CMASK_BASE_EXT
+#define CB_COLOR3_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_FMASK
+#define CB_COLOR3_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_FMASK_BASE_EXT
+#define CB_COLOR3_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_CLEAR_WORD0
+#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR3_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR3_CLEAR_WORD1
+#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR3_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR3_DCC_BASE
+#define CB_COLOR3_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_DCC_BASE_EXT
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_BASE
+#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_BASE_EXT
+#define CB_COLOR4_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_ATTRIB2
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR4_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR4_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR4_VIEW
+#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR4_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR4_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR4_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR4_INFO
+#define CB_COLOR4_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR4_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR4_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR4_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR4_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR4_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR4_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR4_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR4_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR4_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR4_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR4_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR4_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR4_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR4_ATTRIB
+#define CB_COLOR4_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR4_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR4_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR4_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR4_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR4_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR4_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR4_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR4_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR4_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR4_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR4_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR4_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR4_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR4_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR4_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR4_DCC_CONTROL
+#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR4_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR4_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR4_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR4_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR4_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR4_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR4_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR4_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR4_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR4_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR4_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR4_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR4_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR4_CMASK
+#define CB_COLOR4_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_CMASK_BASE_EXT
+#define CB_COLOR4_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_FMASK
+#define CB_COLOR4_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_FMASK_BASE_EXT
+#define CB_COLOR4_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_CLEAR_WORD0
+#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR4_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR4_CLEAR_WORD1
+#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR4_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR4_DCC_BASE
+#define CB_COLOR4_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_DCC_BASE_EXT
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_BASE
+#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_BASE_EXT
+#define CB_COLOR5_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_ATTRIB2
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR5_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR5_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR5_VIEW
+#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR5_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR5_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR5_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR5_INFO
+#define CB_COLOR5_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR5_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR5_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR5_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR5_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR5_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR5_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR5_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR5_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR5_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR5_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR5_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR5_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR5_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR5_ATTRIB
+#define CB_COLOR5_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR5_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR5_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR5_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR5_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR5_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR5_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR5_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR5_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR5_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR5_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR5_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR5_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR5_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR5_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR5_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR5_DCC_CONTROL
+#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR5_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR5_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR5_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR5_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR5_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR5_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR5_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR5_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR5_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR5_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR5_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR5_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR5_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR5_CMASK
+#define CB_COLOR5_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_CMASK_BASE_EXT
+#define CB_COLOR5_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_FMASK
+#define CB_COLOR5_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_FMASK_BASE_EXT
+#define CB_COLOR5_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_CLEAR_WORD0
+#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR5_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR5_CLEAR_WORD1
+#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR5_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR5_DCC_BASE
+#define CB_COLOR5_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_DCC_BASE_EXT
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_BASE
+#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_BASE_EXT
+#define CB_COLOR6_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_ATTRIB2
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR6_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR6_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR6_VIEW
+#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR6_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR6_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR6_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR6_INFO
+#define CB_COLOR6_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR6_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR6_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR6_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR6_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR6_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR6_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR6_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR6_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR6_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR6_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR6_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR6_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR6_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR6_ATTRIB
+#define CB_COLOR6_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR6_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR6_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR6_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR6_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR6_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR6_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR6_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR6_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR6_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR6_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR6_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR6_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR6_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR6_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR6_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR6_DCC_CONTROL
+#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR6_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR6_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR6_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR6_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR6_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR6_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR6_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR6_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR6_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR6_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR6_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR6_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR6_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR6_CMASK
+#define CB_COLOR6_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_CMASK_BASE_EXT
+#define CB_COLOR6_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_FMASK
+#define CB_COLOR6_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_FMASK_BASE_EXT
+#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_CLEAR_WORD0
+#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR6_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR6_CLEAR_WORD1
+#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR6_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR6_DCC_BASE
+#define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_DCC_BASE_EXT
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_BASE
+#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_BASE_EXT
+#define CB_COLOR7_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_ATTRIB2
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR7_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR7_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR7_VIEW
+#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR7_VIEW__MIP_LEVEL__SHIFT 0x18
+#define CB_COLOR7_VIEW__SLICE_START_MASK 0x000007FFL
+#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define CB_COLOR7_VIEW__MIP_LEVEL_MASK 0x0F000000L
+//CB_COLOR7_INFO
+#define CB_COLOR7_INFO__ENDIAN__SHIFT 0x0
+#define CB_COLOR7_INFO__FORMAT__SHIFT 0x2
+#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR7_INFO__FAST_CLEAR__SHIFT 0xd
+#define CB_COLOR7_INFO__COMPRESSION__SHIFT 0xe
+#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE__SHIFT 0x1a
+#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY__SHIFT 0x1b
+#define CB_COLOR7_INFO__DCC_ENABLE__SHIFT 0x1c
+#define CB_COLOR7_INFO__CMASK_ADDR_TYPE__SHIFT 0x1d
+#define CB_COLOR7_INFO__ENDIAN_MASK 0x00000003L
+#define CB_COLOR7_INFO__FORMAT_MASK 0x0000007CL
+#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR7_INFO__FAST_CLEAR_MASK 0x00002000L
+#define CB_COLOR7_INFO__COMPRESSION_MASK 0x00004000L
+#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+#define CB_COLOR7_INFO__FMASK_COMPRESSION_DISABLE_MASK 0x04000000L
+#define CB_COLOR7_INFO__FMASK_COMPRESS_1FRAG_ONLY_MASK 0x08000000L
+#define CB_COLOR7_INFO__DCC_ENABLE_MASK 0x10000000L
+#define CB_COLOR7_INFO__CMASK_ADDR_TYPE_MASK 0x60000000L
+//CB_COLOR7_ATTRIB
+#define CB_COLOR7_ATTRIB__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR7_ATTRIB__META_LINEAR__SHIFT 0xb
+#define CB_COLOR7_ATTRIB__NUM_SAMPLES__SHIFT 0xc
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0xf
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x11
+#define CB_COLOR7_ATTRIB__COLOR_SW_MODE__SHIFT 0x12
+#define CB_COLOR7_ATTRIB__FMASK_SW_MODE__SHIFT 0x17
+#define CB_COLOR7_ATTRIB__RESOURCE_TYPE__SHIFT 0x1c
+#define CB_COLOR7_ATTRIB__RB_ALIGNED__SHIFT 0x1e
+#define CB_COLOR7_ATTRIB__PIPE_ALIGNED__SHIFT 0x1f
+#define CB_COLOR7_ATTRIB__MIP0_DEPTH_MASK 0x000007FFL
+#define CB_COLOR7_ATTRIB__META_LINEAR_MASK 0x00000800L
+#define CB_COLOR7_ATTRIB__NUM_SAMPLES_MASK 0x00007000L
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x00018000L
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00020000L
+#define CB_COLOR7_ATTRIB__COLOR_SW_MODE_MASK 0x007C0000L
+#define CB_COLOR7_ATTRIB__FMASK_SW_MODE_MASK 0x0F800000L
+#define CB_COLOR7_ATTRIB__RESOURCE_TYPE_MASK 0x30000000L
+#define CB_COLOR7_ATTRIB__RB_ALIGNED_MASK 0x40000000L
+#define CB_COLOR7_ATTRIB__PIPE_ALIGNED_MASK 0x80000000L
+//CB_COLOR7_DCC_CONTROL
+#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE__SHIFT 0x0
+#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE__SHIFT 0x1
+#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION__SHIFT 0xa
+#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION__SHIFT 0xe
+#define CB_COLOR7_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR7_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR7_DCC_CONTROL__OVERWRITE_COMBINER_DISABLE_MASK 0x00000001L
+#define CB_COLOR7_DCC_CONTROL__KEY_CLEAR_ENABLE_MASK 0x00000002L
+#define CB_COLOR7_DCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR7_DCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR7_DCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR7_DCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR7_DCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR7_DCC_CONTROL__LOSSY_RGB_PRECISION_MASK 0x00003C00L
+#define CB_COLOR7_DCC_CONTROL__LOSSY_ALPHA_PRECISION_MASK 0x0003C000L
+#define CB_COLOR7_DCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR7_DCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+//CB_COLOR7_CMASK
+#define CB_COLOR7_CMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_CMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_CMASK_BASE_EXT
+#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_CMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_FMASK
+#define CB_COLOR7_FMASK__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_FMASK__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_FMASK_BASE_EXT
+#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_FMASK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_CLEAR_WORD0
+#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0__SHIFT 0x0
+#define CB_COLOR7_CLEAR_WORD0__CLEAR_WORD0_MASK 0xFFFFFFFFL
+//CB_COLOR7_CLEAR_WORD1
+#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1__SHIFT 0x0
+#define CB_COLOR7_CLEAR_WORD1__CLEAR_WORD1_MASK 0xFFFFFFFFL
+//CB_COLOR7_DCC_BASE
+#define CB_COLOR7_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_DCC_BASE_EXT
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
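
The CB_COLOR* definitions above follow the header's uniform two-macro convention: each register field gets a `__SHIFT` giving its bit offset and a `_MASK` giving its in-place bits. The short C sketch below (illustrative only, not part of this patch; the helper names are hypothetical and it assumes this header is already included) shows the usual way such a pair is combined to read and rewrite a field; the amdgpu driver's REG_GET_FIELD()/REG_SET_FIELD() macros expand to essentially the same pattern.

/* Illustrative helpers, not part of the generated header. */
#include <stdint.h>

/* Extract the FORMAT field from a CB_COLOR4_INFO register value. */
static inline uint32_t cb_color4_info_get_format(uint32_t reg)
{
	return (reg & CB_COLOR4_INFO__FORMAT_MASK) >> CB_COLOR4_INFO__FORMAT__SHIFT;
}

/* Replace the FORMAT field, leaving all other bits of the register untouched. */
static inline uint32_t cb_color4_info_set_format(uint32_t reg, uint32_t fmt)
{
	reg &= ~CB_COLOR4_INFO__FORMAT_MASK;
	reg |= (fmt << CB_COLOR4_INFO__FORMAT__SHIFT) & CB_COLOR4_INFO__FORMAT_MASK;
	return reg;
}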
+
+
+// addressBlock: gc_gfxudec
+//CP_EOP_DONE_ADDR_LO
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_EOP_DONE_ADDR_HI
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_EOP_DONE_DATA_LO
+#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x0
+#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
+//CP_EOP_DONE_DATA_HI
+#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x0
+#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_LO
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_HI
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xFFFFFFFFL
+//CP_STREAM_OUT_ADDR_LO
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO__SHIFT 0x2
+#define CP_STREAM_OUT_ADDR_LO__STREAM_OUT_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_STREAM_OUT_ADDR_HI
+#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI__SHIFT 0x0
+#define CP_STREAM_OUT_ADDR_HI__STREAM_OUT_ADDR_HI_MASK 0x0000FFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT0_LO
+#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT0_LO__NUM_PRIM_WRITTEN_CNT0_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT0_HI
+#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT0_HI__NUM_PRIM_WRITTEN_CNT0_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT0_LO
+#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT0_LO__NUM_PRIM_NEEDED_CNT0_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT0_HI
+#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT0_HI__NUM_PRIM_NEEDED_CNT0_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT1_LO
+#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT1_LO__NUM_PRIM_WRITTEN_CNT1_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT1_HI
+#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT1_HI__NUM_PRIM_WRITTEN_CNT1_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT1_LO
+#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT1_LO__NUM_PRIM_NEEDED_CNT1_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT1_HI
+#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT1_HI__NUM_PRIM_NEEDED_CNT1_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT2_LO
+#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT2_LO__NUM_PRIM_WRITTEN_CNT2_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT2_HI
+#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT2_HI__NUM_PRIM_WRITTEN_CNT2_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT2_LO
+#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT2_LO__NUM_PRIM_NEEDED_CNT2_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT2_HI
+#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT2_HI__NUM_PRIM_NEEDED_CNT2_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT3_LO
+#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT3_LO__NUM_PRIM_WRITTEN_CNT3_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_WRITTEN_COUNT3_HI
+#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI__SHIFT 0x0
+#define CP_NUM_PRIM_WRITTEN_COUNT3_HI__NUM_PRIM_WRITTEN_CNT3_HI_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT3_LO
+#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT3_LO__NUM_PRIM_NEEDED_CNT3_LO_MASK 0xFFFFFFFFL
+//CP_NUM_PRIM_NEEDED_COUNT3_HI
+#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI__SHIFT 0x0
+#define CP_NUM_PRIM_NEEDED_COUNT3_HI__NUM_PRIM_NEEDED_CNT3_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_ADDR_LO
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x2
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_PIPE_STATS_ADDR_HI
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x0
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0x0000FFFFL
+//CP_VGT_IAVERT_COUNT_LO
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAVERT_COUNT_HI
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_LO
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_HI
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_LO
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_HI
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_LO
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_HI
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_LO
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_HI
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_LO
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_HI
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_LO
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_HI
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_LO
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_HI
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_LO
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_HI
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_LO
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_HI
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_LO
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_HI
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_LO
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_HI
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_CONTROL
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY__SHIFT 0x19
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY_MASK 0x02000000L
+//CP_STREAM_OUT_CONTROL
+#define CP_STREAM_OUT_CONTROL__CACHE_POLICY__SHIFT 0x19
+#define CP_STREAM_OUT_CONTROL__CACHE_POLICY_MASK 0x02000000L
+//CP_STRMOUT_CNTL
+#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE__SHIFT 0x0
+#define CP_STRMOUT_CNTL__OFFSET_UPDATE_DONE_MASK 0x00000001L
+//SCRATCH_REG0
+#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//SCRATCH_REG1
+#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//SCRATCH_REG2
+#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//SCRATCH_REG3
+#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//SCRATCH_REG4
+#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//SCRATCH_REG5
+#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//SCRATCH_REG6
+#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//SCRATCH_REG7
+#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+//CP_APPEND_DATA_HI
+#define CP_APPEND_DATA_HI__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_HI
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_HI
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//SCRATCH_UMSK
+#define SCRATCH_UMSK__OBSOLETE_UMSK__SHIFT 0x0
+#define SCRATCH_UMSK__OBSOLETE_SWAP__SHIFT 0x10
+#define SCRATCH_UMSK__OBSOLETE_UMSK_MASK 0x000000FFL
+#define SCRATCH_UMSK__OBSOLETE_SWAP_MASK 0x00030000L
+//SCRATCH_ADDR
+#define SCRATCH_ADDR__OBSOLETE_ADDR__SHIFT 0x0
+#define SCRATCH_ADDR__OBSOLETE_ADDR_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_LO
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_HI
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_LO
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_HI
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_LO
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_HI
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_APPEND_ADDR_LO
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x2
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_APPEND_ADDR_HI
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x0
+#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x10
+#define CP_APPEND_ADDR_HI__CACHE_POLICY__SHIFT 0x19
+#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x1d
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x00010000L
+#define CP_APPEND_ADDR_HI__CACHE_POLICY_MASK 0x02000000L
+#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xE0000000L
+//CP_APPEND_DATA_LO
+#define CP_APPEND_DATA_LO__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_LO
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_LO
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_LO
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_LO
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_HI
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_HI
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_LO
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_LO
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_HI
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_HI
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_LO
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_LO
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_HI
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_HI
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_WADDR_LO
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x2
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_WADDR_HI
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x0
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY_MASK 0x00400000L
+//CP_ME_MC_WDATA_LO
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x0
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xFFFFFFFFL
+//CP_ME_MC_WDATA_HI
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x0
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_RADDR_LO
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x2
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_RADDR_HI
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x0
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY_MASK 0x00400000L
+//CP_SEM_WAIT_TIMER
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x0
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xFFFFFFFFL
+//CP_SIG_SEM_ADDR_LO
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_SIG_SEM_ADDR_HI
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_WAIT_REG_MEM_TIMEOUT
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x0
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xFFFFFFFFL
+//CP_WAIT_SEM_ADDR_LO
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_SWAP_MASK 0x00000003L
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_WAIT_SEM_ADDR_HI
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_DMA_PFP_CONTROL
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_PFP_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_PFP_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY_MASK 0x00002000L
+#define CP_DMA_PFP_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY_MASK 0x02000000L
+#define CP_DMA_PFP_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_DMA_ME_CONTROL
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_ME_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_ME_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY_MASK 0x00002000L
+#define CP_DMA_ME_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY_MASK 0x02000000L
+#define CP_DMA_ME_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_COHER_BASE_HI
+#define CP_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
+#define CP_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
+//CP_COHER_START_DELAY
+#define CP_COHER_START_DELAY__START_DELAY_COUNT__SHIFT 0x0
+#define CP_COHER_START_DELAY__START_DELAY_COUNT_MASK 0x0000003FL
+//CP_COHER_CNTL
+#define CP_COHER_CNTL__TC_NC_ACTION_ENA__SHIFT 0x3
+#define CP_COHER_CNTL__TC_WC_ACTION_ENA__SHIFT 0x4
+#define CP_COHER_CNTL__TC_INV_METADATA_ACTION_ENA__SHIFT 0x5
+#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA__SHIFT 0xf
+#define CP_COHER_CNTL__TC_WB_ACTION_ENA__SHIFT 0x12
+#define CP_COHER_CNTL__TCL1_ACTION_ENA__SHIFT 0x16
+#define CP_COHER_CNTL__TC_ACTION_ENA__SHIFT 0x17
+#define CP_COHER_CNTL__CB_ACTION_ENA__SHIFT 0x19
+#define CP_COHER_CNTL__DB_ACTION_ENA__SHIFT 0x1a
+#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA__SHIFT 0x1b
+#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA__SHIFT 0x1c
+#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA__SHIFT 0x1d
+#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA__SHIFT 0x1e
+#define CP_COHER_CNTL__TC_NC_ACTION_ENA_MASK 0x00000008L
+#define CP_COHER_CNTL__TC_WC_ACTION_ENA_MASK 0x00000010L
+#define CP_COHER_CNTL__TC_INV_METADATA_ACTION_ENA_MASK 0x00000020L
+#define CP_COHER_CNTL__TCL1_VOL_ACTION_ENA_MASK 0x00008000L
+#define CP_COHER_CNTL__TC_WB_ACTION_ENA_MASK 0x00040000L
+#define CP_COHER_CNTL__TCL1_ACTION_ENA_MASK 0x00400000L
+#define CP_COHER_CNTL__TC_ACTION_ENA_MASK 0x00800000L
+#define CP_COHER_CNTL__CB_ACTION_ENA_MASK 0x02000000L
+#define CP_COHER_CNTL__DB_ACTION_ENA_MASK 0x04000000L
+#define CP_COHER_CNTL__SH_KCACHE_ACTION_ENA_MASK 0x08000000L
+#define CP_COHER_CNTL__SH_KCACHE_VOL_ACTION_ENA_MASK 0x10000000L
+#define CP_COHER_CNTL__SH_ICACHE_ACTION_ENA_MASK 0x20000000L
+#define CP_COHER_CNTL__SH_KCACHE_WB_ACTION_ENA_MASK 0x40000000L
+//CP_COHER_SIZE
+#define CP_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
+#define CP_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
+//CP_COHER_BASE
+#define CP_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
+#define CP_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
+//CP_COHER_STATUS
+#define CP_COHER_STATUS__MEID__SHIFT 0x18
+#define CP_COHER_STATUS__STATUS__SHIFT 0x1f
+#define CP_COHER_STATUS__MEID_MASK 0x03000000L
+#define CP_COHER_STATUS__STATUS_MASK 0x80000000L
+//CP_DMA_ME_SRC_ADDR
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_SRC_ADDR_HI
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_DST_ADDR
+#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_DST_ADDR_HI
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_COMMAND
+#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_ME_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_ME_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_PFP_SRC_ADDR
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_SRC_ADDR_HI
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_DST_ADDR
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_DST_ADDR_HI
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_COMMAND
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_PFP_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_PFP_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_CNTL
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL__SHIFT 0x0
+#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x4
+#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x10
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x1d
+#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x1e
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL_MASK 0x00000001L
+#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x00000030L
+#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0x000F0000L
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000L
+#define CP_DMA_CNTL__PIO_COUNT_MASK 0xC0000000L
+//CP_DMA_READ_TAGS
+#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x0
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x1c
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x03FFFFFFL
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000L
+//CP_COHER_SIZE_HI
+#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
+#define CP_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
+//CP_PFP_IB_CONTROL
+#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x0
+#define CP_PFP_IB_CONTROL__IB_EN_MASK 0x000000FFL
+//CP_PFP_LOAD_CONTROL
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x0
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x1
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x10
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x18
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x00000001L
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x00000002L
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x00010000L
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x01000000L
+//CP_SCRATCH_INDEX
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000000FFL
+//CP_SCRATCH_DATA
+#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_RB_OFFSET
+#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_OFFSET
+#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
+#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
+//CP_IB2_OFFSET
+#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
+#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_BEGIN
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_END
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x0
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_BEGIN
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_END
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x0
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_CE_IB1_OFFSET
+#define CP_CE_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
+#define CP_CE_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
+//CP_CE_IB2_OFFSET
+#define CP_CE_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
+#define CP_CE_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
+//CP_CE_COUNTER
+#define CP_CE_COUNTER__CONST_ENGINE_COUNT__SHIFT 0x0
+#define CP_CE_COUNTER__CONST_ENGINE_COUNT_MASK 0xFFFFFFFFL
+//CP_CE_RB_OFFSET
+#define CP_CE_RB_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_CE_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+//CP_CE_INIT_CMD_BUFSZ
+#define CP_CE_INIT_CMD_BUFSZ__INIT_CMD_REQSZ__SHIFT 0x0
+#define CP_CE_INIT_CMD_BUFSZ__INIT_CMD_REQSZ_MASK 0x00000FFFL
+//CP_CE_IB1_CMD_BUFSZ
+#define CP_CE_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
+#define CP_CE_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_CE_IB2_CMD_BUFSZ
+#define CP_CE_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
+#define CP_CE_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB1_CMD_BUFSZ
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB2_CMD_BUFSZ
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_ST_CMD_BUFSZ
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ__SHIFT 0x0
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_CE_INIT_BASE_LO
+#define CP_CE_INIT_BASE_LO__INIT_BASE_LO__SHIFT 0x5
+#define CP_CE_INIT_BASE_LO__INIT_BASE_LO_MASK 0xFFFFFFE0L
+//CP_CE_INIT_BASE_HI
+#define CP_CE_INIT_BASE_HI__INIT_BASE_HI__SHIFT 0x0
+#define CP_CE_INIT_BASE_HI__INIT_BASE_HI_MASK 0x0000FFFFL
+//CP_CE_INIT_BUFSZ
+#define CP_CE_INIT_BUFSZ__INIT_BUFSZ__SHIFT 0x0
+#define CP_CE_INIT_BUFSZ__INIT_BUFSZ_MASK 0x00000FFFL
+//CP_CE_IB1_BASE_LO
+#define CP_CE_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
+#define CP_CE_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
+//CP_CE_IB1_BASE_HI
+#define CP_CE_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
+#define CP_CE_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
+//CP_CE_IB1_BUFSZ
+#define CP_CE_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
+#define CP_CE_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
+//CP_CE_IB2_BASE_LO
+#define CP_CE_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
+#define CP_CE_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
+//CP_CE_IB2_BASE_HI
+#define CP_CE_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
+#define CP_CE_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
+//CP_CE_IB2_BUFSZ
+#define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
+#define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
+//CP_IB1_BASE_LO
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB1_BASE_HI
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
+//CP_IB1_BUFSZ
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
+//CP_IB2_BASE_LO
+#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
+#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB2_BASE_HI
+#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
+#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
+//CP_IB2_BUFSZ
+#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
+#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
+//CP_ST_BASE_LO
+#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x2
+#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xFFFFFFFCL
+//CP_ST_BASE_HI
+#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x0
+#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0x0000FFFFL
+//CP_ST_BUFSZ
+#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x0
+#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0x000FFFFFL
+//CP_EOP_DONE_EVENT_CNTL
+#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP__SHIFT 0x0
+#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA__SHIFT 0xc
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY__SHIFT 0x19
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE__SHIFT 0x1c
+#define CP_EOP_DONE_EVENT_CNTL__WBINV_TC_OP_MASK 0x0000007FL
+#define CP_EOP_DONE_EVENT_CNTL__WBINV_ACTION_ENA_MASK 0x0003F000L
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY_MASK 0x02000000L
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE_MASK 0x10000000L
+//CP_EOP_DONE_DATA_CNTL
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x10
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x18
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x1d
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x00030000L
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x07000000L
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xE0000000L
+//CP_EOP_DONE_CNTX_ID
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_PFP_COMPLETION_STATUS
+#define CP_PFP_COMPLETION_STATUS__STATUS__SHIFT 0x0
+#define CP_PFP_COMPLETION_STATUS__STATUS_MASK 0x00000003L
+//CP_CE_COMPLETION_STATUS
+#define CP_CE_COMPLETION_STATUS__STATUS__SHIFT 0x0
+#define CP_CE_COMPLETION_STATUS__STATUS_MASK 0x00000003L
+//CP_PRED_NOT_VISIBLE
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE__SHIFT 0x0
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE_MASK 0x00000001L
+//CP_PFP_METADATA_BASE_ADDR
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_PFP_METADATA_BASE_ADDR_HI
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_CE_METADATA_BASE_ADDR
+#define CP_CE_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_CE_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_CE_METADATA_BASE_ADDR_HI
+#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_CE_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DRAW_INDX_INDR_ADDR
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DRAW_INDX_INDR_ADDR_HI
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DISPATCH_INDR_ADDR
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DISPATCH_INDR_ADDR_HI
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_BASE_ADDR
+#define CP_INDEX_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_INDEX_BASE_ADDR_HI
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_TYPE
+#define CP_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define CP_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+//CP_GDS_BKUP_ADDR
+#define CP_GDS_BKUP_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_GDS_BKUP_ADDR_HI
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_SAMPLE_STATUS
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE__SHIFT 0x0
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE__SHIFT 0x1
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE__SHIFT 0x2
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE__SHIFT 0x3
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE__SHIFT 0x4
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE__SHIFT 0x5
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE__SHIFT 0x6
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE__SHIFT 0x7
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE_MASK 0x00000001L
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE_MASK 0x00000002L
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE_MASK 0x00000004L
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE_MASK 0x00000008L
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE_MASK 0x00000010L
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE_MASK 0x00000020L
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE_MASK 0x00000040L
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE_MASK 0x00000080L
+//CP_ME_COHER_CNTL
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x0
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x1
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x6
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x7
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x8
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x9
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0xa
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0xb
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0xc
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0xd
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0xe
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x13
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x15
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x00000001L
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x00000002L
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x00000040L
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x00000080L
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x00000100L
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x00000200L
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x00000400L
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x00000800L
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x00001000L
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x00002000L
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x00004000L
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x00080000L
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x00200000L
+//CP_ME_COHER_SIZE
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_SIZE_HI
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_BASE
+#define CP_ME_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_BASE_HI
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_STATUS
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x0
+#define CP_ME_COHER_STATUS__STATUS__SHIFT 0x1f
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0x000000FFL
+#define CP_ME_COHER_STATUS__STATUS_MASK 0x80000000L
+//RLC_GPM_PERF_COUNT_0
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_0__SH_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_0__CU_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_0__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_0__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_0__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_0__SH_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_0__CU_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_0__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_0__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_0__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_PERF_COUNT_1
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_1__SH_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_1__CU_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_1__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_1__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_1__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_1__SH_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_1__CU_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_1__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_1__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_1__RESERVED_MASK 0xFFE00000L
+//GRBM_GFX_INDEX
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
+//VGT_GSVS_RING_SIZE
+#define VGT_GSVS_RING_SIZE__MEM_SIZE__SHIFT 0x0
+#define VGT_GSVS_RING_SIZE__MEM_SIZE_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVE_TYPE
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
+//VGT_INDEX_TYPE
+#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_INDEX_TYPE__PRIMGEN_EN__SHIFT 0x8
+#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_INDEX_TYPE__PRIMGEN_EN_MASK 0x00000100L
+//VGT_STRMOUT_BUFFER_FILLED_SIZE_0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_0__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_FILLED_SIZE_1
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_1__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_FILLED_SIZE_2
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_2__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_BUFFER_FILLED_SIZE_3
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_BUFFER_FILLED_SIZE_3__SIZE_MASK 0xFFFFFFFFL
+//VGT_MAX_VTX_INDX
+#define VGT_MAX_VTX_INDX__MAX_INDX__SHIFT 0x0
+#define VGT_MAX_VTX_INDX__MAX_INDX_MASK 0xFFFFFFFFL
+//VGT_MIN_VTX_INDX
+#define VGT_MIN_VTX_INDX__MIN_INDX__SHIFT 0x0
+#define VGT_MIN_VTX_INDX__MIN_INDX_MASK 0xFFFFFFFFL
+//VGT_INDX_OFFSET
+#define VGT_INDX_OFFSET__INDX_OFFSET__SHIFT 0x0
+#define VGT_INDX_OFFSET__INDX_OFFSET_MASK 0xFFFFFFFFL
+//VGT_MULTI_PRIM_IB_RESET_EN
+#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x0
+#define VGT_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS__SHIFT 0x1
+#define VGT_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x00000001L
+#define VGT_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS_MASK 0x00000002L
+//VGT_NUM_INDICES
+#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x0
+#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_NUM_INSTANCES
+#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_TF_RING_SIZE
+#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
+#define VGT_TF_RING_SIZE__SIZE_MASK 0x0000FFFFL
+//VGT_HS_OFFCHIP_PARAM
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x0
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0x9
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x000001FFL
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x00000600L
+//VGT_TF_MEMORY_BASE
+#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE__BASE_MASK 0xFFFFFFFFL
+//VGT_TF_MEMORY_BASE_HI
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI_MASK 0x000000FFL
+//WD_POS_BUF_BASE
+#define WD_POS_BUF_BASE__BASE__SHIFT 0x0
+#define WD_POS_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//WD_POS_BUF_BASE_HI
+#define WD_POS_BUF_BASE_HI__BASE_HI__SHIFT 0x0
+#define WD_POS_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
+//WD_CNTL_SB_BUF_BASE
+#define WD_CNTL_SB_BUF_BASE__BASE__SHIFT 0x0
+#define WD_CNTL_SB_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//WD_CNTL_SB_BUF_BASE_HI
+#define WD_CNTL_SB_BUF_BASE_HI__BASE_HI__SHIFT 0x0
+#define WD_CNTL_SB_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
+//WD_INDEX_BUF_BASE
+#define WD_INDEX_BUF_BASE__BASE__SHIFT 0x0
+#define WD_INDEX_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//WD_INDEX_BUF_BASE_HI
+#define WD_INDEX_BUF_BASE_HI__BASE_HI__SHIFT 0x0
+#define WD_INDEX_BUF_BASE_HI__BASE_HI_MASK 0x000000FFL
+//IA_MULTI_VGT_PARAM
+#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE__SHIFT 0x0
+#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON__SHIFT 0x10
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP__SHIFT 0x11
+#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON__SHIFT 0x12
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI__SHIFT 0x13
+#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP__SHIFT 0x14
+#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC__SHIFT 0x15
+#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV__SHIFT 0x16
+#define IA_MULTI_VGT_PARAM__HW_USE_ONLY__SHIFT 0x17
+#define IA_MULTI_VGT_PARAM__PRIMGROUP_SIZE_MASK 0x0000FFFFL
+#define IA_MULTI_VGT_PARAM__PARTIAL_VS_WAVE_ON_MASK 0x00010000L
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOP_MASK 0x00020000L
+#define IA_MULTI_VGT_PARAM__PARTIAL_ES_WAVE_ON_MASK 0x00040000L
+#define IA_MULTI_VGT_PARAM__SWITCH_ON_EOI_MASK 0x00080000L
+#define IA_MULTI_VGT_PARAM__WD_SWITCH_ON_EOP_MASK 0x00100000L
+#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC_MASK 0x00200000L
+#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV_MASK 0x00400000L
+#define IA_MULTI_VGT_PARAM__HW_USE_ONLY_MASK 0x00800000L
+//VGT_INSTANCE_BASE_ID
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
+//PA_SU_LINE_STIPPLE_VALUE
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0x00FFFFFFL
+//PA_SC_LINE_STIPPLE_STATE
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x8
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0x0000000FL
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0x0000FF00L
+//PA_SC_SCREEN_EXTENT_MIN_0
+#define PA_SC_SCREEN_EXTENT_MIN_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_0
+#define PA_SC_SCREEN_EXTENT_MAX_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MIN_1
+#define PA_SC_SCREEN_EXTENT_MIN_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_1
+#define PA_SC_SCREEN_EXTENT_MAX_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y_MASK 0xFFFF0000L
+//PA_SC_P3D_TRAP_SCREEN_HV_EN
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_P3D_TRAP_SCREEN_H
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_V
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_P3D_TRAP_SCREEN_COUNT
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_HV_EN
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_HP3D_TRAP_SCREEN_H
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_V
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_COUNT
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_HV_EN
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_TRAP_SCREEN_H
+#define PA_SC_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_V
+#define PA_SC_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_COUNT
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_STATE_STEREO_X
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET__SHIFT 0x0
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_BASE
+#define SQ_THREAD_TRACE_BASE__ADDR__SHIFT 0x0
+#define SQ_THREAD_TRACE_BASE__ADDR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_SIZE
+#define SQ_THREAD_TRACE_SIZE__SIZE__SHIFT 0x0
+#define SQ_THREAD_TRACE_SIZE__SIZE_MASK 0x003FFFFFL
+//SQ_THREAD_TRACE_MASK
+#define SQ_THREAD_TRACE_MASK__CU_SEL__SHIFT 0x0
+#define SQ_THREAD_TRACE_MASK__SH_SEL__SHIFT 0x5
+#define SQ_THREAD_TRACE_MASK__REG_STALL_EN__SHIFT 0x7
+#define SQ_THREAD_TRACE_MASK__SIMD_EN__SHIFT 0x8
+#define SQ_THREAD_TRACE_MASK__VM_ID_MASK__SHIFT 0xc
+#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN__SHIFT 0xe
+#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN__SHIFT 0xf
+#define SQ_THREAD_TRACE_MASK__CU_SEL_MASK 0x0000001FL
+#define SQ_THREAD_TRACE_MASK__SH_SEL_MASK 0x00000020L
+#define SQ_THREAD_TRACE_MASK__REG_STALL_EN_MASK 0x00000080L
+#define SQ_THREAD_TRACE_MASK__SIMD_EN_MASK 0x00000F00L
+#define SQ_THREAD_TRACE_MASK__VM_ID_MASK_MASK 0x00003000L
+#define SQ_THREAD_TRACE_MASK__SPI_STALL_EN_MASK 0x00004000L
+#define SQ_THREAD_TRACE_MASK__SQ_STALL_EN_MASK 0x00008000L
+//SQ_THREAD_TRACE_TOKEN_MASK
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK__SHIFT 0x0
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK__SHIFT 0x10
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL__SHIFT 0x18
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_MASK_MASK 0x0000FFFFL
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_MASK_MASK 0x00FF0000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DROP_ON_STALL_MASK 0x01000000L
+//SQ_THREAD_TRACE_PERF_MASK
+#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK__SHIFT 0x0
+#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK__SHIFT 0x10
+#define SQ_THREAD_TRACE_PERF_MASK__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_THREAD_TRACE_PERF_MASK__SH1_MASK_MASK 0xFFFF0000L
+//SQ_THREAD_TRACE_CTRL
+#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER__SHIFT 0x1f
+#define SQ_THREAD_TRACE_CTRL__RESET_BUFFER_MASK 0x80000000L
+//SQ_THREAD_TRACE_MODE
+#define SQ_THREAD_TRACE_MODE__MASK_PS__SHIFT 0x0
+#define SQ_THREAD_TRACE_MODE__MASK_VS__SHIFT 0x3
+#define SQ_THREAD_TRACE_MODE__MASK_GS__SHIFT 0x6
+#define SQ_THREAD_TRACE_MODE__MASK_ES__SHIFT 0x9
+#define SQ_THREAD_TRACE_MODE__MASK_HS__SHIFT 0xc
+#define SQ_THREAD_TRACE_MODE__MASK_LS__SHIFT 0xf
+#define SQ_THREAD_TRACE_MODE__MASK_CS__SHIFT 0x12
+#define SQ_THREAD_TRACE_MODE__MODE__SHIFT 0x15
+#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE__SHIFT 0x17
+#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN__SHIFT 0x19
+#define SQ_THREAD_TRACE_MODE__TC_PERF_EN__SHIFT 0x1a
+#define SQ_THREAD_TRACE_MODE__ISSUE_MASK__SHIFT 0x1b
+#define SQ_THREAD_TRACE_MODE__TEST_MODE__SHIFT 0x1d
+#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN__SHIFT 0x1e
+#define SQ_THREAD_TRACE_MODE__WRAP__SHIFT 0x1f
+#define SQ_THREAD_TRACE_MODE__MASK_PS_MASK 0x00000007L
+#define SQ_THREAD_TRACE_MODE__MASK_VS_MASK 0x00000038L
+#define SQ_THREAD_TRACE_MODE__MASK_GS_MASK 0x000001C0L
+#define SQ_THREAD_TRACE_MODE__MASK_ES_MASK 0x00000E00L
+#define SQ_THREAD_TRACE_MODE__MASK_HS_MASK 0x00007000L
+#define SQ_THREAD_TRACE_MODE__MASK_LS_MASK 0x00038000L
+#define SQ_THREAD_TRACE_MODE__MASK_CS_MASK 0x001C0000L
+#define SQ_THREAD_TRACE_MODE__MODE_MASK 0x00600000L
+#define SQ_THREAD_TRACE_MODE__CAPTURE_MODE_MASK 0x01800000L
+#define SQ_THREAD_TRACE_MODE__AUTOFLUSH_EN_MASK 0x02000000L
+#define SQ_THREAD_TRACE_MODE__TC_PERF_EN_MASK 0x04000000L
+#define SQ_THREAD_TRACE_MODE__ISSUE_MASK_MASK 0x18000000L
+#define SQ_THREAD_TRACE_MODE__TEST_MODE_MASK 0x20000000L
+#define SQ_THREAD_TRACE_MODE__INTERRUPT_EN_MASK 0x40000000L
+#define SQ_THREAD_TRACE_MODE__WRAP_MASK 0x80000000L
+//SQ_THREAD_TRACE_BASE2
+#define SQ_THREAD_TRACE_BASE2__ADDR_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_BASE2__ADDR_HI_MASK 0x0000000FL
+//SQ_THREAD_TRACE_TOKEN_MASK2
+#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK__SHIFT 0x0
+#define SQ_THREAD_TRACE_TOKEN_MASK2__INST_MASK_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_WPTR
+#define SQ_THREAD_TRACE_WPTR__WPTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_WPTR__READ_OFFSET__SHIFT 0x1e
+#define SQ_THREAD_TRACE_WPTR__WPTR_MASK 0x3FFFFFFFL
+#define SQ_THREAD_TRACE_WPTR__READ_OFFSET_MASK 0xC0000000L
+//SQ_THREAD_TRACE_STATUS
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x0
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0x10
+#define SQ_THREAD_TRACE_STATUS__UTC_ERROR__SHIFT 0x1c
+#define SQ_THREAD_TRACE_STATUS__NEW_BUF__SHIFT 0x1d
+#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x1e
+#define SQ_THREAD_TRACE_STATUS__FULL__SHIFT 0x1f
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x000003FFL
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x03FF0000L
+#define SQ_THREAD_TRACE_STATUS__UTC_ERROR_MASK 0x10000000L
+#define SQ_THREAD_TRACE_STATUS__NEW_BUF_MASK 0x20000000L
+#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x40000000L
+#define SQ_THREAD_TRACE_STATUS__FULL_MASK 0x80000000L
+//SQ_THREAD_TRACE_HIWATER
+#define SQ_THREAD_TRACE_HIWATER__HIWATER__SHIFT 0x0
+#define SQ_THREAD_TRACE_HIWATER__HIWATER_MASK 0x00000007L
+//SQ_THREAD_TRACE_CNTR
+#define SQ_THREAD_TRACE_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_1
+#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_2
+#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_3
+#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xFFFFFFFFL
+//SQC_CACHES
+#define SQC_CACHES__TARGET_INST__SHIFT 0x0
+#define SQC_CACHES__TARGET_DATA__SHIFT 0x1
+#define SQC_CACHES__INVALIDATE__SHIFT 0x2
+#define SQC_CACHES__WRITEBACK__SHIFT 0x3
+#define SQC_CACHES__VOL__SHIFT 0x4
+#define SQC_CACHES__COMPLETE__SHIFT 0x10
+#define SQC_CACHES__TARGET_INST_MASK 0x00000001L
+#define SQC_CACHES__TARGET_DATA_MASK 0x00000002L
+#define SQC_CACHES__INVALIDATE_MASK 0x00000004L
+#define SQC_CACHES__WRITEBACK_MASK 0x00000008L
+#define SQC_CACHES__VOL_MASK 0x00000010L
+#define SQC_CACHES__COMPLETE_MASK 0x00010000L
+//SQC_WRITEBACK
+#define SQC_WRITEBACK__DWB__SHIFT 0x0
+#define SQC_WRITEBACK__DIRTY__SHIFT 0x1
+#define SQC_WRITEBACK__DWB_MASK 0x00000001L
+#define SQC_WRITEBACK__DIRTY_MASK 0x00000002L
+//TA_CS_BC_BASE_ADDR
+#define TA_CS_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
+#define TA_CS_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
+//TA_CS_BC_BASE_ADDR_HI
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
+//DB_OCCLUSION_COUNT0_LOW
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT0_HI
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT1_LOW
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT1_HI
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT2_LOW
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT2_HI
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT3_LOW
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT3_HI
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_ZPASS_COUNT_LOW
+#define DB_ZPASS_COUNT_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_ZPASS_COUNT_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_ZPASS_COUNT_HI
+#define DB_ZPASS_COUNT_HI__COUNT_HI__SHIFT 0x0
+#define DB_ZPASS_COUNT_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//GDS_RD_ADDR
+#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x0
+#define GDS_RD_ADDR__READ_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_DATA
+#define GDS_RD_DATA__READ_DATA__SHIFT 0x0
+#define GDS_RD_DATA__READ_DATA_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_ADDR
+#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x0
+#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_COUNT
+#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x0
+#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_DATA
+#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x0
+#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_ADDR
+#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_DATA
+#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_ADDR
+#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_DATA
+#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WRITE_COMPLETE
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x0
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xFFFFFFFFL
+//GDS_ATOM_CNTL
+#define GDS_ATOM_CNTL__AINC__SHIFT 0x0
+#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x6
+#define GDS_ATOM_CNTL__DMODE__SHIFT 0x8
+#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0xa
+#define GDS_ATOM_CNTL__AINC_MASK 0x0000003FL
+#define GDS_ATOM_CNTL__UNUSED1_MASK 0x000000C0L
+#define GDS_ATOM_CNTL__DMODE_MASK 0x00000300L
+#define GDS_ATOM_CNTL__UNUSED2_MASK 0xFFFFFC00L
+//GDS_ATOM_COMPLETE
+#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x0
+#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x1
+#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x00000001L
+#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xFFFFFFFEL
+//GDS_ATOM_BASE
+#define GDS_ATOM_BASE__BASE__SHIFT 0x0
+#define GDS_ATOM_BASE__UNUSED__SHIFT 0x10
+#define GDS_ATOM_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_ATOM_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_ATOM_SIZE
+#define GDS_ATOM_SIZE__SIZE__SHIFT 0x0
+#define GDS_ATOM_SIZE__UNUSED__SHIFT 0x10
+#define GDS_ATOM_SIZE__SIZE_MASK 0x0000FFFFL
+#define GDS_ATOM_SIZE__UNUSED_MASK 0xFFFF0000L
+//GDS_ATOM_OFFSET0
+#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x0
+#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_OFFSET1
+#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x0
+#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_DST
+#define GDS_ATOM_DST__DST__SHIFT 0x0
+#define GDS_ATOM_DST__DST_MASK 0xFFFFFFFFL
+//GDS_ATOM_OP
+#define GDS_ATOM_OP__OP__SHIFT 0x0
+#define GDS_ATOM_OP__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OP__OP_MASK 0x000000FFL
+#define GDS_ATOM_OP__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_SRC0
+#define GDS_ATOM_SRC0__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC0_U
+#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1
+#define GDS_ATOM_SRC1__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1_U
+#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0
+#define GDS_ATOM_READ0__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0_U
+#define GDS_ATOM_READ0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1
+#define GDS_ATOM_READ1__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1_U
+#define GDS_ATOM_READ1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_GWS_RESOURCE_CNTL
+#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x6
+#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x0000003FL
+#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xFFFFFFC0L
+//GDS_GWS_RESOURCE
+#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x0
+#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x1
+#define GDS_GWS_RESOURCE__TYPE__SHIFT 0xd
+#define GDS_GWS_RESOURCE__DED__SHIFT 0xe
+#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0xf
+#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x10
+#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x1c
+#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x1d
+#define GDS_GWS_RESOURCE__HALTED__SHIFT 0x1e
+#define GDS_GWS_RESOURCE__UNUSED1__SHIFT 0x1f
+#define GDS_GWS_RESOURCE__FLAG_MASK 0x00000001L
+#define GDS_GWS_RESOURCE__COUNTER_MASK 0x00001FFEL
+#define GDS_GWS_RESOURCE__TYPE_MASK 0x00002000L
+#define GDS_GWS_RESOURCE__DED_MASK 0x00004000L
+#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x00008000L
+#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0x0FFF0000L
+#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x10000000L
+#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x20000000L
+#define GDS_GWS_RESOURCE__HALTED_MASK 0x40000000L
+#define GDS_GWS_RESOURCE__UNUSED1_MASK 0x80000000L
+//GDS_GWS_RESOURCE_CNT
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x10
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0x0000FFFFL
+#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_CNTL
+#define GDS_OA_CNTL__INDEX__SHIFT 0x0
+#define GDS_OA_CNTL__UNUSED__SHIFT 0x4
+#define GDS_OA_CNTL__INDEX_MASK 0x0000000FL
+#define GDS_OA_CNTL__UNUSED_MASK 0xFFFFFFF0L
+//GDS_OA_COUNTER
+#define GDS_OA_COUNTER__SPACE_AVAILABLE__SHIFT 0x0
+#define GDS_OA_COUNTER__SPACE_AVAILABLE_MASK 0xFFFFFFFFL
+//GDS_OA_ADDRESS
+#define GDS_OA_ADDRESS__DS_ADDRESS__SHIFT 0x0
+#define GDS_OA_ADDRESS__CRAWLER__SHIFT 0x10
+#define GDS_OA_ADDRESS__CRAWLER_TYPE__SHIFT 0x14
+#define GDS_OA_ADDRESS__UNUSED__SHIFT 0x16
+#define GDS_OA_ADDRESS__NO_ALLOC__SHIFT 0x1e
+#define GDS_OA_ADDRESS__ENABLE__SHIFT 0x1f
+#define GDS_OA_ADDRESS__DS_ADDRESS_MASK 0x0000FFFFL
+#define GDS_OA_ADDRESS__CRAWLER_MASK 0x000F0000L
+#define GDS_OA_ADDRESS__CRAWLER_TYPE_MASK 0x00300000L
+#define GDS_OA_ADDRESS__UNUSED_MASK 0x3FC00000L
+#define GDS_OA_ADDRESS__NO_ALLOC_MASK 0x40000000L
+#define GDS_OA_ADDRESS__ENABLE_MASK 0x80000000L
+//GDS_OA_INCDEC
+#define GDS_OA_INCDEC__VALUE__SHIFT 0x0
+#define GDS_OA_INCDEC__INCDEC__SHIFT 0x1f
+#define GDS_OA_INCDEC__VALUE_MASK 0x7FFFFFFFL
+#define GDS_OA_INCDEC__INCDEC_MASK 0x80000000L
+//GDS_OA_RING_SIZE
+#define GDS_OA_RING_SIZE__RING_SIZE__SHIFT 0x0
+#define GDS_OA_RING_SIZE__RING_SIZE_MASK 0xFFFFFFFFL
+//SPI_CONFIG_CNTL
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x0
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x15
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x18
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x19
+#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET__SHIFT 0x1a
+#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL__SHIFT 0x1b
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA__SHIFT 0x1c
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA__SHIFT 0x1d
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL__SHIFT 0x1e
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x001FFFFFL
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0x00E00000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x01000000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x02000000L
+#define SPI_CONFIG_CNTL__RSRC_MGMT_RESET_MASK 0x04000000L
+#define SPI_CONFIG_CNTL__TTRACE_STALL_ALL_MASK 0x08000000L
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA_MASK 0x10000000L
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA_MASK 0x20000000L
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL_MASK 0xC0000000L
+//SPI_CONFIG_CNTL_1
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x0
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x4
+#define SPI_CONFIG_CNTL_1__BATON_RESET_DISABLE__SHIFT 0x5
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x6
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x7
+#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE__SHIFT 0x8
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE__SHIFT 0x9
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0xa
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE__SHIFT 0xe
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE__SHIFT 0xf
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE__SHIFT 0x10
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x00000010L
+#define SPI_CONFIG_CNTL_1__BATON_RESET_DISABLE_MASK 0x00000020L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x00000040L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x00000080L
+#define SPI_CONFIG_CNTL_1__CRC_SIMD_ID_WADDR_DISABLE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_MODE_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x00003C00L
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE_MASK 0x00004000L
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE_MASK 0x00008000L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_SIZE_MASK 0xFFFF0000L
+//SPI_CONFIG_CNTL_2
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD__SHIFT 0x0
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD__SHIFT 0x4
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD_MASK 0x000000F0L
+//SPI_WAVE_LIMIT_CNTL
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN__SHIFT 0x0
+#define SPI_WAVE_LIMIT_CNTL__VS_WAVE_GRAN__SHIFT 0x2
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN__SHIFT 0x4
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN__SHIFT 0x6
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN_MASK 0x00000003L
+#define SPI_WAVE_LIMIT_CNTL__VS_WAVE_GRAN_MASK 0x0000000CL
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN_MASK 0x00000030L
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN_MASK 0x000000C0L
+
+
+// addressBlock: gc_perfddec
+//CPG_PERFCOUNTER1_LO
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER1_HI
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_LO
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_HI
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_LO
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_HI
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_LO
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_HI
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_LO
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_HI
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_LO
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_HI
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_LATENCY_STATS_DATA
+#define CPF_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPF_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_LATENCY_STATS_DATA
+#define CPG_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPG_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPC_LATENCY_STATS_DATA
+#define CPC_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPC_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_LO
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_HI
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_LO
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_HI
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_LO
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_HI
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_LO
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_HI
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_LO
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_HI
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_LO
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_HI
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER0_LO
+#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define WD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER0_HI
+#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define WD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER1_LO
+#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define WD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER1_HI
+#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define WD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER2_LO
+#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define WD_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER2_HI
+#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define WD_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER3_LO
+#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define WD_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//WD_PERFCOUNTER3_HI
+#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define WD_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER0_LO
+#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define IA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER0_HI
+#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define IA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER1_LO
+#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define IA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER1_HI
+#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define IA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER2_LO
+#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define IA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER2_HI
+#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define IA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER3_LO
+#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define IA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//IA_PERFCOUNTER3_HI
+#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define IA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER0_LO
+#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define VGT_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER0_HI
+#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define VGT_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER1_LO
+#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define VGT_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER1_HI
+#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define VGT_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER2_LO
+#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define VGT_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER2_HI
+#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define VGT_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER3_LO
+#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define VGT_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//VGT_PERFCOUNTER3_HI
+#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define VGT_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_LO
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_HI
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
+//PA_SU_PERFCOUNTER1_LO
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER1_HI
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
+//PA_SU_PERFCOUNTER2_LO
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER2_HI
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
+//PA_SU_PERFCOUNTER3_LO
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER3_HI
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0x0000FFFFL
+//PA_SC_PERFCOUNTER0_LO
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER0_HI
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_LO
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_HI
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_LO
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_HI
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_LO
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_HI
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_LO
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_HI
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_LO
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_HI
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_LO
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_HI
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_LO
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_HI
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_HI
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_LO
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_HI
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_LO
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_HI
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_LO
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_HI
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_LO
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_HI
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_LO
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_HI
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_LO
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER0_LO
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER0_HI
+#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER1_LO
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER1_HI
+#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER2_LO
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER2_HI
+#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER3_LO
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER3_HI
+#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER4_LO
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER4_HI
+#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER5_LO
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER5_HI
+#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER6_LO
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER6_HI
+#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER7_LO
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER7_HI
+#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER8_LO
+#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER8_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER8_HI
+#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER8_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER9_LO
+#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER9_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER9_HI
+#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER9_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER10_LO
+#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER10_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER10_HI
+#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER10_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER11_LO
+#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER11_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER11_HI
+#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER11_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER12_LO
+#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER12_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER12_HI
+#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER12_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER13_LO
+#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER13_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER13_HI
+#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER13_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER14_LO
+#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER14_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER14_HI
+#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER14_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER15_LO
+#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER15_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER15_HI
+#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQ_PERFCOUNTER15_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_LO
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_HI
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_LO
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_HI
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_LO
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_HI
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_LO
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_HI
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER0_LO
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER0_HI
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_LO
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_HI
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_LO
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_HI
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_LO
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_HI
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_LO
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_HI
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_LO
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_HI
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_LO
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_HI
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_LO
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_HI
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_LO
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_HI
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_LO
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_HI
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_LO
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_HI
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_LO
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_HI
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER0_LO
+#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER0_HI
+#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER1_LO
+#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER1_HI
+#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER2_LO
+#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER2_HI
+#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER3_LO
+#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCC_PERFCOUNTER3_HI
+#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER0_LO
+#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER0_HI
+#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER1_LO
+#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER1_HI
+#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER2_LO
+#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER2_HI
+#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER3_LO
+#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCA_PERFCOUNTER3_HI
+#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_LO
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_HI
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_LO
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_HI
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_LO
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_HI
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_LO
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_HI
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_LO
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_HI
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_LO
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_HI
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_LO
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_HI
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_LO
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_HI
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_LO
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_HI
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_LO
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_HI
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_LO
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_HI
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_LO
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_HI
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_LO
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_HI
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_LO
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_HI
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_utcl2_atcl2pfcntrdec
+//ATC_L2_PERFCOUNTER_LO
+#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//ATC_L2_PERFCOUNTER_HI
+#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_utcl2_vml2prdec
+//MC_VM_L2_PERFCOUNTER_LO
+#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MC_VM_L2_PERFCOUNTER_HI
+#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_perfsdec
+//CPG_PERFCOUNTER1_SELECT
+#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPG_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT1
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT
+#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPC_PERFCOUNTER1_SELECT
+#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPC_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPC_PERFCOUNTER0_SELECT1
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER1_SELECT
+#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPF_PERFCOUNTER1_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT1
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL2__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL3__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL2_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_SEL3_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT
+#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CP_PERFMON_CNTL
+#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x4
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x8
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000FL
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0x000000F0L
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//CPC_PERFCOUNTER0_SELECT
+#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL0__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL1__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL0_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT__CNTR_SEL1_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPF_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x00000007L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPG_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPF_LATENCY_STATS_SELECT
+#define CPF_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPF_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPF_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_LATENCY_STATS_SELECT__INDEX_MASK 0x0000000FL
+#define CPF_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPF_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPG_LATENCY_STATS_SELECT
+#define CPG_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPG_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPG_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPG_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPC_LATENCY_STATS_SELECT
+#define CPC_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPC_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPC_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPC_LATENCY_STATS_SELECT__INDEX_MASK 0x00000007L
+#define CPC_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPC_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CP_DRAW_OBJECT
+#define CP_DRAW_OBJECT__OBJECT__SHIFT 0x0
+#define CP_DRAW_OBJECT__OBJECT_MASK 0xFFFFFFFFL
+//CP_DRAW_OBJECT_COUNTER
+#define CP_DRAW_OBJECT_COUNTER__COUNT__SHIFT 0x0
+#define CP_DRAW_OBJECT_COUNTER__COUNT_MASK 0x0000FFFFL
+//CP_DRAW_WINDOW_MASK_HI
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_HI
+#define CP_DRAW_WINDOW_HI__WINDOW_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_HI__WINDOW_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_LO
+#define CP_DRAW_WINDOW_LO__MIN__SHIFT 0x0
+#define CP_DRAW_WINDOW_LO__MAX__SHIFT 0x10
+#define CP_DRAW_WINDOW_LO__MIN_MASK 0x0000FFFFL
+#define CP_DRAW_WINDOW_LO__MAX_MASK 0xFFFF0000L
+//CP_DRAW_WINDOW_CNTL
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX__SHIFT 0x0
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN__SHIFT 0x1
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI__SHIFT 0x2
+#define CP_DRAW_WINDOW_CNTL__MODE__SHIFT 0x8
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX_MASK 0x00000001L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN_MASK 0x00000002L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI_MASK 0x00000004L
+#define CP_DRAW_WINDOW_CNTL__MODE_MASK 0x00000100L
+//GRBM_PERFCOUNTER0_SELECT
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER0_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER0_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER0_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER0_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_PERFCOUNTER1_SELECT
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER1_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER1_SELECT__IA_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER1_SELECT__TC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER1_SELECT__WD_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_SE0_PERFCOUNTER_SELECT
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+//GRBM_SE1_PERFCOUNTER_SELECT
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+//GRBM_SE2_PERFCOUNTER_SELECT
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+//GRBM_SE3_PERFCOUNTER_SELECT
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__VGT_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+//WD_PERFCOUNTER0_SELECT
+#define WD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define WD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define WD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
+#define WD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//WD_PERFCOUNTER1_SELECT
+#define WD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define WD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define WD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
+#define WD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//WD_PERFCOUNTER2_SELECT
+#define WD_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define WD_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define WD_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
+#define WD_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//WD_PERFCOUNTER3_SELECT
+#define WD_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define WD_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define WD_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
+#define WD_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER0_SELECT
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define IA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define IA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define IA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define IA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER1_SELECT
+#define IA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define IA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define IA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
+#define IA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER2_SELECT
+#define IA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define IA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define IA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
+#define IA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER3_SELECT
+#define IA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define IA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define IA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
+#define IA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//IA_PERFCOUNTER0_SELECT1
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define IA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define IA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//VGT_PERFCOUNTER0_SELECT
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define VGT_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define VGT_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define VGT_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//VGT_PERFCOUNTER1_SELECT
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define VGT_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define VGT_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define VGT_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//VGT_PERFCOUNTER2_SELECT
+#define VGT_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define VGT_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define VGT_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000000FFL
+#define VGT_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//VGT_PERFCOUNTER3_SELECT
+#define VGT_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define VGT_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define VGT_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000000FFL
+#define VGT_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//VGT_PERFCOUNTER0_SELECT1
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define VGT_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define VGT_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//VGT_PERFCOUNTER1_SELECT1
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define VGT_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define VGT_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//VGT_PERFCOUNTER_SEID_MASK
+#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK__SHIFT 0x0
+#define VGT_PERFCOUNTER_SEID_MASK__PERF_SEID_IGNORE_MASK_MASK 0x000000FFL
+//PA_SU_PERFCOUNTER0_SELECT
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER0_SELECT1
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT1
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER2_SELECT
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER3_SELECT
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT1
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER1_SELECT
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER2_SELECT
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER3_SELECT
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER4_SELECT
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER5_SELECT
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER6_SELECT
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER7_SELECT
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER0_SELECT
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER0_SELECT1
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT1
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT1
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT1
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER4_SELECT
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000000FFL
+//SPI_PERFCOUNTER5_SELECT
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000000FFL
+//SPI_PERFCOUNTER_BINS
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x0
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x4
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x8
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0xc
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x10
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x14
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x18
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x1c
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0x0000000FL
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0x000000F0L
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0x00000F00L
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0x0000F000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0x000F0000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0x00F00000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0x0F000000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xF0000000L
+//SQ_PERFCOUNTER0_SELECT
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER0_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER0_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER0_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER1_SELECT
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER1_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER1_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER1_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER2_SELECT
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER2_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER2_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER2_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER3_SELECT
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER3_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER3_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER3_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER4_SELECT
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER4_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER4_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER4_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER5_SELECT
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER5_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER5_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER5_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER6_SELECT
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER6_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER6_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER6_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER7_SELECT
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER7_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER7_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER7_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER8_SELECT
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER8_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER8_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER8_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER9_SELECT
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER9_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER9_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER9_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER10_SELECT
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER10_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER10_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER10_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER11_SELECT
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER11_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER11_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER11_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER12_SELECT
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER12_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER12_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER12_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER13_SELECT
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER13_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER13_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER13_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER14_SELECT
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER14_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER14_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER14_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER15_SELECT
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK__SHIFT 0xc
+#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK__SHIFT 0x18
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER15_SELECT__SQC_BANK_MASK_MASK 0x0000F000L
+#define SQ_PERFCOUNTER15_SELECT__SQC_CLIENT_MASK_MASK 0x000F0000L
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER15_SELECT__SIMD_MASK_MASK 0x0F000000L
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER_CTRL
+#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL__VS_EN__SHIFT 0x1
+#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
+#define SQ_PERFCOUNTER_CTRL__ES_EN__SHIFT 0x3
+#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
+#define SQ_PERFCOUNTER_CTRL__LS_EN__SHIFT 0x5
+#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
+#define SQ_PERFCOUNTER_CTRL__CNTR_RATE__SHIFT 0x8
+#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH__SHIFT 0xd
+#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL__VS_EN_MASK 0x00000002L
+#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQ_PERFCOUNTER_CTRL__ES_EN_MASK 0x00000008L
+#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQ_PERFCOUNTER_CTRL__LS_EN_MASK 0x00000020L
+#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQ_PERFCOUNTER_CTRL__CNTR_RATE_MASK 0x00001F00L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_FLUSH_MASK 0x00002000L
+//SQ_PERFCOUNTER_MASK
+#define SQ_PERFCOUNTER_MASK__SH0_MASK__SHIFT 0x0
+#define SQ_PERFCOUNTER_MASK__SH1_MASK__SHIFT 0x10
+#define SQ_PERFCOUNTER_MASK__SH0_MASK_MASK 0x0000FFFFL
+#define SQ_PERFCOUNTER_MASK__SH1_MASK_MASK 0xFFFF0000L
+//SQ_PERFCOUNTER_CTRL2
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+//SX_PERFCOUNTER0_SELECT
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER2_SELECT
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER3_SELECT
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER0_SELECT1
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT1
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER1_SELECT
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER2_SELECT
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER3_SELECT
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT1
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003FC00L
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT1
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000FFL
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003FC00L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER1_SELECT
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000000FFL
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0003FC00L
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT1
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000000FFL
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0003FC00L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TD_PERFCOUNTER1_SELECT
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000000FFL
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT1
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT1
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER2_SELECT
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER3_SELECT
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCC_PERFCOUNTER0_SELECT
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCC_PERFCOUNTER0_SELECT1
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define TCC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//TCC_PERFCOUNTER1_SELECT
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCC_PERFCOUNTER1_SELECT1
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define TCC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//TCC_PERFCOUNTER2_SELECT
+#define TCC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCC_PERFCOUNTER3_SELECT
+#define TCC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCA_PERFCOUNTER0_SELECT
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCA_PERFCOUNTER0_SELECT1
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define TCA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//TCA_PERFCOUNTER1_SELECT
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCA_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCA_PERFCOUNTER1_SELECT1
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCA_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define TCA_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//TCA_PERFCOUNTER2_SELECT
+#define TCA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCA_PERFCOUNTER3_SELECT
+#define TCA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER_FILTER
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE__SHIFT 0x0
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL__SHIFT 0x1
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE__SHIFT 0x4
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL__SHIFT 0x5
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE__SHIFT 0xa
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL__SHIFT 0xb
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE__SHIFT 0xc
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL__SHIFT 0xd
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE__SHIFT 0x11
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL__SHIFT 0x12
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE__SHIFT 0x15
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL__SHIFT 0x16
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE_MASK 0x00000001L
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL_MASK 0x0000000EL
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE_MASK 0x00000010L
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL_MASK 0x000003E0L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE_MASK 0x00000400L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL_MASK 0x00000800L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE_MASK 0x00001000L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL_MASK 0x0000E000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE_MASK 0x00020000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL_MASK 0x001C0000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE_MASK 0x00200000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL_MASK 0x00C00000L
+//CB_PERFCOUNTER0_SELECT
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0007FC00L
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER0_SELECT1
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001FFL
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007FC00L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//CB_PERFCOUNTER1_SELECT
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER2_SELECT
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER3_SELECT
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT1
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT1
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER2_SELECT
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER3_SELECT
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RLC_SPM_PERFMON_CNTL
+#define RLC_SPM_PERFMON_CNTL__RESERVED1__SHIFT 0x0
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE__SHIFT 0xc
+#define RLC_SPM_PERFMON_CNTL__RESERVED__SHIFT 0xe
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL__SHIFT 0x10
+#define RLC_SPM_PERFMON_CNTL__RESERVED1_MASK 0x00000FFFL
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE_MASK 0x00003000L
+#define RLC_SPM_PERFMON_CNTL__RESERVED_MASK 0x0000C000L
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL_MASK 0xFFFF0000L
+//RLC_SPM_PERFMON_RING_BASE_LO
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO_MASK 0xFFFFFFFFL
+//RLC_SPM_PERFMON_RING_BASE_HI
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED__SHIFT 0x10
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI_MASK 0x0000FFFFL
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED_MASK 0xFFFF0000L
+//RLC_SPM_PERFMON_RING_SIZE
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE_MASK 0xFFFFFFFFL
+//RLC_SPM_PERFMON_SEGMENT_SIZE
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE__SHIFT 0x0
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1__SHIFT 0x8
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE__SHIFT 0xb
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE__SHIFT 0x10
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE__SHIFT 0x15
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE__SHIFT 0x1a
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED__SHIFT 0x1f
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__PERFMON_SEGMENT_SIZE_MASK 0x000000FFL
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED1_MASK 0x00000700L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_LINE_MASK 0x0000F800L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE0_NUM_LINE_MASK 0x001F0000L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE1_NUM_LINE_MASK 0x03E00000L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE2_NUM_LINE_MASK 0x7C000000L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__RESERVED_MASK 0x80000000L
+//RLC_SPM_SE_MUXSEL_ADDR
+#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0xFFFFFFFFL
+//RLC_SPM_SE_MUXSEL_DATA
+#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_CPG_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_CPG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_CPC_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_CPC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_CPF_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_CPF_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_CB_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_CB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_DB_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_DB_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_PA_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_PA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_GDS_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_GDS_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_IA_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_IA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_SC_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_SC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TCC_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TCC_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TCA_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TCA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TCP_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TCP_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TA_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TA_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_TD_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_TD_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_VGT_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_VGT_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_SPI_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_SPI_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_SQG_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_SQG_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_SX_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_SX_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_GLOBAL_MUXSEL_ADDR
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__PERFMON_SEL_ADDR_MASK 0xFFFFFFFFL
+//RLC_SPM_GLOBAL_MUXSEL_DATA
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__PERFMON_SEL_DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_RING_RDPTR
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR__SHIFT 0x0
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR_MASK 0xFFFFFFFFL
+//RLC_SPM_SEGMENT_THRESHOLD
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD__SHIFT 0x0
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD_MASK 0xFFFFFFFFL
+//RLC_SPM_RMI_PERFMON_SAMPLE_DELAY
+#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__RESERVED__SHIFT 0x8
+#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__PERFMON_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_RMI_PERFMON_SAMPLE_DELAY__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_PERFMON_SAMPLE_DELAY_MAX
+#define RLC_SPM_PERFMON_SAMPLE_DELAY_MAX__PERFMON_MAX_SAMPLE_DELAY__SHIFT 0x0
+#define RLC_SPM_PERFMON_SAMPLE_DELAY_MAX__RESERVED__SHIFT 0x8
+#define RLC_SPM_PERFMON_SAMPLE_DELAY_MAX__PERFMON_MAX_SAMPLE_DELAY_MASK 0x000000FFL
+#define RLC_SPM_PERFMON_SAMPLE_DELAY_MAX__RESERVED_MASK 0xFFFFFF00L
+//RLC_PERFMON_CLK_CNTL_UCODE
+#define RLC_PERFMON_CLK_CNTL_UCODE__PERFMON_CLOCK_STATE__SHIFT 0x0
+#define RLC_PERFMON_CLK_CNTL_UCODE__PERFMON_CLOCK_STATE_MASK 0x00000001L
+//RLC_PERFMON_CLK_CNTL
+#define RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE__SHIFT 0x0
+#define RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE_MASK 0x00000001L
+//RLC_PERFMON_CNTL
+#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000007L
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//RLC_PERFCOUNTER0_SELECT
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x00FFL
+//RLC_PERFCOUNTER1_SELECT
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x00FFL
+//RLC_GPU_IOV_PERF_CNT_CNTL
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT__SHIFT 0x1
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET__SHIFT 0x2
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED__SHIFT 0x3
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT_MASK 0x00000002L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET_MASK 0x00000004L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED_MASK 0xFFFFFFF8L
+//RLC_GPU_IOV_PERF_CNT_WR_ADDR
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_WR_DATA
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA_MASK 0x0000000FL
+//RLC_GPU_IOV_PERF_CNT_RD_ADDR
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_RD_DATA
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA_MASK 0x0000000FL
+//RMI_PERFCOUNTER0_SELECT
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x0007FC00L
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER0_SELECT1
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000001FFL
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x0007FC00L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER1_SELECT
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x0007FC00L
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT1
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000001FFL
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x0007FC00L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER3_SELECT
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERF_COUNTER_CNTL
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL__SHIFT 0x0
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL__SHIFT 0x2
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL__SHIFT 0x4
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0__SHIFT 0x6
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1__SHIFT 0x8
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID__SHIFT 0xa
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID__SHIFT 0xe
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD__SHIFT 0x13
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET__SHIFT 0x19
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL__SHIFT 0x1a
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL_MASK 0x00000003L
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL_MASK 0x0000000CL
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL_MASK 0x00000030L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0_MASK 0x000000C0L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1_MASK 0x00000300L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID_MASK 0x00003C00L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID_MASK 0x0007C000L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD_MASK 0x01F80000L
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET_MASK 0x02000000L
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL_MASK 0x04000000L
+
+
+// addressBlock: gc_utcl2_atcl2pfcntldec
+//ATC_L2_PERFCOUNTER0_CFG
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//ATC_L2_PERFCOUNTER1_CFG
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//ATC_L2_PERFCOUNTER_RSLT_CNTL
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_utcl2_vml2pldec
+//MC_VM_L2_PERFCOUNTER0_CFG
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER1_CFG
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER2_CFG
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER3_CFG
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER4_CFG
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER5_CFG
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER6_CFG
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER7_CFG
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER_RSLT_CNTL
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_rlcpdec
+//RLC_CNTL
+#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x0
+#define RLC_CNTL__FORCE_RETRY__SHIFT 0x1
+#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x2
+#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x3
+#define RLC_CNTL__RESERVED__SHIFT 0x4
+#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x00000001L
+#define RLC_CNTL__FORCE_RETRY_MASK 0x00000002L
+#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x00000004L
+#define RLC_CNTL__RLC_STEP_F32_MASK 0x00000008L
+#define RLC_CNTL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_STAT
+#define RLC_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_STAT__RLC_SRM_BUSY__SHIFT 0x1
+#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x2
+#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x3
+#define RLC_STAT__MC_BUSY__SHIFT 0x4
+#define RLC_STAT__RLC_THREAD_0_BUSY__SHIFT 0x5
+#define RLC_STAT__RLC_THREAD_1_BUSY__SHIFT 0x6
+#define RLC_STAT__RLC_THREAD_2_BUSY__SHIFT 0x7
+#define RLC_STAT__RESERVED__SHIFT 0x8
+#define RLC_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_STAT__RLC_SRM_BUSY_MASK 0x00000002L
+#define RLC_STAT__RLC_GPM_BUSY_MASK 0x00000004L
+#define RLC_STAT__RLC_SPM_BUSY_MASK 0x00000008L
+#define RLC_STAT__MC_BUSY_MASK 0x00000010L
+#define RLC_STAT__RLC_THREAD_0_BUSY_MASK 0x00000020L
+#define RLC_STAT__RLC_THREAD_1_BUSY_MASK 0x00000040L
+#define RLC_STAT__RLC_THREAD_2_BUSY_MASK 0x00000080L
+#define RLC_STAT__RESERVED_MASK 0xFFFFFF00L
+//RLC_SAFE_MODE
+#define RLC_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_MEM_SLP_CNTL
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x0
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x1
+#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x2
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x8
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x10
+#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x18
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x00000001L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x00000002L
+#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x0000007CL
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0x0000FF00L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
+#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xFF000000L
+//SMU_RLC_RESPONSE
+#define SMU_RLC_RESPONSE__RESP__SHIFT 0x0
+#define SMU_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_RLCV_SAFE_MODE
+#define RLC_RLCV_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_RLCV_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_RLCV_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_RLCV_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_RLCV_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_RLCV_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_RLCV_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_RLCV_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_RLCV_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_RLCV_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_SMU_SAFE_MODE
+#define RLC_SMU_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SMU_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SMU_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SMU_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SMU_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SMU_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SMU_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SMU_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SMU_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SMU_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_RLCV_COMMAND
+#define RLC_RLCV_COMMAND__CMD__SHIFT 0x0
+#define RLC_RLCV_COMMAND__RESERVED__SHIFT 0x4
+#define RLC_RLCV_COMMAND__CMD_MASK 0x0000000FL
+#define RLC_RLCV_COMMAND__RESERVED_MASK 0xFFFFFFF0L
+//RLC_REFCLOCK_TIMESTAMP_LSB
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB_MASK 0xFFFFFFFFL
+//RLC_REFCLOCK_TIMESTAMP_MSB
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_0
+#define RLC_GPM_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_1
+#define RLC_GPM_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_2
+#define RLC_GPM_TIMER_INT_2__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_2__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_CTRL
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN__SHIFT 0x2
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN__SHIFT 0x3
+#define RLC_GPM_TIMER_CTRL__RESERVED__SHIFT 0x4
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN_MASK 0x00000004L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN_MASK 0x00000008L
+#define RLC_GPM_TIMER_CTRL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_LB_CNTR_MAX
+#define RLC_LB_CNTR_MAX__LB_CNTR_MAX__SHIFT 0x0
+#define RLC_LB_CNTR_MAX__LB_CNTR_MAX_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_STAT
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT__SHIFT 0x2
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT__SHIFT 0x3
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC__SHIFT 0xa
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC__SHIFT 0xb
+#define RLC_GPM_TIMER_STAT__RESERVED__SHIFT 0xc
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT_MASK 0x00000004L
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT_MASK 0x00000008L
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC_MASK 0x00000400L
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC_MASK 0x00000800L
+#define RLC_GPM_TIMER_STAT__RESERVED_MASK 0xFFFFF000L
+//RLC_GPM_TIMER_INT_3
+#define RLC_GPM_TIMER_INT_3__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_3__TIMER_MASK 0xFFFFFFFFL
+//RLC_SERDES_WR_NONCU_MASTER_MASK_1
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SE_MASTER_MASK_1__SHIFT 0x0
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_MASTER_MASK_1__SHIFT 0x10
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_GFX_MASTER_MASK_1__SHIFT 0x11
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__TC0_1_MASTER_MASK__SHIFT 0x12
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_1__SHIFT 0x13
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE4_MASTER_MASK__SHIFT 0x14
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE5_MASTER_MASK__SHIFT 0x15
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE6_MASTER_MASK__SHIFT 0x16
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE7_MASTER_MASK__SHIFT 0x17
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__EA_1_MASTER_MASK__SHIFT 0x18
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED__SHIFT 0x19
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SE_MASTER_MASK_1_MASK 0x0000FFFFL
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_MASTER_MASK_1_MASK 0x00010000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__GC_GFX_MASTER_MASK_1_MASK 0x00020000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__TC0_1_MASTER_MASK_MASK 0x00040000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_1_MASK 0x00080000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE4_MASTER_MASK_MASK 0x00100000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE5_MASTER_MASK_MASK 0x00200000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE6_MASTER_MASK_MASK 0x00400000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__SPARE7_MASTER_MASK_MASK 0x00800000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__EA_1_MASTER_MASK_MASK 0x01000000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK_1__RESERVED_MASK 0xFE000000L
+//RLC_SERDES_NONCU_MASTER_BUSY_1
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SE_MASTER_BUSY_1__SHIFT 0x0
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_MASTER_BUSY_1__SHIFT 0x10
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_GFX_MASTER_BUSY_1__SHIFT 0x11
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__TC0_MASTER_BUSY_1__SHIFT 0x12
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_1__SHIFT 0x13
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE4_MASTER_BUSY__SHIFT 0x14
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE5_MASTER_BUSY__SHIFT 0x15
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE6_MASTER_BUSY__SHIFT 0x16
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE7_MASTER_BUSY__SHIFT 0x17
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__EA_1_MASTER_BUSY__SHIFT 0x18
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED__SHIFT 0x19
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SE_MASTER_BUSY_1_MASK 0x0000FFFFL
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_MASTER_BUSY_1_MASK 0x00010000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__GC_GFX_MASTER_BUSY_1_MASK 0x00020000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__TC0_MASTER_BUSY_1_MASK 0x00040000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_1_MASK 0x00080000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE4_MASTER_BUSY_MASK 0x00100000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE5_MASTER_BUSY_MASK 0x00200000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE6_MASTER_BUSY_MASK 0x00400000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__SPARE7_MASTER_BUSY_MASK 0x00800000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__EA_1_MASTER_BUSY_MASK 0x01000000L
+#define RLC_SERDES_NONCU_MASTER_BUSY_1__RESERVED_MASK 0xFE000000L
+//RLC_INT_STAT
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID__SHIFT 0x0
+#define RLC_INT_STAT__CP_RLC_INT_PENDING__SHIFT 0x8
+#define RLC_INT_STAT__RESERVED__SHIFT 0x9
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID_MASK 0x000000FFL
+#define RLC_INT_STAT__CP_RLC_INT_PENDING_MASK 0x00000100L
+#define RLC_INT_STAT__RESERVED_MASK 0xFFFFFE00L
+//RLC_LB_CNTL
+#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE__SHIFT 0x0
+#define RLC_LB_CNTL__LB_CNT_CP_BUSY__SHIFT 0x1
+#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE__SHIFT 0x2
+#define RLC_LB_CNTL__LB_CNT_REG_INC__SHIFT 0x3
+#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST__SHIFT 0x4
+#define RLC_LB_CNTL__RESERVED__SHIFT 0xc
+#define RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK 0x00000001L
+#define RLC_LB_CNTL__LB_CNT_CP_BUSY_MASK 0x00000002L
+#define RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK 0x00000004L
+#define RLC_LB_CNTL__LB_CNT_REG_INC_MASK 0x00000008L
+#define RLC_LB_CNTL__CU_MASK_USED_OFF_HYST_MASK 0x00000FF0L
+#define RLC_LB_CNTL__RESERVED_MASK 0xFFFFF000L
+//RLC_MGCG_CTRL
+#define RLC_MGCG_CTRL__MGCG_EN__SHIFT 0x0
+#define RLC_MGCG_CTRL__SILICON_EN__SHIFT 0x1
+#define RLC_MGCG_CTRL__SIMULATION_EN__SHIFT 0x2
+#define RLC_MGCG_CTRL__ON_DELAY__SHIFT 0x3
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS__SHIFT 0x7
+#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL__SHIFT 0xf
+#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL__SHIFT 0x10
+#define RLC_MGCG_CTRL__SPARE__SHIFT 0x11
+#define RLC_MGCG_CTRL__MGCG_EN_MASK 0x00000001L
+#define RLC_MGCG_CTRL__SILICON_EN_MASK 0x00000002L
+#define RLC_MGCG_CTRL__SIMULATION_EN_MASK 0x00000004L
+#define RLC_MGCG_CTRL__ON_DELAY_MASK 0x00000078L
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS_MASK 0x00007F80L
+#define RLC_MGCG_CTRL__GC_CAC_MGCG_CLK_CNTL_MASK 0x00008000L
+#define RLC_MGCG_CTRL__SE_CAC_MGCG_CLK_CNTL_MASK 0x00010000L
+#define RLC_MGCG_CTRL__SPARE_MASK 0xFFFE0000L
+//RLC_LB_CNTR_INIT
+#define RLC_LB_CNTR_INIT__LB_CNTR_INIT__SHIFT 0x0
+#define RLC_LB_CNTR_INIT__LB_CNTR_INIT_MASK 0xFFFFFFFFL
+//RLC_LOAD_BALANCE_CNTR
+#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR__SHIFT 0x0
+#define RLC_LOAD_BALANCE_CNTR__RLC_LOAD_BALANCE_CNTR_MASK 0xFFFFFFFFL
+//RLC_JUMP_TABLE_RESTORE
+#define RLC_JUMP_TABLE_RESTORE__ADDR__SHIFT 0x0
+#define RLC_JUMP_TABLE_RESTORE__ADDR_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_2
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE__SHIFT 0x0
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE__SHIFT 0x10
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE_MASK 0x000000FFL
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY_2__PERCU_TIMEOUT_VALUE_MASK 0xFFFF0000L
+//RLC_GPU_CLOCK_COUNT_LSB
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_UCODE_CNTL
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x0
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xFFFFFFFFL
+//RLC_GPM_THREAD_RESET
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET__SHIFT 0x0
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET__SHIFT 0x1
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET__SHIFT 0x2
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET__SHIFT 0x3
+#define RLC_GPM_THREAD_RESET__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET_MASK 0x00000001L
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET_MASK 0x00000002L
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET_MASK 0x00000004L
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET_MASK 0x00000008L
+#define RLC_GPM_THREAD_RESET__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPM_CP_DMA_COMPLETE_T0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPM_CP_DMA_COMPLETE_T1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_FIREWALL_VIOLATION
+#define RLC_FIREWALL_VIOLATION__ADDR__SHIFT 0x0
+#define RLC_FIREWALL_VIOLATION__ADDR_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_GFXCLK_LSB
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_GFXCLK_MSB
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_LSB
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_MSB
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_CTRL
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN__SHIFT 0x0
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET__SHIFT 0x1
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE__SHIFT 0x2
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN__SHIFT 0x3
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET__SHIFT 0x4
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE__SHIFT 0x5
+#define RLC_CLK_COUNT_CTRL__RESERVED__SHIFT 0x6
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN_MASK 0x00000001L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET_MASK 0x00000002L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE_MASK 0x00000004L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN_MASK 0x00000008L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET_MASK 0x00000010L
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE_MASK 0x00000020L
+#define RLC_CLK_COUNT_CTRL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_CLK_COUNT_STAT
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID__SHIFT 0x0
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID__SHIFT 0x1
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC__SHIFT 0x2
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC__SHIFT 0x3
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC__SHIFT 0x4
+#define RLC_CLK_COUNT_STAT__RESERVED__SHIFT 0x5
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID_MASK 0x00000001L
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID_MASK 0x00000002L
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC_MASK 0x00000004L
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC_MASK 0x00000008L
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC_MASK 0x00000010L
+#define RLC_CLK_COUNT_STAT__RESERVED_MASK 0xFFFFFFE0L
+//RLC_GPM_STAT
+#define RLC_GPM_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
+#define RLC_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
+#define RLC_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
+#define RLC_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
+#define RLC_GPM_STAT__STATIC_CU_POWERING_UP__SHIFT 0xd
+#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN__SHIFT 0xe
+#define RLC_GPM_STAT__DYN_CU_POWERING_UP__SHIFT 0xf
+#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
+#define RLC_GPM_STAT__CMP_power_status__SHIFT 0x12
+#define RLC_GPM_STAT__GFX_LS_STATUS_3D__SHIFT 0x13
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D__SHIFT 0x14
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS__SHIFT 0x17
+#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
+#define RLC_GPM_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
+#define RLC_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
+#define RLC_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
+#define RLC_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
+#define RLC_GPM_STAT__STATIC_CU_POWERING_UP_MASK 0x00002000L
+#define RLC_GPM_STAT__STATIC_CU_POWERING_DOWN_MASK 0x00004000L
+#define RLC_GPM_STAT__DYN_CU_POWERING_UP_MASK 0x00008000L
+#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN_MASK 0x00010000L
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
+#define RLC_GPM_STAT__CMP_power_status_MASK 0x00040000L
+#define RLC_GPM_STAT__GFX_LS_STATUS_3D_MASK 0x00080000L
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D_MASK 0x00100000L
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS_MASK 0x00800000L
+#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
+//RLC_GPU_CLOCK_32_RES_SEL
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x0
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x6
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x0000003FL
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_CLOCK_32
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x0
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xFFFFFFFFL
+//RLC_PG_CNTL
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x0
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x1
+#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE__SHIFT 0x2
+#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE__SHIFT 0x3
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE__SHIFT 0x4
+#define RLC_PG_CNTL__RESERVED__SHIFT 0x5
+#define RLC_PG_CNTL__PG_OVERRIDE__SHIFT 0xe
+#define RLC_PG_CNTL__CP_PG_DISABLE__SHIFT 0xf
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x10
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x11
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
+#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable__SHIFT 0x15
+#define RLC_PG_CNTL__RESERVED2__SHIFT 0x16
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x00000001L
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x00000002L
+#define RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK 0x00000004L
+#define RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK 0x00000008L
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK 0x00000010L
+#define RLC_PG_CNTL__RESERVED_MASK 0x00003FE0L
+#define RLC_PG_CNTL__PG_OVERRIDE_MASK 0x00004000L
+#define RLC_PG_CNTL__CP_PG_DISABLE_MASK 0x00008000L
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x00010000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x00020000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x00040000L
+#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x00080000L
+#define RLC_PG_CNTL__RESERVED1_MASK 0x00100000L
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable_MASK 0x00200000L
+#define RLC_PG_CNTL__RESERVED2_MASK 0x00C00000L
+//RLC_GPM_THREAD_PRIORITY
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY__SHIFT 0x8
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY__SHIFT 0x10
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY__SHIFT 0x18
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0x000000FFL
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0x0000FF00L
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY_MASK 0x00FF0000L
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY_MASK 0xFF000000L
+//RLC_GPM_THREAD_ENABLE
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE__SHIFT 0x0
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE__SHIFT 0x1
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE__SHIFT 0x2
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE__SHIFT 0x3
+#define RLC_GPM_THREAD_ENABLE__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE_MASK 0x00000001L
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE_MASK 0x00000002L
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE_MASK 0x00000004L
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE_MASK 0x00000008L
+#define RLC_GPM_THREAD_ENABLE__RESERVED_MASK 0xFFFFFFF0L
+//RLC_CGTT_MGCG_OVERRIDE
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_0__SHIFT 0x0
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE__SHIFT 0x2
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE__SHIFT 0x3
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE__SHIFT 0x4
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE__SHIFT 0x5
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE__SHIFT 0x6
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE__SHIFT 0x7
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE__SHIFT 0x8
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_15_9__SHIFT 0x9
+#define RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY__SHIFT 0x10
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_17__SHIFT 0x11
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_0_MASK 0x00000001L
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK 0x00000002L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK 0x00000004L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK 0x00000008L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK 0x00000010L
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK 0x00000020L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK 0x00000040L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK 0x00000080L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK 0x00000100L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_15_9_MASK 0x0000FE00L
+#define RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK 0x00010000L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_17_MASK 0xFFFE0000L
+//RLC_CGCG_CGLS_CTRL
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x1b
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x1c
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x1d
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN__SHIFT 0x1f
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN_MASK 0x80000000L
+//RLC_CGCG_RAMP_CTRL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x0
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x4
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x8
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0xc
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x10
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x1c
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0x0000000FL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0x00000F00L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0x0000F000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0x0FFF0000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xF0000000L
+//RLC_DYN_PG_STATUS
+#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
+#define RLC_DYN_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_DYN_PG_REQUEST
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK__SHIFT 0x0
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY
+#define RLC_PG_DELAY__POWER_UP_DELAY__SHIFT 0x0
+#define RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT 0x10
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT 0x18
+#define RLC_PG_DELAY__POWER_UP_DELAY_MASK 0x000000FFL
+#define RLC_PG_DELAY__POWER_DOWN_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY_MASK 0x00FF0000L
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY_MASK 0xFF000000L
+//RLC_CU_STATUS
+#define RLC_CU_STATUS__WORK_PENDING__SHIFT 0x0
+#define RLC_CU_STATUS__WORK_PENDING_MASK 0xFFFFFFFFL
+//RLC_LB_INIT_CU_MASK
+#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK__SHIFT 0x0
+#define RLC_LB_INIT_CU_MASK__INIT_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_LB_ALWAYS_ACTIVE_CU_MASK
+#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK__SHIFT 0x0
+#define RLC_LB_ALWAYS_ACTIVE_CU_MASK__ALWAYS_ACTIVE_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_LB_PARAMS
+#define RLC_LB_PARAMS__SKIP_L2_CHECK__SHIFT 0x0
+#define RLC_LB_PARAMS__FIFO_SAMPLES__SHIFT 0x1
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLES__SHIFT 0x8
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL__SHIFT 0x10
+#define RLC_LB_PARAMS__SKIP_L2_CHECK_MASK 0x00000001L
+#define RLC_LB_PARAMS__FIFO_SAMPLES_MASK 0x000000FEL
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLES_MASK 0x0000FF00L
+#define RLC_LB_PARAMS__PG_IDLE_SAMPLE_INTERVAL_MASK 0xFFFF0000L
+//RLC_THREAD1_DELAY
+#define RLC_THREAD1_DELAY__CU_IDEL_DELAY__SHIFT 0x0
+#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY__SHIFT 0x8
+#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY__SHIFT 0x10
+#define RLC_THREAD1_DELAY__SPARE__SHIFT 0x18
+#define RLC_THREAD1_DELAY__CU_IDEL_DELAY_MASK 0x000000FFL
+#define RLC_THREAD1_DELAY__LBPW_INNER_LOOP_DELAY_MASK 0x0000FF00L
+#define RLC_THREAD1_DELAY__LBPW_OUTER_LOOP_DELAY_MASK 0x00FF0000L
+#define RLC_THREAD1_DELAY__SPARE_MASK 0xFF000000L
+//RLC_PG_ALWAYS_ON_CU_MASK
+#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK__SHIFT 0x0
+#define RLC_PG_ALWAYS_ON_CU_MASK__AON_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_MAX_PG_CU
+#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT 0x0
+#define RLC_MAX_PG_CU__SPARE__SHIFT 0x8
+#define RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK 0x000000FFL
+#define RLC_MAX_PG_CU__SPARE_MASK 0xFFFFFF00L
+//RLC_AUTO_PG_CTRL
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x0
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x1
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x2
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x3
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x13
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x00000001L
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x00000002L
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x00000004L
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x0007FFF8L
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xFFF80000L
+//RLC_SMU_GRBM_REG_SAVE_CTRL
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE__SHIFT 0x0
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE__SHIFT 0x1
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__START_GRBM_REG_SAVE_MASK 0x00000001L
+#define RLC_SMU_GRBM_REG_SAVE_CTRL__SPARE_MASK 0xFFFFFFFEL
+//RLC_SERDES_RD_PENDING
+#define RLC_SERDES_RD_PENDING__RD_PENDING__SHIFT 0x0
+#define RLC_SERDES_RD_PENDING__RD_PENDING_MASK 0x00000001L
+//RLC_SERDES_RD_MASTER_INDEX
+#define RLC_SERDES_RD_MASTER_INDEX__CU_ID__SHIFT 0x0
+#define RLC_SERDES_RD_MASTER_INDEX__SH_ID__SHIFT 0x4
+#define RLC_SERDES_RD_MASTER_INDEX__SE_ID__SHIFT 0x6
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID__SHIFT 0x9
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU__SHIFT 0xc
+#define RLC_SERDES_RD_MASTER_INDEX__NON_SE__SHIFT 0xd
+#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID__SHIFT 0x11
+#define RLC_SERDES_RD_MASTER_INDEX__SPARE__SHIFT 0x13
+#define RLC_SERDES_RD_MASTER_INDEX__CU_ID_MASK 0x0000000FL
+#define RLC_SERDES_RD_MASTER_INDEX__SH_ID_MASK 0x00000030L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_ID_MASK 0x000001C0L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_ID_MASK 0x00000E00L
+#define RLC_SERDES_RD_MASTER_INDEX__SE_NONCU_MASK 0x00001000L
+#define RLC_SERDES_RD_MASTER_INDEX__NON_SE_MASK 0x0001E000L
+#define RLC_SERDES_RD_MASTER_INDEX__DATA_REG_ID_MASK 0x00060000L
+#define RLC_SERDES_RD_MASTER_INDEX__SPARE_MASK 0xFFF80000L
+//RLC_SERDES_RD_DATA_0
+#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_1
+#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_2
+#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_WR_CU_MASTER_MASK
+#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK__SHIFT 0x0
+#define RLC_SERDES_WR_CU_MASTER_MASK__MASTER_MASK_MASK 0xFFFFFFFFL
+//RLC_SERDES_WR_NONCU_MASTER_MASK
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK__SHIFT 0x0
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK__SHIFT 0x10
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK__SHIFT 0x11
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK__SHIFT 0x12
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK__SHIFT 0x13
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK__SHIFT 0x14
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK__SHIFT 0x15
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK__SHIFT 0x16
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK__SHIFT 0x17
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__EA_0_MASTER_MASK__SHIFT 0x18
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC2_MASTER_MASK__SHIFT 0x19
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED__SHIFT 0x1a
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SE_MASTER_MASK_MASK 0x0000FFFFL
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_MASTER_MASK_MASK 0x00010000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__GC_GFX_MASTER_MASK_MASK 0x00020000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC0_MASTER_MASK_MASK 0x00040000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC1_MASTER_MASK_MASK 0x00080000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE0_MASTER_MASK_MASK 0x00100000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE1_MASTER_MASK_MASK 0x00200000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE2_MASTER_MASK_MASK 0x00400000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__SPARE3_MASTER_MASK_MASK 0x00800000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__EA_0_MASTER_MASK_MASK 0x01000000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__TC2_MASTER_MASK_MASK 0x02000000L
+#define RLC_SERDES_WR_NONCU_MASTER_MASK__RESERVED_MASK 0xFC000000L
+//RLC_SERDES_WR_CTRL
+#define RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT 0x0
+#define RLC_SERDES_WR_CTRL__POWER_DOWN__SHIFT 0x8
+#define RLC_SERDES_WR_CTRL__POWER_UP__SHIFT 0x9
+#define RLC_SERDES_WR_CTRL__P1_SELECT__SHIFT 0xa
+#define RLC_SERDES_WR_CTRL__P2_SELECT__SHIFT 0xb
+#define RLC_SERDES_WR_CTRL__WRITE_COMMAND__SHIFT 0xc
+#define RLC_SERDES_WR_CTRL__READ_COMMAND__SHIFT 0xd
+#define RLC_SERDES_WR_CTRL__RDDATA_RESET__SHIFT 0xe
+#define RLC_SERDES_WR_CTRL__SHORT_FORMAT__SHIFT 0xf
+#define RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT 0x10
+#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE__SHIFT 0x1a
+#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR__SHIFT 0x1b
+#define RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT 0x1c
+#define RLC_SERDES_WR_CTRL__BPM_ADDR_MASK 0x000000FFL
+#define RLC_SERDES_WR_CTRL__POWER_DOWN_MASK 0x00000100L
+#define RLC_SERDES_WR_CTRL__POWER_UP_MASK 0x00000200L
+#define RLC_SERDES_WR_CTRL__P1_SELECT_MASK 0x00000400L
+#define RLC_SERDES_WR_CTRL__P2_SELECT_MASK 0x00000800L
+#define RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK 0x00001000L
+#define RLC_SERDES_WR_CTRL__READ_COMMAND_MASK 0x00002000L
+#define RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK 0x00004000L
+#define RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK 0x00008000L
+#define RLC_SERDES_WR_CTRL__BPM_DATA_MASK 0x03FF0000L
+#define RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK 0x04000000L
+#define RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK 0x08000000L
+#define RLC_SERDES_WR_CTRL__REG_ADDR_MASK 0xF0000000L
+//RLC_SERDES_WR_DATA
+#define RLC_SERDES_WR_DATA__DATA__SHIFT 0x0
+#define RLC_SERDES_WR_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_CU_MASTER_BUSY
+#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY__SHIFT 0x0
+#define RLC_SERDES_CU_MASTER_BUSY__BUSY_BUSY_MASK 0xFFFFFFFFL
+//RLC_SERDES_NONCU_MASTER_BUSY
+#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY__SHIFT 0x0
+#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY__SHIFT 0x10
+#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY__SHIFT 0x11
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY__SHIFT 0x12
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY__SHIFT 0x13
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY__SHIFT 0x14
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY__SHIFT 0x15
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY__SHIFT 0x16
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY__SHIFT 0x17
+#define RLC_SERDES_NONCU_MASTER_BUSY__EA_0_MASTER_BUSY__SHIFT 0x18
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC2_MASTER_BUSY__SHIFT 0x19
+#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED__SHIFT 0x1a
+#define RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK 0x0000FFFFL
+#define RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK 0x00010000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__GC_GFX_MASTER_BUSY_MASK 0x00020000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK 0x00040000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK 0x00080000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE0_MASTER_BUSY_MASK 0x00100000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE1_MASTER_BUSY_MASK 0x00200000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE2_MASTER_BUSY_MASK 0x00400000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__SPARE3_MASTER_BUSY_MASK 0x00800000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__EA_0_MASTER_BUSY_MASK 0x01000000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__TC2_MASTER_BUSY_MASK 0x02000000L
+#define RLC_SERDES_NONCU_MASTER_BUSY__RESERVED_MASK 0xFC000000L
+//RLC_GPM_GENERAL_0
+#define RLC_GPM_GENERAL_0__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_1
+#define RLC_GPM_GENERAL_1__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_2
+#define RLC_GPM_GENERAL_2__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_3
+#define RLC_GPM_GENERAL_3__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_3__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_4
+#define RLC_GPM_GENERAL_4__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_4__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_5
+#define RLC_GPM_GENERAL_5__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_5__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_6
+#define RLC_GPM_GENERAL_6__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_6__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_7
+#define RLC_GPM_GENERAL_7__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_7__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_SCRATCH_ADDR
+#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPM_SCRATCH_ADDR__RESERVED__SHIFT 0x9
+#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x000001FFL
+#define RLC_GPM_SCRATCH_ADDR__RESERVED_MASK 0xFFFFFE00L
+//RLC_GPM_SCRATCH_DATA
+#define RLC_GPM_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPM_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_STATIC_PG_STATUS
+#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK__SHIFT 0x0
+#define RLC_STATIC_PG_STATUS__PG_STATUS_CU_MASK_MASK 0xFFFFFFFFL
+//RLC_SPM_MC_CNTL
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT 0x0
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY__SHIFT 0x4
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR__SHIFT 0x5
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED__SHIFT 0x6
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER__SHIFT 0x7
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE__SHIFT 0x8
+#define RLC_SPM_MC_CNTL__RESERVED__SHIFT 0xa
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK 0x0000000FL
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY_MASK 0x00000010L
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR_MASK 0x00000020L
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED_MASK 0x00000040L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER_MASK 0x00000080L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_MASK 0x00000300L
+#define RLC_SPM_MC_CNTL__RESERVED_MASK 0xFFFFFC00L
+//RLC_SPM_INT_CNTL
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL__SHIFT 0x0
+#define RLC_SPM_INT_CNTL__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL_MASK 0x00000001L
+#define RLC_SPM_INT_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SPM_INT_STATUS
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS__SHIFT 0x0
+#define RLC_SPM_INT_STATUS__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS_MASK 0x00000001L
+#define RLC_SPM_INT_STATUS__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SMU_MESSAGE
+#define RLC_SMU_MESSAGE__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE__CMD_MASK 0xFFFFFFFFL
+//RLC_GPM_LOG_SIZE
+#define RLC_GPM_LOG_SIZE__SIZE__SHIFT 0x0
+#define RLC_GPM_LOG_SIZE__SIZE_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_3
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
+#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0x000000FFL
+#define RLC_PG_DELAY_3__RESERVED_MASK 0xFFFFFF00L
+//RLC_GPR_REG1
+#define RLC_GPR_REG1__DATA__SHIFT 0x0
+#define RLC_GPR_REG1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPR_REG2
+#define RLC_GPR_REG2__DATA__SHIFT 0x0
+#define RLC_GPR_REG2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_LOG_CONT
+#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
+#define RLC_GPM_LOG_CONT__CONT_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_DISABLE_TH0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_FORCE_TH0
+#define RLC_GPM_INT_FORCE_TH0__FORCE__SHIFT 0x0
+#define RLC_GPM_INT_FORCE_TH0__FORCE_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_FORCE_TH1
+#define RLC_GPM_INT_FORCE_TH1__FORCE__SHIFT 0x0
+#define RLC_GPM_INT_FORCE_TH1__FORCE_MASK 0xFFFFFFFFL
+//RLC_SRM_CNTL
+#define RLC_SRM_CNTL__SRM_ENABLE__SHIFT 0x0
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR__SHIFT 0x1
+#define RLC_SRM_CNTL__RESERVED__SHIFT 0x2
+#define RLC_SRM_CNTL__SRM_ENABLE_MASK 0x00000001L
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK 0x00000002L
+#define RLC_SRM_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_ARAM_ADDR
+#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xc
+#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x00000FFFL
+#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_SRM_ARAM_DATA
+#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_ARAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_DRAM_ADDR
+#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xc
+#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x00000FFFL
+#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_SRM_DRAM_DATA
+#define RLC_SRM_DRAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_GPM_COMMAND
+#define RLC_SRM_GPM_COMMAND__OP__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND__SIZE__SHIFT 0x5
+#define RLC_SRM_GPM_COMMAND__RESERVED_16__SHIFT 0x10
+#define RLC_SRM_GPM_COMMAND__START_OFFSET__SHIFT 0x11
+#define RLC_SRM_GPM_COMMAND__RESERVED_30_29__SHIFT 0x1d
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY__SHIFT 0x1f
+#define RLC_SRM_GPM_COMMAND__OP_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM_MASK 0x0000001CL
+#define RLC_SRM_GPM_COMMAND__SIZE_MASK 0x0000FFE0L
+#define RLC_SRM_GPM_COMMAND__RESERVED_16_MASK 0x00010000L
+#define RLC_SRM_GPM_COMMAND__START_OFFSET_MASK 0x1FFE0000L
+#define RLC_SRM_GPM_COMMAND__RESERVED_30_29_MASK 0x60000000L
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY_MASK 0x80000000L
+//RLC_SRM_GPM_COMMAND_STATUS
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_RLCV_COMMAND
+#define RLC_SRM_RLCV_COMMAND__OP__SHIFT 0x0
+#define RLC_SRM_RLCV_COMMAND__RESERVED__SHIFT 0x1
+#define RLC_SRM_RLCV_COMMAND__SIZE__SHIFT 0x4
+#define RLC_SRM_RLCV_COMMAND__START_OFFSET__SHIFT 0x10
+#define RLC_SRM_RLCV_COMMAND__RESERVED1__SHIFT 0x1c
+#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY__SHIFT 0x1f
+#define RLC_SRM_RLCV_COMMAND__OP_MASK 0x00000001L
+#define RLC_SRM_RLCV_COMMAND__RESERVED_MASK 0x0000000EL
+#define RLC_SRM_RLCV_COMMAND__SIZE_MASK 0x0000FFF0L
+#define RLC_SRM_RLCV_COMMAND__START_OFFSET_MASK 0x0FFF0000L
+#define RLC_SRM_RLCV_COMMAND__RESERVED1_MASK 0x70000000L
+#define RLC_SRM_RLCV_COMMAND__DEST_MEMORY_MASK 0x80000000L
+//RLC_SRM_RLCV_COMMAND_STATUS
+#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
+#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
+#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED__SHIFT 0x2
+#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
+#define RLC_SRM_RLCV_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
+#define RLC_SRM_RLCV_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_INDEX_CNTL_ADDR_0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_0__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_1
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_1__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_2
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_2__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_3
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_3__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_4
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_4__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_5
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_5__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_6
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_6__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_ADDR_7
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED__SHIFT 0x10
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS_MASK 0x0000FFFFL
+#define RLC_SRM_INDEX_CNTL_ADDR_7__RESERVED_MASK 0xFFFF0000L
+//RLC_SRM_INDEX_CNTL_DATA_0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_1
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_2
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_3
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_4
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_5
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_6
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_7
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_STAT
+#define RLC_SRM_STAT__SRM_BUSY__SHIFT 0x0
+#define RLC_SRM_STAT__SRM_BUSY_DELAY__SHIFT 0x1
+#define RLC_SRM_STAT__RESERVED__SHIFT 0x2
+#define RLC_SRM_STAT__SRM_BUSY_MASK 0x00000001L
+#define RLC_SRM_STAT__SRM_BUSY_DELAY_MASK 0x00000002L
+#define RLC_SRM_STAT__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_GPM_ABORT
+#define RLC_SRM_GPM_ABORT__ABORT__SHIFT 0x0
+#define RLC_SRM_GPM_ABORT__RESERVED__SHIFT 0x1
+#define RLC_SRM_GPM_ABORT__ABORT_MASK 0x00000001L
+#define RLC_SRM_GPM_ABORT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_CSIB_ADDR_LO
+#define RLC_CSIB_ADDR_LO__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_LO__ADDRESS_MASK 0xFFFFFFFFL
+//RLC_CSIB_ADDR_HI
+#define RLC_CSIB_ADDR_HI__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_HI__ADDRESS_MASK 0x0000FFFFL
+//RLC_CSIB_LENGTH
+#define RLC_CSIB_LENGTH__LENGTH__SHIFT 0x0
+#define RLC_CSIB_LENGTH__LENGTH_MASK 0xFFFFFFFFL
+//RLC_SMU_COMMAND
+#define RLC_SMU_COMMAND__CMD__SHIFT 0x0
+#define RLC_SMU_COMMAND__CMD_MASK 0xFFFFFFFFL
+//RLC_CP_SCHEDULERS
+#define RLC_CP_SCHEDULERS__scheduler0__SHIFT 0x0
+#define RLC_CP_SCHEDULERS__scheduler1__SHIFT 0x8
+#define RLC_CP_SCHEDULERS__scheduler2__SHIFT 0x10
+#define RLC_CP_SCHEDULERS__scheduler3__SHIFT 0x18
+#define RLC_CP_SCHEDULERS__scheduler0_MASK 0x000000FFL
+#define RLC_CP_SCHEDULERS__scheduler1_MASK 0x0000FF00L
+#define RLC_CP_SCHEDULERS__scheduler2_MASK 0x00FF0000L
+#define RLC_CP_SCHEDULERS__scheduler3_MASK 0xFF000000L
+//RLC_SMU_ARGUMENT_1
+#define RLC_SMU_ARGUMENT_1__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_1__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_2
+#define RLC_SMU_ARGUMENT_2__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_2__ARG_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_8
+#define RLC_GPM_GENERAL_8__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_8__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_9
+#define RLC_GPM_GENERAL_9__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_9__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_10
+#define RLC_GPM_GENERAL_10__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_10__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_11
+#define RLC_GPM_GENERAL_11__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_11__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_12
+#define RLC_GPM_GENERAL_12__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_12__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_CNTL_0
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_0__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_0__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED_MASK 0xC0000000L
+//RLC_GPM_UTCL1_CNTL_1
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_1__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_1__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED_MASK 0xC0000000L
+//RLC_GPM_UTCL1_CNTL_2
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_2__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_2__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED_MASK 0xC0000000L
+//RLC_SPM_UTCL1_CNTL
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define RLC_SPM_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_SPM_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define RLC_SPM_UTCL1_CNTL__RESERVED__SHIFT 0x1e
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define RLC_SPM_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_SPM_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+#define RLC_SPM_UTCL1_CNTL__RESERVED_MASK 0xC0000000L
+//RLC_UTCL1_STATUS_2
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY__SHIFT 0x0
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY__SHIFT 0x1
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY__SHIFT 0x2
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY__SHIFT 0x3
+#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_BUSY__SHIFT 0x4
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans__SHIFT 0x5
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans__SHIFT 0x6
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans__SHIFT 0x7
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans__SHIFT 0x8
+#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_StallOnTrans__SHIFT 0x9
+#define RLC_UTCL1_STATUS_2__RESERVED__SHIFT 0xa
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY_MASK 0x00000001L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY_MASK 0x00000002L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY_MASK 0x00000004L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY_MASK 0x00000008L
+#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_BUSY_MASK 0x00000010L
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans_MASK 0x00000020L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans_MASK 0x00000040L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans_MASK 0x00000080L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans_MASK 0x00000100L
+#define RLC_UTCL1_STATUS_2__PREWALKER_UTCL1_StallOnTrans_MASK 0x00000200L
+#define RLC_UTCL1_STATUS_2__RESERVED_MASK 0xFFFFFC00L
+//RLC_LB_THR_CONFIG_2
+#define RLC_LB_THR_CONFIG_2__DATA__SHIFT 0x0
+#define RLC_LB_THR_CONFIG_2__DATA_MASK 0xFFFFFFFFL
+//RLC_LB_THR_CONFIG_3
+#define RLC_LB_THR_CONFIG_3__DATA__SHIFT 0x0
+#define RLC_LB_THR_CONFIG_3__DATA_MASK 0xFFFFFFFFL
+//RLC_LB_THR_CONFIG_4
+#define RLC_LB_THR_CONFIG_4__DATA__SHIFT 0x0
+#define RLC_LB_THR_CONFIG_4__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_UTCL1_ERROR_1
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_SPM_UTCL1_ERROR_2
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH0_ERROR_1
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_LB_THR_CONFIG_1
+#define RLC_LB_THR_CONFIG_1__DATA__SHIFT 0x0
+#define RLC_LB_THR_CONFIG_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH0_ERROR_2
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH1_ERROR_1
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH1_ERROR_2
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH2_ERROR_1
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH2_ERROR_2
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_CGCG_CGLS_CTRL_3D
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER__SHIFT 0x1b
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL__SHIFT 0x1c
+#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE__SHIFT 0x1d
+#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN__SHIFT 0x1f
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN_MASK 0x80000000L
+//RLC_CGCG_RAMP_CTRL_3D
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT__SHIFT 0x0
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT__SHIFT 0x4
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT__SHIFT 0x8
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT__SHIFT 0xc
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT__SHIFT 0x10
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT__SHIFT 0x1c
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT_MASK 0x0000000FL
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT_MASK 0x00000F00L
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT_MASK 0x0000F000L
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT_MASK 0x0FFF0000L
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT_MASK 0xF0000000L
+//RLC_SEMAPHORE_0
+#define RLC_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_1
+#define RLC_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_CP_EOF_INT
+#define RLC_CP_EOF_INT__INTERRUPT__SHIFT 0x0
+#define RLC_CP_EOF_INT__RESERVED__SHIFT 0x1
+#define RLC_CP_EOF_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_CP_EOF_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_CP_EOF_INT_CNT
+#define RLC_CP_EOF_INT_CNT__CNT__SHIFT 0x0
+#define RLC_CP_EOF_INT_CNT__CNT_MASK 0xFFFFFFFFL
+//RLC_SPARE_INT
+#define RLC_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_PREWALKER_UTCL1_CNTL
+#define RLC_PREWALKER_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define RLC_PREWALKER_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define RLC_PREWALKER_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define RLC_PREWALKER_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SD_VMID_DIRTY__SHIFT 0x1d
+#define RLC_PREWALKER_UTCL1_CNTL__RESERVED__SHIFT 0x1e
+#define RLC_PREWALKER_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_PREWALKER_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define RLC_PREWALKER_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define RLC_PREWALKER_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define RLC_PREWALKER_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_PREWALKER_UTCL1_CNTL__FORCE_SD_VMID_DIRTY_MASK 0x20000000L
+#define RLC_PREWALKER_UTCL1_CNTL__RESERVED_MASK 0xC0000000L
+//RLC_PREWALKER_UTCL1_TRIG
+#define RLC_PREWALKER_UTCL1_TRIG__VALID__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_TRIG__VMID__SHIFT 0x1
+#define RLC_PREWALKER_UTCL1_TRIG__PRIME_MODE__SHIFT 0x5
+#define RLC_PREWALKER_UTCL1_TRIG__READ_PERM__SHIFT 0x6
+#define RLC_PREWALKER_UTCL1_TRIG__WRITE_PERM__SHIFT 0x7
+#define RLC_PREWALKER_UTCL1_TRIG__EXEC_PERM__SHIFT 0x8
+#define RLC_PREWALKER_UTCL1_TRIG__RESERVED__SHIFT 0x9
+#define RLC_PREWALKER_UTCL1_TRIG__READY__SHIFT 0x1f
+#define RLC_PREWALKER_UTCL1_TRIG__VALID_MASK 0x00000001L
+#define RLC_PREWALKER_UTCL1_TRIG__VMID_MASK 0x0000001EL
+#define RLC_PREWALKER_UTCL1_TRIG__PRIME_MODE_MASK 0x00000020L
+#define RLC_PREWALKER_UTCL1_TRIG__READ_PERM_MASK 0x00000040L
+#define RLC_PREWALKER_UTCL1_TRIG__WRITE_PERM_MASK 0x00000080L
+#define RLC_PREWALKER_UTCL1_TRIG__EXEC_PERM_MASK 0x00000100L
+#define RLC_PREWALKER_UTCL1_TRIG__RESERVED_MASK 0x7FFFFE00L
+#define RLC_PREWALKER_UTCL1_TRIG__READY_MASK 0x80000000L
+//RLC_PREWALKER_UTCL1_ADDR_LSB
+#define RLC_PREWALKER_UTCL1_ADDR_LSB__ADDR_LSB__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_ADDR_LSB__ADDR_LSB_MASK 0xFFFFFFFFL
+//RLC_PREWALKER_UTCL1_ADDR_MSB
+#define RLC_PREWALKER_UTCL1_ADDR_MSB__ADDR_MSB__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_ADDR_MSB__ADDR_MSB_MASK 0x0000FFFFL
+//RLC_PREWALKER_UTCL1_SIZE_LSB
+#define RLC_PREWALKER_UTCL1_SIZE_LSB__SIZE_LSB__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_SIZE_LSB__SIZE_LSB_MASK 0xFFFFFFFFL
+//RLC_PREWALKER_UTCL1_SIZE_MSB
+#define RLC_PREWALKER_UTCL1_SIZE_MSB__SIZE_MSB__SHIFT 0x0
+#define RLC_PREWALKER_UTCL1_SIZE_MSB__SIZE_MSB_MASK 0x00000003L
+//RLC_DSM_TRIG
+#define RLC_DSM_TRIG__START__SHIFT 0x0
+#define RLC_DSM_TRIG__START_MASK 0x00000001L
+//RLC_UTCL1_STATUS
+#define RLC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RLC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RLC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RLC_UTCL1_STATUS__RESERVED__SHIFT 0x3
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define RLC_UTCL1_STATUS__RESERVED_1__SHIFT 0xe
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define RLC_UTCL1_STATUS__RESERVED_2__SHIFT 0x16
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define RLC_UTCL1_STATUS__RESERVED_3__SHIFT 0x1e
+#define RLC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RLC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RLC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define RLC_UTCL1_STATUS__RESERVED_MASK 0x000000F8L
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define RLC_UTCL1_STATUS__RESERVED_1_MASK 0x0000C000L
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define RLC_UTCL1_STATUS__RESERVED_2_MASK 0x00C00000L
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+#define RLC_UTCL1_STATUS__RESERVED_3_MASK 0xC0000000L
+//RLC_R2I_CNTL_0
+#define RLC_R2I_CNTL_0__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_0__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_1
+#define RLC_R2I_CNTL_1__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_1__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_2
+#define RLC_R2I_CNTL_2__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_2__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_3
+#define RLC_R2I_CNTL_3__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_3__Data_MASK 0xFFFFFFFFL
+//RLC_UTCL2_CNTL
+#define RLC_UTCL2_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x0
+#define RLC_UTCL2_CNTL__RESERVED__SHIFT 0x1
+#define RLC_UTCL2_CNTL__MTYPE_NO_PTE_MODE_MASK 0x00000001L
+#define RLC_UTCL2_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_LBPW_CU_STAT
+#define RLC_LBPW_CU_STAT__MAX_CU__SHIFT 0x0
+#define RLC_LBPW_CU_STAT__ON_CU__SHIFT 0x10
+#define RLC_LBPW_CU_STAT__MAX_CU_MASK 0x0000FFFFL
+#define RLC_LBPW_CU_STAT__ON_CU_MASK 0xFFFF0000L
+//RLC_DS_CNTL
+#define RLC_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK__SHIFT 0x0
+#define RLC_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK__SHIFT 0x1
+#define RLC_DS_CNTL__RESRVED__SHIFT 0x2
+#define RLC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK__SHIFT 0x10
+#define RLC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK__SHIFT 0x11
+#define RLC_DS_CNTL__RESRVED_1__SHIFT 0x12
+#define RLC_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK_MASK 0x00000001L
+#define RLC_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK_MASK 0x00000002L
+#define RLC_DS_CNTL__RESRVED_MASK 0x0000FFFCL
+#define RLC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK_MASK 0x00010000L
+#define RLC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK_MASK 0x00020000L
+#define RLC_DS_CNTL__RESRVED_1_MASK 0xFFFC0000L
+//RLC_GPM_INT_STAT_TH0
+#define RLC_GPM_INT_STAT_TH0__STATUS__SHIFT 0x0
+#define RLC_GPM_INT_STAT_TH0__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_13
+#define RLC_GPM_GENERAL_13__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_13__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_14
+#define RLC_GPM_GENERAL_14__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_14__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_15
+#define RLC_GPM_GENERAL_15__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_15__DATA_MASK 0xFFFFFFFFL
+//RLC_SPARE_INT_1
+#define RLC_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCV_SPARE_INT_1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SEMAPHORE_2
+#define RLC_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_3
+#define RLC_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SMU_ARGUMENT_3
+#define RLC_SMU_ARGUMENT_3__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_3__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_4
+#define RLC_SMU_ARGUMENT_4__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_4__ARG_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_LSB_1
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_1
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPU_CLOCK_COUNT_LSB_2
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_2
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_2
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED_MASK 0xFFFFFFFEL
+//RLC_CPG_STAT_INVAL
+#define RLC_CPG_STAT_INVAL__CPG_stat_inval__SHIFT 0x0
+#define RLC_CPG_STAT_INVAL__CPG_stat_inval_MASK 0x00000001L
+//RLC_RLCV_SPARE_INT
+#define RLC_RLCV_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SMU_CLK_REQ
+#define RLC_SMU_CLK_REQ__VALID__SHIFT 0x0
+#define RLC_SMU_CLK_REQ__VALID_MASK 0x00000001L
+
+
+// addressBlock: gc_pwrdec
+//CGTS_SM_CTRL_REG
+#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY__SHIFT 0x0
+#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY__SHIFT 0x4
+#define CGTS_SM_CTRL_REG__MGCG_ENABLED__SHIFT 0xc
+#define CGTS_SM_CTRL_REG__BASE_MODE__SHIFT 0x10
+#define CGTS_SM_CTRL_REG__SM_MODE__SHIFT 0x11
+#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE__SHIFT 0x14
+#define CGTS_SM_CTRL_REG__OVERRIDE__SHIFT 0x15
+#define CGTS_SM_CTRL_REG__LS_OVERRIDE__SHIFT 0x16
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN__SHIFT 0x17
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT 0x18
+#define CGTS_SM_CTRL_REG__ON_SEQ_DELAY_MASK 0x0000000FL
+#define CGTS_SM_CTRL_REG__OFF_SEQ_DELAY_MASK 0x00000FF0L
+#define CGTS_SM_CTRL_REG__MGCG_ENABLED_MASK 0x00001000L
+#define CGTS_SM_CTRL_REG__BASE_MODE_MASK 0x00010000L
+#define CGTS_SM_CTRL_REG__SM_MODE_MASK 0x000E0000L
+#define CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK 0x00100000L
+#define CGTS_SM_CTRL_REG__OVERRIDE_MASK 0x00200000L
+#define CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK 0x00400000L
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK 0x00800000L
+#define CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK 0xFF000000L
+//CGTS_RD_CTRL_REG
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x0
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x8
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x0000001FL
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x00001F00L
+//CGTS_RD_REG
+#define CGTS_RD_REG__READ_DATA__SHIFT 0x0
+#define CGTS_RD_REG__READ_DATA_MASK 0x00003FFFL
+//CGTS_TCC_DISABLE
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTS_USER_TCC_DISABLE
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTS_CU0_SP0_CTRL_REG
+#define CGTS_CU0_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU0_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_LDS_SQ_CTRL_REG
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_TA_SQC_CTRL_REG
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_SP1_CTRL_REG
+#define CGTS_CU0_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU0_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_TD_TCP_CTRL_REG
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU0_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU1_SP0_CTRL_REG
+#define CGTS_CU1_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU1_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU1_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU1_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU1_LDS_SQ_CTRL_REG
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU1_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU1_TA_SQC_CTRL_REG
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU1_SP1_CTRL_REG
+#define CGTS_CU1_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU1_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU1_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU1_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU1_TD_TCP_CTRL_REG
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU1_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU2_SP0_CTRL_REG
+#define CGTS_CU2_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU2_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU2_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU2_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU2_LDS_SQ_CTRL_REG
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU2_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU2_TA_SQC_CTRL_REG
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU2_SP1_CTRL_REG
+#define CGTS_CU2_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU2_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU2_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU2_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU2_TD_TCP_CTRL_REG
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU2_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU3_SP0_CTRL_REG
+#define CGTS_CU3_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU3_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU3_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU3_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU3_LDS_SQ_CTRL_REG
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU3_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU3_TA_SQC_CTRL_REG
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU3_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU3_SP1_CTRL_REG
+#define CGTS_CU3_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU3_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU3_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU3_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU3_TD_TCP_CTRL_REG
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU3_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU4_SP0_CTRL_REG
+#define CGTS_CU4_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU4_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU4_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU4_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU4_LDS_SQ_CTRL_REG
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU4_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU4_TA_SQC_CTRL_REG
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU4_SP1_CTRL_REG
+#define CGTS_CU4_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU4_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU4_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU4_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU4_TD_TCP_CTRL_REG
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU4_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU5_SP0_CTRL_REG
+#define CGTS_CU5_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU5_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU5_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU5_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU5_LDS_SQ_CTRL_REG
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU5_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU5_TA_SQC_CTRL_REG
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU5_SP1_CTRL_REG
+#define CGTS_CU5_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU5_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU5_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU5_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU5_TD_TCP_CTRL_REG
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU5_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_SP0_CTRL_REG
+#define CGTS_CU6_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU6_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_LDS_SQ_CTRL_REG
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_TA_SQC_CTRL_REG
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_SP1_CTRL_REG
+#define CGTS_CU6_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU6_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU6_TD_TCP_CTRL_REG
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU6_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU7_SP0_CTRL_REG
+#define CGTS_CU7_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU7_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU7_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU7_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU7_LDS_SQ_CTRL_REG
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU7_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU7_TA_SQC_CTRL_REG
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU7_SP1_CTRL_REG
+#define CGTS_CU7_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU7_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU7_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU7_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU7_TD_TCP_CTRL_REG
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU7_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU8_SP0_CTRL_REG
+#define CGTS_CU8_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU8_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU8_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU8_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU8_LDS_SQ_CTRL_REG
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU8_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU8_TA_SQC_CTRL_REG
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU8_SP1_CTRL_REG
+#define CGTS_CU8_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU8_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU8_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU8_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU8_TD_TCP_CTRL_REG
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU8_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU9_SP0_CTRL_REG
+#define CGTS_CU9_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU9_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU9_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU9_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU9_LDS_SQ_CTRL_REG
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU9_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU9_TA_SQC_CTRL_REG
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU9_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU9_SP1_CTRL_REG
+#define CGTS_CU9_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU9_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU9_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU9_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU9_TD_TCP_CTRL_REG
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU9_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU10_SP0_CTRL_REG
+#define CGTS_CU10_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU10_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU10_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU10_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU10_LDS_SQ_CTRL_REG
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU10_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU10_TA_SQC_CTRL_REG
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU10_SP1_CTRL_REG
+#define CGTS_CU10_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU10_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU10_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU10_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU10_TD_TCP_CTRL_REG
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU10_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU11_SP0_CTRL_REG
+#define CGTS_CU11_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU11_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU11_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU11_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU11_LDS_SQ_CTRL_REG
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU11_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU11_TA_SQC_CTRL_REG
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU11_SP1_CTRL_REG
+#define CGTS_CU11_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU11_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU11_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU11_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU11_TD_TCP_CTRL_REG
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU11_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_SP0_CTRL_REG
+#define CGTS_CU12_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU12_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_LDS_SQ_CTRL_REG
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_TA_SQC_CTRL_REG
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_SP1_CTRL_REG
+#define CGTS_CU12_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU12_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU12_TD_TCP_CTRL_REG
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU12_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU13_SP0_CTRL_REG
+#define CGTS_CU13_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU13_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU13_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU13_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU13_LDS_SQ_CTRL_REG
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU13_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU13_TA_SQC_CTRL_REG
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU13_SP1_CTRL_REG
+#define CGTS_CU13_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU13_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU13_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU13_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU13_TD_TCP_CTRL_REG
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU13_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU14_SP0_CTRL_REG
+#define CGTS_CU14_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU14_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU14_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU14_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU14_LDS_SQ_CTRL_REG
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU14_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU14_TA_SQC_CTRL_REG
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+//CGTS_CU14_SP1_CTRL_REG
+#define CGTS_CU14_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU14_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU14_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU14_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU14_TD_TCP_CTRL_REG
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU14_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU15_SP0_CTRL_REG
+#define CGTS_CU15_SP0_CTRL_REG__SP00__SHIFT 0x0
+#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_SP0_CTRL_REG__SP01__SHIFT 0x10
+#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE__SHIFT 0x17
+#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU15_SP0_CTRL_REG__SP00_MASK 0x0000007FL
+#define CGTS_CU15_SP0_CTRL_REG__SP00_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_SP0_CTRL_REG__SP00_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_SP0_CTRL_REG__SP00_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_SP0_CTRL_REG__SP00_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_MASK 0x007F0000L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU15_SP0_CTRL_REG__SP01_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU15_LDS_SQ_CTRL_REG
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS__SHIFT 0x0
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ__SHIFT 0x10
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE__SHIFT 0x17
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_MASK 0x0000007FL
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__LDS_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_MASK 0x007F0000L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU15_LDS_SQ_CTRL_REG__SQ_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU15_TA_SQC_CTRL_REG
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA__SHIFT 0x0
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC__SHIFT 0x10
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_OVERRIDE__SHIFT 0x17
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_MASK 0x0000007FL
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_TA_SQC_CTRL_REG__TA_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_MASK 0x007F0000L
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU15_TA_SQC_CTRL_REG__SQC_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU15_SP1_CTRL_REG
+#define CGTS_CU15_SP1_CTRL_REG__SP10__SHIFT 0x0
+#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_SP1_CTRL_REG__SP11__SHIFT 0x10
+#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE__SHIFT 0x17
+#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU15_SP1_CTRL_REG__SP10_MASK 0x0000007FL
+#define CGTS_CU15_SP1_CTRL_REG__SP10_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_SP1_CTRL_REG__SP10_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_SP1_CTRL_REG__SP10_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_SP1_CTRL_REG__SP10_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_MASK 0x007F0000L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU15_SP1_CTRL_REG__SP11_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU15_TD_TCP_CTRL_REG
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD__SHIFT 0x0
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF__SHIFT 0x10
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_OVERRIDE__SHIFT 0x17
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE__SHIFT 0x18
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE__SHIFT 0x1a
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE__SHIFT 0x1b
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_MASK 0x0000007FL
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TD_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_MASK 0x007F0000L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_OVERRIDE_MASK 0x00800000L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_BUSY_OVERRIDE_MASK 0x03000000L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_LS_OVERRIDE_MASK 0x04000000L
+#define CGTS_CU15_TD_TCP_CTRL_REG__TCPF_SIMDBUSY_OVERRIDE_MASK 0x08000000L
+//CGTS_CU0_TCPI_CTRL_REG
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU0_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU0_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU0_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU1_TCPI_CTRL_REG
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU1_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU1_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU1_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU2_TCPI_CTRL_REG
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU2_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU2_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU2_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU3_TCPI_CTRL_REG
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU3_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU3_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU3_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU4_TCPI_CTRL_REG
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU4_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU4_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU4_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU5_TCPI_CTRL_REG
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU5_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU5_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU5_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU6_TCPI_CTRL_REG
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU6_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU6_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU6_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU7_TCPI_CTRL_REG
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU7_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU7_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU7_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU8_TCPI_CTRL_REG
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU8_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU8_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU8_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU9_TCPI_CTRL_REG
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU9_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU9_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU9_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU10_TCPI_CTRL_REG
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU10_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU10_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU10_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU11_TCPI_CTRL_REG
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU11_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU11_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU11_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU12_TCPI_CTRL_REG
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU12_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU12_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU12_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU13_TCPI_CTRL_REG
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU13_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU13_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU13_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU14_TCPI_CTRL_REG
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU14_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU14_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU14_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTS_CU15_TCPI_CTRL_REG
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI__SHIFT 0x0
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_OVERRIDE__SHIFT 0x7
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE__SHIFT 0x8
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_LS_OVERRIDE__SHIFT 0xa
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE__SHIFT 0xb
+#define CGTS_CU15_TCPI_CTRL_REG__RESERVED__SHIFT 0xc
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_MASK 0x0000007FL
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_OVERRIDE_MASK 0x00000080L
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_BUSY_OVERRIDE_MASK 0x00000300L
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_LS_OVERRIDE_MASK 0x00000400L
+#define CGTS_CU15_TCPI_CTRL_REG__TCPI_SIMDBUSY_OVERRIDE_MASK 0x00000800L
+#define CGTS_CU15_TCPI_CTRL_REG__RESERVED_MASK 0xFFFFF000L
+//CGTT_SPI_PS_CLK_CTRL
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SPIS_CLK_CTRL
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTX_SPI_DEBUG_CLK_CTRL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x0
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x6
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x7
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL__SHIFT 0x8
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x0000003FL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x00000040L
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x00000080L
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL_MASK 0x00000100L
+//CGTT_SPI_CLK_CTRL
+#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPI_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPI_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPI_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PC_CLK_CTRL
+#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE__SHIFT 0x11
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x12
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x18
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE__SHIFT 0x19
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE__SHIFT 0x1a
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_PC_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE_MASK 0x00020000L
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x00FC0000L
+#define CGTT_PC_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x01000000L
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE_MASK 0x02000000L
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE_MASK 0x04000000L
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_PC_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_BCI_CLK_CTRL
+#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0xc
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x18
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x19
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x1a
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0x0000F000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x01000000L
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x02000000L
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x04000000L
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_VGT_CLK_CTRL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE9__SHIFT 0x18
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE8__SHIFT 0x19
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x1a
+#define CGTT_VGT_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE__SHIFT 0x1d
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE9_MASK 0x01000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE8_MASK 0x02000000L
+#define CGTT_VGT_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x04000000L
+#define CGTT_VGT_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__GS_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_IA_CLK_CTRL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_WD_CLK_CTRL
+#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE8__SHIFT 0x19
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x1a
+#define CGTT_WD_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b
+#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE8_MASK 0x02000000L
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x04000000L
+#define CGTT_WD_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L
+#define CGTT_WD_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PA_CLK_CTRL
+#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL0
+#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL1
+#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SC_CLK_CTRL2
+#define CGTT_SC_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SQ_CLK_CTRL
+#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SQ_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SQG_CLK_CTRL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//SQ_ALU_CLK_CTRL
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
+#define SQ_ALU_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
+//SQ_TEX_CLK_CTRL
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
+#define SQ_TEX_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
+//SQ_LDS_CLK_CTRL
+#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0__SHIFT 0x0
+#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1__SHIFT 0x10
+#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH0_MASK 0x0000FFFFL
+#define SQ_LDS_CLK_CTRL__FORCE_CU_ON_SH1_MASK 0xFFFF0000L
+//SQ_POWER_THROTTLE
+#define SQ_POWER_THROTTLE__MIN_POWER__SHIFT 0x0
+#define SQ_POWER_THROTTLE__MAX_POWER__SHIFT 0x10
+#define SQ_POWER_THROTTLE__PHASE_OFFSET__SHIFT 0x1e
+#define SQ_POWER_THROTTLE__MIN_POWER_MASK 0x00003FFFL
+#define SQ_POWER_THROTTLE__MAX_POWER_MASK 0x3FFF0000L
+#define SQ_POWER_THROTTLE__PHASE_OFFSET_MASK 0xC0000000L
+//SQ_POWER_THROTTLE2
+#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT 0x0
+#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
+#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
+#define SQ_POWER_THROTTLE2__USE_REF_CLOCK__SHIFT 0x1f
+#define SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK 0x00003FFFL
+#define SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
+#define SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
+#define SQ_POWER_THROTTLE2__USE_REF_CLOCK_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL0
+#define CGTT_SX_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL0__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL0__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL1
+#define CGTT_SX_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL1__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL1__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL2
+#define CGTT_SX_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL2__RESERVED__SHIFT 0xd
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL2__RESERVED_MASK 0x0000E000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL3
+#define CGTT_SX_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL3__RESERVED__SHIFT 0xd
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL3__RESERVED_MASK 0x0000E000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL4
+#define CGTT_SX_CLK_CTRL4__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL4__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL4__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL4__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0_MASK 0x80000000L
+//TD_CGTT_CTRL
+#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TD_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TA_CGTT_CTRL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPI_CLK_CTRL
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPI_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPI_CLK_CTRL__SPARE_MASK 0x0000F000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCI_CLK_CTRL
+#define CGTT_TCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_TCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_TCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_TCI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_GDS_CLK_CTRL
+#define CGTT_GDS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_GDS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//DB_CGTT_CLK_CTRL_0
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x0
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x4
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0xc
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x18
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x19
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0x0000000FL
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0x0000F000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000L
+//CB_CGTT_SCLK_CTRL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TCC_CGTT_SCLK_CTRL
+#define TCC_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TCC_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TCC_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TCC_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TCA_CGTT_SCLK_CTRL
+#define TCA_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TCA_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TCA_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TCA_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_CP_CLK_CTRL
+#define CGTT_CP_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CP_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPF_CLK_CTRL
+#define CGTT_CPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPC_CLK_CTRL
+#define CGTT_CPC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_RLC_CLK_CTRL
+#define CGTT_RLC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_RLC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//RLC_GFX_RM_CNTL
+#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
+#define RLC_GFX_RM_CNTL__RESERVED__SHIFT 0x1
+#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
+#define RLC_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RMI_CGTT_SCLK_CTRL
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPF_CLK_CTRL
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPF_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPF_CLK_CTRL__SPARE_MASK 0x0000F000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SE_CAC_CGTT_CLK_CTRL
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//GC_CAC_CGTT_CLK_CTRL
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//GRBM_CGTT_CLK_CNTL
+#define GRBM_CGTT_CLK_CNTL__ON_DELAY__SHIFT 0x0
+#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS__SHIFT 0x4
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GRBM_CGTT_CLK_CNTL__ON_DELAY_MASK 0x0000000FL
+#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+
+
+// addressBlock: gc_ea_pwrdec
+//GCEA_CGTT_CLK_CTRL
+#define GCEA_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define GCEA_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+
+
+// addressBlock: gc_utcl2_vmsharedhvdec
+//MC_VM_FB_SIZE_OFFSET_VF0
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF1
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF2
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF3
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF4
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF5
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF6
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF7
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF8
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF9
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF10
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF11
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF12
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF13
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF14
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF15
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VM_IOMMU_MMIO_CNTRL_1
+#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
+#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
+//MC_VM_MARC_BASE_LO_0
+#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_1
+#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_2
+#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_3
+#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_HI_0
+#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_1
+#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_2
+#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_3
+#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_LO_0
+#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_1
+#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_2
+#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_3
+#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_HI_0
+#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_1
+#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_2
+#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_3
+#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_LO_0
+#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_1
+#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_2
+#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_3
+#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_HI_0
+#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_1
+#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_2
+#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_3
+#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
+//VM_IOMMU_CONTROL_REGISTER
+#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
+#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
+//VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
+#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
+#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
+//VM_PCIE_ATS_CNTL
+#define VM_PCIE_ATS_CNTL__STU__SHIFT 0x10
+#define VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
+#define VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_0
+#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_1
+#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_2
+#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_3
+#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_4
+#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_5
+#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_6
+#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_7
+#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_8
+#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_9
+#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_10
+#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_11
+#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_12
+#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_13
+#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_14
+#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_15
+#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
+//UTCL2_CGTT_CLK_CTRL
+#define UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc
+#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L
+#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//MC_SHARED_ACTIVE_FCN_ID
+#define MC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define MC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define MC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define MC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//MC_VM_XGMI_GPUIOV_ENABLE
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0__SHIFT 0x0
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1__SHIFT 0x1
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2__SHIFT 0x2
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3__SHIFT 0x3
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4__SHIFT 0x4
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5__SHIFT 0x5
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6__SHIFT 0x6
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7__SHIFT 0x7
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8__SHIFT 0x8
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9__SHIFT 0x9
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10__SHIFT 0xa
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11__SHIFT 0xb
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12__SHIFT 0xc
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13__SHIFT 0xd
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14__SHIFT 0xe
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15__SHIFT 0xf
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF__SHIFT 0x1f
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0_MASK 0x00000001L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1_MASK 0x00000002L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2_MASK 0x00000004L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3_MASK 0x00000008L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4_MASK 0x00000010L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5_MASK 0x00000020L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6_MASK 0x00000040L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7_MASK 0x00000080L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8_MASK 0x00000100L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9_MASK 0x00000200L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10_MASK 0x00000400L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11_MASK 0x00000800L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12_MASK 0x00001000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13_MASK 0x00002000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14_MASK 0x00004000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15_MASK 0x00008000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF_MASK 0x80000000L
+
+
+// addressBlock: gc_hypdec
+//CP_HYP_PFP_UCODE_ADDR
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
+//CP_PFP_UCODE_ADDR
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
+//CP_HYP_PFP_UCODE_DATA
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_PFP_UCODE_DATA
+#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_ADDR
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR_MASK 0x00001FFFL
+//CP_ME_RAM_RADDR
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x0
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x00001FFFL
+//CP_ME_RAM_WADDR
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x0
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x00001FFFL
+//CP_HYP_ME_UCODE_DATA
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_ME_RAM_DATA
+#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x0
+#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xFFFFFFFFL
+//CP_CE_UCODE_ADDR
+#define CP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+//CP_HYP_CE_UCODE_ADDR
+#define CP_HYP_CE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_CE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+//CP_CE_UCODE_DATA
+#define CP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_CE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_CE_UCODE_DATA
+#define CP_HYP_CE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_CE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC1_UCODE_ADDR
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
+//CP_MEC_ME1_UCODE_ADDR
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
+//CP_HYP_MEC1_UCODE_DATA
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME1_UCODE_DATA
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC2_UCODE_ADDR
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
+//CP_MEC_ME2_UCODE_ADDR
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR_MASK 0x0001FFFFL
+//CP_HYP_MEC2_UCODE_DATA
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_UCODE_DATA
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_PFP_UCODE_CHKSUM
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_CE_UCODE_CHKSUM
+#define CP_HYP_CE_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_CE_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_CHKSUM
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME1_UCODE_CHKSUM
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME2_UCODE_CHKSUM
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_GPM_UCODE_ADDR
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPM_UCODE_ADDR__RESERVED__SHIFT 0xe
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
+#define RLC_GPM_UCODE_ADDR__RESERVED_MASK 0xFFFFC000L
+//RLC_GPM_UCODE_DATA
+#define RLC_GPM_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPM_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//GRBM_GFX_INDEX_SR_SELECT
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX_MASK 0x00000007L
+//GRBM_GFX_INDEX_SR_DATA
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_DATA__SH_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX_SR_DATA__SH_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX_SR_DATA__SH_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX_SR_DATA__SH_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES_MASK 0x80000000L
+//GRBM_GFX_CNTL_SR_SELECT
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX_MASK 0x00000007L
+//GRBM_GFX_CNTL_SR_DATA
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_DATA__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL_SR_DATA__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL_SR_DATA__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL_SR_DATA__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID_MASK 0x00000700L
+//GRBM_CAM_INDEX
+#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
+//GRBM_HYP_CAM_INDEX
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX_MASK 0x00000007L
+//GRBM_CAM_DATA
+#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//GRBM_HYP_CAM_DATA
+#define GRBM_HYP_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_HYP_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//RLC_GPU_IOV_VF_ENABLE
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM__SHIFT 0x10
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED_MASK 0x0000FFFEL
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM_MASK 0xFFFF0000L
+//RLC_GPU_IOV_CFG_REG6
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION__SHIFT 0x7
+#define RLC_GPU_IOV_CFG_REG6__RESERVED__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET__SHIFT 0xa
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE_MASK 0x0000007FL
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION_MASK 0x00000080L
+#define RLC_GPU_IOV_CFG_REG6__RESERVED_MASK 0x00000300L
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET_MASK 0xFFFFFC00L
+//RLC_GPU_IOV_CFG_REG8
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_0
+#define RLC_RLCV_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_CTRL
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_RLCV_TIMER_CTRL__RESERVED__SHIFT 0x2
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_RLCV_TIMER_CTRL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCV_TIMER_STAT
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_RLCV_TIMER_STAT__RESERVED__SHIFT 0x2
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_RLCV_TIMER_STAT__RESERVED_MASK 0x000000FCL
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__RESERVED_MASK 0x7FFF0000L
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_SET
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__RESERVED_MASK 0x7FFF0000L
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__RESERVED_MASK 0x7FFF0000L
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR_MASK 0x80000000L
+//RLC_GPU_IOV_VF_MASK
+#define RLC_GPU_IOV_VF_MASK__VF_MASK__SHIFT 0x0
+#define RLC_GPU_IOV_VF_MASK__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VF_MASK__VF_MASK_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VF_MASK__RESERVED_MASK 0xFFFF0000L
+//RLC_HYP_SEMAPHORE_0
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_1
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_CLK_CNTL
+#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL__SHIFT 0x0
+#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL__SHIFT 0x2
+#define RLC_CLK_CNTL__RLC_GPM_CLK_CNTL__SHIFT 0x4
+#define RLC_CLK_CNTL__RLC_CMN_CLK_CNTL__SHIFT 0x5
+#define RLC_CLK_CNTL__RLC_TC_CLK_CNTL__SHIFT 0x6
+#define RLC_CLK_CNTL__RLC_SPP_CLK_CNTL__SHIFT 0x7
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE__SHIFT 0x8
+#define RLC_CLK_CNTL__RESERVED__SHIFT 0x9
+#define RLC_CLK_CNTL__RLC_SRM_CLK_CNTL_MASK 0x00000003L
+#define RLC_CLK_CNTL__RLC_SPM_CLK_CNTL_MASK 0x0000000CL
+#define RLC_CLK_CNTL__RLC_GPM_CLK_CNTL_MASK 0x00000010L
+#define RLC_CLK_CNTL__RLC_CMN_CLK_CNTL_MASK 0x00000020L
+#define RLC_CLK_CNTL__RLC_TC_CLK_CNTL_MASK 0x00000040L
+#define RLC_CLK_CNTL__RLC_SPP_CLK_CNTL_MASK 0x00000080L
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK 0x00000100L
+#define RLC_CLK_CNTL__RESERVED_MASK 0xFFFFFE00L
+//RLC_GPU_IOV_SCH_BLOCK
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver__SHIFT 0x4
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size__SHIFT 0x8
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver_MASK 0x000000F0L
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size_MASK 0x00007F00L
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED_MASK 0x7FFF0000L
+//RLC_GPU_IOV_CFG_REG1
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define RLC_GPU_IOV_CFG_REG1__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID__SHIFT 0x10
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1__SHIFT 0x18
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_MASK 0x00000010L
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED_MASK 0x000000C0L
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID_MASK 0x0000FF00L
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID_MASK 0x00FF0000L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1_MASK 0xFF000000L
+//RLC_GPU_IOV_CFG_REG2
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG2__RESERVED__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG2__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPU_IOV_VM_BUSY_STATUS
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_ACTIVE_FCN_ID
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//RLC_GPU_IOV_SCH_3
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_1
+#define RLC_GPU_IOV_SCH_1__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_2
+#define RLC_GPU_IOV_SCH_2__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_STAT
+#define RLC_GPU_IOV_INT_STAT__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_INT_STAT__STATUS_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_1
+#define RLC_RLCV_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_UCODE_ADDR
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED__SHIFT 0xc
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_GPU_IOV_UCODE_DATA
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCRATCH_ADDR
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_ADDR__RESERVED__SHIFT 0x9
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR_MASK 0x000001FFL
+#define RLC_GPU_IOV_SCRATCH_ADDR__RESERVED_MASK 0xFFFFFE00L
+//RLC_GPU_IOV_SCRATCH_DATA
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_F32_CNTL
+#define RLC_GPU_IOV_F32_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_F32_CNTL__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_F32_CNTL__ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_F32_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPU_IOV_F32_RESET
+#define RLC_GPU_IOV_F32_RESET__RESET__SHIFT 0x0
+#define RLC_GPU_IOV_F32_RESET__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_F32_RESET__RESET_MASK 0x00000001L
+#define RLC_GPU_IOV_F32_RESET__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPU_IOV_SDMA0_STATUS
+#define RLC_GPU_IOV_SDMA0_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA0_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA0_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA0_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA0_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA0_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA0_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SDMA1_STATUS
+#define RLC_GPU_IOV_SDMA1_STATUS__PREEMPTED__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_SDMA1_STATUS__SAVED__SHIFT 0x8
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED1__SHIFT 0x9
+#define RLC_GPU_IOV_SDMA1_STATUS__RESTORED__SHIFT 0xc
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED2__SHIFT 0xd
+#define RLC_GPU_IOV_SDMA1_STATUS__PREEMPTED_MASK 0x00000001L
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED_MASK 0x000000FEL
+#define RLC_GPU_IOV_SDMA1_STATUS__SAVED_MASK 0x00000100L
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED1_MASK 0x00000E00L
+#define RLC_GPU_IOV_SDMA1_STATUS__RESTORED_MASK 0x00001000L
+#define RLC_GPU_IOV_SDMA1_STATUS__RESERVED2_MASK 0xFFFFE000L
+//RLC_GPU_IOV_SMU_RESPONSE
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_VIRT_RESET_REQ
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define RLC_GPU_IOV_VIRT_RESET_REQ__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR_MASK 0x0000FFFFL
+#define RLC_GPU_IOV_VIRT_RESET_REQ__RESERVED_MASK 0x7FFF0000L
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR_MASK 0x80000000L
+//RLC_GPU_IOV_RLC_RESPONSE
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_DISABLE
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE__SHIFT 0x0
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_FORCE
+#define RLC_GPU_IOV_INT_FORCE__FORCE__SHIFT 0x0
+#define RLC_GPU_IOV_INT_FORCE__FORCE_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA0_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA1_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_HYP_SEMAPHORE_2
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_3
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+
+
+// addressBlock: gccacind
+//GC_CAC_CNTL
+#define GC_CAC_CNTL__CAC_FORCE_DISABLE__SHIFT 0x0
+#define GC_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x1
+#define GC_CAC_CNTL__CAC_BLOCK_ID__SHIFT 0x11
+#define GC_CAC_CNTL__CAC_SIGNAL_ID__SHIFT 0x17
+#define GC_CAC_CNTL__CAC_FORCE_DISABLE_MASK 0x00000001L
+#define GC_CAC_CNTL__CAC_THRESHOLD_MASK 0x0001FFFEL
+#define GC_CAC_CNTL__CAC_BLOCK_ID_MASK 0x007E0000L
+#define GC_CAC_CNTL__CAC_SIGNAL_ID_MASK 0x7F800000L
+//GC_CAC_OVR_SEL
+#define GC_CAC_OVR_SEL__CAC_OVR_SEL__SHIFT 0x0
+#define GC_CAC_OVR_SEL__CAC_OVR_SEL_MASK 0xFFFFFFFFL
+//GC_CAC_OVR_VAL
+#define GC_CAC_OVR_VAL__CAC_OVR_VAL__SHIFT 0x0
+#define GC_CAC_OVR_VAL__CAC_OVR_VAL_MASK 0xFFFFFFFFL
+//GC_CAC_WEIGHT_BCI_0
+#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CB_0
+#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CB_1
+#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CP_0
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CP_1
+#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_DB_0
+#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_DB_1
+#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GDS_0
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GDS_1
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_IA_0
+#define GC_CAC_WEIGHT_IA_0__WEIGHT_IA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_IA_0__WEIGHT_IA_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_LDS_0
+#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_LDS_1
+#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PA_0
+#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PC_0
+#define GC_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_SC_0
+#define GC_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_SPI_0
+#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SPI_1
+#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SPI_2
+#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SQ_0
+#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SQ_1
+#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SQ_2
+#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SQ_3
+#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SQ_3__WEIGHT_SQ_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SQ_4
+#define GC_CAC_WEIGHT_SQ_4__WEIGHT_SQ_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_SQ_4__WEIGHT_SQ_SIG8_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_SX_0
+#define GC_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_SXRB_0
+#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_TA_0
+#define GC_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_TCC_0
+#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_TCC_0__WEIGHT_TCC_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_TCC_1
+#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_TCC_1__WEIGHT_TCC_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_TCC_2
+#define GC_CAC_WEIGHT_TCC_2__WEIGHT_TCC_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_TCC_2__WEIGHT_TCC_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_TCP_0
+#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_TCP_1
+#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_TCP_2
+#define GC_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_TD_0
+#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_TD_1
+#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_TD_2
+#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_VGT_0
+#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_VGT_0__WEIGHT_VGT_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_VGT_1
+#define GC_CAC_WEIGHT_VGT_1__WEIGHT_VGT_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_VGT_1__WEIGHT_VGT_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_WD_0
+#define GC_CAC_WEIGHT_WD_0__WEIGHT_WD_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_WD_0__WEIGHT_WD_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_CU_0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0x0000FFFFL
+//GC_CAC_ACC_BCI0
+#define GC_CAC_ACC_BCI0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_BCI0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CB0
+#define GC_CAC_ACC_CB0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CB0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CB1
+#define GC_CAC_ACC_CB1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CB1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CB2
+#define GC_CAC_ACC_CB2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CB2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CB3
+#define GC_CAC_ACC_CB3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CB3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CP0
+#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CP1
+#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CP2
+#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_DB0
+#define GC_CAC_ACC_DB0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_DB0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_DB1
+#define GC_CAC_ACC_DB1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_DB1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_DB2
+#define GC_CAC_ACC_DB2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_DB2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_DB3
+#define GC_CAC_ACC_DB3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_DB3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS0
+#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS1
+#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS2
+#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS3
+#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_IA0
+#define GC_CAC_ACC_IA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_IA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_LDS0
+#define GC_CAC_ACC_LDS0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_LDS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_LDS1
+#define GC_CAC_ACC_LDS1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_LDS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_LDS2
+#define GC_CAC_ACC_LDS2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_LDS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_LDS3
+#define GC_CAC_ACC_LDS3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_LDS3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PA0
+#define GC_CAC_ACC_PA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PA1
+#define GC_CAC_ACC_PA1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PC0
+#define GC_CAC_ACC_PC0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SC0
+#define GC_CAC_ACC_SC0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SPI0
+#define GC_CAC_ACC_SPI0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SPI0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SPI1
+#define GC_CAC_ACC_SPI1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SPI1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SPI2
+#define GC_CAC_ACC_SPI2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SPI2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SPI3
+#define GC_CAC_ACC_SPI3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SPI3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SPI4
+#define GC_CAC_ACC_SPI4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SPI4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SPI5
+#define GC_CAC_ACC_SPI5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SPI5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_WEIGHT_UTCL2_ATCL2_0
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_0__WEIGHT_UTCL2_ATCL2_SIG1_MASK 0xFFFF0000L
+//GC_CAC_ACC_EA0
+#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA1
+#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA2
+#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA3
+#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL20
+#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_OVRD_EA
+#define GC_CAC_OVRD_EA__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_EA__OVRRD_VALUE__SHIFT 0x6
+#define GC_CAC_OVRD_EA__OVRRD_SELECT_MASK 0x0000003FL
+#define GC_CAC_OVRD_EA__OVRRD_VALUE_MASK 0x00000FC0L
+//GC_CAC_OVRD_UTCL2_ATCL2
+#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_VALUE__SHIFT 0x5
+#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_SELECT_MASK 0x0000001FL
+#define GC_CAC_OVRD_UTCL2_ATCL2__OVRRD_VALUE_MASK 0x000003E0L
+//GC_CAC_WEIGHT_EA_0
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_EA_1
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_RMI_0
+#define GC_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0_MASK 0x0000FFFFL
+//GC_CAC_ACC_RMI0
+#define GC_CAC_ACC_RMI0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_RMI0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_OVRD_RMI
+#define GC_CAC_OVRD_RMI__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_RMI__OVRRD_VALUE__SHIFT 0x1
+#define GC_CAC_OVRD_RMI__OVRRD_SELECT_MASK 0x00000001L
+#define GC_CAC_OVRD_RMI__OVRRD_VALUE_MASK 0x00000002L
+//GC_CAC_WEIGHT_UTCL2_ATCL2_1
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_1__WEIGHT_UTCL2_ATCL2_SIG3_MASK 0xFFFF0000L
+//GC_CAC_ACC_UTCL2_ATCL21
+#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL22
+#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL23
+#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA4
+#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA5
+#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_WEIGHT_EA_2
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5_MASK 0xFFFF0000L
+//GC_CAC_ACC_SQ0_LOWER
+#define GC_CAC_ACC_SQ0_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SQ0_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SQ0_UPPER
+#define GC_CAC_ACC_SQ0_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
+#define GC_CAC_ACC_SQ0_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
+//GC_CAC_ACC_SQ1_LOWER
+#define GC_CAC_ACC_SQ1_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SQ1_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SQ1_UPPER
+#define GC_CAC_ACC_SQ1_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
+#define GC_CAC_ACC_SQ1_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
+//GC_CAC_ACC_SQ2_LOWER
+#define GC_CAC_ACC_SQ2_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SQ2_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SQ2_UPPER
+#define GC_CAC_ACC_SQ2_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
+#define GC_CAC_ACC_SQ2_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
+//GC_CAC_ACC_SQ3_LOWER
+#define GC_CAC_ACC_SQ3_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SQ3_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SQ3_UPPER
+#define GC_CAC_ACC_SQ3_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
+#define GC_CAC_ACC_SQ3_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
+//GC_CAC_ACC_SQ4_LOWER
+#define GC_CAC_ACC_SQ4_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SQ4_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SQ4_UPPER
+#define GC_CAC_ACC_SQ4_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
+#define GC_CAC_ACC_SQ4_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
+//GC_CAC_ACC_SQ5_LOWER
+#define GC_CAC_ACC_SQ5_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SQ5_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SQ5_UPPER
+#define GC_CAC_ACC_SQ5_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
+#define GC_CAC_ACC_SQ5_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
+//GC_CAC_ACC_SQ6_LOWER
+#define GC_CAC_ACC_SQ6_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SQ6_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SQ6_UPPER
+#define GC_CAC_ACC_SQ6_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
+#define GC_CAC_ACC_SQ6_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
+//GC_CAC_ACC_SQ7_LOWER
+#define GC_CAC_ACC_SQ7_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SQ7_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SQ7_UPPER
+#define GC_CAC_ACC_SQ7_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
+#define GC_CAC_ACC_SQ7_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
+//GC_CAC_ACC_SQ8_LOWER
+#define GC_CAC_ACC_SQ8_LOWER__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SQ8_LOWER__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SQ8_UPPER
+#define GC_CAC_ACC_SQ8_UPPER__ACCUMULATOR_39_32__SHIFT 0x0
+#define GC_CAC_ACC_SQ8_UPPER__ACCUMULATOR_39_32_MASK 0x000000FFL
+//GC_CAC_ACC_SX0
+#define GC_CAC_ACC_SX0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SX0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SXRB0
+#define GC_CAC_ACC_SXRB0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SXRB0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SXRB1
+#define GC_CAC_ACC_SXRB1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SXRB1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TA0
+#define GC_CAC_ACC_TA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCC0
+#define GC_CAC_ACC_TCC0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCC1
+#define GC_CAC_ACC_TCC1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCC1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCC2
+#define GC_CAC_ACC_TCC2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCC2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCC3
+#define GC_CAC_ACC_TCC3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCC3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCC4
+#define GC_CAC_ACC_TCC4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCC4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCP0
+#define GC_CAC_ACC_TCP0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCP0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCP1
+#define GC_CAC_ACC_TCP1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCP1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCP2
+#define GC_CAC_ACC_TCP2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCP2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCP3
+#define GC_CAC_ACC_TCP3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCP3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TCP4
+#define GC_CAC_ACC_TCP4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TCP4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TD0
+#define GC_CAC_ACC_TD0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TD0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TD1
+#define GC_CAC_ACC_TD1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TD1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TD2
+#define GC_CAC_ACC_TD2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TD2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TD3
+#define GC_CAC_ACC_TD3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TD3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TD4
+#define GC_CAC_ACC_TD4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TD4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_TD5
+#define GC_CAC_ACC_TD5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_TD5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_VGT0
+#define GC_CAC_ACC_VGT0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_VGT0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_VGT1
+#define GC_CAC_ACC_VGT1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_VGT1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_VGT2
+#define GC_CAC_ACC_VGT2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_VGT2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_WD0
+#define GC_CAC_ACC_WD0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_WD0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CU0
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CU1
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CU2
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CU3
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CU4
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_OVRD_BCI
+#define GC_CAC_OVRD_BCI__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_BCI__OVRRD_VALUE__SHIFT 0x2
+#define GC_CAC_OVRD_BCI__OVRRD_SELECT_MASK 0x00000003L
+#define GC_CAC_OVRD_BCI__OVRRD_VALUE_MASK 0x0000000CL
+//GC_CAC_OVRD_CB
+#define GC_CAC_OVRD_CB__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_CB__OVRRD_VALUE__SHIFT 0x4
+#define GC_CAC_OVRD_CB__OVRRD_SELECT_MASK 0x0000000FL
+#define GC_CAC_OVRD_CB__OVRRD_VALUE_MASK 0x000000F0L
+//GC_CAC_OVRD_CP
+#define GC_CAC_OVRD_CP__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_CP__OVRRD_VALUE__SHIFT 0x3
+#define GC_CAC_OVRD_CP__OVRRD_SELECT_MASK 0x00000007L
+#define GC_CAC_OVRD_CP__OVRRD_VALUE_MASK 0x00000038L
+//GC_CAC_OVRD_DB
+#define GC_CAC_OVRD_DB__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_DB__OVRRD_VALUE__SHIFT 0x4
+#define GC_CAC_OVRD_DB__OVRRD_SELECT_MASK 0x0000000FL
+#define GC_CAC_OVRD_DB__OVRRD_VALUE_MASK 0x000000F0L
+//GC_CAC_OVRD_GDS
+#define GC_CAC_OVRD_GDS__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_GDS__OVRRD_VALUE__SHIFT 0x4
+#define GC_CAC_OVRD_GDS__OVRRD_SELECT_MASK 0x0000000FL
+#define GC_CAC_OVRD_GDS__OVRRD_VALUE_MASK 0x000000F0L
+//GC_CAC_OVRD_IA
+#define GC_CAC_OVRD_IA__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_IA__OVRRD_VALUE__SHIFT 0x1
+#define GC_CAC_OVRD_IA__OVRRD_SELECT_MASK 0x00000001L
+#define GC_CAC_OVRD_IA__OVRRD_VALUE_MASK 0x00000002L
+//GC_CAC_OVRD_LDS
+#define GC_CAC_OVRD_LDS__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_LDS__OVRRD_VALUE__SHIFT 0x4
+#define GC_CAC_OVRD_LDS__OVRRD_SELECT_MASK 0x0000000FL
+#define GC_CAC_OVRD_LDS__OVRRD_VALUE_MASK 0x000000F0L
+//GC_CAC_OVRD_PA
+#define GC_CAC_OVRD_PA__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_PA__OVRRD_VALUE__SHIFT 0x2
+#define GC_CAC_OVRD_PA__OVRRD_SELECT_MASK 0x00000003L
+#define GC_CAC_OVRD_PA__OVRRD_VALUE_MASK 0x0000000CL
+//GC_CAC_OVRD_PC
+#define GC_CAC_OVRD_PC__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_PC__OVRRD_VALUE__SHIFT 0x1
+#define GC_CAC_OVRD_PC__OVRRD_SELECT_MASK 0x00000001L
+#define GC_CAC_OVRD_PC__OVRRD_VALUE_MASK 0x00000002L
+//GC_CAC_OVRD_SC
+#define GC_CAC_OVRD_SC__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_SC__OVRRD_VALUE__SHIFT 0x1
+#define GC_CAC_OVRD_SC__OVRRD_SELECT_MASK 0x00000001L
+#define GC_CAC_OVRD_SC__OVRRD_VALUE_MASK 0x00000002L
+//GC_CAC_OVRD_SPI
+#define GC_CAC_OVRD_SPI__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_SPI__OVRRD_VALUE__SHIFT 0x6
+#define GC_CAC_OVRD_SPI__OVRRD_SELECT_MASK 0x0000003FL
+#define GC_CAC_OVRD_SPI__OVRRD_VALUE_MASK 0x00000FC0L
+//GC_CAC_OVRD_CU
+#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x1
+#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0x00000001L
+#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0x00000002L
+//GC_CAC_OVRD_SQ
+#define GC_CAC_OVRD_SQ__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_SQ__OVRRD_VALUE__SHIFT 0x9
+#define GC_CAC_OVRD_SQ__OVRRD_SELECT_MASK 0x000001FFL
+#define GC_CAC_OVRD_SQ__OVRRD_VALUE_MASK 0x0003FE00L
+//GC_CAC_OVRD_SX
+#define GC_CAC_OVRD_SX__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_SX__OVRRD_VALUE__SHIFT 0x1
+#define GC_CAC_OVRD_SX__OVRRD_SELECT_MASK 0x00000001L
+#define GC_CAC_OVRD_SX__OVRRD_VALUE_MASK 0x00000002L
+//GC_CAC_OVRD_SXRB
+#define GC_CAC_OVRD_SXRB__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_SXRB__OVRRD_VALUE__SHIFT 0x1
+#define GC_CAC_OVRD_SXRB__OVRRD_SELECT_MASK 0x00000001L
+#define GC_CAC_OVRD_SXRB__OVRRD_VALUE_MASK 0x00000002L
+//GC_CAC_OVRD_TA
+#define GC_CAC_OVRD_TA__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_TA__OVRRD_VALUE__SHIFT 0x1
+#define GC_CAC_OVRD_TA__OVRRD_SELECT_MASK 0x00000001L
+#define GC_CAC_OVRD_TA__OVRRD_VALUE_MASK 0x00000002L
+//GC_CAC_OVRD_TCC
+#define GC_CAC_OVRD_TCC__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_TCC__OVRRD_VALUE__SHIFT 0x5
+#define GC_CAC_OVRD_TCC__OVRRD_SELECT_MASK 0x0000001FL
+#define GC_CAC_OVRD_TCC__OVRRD_VALUE_MASK 0x000003E0L
+//GC_CAC_OVRD_TCP
+#define GC_CAC_OVRD_TCP__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_TCP__OVRRD_VALUE__SHIFT 0x5
+#define GC_CAC_OVRD_TCP__OVRRD_SELECT_MASK 0x0000001FL
+#define GC_CAC_OVRD_TCP__OVRRD_VALUE_MASK 0x000003E0L
+//GC_CAC_OVRD_TD
+#define GC_CAC_OVRD_TD__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_TD__OVRRD_VALUE__SHIFT 0x6
+#define GC_CAC_OVRD_TD__OVRRD_SELECT_MASK 0x0000003FL
+#define GC_CAC_OVRD_TD__OVRRD_VALUE_MASK 0x00000FC0L
+//GC_CAC_OVRD_VGT
+#define GC_CAC_OVRD_VGT__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_VGT__OVRRD_VALUE__SHIFT 0x3
+#define GC_CAC_OVRD_VGT__OVRRD_SELECT_MASK 0x00000007L
+#define GC_CAC_OVRD_VGT__OVRRD_VALUE_MASK 0x00000038L
+//GC_CAC_OVRD_WD
+#define GC_CAC_OVRD_WD__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_WD__OVRRD_VALUE__SHIFT 0x1
+#define GC_CAC_OVRD_WD__OVRRD_SELECT_MASK 0x00000001L
+#define GC_CAC_OVRD_WD__OVRRD_VALUE_MASK 0x00000002L
+//GC_CAC_ACC_BCI1
+#define GC_CAC_ACC_BCI1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_BCI1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_WEIGHT_UTCL2_ATCL2_2
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ATCL2_2__WEIGHT_UTCL2_ATCL2_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_UTCL2_ROUTER_0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_1
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_2
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_3
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_4
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_0
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_1
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_2
+#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4_MASK 0x0000FFFFL
+//GC_CAC_ACC_UTCL2_ATCL24
+#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER0
+#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER1
+#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER2
+#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER3
+#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER4
+#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER5
+#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER6
+#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER7
+#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER8
+#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER9
+#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML20
+#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML21
+#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML22
+#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML23
+#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML24
+#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_OVRD_UTCL2_ROUTER
+#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_VALUE__SHIFT 0xa
+#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_SELECT_MASK 0x000003FFL
+#define GC_CAC_OVRD_UTCL2_ROUTER__OVRRD_VALUE_MASK 0x000FFC00L
+//GC_CAC_OVRD_UTCL2_VML2
+#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_VALUE__SHIFT 0x5
+#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_SELECT_MASK 0x0000001FL
+#define GC_CAC_OVRD_UTCL2_VML2__OVRRD_VALUE_MASK 0x000003E0L
+//GC_CAC_WEIGHT_UTCL2_WALKER_0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_WALKER_1
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_WALKER_2
+#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4_MASK 0x0000FFFFL
+//GC_CAC_ACC_UTCL2_WALKER0
+#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER1
+#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER2
+#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER3
+#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER4
+#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_OVRD_UTCL2_WALKER
+#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_VALUE__SHIFT 0x5
+#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_SELECT_MASK 0x0000001FL
+#define GC_CAC_OVRD_UTCL2_WALKER__OVRRD_VALUE_MASK 0x000003E0L
+//PCC_STALL_PATTERN_1_2
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_1__SHIFT 0x0
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_2__SHIFT 0x10
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_3_4
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_3__SHIFT 0x0
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_4__SHIFT 0x10
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_5_6
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_5__SHIFT 0x0
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_6__SHIFT 0x10
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_7
+#define PCC_STALL_PATTERN_7__PCC_STALL_PATTERN_7__SHIFT 0x0
+#define PCC_STALL_PATTERN_7__PCC_STALL_PATTERN_7_MASK 0x00007FFFL
+//PCC_THROT_REINCR_FIRST_PATN_1_8
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_1__SHIFT 0x0
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_2__SHIFT 0x4
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_3__SHIFT 0x8
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_4__SHIFT 0xc
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_5__SHIFT 0x10
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_6__SHIFT 0x14
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_7__SHIFT 0x18
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_8__SHIFT 0x1c
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_1_MASK 0x00000007L
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_2_MASK 0x00000070L
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_3_MASK 0x00000700L
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_4_MASK 0x00007000L
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_5_MASK 0x00070000L
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_6_MASK 0x00700000L
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_7_MASK 0x07000000L
+#define PCC_THROT_REINCR_FIRST_PATN_1_8__FIRST_PATTERN_8_MASK 0x70000000L
+//PCC_THROT_REINCR_FIRST_PATN_9_16
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_9__SHIFT 0x0
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_10__SHIFT 0x4
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_11__SHIFT 0x8
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_12__SHIFT 0xc
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_13__SHIFT 0x10
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_14__SHIFT 0x14
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_15__SHIFT 0x18
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_16__SHIFT 0x1c
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_9_MASK 0x00000007L
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_10_MASK 0x00000070L
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_11_MASK 0x00000700L
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_12_MASK 0x00007000L
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_13_MASK 0x00070000L
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_14_MASK 0x00700000L
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_15_MASK 0x07000000L
+#define PCC_THROT_REINCR_FIRST_PATN_9_16__FIRST_PATTERN_16_MASK 0x70000000L
+//PCC_THROT_REINCR_FIRST_PATN_17_20
+#define PCC_THROT_REINCR_FIRST_PATN_17_20__FIRST_PATTERN_17__SHIFT 0x0
+#define PCC_THROT_REINCR_FIRST_PATN_17_20__FIRST_PATTERN_18__SHIFT 0x4
+#define PCC_THROT_REINCR_FIRST_PATN_17_20__FIRST_PATTERN_19__SHIFT 0x8
+#define PCC_THROT_REINCR_FIRST_PATN_17_20__FIRST_PATTERN_20__SHIFT 0xc
+#define PCC_THROT_REINCR_FIRST_PATN_17_20__FIRST_PATTERN_17_MASK 0x00000007L
+#define PCC_THROT_REINCR_FIRST_PATN_17_20__FIRST_PATTERN_18_MASK 0x00000070L
+#define PCC_THROT_REINCR_FIRST_PATN_17_20__FIRST_PATTERN_19_MASK 0x00000700L
+#define PCC_THROT_REINCR_FIRST_PATN_17_20__FIRST_PATTERN_20_MASK 0x00007000L
+//PCC_THROT_DECR_FIRST_PATN_1_4
+#define PCC_THROT_DECR_FIRST_PATN_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define PCC_THROT_DECR_FIRST_PATN_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define PCC_THROT_DECR_FIRST_PATN_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define PCC_THROT_DECR_FIRST_PATN_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define PCC_THROT_DECR_FIRST_PATN_1_4__FIRST_PATTERN_1_MASK 0x0000001FL
+#define PCC_THROT_DECR_FIRST_PATN_1_4__FIRST_PATTERN_2_MASK 0x00001F00L
+#define PCC_THROT_DECR_FIRST_PATN_1_4__FIRST_PATTERN_3_MASK 0x001F0000L
+#define PCC_THROT_DECR_FIRST_PATN_1_4__FIRST_PATTERN_4_MASK 0x1F000000L
+//PCC_THROT_DECR_FIRST_PATN_5_7
+#define PCC_THROT_DECR_FIRST_PATN_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define PCC_THROT_DECR_FIRST_PATN_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define PCC_THROT_DECR_FIRST_PATN_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define PCC_THROT_DECR_FIRST_PATN_5_7__FIRST_PATTERN_5_MASK 0x0000001FL
+#define PCC_THROT_DECR_FIRST_PATN_5_7__FIRST_PATTERN_6_MASK 0x00001F00L
+#define PCC_THROT_DECR_FIRST_PATN_5_7__FIRST_PATTERN_7_MASK 0x001F0000L
+
+
+// addressBlock: secacind
+//SE_CAC_CNTL
+#define SE_CAC_CNTL__CAC_FORCE_DISABLE__SHIFT 0x0
+#define SE_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x1
+#define SE_CAC_CNTL__CAC_BLOCK_ID__SHIFT 0x11
+#define SE_CAC_CNTL__CAC_SIGNAL_ID__SHIFT 0x17
+#define SE_CAC_CNTL__CAC_FORCE_DISABLE_MASK 0x00000001L
+#define SE_CAC_CNTL__CAC_THRESHOLD_MASK 0x0001FFFEL
+#define SE_CAC_CNTL__CAC_BLOCK_ID_MASK 0x007E0000L
+#define SE_CAC_CNTL__CAC_SIGNAL_ID_MASK 0x7F800000L
+//SE_CAC_OVR_SEL
+#define SE_CAC_OVR_SEL__CAC_OVR_SEL__SHIFT 0x0
+#define SE_CAC_OVR_SEL__CAC_OVR_SEL_MASK 0xFFFFFFFFL
+//SE_CAC_OVR_VAL
+#define SE_CAC_OVR_VAL__CAC_OVR_VAL__SHIFT 0x0
+#define SE_CAC_OVR_VAL__CAC_OVR_VAL_MASK 0xFFFFFFFFL
+
+
+// addressBlock: sqind
+//SQ_WAVE_MODE
+#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x0
+#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x4
+#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x8
+#define SQ_WAVE_MODE__IEEE__SHIFT 0x9
+#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0xa
+#define SQ_WAVE_MODE__DEBUG_EN__SHIFT 0xb
+#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0xc
+#define SQ_WAVE_MODE__FP16_OVFL__SHIFT 0x17
+#define SQ_WAVE_MODE__POPS_PACKER0__SHIFT 0x18
+#define SQ_WAVE_MODE__POPS_PACKER1__SHIFT 0x19
+#define SQ_WAVE_MODE__DISABLE_PERF__SHIFT 0x1a
+#define SQ_WAVE_MODE__GPR_IDX_EN__SHIFT 0x1b
+#define SQ_WAVE_MODE__VSKIP__SHIFT 0x1c
+#define SQ_WAVE_MODE__CSP__SHIFT 0x1d
+#define SQ_WAVE_MODE__FP_ROUND_MASK 0x0000000FL
+#define SQ_WAVE_MODE__FP_DENORM_MASK 0x000000F0L
+#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x00000100L
+#define SQ_WAVE_MODE__IEEE_MASK 0x00000200L
+#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x00000400L
+#define SQ_WAVE_MODE__DEBUG_EN_MASK 0x00000800L
+#define SQ_WAVE_MODE__EXCP_EN_MASK 0x001FF000L
+#define SQ_WAVE_MODE__FP16_OVFL_MASK 0x00800000L
+#define SQ_WAVE_MODE__POPS_PACKER0_MASK 0x01000000L
+#define SQ_WAVE_MODE__POPS_PACKER1_MASK 0x02000000L
+#define SQ_WAVE_MODE__DISABLE_PERF_MASK 0x04000000L
+#define SQ_WAVE_MODE__GPR_IDX_EN_MASK 0x08000000L
+#define SQ_WAVE_MODE__VSKIP_MASK 0x10000000L
+#define SQ_WAVE_MODE__CSP_MASK 0xE0000000L
+//SQ_WAVE_STATUS
+#define SQ_WAVE_STATUS__SCC__SHIFT 0x0
+#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x1
+#define SQ_WAVE_STATUS__USER_PRIO__SHIFT 0x3
+#define SQ_WAVE_STATUS__PRIV__SHIFT 0x5
+#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x6
+#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x7
+#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x8
+#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x9
+#define SQ_WAVE_STATUS__VCCZ__SHIFT 0xa
+#define SQ_WAVE_STATUS__IN_TG__SHIFT 0xb
+#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0xc
+#define SQ_WAVE_STATUS__HALT__SHIFT 0xd
+#define SQ_WAVE_STATUS__TRAP__SHIFT 0xe
+#define SQ_WAVE_STATUS__TTRACE_CU_EN__SHIFT 0xf
+#define SQ_WAVE_STATUS__VALID__SHIFT 0x10
+#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x11
+#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x12
+#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x13
+#define SQ_WAVE_STATUS__COND_DBG_USER__SHIFT 0x14
+#define SQ_WAVE_STATUS__COND_DBG_SYS__SHIFT 0x15
+#define SQ_WAVE_STATUS__ALLOW_REPLAY__SHIFT 0x16
+#define SQ_WAVE_STATUS__FATAL_HALT__SHIFT 0x17
+#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x1b
+#define SQ_WAVE_STATUS__SCC_MASK 0x00000001L
+#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x00000006L
+#define SQ_WAVE_STATUS__USER_PRIO_MASK 0x00000018L
+#define SQ_WAVE_STATUS__PRIV_MASK 0x00000020L
+#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x00000040L
+#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x00000080L
+#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x00000100L
+#define SQ_WAVE_STATUS__EXECZ_MASK 0x00000200L
+#define SQ_WAVE_STATUS__VCCZ_MASK 0x00000400L
+#define SQ_WAVE_STATUS__IN_TG_MASK 0x00000800L
+#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x00001000L
+#define SQ_WAVE_STATUS__HALT_MASK 0x00002000L
+#define SQ_WAVE_STATUS__TRAP_MASK 0x00004000L
+#define SQ_WAVE_STATUS__TTRACE_CU_EN_MASK 0x00008000L
+#define SQ_WAVE_STATUS__VALID_MASK 0x00010000L
+#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x00020000L
+#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x00040000L
+#define SQ_WAVE_STATUS__PERF_EN_MASK 0x00080000L
+#define SQ_WAVE_STATUS__COND_DBG_USER_MASK 0x00100000L
+#define SQ_WAVE_STATUS__COND_DBG_SYS_MASK 0x00200000L
+#define SQ_WAVE_STATUS__ALLOW_REPLAY_MASK 0x00400000L
+#define SQ_WAVE_STATUS__FATAL_HALT_MASK 0x00800000L
+#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x08000000L
+//SQ_WAVE_TRAPSTS
+#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x0
+#define SQ_WAVE_TRAPSTS__SAVECTX__SHIFT 0xa
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST__SHIFT 0xb
+#define SQ_WAVE_TRAPSTS__EXCP_HI__SHIFT 0xc
+#define SQ_WAVE_TRAPSTS__EXCP_CYCLE__SHIFT 0x10
+#define SQ_WAVE_TRAPSTS__XNACK_ERROR__SHIFT 0x1c
+#define SQ_WAVE_TRAPSTS__DP_RATE__SHIFT 0x1d
+#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x000001FFL
+#define SQ_WAVE_TRAPSTS__SAVECTX_MASK 0x00000400L
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST_MASK 0x00000800L
+#define SQ_WAVE_TRAPSTS__EXCP_HI_MASK 0x00007000L
+#define SQ_WAVE_TRAPSTS__EXCP_CYCLE_MASK 0x003F0000L
+#define SQ_WAVE_TRAPSTS__XNACK_ERROR_MASK 0x10000000L
+#define SQ_WAVE_TRAPSTS__DP_RATE_MASK 0xE0000000L
+//SQ_WAVE_HW_ID
+#define SQ_WAVE_HW_ID__WAVE_ID__SHIFT 0x0
+#define SQ_WAVE_HW_ID__SIMD_ID__SHIFT 0x4
+#define SQ_WAVE_HW_ID__PIPE_ID__SHIFT 0x6
+#define SQ_WAVE_HW_ID__CU_ID__SHIFT 0x8
+#define SQ_WAVE_HW_ID__SH_ID__SHIFT 0xc
+#define SQ_WAVE_HW_ID__SE_ID__SHIFT 0xd
+#define SQ_WAVE_HW_ID__TG_ID__SHIFT 0x10
+#define SQ_WAVE_HW_ID__VM_ID__SHIFT 0x14
+#define SQ_WAVE_HW_ID__QUEUE_ID__SHIFT 0x18
+#define SQ_WAVE_HW_ID__STATE_ID__SHIFT 0x1b
+#define SQ_WAVE_HW_ID__ME_ID__SHIFT 0x1e
+#define SQ_WAVE_HW_ID__WAVE_ID_MASK 0x0000000FL
+#define SQ_WAVE_HW_ID__SIMD_ID_MASK 0x00000030L
+#define SQ_WAVE_HW_ID__PIPE_ID_MASK 0x000000C0L
+#define SQ_WAVE_HW_ID__CU_ID_MASK 0x00000F00L
+#define SQ_WAVE_HW_ID__SH_ID_MASK 0x00001000L
+#define SQ_WAVE_HW_ID__SE_ID_MASK 0x00006000L
+#define SQ_WAVE_HW_ID__TG_ID_MASK 0x000F0000L
+#define SQ_WAVE_HW_ID__VM_ID_MASK 0x00F00000L
+#define SQ_WAVE_HW_ID__QUEUE_ID_MASK 0x07000000L
+#define SQ_WAVE_HW_ID__STATE_ID_MASK 0x38000000L
+#define SQ_WAVE_HW_ID__ME_ID_MASK 0xC0000000L
+//SQ_WAVE_GPR_ALLOC
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x0
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0x8
+#define SQ_WAVE_GPR_ALLOC__SGPR_BASE__SHIFT 0x10
+#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE__SHIFT 0x18
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x0000003FL
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x00003F00L
+#define SQ_WAVE_GPR_ALLOC__SGPR_BASE_MASK 0x003F0000L
+#define SQ_WAVE_GPR_ALLOC__SGPR_SIZE_MASK 0x0F000000L
+//SQ_WAVE_LDS_ALLOC
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x0
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0xc
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0x000000FFL
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x001FF000L
+//SQ_WAVE_IB_STS
+#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0x0
+#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x4
+#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x8
+#define SQ_WAVE_IB_STS__VALU_CNT__SHIFT 0xc
+#define SQ_WAVE_IB_STS__FIRST_REPLAY__SHIFT 0xf
+#define SQ_WAVE_IB_STS__RCNT__SHIFT 0x10
+#define SQ_WAVE_IB_STS__VM_CNT_HI__SHIFT 0x16
+#define SQ_WAVE_IB_STS__VM_CNT_MASK 0x0000000FL
+#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x00000070L
+#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0x00000F00L
+#define SQ_WAVE_IB_STS__VALU_CNT_MASK 0x00007000L
+#define SQ_WAVE_IB_STS__FIRST_REPLAY_MASK 0x00008000L
+#define SQ_WAVE_IB_STS__RCNT_MASK 0x001F0000L
+#define SQ_WAVE_IB_STS__VM_CNT_HI_MASK 0x00C00000L
+//SQ_WAVE_PC_LO
+#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x0
+#define SQ_WAVE_PC_LO__PC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_PC_HI
+#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x0
+#define SQ_WAVE_PC_HI__PC_HI_MASK 0x0000FFFFL
+//SQ_WAVE_INST_DW0
+#define SQ_WAVE_INST_DW0__INST_DW0__SHIFT 0x0
+#define SQ_WAVE_INST_DW0__INST_DW0_MASK 0xFFFFFFFFL
+//SQ_WAVE_INST_DW1
+#define SQ_WAVE_INST_DW1__INST_DW1__SHIFT 0x0
+#define SQ_WAVE_INST_DW1__INST_DW1_MASK 0xFFFFFFFFL
+//SQ_WAVE_IB_DBG0
+#define SQ_WAVE_IB_DBG0__IBUF_ST__SHIFT 0x0
+#define SQ_WAVE_IB_DBG0__PC_INVALID__SHIFT 0x3
+#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW__SHIFT 0x4
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT__SHIFT 0x5
+#define SQ_WAVE_IB_DBG0__IBUF_RPTR__SHIFT 0x8
+#define SQ_WAVE_IB_DBG0__IBUF_WPTR__SHIFT 0xa
+#define SQ_WAVE_IB_DBG0__INST_STR_ST__SHIFT 0x10
+#define SQ_WAVE_IB_DBG0__ECC_ST__SHIFT 0x18
+#define SQ_WAVE_IB_DBG0__IS_HYB__SHIFT 0x1a
+#define SQ_WAVE_IB_DBG0__HYB_CNT__SHIFT 0x1b
+#define SQ_WAVE_IB_DBG0__KILL__SHIFT 0x1d
+#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH__SHIFT 0x1e
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_HI__SHIFT 0x1f
+#define SQ_WAVE_IB_DBG0__IBUF_ST_MASK 0x00000007L
+#define SQ_WAVE_IB_DBG0__PC_INVALID_MASK 0x00000008L
+#define SQ_WAVE_IB_DBG0__NEED_NEXT_DW_MASK 0x00000010L
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_MASK 0x000000E0L
+#define SQ_WAVE_IB_DBG0__IBUF_RPTR_MASK 0x00000300L
+#define SQ_WAVE_IB_DBG0__IBUF_WPTR_MASK 0x00000C00L
+#define SQ_WAVE_IB_DBG0__INST_STR_ST_MASK 0x000F0000L
+#define SQ_WAVE_IB_DBG0__ECC_ST_MASK 0x03000000L
+#define SQ_WAVE_IB_DBG0__IS_HYB_MASK 0x04000000L
+#define SQ_WAVE_IB_DBG0__HYB_CNT_MASK 0x18000000L
+#define SQ_WAVE_IB_DBG0__KILL_MASK 0x20000000L
+#define SQ_WAVE_IB_DBG0__NEED_KILL_IFETCH_MASK 0x40000000L
+#define SQ_WAVE_IB_DBG0__NO_PREFETCH_CNT_HI_MASK 0x80000000L
+//SQ_WAVE_IB_DBG1
+#define SQ_WAVE_IB_DBG1__IXNACK__SHIFT 0x0
+#define SQ_WAVE_IB_DBG1__XNACK__SHIFT 0x1
+#define SQ_WAVE_IB_DBG1__TA_NEED_RESET__SHIFT 0x2
+#define SQ_WAVE_IB_DBG1__XCNT__SHIFT 0x4
+#define SQ_WAVE_IB_DBG1__QCNT__SHIFT 0xb
+#define SQ_WAVE_IB_DBG1__RCNT__SHIFT 0x12
+#define SQ_WAVE_IB_DBG1__MISC_CNT__SHIFT 0x19
+#define SQ_WAVE_IB_DBG1__IXNACK_MASK 0x00000001L
+#define SQ_WAVE_IB_DBG1__XNACK_MASK 0x00000002L
+#define SQ_WAVE_IB_DBG1__TA_NEED_RESET_MASK 0x00000004L
+#define SQ_WAVE_IB_DBG1__XCNT_MASK 0x000001F0L
+#define SQ_WAVE_IB_DBG1__QCNT_MASK 0x0000F800L
+#define SQ_WAVE_IB_DBG1__RCNT_MASK 0x007C0000L
+#define SQ_WAVE_IB_DBG1__MISC_CNT_MASK 0xFE000000L
+//SQ_WAVE_FLUSH_IB
+#define SQ_WAVE_FLUSH_IB__UNUSED__SHIFT 0x0
+#define SQ_WAVE_FLUSH_IB__UNUSED_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP0
+#define SQ_WAVE_TTMP0__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP0__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP1
+#define SQ_WAVE_TTMP1__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP1__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP2
+#define SQ_WAVE_TTMP2__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP2__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP3
+#define SQ_WAVE_TTMP3__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP3__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP4
+#define SQ_WAVE_TTMP4__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP4__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP5
+#define SQ_WAVE_TTMP5__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP5__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP6
+#define SQ_WAVE_TTMP6__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP6__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP7
+#define SQ_WAVE_TTMP7__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP7__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP8
+#define SQ_WAVE_TTMP8__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP8__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP9
+#define SQ_WAVE_TTMP9__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP9__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP10
+#define SQ_WAVE_TTMP10__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP10__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP11
+#define SQ_WAVE_TTMP11__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP11__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP12
+#define SQ_WAVE_TTMP12__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP12__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP13
+#define SQ_WAVE_TTMP13__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP13__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP14
+#define SQ_WAVE_TTMP14__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP14__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP15
+#define SQ_WAVE_TTMP15__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP15__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_M0
+#define SQ_WAVE_M0__M0__SHIFT 0x0
+#define SQ_WAVE_M0__M0_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_LO
+#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x0
+#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_HI
+#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x0
+#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xFFFFFFFFL
+//SQ_INTERRUPT_WORD_AUTO_CTXID
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 0x1
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 0x2
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 0x3
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 0x4
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 0x5
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 0x6
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 0x7
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 0x18
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 0x1a
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x0000001L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x0000002L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x0000004L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x0000008L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x0000010L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x0000020L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x0000040L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x0000080L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x0000100L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x3000000L
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0xC000000L
+//SQ_INTERRUPT_WORD_AUTO_HI
+#define SQ_INTERRUPT_WORD_AUTO_HI__SE_ID__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_AUTO_HI__ENCODING__SHIFT 0xa
+#define SQ_INTERRUPT_WORD_AUTO_HI__SE_ID_MASK 0x300L
+#define SQ_INTERRUPT_WORD_AUTO_HI__ENCODING_MASK 0xC00L
+//SQ_INTERRUPT_WORD_AUTO_LO
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_AUTO_LO__WLT__SHIFT 0x1
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_BUF_FULL__SHIFT 0x2
+#define SQ_INTERRUPT_WORD_AUTO_LO__REG_TIMESTAMP__SHIFT 0x3
+#define SQ_INTERRUPT_WORD_AUTO_LO__CMD_TIMESTAMP__SHIFT 0x4
+#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_CMD_OVERFLOW__SHIFT 0x5
+#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_REG_OVERFLOW__SHIFT 0x6
+#define SQ_INTERRUPT_WORD_AUTO_LO__IMMED_OVERFLOW__SHIFT 0x7
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_UTC_ERROR__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_MASK 0x001L
+#define SQ_INTERRUPT_WORD_AUTO_LO__WLT_MASK 0x002L
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_BUF_FULL_MASK 0x004L
+#define SQ_INTERRUPT_WORD_AUTO_LO__REG_TIMESTAMP_MASK 0x008L
+#define SQ_INTERRUPT_WORD_AUTO_LO__CMD_TIMESTAMP_MASK 0x010L
+#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_CMD_OVERFLOW_MASK 0x020L
+#define SQ_INTERRUPT_WORD_AUTO_LO__HOST_REG_OVERFLOW_MASK 0x040L
+#define SQ_INTERRUPT_WORD_AUTO_LO__IMMED_OVERFLOW_MASK 0x080L
+#define SQ_INTERRUPT_WORD_AUTO_LO__THREAD_TRACE_UTC_ERROR_MASK 0x100L
+//SQ_INTERRUPT_WORD_CMN_CTXID
+#define SQ_INTERRUPT_WORD_CMN_CTXID__SE_ID__SHIFT 0x18
+#define SQ_INTERRUPT_WORD_CMN_CTXID__ENCODING__SHIFT 0x1a
+#define SQ_INTERRUPT_WORD_CMN_CTXID__SE_ID_MASK 0x3000000L
+#define SQ_INTERRUPT_WORD_CMN_CTXID__ENCODING_MASK 0xC000000L
+//SQ_INTERRUPT_WORD_CMN_HI
+#define SQ_INTERRUPT_WORD_CMN_HI__SE_ID__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_CMN_HI__ENCODING__SHIFT 0xa
+#define SQ_INTERRUPT_WORD_CMN_HI__SE_ID_MASK 0x300L
+#define SQ_INTERRUPT_WORD_CMN_HI__ENCODING_MASK 0xC00L
+//SQ_INTERRUPT_WORD_WAVE_CTXID
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 0xc
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 0xd
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 0xe
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 0x12
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 0x14
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 0x18
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 0x1a
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x0000FFFL
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x0001000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x0002000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x003C000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x00C0000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x0F00000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x3000000L
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0xC000000L
+//SQ_INTERRUPT_WORD_WAVE_HI
+#define SQ_INTERRUPT_WORD_WAVE_HI__CU_ID__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_WAVE_HI__VM_ID__SHIFT 0x4
+#define SQ_INTERRUPT_WORD_WAVE_HI__SE_ID__SHIFT 0x8
+#define SQ_INTERRUPT_WORD_WAVE_HI__ENCODING__SHIFT 0xa
+#define SQ_INTERRUPT_WORD_WAVE_HI__CU_ID_MASK 0x00FL
+#define SQ_INTERRUPT_WORD_WAVE_HI__VM_ID_MASK 0x0F0L
+#define SQ_INTERRUPT_WORD_WAVE_HI__SE_ID_MASK 0x300L
+#define SQ_INTERRUPT_WORD_WAVE_HI__ENCODING_MASK 0xC00L
+//SQ_INTERRUPT_WORD_WAVE_LO
+#define SQ_INTERRUPT_WORD_WAVE_LO__DATA__SHIFT 0x0
+#define SQ_INTERRUPT_WORD_WAVE_LO__SH_ID__SHIFT 0x18
+#define SQ_INTERRUPT_WORD_WAVE_LO__PRIV__SHIFT 0x19
+#define SQ_INTERRUPT_WORD_WAVE_LO__WAVE_ID__SHIFT 0x1a
+#define SQ_INTERRUPT_WORD_WAVE_LO__SIMD_ID__SHIFT 0x1e
+#define SQ_INTERRUPT_WORD_WAVE_LO__DATA_MASK 0x00FFFFFFL
+#define SQ_INTERRUPT_WORD_WAVE_LO__SH_ID_MASK 0x01000000L
+#define SQ_INTERRUPT_WORD_WAVE_LO__PRIV_MASK 0x02000000L
+#define SQ_INTERRUPT_WORD_WAVE_LO__WAVE_ID_MASK 0x3C000000L
+#define SQ_INTERRUPT_WORD_WAVE_LO__SIMD_ID_MASK 0xC0000000L
+
+
+// addressBlock: didtind
+//DIDT_SQ_CTRL0
+#define DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
+#define DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT 0x1
+#define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
+#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
+#define DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
+#define DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
+#define DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
+#define DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
+#define DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
+#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
+#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
+#define DIDT_SQ_CTRL0__DIDT_RLC_FORCE_STALL_EN__SHIFT 0x1b
+#define DIDT_SQ_CTRL0__DIDT_RLC_STALL_LEVEL_SEL__SHIFT 0x1c
+#define DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
+#define DIDT_SQ_CTRL0__PHASE_OFFSET_MASK 0x00000006L
+#define DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
+#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
+#define DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
+#define DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
+#define DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
+#define DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
+#define DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
+#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
+#define DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
+#define DIDT_SQ_CTRL0__DIDT_RLC_FORCE_STALL_EN_MASK 0x08000000L
+#define DIDT_SQ_CTRL0__DIDT_RLC_STALL_LEVEL_SEL_MASK 0x10000000L
+//DIDT_SQ_CTRL2
+#define DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
+#define DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
+#define DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
+#define DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
+#define DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
+#define DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
+//DIDT_SQ_STALL_CTRL
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
+#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
+#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
+#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
+#define DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
+//DIDT_SQ_TUNING_CTRL
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
+//DIDT_SQ_STALL_AUTO_RELEASE_CTRL
+#define DIDT_SQ_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
+#define DIDT_SQ_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
+//DIDT_SQ_CTRL3
+#define DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
+#define DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
+#define DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT 0x2
+#define DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
+#define DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
+#define DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
+#define DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
+#define DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
+#define DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
+#define DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
+#define DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
+#define DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
+#define DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
+#define DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
+#define DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
+#define DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
+#define DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
+#define DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
+#define DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
+#define DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
+#define DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
+#define DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
+#define DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
+//DIDT_SQ_STALL_PATTERN_1_2
+#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_SQ_STALL_PATTERN_3_4
+#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_SQ_STALL_PATTERN_5_6
+#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_SQ_STALL_PATTERN_7
+#define DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_SQ_MPD_SCALE_FACTOR
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL1__SHIFT 0x0
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL2__SHIFT 0x4
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL3__SHIFT 0x8
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL4__SHIFT 0xc
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL0__SHIFT 0x10
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL1__SHIFT 0x14
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL2__SHIFT 0x18
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL3__SHIFT 0x1c
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL1_MASK 0x0000000FL
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL2_MASK 0x000000F0L
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL3_MASK 0x00000F00L
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL4_MASK 0x0000F000L
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL0_MASK 0x000F0000L
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL1_MASK 0x00F00000L
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL2_MASK 0x0F000000L
+#define DIDT_SQ_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL3_MASK 0xF0000000L
+//DIDT_SQ_THROTTLE_CNTL0
+#define DIDT_SQ_THROTTLE_CNTL0__DIDT_THROTTLE_CNTL_EN__SHIFT 0x0
+#define DIDT_SQ_THROTTLE_CNTL0__DIDT_STALL_CNTL_SEL__SHIFT 0x1
+#define DIDT_SQ_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_HI__SHIFT 0x2
+#define DIDT_SQ_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_LO__SHIFT 0xd
+#define DIDT_SQ_THROTTLE_CNTL0__DIDT_THROTTLE_CNTL_EN_MASK 0x00000001L
+#define DIDT_SQ_THROTTLE_CNTL0__DIDT_STALL_CNTL_SEL_MASK 0x00000002L
+#define DIDT_SQ_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_HI_MASK 0x00001FFCL
+#define DIDT_SQ_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_LO_MASK 0x00FFE000L
+//DIDT_SQ_THROTTLE_CNTL1
+#define DIDT_SQ_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_HI__SHIFT 0x0
+#define DIDT_SQ_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_HI__SHIFT 0x5
+#define DIDT_SQ_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_LO__SHIFT 0xa
+#define DIDT_SQ_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_LO__SHIFT 0xf
+#define DIDT_SQ_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_HI_MASK 0x0000001FL
+#define DIDT_SQ_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_HI_MASK 0x000003E0L
+#define DIDT_SQ_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_LO_MASK 0x00007C00L
+#define DIDT_SQ_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_LO_MASK 0x000F8000L
+//DIDT_SQ_THROTTLE_CNTL_STATUS
+#define DIDT_SQ_THROTTLE_CNTL_STATUS__DIDT_THROTTLE_CNTL_FSM_STATE__SHIFT 0x0
+#define DIDT_SQ_THROTTLE_CNTL_STATUS__DIDT_THROTTLE_CNTL_FSM_STATE_MASK 0x00000003L
+//DIDT_SQ_WEIGHT0_3
+#define DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT 0x0
+#define DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT 0x8
+#define DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT 0x10
+#define DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT 0x18
+#define DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
+#define DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
+#define DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
+#define DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
+//DIDT_SQ_WEIGHT4_7
+#define DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT 0x0
+#define DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT 0x8
+#define DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT 0x10
+#define DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT 0x18
+#define DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
+#define DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
+#define DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
+#define DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
+//DIDT_SQ_WEIGHT8_11
+#define DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT 0x0
+#define DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT 0x8
+#define DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT 0x10
+#define DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT 0x18
+#define DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
+#define DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
+#define DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
+#define DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
+//DIDT_SQ_EDC_CTRL
+#define DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
+#define DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
+#define DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
+#define DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
+#define DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
+#define DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
+#define DIDT_SQ_EDC_CTRL__EDC_LEVEL_MODE_SEL__SHIFT 0x17
+#define DIDT_SQ_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
+#define DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
+#define DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
+#define DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
+#define DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
+#define DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
+#define DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
+#define DIDT_SQ_EDC_CTRL__EDC_LEVEL_MODE_SEL_MASK 0x00800000L
+//DIDT_SQ_THROTTLE_CTRL
+#define DIDT_SQ_THROTTLE_CTRL__PCC_STALL_EN__SHIFT 0x1
+#define DIDT_SQ_THROTTLE_CTRL__PCC_STALL_EN_MASK 0x00000002L
+//DIDT_SQ_EDC_STALL_PATTERN_1_2
+#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_SQ_EDC_STALL_PATTERN_3_4
+#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_SQ_EDC_STALL_PATTERN_5_6
+#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_SQ_EDC_STALL_PATTERN_7
+#define DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_SQ_EDC_STALL_DELAY_1
+#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT 0x0
+#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT 0x7
+#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT 0xe
+#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT 0x15
+#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK 0x0000007FL
+#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK 0x00003F80L
+#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK 0x001FC000L
+#define DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK 0x0FE00000L
+//DIDT_SQ_EDC_STALL_DELAY_2
+#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT 0x0
+#define DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK 0x0000007FL
+//DIDT_DB_CTRL0
+#define DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
+#define DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT 0x1
+#define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
+#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
+#define DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
+#define DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
+#define DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
+#define DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
+#define DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
+#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
+#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
+#define DIDT_DB_CTRL0__DIDT_RLC_FORCE_STALL_EN__SHIFT 0x1b
+#define DIDT_DB_CTRL0__DIDT_RLC_STALL_LEVEL_SEL__SHIFT 0x1c
+#define DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
+#define DIDT_DB_CTRL0__PHASE_OFFSET_MASK 0x00000006L
+#define DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
+#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
+#define DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
+#define DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
+#define DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
+#define DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
+#define DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
+#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
+#define DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
+#define DIDT_DB_CTRL0__DIDT_RLC_FORCE_STALL_EN_MASK 0x08000000L
+#define DIDT_DB_CTRL0__DIDT_RLC_STALL_LEVEL_SEL_MASK 0x10000000L
+//DIDT_DB_CTRL2
+#define DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
+#define DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
+#define DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
+#define DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
+#define DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
+#define DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
+//DIDT_DB_STALL_CTRL
+#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
+#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
+#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
+#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
+#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
+#define DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
+#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
+#define DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
+//DIDT_DB_TUNING_CTRL
+#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
+#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
+#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
+#define DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
+//DIDT_DB_STALL_AUTO_RELEASE_CTRL
+#define DIDT_DB_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
+#define DIDT_DB_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
+//DIDT_DB_CTRL3
+#define DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
+#define DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
+#define DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT 0x2
+#define DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
+#define DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
+#define DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
+#define DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
+#define DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
+#define DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
+#define DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
+#define DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
+#define DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
+#define DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
+#define DIDT_DB_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
+#define DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
+#define DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
+#define DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
+#define DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
+#define DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
+#define DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
+#define DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
+#define DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
+#define DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
+//DIDT_DB_STALL_PATTERN_1_2
+#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_DB_STALL_PATTERN_3_4
+#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_DB_STALL_PATTERN_5_6
+#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_DB_STALL_PATTERN_7
+#define DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_DB_MPD_SCALE_FACTOR
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL1__SHIFT 0x0
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL2__SHIFT 0x4
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL3__SHIFT 0x8
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL4__SHIFT 0xc
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL0__SHIFT 0x10
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL1__SHIFT 0x14
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL2__SHIFT 0x18
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL3__SHIFT 0x1c
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL1_MASK 0x0000000FL
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL2_MASK 0x000000F0L
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL3_MASK 0x00000F00L
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL4_MASK 0x0000F000L
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL0_MASK 0x000F0000L
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL1_MASK 0x00F00000L
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL2_MASK 0x0F000000L
+#define DIDT_DB_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL3_MASK 0xF0000000L
+//DIDT_DB_THROTTLE_CNTL0
+#define DIDT_DB_THROTTLE_CNTL0__DIDT_THROTTLE_CNTL_EN__SHIFT 0x0
+#define DIDT_DB_THROTTLE_CNTL0__DIDT_STALL_CNTL_SEL__SHIFT 0x1
+#define DIDT_DB_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_HI__SHIFT 0x2
+#define DIDT_DB_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_LO__SHIFT 0xd
+#define DIDT_DB_THROTTLE_CNTL0__DIDT_THROTTLE_CNTL_EN_MASK 0x00000001L
+#define DIDT_DB_THROTTLE_CNTL0__DIDT_STALL_CNTL_SEL_MASK 0x00000002L
+#define DIDT_DB_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_HI_MASK 0x00001FFCL
+#define DIDT_DB_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_LO_MASK 0x00FFE000L
+//DIDT_DB_THROTTLE_CNTL1
+#define DIDT_DB_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_HI__SHIFT 0x0
+#define DIDT_DB_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_HI__SHIFT 0x5
+#define DIDT_DB_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_LO__SHIFT 0xa
+#define DIDT_DB_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_LO__SHIFT 0xf
+#define DIDT_DB_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_HI_MASK 0x0000001FL
+#define DIDT_DB_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_HI_MASK 0x000003E0L
+#define DIDT_DB_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_LO_MASK 0x00007C00L
+#define DIDT_DB_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_LO_MASK 0x000F8000L
+//DIDT_DB_THROTTLE_CNTL_STATUS
+#define DIDT_DB_THROTTLE_CNTL_STATUS__DIDT_THROTTLE_CNTL_FSM_STATE__SHIFT 0x0
+#define DIDT_DB_THROTTLE_CNTL_STATUS__DIDT_THROTTLE_CNTL_FSM_STATE_MASK 0x00000003L
+//DIDT_DB_WEIGHT0_3
+#define DIDT_DB_WEIGHT0_3__WEIGHT0__SHIFT 0x0
+#define DIDT_DB_WEIGHT0_3__WEIGHT1__SHIFT 0x8
+#define DIDT_DB_WEIGHT0_3__WEIGHT2__SHIFT 0x10
+#define DIDT_DB_WEIGHT0_3__WEIGHT3__SHIFT 0x18
+#define DIDT_DB_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
+#define DIDT_DB_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
+#define DIDT_DB_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
+#define DIDT_DB_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
+//DIDT_DB_WEIGHT4_7
+#define DIDT_DB_WEIGHT4_7__WEIGHT4__SHIFT 0x0
+#define DIDT_DB_WEIGHT4_7__WEIGHT5__SHIFT 0x8
+#define DIDT_DB_WEIGHT4_7__WEIGHT6__SHIFT 0x10
+#define DIDT_DB_WEIGHT4_7__WEIGHT7__SHIFT 0x18
+#define DIDT_DB_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
+#define DIDT_DB_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
+#define DIDT_DB_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
+#define DIDT_DB_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
+//DIDT_DB_WEIGHT8_11
+#define DIDT_DB_WEIGHT8_11__WEIGHT8__SHIFT 0x0
+#define DIDT_DB_WEIGHT8_11__WEIGHT9__SHIFT 0x8
+#define DIDT_DB_WEIGHT8_11__WEIGHT10__SHIFT 0x10
+#define DIDT_DB_WEIGHT8_11__WEIGHT11__SHIFT 0x18
+#define DIDT_DB_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
+#define DIDT_DB_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
+#define DIDT_DB_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
+#define DIDT_DB_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
+//DIDT_DB_EDC_CTRL
+#define DIDT_DB_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define DIDT_DB_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_DB_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define DIDT_DB_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_DB_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
+#define DIDT_DB_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
+#define DIDT_DB_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
+#define DIDT_DB_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
+#define DIDT_DB_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
+#define DIDT_DB_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
+#define DIDT_DB_EDC_CTRL__EDC_LEVEL_MODE_SEL__SHIFT 0x17
+#define DIDT_DB_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define DIDT_DB_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_DB_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define DIDT_DB_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
+#define DIDT_DB_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
+#define DIDT_DB_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
+#define DIDT_DB_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
+#define DIDT_DB_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
+#define DIDT_DB_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
+#define DIDT_DB_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
+#define DIDT_DB_EDC_CTRL__EDC_LEVEL_MODE_SEL_MASK 0x00800000L
+//DIDT_DB_THROTTLE_CTRL
+#define DIDT_DB_THROTTLE_CTRL__PCC_STALL_EN__SHIFT 0x1
+#define DIDT_DB_THROTTLE_CTRL__PCC_STALL_EN_MASK 0x00000002L
+//DIDT_DB_EDC_STALL_PATTERN_1_2
+#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_DB_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_DB_EDC_STALL_PATTERN_3_4
+#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_DB_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_DB_EDC_STALL_PATTERN_5_6
+#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_DB_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_DB_EDC_STALL_PATTERN_7
+#define DIDT_DB_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_DB_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_DB_EDC_STALL_DELAY_1
+#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB0__SHIFT 0x0
+#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB1__SHIFT 0x5
+#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB0_MASK 0x0000001FL
+#define DIDT_DB_EDC_STALL_DELAY_1__EDC_STALL_DELAY_DB1_MASK 0x000003E0L
+//DIDT_TD_CTRL0
+#define DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
+#define DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT 0x1
+#define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
+#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
+#define DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
+#define DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
+#define DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
+#define DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
+#define DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
+#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
+#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
+#define DIDT_TD_CTRL0__DIDT_RLC_FORCE_STALL_EN__SHIFT 0x1b
+#define DIDT_TD_CTRL0__DIDT_RLC_STALL_LEVEL_SEL__SHIFT 0x1c
+#define DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
+#define DIDT_TD_CTRL0__PHASE_OFFSET_MASK 0x00000006L
+#define DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
+#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
+#define DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
+#define DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
+#define DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
+#define DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
+#define DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
+#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
+#define DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
+#define DIDT_TD_CTRL0__DIDT_RLC_FORCE_STALL_EN_MASK 0x08000000L
+#define DIDT_TD_CTRL0__DIDT_RLC_STALL_LEVEL_SEL_MASK 0x10000000L
+//DIDT_TD_CTRL2
+#define DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
+#define DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
+#define DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
+#define DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
+#define DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
+#define DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
+//DIDT_TD_STALL_CTRL
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
+#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
+#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
+#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
+#define DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
+//DIDT_TD_TUNING_CTRL
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
+//DIDT_TD_STALL_AUTO_RELEASE_CTRL
+#define DIDT_TD_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
+#define DIDT_TD_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
+//DIDT_TD_CTRL3
+#define DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
+#define DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
+#define DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT 0x2
+#define DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
+#define DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
+#define DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
+#define DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
+#define DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
+#define DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
+#define DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
+#define DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
+#define DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
+#define DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
+#define DIDT_TD_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
+#define DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
+#define DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
+#define DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
+#define DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
+#define DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
+#define DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
+#define DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
+#define DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
+#define DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
+//DIDT_TD_STALL_PATTERN_1_2
+#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_TD_STALL_PATTERN_3_4
+#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_TD_STALL_PATTERN_5_6
+#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_TD_STALL_PATTERN_7
+#define DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_TD_MPD_SCALE_FACTOR
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL1__SHIFT 0x0
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL2__SHIFT 0x4
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL3__SHIFT 0x8
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL4__SHIFT 0xc
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL0__SHIFT 0x10
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL1__SHIFT 0x14
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL2__SHIFT 0x18
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL3__SHIFT 0x1c
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL1_MASK 0x0000000FL
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL2_MASK 0x000000F0L
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL3_MASK 0x00000F00L
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL4_MASK 0x0000F000L
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL0_MASK 0x000F0000L
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL1_MASK 0x00F00000L
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL2_MASK 0x0F000000L
+#define DIDT_TD_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL3_MASK 0xF0000000L
+//DIDT_TD_THROTTLE_CNTL0
+#define DIDT_TD_THROTTLE_CNTL0__DIDT_THROTTLE_CNTL_EN__SHIFT 0x0
+#define DIDT_TD_THROTTLE_CNTL0__DIDT_STALL_CNTL_SEL__SHIFT 0x1
+#define DIDT_TD_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_HI__SHIFT 0x2
+#define DIDT_TD_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_LO__SHIFT 0xd
+#define DIDT_TD_THROTTLE_CNTL0__DIDT_THROTTLE_CNTL_EN_MASK 0x00000001L
+#define DIDT_TD_THROTTLE_CNTL0__DIDT_STALL_CNTL_SEL_MASK 0x00000002L
+#define DIDT_TD_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_HI_MASK 0x00001FFCL
+#define DIDT_TD_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_LO_MASK 0x00FFE000L
+//DIDT_TD_THROTTLE_CNTL1
+#define DIDT_TD_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_HI__SHIFT 0x0
+#define DIDT_TD_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_HI__SHIFT 0x5
+#define DIDT_TD_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_LO__SHIFT 0xa
+#define DIDT_TD_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_LO__SHIFT 0xf
+#define DIDT_TD_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_HI_MASK 0x0000001FL
+#define DIDT_TD_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_HI_MASK 0x000003E0L
+#define DIDT_TD_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_LO_MASK 0x00007C00L
+#define DIDT_TD_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_LO_MASK 0x000F8000L
+//DIDT_TD_THROTTLE_CNTL_STATUS
+#define DIDT_TD_THROTTLE_CNTL_STATUS__DIDT_THROTTLE_CNTL_FSM_STATE__SHIFT 0x0
+#define DIDT_TD_THROTTLE_CNTL_STATUS__DIDT_THROTTLE_CNTL_FSM_STATE_MASK 0x00000003L
+//DIDT_TD_WEIGHT0_3
+#define DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT 0x0
+#define DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT 0x8
+#define DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT 0x10
+#define DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT 0x18
+#define DIDT_TD_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
+#define DIDT_TD_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
+#define DIDT_TD_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
+#define DIDT_TD_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
+//DIDT_TD_WEIGHT4_7
+#define DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT 0x0
+#define DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT 0x8
+#define DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT 0x10
+#define DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT 0x18
+#define DIDT_TD_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
+#define DIDT_TD_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
+#define DIDT_TD_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
+#define DIDT_TD_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
+//DIDT_TD_WEIGHT8_11
+#define DIDT_TD_WEIGHT8_11__WEIGHT8__SHIFT 0x0
+#define DIDT_TD_WEIGHT8_11__WEIGHT9__SHIFT 0x8
+#define DIDT_TD_WEIGHT8_11__WEIGHT10__SHIFT 0x10
+#define DIDT_TD_WEIGHT8_11__WEIGHT11__SHIFT 0x18
+#define DIDT_TD_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
+#define DIDT_TD_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
+#define DIDT_TD_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
+#define DIDT_TD_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
+//DIDT_TD_EDC_CTRL
+#define DIDT_TD_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
+#define DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
+#define DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
+#define DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
+#define DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
+#define DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
+#define DIDT_TD_EDC_CTRL__EDC_LEVEL_MODE_SEL__SHIFT 0x17
+#define DIDT_TD_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
+#define DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
+#define DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
+#define DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
+#define DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
+#define DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
+#define DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
+#define DIDT_TD_EDC_CTRL__EDC_LEVEL_MODE_SEL_MASK 0x00800000L
+//DIDT_TD_THROTTLE_CTRL
+#define DIDT_TD_THROTTLE_CTRL__PCC_STALL_EN__SHIFT 0x1
+#define DIDT_TD_THROTTLE_CTRL__PCC_STALL_EN_MASK 0x00000002L
+//DIDT_TD_EDC_STALL_PATTERN_1_2
+#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_TD_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_TD_EDC_STALL_PATTERN_3_4
+#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_TD_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_TD_EDC_STALL_PATTERN_5_6
+#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_TD_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_TD_EDC_STALL_PATTERN_7
+#define DIDT_TD_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_TD_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_TD_EDC_STALL_DELAY_1
+#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD0__SHIFT 0x0
+#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD1__SHIFT 0x7
+#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD2__SHIFT 0xe
+#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD3__SHIFT 0x15
+#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD0_MASK 0x0000007FL
+#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD1_MASK 0x00003F80L
+#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD2_MASK 0x001FC000L
+#define DIDT_TD_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TD3_MASK 0x0FE00000L
+//DIDT_TD_EDC_STALL_DELAY_2
+#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD4__SHIFT 0x0
+#define DIDT_TD_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TD4_MASK 0x0000007FL
+//DIDT_TCP_CTRL0
+#define DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT 0x0
+#define DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT 0x1
+#define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x3
+#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x4
+#define DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT 0x5
+#define DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT 0x6
+#define DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT 0x7
+#define DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT 0x8
+#define DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT 0x18
+#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT 0x19
+#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT 0x1a
+#define DIDT_TCP_CTRL0__DIDT_RLC_FORCE_STALL_EN__SHIFT 0x1b
+#define DIDT_TCP_CTRL0__DIDT_RLC_STALL_LEVEL_SEL__SHIFT 0x1c
+#define DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK 0x00000001L
+#define DIDT_TCP_CTRL0__PHASE_OFFSET_MASK 0x00000006L
+#define DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK 0x00000008L
+#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x00000010L
+#define DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK 0x00000020L
+#define DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK 0x00000040L
+#define DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK 0x00000080L
+#define DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK 0x00FFFF00L
+#define DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK 0x01000000L
+#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK 0x02000000L
+#define DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK 0x04000000L
+#define DIDT_TCP_CTRL0__DIDT_RLC_FORCE_STALL_EN_MASK 0x08000000L
+#define DIDT_TCP_CTRL0__DIDT_RLC_STALL_LEVEL_SEL_MASK 0x10000000L
+//DIDT_TCP_CTRL2
+#define DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT 0x0
+#define DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT 0x10
+#define DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT 0x1b
+#define DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK 0x00003FFFL
+#define DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK 0x03FF0000L
+#define DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK 0x78000000L
+//DIDT_TCP_STALL_CTRL
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x0
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x6
+#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0xc
+#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x12
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000003FL
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00000FC0L
+#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x0003F000L
+#define DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x00FC0000L
+//DIDT_TCP_TUNING_CTRL
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x0
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0xe
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00003FFFL
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x0FFFC000L
+//DIDT_TCP_STALL_AUTO_RELEASE_CTRL
+#define DIDT_TCP_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME__SHIFT 0x0
+#define DIDT_TCP_STALL_AUTO_RELEASE_CTRL__DIDT_STALL_AUTO_RELEASE_TIME_MASK 0x00FFFFFFL
+//DIDT_TCP_CTRL3
+#define DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT 0x0
+#define DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT 0x1
+#define DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT 0x2
+#define DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT 0x9
+#define DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0xe
+#define DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT 0x16
+#define DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT 0x17
+#define DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT 0x18
+#define DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT 0x19
+#define DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT 0x1b
+#define DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT 0x1c
+#define DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK 0x00000001L
+#define DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK 0x00000002L
+#define DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK 0x0000000CL
+#define DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
+#define DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK 0x00003E00L
+#define DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x003FC000L
+#define DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK 0x00400000L
+#define DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK 0x00800000L
+#define DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK 0x01000000L
+#define DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK 0x06000000L
+#define DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK 0x08000000L
+#define DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK 0x10000000L
+//DIDT_TCP_STALL_PATTERN_1_2
+#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_TCP_STALL_PATTERN_3_4
+#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_TCP_STALL_PATTERN_5_6
+#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_TCP_STALL_PATTERN_7
+#define DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_TCP_MPD_SCALE_FACTOR
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL1__SHIFT 0x0
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL2__SHIFT 0x4
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL3__SHIFT 0x8
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL4__SHIFT 0xc
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL0__SHIFT 0x10
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL1__SHIFT 0x14
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL2__SHIFT 0x18
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL3__SHIFT 0x1c
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL1_MASK 0x0000000FL
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL2_MASK 0x000000F0L
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL3_MASK 0x00000F00L
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_RATIO_SCALE_LEVEL4_MASK 0x0000F000L
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL0_MASK 0x000F0000L
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL1_MASK 0x00F00000L
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL2_MASK 0x0F000000L
+#define DIDT_TCP_MPD_SCALE_FACTOR__MPD_SCALE_LEVEL3_MASK 0xF0000000L
+//DIDT_TCP_THROTTLE_CNTL0
+#define DIDT_TCP_THROTTLE_CNTL0__DIDT_THROTTLE_CNTL_EN__SHIFT 0x0
+#define DIDT_TCP_THROTTLE_CNTL0__DIDT_STALL_CNTL_SEL__SHIFT 0x1
+#define DIDT_TCP_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_HI__SHIFT 0x2
+#define DIDT_TCP_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_LO__SHIFT 0xd
+#define DIDT_TCP_THROTTLE_CNTL0__DIDT_THROTTLE_CNTL_EN_MASK 0x00000001L
+#define DIDT_TCP_THROTTLE_CNTL0__DIDT_STALL_CNTL_SEL_MASK 0x00000002L
+#define DIDT_TCP_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_HI_MASK 0x00001FFCL
+#define DIDT_TCP_THROTTLE_CNTL0__DIDT_RELEASE_DELAY_LO_MASK 0x00FFE000L
+//DIDT_TCP_THROTTLE_CNTL1
+#define DIDT_TCP_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_HI__SHIFT 0x0
+#define DIDT_TCP_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_HI__SHIFT 0x5
+#define DIDT_TCP_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_LO__SHIFT 0xa
+#define DIDT_TCP_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_LO__SHIFT 0xf
+#define DIDT_TCP_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_HI_MASK 0x0000001FL
+#define DIDT_TCP_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_HI_MASK 0x000003E0L
+#define DIDT_TCP_THROTTLE_CNTL1__DIDT_BASE_RELEASE_ALLOWED_LO_MASK 0x00007C00L
+#define DIDT_TCP_THROTTLE_CNTL1__DIDT_INCR_RELEASE_ALLOWED_LO_MASK 0x000F8000L
+//DIDT_TCP_THROTTLE_CNTL_STATUS
+#define DIDT_TCP_THROTTLE_CNTL_STATUS__DIDT_THROTTLE_CNTL_FSM_STATE__SHIFT 0x0
+#define DIDT_TCP_THROTTLE_CNTL_STATUS__DIDT_THROTTLE_CNTL_FSM_STATE_MASK 0x00000003L
+//DIDT_TCP_WEIGHT0_3
+#define DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT 0x0
+#define DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT 0x8
+#define DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT 0x10
+#define DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT 0x18
+#define DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK 0x000000FFL
+#define DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK 0x0000FF00L
+#define DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK 0x00FF0000L
+#define DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK 0xFF000000L
+//DIDT_TCP_WEIGHT4_7
+#define DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT 0x0
+#define DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT 0x8
+#define DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT 0x10
+#define DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT 0x18
+#define DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK 0x000000FFL
+#define DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK 0x0000FF00L
+#define DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK 0x00FF0000L
+#define DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK 0xFF000000L
+//DIDT_TCP_WEIGHT8_11
+#define DIDT_TCP_WEIGHT8_11__WEIGHT8__SHIFT 0x0
+#define DIDT_TCP_WEIGHT8_11__WEIGHT9__SHIFT 0x8
+#define DIDT_TCP_WEIGHT8_11__WEIGHT10__SHIFT 0x10
+#define DIDT_TCP_WEIGHT8_11__WEIGHT11__SHIFT 0x18
+#define DIDT_TCP_WEIGHT8_11__WEIGHT8_MASK 0x000000FFL
+#define DIDT_TCP_WEIGHT8_11__WEIGHT9_MASK 0x0000FF00L
+#define DIDT_TCP_WEIGHT8_11__WEIGHT10_MASK 0x00FF0000L
+#define DIDT_TCP_WEIGHT8_11__WEIGHT11_MASK 0xFF000000L
+//DIDT_TCP_EDC_CTRL
+#define DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define DIDT_TCP_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_TCP_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define DIDT_TCP_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_TCP_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0x9
+#define DIDT_TCP_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0x11
+#define DIDT_TCP_EDC_CTRL__GC_EDC_EN__SHIFT 0x12
+#define DIDT_TCP_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT 0x13
+#define DIDT_TCP_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT 0x15
+#define DIDT_TCP_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT 0x16
+#define DIDT_TCP_EDC_CTRL__EDC_LEVEL_MODE_SEL__SHIFT 0x17
+#define DIDT_TCP_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define DIDT_TCP_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_TCP_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define DIDT_TCP_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000001F0L
+#define DIDT_TCP_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x0001FE00L
+#define DIDT_TCP_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00020000L
+#define DIDT_TCP_EDC_CTRL__GC_EDC_EN_MASK 0x00040000L
+#define DIDT_TCP_EDC_CTRL__GC_EDC_STALL_POLICY_MASK 0x00180000L
+#define DIDT_TCP_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK 0x00200000L
+#define DIDT_TCP_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK 0x00400000L
+#define DIDT_TCP_EDC_CTRL__EDC_LEVEL_MODE_SEL_MASK 0x00800000L
+//DIDT_TCP_THROTTLE_CTRL
+#define DIDT_TCP_THROTTLE_CTRL__PCC_STALL_EN__SHIFT 0x1
+#define DIDT_TCP_THROTTLE_CTRL__PCC_STALL_EN_MASK 0x00000002L
+//DIDT_TCP_EDC_STALL_PATTERN_1_2
+#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_TCP_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_TCP_EDC_STALL_PATTERN_3_4
+#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_TCP_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_TCP_EDC_STALL_PATTERN_5_6
+#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_TCP_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_TCP_EDC_STALL_PATTERN_7
+#define DIDT_TCP_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_TCP_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_TCP_EDC_STALL_DELAY_1
+#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP0__SHIFT 0x0
+#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP1__SHIFT 0x7
+#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP2__SHIFT 0xe
+#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP3__SHIFT 0x15
+#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP0_MASK 0x0000007FL
+#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP1_MASK 0x00003F80L
+#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP2_MASK 0x001FC000L
+#define DIDT_TCP_EDC_STALL_DELAY_1__EDC_STALL_DELAY_TCP3_MASK 0x0FE00000L
+//DIDT_TCP_EDC_STALL_DELAY_2
+#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP4__SHIFT 0x0
+#define DIDT_TCP_EDC_STALL_DELAY_2__EDC_STALL_DELAY_TCP4_MASK 0x0000007FL
+//DIDT_SQ_STALL_EVENT_COUNTER
+#define DIDT_SQ_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
+#define DIDT_SQ_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
+//DIDT_DB_STALL_EVENT_COUNTER
+#define DIDT_DB_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
+#define DIDT_DB_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
+//DIDT_TD_STALL_EVENT_COUNTER
+#define DIDT_TD_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
+#define DIDT_TD_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
+//DIDT_TCP_STALL_EVENT_COUNTER
+#define DIDT_TCP_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
+#define DIDT_TCP_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
+//DIDT_DBR_STALL_EVENT_COUNTER
+#define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0
+#define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL
+//DIDT_SQ_CTRL1
+#define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
+#define DIDT_SQ_CTRL1__MAX_POWER__SHIFT 0x10
+#define DIDT_SQ_CTRL1__MIN_POWER_MASK 0x0000FFFFL
+#define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xFFFF0000L
+//DIDT_SQ_EDC_THRESHOLD
+#define DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//DIDT_DB_CTRL1
+#define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
+#define DIDT_DB_CTRL1__MAX_POWER__SHIFT 0x10
+#define DIDT_DB_CTRL1__MIN_POWER_MASK 0x0000FFFFL
+#define DIDT_DB_CTRL1__MAX_POWER_MASK 0xFFFF0000L
+//DIDT_DB_EDC_THRESHOLD
+#define DIDT_DB_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define DIDT_DB_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//DIDT_TD_CTRL1
+#define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
+#define DIDT_TD_CTRL1__MAX_POWER__SHIFT 0x10
+#define DIDT_TD_CTRL1__MIN_POWER_MASK 0x0000FFFFL
+#define DIDT_TD_CTRL1__MAX_POWER_MASK 0xFFFF0000L
+//DIDT_TD_EDC_THRESHOLD
+#define DIDT_TD_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define DIDT_TD_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//DIDT_TCP_CTRL1
+#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
+#define DIDT_TCP_CTRL1__MAX_POWER__SHIFT 0x10
+#define DIDT_TCP_CTRL1__MIN_POWER_MASK 0x0000FFFFL
+#define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xFFFF0000L
+//DIDT_TCP_EDC_THRESHOLD
+#define DIDT_TCP_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define DIDT_TCP_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_offset.h
new file mode 100644
index 0000000..94325fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_offset.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _hdp_4_0_OFFSET_HEADER
+#define _hdp_4_0_OFFSET_HEADER
+
+
+
+// addressBlock: hdp_hdpdec
+// base address: 0x3c80
+#define mmHDP_MMHUB_TLVL 0x0000
+#define mmHDP_MMHUB_TLVL_BASE_IDX 0
+#define mmHDP_MMHUB_UNITID 0x0001
+#define mmHDP_MMHUB_UNITID_BASE_IDX 0
+#define mmHDP_NONSURFACE_BASE 0x0040
+#define mmHDP_NONSURFACE_BASE_BASE_IDX 0
+#define mmHDP_NONSURFACE_INFO 0x0041
+#define mmHDP_NONSURFACE_INFO_BASE_IDX 0
+#define mmHDP_NONSURFACE_BASE_HI 0x0042
+#define mmHDP_NONSURFACE_BASE_HI_BASE_IDX 0
+#define mmHDP_NONSURF_FLAGS 0x00c8
+#define mmHDP_NONSURF_FLAGS_BASE_IDX 0
+#define mmHDP_NONSURF_FLAGS_CLR 0x00c9
+#define mmHDP_NONSURF_FLAGS_CLR_BASE_IDX 0
+#define mmHDP_HOST_PATH_CNTL 0x00cc
+#define mmHDP_HOST_PATH_CNTL_BASE_IDX 0
+#define mmHDP_SW_SEMAPHORE 0x00cd
+#define mmHDP_SW_SEMAPHORE_BASE_IDX 0
+#define mmHDP_DEBUG0 0x00ce
+#define mmHDP_DEBUG0_BASE_IDX 0
+#define mmHDP_LAST_SURFACE_HIT 0x00d0
+#define mmHDP_LAST_SURFACE_HIT_BASE_IDX 0
+#define mmHDP_READ_CACHE_INVALIDATE 0x00d1
+#define mmHDP_READ_CACHE_INVALIDATE_BASE_IDX 0
+#define mmHDP_OUTSTANDING_REQ 0x00d2
+#define mmHDP_OUTSTANDING_REQ_BASE_IDX 0
+#define mmHDP_MISC_CNTL 0x00d3
+#define mmHDP_MISC_CNTL_BASE_IDX 0
+#define mmHDP_MEM_POWER_LS 0x00d4
+#define mmHDP_MEM_POWER_LS_BASE_IDX 0
+#define mmHDP_MMHUB_CNTL 0x00d5
+#define mmHDP_MMHUB_CNTL_BASE_IDX 0
+#define mmHDP_EDC_CNT 0x00d6
+#define mmHDP_EDC_CNT_BASE_IDX 0
+#define mmHDP_VERSION 0x00d7
+#define mmHDP_VERSION_BASE_IDX 0
+#define mmHDP_CLK_CNTL 0x00d8
+#define mmHDP_CLK_CNTL_BASE_IDX 0
+#define mmHDP_MEMIO_CNTL 0x00f6
+#define mmHDP_MEMIO_CNTL_BASE_IDX 0
+#define mmHDP_MEMIO_ADDR 0x00f7
+#define mmHDP_MEMIO_ADDR_BASE_IDX 0
+#define mmHDP_MEMIO_STATUS 0x00f8
+#define mmHDP_MEMIO_STATUS_BASE_IDX 0
+#define mmHDP_MEMIO_WR_DATA 0x00f9
+#define mmHDP_MEMIO_WR_DATA_BASE_IDX 0
+#define mmHDP_MEMIO_RD_DATA 0x00fa
+#define mmHDP_MEMIO_RD_DATA_BASE_IDX 0
+#define mmHDP_XDP_DIRECT2HDP_FIRST 0x0100
+#define mmHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0
+#define mmHDP_XDP_D2H_FLUSH 0x0101
+#define mmHDP_XDP_D2H_FLUSH_BASE_IDX 0
+#define mmHDP_XDP_D2H_BAR_UPDATE 0x0102
+#define mmHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_3 0x0103
+#define mmHDP_XDP_D2H_RSVD_3_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_4 0x0104
+#define mmHDP_XDP_D2H_RSVD_4_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_5 0x0105
+#define mmHDP_XDP_D2H_RSVD_5_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_6 0x0106
+#define mmHDP_XDP_D2H_RSVD_6_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_7 0x0107
+#define mmHDP_XDP_D2H_RSVD_7_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_8 0x0108
+#define mmHDP_XDP_D2H_RSVD_8_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_9 0x0109
+#define mmHDP_XDP_D2H_RSVD_9_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_10 0x010a
+#define mmHDP_XDP_D2H_RSVD_10_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_11 0x010b
+#define mmHDP_XDP_D2H_RSVD_11_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_12 0x010c
+#define mmHDP_XDP_D2H_RSVD_12_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_13 0x010d
+#define mmHDP_XDP_D2H_RSVD_13_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_14 0x010e
+#define mmHDP_XDP_D2H_RSVD_14_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_15 0x010f
+#define mmHDP_XDP_D2H_RSVD_15_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_16 0x0110
+#define mmHDP_XDP_D2H_RSVD_16_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_17 0x0111
+#define mmHDP_XDP_D2H_RSVD_17_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_18 0x0112
+#define mmHDP_XDP_D2H_RSVD_18_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_19 0x0113
+#define mmHDP_XDP_D2H_RSVD_19_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_20 0x0114
+#define mmHDP_XDP_D2H_RSVD_20_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_21 0x0115
+#define mmHDP_XDP_D2H_RSVD_21_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_22 0x0116
+#define mmHDP_XDP_D2H_RSVD_22_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_23 0x0117
+#define mmHDP_XDP_D2H_RSVD_23_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_24 0x0118
+#define mmHDP_XDP_D2H_RSVD_24_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_25 0x0119
+#define mmHDP_XDP_D2H_RSVD_25_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_26 0x011a
+#define mmHDP_XDP_D2H_RSVD_26_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_27 0x011b
+#define mmHDP_XDP_D2H_RSVD_27_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_28 0x011c
+#define mmHDP_XDP_D2H_RSVD_28_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_29 0x011d
+#define mmHDP_XDP_D2H_RSVD_29_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_30 0x011e
+#define mmHDP_XDP_D2H_RSVD_30_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_31 0x011f
+#define mmHDP_XDP_D2H_RSVD_31_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_32 0x0120
+#define mmHDP_XDP_D2H_RSVD_32_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_33 0x0121
+#define mmHDP_XDP_D2H_RSVD_33_BASE_IDX 0
+#define mmHDP_XDP_D2H_RSVD_34 0x0122
+#define mmHDP_XDP_D2H_RSVD_34_BASE_IDX 0
+#define mmHDP_XDP_DIRECT2HDP_LAST 0x0123
+#define mmHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0
+#define mmHDP_XDP_P2P_BAR_CFG 0x0124
+#define mmHDP_XDP_P2P_BAR_CFG_BASE_IDX 0
+#define mmHDP_XDP_P2P_MBX_OFFSET 0x0125
+#define mmHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0
+#define mmHDP_XDP_P2P_MBX_ADDR0 0x0126
+#define mmHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0
+#define mmHDP_XDP_P2P_MBX_ADDR1 0x0127
+#define mmHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0
+#define mmHDP_XDP_P2P_MBX_ADDR2 0x0128
+#define mmHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0
+#define mmHDP_XDP_P2P_MBX_ADDR3 0x0129
+#define mmHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0
+#define mmHDP_XDP_P2P_MBX_ADDR4 0x012a
+#define mmHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0
+#define mmHDP_XDP_P2P_MBX_ADDR5 0x012b
+#define mmHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0
+#define mmHDP_XDP_P2P_MBX_ADDR6 0x012c
+#define mmHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0
+#define mmHDP_XDP_HDP_MBX_MC_CFG 0x012d
+#define mmHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0
+#define mmHDP_XDP_HDP_MC_CFG 0x012e
+#define mmHDP_XDP_HDP_MC_CFG_BASE_IDX 0
+#define mmHDP_XDP_HST_CFG 0x012f
+#define mmHDP_XDP_HST_CFG_BASE_IDX 0
+#define mmHDP_XDP_HDP_IPH_CFG 0x0131
+#define mmHDP_XDP_HDP_IPH_CFG_BASE_IDX 0
+#define mmHDP_XDP_P2P_BAR0 0x0134
+#define mmHDP_XDP_P2P_BAR0_BASE_IDX 0
+#define mmHDP_XDP_P2P_BAR1 0x0135
+#define mmHDP_XDP_P2P_BAR1_BASE_IDX 0
+#define mmHDP_XDP_P2P_BAR2 0x0136
+#define mmHDP_XDP_P2P_BAR2_BASE_IDX 0
+#define mmHDP_XDP_P2P_BAR3 0x0137
+#define mmHDP_XDP_P2P_BAR3_BASE_IDX 0
+#define mmHDP_XDP_P2P_BAR4 0x0138
+#define mmHDP_XDP_P2P_BAR4_BASE_IDX 0
+#define mmHDP_XDP_P2P_BAR5 0x0139
+#define mmHDP_XDP_P2P_BAR5_BASE_IDX 0
+#define mmHDP_XDP_P2P_BAR6 0x013a
+#define mmHDP_XDP_P2P_BAR6_BASE_IDX 0
+#define mmHDP_XDP_P2P_BAR7 0x013b
+#define mmHDP_XDP_P2P_BAR7_BASE_IDX 0
+#define mmHDP_XDP_FLUSH_ARMED_STS 0x013c
+#define mmHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0
+#define mmHDP_XDP_FLUSH_CNTR0_STS 0x013d
+#define mmHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0
+#define mmHDP_XDP_BUSY_STS 0x013e
+#define mmHDP_XDP_BUSY_STS_BASE_IDX 0
+#define mmHDP_XDP_STICKY 0x013f
+#define mmHDP_XDP_STICKY_BASE_IDX 0
+#define mmHDP_XDP_CHKN 0x0140
+#define mmHDP_XDP_CHKN_BASE_IDX 0
+#define mmHDP_XDP_BARS_ADDR_39_36 0x0144
+#define mmHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0
+#define mmHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145
+#define mmHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define mmHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148
+#define mmHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmHDP_XDP_MMHUB_ERROR 0x0149
+#define mmHDP_XDP_MMHUB_ERROR_BASE_IDX 0
+
+#endif
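
The offsets in the block above are dword indices relative to the HDP address block ("base address: 0x3c80"), and each *_BASE_IDX macro selects an entry in a per-IP base-address table that the driver resolves at runtime. A minimal sketch of that lookup, assuming a hypothetical single-entry hdp_reg_base[] table in place of the driver's real per-ASIC reg_offset structures:

#include <stdint.h>

/* Hypothetical base-address table; entry 0 matches the 0x3c80 base noted
 * at the top of this address block. */
static const uint32_t hdp_reg_base[] = { 0x3c80 };

/* Combine a register offset with its BASE_IDX entry to form the absolute
 * dword register index. */
static inline uint32_t hdp_reg_index(uint32_t offset, uint32_t base_idx)
{
	return hdp_reg_base[base_idx] + offset;
}

/* e.g. hdp_reg_index(mmHDP_MISC_CNTL, mmHDP_MISC_CNTL_BASE_IDX) == 0x3d53 */
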
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_sh_mask.h
new file mode 100644
index 0000000..25e2869
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_sh_mask.h
@@ -0,0 +1,601 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _hdp_4_0_SH_MASK_HEADER
+#define _hdp_4_0_SH_MASK_HEADER
+
+
+// addressBlock: hdp_hdpdec
+//HDP_MMHUB_TLVL
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT 0x0
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL__SHIFT 0x4
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL__SHIFT 0x8
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL__SHIFT 0xc
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL__SHIFT 0x10
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK 0x00000007L
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL_MASK 0x00000070L
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL_MASK 0x00000700L
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL_MASK 0x00007000L
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL_MASK 0x00070000L
+//HDP_MMHUB_UNITID
+#define HDP_MMHUB_UNITID__HDP_UNITID__SHIFT 0x0
+#define HDP_MMHUB_UNITID__XDP_UNITID__SHIFT 0x8
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID__SHIFT 0x10
+#define HDP_MMHUB_UNITID__HDP_UNITID_MASK 0x0000003FL
+#define HDP_MMHUB_UNITID__XDP_UNITID_MASK 0x00003F00L
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID_MASK 0x003F0000L
+//HDP_NONSURFACE_BASE
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8__SHIFT 0x0
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8_MASK 0xFFFFFFFFL
+//HDP_NONSURFACE_INFO
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP__SHIFT 0x4
+#define HDP_NONSURFACE_INFO__NONSURF_VMID__SHIFT 0x8
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP_MASK 0x00000030L
+#define HDP_NONSURFACE_INFO__NONSURF_VMID_MASK 0x00000F00L
+//HDP_NONSURFACE_BASE_HI
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40__SHIFT 0x0
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40_MASK 0x000000FFL
+//HDP_NONSURF_FLAGS
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x0
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x1
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
+//HDP_NONSURF_FLAGS_CLR
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_HOST_PATH_CNTL
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x9
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0xb
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x12
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x13
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x15
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN__SHIFT 0x16
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x1d
+#define HDP_HOST_PATH_CNTL__WRITE_THROUGH_CACHE_DIS__SHIFT 0x1e
+#define HDP_HOST_PATH_CNTL__LIN_RD_CACHE_DIS__SHIFT 0x1f
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00040000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN_MASK 0x00400000L
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
+#define HDP_HOST_PATH_CNTL__WRITE_THROUGH_CACHE_DIS_MASK 0x40000000L
+#define HDP_HOST_PATH_CNTL__LIN_RD_CACHE_DIS_MASK 0x80000000L
+//HDP_SW_SEMAPHORE
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x0
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xFFFFFFFFL
+//HDP_DEBUG0
+#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x0
+#define HDP_DEBUG0__HDP_DEBUG_MASK 0xFFFFFFFFL
+//HDP_LAST_SURFACE_HIT
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x0
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x00000003L
+//HDP_READ_CACHE_INVALIDATE
+#define HDP_READ_CACHE_INVALIDATE__READ_CACHE_INVALIDATE__SHIFT 0x0
+#define HDP_READ_CACHE_INVALIDATE__READ_CACHE_INVALIDATE_MASK 0x00000001L
+//HDP_OUTSTANDING_REQ
+#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x0
+#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x8
+#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000FFL
+#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000FF00L
+//HDP_MISC_CNTL
+#define HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE__SHIFT 0x0
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL__SHIFT 0x2
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x5
+#define HDP_MISC_CNTL__MULTIPLE_READS__SHIFT 0x6
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0xb
+#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x15
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY__SHIFT 0x17
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE__SHIFT 0x18
+#define HDP_MISC_CNTL__ALL_FUNCTION_CACHELINE_INVALID__SHIFT 0x19
+#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1a
+#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1b
+#define HDP_MISC_CNTL__VARIABLE_CACHELINE_SIZE__SHIFT 0x1c
+#define HDP_MISC_CNTL__ADAPTIVE_CACHELINE_SIZE__SHIFT 0x1d
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE__SHIFT 0x1e
+#define HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK 0x00000001L
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL_MASK 0x0000000CL
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
+#define HDP_MISC_CNTL__MULTIPLE_READS_MASK 0x00000040L
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
+#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY_MASK 0x00800000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE_MASK 0x01000000L
+#define HDP_MISC_CNTL__ALL_FUNCTION_CACHELINE_INVALID_MASK 0x02000000L
+#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x04000000L
+#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x08000000L
+#define HDP_MISC_CNTL__VARIABLE_CACHELINE_SIZE_MASK 0x10000000L
+#define HDP_MISC_CNTL__ADAPTIVE_CACHELINE_SIZE_MASK 0x20000000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE_MASK 0x40000000L
+//HDP_MEM_POWER_LS
+#define HDP_MEM_POWER_LS__LS_ENABLE__SHIFT 0x0
+#define HDP_MEM_POWER_LS__LS_HOLD__SHIFT 0x7
+#define HDP_MEM_POWER_LS__LS_ENABLE_MASK 0x00000001L
+#define HDP_MEM_POWER_LS__LS_HOLD_MASK 0x00001F80L
+//HDP_MMHUB_CNTL
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO__SHIFT 0x0
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC__SHIFT 0x1
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP__SHIFT 0x2
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_MASK 0x00000001L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_MASK 0x00000002L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_MASK 0x00000004L
+//HDP_EDC_CNT
+#define HDP_EDC_CNT__MEM0_SED_COUNT__SHIFT 0x0
+#define HDP_EDC_CNT__MEM1_SED_COUNT__SHIFT 0x2
+#define HDP_EDC_CNT__MEM0_SED_COUNT_MASK 0x00000003L
+#define HDP_EDC_CNT__MEM1_SED_COUNT_MASK 0x0000000CL
+//HDP_VERSION
+#define HDP_VERSION__MINVER__SHIFT 0x0
+#define HDP_VERSION__MAJVER__SHIFT 0x8
+#define HDP_VERSION__REV__SHIFT 0x10
+#define HDP_VERSION__MINVER_MASK 0x000000FFL
+#define HDP_VERSION__MAJVER_MASK 0x0000FF00L
+#define HDP_VERSION__REV_MASK 0x00FF0000L
+//HDP_CLK_CNTL
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x0
+#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK__SHIFT 0x4
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0000000FL
+#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK_MASK 0x00000010L
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//HDP_MEMIO_CNTL
+#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x0
+#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x1
+#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x2
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x6
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x7
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x8
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0xe
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0xf
+#define HDP_MEMIO_CNTL__MEMIO_VF__SHIFT 0x10
+#define HDP_MEMIO_CNTL__MEMIO_VFID__SHIFT 0x11
+#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
+#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
+#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003CL
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003F00L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
+#define HDP_MEMIO_CNTL__MEMIO_VF_MASK 0x00010000L
+#define HDP_MEMIO_CNTL__MEMIO_VFID_MASK 0x003E0000L
+//HDP_MEMIO_ADDR
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x0
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xFFFFFFFFL
+//HDP_MEMIO_STATUS
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x0
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x1
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x2
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x3
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
+//HDP_MEMIO_WR_DATA
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x0
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xFFFFFFFFL
+//HDP_MEMIO_RD_DATA
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x0
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_FIRST
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_FLUSH
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x0
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x4
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x8
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0xb
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x10
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x12
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x13
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x14
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000FL
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000F0L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000F800L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
+//HDP_XDP_D2H_BAR_UPDATE
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x0
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x10
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x14
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000F0000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
+//HDP_XDP_D2H_RSVD_3
+#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_4
+#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_5
+#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_6
+#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_7
+#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_8
+#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_9
+#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_10
+#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_11
+#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_12
+#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_13
+#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_14
+#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_15
+#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_16
+#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_17
+#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_18
+#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_19
+#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_20
+#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_21
+#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_22
+#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_23
+#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_24
+#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_25
+#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_26
+#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_27
+#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_28
+#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_29
+#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_30
+#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_31
+#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_32
+#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_33
+#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_34
+#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_LAST
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_P2P_BAR_CFG
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x0
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x4
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000FL
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
+//HDP_XDP_P2P_MBX_OFFSET
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x0001FFFFL
+//HDP_XDP_P2P_MBX_ADDR0
+#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR1
+#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR2
+#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR3
+#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR4
+#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR5
+#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR6
+#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_HDP_MBX_MC_CFG
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS__SHIFT 0x0
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP__SHIFT 0xe
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS_MASK 0x0000000FL
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP_MASK 0x00004000L
+//HDP_XDP_HDP_MC_CFG
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP__SHIFT 0x3
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0xe
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_MASK 0x00000008L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000FC000L
+//HDP_XDP_HST_CFG
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x0
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x1
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN__SHIFT 0x3
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN__SHIFT 0x4
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x5
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN_MASK 0x00000008L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN_MASK 0x00000010L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00000020L
+//HDP_XDP_HDP_IPH_CFG
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE__SHIFT 0x0
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE__SHIFT 0x6
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0xc
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0xd
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE_MASK 0x0000003FL
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE_MASK 0x00000FC0L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
+//HDP_XDP_P2P_BAR0
+#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR1
+#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR2
+#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR3
+#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR4
+#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR5
+#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR6
+#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR7
+#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
+//HDP_XDP_FLUSH_ARMED_STS
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xFFFFFFFFL
+//HDP_XDP_FLUSH_CNTR0_STS
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03FFFFFFL
+//HDP_XDP_BUSY_STS
+#define HDP_XDP_BUSY_STS__BUSY_BITS__SHIFT 0x0
+#define HDP_XDP_BUSY_STS__BUSY_BITS_MASK 0x0003FFFFL
+//HDP_XDP_STICKY
+#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x0
+#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x10
+#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000FFFFL
+#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xFFFF0000L
+//HDP_XDP_CHKN
+#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x0
+#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x8
+#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x10
+#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x18
+#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000FFL
+#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000FF00L
+#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00FF0000L
+#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xFF000000L
+//HDP_XDP_BARS_ADDR_39_36
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x0
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x4
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x8
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0xc
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x10
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x18
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x1c
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000FL
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000F0L
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000F00L
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000F000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000F0000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0F000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xF0000000L
+//HDP_XDP_MC_VM_FB_LOCATION_BASE
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x03FFFFFFL
+//HDP_XDP_GPU_IOV_VIOLATION_LOG
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//HDP_XDP_MMHUB_ERROR
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01__SHIFT 0x1
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10__SHIFT 0x2
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11__SHIFT 0x3
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01__SHIFT 0x5
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10__SHIFT 0x6
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11__SHIFT 0x7
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01__SHIFT 0x9
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10__SHIFT 0xa
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11__SHIFT 0xb
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01__SHIFT 0xd
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10__SHIFT 0xe
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11__SHIFT 0xf
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01__SHIFT 0x11
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10__SHIFT 0x12
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11__SHIFT 0x13
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01__SHIFT 0x15
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10__SHIFT 0x16
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11__SHIFT 0x17
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01_MASK 0x00000002L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10_MASK 0x00000004L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11_MASK 0x00000008L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01_MASK 0x00000020L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10_MASK 0x00000040L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11_MASK 0x00000080L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01_MASK 0x00000200L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10_MASK 0x00000400L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11_MASK 0x00000800L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01_MASK 0x00002000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10_MASK 0x00004000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11_MASK 0x00008000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01_MASK 0x00020000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10_MASK 0x00040000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11_MASK 0x00080000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01_MASK 0x00200000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10_MASK 0x00400000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11_MASK 0x00800000L
+
+#endif
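
Each field in the registers above is described by a __SHIFT/_MASK pair: a field is read by masking the register value and shifting it down, and written by the inverse operation. A minimal sketch of that pattern, using generic token-pasting helpers (the amdgpu driver's REG_GET_FIELD()/REG_SET_FIELD() helpers follow the same mask-and-shift pattern):

#include <stdint.h>

/* Extract a field: mask the register value, then shift it down. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Insert a field: clear its bits, then OR in the shifted, masked value. */
#define SET_FIELD(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Example: pull the major version out of a raw HDP_VERSION readback. */
static inline uint32_t hdp_major_version(uint32_t hdp_version)
{
	return GET_FIELD(hdp_version, HDP_VERSION, MAJVER);
}
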
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_default.h
index 02989fe..02989fe 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_default.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
index 352ffae7..352ffae7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
index 34278ef..34278ef 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_offset.h
index 4b6fc72..4b6fc72 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_sh_mask.h
index 8effec7..8effec7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_offset.h
new file mode 100644
index 0000000..904ae53
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_offset.h
@@ -0,0 +1,1991 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_3_0_OFFSET_HEADER
+#define _mmhub_9_3_0_OFFSET_HEADER
+
+
+
+// addressBlock: mmhub_dagbdec
+// base address: 0x68000
+#define mmDAGB0_RDCLI0 0x0000
+#define mmDAGB0_RDCLI0_BASE_IDX 0
+#define mmDAGB0_RDCLI1 0x0001
+#define mmDAGB0_RDCLI1_BASE_IDX 0
+#define mmDAGB0_RDCLI2 0x0002
+#define mmDAGB0_RDCLI2_BASE_IDX 0
+#define mmDAGB0_RDCLI3 0x0003
+#define mmDAGB0_RDCLI3_BASE_IDX 0
+#define mmDAGB0_RDCLI4 0x0004
+#define mmDAGB0_RDCLI4_BASE_IDX 0
+#define mmDAGB0_RDCLI5 0x0005
+#define mmDAGB0_RDCLI5_BASE_IDX 0
+#define mmDAGB0_RDCLI6 0x0006
+#define mmDAGB0_RDCLI6_BASE_IDX 0
+#define mmDAGB0_RDCLI7 0x0007
+#define mmDAGB0_RDCLI7_BASE_IDX 0
+#define mmDAGB0_RDCLI8 0x0008
+#define mmDAGB0_RDCLI8_BASE_IDX 0
+#define mmDAGB0_RDCLI9 0x0009
+#define mmDAGB0_RDCLI9_BASE_IDX 0
+#define mmDAGB0_RDCLI10 0x000a
+#define mmDAGB0_RDCLI10_BASE_IDX 0
+#define mmDAGB0_RDCLI11 0x000b
+#define mmDAGB0_RDCLI11_BASE_IDX 0
+#define mmDAGB0_RDCLI12 0x000c
+#define mmDAGB0_RDCLI12_BASE_IDX 0
+#define mmDAGB0_RDCLI13 0x000d
+#define mmDAGB0_RDCLI13_BASE_IDX 0
+#define mmDAGB0_RDCLI14 0x000e
+#define mmDAGB0_RDCLI14_BASE_IDX 0
+#define mmDAGB0_RDCLI15 0x000f
+#define mmDAGB0_RDCLI15_BASE_IDX 0
+#define mmDAGB0_RD_CNTL 0x0010
+#define mmDAGB0_RD_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_GMI_CNTL 0x0011
+#define mmDAGB0_RD_GMI_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_ADDR_DAGB 0x0012
+#define mmDAGB0_RD_ADDR_DAGB_BASE_IDX 0
+#define mmDAGB0_RD_OUTPUT_DAGB_MAX_BURST 0x0013
+#define mmDAGB0_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define mmDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER 0x0014
+#define mmDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define mmDAGB0_RD_CGTT_CLK_CTRL 0x0015
+#define mmDAGB0_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB0_L1TLB_RD_CGTT_CLK_CTRL 0x0016
+#define mmDAGB0_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB0_ATCVM_RD_CGTT_CLK_CTRL 0x0017
+#define mmDAGB0_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST0 0x0018
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER0 0x0019
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST1 0x001a
+#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER1 0x001b
+#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define mmDAGB0_RD_VC0_CNTL 0x001c
+#define mmDAGB0_RD_VC0_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_VC1_CNTL 0x001d
+#define mmDAGB0_RD_VC1_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_VC2_CNTL 0x001e
+#define mmDAGB0_RD_VC2_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_VC3_CNTL 0x001f
+#define mmDAGB0_RD_VC3_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_VC4_CNTL 0x0020
+#define mmDAGB0_RD_VC4_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_VC5_CNTL 0x0021
+#define mmDAGB0_RD_VC5_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_VC6_CNTL 0x0022
+#define mmDAGB0_RD_VC6_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_VC7_CNTL 0x0023
+#define mmDAGB0_RD_VC7_CNTL_BASE_IDX 0
+#define mmDAGB0_RD_CNTL_MISC 0x0024
+#define mmDAGB0_RD_CNTL_MISC_BASE_IDX 0
+#define mmDAGB0_RD_TLB_CREDIT 0x0025
+#define mmDAGB0_RD_TLB_CREDIT_BASE_IDX 0
+#define mmDAGB0_RDCLI_ASK_PENDING 0x0026
+#define mmDAGB0_RDCLI_ASK_PENDING_BASE_IDX 0
+#define mmDAGB0_RDCLI_GO_PENDING 0x0027
+#define mmDAGB0_RDCLI_GO_PENDING_BASE_IDX 0
+#define mmDAGB0_RDCLI_GBLSEND_PENDING 0x0028
+#define mmDAGB0_RDCLI_GBLSEND_PENDING_BASE_IDX 0
+#define mmDAGB0_RDCLI_TLB_PENDING 0x0029
+#define mmDAGB0_RDCLI_TLB_PENDING_BASE_IDX 0
+#define mmDAGB0_RDCLI_OARB_PENDING 0x002a
+#define mmDAGB0_RDCLI_OARB_PENDING_BASE_IDX 0
+#define mmDAGB0_RDCLI_OSD_PENDING 0x002b
+#define mmDAGB0_RDCLI_OSD_PENDING_BASE_IDX 0
+#define mmDAGB0_WRCLI0 0x002c
+#define mmDAGB0_WRCLI0_BASE_IDX 0
+#define mmDAGB0_WRCLI1 0x002d
+#define mmDAGB0_WRCLI1_BASE_IDX 0
+#define mmDAGB0_WRCLI2 0x002e
+#define mmDAGB0_WRCLI2_BASE_IDX 0
+#define mmDAGB0_WRCLI3 0x002f
+#define mmDAGB0_WRCLI3_BASE_IDX 0
+#define mmDAGB0_WRCLI4 0x0030
+#define mmDAGB0_WRCLI4_BASE_IDX 0
+#define mmDAGB0_WRCLI5 0x0031
+#define mmDAGB0_WRCLI5_BASE_IDX 0
+#define mmDAGB0_WRCLI6 0x0032
+#define mmDAGB0_WRCLI6_BASE_IDX 0
+#define mmDAGB0_WRCLI7 0x0033
+#define mmDAGB0_WRCLI7_BASE_IDX 0
+#define mmDAGB0_WRCLI8 0x0034
+#define mmDAGB0_WRCLI8_BASE_IDX 0
+#define mmDAGB0_WRCLI9 0x0035
+#define mmDAGB0_WRCLI9_BASE_IDX 0
+#define mmDAGB0_WRCLI10 0x0036
+#define mmDAGB0_WRCLI10_BASE_IDX 0
+#define mmDAGB0_WRCLI11 0x0037
+#define mmDAGB0_WRCLI11_BASE_IDX 0
+#define mmDAGB0_WRCLI12 0x0038
+#define mmDAGB0_WRCLI12_BASE_IDX 0
+#define mmDAGB0_WRCLI13 0x0039
+#define mmDAGB0_WRCLI13_BASE_IDX 0
+#define mmDAGB0_WRCLI14 0x003a
+#define mmDAGB0_WRCLI14_BASE_IDX 0
+#define mmDAGB0_WRCLI15 0x003b
+#define mmDAGB0_WRCLI15_BASE_IDX 0
+#define mmDAGB0_WR_CNTL 0x003c
+#define mmDAGB0_WR_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_GMI_CNTL 0x003d
+#define mmDAGB0_WR_GMI_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_ADDR_DAGB 0x003e
+#define mmDAGB0_WR_ADDR_DAGB_BASE_IDX 0
+#define mmDAGB0_WR_OUTPUT_DAGB_MAX_BURST 0x003f
+#define mmDAGB0_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define mmDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER 0x0040
+#define mmDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define mmDAGB0_WR_CGTT_CLK_CTRL 0x0041
+#define mmDAGB0_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB0_L1TLB_WR_CGTT_CLK_CTRL 0x0042
+#define mmDAGB0_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB0_ATCVM_WR_CGTT_CLK_CTRL 0x0043
+#define mmDAGB0_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST0 0x0044
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER0 0x0045
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST1 0x0046
+#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER1 0x0047
+#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define mmDAGB0_WR_DATA_DAGB 0x0048
+#define mmDAGB0_WR_DATA_DAGB_BASE_IDX 0
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST0 0x0049
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 0
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER0 0x004a
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST1 0x004b
+#define mmDAGB0_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 0
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER1 0x004c
+#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define mmDAGB0_WR_VC0_CNTL 0x004d
+#define mmDAGB0_WR_VC0_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_VC1_CNTL 0x004e
+#define mmDAGB0_WR_VC1_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_VC2_CNTL 0x004f
+#define mmDAGB0_WR_VC2_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_VC3_CNTL 0x0050
+#define mmDAGB0_WR_VC3_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_VC4_CNTL 0x0051
+#define mmDAGB0_WR_VC4_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_VC5_CNTL 0x0052
+#define mmDAGB0_WR_VC5_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_VC6_CNTL 0x0053
+#define mmDAGB0_WR_VC6_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_VC7_CNTL 0x0054
+#define mmDAGB0_WR_VC7_CNTL_BASE_IDX 0
+#define mmDAGB0_WR_CNTL_MISC 0x0055
+#define mmDAGB0_WR_CNTL_MISC_BASE_IDX 0
+#define mmDAGB0_WR_TLB_CREDIT 0x0056
+#define mmDAGB0_WR_TLB_CREDIT_BASE_IDX 0
+#define mmDAGB0_WR_DATA_CREDIT 0x0057
+#define mmDAGB0_WR_DATA_CREDIT_BASE_IDX 0
+#define mmDAGB0_WR_MISC_CREDIT 0x0058
+#define mmDAGB0_WR_MISC_CREDIT_BASE_IDX 0
+#define mmDAGB0_WRCLI_ASK_PENDING 0x0059
+#define mmDAGB0_WRCLI_ASK_PENDING_BASE_IDX 0
+#define mmDAGB0_WRCLI_GO_PENDING 0x005a
+#define mmDAGB0_WRCLI_GO_PENDING_BASE_IDX 0
+#define mmDAGB0_WRCLI_GBLSEND_PENDING 0x005b
+#define mmDAGB0_WRCLI_GBLSEND_PENDING_BASE_IDX 0
+#define mmDAGB0_WRCLI_TLB_PENDING 0x005c
+#define mmDAGB0_WRCLI_TLB_PENDING_BASE_IDX 0
+#define mmDAGB0_WRCLI_OARB_PENDING 0x005d
+#define mmDAGB0_WRCLI_OARB_PENDING_BASE_IDX 0
+#define mmDAGB0_WRCLI_OSD_PENDING 0x005e
+#define mmDAGB0_WRCLI_OSD_PENDING_BASE_IDX 0
+#define mmDAGB0_WRCLI_DBUS_ASK_PENDING 0x005f
+#define mmDAGB0_WRCLI_DBUS_ASK_PENDING_BASE_IDX 0
+#define mmDAGB0_WRCLI_DBUS_GO_PENDING 0x0060
+#define mmDAGB0_WRCLI_DBUS_GO_PENDING_BASE_IDX 0
+#define mmDAGB0_DAGB_DLY 0x0061
+#define mmDAGB0_DAGB_DLY_BASE_IDX 0
+#define mmDAGB0_CNTL_MISC 0x0062
+#define mmDAGB0_CNTL_MISC_BASE_IDX 0
+#define mmDAGB0_CNTL_MISC2 0x0063
+#define mmDAGB0_CNTL_MISC2_BASE_IDX 0
+#define mmDAGB0_FIFO_EMPTY 0x0064
+#define mmDAGB0_FIFO_EMPTY_BASE_IDX 0
+#define mmDAGB0_FIFO_FULL 0x0065
+#define mmDAGB0_FIFO_FULL_BASE_IDX 0
+#define mmDAGB0_WR_CREDITS_FULL 0x0066
+#define mmDAGB0_WR_CREDITS_FULL_BASE_IDX 0
+#define mmDAGB0_RD_CREDITS_FULL 0x0067
+#define mmDAGB0_RD_CREDITS_FULL_BASE_IDX 0
+#define mmDAGB0_PERFCOUNTER_LO 0x0068
+#define mmDAGB0_PERFCOUNTER_LO_BASE_IDX 0
+#define mmDAGB0_PERFCOUNTER_HI 0x0069
+#define mmDAGB0_PERFCOUNTER_HI_BASE_IDX 0
+#define mmDAGB0_PERFCOUNTER0_CFG 0x006a
+#define mmDAGB0_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmDAGB0_PERFCOUNTER1_CFG 0x006b
+#define mmDAGB0_PERFCOUNTER1_CFG_BASE_IDX 0
+#define mmDAGB0_PERFCOUNTER2_CFG 0x006c
+#define mmDAGB0_PERFCOUNTER2_CFG_BASE_IDX 0
+#define mmDAGB0_PERFCOUNTER_RSLT_CNTL 0x006d
+#define mmDAGB0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define mmDAGB0_RESERVE0 0x006e
+#define mmDAGB0_RESERVE0_BASE_IDX 0
+#define mmDAGB0_RESERVE1 0x006f
+#define mmDAGB0_RESERVE1_BASE_IDX 0
+#define mmDAGB0_RESERVE2 0x0070
+#define mmDAGB0_RESERVE2_BASE_IDX 0
+#define mmDAGB0_RESERVE3 0x0071
+#define mmDAGB0_RESERVE3_BASE_IDX 0
+#define mmDAGB0_RESERVE4 0x0072
+#define mmDAGB0_RESERVE4_BASE_IDX 0
+#define mmDAGB0_RESERVE5 0x0073
+#define mmDAGB0_RESERVE5_BASE_IDX 0
+#define mmDAGB0_RESERVE6 0x0074
+#define mmDAGB0_RESERVE6_BASE_IDX 0
+#define mmDAGB0_RESERVE7 0x0075
+#define mmDAGB0_RESERVE7_BASE_IDX 0
+#define mmDAGB0_RESERVE8 0x0076
+#define mmDAGB0_RESERVE8_BASE_IDX 0
+#define mmDAGB0_RESERVE9 0x0077
+#define mmDAGB0_RESERVE9_BASE_IDX 0
+#define mmDAGB0_RESERVE10 0x0078
+#define mmDAGB0_RESERVE10_BASE_IDX 0
+#define mmDAGB0_RESERVE11 0x0079
+#define mmDAGB0_RESERVE11_BASE_IDX 0
+#define mmDAGB0_RESERVE12 0x007a
+#define mmDAGB0_RESERVE12_BASE_IDX 0
+#define mmDAGB0_RESERVE13 0x007b
+#define mmDAGB0_RESERVE13_BASE_IDX 0
+#define mmDAGB0_RESERVE14 0x007c
+#define mmDAGB0_RESERVE14_BASE_IDX 0
+#define mmDAGB0_RESERVE15 0x007d
+#define mmDAGB0_RESERVE15_BASE_IDX 0
+#define mmDAGB0_RESERVE16 0x007e
+#define mmDAGB0_RESERVE16_BASE_IDX 0
+#define mmDAGB0_RESERVE17 0x007f
+#define mmDAGB0_RESERVE17_BASE_IDX 0
+#define mmDAGB1_RDCLI0 0x0080
+#define mmDAGB1_RDCLI0_BASE_IDX 0
+#define mmDAGB1_RDCLI1 0x0081
+#define mmDAGB1_RDCLI1_BASE_IDX 0
+#define mmDAGB1_RDCLI2 0x0082
+#define mmDAGB1_RDCLI2_BASE_IDX 0
+#define mmDAGB1_RDCLI3 0x0083
+#define mmDAGB1_RDCLI3_BASE_IDX 0
+#define mmDAGB1_RDCLI4 0x0084
+#define mmDAGB1_RDCLI4_BASE_IDX 0
+#define mmDAGB1_RDCLI5 0x0085
+#define mmDAGB1_RDCLI5_BASE_IDX 0
+#define mmDAGB1_RDCLI6 0x0086
+#define mmDAGB1_RDCLI6_BASE_IDX 0
+#define mmDAGB1_RDCLI7 0x0087
+#define mmDAGB1_RDCLI7_BASE_IDX 0
+#define mmDAGB1_RDCLI8 0x0088
+#define mmDAGB1_RDCLI8_BASE_IDX 0
+#define mmDAGB1_RDCLI9 0x0089
+#define mmDAGB1_RDCLI9_BASE_IDX 0
+#define mmDAGB1_RDCLI10 0x008a
+#define mmDAGB1_RDCLI10_BASE_IDX 0
+#define mmDAGB1_RDCLI11 0x008b
+#define mmDAGB1_RDCLI11_BASE_IDX 0
+#define mmDAGB1_RDCLI12 0x008c
+#define mmDAGB1_RDCLI12_BASE_IDX 0
+#define mmDAGB1_RDCLI13 0x008d
+#define mmDAGB1_RDCLI13_BASE_IDX 0
+#define mmDAGB1_RDCLI14 0x008e
+#define mmDAGB1_RDCLI14_BASE_IDX 0
+#define mmDAGB1_RDCLI15 0x008f
+#define mmDAGB1_RDCLI15_BASE_IDX 0
+#define mmDAGB1_RD_CNTL 0x0090
+#define mmDAGB1_RD_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_GMI_CNTL 0x0091
+#define mmDAGB1_RD_GMI_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_ADDR_DAGB 0x0092
+#define mmDAGB1_RD_ADDR_DAGB_BASE_IDX 0
+#define mmDAGB1_RD_OUTPUT_DAGB_MAX_BURST 0x0093
+#define mmDAGB1_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define mmDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER 0x0094
+#define mmDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define mmDAGB1_RD_CGTT_CLK_CTRL 0x0095
+#define mmDAGB1_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB1_L1TLB_RD_CGTT_CLK_CTRL 0x0096
+#define mmDAGB1_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB1_ATCVM_RD_CGTT_CLK_CTRL 0x0097
+#define mmDAGB1_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST0 0x0098
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER0 0x0099
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST1 0x009a
+#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER1 0x009b
+#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define mmDAGB1_RD_VC0_CNTL 0x009c
+#define mmDAGB1_RD_VC0_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_VC1_CNTL 0x009d
+#define mmDAGB1_RD_VC1_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_VC2_CNTL 0x009e
+#define mmDAGB1_RD_VC2_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_VC3_CNTL 0x009f
+#define mmDAGB1_RD_VC3_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_VC4_CNTL 0x00a0
+#define mmDAGB1_RD_VC4_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_VC5_CNTL 0x00a1
+#define mmDAGB1_RD_VC5_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_VC6_CNTL 0x00a2
+#define mmDAGB1_RD_VC6_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_VC7_CNTL 0x00a3
+#define mmDAGB1_RD_VC7_CNTL_BASE_IDX 0
+#define mmDAGB1_RD_CNTL_MISC 0x00a4
+#define mmDAGB1_RD_CNTL_MISC_BASE_IDX 0
+#define mmDAGB1_RD_TLB_CREDIT 0x00a5
+#define mmDAGB1_RD_TLB_CREDIT_BASE_IDX 0
+#define mmDAGB1_RDCLI_ASK_PENDING 0x00a6
+#define mmDAGB1_RDCLI_ASK_PENDING_BASE_IDX 0
+#define mmDAGB1_RDCLI_GO_PENDING 0x00a7
+#define mmDAGB1_RDCLI_GO_PENDING_BASE_IDX 0
+#define mmDAGB1_RDCLI_GBLSEND_PENDING 0x00a8
+#define mmDAGB1_RDCLI_GBLSEND_PENDING_BASE_IDX 0
+#define mmDAGB1_RDCLI_TLB_PENDING 0x00a9
+#define mmDAGB1_RDCLI_TLB_PENDING_BASE_IDX 0
+#define mmDAGB1_RDCLI_OARB_PENDING 0x00aa
+#define mmDAGB1_RDCLI_OARB_PENDING_BASE_IDX 0
+#define mmDAGB1_RDCLI_OSD_PENDING 0x00ab
+#define mmDAGB1_RDCLI_OSD_PENDING_BASE_IDX 0
+#define mmDAGB1_WRCLI0 0x00ac
+#define mmDAGB1_WRCLI0_BASE_IDX 0
+#define mmDAGB1_WRCLI1 0x00ad
+#define mmDAGB1_WRCLI1_BASE_IDX 0
+#define mmDAGB1_WRCLI2 0x00ae
+#define mmDAGB1_WRCLI2_BASE_IDX 0
+#define mmDAGB1_WRCLI3 0x00af
+#define mmDAGB1_WRCLI3_BASE_IDX 0
+#define mmDAGB1_WRCLI4 0x00b0
+#define mmDAGB1_WRCLI4_BASE_IDX 0
+#define mmDAGB1_WRCLI5 0x00b1
+#define mmDAGB1_WRCLI5_BASE_IDX 0
+#define mmDAGB1_WRCLI6 0x00b2
+#define mmDAGB1_WRCLI6_BASE_IDX 0
+#define mmDAGB1_WRCLI7 0x00b3
+#define mmDAGB1_WRCLI7_BASE_IDX 0
+#define mmDAGB1_WRCLI8 0x00b4
+#define mmDAGB1_WRCLI8_BASE_IDX 0
+#define mmDAGB1_WRCLI9 0x00b5
+#define mmDAGB1_WRCLI9_BASE_IDX 0
+#define mmDAGB1_WRCLI10 0x00b6
+#define mmDAGB1_WRCLI10_BASE_IDX 0
+#define mmDAGB1_WRCLI11 0x00b7
+#define mmDAGB1_WRCLI11_BASE_IDX 0
+#define mmDAGB1_WRCLI12 0x00b8
+#define mmDAGB1_WRCLI12_BASE_IDX 0
+#define mmDAGB1_WRCLI13 0x00b9
+#define mmDAGB1_WRCLI13_BASE_IDX 0
+#define mmDAGB1_WRCLI14 0x00ba
+#define mmDAGB1_WRCLI14_BASE_IDX 0
+#define mmDAGB1_WRCLI15 0x00bb
+#define mmDAGB1_WRCLI15_BASE_IDX 0
+#define mmDAGB1_WR_CNTL 0x00bc
+#define mmDAGB1_WR_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_GMI_CNTL 0x00bd
+#define mmDAGB1_WR_GMI_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_ADDR_DAGB 0x00be
+#define mmDAGB1_WR_ADDR_DAGB_BASE_IDX 0
+#define mmDAGB1_WR_OUTPUT_DAGB_MAX_BURST 0x00bf
+#define mmDAGB1_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 0
+#define mmDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER 0x00c0
+#define mmDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 0
+#define mmDAGB1_WR_CGTT_CLK_CTRL 0x00c1
+#define mmDAGB1_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB1_L1TLB_WR_CGTT_CLK_CTRL 0x00c2
+#define mmDAGB1_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB1_ATCVM_WR_CGTT_CLK_CTRL 0x00c3
+#define mmDAGB1_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST0 0x00c4
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 0
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER0 0x00c5
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST1 0x00c6
+#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 0
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER1 0x00c7
+#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define mmDAGB1_WR_DATA_DAGB 0x00c8
+#define mmDAGB1_WR_DATA_DAGB_BASE_IDX 0
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST0 0x00c9
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 0
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER0 0x00ca
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 0
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST1 0x00cb
+#define mmDAGB1_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 0
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER1 0x00cc
+#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 0
+#define mmDAGB1_WR_VC0_CNTL 0x00cd
+#define mmDAGB1_WR_VC0_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_VC1_CNTL 0x00ce
+#define mmDAGB1_WR_VC1_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_VC2_CNTL 0x00cf
+#define mmDAGB1_WR_VC2_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_VC3_CNTL 0x00d0
+#define mmDAGB1_WR_VC3_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_VC4_CNTL 0x00d1
+#define mmDAGB1_WR_VC4_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_VC5_CNTL 0x00d2
+#define mmDAGB1_WR_VC5_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_VC6_CNTL 0x00d3
+#define mmDAGB1_WR_VC6_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_VC7_CNTL 0x00d4
+#define mmDAGB1_WR_VC7_CNTL_BASE_IDX 0
+#define mmDAGB1_WR_CNTL_MISC 0x00d5
+#define mmDAGB1_WR_CNTL_MISC_BASE_IDX 0
+#define mmDAGB1_WR_TLB_CREDIT 0x00d6
+#define mmDAGB1_WR_TLB_CREDIT_BASE_IDX 0
+#define mmDAGB1_WR_DATA_CREDIT 0x00d7
+#define mmDAGB1_WR_DATA_CREDIT_BASE_IDX 0
+#define mmDAGB1_WR_MISC_CREDIT 0x00d8
+#define mmDAGB1_WR_MISC_CREDIT_BASE_IDX 0
+#define mmDAGB1_WRCLI_ASK_PENDING 0x00d9
+#define mmDAGB1_WRCLI_ASK_PENDING_BASE_IDX 0
+#define mmDAGB1_WRCLI_GO_PENDING 0x00da
+#define mmDAGB1_WRCLI_GO_PENDING_BASE_IDX 0
+#define mmDAGB1_WRCLI_GBLSEND_PENDING 0x00db
+#define mmDAGB1_WRCLI_GBLSEND_PENDING_BASE_IDX 0
+#define mmDAGB1_WRCLI_TLB_PENDING 0x00dc
+#define mmDAGB1_WRCLI_TLB_PENDING_BASE_IDX 0
+#define mmDAGB1_WRCLI_OARB_PENDING 0x00dd
+#define mmDAGB1_WRCLI_OARB_PENDING_BASE_IDX 0
+#define mmDAGB1_WRCLI_OSD_PENDING 0x00de
+#define mmDAGB1_WRCLI_OSD_PENDING_BASE_IDX 0
+#define mmDAGB1_WRCLI_DBUS_ASK_PENDING 0x00df
+#define mmDAGB1_WRCLI_DBUS_ASK_PENDING_BASE_IDX 0
+#define mmDAGB1_WRCLI_DBUS_GO_PENDING 0x00e0
+#define mmDAGB1_WRCLI_DBUS_GO_PENDING_BASE_IDX 0
+#define mmDAGB1_DAGB_DLY 0x00e1
+#define mmDAGB1_DAGB_DLY_BASE_IDX 0
+#define mmDAGB1_CNTL_MISC 0x00e2
+#define mmDAGB1_CNTL_MISC_BASE_IDX 0
+#define mmDAGB1_CNTL_MISC2 0x00e3
+#define mmDAGB1_CNTL_MISC2_BASE_IDX 0
+#define mmDAGB1_FIFO_EMPTY 0x00e4
+#define mmDAGB1_FIFO_EMPTY_BASE_IDX 0
+#define mmDAGB1_FIFO_FULL 0x00e5
+#define mmDAGB1_FIFO_FULL_BASE_IDX 0
+#define mmDAGB1_WR_CREDITS_FULL 0x00e6
+#define mmDAGB1_WR_CREDITS_FULL_BASE_IDX 0
+#define mmDAGB1_RD_CREDITS_FULL 0x00e7
+#define mmDAGB1_RD_CREDITS_FULL_BASE_IDX 0
+#define mmDAGB1_PERFCOUNTER_LO 0x00e8
+#define mmDAGB1_PERFCOUNTER_LO_BASE_IDX 0
+#define mmDAGB1_PERFCOUNTER_HI 0x00e9
+#define mmDAGB1_PERFCOUNTER_HI_BASE_IDX 0
+#define mmDAGB1_PERFCOUNTER0_CFG 0x00ea
+#define mmDAGB1_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmDAGB1_PERFCOUNTER1_CFG 0x00eb
+#define mmDAGB1_PERFCOUNTER1_CFG_BASE_IDX 0
+#define mmDAGB1_PERFCOUNTER2_CFG 0x00ec
+#define mmDAGB1_PERFCOUNTER2_CFG_BASE_IDX 0
+#define mmDAGB1_PERFCOUNTER_RSLT_CNTL 0x00ed
+#define mmDAGB1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define mmDAGB1_RESERVE0 0x00ee
+#define mmDAGB1_RESERVE0_BASE_IDX 0
+#define mmDAGB1_RESERVE1 0x00ef
+#define mmDAGB1_RESERVE1_BASE_IDX 0
+#define mmDAGB1_RESERVE2 0x00f0
+#define mmDAGB1_RESERVE2_BASE_IDX 0
+#define mmDAGB1_RESERVE3 0x00f1
+#define mmDAGB1_RESERVE3_BASE_IDX 0
+#define mmDAGB1_RESERVE4 0x00f2
+#define mmDAGB1_RESERVE4_BASE_IDX 0
+#define mmDAGB1_RESERVE5 0x00f3
+#define mmDAGB1_RESERVE5_BASE_IDX 0
+#define mmDAGB1_RESERVE6 0x00f4
+#define mmDAGB1_RESERVE6_BASE_IDX 0
+#define mmDAGB1_RESERVE7 0x00f5
+#define mmDAGB1_RESERVE7_BASE_IDX 0
+#define mmDAGB1_RESERVE8 0x00f6
+#define mmDAGB1_RESERVE8_BASE_IDX 0
+#define mmDAGB1_RESERVE9 0x00f7
+#define mmDAGB1_RESERVE9_BASE_IDX 0
+#define mmDAGB1_RESERVE10 0x00f8
+#define mmDAGB1_RESERVE10_BASE_IDX 0
+#define mmDAGB1_RESERVE11 0x00f9
+#define mmDAGB1_RESERVE11_BASE_IDX 0
+#define mmDAGB1_RESERVE12 0x00fa
+#define mmDAGB1_RESERVE12_BASE_IDX 0
+#define mmDAGB1_RESERVE13 0x00fb
+#define mmDAGB1_RESERVE13_BASE_IDX 0
+#define mmDAGB1_RESERVE14 0x00fc
+#define mmDAGB1_RESERVE14_BASE_IDX 0
+#define mmDAGB1_RESERVE15 0x00fd
+#define mmDAGB1_RESERVE15_BASE_IDX 0
+#define mmDAGB1_RESERVE16 0x00fe
+#define mmDAGB1_RESERVE16_BASE_IDX 0
+#define mmDAGB1_RESERVE17 0x00ff
+#define mmDAGB1_RESERVE17_BASE_IDX 0
+
+
+// addressBlock: mmhub_ea_mmeadec
+// base address: 0x68400
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP0 0x0100
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP1 0x0101
+#define mmMMEA0_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP0 0x0102
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP1 0x0103
+#define mmMMEA0_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_GRP2VC_MAP 0x0104
+#define mmMMEA0_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_GRP2VC_MAP 0x0105
+#define mmMMEA0_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_LAZY 0x0106
+#define mmMMEA0_DRAM_RD_LAZY_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_LAZY 0x0107
+#define mmMMEA0_DRAM_WR_LAZY_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_CAM_CNTL 0x0108
+#define mmMMEA0_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_CAM_CNTL 0x0109
+#define mmMMEA0_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define mmMMEA0_DRAM_PAGE_BURST 0x010a
+#define mmMMEA0_DRAM_PAGE_BURST_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_PRI_AGE 0x010b
+#define mmMMEA0_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_PRI_AGE 0x010c
+#define mmMMEA0_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_PRI_QUEUING 0x010d
+#define mmMMEA0_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_PRI_QUEUING 0x010e
+#define mmMMEA0_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_PRI_FIXED 0x010f
+#define mmMMEA0_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_PRI_FIXED 0x0110
+#define mmMMEA0_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_PRI_URGENCY 0x0111
+#define mmMMEA0_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_PRI_URGENCY 0x0112
+#define mmMMEA0_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI1 0x0113
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI2 0x0114
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI3 0x0115
+#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI1 0x0116
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI2 0x0117
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI3 0x0118
+#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmMMEA0_ADDRNORM_BASE_ADDR0 0x0134
+#define mmMMEA0_ADDRNORM_BASE_ADDR0_BASE_IDX 0
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR0 0x0135
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR0_BASE_IDX 0
+#define mmMMEA0_ADDRNORM_BASE_ADDR1 0x0136
+#define mmMMEA0_ADDRNORM_BASE_ADDR1_BASE_IDX 0
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR1 0x0137
+#define mmMMEA0_ADDRNORM_LIMIT_ADDR1_BASE_IDX 0
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR1 0x0138
+#define mmMMEA0_ADDRNORM_OFFSET_ADDR1_BASE_IDX 0
+#define mmMMEA0_ADDRNORMDRAM_HOLE_CNTL 0x0143
+#define mmMMEA0_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 0
+#define mmMMEA0_ADDRNORMDRAM_TRICHANNEL_CFG 0x0145
+#define mmMMEA0_ADDRNORMDRAM_TRICHANNEL_CFG_BASE_IDX 0
+#define mmMMEA0_ADDRDEC_BANK_CFG 0x0147
+#define mmMMEA0_ADDRDEC_BANK_CFG_BASE_IDX 0
+#define mmMMEA0_ADDRDEC_MISC_CFG 0x0148
+#define mmMMEA0_ADDRDEC_MISC_CFG_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK0 0x0149
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK1 0x014a
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK2 0x014b
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK3 0x014c
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK4 0x014d
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC 0x014e
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC2 0x014f
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS0 0x0150
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS1 0x0151
+#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 0
+#define mmMMEA0_ADDRDECDRAM_HARVEST_ENABLE 0x0152
+#define mmMMEA0_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS0 0x015d
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS1 0x015e
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS2 0x015f
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS3 0x0160
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS0 0x0161
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS1 0x0162
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS2 0x0163
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS3 0x0164
+#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS01 0x0165
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS23 0x0166
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS01 0x0167
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS23 0x0168
+#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS01 0x0169
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS23 0x016a
+#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS01 0x016b
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS23 0x016c
+#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS01 0x016d
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS23 0x016e
+#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS01 0x016f
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS23 0x0170
+#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS01 0x0171
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS23 0x0172
+#define mmMMEA0_ADDRDEC0_RM_SEL_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS01 0x0173
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS23 0x0174
+#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS0 0x0175
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS1 0x0176
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS2 0x0177
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS3 0x0178
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS0 0x0179
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS1 0x017a
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS2 0x017b
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS3 0x017c
+#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS01 0x017d
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS23 0x017e
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS01 0x017f
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS23 0x0180
+#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS01 0x0181
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS23 0x0182
+#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS01 0x0183
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS23 0x0184
+#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS01 0x0185
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS23 0x0186
+#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS01 0x0187
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS23 0x0188
+#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS01 0x0189
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS23 0x018a
+#define mmMMEA0_ADDRDEC1_RM_SEL_CS23_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS01 0x018b
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 0
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS23 0x018c
+#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 0
+#define mmMMEA0_IO_RD_CLI2GRP_MAP0 0x01d5
+#define mmMMEA0_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define mmMMEA0_IO_RD_CLI2GRP_MAP1 0x01d6
+#define mmMMEA0_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define mmMMEA0_IO_WR_CLI2GRP_MAP0 0x01d7
+#define mmMMEA0_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define mmMMEA0_IO_WR_CLI2GRP_MAP1 0x01d8
+#define mmMMEA0_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define mmMMEA0_IO_RD_COMBINE_FLUSH 0x01d9
+#define mmMMEA0_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define mmMMEA0_IO_WR_COMBINE_FLUSH 0x01da
+#define mmMMEA0_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define mmMMEA0_IO_GROUP_BURST 0x01db
+#define mmMMEA0_IO_GROUP_BURST_BASE_IDX 0
+#define mmMMEA0_IO_RD_PRI_AGE 0x01dc
+#define mmMMEA0_IO_RD_PRI_AGE_BASE_IDX 0
+#define mmMMEA0_IO_WR_PRI_AGE 0x01dd
+#define mmMMEA0_IO_WR_PRI_AGE_BASE_IDX 0
+#define mmMMEA0_IO_RD_PRI_QUEUING 0x01de
+#define mmMMEA0_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define mmMMEA0_IO_WR_PRI_QUEUING 0x01df
+#define mmMMEA0_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define mmMMEA0_IO_RD_PRI_FIXED 0x01e0
+#define mmMMEA0_IO_RD_PRI_FIXED_BASE_IDX 0
+#define mmMMEA0_IO_WR_PRI_FIXED 0x01e1
+#define mmMMEA0_IO_WR_PRI_FIXED_BASE_IDX 0
+#define mmMMEA0_IO_RD_PRI_URGENCY 0x01e2
+#define mmMMEA0_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define mmMMEA0_IO_WR_PRI_URGENCY 0x01e3
+#define mmMMEA0_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define mmMMEA0_IO_RD_PRI_URGENCY_MASK 0x01e4
+#define mmMMEA0_IO_RD_PRI_URGENCY_MASK_BASE_IDX 0
+#define mmMMEA0_IO_WR_PRI_URGENCY_MASK 0x01e5
+#define mmMMEA0_IO_WR_PRI_URGENCY_MASK_BASE_IDX 0
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI1 0x01e6
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI2 0x01e7
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI3 0x01e8
+#define mmMMEA0_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI1 0x01e9
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI2 0x01ea
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI3 0x01eb
+#define mmMMEA0_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmMMEA0_SDP_ARB_DRAM 0x01ec
+#define mmMMEA0_SDP_ARB_DRAM_BASE_IDX 0
+#define mmMMEA0_SDP_ARB_FINAL 0x01ee
+#define mmMMEA0_SDP_ARB_FINAL_BASE_IDX 0
+#define mmMMEA0_SDP_DRAM_PRIORITY 0x01ef
+#define mmMMEA0_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define mmMMEA0_SDP_IO_PRIORITY 0x01f1
+#define mmMMEA0_SDP_IO_PRIORITY_BASE_IDX 0
+#define mmMMEA0_SDP_CREDITS 0x01f2
+#define mmMMEA0_SDP_CREDITS_BASE_IDX 0
+#define mmMMEA0_SDP_TAG_RESERVE0 0x01f3
+#define mmMMEA0_SDP_TAG_RESERVE0_BASE_IDX 0
+#define mmMMEA0_SDP_TAG_RESERVE1 0x01f4
+#define mmMMEA0_SDP_TAG_RESERVE1_BASE_IDX 0
+#define mmMMEA0_SDP_VCC_RESERVE0 0x01f5
+#define mmMMEA0_SDP_VCC_RESERVE0_BASE_IDX 0
+#define mmMMEA0_SDP_VCC_RESERVE1 0x01f6
+#define mmMMEA0_SDP_VCC_RESERVE1_BASE_IDX 0
+#define mmMMEA0_SDP_VCD_RESERVE0 0x01f7
+#define mmMMEA0_SDP_VCD_RESERVE0_BASE_IDX 0
+#define mmMMEA0_SDP_VCD_RESERVE1 0x01f8
+#define mmMMEA0_SDP_VCD_RESERVE1_BASE_IDX 0
+#define mmMMEA0_SDP_REQ_CNTL 0x01f9
+#define mmMMEA0_SDP_REQ_CNTL_BASE_IDX 0
+#define mmMMEA0_MISC 0x01fa
+#define mmMMEA0_MISC_BASE_IDX 0
+#define mmMMEA0_LATENCY_SAMPLING 0x01fb
+#define mmMMEA0_LATENCY_SAMPLING_BASE_IDX 0
+#define mmMMEA0_PERFCOUNTER_LO 0x01fc
+#define mmMMEA0_PERFCOUNTER_LO_BASE_IDX 0
+#define mmMMEA0_PERFCOUNTER_HI 0x01fd
+#define mmMMEA0_PERFCOUNTER_HI_BASE_IDX 0
+#define mmMMEA0_PERFCOUNTER0_CFG 0x01fe
+#define mmMMEA0_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmMMEA0_PERFCOUNTER1_CFG 0x01ff
+#define mmMMEA0_PERFCOUNTER1_CFG_BASE_IDX 0
+#define mmMMEA0_PERFCOUNTER_RSLT_CNTL 0x0200
+#define mmMMEA0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define mmMMEA0_EDC_CNT 0x0206
+#define mmMMEA0_EDC_CNT_BASE_IDX 0
+#define mmMMEA0_EDC_CNT2 0x0207
+#define mmMMEA0_EDC_CNT2_BASE_IDX 0
+#define mmMMEA0_DSM_CNTL 0x0208
+#define mmMMEA0_DSM_CNTL_BASE_IDX 0
+#define mmMMEA0_DSM_CNTLA 0x0209
+#define mmMMEA0_DSM_CNTLA_BASE_IDX 0
+#define mmMMEA0_DSM_CNTLB 0x020a
+#define mmMMEA0_DSM_CNTLB_BASE_IDX 0
+#define mmMMEA0_DSM_CNTL2 0x020b
+#define mmMMEA0_DSM_CNTL2_BASE_IDX 0
+#define mmMMEA0_DSM_CNTL2A 0x020c
+#define mmMMEA0_DSM_CNTL2A_BASE_IDX 0
+#define mmMMEA0_DSM_CNTL2B 0x020d
+#define mmMMEA0_DSM_CNTL2B_BASE_IDX 0
+#define mmMMEA0_CGTT_CLK_CTRL 0x020f
+#define mmMMEA0_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmMMEA0_EDC_MODE 0x0210
+#define mmMMEA0_EDC_MODE_BASE_IDX 0
+#define mmMMEA0_ERR_STATUS 0x0211
+#define mmMMEA0_ERR_STATUS_BASE_IDX 0
+#define mmMMEA0_MISC2 0x0212
+#define mmMMEA0_MISC2_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP0 0x0240
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP1 0x0241
+#define mmMMEA1_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP0 0x0242
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP1 0x0243
+#define mmMMEA1_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_GRP2VC_MAP 0x0244
+#define mmMMEA1_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_GRP2VC_MAP 0x0245
+#define mmMMEA1_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_LAZY 0x0246
+#define mmMMEA1_DRAM_RD_LAZY_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_LAZY 0x0247
+#define mmMMEA1_DRAM_WR_LAZY_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_CAM_CNTL 0x0248
+#define mmMMEA1_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_CAM_CNTL 0x0249
+#define mmMMEA1_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define mmMMEA1_DRAM_PAGE_BURST 0x024a
+#define mmMMEA1_DRAM_PAGE_BURST_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_PRI_AGE 0x024b
+#define mmMMEA1_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_PRI_AGE 0x024c
+#define mmMMEA1_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_PRI_QUEUING 0x024d
+#define mmMMEA1_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_PRI_QUEUING 0x024e
+#define mmMMEA1_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_PRI_FIXED 0x024f
+#define mmMMEA1_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_PRI_FIXED 0x0250
+#define mmMMEA1_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_PRI_URGENCY 0x0251
+#define mmMMEA1_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_PRI_URGENCY 0x0252
+#define mmMMEA1_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI1 0x0253
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI2 0x0254
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI3 0x0255
+#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI1 0x0256
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI2 0x0257
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI3 0x0258
+#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmMMEA1_ADDRNORM_BASE_ADDR0 0x0274
+#define mmMMEA1_ADDRNORM_BASE_ADDR0_BASE_IDX 0
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR0 0x0275
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR0_BASE_IDX 0
+#define mmMMEA1_ADDRNORM_BASE_ADDR1 0x0276
+#define mmMMEA1_ADDRNORM_BASE_ADDR1_BASE_IDX 0
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR1 0x0277
+#define mmMMEA1_ADDRNORM_LIMIT_ADDR1_BASE_IDX 0
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR1 0x0278
+#define mmMMEA1_ADDRNORM_OFFSET_ADDR1_BASE_IDX 0
+#define mmMMEA1_ADDRNORMDRAM_HOLE_CNTL 0x0283
+#define mmMMEA1_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 0
+#define mmMMEA1_ADDRNORMDRAM_TRICHANNEL_CFG 0x0285
+#define mmMMEA1_ADDRNORMDRAM_TRICHANNEL_CFG_BASE_IDX 0
+#define mmMMEA1_ADDRDEC_BANK_CFG 0x0287
+#define mmMMEA1_ADDRDEC_BANK_CFG_BASE_IDX 0
+#define mmMMEA1_ADDRDEC_MISC_CFG 0x0288
+#define mmMMEA1_ADDRDEC_MISC_CFG_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK0 0x0289
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK1 0x028a
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK2 0x028b
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK3 0x028c
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK4 0x028d
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC 0x028e
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC2 0x028f
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS0 0x0290
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS1 0x0291
+#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 0
+#define mmMMEA1_ADDRDECDRAM_HARVEST_ENABLE 0x0292
+#define mmMMEA1_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS0 0x029d
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS1 0x029e
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS2 0x029f
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS3 0x02a0
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS0 0x02a1
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS1 0x02a2
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS2 0x02a3
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS3 0x02a4
+#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS01 0x02a5
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS23 0x02a6
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS01 0x02a7
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS23 0x02a8
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS01 0x02a9
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS23 0x02aa
+#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS01 0x02ab
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS23 0x02ac
+#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS01 0x02ad
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS23 0x02ae
+#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS01 0x02af
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS23 0x02b0
+#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS01 0x02b1
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS23 0x02b2
+#define mmMMEA1_ADDRDEC0_RM_SEL_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS01 0x02b3
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS23 0x02b4
+#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS0 0x02b5
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS1 0x02b6
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS2 0x02b7
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS3 0x02b8
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS0 0x02b9
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS1 0x02ba
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS2 0x02bb
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS3 0x02bc
+#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS01 0x02bd
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS23 0x02be
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS01 0x02bf
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS23 0x02c0
+#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS01 0x02c1
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS23 0x02c2
+#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS01 0x02c3
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS23 0x02c4
+#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS01 0x02c5
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS23 0x02c6
+#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS01 0x02c7
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS23 0x02c8
+#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS01 0x02c9
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS23 0x02ca
+#define mmMMEA1_ADDRDEC1_RM_SEL_CS23_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS01 0x02cb
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 0
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS23 0x02cc
+#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 0
+#define mmMMEA1_IO_RD_CLI2GRP_MAP0 0x0315
+#define mmMMEA1_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define mmMMEA1_IO_RD_CLI2GRP_MAP1 0x0316
+#define mmMMEA1_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define mmMMEA1_IO_WR_CLI2GRP_MAP0 0x0317
+#define mmMMEA1_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define mmMMEA1_IO_WR_CLI2GRP_MAP1 0x0318
+#define mmMMEA1_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define mmMMEA1_IO_RD_COMBINE_FLUSH 0x0319
+#define mmMMEA1_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define mmMMEA1_IO_WR_COMBINE_FLUSH 0x031a
+#define mmMMEA1_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define mmMMEA1_IO_GROUP_BURST 0x031b
+#define mmMMEA1_IO_GROUP_BURST_BASE_IDX 0
+#define mmMMEA1_IO_RD_PRI_AGE 0x031c
+#define mmMMEA1_IO_RD_PRI_AGE_BASE_IDX 0
+#define mmMMEA1_IO_WR_PRI_AGE 0x031d
+#define mmMMEA1_IO_WR_PRI_AGE_BASE_IDX 0
+#define mmMMEA1_IO_RD_PRI_QUEUING 0x031e
+#define mmMMEA1_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define mmMMEA1_IO_WR_PRI_QUEUING 0x031f
+#define mmMMEA1_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define mmMMEA1_IO_RD_PRI_FIXED 0x0320
+#define mmMMEA1_IO_RD_PRI_FIXED_BASE_IDX 0
+#define mmMMEA1_IO_WR_PRI_FIXED 0x0321
+#define mmMMEA1_IO_WR_PRI_FIXED_BASE_IDX 0
+#define mmMMEA1_IO_RD_PRI_URGENCY 0x0322
+#define mmMMEA1_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define mmMMEA1_IO_WR_PRI_URGENCY 0x0323
+#define mmMMEA1_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define mmMMEA1_IO_RD_PRI_URGENCY_MASK 0x0324
+#define mmMMEA1_IO_RD_PRI_URGENCY_MASK_BASE_IDX 0
+#define mmMMEA1_IO_WR_PRI_URGENCY_MASK 0x0325
+#define mmMMEA1_IO_WR_PRI_URGENCY_MASK_BASE_IDX 0
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI1 0x0326
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI2 0x0327
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI3 0x0328
+#define mmMMEA1_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI1 0x0329
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI2 0x032a
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI3 0x032b
+#define mmMMEA1_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define mmMMEA1_SDP_ARB_DRAM 0x032c
+#define mmMMEA1_SDP_ARB_DRAM_BASE_IDX 0
+#define mmMMEA1_SDP_ARB_FINAL 0x032e
+#define mmMMEA1_SDP_ARB_FINAL_BASE_IDX 0
+#define mmMMEA1_SDP_DRAM_PRIORITY 0x032f
+#define mmMMEA1_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define mmMMEA1_SDP_IO_PRIORITY 0x0331
+#define mmMMEA1_SDP_IO_PRIORITY_BASE_IDX 0
+#define mmMMEA1_SDP_CREDITS 0x0332
+#define mmMMEA1_SDP_CREDITS_BASE_IDX 0
+#define mmMMEA1_SDP_TAG_RESERVE0 0x0333
+#define mmMMEA1_SDP_TAG_RESERVE0_BASE_IDX 0
+#define mmMMEA1_SDP_TAG_RESERVE1 0x0334
+#define mmMMEA1_SDP_TAG_RESERVE1_BASE_IDX 0
+#define mmMMEA1_SDP_VCC_RESERVE0 0x0335
+#define mmMMEA1_SDP_VCC_RESERVE0_BASE_IDX 0
+#define mmMMEA1_SDP_VCC_RESERVE1 0x0336
+#define mmMMEA1_SDP_VCC_RESERVE1_BASE_IDX 0
+#define mmMMEA1_SDP_VCD_RESERVE0 0x0337
+#define mmMMEA1_SDP_VCD_RESERVE0_BASE_IDX 0
+#define mmMMEA1_SDP_VCD_RESERVE1 0x0338
+#define mmMMEA1_SDP_VCD_RESERVE1_BASE_IDX 0
+#define mmMMEA1_SDP_REQ_CNTL 0x0339
+#define mmMMEA1_SDP_REQ_CNTL_BASE_IDX 0
+#define mmMMEA1_MISC 0x033a
+#define mmMMEA1_MISC_BASE_IDX 0
+#define mmMMEA1_LATENCY_SAMPLING 0x033b
+#define mmMMEA1_LATENCY_SAMPLING_BASE_IDX 0
+#define mmMMEA1_PERFCOUNTER_LO 0x033c
+#define mmMMEA1_PERFCOUNTER_LO_BASE_IDX 0
+#define mmMMEA1_PERFCOUNTER_HI 0x033d
+#define mmMMEA1_PERFCOUNTER_HI_BASE_IDX 0
+#define mmMMEA1_PERFCOUNTER0_CFG 0x033e
+#define mmMMEA1_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmMMEA1_PERFCOUNTER1_CFG 0x033f
+#define mmMMEA1_PERFCOUNTER1_CFG_BASE_IDX 0
+#define mmMMEA1_PERFCOUNTER_RSLT_CNTL 0x0340
+#define mmMMEA1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define mmMMEA1_EDC_CNT 0x0346
+#define mmMMEA1_EDC_CNT_BASE_IDX 0
+#define mmMMEA1_EDC_CNT2 0x0347
+#define mmMMEA1_EDC_CNT2_BASE_IDX 0
+#define mmMMEA1_DSM_CNTL 0x0348
+#define mmMMEA1_DSM_CNTL_BASE_IDX 0
+#define mmMMEA1_DSM_CNTLA 0x0349
+#define mmMMEA1_DSM_CNTLA_BASE_IDX 0
+#define mmMMEA1_DSM_CNTLB 0x034a
+#define mmMMEA1_DSM_CNTLB_BASE_IDX 0
+#define mmMMEA1_DSM_CNTL2 0x034b
+#define mmMMEA1_DSM_CNTL2_BASE_IDX 0
+#define mmMMEA1_DSM_CNTL2A 0x034c
+#define mmMMEA1_DSM_CNTL2A_BASE_IDX 0
+#define mmMMEA1_DSM_CNTL2B 0x034d
+#define mmMMEA1_DSM_CNTL2B_BASE_IDX 0
+#define mmMMEA1_CGTT_CLK_CTRL 0x034f
+#define mmMMEA1_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmMMEA1_EDC_MODE 0x0350
+#define mmMMEA1_EDC_MODE_BASE_IDX 0
+#define mmMMEA1_ERR_STATUS 0x0351
+#define mmMMEA1_ERR_STATUS_BASE_IDX 0
+#define mmMMEA1_MISC2 0x0352
+#define mmMMEA1_MISC2_BASE_IDX 0
+
+
+// addressBlock: mmhub_pctldec
+// base address: 0x68e00
+#define mmPCTL_MISC 0x0380
+#define mmPCTL_MISC_BASE_IDX 0
+#define mmPCTL_MMHUB_DEEPSLEEP 0x0381
+#define mmPCTL_MMHUB_DEEPSLEEP_BASE_IDX 0
+#define mmPCTL_MMHUB_DEEPSLEEP_OVERRIDE 0x0382
+#define mmPCTL_MMHUB_DEEPSLEEP_OVERRIDE_BASE_IDX 0
+#define mmPCTL_PG_IGNORE_DEEPSLEEP 0x0383
+#define mmPCTL_PG_IGNORE_DEEPSLEEP_BASE_IDX 0
+#define mmPCTL_PG_DAGB 0x0384
+#define mmPCTL_PG_DAGB_BASE_IDX 0
+#define mmPCTL0_RENG_RAM_INDEX 0x0385
+#define mmPCTL0_RENG_RAM_INDEX_BASE_IDX 0
+#define mmPCTL0_RENG_RAM_DATA 0x0386
+#define mmPCTL0_RENG_RAM_DATA_BASE_IDX 0
+#define mmPCTL0_RENG_EXECUTE 0x0387
+#define mmPCTL0_RENG_EXECUTE_BASE_IDX 0
+#define mmPCTL0_MISC 0x0388
+#define mmPCTL0_MISC_BASE_IDX 0
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE0 0x0389
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 0
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE1 0x038a
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 0
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE2 0x038b
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 0
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE3 0x038c
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 0
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE4 0x038d
+#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 0
+#define mmPCTL0_STCTRL_REGISTER_SAVE_EXCL_SET 0x038e
+#define mmPCTL0_STCTRL_REGISTER_SAVE_EXCL_SET_BASE_IDX 0
+#define mmPCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1 0x038f
+#define mmPCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 0
+#define mmPCTL1_RENG_RAM_INDEX 0x0390
+#define mmPCTL1_RENG_RAM_INDEX_BASE_IDX 0
+#define mmPCTL1_RENG_RAM_DATA 0x0391
+#define mmPCTL1_RENG_RAM_DATA_BASE_IDX 0
+#define mmPCTL1_RENG_EXECUTE 0x0392
+#define mmPCTL1_RENG_EXECUTE_BASE_IDX 0
+#define mmPCTL1_MISC 0x0393
+#define mmPCTL1_MISC_BASE_IDX 0
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE0 0x0394
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 0
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE1 0x0395
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 0
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE2 0x0396
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 0
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE3 0x0397
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 0
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE4 0x0398
+#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 0
+#define mmPCTL1_STCTRL_REGISTER_SAVE_EXCL_SET 0x0399
+#define mmPCTL1_STCTRL_REGISTER_SAVE_EXCL_SET_BASE_IDX 0
+#define mmPCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1 0x039a
+#define mmPCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 0
+#define mmPCTL2_RENG_RAM_INDEX 0x039b
+#define mmPCTL2_RENG_RAM_INDEX_BASE_IDX 0
+#define mmPCTL2_RENG_RAM_DATA 0x039c
+#define mmPCTL2_RENG_RAM_DATA_BASE_IDX 0
+#define mmPCTL2_RENG_EXECUTE 0x039d
+#define mmPCTL2_RENG_EXECUTE_BASE_IDX 0
+#define mmPCTL2_MISC 0x039e
+#define mmPCTL2_MISC_BASE_IDX 0
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE0 0x039f
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 0
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE1 0x03a0
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 0
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE2 0x03a1
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 0
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE3 0x03a2
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 0
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE4 0x03a3
+#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 0
+#define mmPCTL2_STCTRL_REGISTER_SAVE_EXCL_SET 0x03a4
+#define mmPCTL2_STCTRL_REGISTER_SAVE_EXCL_SET_BASE_IDX 0
+#define mmPCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1 0x03a5
+#define mmPCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 0
+
+
+// addressBlock: mmhub_l1tlb_vml1dec
+// base address: 0x69600
+#define mmMC_VM_MX_L1_TLB0_STATUS 0x0588
+#define mmMC_VM_MX_L1_TLB0_STATUS_BASE_IDX 0
+#define mmMC_VM_MX_L1_TLB1_STATUS 0x0589
+#define mmMC_VM_MX_L1_TLB1_STATUS_BASE_IDX 0
+#define mmMC_VM_MX_L1_TLB2_STATUS 0x058a
+#define mmMC_VM_MX_L1_TLB2_STATUS_BASE_IDX 0
+#define mmMC_VM_MX_L1_TLB3_STATUS 0x058b
+#define mmMC_VM_MX_L1_TLB3_STATUS_BASE_IDX 0
+#define mmMC_VM_MX_L1_TLB4_STATUS 0x058c
+#define mmMC_VM_MX_L1_TLB4_STATUS_BASE_IDX 0
+#define mmMC_VM_MX_L1_TLB5_STATUS 0x058d
+#define mmMC_VM_MX_L1_TLB5_STATUS_BASE_IDX 0
+#define mmMC_VM_MX_L1_TLB6_STATUS 0x058e
+#define mmMC_VM_MX_L1_TLB6_STATUS_BASE_IDX 0
+#define mmMC_VM_MX_L1_TLB7_STATUS 0x058f
+#define mmMC_VM_MX_L1_TLB7_STATUS_BASE_IDX 0
+
+
+// addressBlock: mmhub_l1tlb_vml1pldec
+// base address: 0x69650
+#define mmMC_VM_MX_L1_PERFCOUNTER0_CFG 0x0594
+#define mmMC_VM_MX_L1_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmMC_VM_MX_L1_PERFCOUNTER1_CFG 0x0595
+#define mmMC_VM_MX_L1_PERFCOUNTER1_CFG_BASE_IDX 0
+#define mmMC_VM_MX_L1_PERFCOUNTER2_CFG 0x0596
+#define mmMC_VM_MX_L1_PERFCOUNTER2_CFG_BASE_IDX 0
+#define mmMC_VM_MX_L1_PERFCOUNTER3_CFG 0x0597
+#define mmMC_VM_MX_L1_PERFCOUNTER3_CFG_BASE_IDX 0
+#define mmMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL 0x0598
+#define mmMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+
+
+// addressBlock: mmhub_l1tlb_vml1prdec
+// base address: 0x69670
+#define mmMC_VM_MX_L1_PERFCOUNTER_LO 0x059c
+#define mmMC_VM_MX_L1_PERFCOUNTER_LO_BASE_IDX 0
+#define mmMC_VM_MX_L1_PERFCOUNTER_HI 0x059d
+#define mmMC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_atcl2dec
+// base address: 0x69900
+#define mmATC_L2_CNTL 0x0640
+#define mmATC_L2_CNTL_BASE_IDX 0
+#define mmATC_L2_CNTL2 0x0641
+#define mmATC_L2_CNTL2_BASE_IDX 0
+#define mmATC_L2_CACHE_DATA0 0x0644
+#define mmATC_L2_CACHE_DATA0_BASE_IDX 0
+#define mmATC_L2_CACHE_DATA1 0x0645
+#define mmATC_L2_CACHE_DATA1_BASE_IDX 0
+#define mmATC_L2_CACHE_DATA2 0x0646
+#define mmATC_L2_CACHE_DATA2_BASE_IDX 0
+#define mmATC_L2_CNTL3 0x0647
+#define mmATC_L2_CNTL3_BASE_IDX 0
+#define mmATC_L2_STATUS 0x0648
+#define mmATC_L2_STATUS_BASE_IDX 0
+#define mmATC_L2_STATUS2 0x0649
+#define mmATC_L2_STATUS2_BASE_IDX 0
+#define mmATC_L2_MISC_CG 0x064a
+#define mmATC_L2_MISC_CG_BASE_IDX 0
+#define mmATC_L2_MEM_POWER_LS 0x064b
+#define mmATC_L2_MEM_POWER_LS_BASE_IDX 0
+#define mmATC_L2_CGTT_CLK_CTRL 0x064c
+#define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_vml2pfdec
+// base address: 0x69a00
+#define mmVM_L2_CNTL 0x0680
+#define mmVM_L2_CNTL_BASE_IDX 0
+#define mmVM_L2_CNTL2 0x0681
+#define mmVM_L2_CNTL2_BASE_IDX 0
+#define mmVM_L2_CNTL3 0x0682
+#define mmVM_L2_CNTL3_BASE_IDX 0
+#define mmVM_L2_STATUS 0x0683
+#define mmVM_L2_STATUS_BASE_IDX 0
+#define mmVM_DUMMY_PAGE_FAULT_CNTL 0x0684
+#define mmVM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 0
+#define mmVM_DUMMY_PAGE_FAULT_ADDR_LO32 0x0685
+#define mmVM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 0
+#define mmVM_DUMMY_PAGE_FAULT_ADDR_HI32 0x0686
+#define mmVM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_CNTL 0x0687
+#define mmVM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_CNTL2 0x0688
+#define mmVM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_MM_CNTL3 0x0689
+#define mmVM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_MM_CNTL4 0x068a
+#define mmVM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_STATUS 0x068b
+#define mmVM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_ADDR_LO32 0x068c
+#define mmVM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_ADDR_HI32 0x068d
+#define mmVM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x068e
+#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x068f
+#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x0691
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x0692
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x0693
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x0694
+#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x0695
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 0
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x0696
+#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 0
+#define mmVM_L2_CNTL4 0x0697
+#define mmVM_L2_CNTL4_BASE_IDX 0
+#define mmVM_L2_MM_GROUP_RT_CLASSES 0x0698
+#define mmVM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+#define mmVM_L2_BANK_SELECT_RESERVED_CID 0x0699
+#define mmVM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 0
+#define mmVM_L2_BANK_SELECT_RESERVED_CID2 0x069a
+#define mmVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
+#define mmVM_L2_CACHE_PARITY_CNTL 0x069b
+#define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define mmVM_L2_CGTT_CLK_CTRL 0x069e
+#define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_vml2vcdec
+// base address: 0x69b00
+#define mmVM_CONTEXT0_CNTL 0x06c0
+#define mmVM_CONTEXT0_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT1_CNTL 0x06c1
+#define mmVM_CONTEXT1_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT2_CNTL 0x06c2
+#define mmVM_CONTEXT2_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT3_CNTL 0x06c3
+#define mmVM_CONTEXT3_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT4_CNTL 0x06c4
+#define mmVM_CONTEXT4_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT5_CNTL 0x06c5
+#define mmVM_CONTEXT5_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT6_CNTL 0x06c6
+#define mmVM_CONTEXT6_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT7_CNTL 0x06c7
+#define mmVM_CONTEXT7_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT8_CNTL 0x06c8
+#define mmVM_CONTEXT8_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT9_CNTL 0x06c9
+#define mmVM_CONTEXT9_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT10_CNTL 0x06ca
+#define mmVM_CONTEXT10_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT11_CNTL 0x06cb
+#define mmVM_CONTEXT11_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT12_CNTL 0x06cc
+#define mmVM_CONTEXT12_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT13_CNTL 0x06cd
+#define mmVM_CONTEXT13_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT14_CNTL 0x06ce
+#define mmVM_CONTEXT14_CNTL_BASE_IDX 0
+#define mmVM_CONTEXT15_CNTL 0x06cf
+#define mmVM_CONTEXT15_CNTL_BASE_IDX 0
+#define mmVM_CONTEXTS_DISABLE 0x06d0
+#define mmVM_CONTEXTS_DISABLE_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_SEM 0x06d1
+#define mmVM_INVALIDATE_ENG0_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_SEM 0x06d2
+#define mmVM_INVALIDATE_ENG1_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_SEM 0x06d3
+#define mmVM_INVALIDATE_ENG2_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_SEM 0x06d4
+#define mmVM_INVALIDATE_ENG3_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_SEM 0x06d5
+#define mmVM_INVALIDATE_ENG4_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_SEM 0x06d6
+#define mmVM_INVALIDATE_ENG5_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_SEM 0x06d7
+#define mmVM_INVALIDATE_ENG6_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_SEM 0x06d8
+#define mmVM_INVALIDATE_ENG7_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_SEM 0x06d9
+#define mmVM_INVALIDATE_ENG8_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_SEM 0x06da
+#define mmVM_INVALIDATE_ENG9_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_SEM 0x06db
+#define mmVM_INVALIDATE_ENG10_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_SEM 0x06dc
+#define mmVM_INVALIDATE_ENG11_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_SEM 0x06dd
+#define mmVM_INVALIDATE_ENG12_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_SEM 0x06de
+#define mmVM_INVALIDATE_ENG13_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_SEM 0x06df
+#define mmVM_INVALIDATE_ENG14_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_SEM 0x06e0
+#define mmVM_INVALIDATE_ENG15_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_SEM 0x06e1
+#define mmVM_INVALIDATE_ENG16_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_SEM 0x06e2
+#define mmVM_INVALIDATE_ENG17_SEM_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_REQ 0x06e3
+#define mmVM_INVALIDATE_ENG0_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_REQ 0x06e4
+#define mmVM_INVALIDATE_ENG1_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_REQ 0x06e5
+#define mmVM_INVALIDATE_ENG2_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_REQ 0x06e6
+#define mmVM_INVALIDATE_ENG3_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_REQ 0x06e7
+#define mmVM_INVALIDATE_ENG4_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_REQ 0x06e8
+#define mmVM_INVALIDATE_ENG5_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_REQ 0x06e9
+#define mmVM_INVALIDATE_ENG6_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_REQ 0x06ea
+#define mmVM_INVALIDATE_ENG7_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_REQ 0x06eb
+#define mmVM_INVALIDATE_ENG8_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_REQ 0x06ec
+#define mmVM_INVALIDATE_ENG9_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_REQ 0x06ed
+#define mmVM_INVALIDATE_ENG10_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_REQ 0x06ee
+#define mmVM_INVALIDATE_ENG11_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_REQ 0x06ef
+#define mmVM_INVALIDATE_ENG12_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_REQ 0x06f0
+#define mmVM_INVALIDATE_ENG13_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_REQ 0x06f1
+#define mmVM_INVALIDATE_ENG14_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_REQ 0x06f2
+#define mmVM_INVALIDATE_ENG15_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_REQ 0x06f3
+#define mmVM_INVALIDATE_ENG16_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_REQ 0x06f4
+#define mmVM_INVALIDATE_ENG17_REQ_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_ACK 0x06f5
+#define mmVM_INVALIDATE_ENG0_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_ACK 0x06f6
+#define mmVM_INVALIDATE_ENG1_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_ACK 0x06f7
+#define mmVM_INVALIDATE_ENG2_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_ACK 0x06f8
+#define mmVM_INVALIDATE_ENG3_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_ACK 0x06f9
+#define mmVM_INVALIDATE_ENG4_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_ACK 0x06fa
+#define mmVM_INVALIDATE_ENG5_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_ACK 0x06fb
+#define mmVM_INVALIDATE_ENG6_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_ACK 0x06fc
+#define mmVM_INVALIDATE_ENG7_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_ACK 0x06fd
+#define mmVM_INVALIDATE_ENG8_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_ACK 0x06fe
+#define mmVM_INVALIDATE_ENG9_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_ACK 0x06ff
+#define mmVM_INVALIDATE_ENG10_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_ACK 0x0700
+#define mmVM_INVALIDATE_ENG11_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_ACK 0x0701
+#define mmVM_INVALIDATE_ENG12_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_ACK 0x0702
+#define mmVM_INVALIDATE_ENG13_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_ACK 0x0703
+#define mmVM_INVALIDATE_ENG14_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_ACK 0x0704
+#define mmVM_INVALIDATE_ENG15_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_ACK 0x0705
+#define mmVM_INVALIDATE_ENG16_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_ACK 0x0706
+#define mmVM_INVALIDATE_ENG17_ACK_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x0707
+#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x0708
+#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x0709
+#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x070a
+#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x070b
+#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x070c
+#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x070d
+#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x070e
+#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x070f
+#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x0710
+#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x0711
+#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x0712
+#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x0713
+#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x0714
+#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x0715
+#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x0716
+#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x0717
+#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x0718
+#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x0719
+#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x071a
+#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x071b
+#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x071c
+#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x071d
+#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x071e
+#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x071f
+#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x0720
+#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x0721
+#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x0722
+#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x0723
+#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x0724
+#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x0725
+#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x0726
+#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0727
+#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0728
+#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x0729
+#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 0
+#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x072a
+#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x072b
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x072c
+#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x072d
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x072e
+#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x072f
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x0730
+#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x0731
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x0732
+#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x0733
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x0734
+#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x0735
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x0736
+#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x0737
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x0738
+#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x0739
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x073a
+#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x073b
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x073c
+#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x073d
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x073e
+#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x073f
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x0740
+#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x0741
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x0742
+#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x0743
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x0744
+#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x0745
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x0746
+#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x0747
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x0748
+#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x0749
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x074a
+#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x074b
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x074c
+#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x074d
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x074e
+#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x074f
+#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x0750
+#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x0751
+#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x0752
+#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x0753
+#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x0754
+#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x0755
+#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x0756
+#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x0757
+#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x0758
+#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x0759
+#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x075a
+#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x075b
+#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x075c
+#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x075d
+#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x075e
+#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x075f
+#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x0760
+#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x0761
+#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x0762
+#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x0763
+#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x0764
+#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x0765
+#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x0766
+#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x0767
+#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x0768
+#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x0769
+#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x076a
+#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x076b
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x076c
+#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x076d
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x076e
+#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x076f
+#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x0770
+#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0771
+#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0772
+#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x0773
+#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x0774
+#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0775
+#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0776
+#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0777
+#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0778
+#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0779
+#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x077a
+#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x077b
+#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x077c
+#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x077d
+#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x077e
+#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x077f
+#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x0780
+#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x0781
+#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x0782
+#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x0783
+#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x0784
+#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x0785
+#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x0786
+#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x0787
+#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x0788
+#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x0789
+#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x078a
+#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_vml2pldec
+// base address: 0x69e90
+#define mmMC_VM_L2_PERFCOUNTER0_CFG 0x07a4
+#define mmMC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmMC_VM_L2_PERFCOUNTER1_CFG 0x07a5
+#define mmMC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 0
+#define mmMC_VM_L2_PERFCOUNTER2_CFG 0x07a6
+#define mmMC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 0
+#define mmMC_VM_L2_PERFCOUNTER3_CFG 0x07a7
+#define mmMC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 0
+#define mmMC_VM_L2_PERFCOUNTER4_CFG 0x07a8
+#define mmMC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 0
+#define mmMC_VM_L2_PERFCOUNTER5_CFG 0x07a9
+#define mmMC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 0
+#define mmMC_VM_L2_PERFCOUNTER6_CFG 0x07aa
+#define mmMC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 0
+#define mmMC_VM_L2_PERFCOUNTER7_CFG 0x07ab
+#define mmMC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 0
+#define mmMC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x07ac
+#define mmMC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_vml2prdec
+// base address: 0x69ee0
+#define mmMC_VM_L2_PERFCOUNTER_LO 0x07b8
+#define mmMC_VM_L2_PERFCOUNTER_LO_BASE_IDX 0
+#define mmMC_VM_L2_PERFCOUNTER_HI 0x07b9
+#define mmMC_VM_L2_PERFCOUNTER_HI_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_vmsharedhvdec
+// base address: 0x69f30
+#define mmMC_VM_FB_SIZE_OFFSET_VF0 0x07cc
+#define mmMC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF1 0x07cd
+#define mmMC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF2 0x07ce
+#define mmMC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF3 0x07cf
+#define mmMC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF4 0x07d0
+#define mmMC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF5 0x07d1
+#define mmMC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF6 0x07d2
+#define mmMC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF7 0x07d3
+#define mmMC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF8 0x07d4
+#define mmMC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF9 0x07d5
+#define mmMC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF10 0x07d6
+#define mmMC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF11 0x07d7
+#define mmMC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF12 0x07d8
+#define mmMC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF13 0x07d9
+#define mmMC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF14 0x07da
+#define mmMC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 0
+#define mmMC_VM_FB_SIZE_OFFSET_VF15 0x07db
+#define mmMC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 0
+#define mmVM_IOMMU_MMIO_CNTRL_1 0x07dc
+#define mmVM_IOMMU_MMIO_CNTRL_1_BASE_IDX 0
+#define mmMC_VM_MARC_BASE_LO_0 0x07dd
+#define mmMC_VM_MARC_BASE_LO_0_BASE_IDX 0
+#define mmMC_VM_MARC_BASE_LO_1 0x07de
+#define mmMC_VM_MARC_BASE_LO_1_BASE_IDX 0
+#define mmMC_VM_MARC_BASE_LO_2 0x07df
+#define mmMC_VM_MARC_BASE_LO_2_BASE_IDX 0
+#define mmMC_VM_MARC_BASE_LO_3 0x07e0
+#define mmMC_VM_MARC_BASE_LO_3_BASE_IDX 0
+#define mmMC_VM_MARC_BASE_HI_0 0x07e1
+#define mmMC_VM_MARC_BASE_HI_0_BASE_IDX 0
+#define mmMC_VM_MARC_BASE_HI_1 0x07e2
+#define mmMC_VM_MARC_BASE_HI_1_BASE_IDX 0
+#define mmMC_VM_MARC_BASE_HI_2 0x07e3
+#define mmMC_VM_MARC_BASE_HI_2_BASE_IDX 0
+#define mmMC_VM_MARC_BASE_HI_3 0x07e4
+#define mmMC_VM_MARC_BASE_HI_3_BASE_IDX 0
+#define mmMC_VM_MARC_RELOC_LO_0 0x07e5
+#define mmMC_VM_MARC_RELOC_LO_0_BASE_IDX 0
+#define mmMC_VM_MARC_RELOC_LO_1 0x07e6
+#define mmMC_VM_MARC_RELOC_LO_1_BASE_IDX 0
+#define mmMC_VM_MARC_RELOC_LO_2 0x07e7
+#define mmMC_VM_MARC_RELOC_LO_2_BASE_IDX 0
+#define mmMC_VM_MARC_RELOC_LO_3 0x07e8
+#define mmMC_VM_MARC_RELOC_LO_3_BASE_IDX 0
+#define mmMC_VM_MARC_RELOC_HI_0 0x07e9
+#define mmMC_VM_MARC_RELOC_HI_0_BASE_IDX 0
+#define mmMC_VM_MARC_RELOC_HI_1 0x07ea
+#define mmMC_VM_MARC_RELOC_HI_1_BASE_IDX 0
+#define mmMC_VM_MARC_RELOC_HI_2 0x07eb
+#define mmMC_VM_MARC_RELOC_HI_2_BASE_IDX 0
+#define mmMC_VM_MARC_RELOC_HI_3 0x07ec
+#define mmMC_VM_MARC_RELOC_HI_3_BASE_IDX 0
+#define mmMC_VM_MARC_LEN_LO_0 0x07ed
+#define mmMC_VM_MARC_LEN_LO_0_BASE_IDX 0
+#define mmMC_VM_MARC_LEN_LO_1 0x07ee
+#define mmMC_VM_MARC_LEN_LO_1_BASE_IDX 0
+#define mmMC_VM_MARC_LEN_LO_2 0x07ef
+#define mmMC_VM_MARC_LEN_LO_2_BASE_IDX 0
+#define mmMC_VM_MARC_LEN_LO_3 0x07f0
+#define mmMC_VM_MARC_LEN_LO_3_BASE_IDX 0
+#define mmMC_VM_MARC_LEN_HI_0 0x07f1
+#define mmMC_VM_MARC_LEN_HI_0_BASE_IDX 0
+#define mmMC_VM_MARC_LEN_HI_1 0x07f2
+#define mmMC_VM_MARC_LEN_HI_1_BASE_IDX 0
+#define mmMC_VM_MARC_LEN_HI_2 0x07f3
+#define mmMC_VM_MARC_LEN_HI_2_BASE_IDX 0
+#define mmMC_VM_MARC_LEN_HI_3 0x07f4
+#define mmMC_VM_MARC_LEN_HI_3_BASE_IDX 0
+#define mmVM_IOMMU_CONTROL_REGISTER 0x07f5
+#define mmVM_IOMMU_CONTROL_REGISTER_BASE_IDX 0
+#define mmVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x07f6
+#define mmVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL 0x07f7
+#define mmVM_PCIE_ATS_CNTL_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_0 0x07f8
+#define mmVM_PCIE_ATS_CNTL_VF_0_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_1 0x07f9
+#define mmVM_PCIE_ATS_CNTL_VF_1_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_2 0x07fa
+#define mmVM_PCIE_ATS_CNTL_VF_2_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_3 0x07fb
+#define mmVM_PCIE_ATS_CNTL_VF_3_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_4 0x07fc
+#define mmVM_PCIE_ATS_CNTL_VF_4_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_5 0x07fd
+#define mmVM_PCIE_ATS_CNTL_VF_5_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_6 0x07fe
+#define mmVM_PCIE_ATS_CNTL_VF_6_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_7 0x07ff
+#define mmVM_PCIE_ATS_CNTL_VF_7_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_8 0x0800
+#define mmVM_PCIE_ATS_CNTL_VF_8_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_9 0x0801
+#define mmVM_PCIE_ATS_CNTL_VF_9_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_10 0x0802
+#define mmVM_PCIE_ATS_CNTL_VF_10_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_11 0x0803
+#define mmVM_PCIE_ATS_CNTL_VF_11_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_12 0x0804
+#define mmVM_PCIE_ATS_CNTL_VF_12_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_13 0x0805
+#define mmVM_PCIE_ATS_CNTL_VF_13_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_14 0x0806
+#define mmVM_PCIE_ATS_CNTL_VF_14_BASE_IDX 0
+#define mmVM_PCIE_ATS_CNTL_VF_15 0x0807
+#define mmVM_PCIE_ATS_CNTL_VF_15_BASE_IDX 0
+#define mmUTCL2_CGTT_CLK_CTRL 0x0808
+#define mmUTCL2_CGTT_CLK_CTRL_BASE_IDX 0
+#define mmMC_SHARED_ACTIVE_FCN_ID 0x0809
+#define mmMC_SHARED_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmMC_VM_XGMI_GPUIOV_ENABLE 0x080a
+#define mmMC_VM_XGMI_GPUIOV_ENABLE_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_NB_MMIOBASE 0x0810
+#define mmMC_VM_NB_MMIOBASE_BASE_IDX 0
+#define mmMC_VM_NB_MMIOLIMIT 0x0811
+#define mmMC_VM_NB_MMIOLIMIT_BASE_IDX 0
+#define mmMC_VM_NB_PCI_CTRL 0x0812
+#define mmMC_VM_NB_PCI_CTRL_BASE_IDX 0
+#define mmMC_VM_NB_PCI_ARB 0x0813
+#define mmMC_VM_NB_PCI_ARB_BASE_IDX 0
+#define mmMC_VM_NB_TOP_OF_DRAM_SLOT1 0x0814
+#define mmMC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 0
+#define mmMC_VM_NB_LOWER_TOP_OF_DRAM2 0x0815
+#define mmMC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 0
+#define mmMC_VM_NB_UPPER_TOP_OF_DRAM2 0x0816
+#define mmMC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 0
+#define mmMC_VM_FB_OFFSET 0x0817
+#define mmMC_VM_FB_OFFSET_BASE_IDX 0
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x0818
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 0
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x0819
+#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 0
+#define mmMC_VM_STEERING 0x081a
+#define mmMC_VM_STEERING_BASE_IDX 0
+#define mmMC_SHARED_VIRT_RESET_REQ 0x081b
+#define mmMC_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define mmMC_MEM_POWER_LS 0x081c
+#define mmMC_MEM_POWER_LS_BASE_IDX 0
+#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_START 0x081d
+#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 0
+#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_END 0x081e
+#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 0
+#define mmMC_VM_APT_CNTL 0x081f
+#define mmMC_VM_APT_CNTL_BASE_IDX 0
+#define mmMC_VM_LOCAL_HBM_ADDRESS_START 0x0820
+#define mmMC_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 0
+#define mmMC_VM_LOCAL_HBM_ADDRESS_END 0x0821
+#define mmMC_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 0
+#define mmMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x0822
+#define mmMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_vmsharedvcdec
+// base address: 0x6a0b0
+#define mmMC_VM_FB_LOCATION_BASE 0x082c
+#define mmMC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define mmMC_VM_FB_LOCATION_TOP 0x082d
+#define mmMC_VM_FB_LOCATION_TOP_BASE_IDX 0
+#define mmMC_VM_AGP_TOP 0x082e
+#define mmMC_VM_AGP_TOP_BASE_IDX 0
+#define mmMC_VM_AGP_BOT 0x082f
+#define mmMC_VM_AGP_BOT_BASE_IDX 0
+#define mmMC_VM_AGP_BASE 0x0830
+#define mmMC_VM_AGP_BASE_BASE_IDX 0
+#define mmMC_VM_SYSTEM_APERTURE_LOW_ADDR 0x0831
+#define mmMC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 0
+#define mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0832
+#define mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 0
+#define mmMC_VM_MX_L1_TLB_CNTL 0x0833
+#define mmMC_VM_MX_L1_TLB_CNTL_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntrdec
+// base address: 0x6a100
+#define mmATC_L2_PERFCOUNTER_LO 0x0840
+#define mmATC_L2_PERFCOUNTER_LO_BASE_IDX 0
+#define mmATC_L2_PERFCOUNTER_HI 0x0841
+#define mmATC_L2_PERFCOUNTER_HI_BASE_IDX 0
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntldec
+// base address: 0x6a120
+#define mmATC_L2_PERFCOUNTER0_CFG 0x0848
+#define mmATC_L2_PERFCOUNTER0_CFG_BASE_IDX 0
+#define mmATC_L2_PERFCOUNTER1_CFG 0x0849
+#define mmATC_L2_PERFCOUNTER1_CFG_BASE_IDX 0
+#define mmATC_L2_PERFCOUNTER_RSLT_CNTL 0x084a
+#define mmATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_sh_mask.h
new file mode 100644
index 0000000..3936c1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_sh_mask.h
@@ -0,0 +1,10265 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_3_0_SH_MASK_HEADER
+#define _mmhub_9_3_0_SH_MASK_HEADER
+
+
+// addressBlock: mmhub_dagbdec
+//DAGB0_RDCLI0
+#define DAGB0_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI1
+#define DAGB0_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI2
+#define DAGB0_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI3
+#define DAGB0_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI4
+#define DAGB0_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI5
+#define DAGB0_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI6
+#define DAGB0_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI7
+#define DAGB0_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI8
+#define DAGB0_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI9
+#define DAGB0_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI10
+#define DAGB0_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI11
+#define DAGB0_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI12
+#define DAGB0_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI13
+#define DAGB0_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI14
+#define DAGB0_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RDCLI15
+#define DAGB0_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB0_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB0_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB0_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB0_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB0_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB0_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB0_RD_CNTL
+#define DAGB0_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB0_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB0_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB0_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB0_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB0_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB0_RD_GMI_CNTL
+#define DAGB0_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB0_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB0_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB0_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB0_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB0_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB0_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB0_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB0_RD_ADDR_DAGB
+#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB0_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB0_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB0_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB0_RD_CGTT_CLK_CTRL
+#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB0_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB0_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB0_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB0_RD_VC0_CNTL
+#define DAGB0_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC1_CNTL
+#define DAGB0_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC2_CNTL
+#define DAGB0_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC3_CNTL
+#define DAGB0_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC4_CNTL
+#define DAGB0_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC5_CNTL
+#define DAGB0_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC6_CNTL
+#define DAGB0_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_VC7_CNTL
+#define DAGB0_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_RD_CNTL_MISC
+#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB0_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB0_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB0_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB0_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+//DAGB0_RD_TLB_CREDIT
+#define DAGB0_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB0_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB0_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB0_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB0_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB0_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB0_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB0_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB0_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB0_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB0_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB0_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB0_RDCLI_ASK_PENDING
+#define DAGB0_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_GO_PENDING
+#define DAGB0_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_GBLSEND_PENDING
+#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_TLB_PENDING
+#define DAGB0_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_OARB_PENDING
+#define DAGB0_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_RDCLI_OSD_PENDING
+#define DAGB0_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI0
+#define DAGB0_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI1
+#define DAGB0_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI2
+#define DAGB0_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI3
+#define DAGB0_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI4
+#define DAGB0_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI5
+#define DAGB0_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI6
+#define DAGB0_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI7
+#define DAGB0_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI8
+#define DAGB0_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI9
+#define DAGB0_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI10
+#define DAGB0_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI11
+#define DAGB0_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI12
+#define DAGB0_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI13
+#define DAGB0_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI14
+#define DAGB0_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WRCLI15
+#define DAGB0_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB0_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB0_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB0_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB0_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB0_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB0_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB0_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB0_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB0_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB0_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB0_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB0_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB0_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB0_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB0_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB0_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB0_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB0_WR_CNTL
+#define DAGB0_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB0_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB0_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB0_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB0_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB0_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB0_WR_GMI_CNTL
+#define DAGB0_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB0_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB0_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB0_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB0_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB0_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB0_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB0_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB0_WR_ADDR_DAGB
+#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB0_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB0_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB0_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB0_WR_CGTT_CLK_CTRL
+#define DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB0_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB
+#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB0_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB0_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB0_WR_DATA_DAGB_MAX_BURST0
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB_MAX_BURST1
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB0_WR_VC0_CNTL
+#define DAGB0_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC1_CNTL
+#define DAGB0_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC2_CNTL
+#define DAGB0_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC3_CNTL
+#define DAGB0_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC4_CNTL
+#define DAGB0_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC5_CNTL
+#define DAGB0_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC6_CNTL
+#define DAGB0_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_VC7_CNTL
+#define DAGB0_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB0_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB0_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB0_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB0_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB0_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB0_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB0_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB0_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB0_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB0_WR_CNTL_MISC
+#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB0_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB0_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB0_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB0_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+//DAGB0_WR_TLB_CREDIT
+#define DAGB0_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB0_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB0_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB0_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB0_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB0_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB0_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB0_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB0_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB0_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB0_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB0_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB0_WR_DATA_CREDIT
+#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB0_WR_MISC_CREDIT
+#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB0_WRCLI_ASK_PENDING
+#define DAGB0_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GO_PENDING
+#define DAGB0_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GBLSEND_PENDING
+#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_TLB_PENDING
+#define DAGB0_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_OARB_PENDING
+#define DAGB0_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_OSD_PENDING
+#define DAGB0_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_DBUS_ASK_PENDING
+#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_DBUS_GO_PENDING
+#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_DAGB_DLY
+#define DAGB0_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB0_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB0_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB0_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB0_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB0_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB0_CNTL_MISC
+#define DAGB0_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB0_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB0_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB0_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB0_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB0_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB0_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB0_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB0_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB0_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB0_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB0_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB0_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB0_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB0_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB0_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB0_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB0_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB0_CNTL_MISC2
+#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB0_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB0_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+//DAGB0_FIFO_EMPTY
+#define DAGB0_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB0_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB0_FIFO_FULL
+#define DAGB0_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB0_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB0_WR_CREDITS_FULL
+#define DAGB0_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB0_WR_CREDITS_FULL__FULL_MASK 0x0007FFFFL
+//DAGB0_RD_CREDITS_FULL
+#define DAGB0_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB0_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB0_PERFCOUNTER_LO
+#define DAGB0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB0_PERFCOUNTER_HI
+#define DAGB0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB0_PERFCOUNTER0_CFG
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB0_PERFCOUNTER1_CFG
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB0_PERFCOUNTER2_CFG
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB0_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB0_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB0_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB0_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB0_PERFCOUNTER_RSLT_CNTL
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB0_RESERVE0
+#define DAGB0_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE1
+#define DAGB0_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE2
+#define DAGB0_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE3
+#define DAGB0_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE4
+#define DAGB0_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE5
+#define DAGB0_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE6
+#define DAGB0_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE7
+#define DAGB0_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE8
+#define DAGB0_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE9
+#define DAGB0_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE10
+#define DAGB0_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE11
+#define DAGB0_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE12
+#define DAGB0_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE13
+#define DAGB0_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE14
+#define DAGB0_RESERVE14__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE14__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE15
+#define DAGB0_RESERVE15__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE15__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE16
+#define DAGB0_RESERVE16__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE16__RESERVE_MASK 0xFFFFFFFFL
+//DAGB0_RESERVE17
+#define DAGB0_RESERVE17__RESERVE__SHIFT 0x0
+#define DAGB0_RESERVE17__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI0
+#define DAGB1_RDCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI0__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI0__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI0__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI1
+#define DAGB1_RDCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI1__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI1__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI1__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI2
+#define DAGB1_RDCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI2__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI2__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI2__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI3
+#define DAGB1_RDCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI3__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI3__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI3__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI4
+#define DAGB1_RDCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI4__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI4__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI4__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI5
+#define DAGB1_RDCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI5__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI5__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI5__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI6
+#define DAGB1_RDCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI6__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI6__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI6__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI7
+#define DAGB1_RDCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI7__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI7__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI7__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI8
+#define DAGB1_RDCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI8__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI8__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI8__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI9
+#define DAGB1_RDCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI9__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI9__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI9__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI10
+#define DAGB1_RDCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI10__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI10__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI10__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI11
+#define DAGB1_RDCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI11__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI11__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI11__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI12
+#define DAGB1_RDCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI12__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI12__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI12__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI13
+#define DAGB1_RDCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI13__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI13__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI13__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI14
+#define DAGB1_RDCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI14__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI14__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI14__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RDCLI15
+#define DAGB1_RDCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_RDCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB1_RDCLI15__URG_LOW__SHIFT 0x8
+#define DAGB1_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_RDCLI15__MAX_BW__SHIFT 0xd
+#define DAGB1_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_RDCLI15__MIN_BW__SHIFT 0x16
+#define DAGB1_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_RDCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB1_RDCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_RDCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_RDCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB1_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_RDCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB1_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_RDCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB1_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_RDCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB1_RD_CNTL
+#define DAGB1_RD_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB1_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB1_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB1_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB1_RD_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB1_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB1_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB1_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB1_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB1_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB1_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB1_RD_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB1_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB1_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB1_RD_GMI_CNTL
+#define DAGB1_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB1_RD_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB1_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB1_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB1_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB1_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB1_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB1_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB1_RD_ADDR_DAGB
+#define DAGB1_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB1_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB1_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB1_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB1_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB1_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB1_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB1_RD_OUTPUT_DAGB_MAX_BURST
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB1_RD_CGTT_CLK_CTRL
+#define DAGB1_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_L1TLB_RD_CGTT_CLK_CTRL
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_ATCVM_RD_CGTT_CLK_CTRL
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_RD_ADDR_DAGB_MAX_BURST0
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB1_RD_ADDR_DAGB_LAZY_TIMER0
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB1_RD_ADDR_DAGB_MAX_BURST1
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB1_RD_ADDR_DAGB_LAZY_TIMER1
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB1_RD_VC0_CNTL
+#define DAGB1_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC1_CNTL
+#define DAGB1_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC2_CNTL
+#define DAGB1_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC3_CNTL
+#define DAGB1_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC4_CNTL
+#define DAGB1_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC5_CNTL
+#define DAGB1_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC6_CNTL
+#define DAGB1_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_VC7_CNTL
+#define DAGB1_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_RD_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_RD_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_RD_CNTL_MISC
+#define DAGB1_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB1_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB1_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB1_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB1_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB1_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB1_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB1_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB1_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB1_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB1_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB1_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+//DAGB1_RD_TLB_CREDIT
+#define DAGB1_RD_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB1_RD_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB1_RD_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB1_RD_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB1_RD_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB1_RD_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB1_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB1_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB1_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB1_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB1_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB1_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB1_RDCLI_ASK_PENDING
+#define DAGB1_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_GO_PENDING
+#define DAGB1_RDCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_GBLSEND_PENDING
+#define DAGB1_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_TLB_PENDING
+#define DAGB1_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_OARB_PENDING
+#define DAGB1_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_RDCLI_OSD_PENDING
+#define DAGB1_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI0
+#define DAGB1_WRCLI0__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI0__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI0__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI0__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI0__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI0__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI0__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI0__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI0__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI0__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI0__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI0__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI1
+#define DAGB1_WRCLI1__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI1__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI1__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI1__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI1__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI1__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI1__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI1__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI1__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI1__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI1__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI1__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI2
+#define DAGB1_WRCLI2__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI2__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI2__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI2__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI2__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI2__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI2__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI2__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI2__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI2__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI2__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI2__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI3
+#define DAGB1_WRCLI3__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI3__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI3__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI3__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI3__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI3__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI3__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI3__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI3__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI3__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI3__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI3__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI4
+#define DAGB1_WRCLI4__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI4__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI4__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI4__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI4__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI4__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI4__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI4__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI4__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI4__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI4__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI4__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI5
+#define DAGB1_WRCLI5__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI5__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI5__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI5__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI5__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI5__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI5__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI5__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI5__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI5__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI5__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI5__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI6
+#define DAGB1_WRCLI6__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI6__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI6__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI6__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI6__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI6__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI6__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI6__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI6__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI6__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI6__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI6__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI7
+#define DAGB1_WRCLI7__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI7__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI7__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI7__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI7__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI7__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI7__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI7__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI7__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI7__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI7__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI7__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI8
+#define DAGB1_WRCLI8__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI8__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI8__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI8__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI8__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI8__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI8__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI8__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI8__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI8__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI8__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI8__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI9
+#define DAGB1_WRCLI9__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI9__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI9__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI9__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI9__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI9__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI9__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI9__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI9__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI9__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI9__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI9__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI10
+#define DAGB1_WRCLI10__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI10__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI10__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI10__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI10__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI10__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI10__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI10__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI10__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI10__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI10__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI10__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI11
+#define DAGB1_WRCLI11__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI11__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI11__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI11__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI11__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI11__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI11__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI11__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI11__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI11__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI11__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI11__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI12
+#define DAGB1_WRCLI12__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI12__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI12__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI12__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI12__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI12__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI12__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI12__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI12__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI12__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI12__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI12__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI13
+#define DAGB1_WRCLI13__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI13__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI13__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI13__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI13__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI13__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI13__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI13__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI13__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI13__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI13__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI13__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI14
+#define DAGB1_WRCLI14__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI14__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI14__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI14__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI14__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI14__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI14__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI14__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI14__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI14__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI14__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI14__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WRCLI15
+#define DAGB1_WRCLI15__VIRT_CHAN__SHIFT 0x0
+#define DAGB1_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3
+#define DAGB1_WRCLI15__URG_HIGH__SHIFT 0x4
+#define DAGB1_WRCLI15__URG_LOW__SHIFT 0x8
+#define DAGB1_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc
+#define DAGB1_WRCLI15__MAX_BW__SHIFT 0xd
+#define DAGB1_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15
+#define DAGB1_WRCLI15__MIN_BW__SHIFT 0x16
+#define DAGB1_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19
+#define DAGB1_WRCLI15__MAX_OSD__SHIFT 0x1a
+#define DAGB1_WRCLI15__VIRT_CHAN_MASK 0x00000007L
+#define DAGB1_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L
+#define DAGB1_WRCLI15__URG_HIGH_MASK 0x000000F0L
+#define DAGB1_WRCLI15__URG_LOW_MASK 0x00000F00L
+#define DAGB1_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L
+#define DAGB1_WRCLI15__MAX_BW_MASK 0x001FE000L
+#define DAGB1_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L
+#define DAGB1_WRCLI15__MIN_BW_MASK 0x01C00000L
+#define DAGB1_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L
+#define DAGB1_WRCLI15__MAX_OSD_MASK 0xFC000000L
+//DAGB1_WR_CNTL
+#define DAGB1_WR_CNTL__SCLK_FREQ__SHIFT 0x0
+#define DAGB1_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4
+#define DAGB1_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa
+#define DAGB1_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10
+#define DAGB1_WR_CNTL__IO_LEVEL__SHIFT 0x11
+#define DAGB1_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14
+#define DAGB1_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17
+#define DAGB1_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL
+#define DAGB1_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L
+#define DAGB1_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L
+#define DAGB1_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L
+#define DAGB1_WR_CNTL__IO_LEVEL_MASK 0x000E0000L
+#define DAGB1_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L
+#define DAGB1_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L
+//DAGB1_WR_GMI_CNTL
+#define DAGB1_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0
+#define DAGB1_WR_GMI_CNTL__LEVEL__SHIFT 0x6
+#define DAGB1_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9
+#define DAGB1_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd
+#define DAGB1_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL
+#define DAGB1_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L
+#define DAGB1_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L
+#define DAGB1_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L
+//DAGB1_WR_ADDR_DAGB
+#define DAGB1_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB1_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB1_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB1_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB1_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB1_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB1_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB1_WR_OUTPUT_DAGB_MAX_BURST
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L
+#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L
+//DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L
+#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L
+//DAGB1_WR_CGTT_CLK_CTRL
+#define DAGB1_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_L1TLB_WR_CGTT_CLK_CTRL
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_ATCVM_WR_CGTT_CLK_CTRL
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L
+#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L
+//DAGB1_WR_ADDR_DAGB_MAX_BURST0
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_ADDR_DAGB_LAZY_TIMER0
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_ADDR_DAGB_MAX_BURST1
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_ADDR_DAGB_LAZY_TIMER1
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB
+#define DAGB1_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3
+#define DAGB1_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6
+#define DAGB1_WR_DATA_DAGB__WHOAMI__SHIFT 0x7
+#define DAGB1_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L
+#define DAGB1_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L
+#define DAGB1_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L
+#define DAGB1_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L
+//DAGB1_WR_DATA_DAGB_MAX_BURST0
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB_LAZY_TIMER0
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB_MAX_BURST1
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_DATA_DAGB_LAZY_TIMER1
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L
+#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L
+//DAGB1_WR_VC0_CNTL
+#define DAGB1_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC0_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC0_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC1_CNTL
+#define DAGB1_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC1_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC1_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC2_CNTL
+#define DAGB1_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC2_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC2_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC3_CNTL
+#define DAGB1_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC3_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC3_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC4_CNTL
+#define DAGB1_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC4_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC4_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC5_CNTL
+#define DAGB1_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC5_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC5_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC6_CNTL
+#define DAGB1_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC6_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC6_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_VC7_CNTL
+#define DAGB1_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0
+#define DAGB1_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5
+#define DAGB1_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb
+#define DAGB1_WR_VC7_CNTL__MAX_BW__SHIFT 0xc
+#define DAGB1_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14
+#define DAGB1_WR_VC7_CNTL__MIN_BW__SHIFT 0x15
+#define DAGB1_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18
+#define DAGB1_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19
+#define DAGB1_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL
+#define DAGB1_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L
+#define DAGB1_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L
+#define DAGB1_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L
+#define DAGB1_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L
+#define DAGB1_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L
+#define DAGB1_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L
+#define DAGB1_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L
+//DAGB1_WR_CNTL_MISC
+#define DAGB1_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0
+#define DAGB1_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6
+#define DAGB1_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd
+#define DAGB1_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13
+#define DAGB1_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14
+#define DAGB1_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15
+#define DAGB1_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL
+#define DAGB1_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L
+#define DAGB1_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L
+#define DAGB1_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L
+#define DAGB1_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L
+#define DAGB1_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L
+//DAGB1_WR_TLB_CREDIT
+#define DAGB1_WR_TLB_CREDIT__TLB0__SHIFT 0x0
+#define DAGB1_WR_TLB_CREDIT__TLB1__SHIFT 0x5
+#define DAGB1_WR_TLB_CREDIT__TLB2__SHIFT 0xa
+#define DAGB1_WR_TLB_CREDIT__TLB3__SHIFT 0xf
+#define DAGB1_WR_TLB_CREDIT__TLB4__SHIFT 0x14
+#define DAGB1_WR_TLB_CREDIT__TLB5__SHIFT 0x19
+#define DAGB1_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL
+#define DAGB1_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L
+#define DAGB1_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L
+#define DAGB1_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L
+#define DAGB1_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L
+#define DAGB1_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L
+//DAGB1_WR_DATA_CREDIT
+#define DAGB1_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0
+#define DAGB1_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8
+#define DAGB1_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10
+#define DAGB1_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18
+#define DAGB1_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL
+#define DAGB1_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L
+#define DAGB1_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L
+#define DAGB1_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L
+//DAGB1_WR_MISC_CREDIT
+#define DAGB1_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0
+#define DAGB1_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6
+#define DAGB1_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9
+#define DAGB1_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10
+#define DAGB1_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL
+#define DAGB1_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L
+#define DAGB1_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L
+#define DAGB1_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L
+//DAGB1_WRCLI_ASK_PENDING
+#define DAGB1_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GO_PENDING
+#define DAGB1_WRCLI_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GBLSEND_PENDING
+#define DAGB1_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_TLB_PENDING
+#define DAGB1_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_OARB_PENDING
+#define DAGB1_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_OSD_PENDING
+#define DAGB1_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_DBUS_ASK_PENDING
+#define DAGB1_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_DBUS_GO_PENDING
+#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
+#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_DAGB_DLY
+#define DAGB1_DAGB_DLY__DLY__SHIFT 0x0
+#define DAGB1_DAGB_DLY__CLI__SHIFT 0x8
+#define DAGB1_DAGB_DLY__POS__SHIFT 0x10
+#define DAGB1_DAGB_DLY__DLY_MASK 0x000000FFL
+#define DAGB1_DAGB_DLY__CLI_MASK 0x0000FF00L
+#define DAGB1_DAGB_DLY__POS_MASK 0x000F0000L
+//DAGB1_CNTL_MISC
+#define DAGB1_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0
+#define DAGB1_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3
+#define DAGB1_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6
+#define DAGB1_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9
+#define DAGB1_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc
+#define DAGB1_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf
+#define DAGB1_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12
+#define DAGB1_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15
+#define DAGB1_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18
+#define DAGB1_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e
+#define DAGB1_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L
+#define DAGB1_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L
+#define DAGB1_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L
+#define DAGB1_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L
+#define DAGB1_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L
+#define DAGB1_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L
+#define DAGB1_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L
+#define DAGB1_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L
+#define DAGB1_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L
+#define DAGB1_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L
+//DAGB1_CNTL_MISC2
+#define DAGB1_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0
+#define DAGB1_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1
+#define DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2
+#define DAGB1_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3
+#define DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4
+#define DAGB1_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5
+#define DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6
+#define DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7
+#define DAGB1_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8
+#define DAGB1_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9
+#define DAGB1_CNTL_MISC2__SWAP_CTL__SHIFT 0xa
+#define DAGB1_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L
+#define DAGB1_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L
+#define DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L
+#define DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L
+#define DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L
+#define DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L
+#define DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L
+#define DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L
+#define DAGB1_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L
+#define DAGB1_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L
+#define DAGB1_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L
+//DAGB1_FIFO_EMPTY
+#define DAGB1_FIFO_EMPTY__EMPTY__SHIFT 0x0
+#define DAGB1_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL
+//DAGB1_FIFO_FULL
+#define DAGB1_FIFO_FULL__FULL__SHIFT 0x0
+#define DAGB1_FIFO_FULL__FULL_MASK 0x007FFFFFL
+//DAGB1_WR_CREDITS_FULL
+#define DAGB1_WR_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB1_WR_CREDITS_FULL__FULL_MASK 0x0007FFFFL
+//DAGB1_RD_CREDITS_FULL
+#define DAGB1_RD_CREDITS_FULL__FULL__SHIFT 0x0
+#define DAGB1_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL
+//DAGB1_PERFCOUNTER_LO
+#define DAGB1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define DAGB1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//DAGB1_PERFCOUNTER_HI
+#define DAGB1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define DAGB1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define DAGB1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define DAGB1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//DAGB1_PERFCOUNTER0_CFG
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define DAGB1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define DAGB1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//DAGB1_PERFCOUNTER1_CFG
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define DAGB1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define DAGB1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//DAGB1_PERFCOUNTER2_CFG
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define DAGB1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define DAGB1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define DAGB1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define DAGB1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define DAGB1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//DAGB1_PERFCOUNTER_RSLT_CNTL
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//DAGB1_RESERVE0
+#define DAGB1_RESERVE0__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE0__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE1
+#define DAGB1_RESERVE1__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE1__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE2
+#define DAGB1_RESERVE2__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE2__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE3
+#define DAGB1_RESERVE3__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE3__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE4
+#define DAGB1_RESERVE4__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE4__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE5
+#define DAGB1_RESERVE5__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE5__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE6
+#define DAGB1_RESERVE6__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE6__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE7
+#define DAGB1_RESERVE7__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE7__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE8
+#define DAGB1_RESERVE8__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE8__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE9
+#define DAGB1_RESERVE9__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE9__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE10
+#define DAGB1_RESERVE10__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE10__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE11
+#define DAGB1_RESERVE11__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE11__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE12
+#define DAGB1_RESERVE12__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE12__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE13
+#define DAGB1_RESERVE13__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE13__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE14
+#define DAGB1_RESERVE14__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE14__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE15
+#define DAGB1_RESERVE15__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE15__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE16
+#define DAGB1_RESERVE16__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE16__RESERVE_MASK 0xFFFFFFFFL
+//DAGB1_RESERVE17
+#define DAGB1_RESERVE17__RESERVE__SHIFT 0x0
+#define DAGB1_RESERVE17__RESERVE_MASK 0xFFFFFFFFL
+
+
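Editorial note, not part of the patch: the __SHIFT/_MASK pairs above are only ever consumed through field-access helpers, never open-coded masks. As a minimal sketch of that pattern, assuming nothing beyond the macros defined in this header (get_field()/set_field() below are illustrative stand-ins, not amdgpu functions), reading and rewriting one field of a cached DAGB1_WR_VC0_CNTL value looks like:

/*
 * Illustrative only: shows how a generated <FIELD>__SHIFT / <FIELD>_MASK
 * pair is used to extract and re-pack a bit-field in a 32-bit register
 * value that has already been read from MMIO.
 */
#include <stdint.h>

static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then right-align them. */
	return (reg & mask) >> shift;
}

static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	/* Clear the old field, then insert the new value, clipped to the mask. */
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Hypothetical example: bump the MAX_BW field of DAGB1_WR_VC0_CNTL by one. */
static uint32_t bump_max_bw(uint32_t vc0_cntl)
{
	uint32_t bw = get_field(vc0_cntl,
				DAGB1_WR_VC0_CNTL__MAX_BW_MASK,
				DAGB1_WR_VC0_CNTL__MAX_BW__SHIFT);

	return set_field(vc0_cntl,
			 DAGB1_WR_VC0_CNTL__MAX_BW_MASK,
			 DAGB1_WR_VC0_CNTL__MAX_BW__SHIFT,
			 bw + 1);
}

The same pattern applies to every register in this file; only the macro names change, since the generator emits one __SHIFT/_MASK pair per field.
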
+// addressBlock: mmhub_ea_mmeadec
+//MMEA0_DRAM_RD_CLI2GRP_MAP0
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_RD_CLI2GRP_MAP1
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_WR_CLI2GRP_MAP0
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_WR_CLI2GRP_MAP1
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_DRAM_RD_GRP2VC_MAP
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_DRAM_WR_GRP2VC_MAP
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA0_DRAM_RD_LAZY
+#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+//MMEA0_DRAM_WR_LAZY
+#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+//MMEA0_DRAM_RD_CAM_CNTL
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+//MMEA0_DRAM_WR_CAM_CNTL
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+//MMEA0_DRAM_PAGE_BURST
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA0_DRAM_RD_PRI_AGE
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_DRAM_WR_PRI_AGE
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_DRAM_RD_PRI_QUEUING
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_WR_PRI_QUEUING
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_RD_PRI_FIXED
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_WR_PRI_FIXED
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_DRAM_RD_PRI_URGENCY
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_DRAM_WR_PRI_URGENCY
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_ADDRNORM_BASE_ADDR0
+#define MMEA0_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA0_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x4
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x8
+#define MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA0_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x000000F0L
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000700L
+#define MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_LIMIT_ADDR0
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES__SHIFT 0xa
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES_MASK 0x00000C00L
+#define MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_BASE_ADDR1
+#define MMEA0_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA0_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x4
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x8
+#define MMEA0_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA0_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x000000F0L
+#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000700L
+#define MMEA0_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_LIMIT_ADDR1
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES__SHIFT 0xa
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES_MASK 0x00000C00L
+#define MMEA0_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA0_ADDRNORM_OFFSET_ADDR1
+#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA0_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA0_ADDRNORMDRAM_TRICHANNEL_CFG
+#define MMEA0_ADDRNORMDRAM_TRICHANNEL_CFG__LOG2_ADDR64K_SPACE__SHIFT 0x0
+#define MMEA0_ADDRNORMDRAM_TRICHANNEL_CFG__LOG2_ADDR64K_SPACE_MASK 0x0000003FL
+//MMEA0_ADDRDEC_BANK_CFG
+#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x5
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xa
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xd
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x10
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x11
+#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000001FL
+#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x000003E0L
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00001C00L
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x0000E000L
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00010000L
+#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00020000L
+//MMEA0_ADDRDEC_MISC_CFG
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN3__SHIFT 0x3
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN4__SHIFT 0x4
+#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN3_MASK 0x00000008L
+#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN4_MASK 0x00000010L
+#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA0_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000001FL
+//MMEA0_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+//MMEA0_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA0_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA0_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC0_RM_SEL_CS01
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC0_RM_SEL_CS23
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA0_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA0_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA0_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA0_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA0_ADDRDEC1_RM_SEL_CS01
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC1_RM_SEL_CS23
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA0_IO_RD_CLI2GRP_MAP0
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_IO_RD_CLI2GRP_MAP1
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_IO_WR_CLI2GRP_MAP0
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA0_IO_WR_CLI2GRP_MAP1
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA0_IO_RD_COMBINE_FLUSH
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+//MMEA0_IO_WR_COMBINE_FLUSH
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+//MMEA0_IO_GROUP_BURST
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA0_IO_RD_PRI_AGE
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_IO_WR_PRI_AGE
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA0_IO_RD_PRI_QUEUING
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_WR_PRI_QUEUING
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_RD_PRI_FIXED
+#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_WR_PRI_FIXED
+#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA0_IO_RD_PRI_URGENCY
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_IO_WR_PRI_URGENCY
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA0_IO_RD_PRI_URGENCY_MASK
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
+#define MMEA0_IO_RD_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
+//MMEA0_IO_WR_PRI_URGENCY_MASK
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
+#define MMEA0_IO_WR_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
+//MMEA0_IO_RD_PRI_QUANT_PRI1
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_RD_PRI_QUANT_PRI2
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_RD_PRI_QUANT_PRI3
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_WR_PRI_QUANT_PRI1
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_WR_PRI_QUANT_PRI2
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_IO_WR_PRI_QUANT_PRI3
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA0_SDP_ARB_DRAM
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+//MMEA0_SDP_ARB_FINAL
+#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+//MMEA0_SDP_DRAM_PRIORITY
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA0_SDP_IO_PRIORITY
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA0_SDP_CREDITS
+#define MMEA0_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA0_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA0_SDP_TAG_RESERVE0
+#define MMEA0_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA0_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA0_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA0_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA0_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA0_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA0_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA0_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA0_SDP_TAG_RESERVE1
+#define MMEA0_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA0_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA0_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA0_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA0_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA0_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA0_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA0_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA0_SDP_VCC_RESERVE0
+#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA0_SDP_VCC_RESERVE1
+#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA0_SDP_VCD_RESERVE0
+#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA0_SDP_VCD_RESERVE1
+#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA0_SDP_REQ_CNTL
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x4
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000010L
+//MMEA0_MISC
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA0_MISC__RRET_SWAP_MODE__SHIFT 0x6
+#define MMEA0_MISC__EARLY_SDP_ORIGDATA__SHIFT 0x7
+#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0x8
+#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0xa
+#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0xc
+#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0xe
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x13
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x14
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x15
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x16
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x17
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x18
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA0_MISC__RRET_SWAP_MODE_MASK 0x00000040L
+#define MMEA0_MISC__EARLY_SDP_ORIGDATA_MASK 0x00000080L
+#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00000300L
+#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00000C00L
+#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00003000L
+#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x0007C000L
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x00080000L
+#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x00100000L
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x00200000L
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x00400000L
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x00800000L
+#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x01000000L
+//MMEA0_LATENCY_SAMPLING
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA0_PERFCOUNTER_LO
+#define MMEA0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA0_PERFCOUNTER_HI
+#define MMEA0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA0_PERFCOUNTER0_CFG
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA0_PERFCOUNTER1_CFG
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA0_PERFCOUNTER_RSLT_CNTL
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA0_EDC_CNT
+#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA0_EDC_CNT2
+#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+//MMEA0_DSM_CNTL
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA0_DSM_CNTLA
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA0_DSM_CNTLB
+//MMEA0_DSM_CNTL2
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA0_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA0_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA0_DSM_CNTL2A
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA0_DSM_CNTL2B
+//MMEA0_CGTT_CLK_CTRL
+#define MMEA0_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA0_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA0_EDC_MODE
+#define MMEA0_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA0_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA0_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA0_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA0_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA0_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA0_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA0_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA0_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA0_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA0_ERR_STATUS
+#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA0_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA0_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA0_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA0_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA0_MISC2
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+//MMEA1_DRAM_RD_CLI2GRP_MAP0
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_RD_CLI2GRP_MAP1
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_WR_CLI2GRP_MAP0
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_WR_CLI2GRP_MAP1
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_DRAM_RD_GRP2VC_MAP
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_DRAM_WR_GRP2VC_MAP
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//MMEA1_DRAM_RD_LAZY
+#define MMEA1_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+//MMEA1_DRAM_WR_LAZY
+#define MMEA1_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define MMEA1_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define MMEA1_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define MMEA1_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define MMEA1_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define MMEA1_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define MMEA1_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+//MMEA1_DRAM_RD_CAM_CNTL
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+//MMEA1_DRAM_WR_CAM_CNTL
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+//MMEA1_DRAM_PAGE_BURST
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA1_DRAM_RD_PRI_AGE
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_DRAM_WR_PRI_AGE
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_DRAM_RD_PRI_QUEUING
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_WR_PRI_QUEUING
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_RD_PRI_FIXED
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_WR_PRI_FIXED
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_DRAM_RD_PRI_URGENCY
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_DRAM_WR_PRI_URGENCY
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_DRAM_RD_PRI_QUANT_PRI1
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_RD_PRI_QUANT_PRI2
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_RD_PRI_QUANT_PRI3
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_WR_PRI_QUANT_PRI1
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_WR_PRI_QUANT_PRI2
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_DRAM_WR_PRI_QUANT_PRI3
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_ADDRNORM_BASE_ADDR0
+#define MMEA1_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA1_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x4
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x8
+#define MMEA1_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA1_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x000000F0L
+#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000700L
+#define MMEA1_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_LIMIT_ADDR0
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES__SHIFT 0xa
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__INTLV_NUM_DIES_MASK 0x00000C00L
+#define MMEA1_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_BASE_ADDR1
+#define MMEA1_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0
+#define MMEA1_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x4
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x8
+#define MMEA1_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L
+#define MMEA1_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x000000F0L
+#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000700L
+#define MMEA1_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_LIMIT_ADDR1
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES__SHIFT 0xa
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__INTLV_NUM_DIES_MASK 0x00000C00L
+#define MMEA1_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L
+//MMEA1_ADDRNORM_OFFSET_ADDR1
+#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0
+#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14
+#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L
+#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L
+//MMEA1_ADDRNORMDRAM_HOLE_CNTL
+#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0
+#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7
+#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L
+#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L
+//MMEA1_ADDRNORMDRAM_TRICHANNEL_CFG
+#define MMEA1_ADDRNORMDRAM_TRICHANNEL_CFG__LOG2_ADDR64K_SPACE__SHIFT 0x0
+#define MMEA1_ADDRNORMDRAM_TRICHANNEL_CFG__LOG2_ADDR64K_SPACE_MASK 0x0000003FL
+//MMEA1_ADDRDEC_BANK_CFG
+#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0
+#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x5
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xa
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xd
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x10
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x11
+#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000001FL
+#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x000003E0L
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00001C00L
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x0000E000L
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00010000L
+#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00020000L
+//MMEA1_ADDRDEC_MISC_CFG
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN3__SHIFT 0x3
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN4__SHIFT 0x4
+#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8
+#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9
+#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc
+#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11
+#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16
+#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18
+#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a
+#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN3_MASK 0x00000008L
+#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN4_MASK 0x00000010L
+#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L
+#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L
+#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L
+#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L
+#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L
+#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L
+#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L
+#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_PC
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L
+//MMEA1_ADDRDECDRAM_ADDR_HASH_PC2
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000001FL
+//MMEA1_ADDRDECDRAM_ADDR_HASH_CS0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDECDRAM_ADDR_HASH_CS1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDECDRAM_HARVEST_ENABLE
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L
+#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L
+//MMEA1_ADDRDEC0_BASE_ADDR_CS0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_CS1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_CS2
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_CS3
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_SECCS0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_SECCS1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_SECCS2
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_BASE_ADDR_SECCS3
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_MASK_CS01
+#define MMEA1_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_MASK_CS23
+#define MMEA1_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_MASK_SECCS01
+#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_MASK_SECCS23
+#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC0_ADDR_CFG_CS01
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC0_ADDR_CFG_CS23
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC0_ADDR_SEL_CS01
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_ADDR_SEL_CS23
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_COL_SEL_LO_CS01
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_COL_SEL_LO_CS23
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_COL_SEL_HI_CS01
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_COL_SEL_HI_CS23
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC0_RM_SEL_CS01
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC0_RM_SEL_CS23
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC0_RM_SEL_SECCS01
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC0_RM_SEL_SECCS23
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC1_BASE_ADDR_CS0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_CS1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_CS2
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_CS3
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_SECCS0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_SECCS1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_SECCS2
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_BASE_ADDR_SECCS3
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L
+#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_MASK_CS01
+#define MMEA1_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_MASK_CS23
+#define MMEA1_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_MASK_SECCS01
+#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_MASK_SECCS23
+#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1
+#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL
+//MMEA1_ADDRDEC1_ADDR_CFG_CS01
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x2
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC1_ADDR_CFG_CS23
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x2
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000CL
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L
+#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L
+//MMEA1_ADDRDEC1_ADDR_SEL_CS01
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_ADDR_SEL_CS23
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_COL_SEL_LO_CS01
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_COL_SEL_LO_CS23
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_COL_SEL_HI_CS01
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_COL_SEL_HI_CS23
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L
+#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L
+//MMEA1_ADDRDEC1_RM_SEL_CS01
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC1_RM_SEL_CS23
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC1_RM_SEL_SECCS01
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_ADDRDEC1_RM_SEL_SECCS23
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L
+#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L
+//MMEA1_IO_RD_CLI2GRP_MAP0
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_IO_RD_CLI2GRP_MAP1
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_IO_WR_CLI2GRP_MAP0
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//MMEA1_IO_WR_CLI2GRP_MAP1
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define MMEA1_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//MMEA1_IO_RD_COMBINE_FLUSH
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+//MMEA1_IO_WR_COMBINE_FLUSH
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+//MMEA1_IO_GROUP_BURST
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define MMEA1_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define MMEA1_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//MMEA1_IO_RD_PRI_AGE
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_IO_WR_PRI_AGE
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//MMEA1_IO_RD_PRI_QUEUING
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_WR_PRI_QUEUING
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_RD_PRI_FIXED
+#define MMEA1_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_WR_PRI_FIXED
+#define MMEA1_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//MMEA1_IO_RD_PRI_URGENCY
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_IO_WR_PRI_URGENCY
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//MMEA1_IO_RD_PRI_URGENCY_MASK
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
+#define MMEA1_IO_RD_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
+//MMEA1_IO_WR_PRI_URGENCY_MASK
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID0_MASK__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID1_MASK__SHIFT 0x1
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID2_MASK__SHIFT 0x2
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID3_MASK__SHIFT 0x3
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID4_MASK__SHIFT 0x4
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID5_MASK__SHIFT 0x5
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID6_MASK__SHIFT 0x6
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID7_MASK__SHIFT 0x7
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID8_MASK__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID9_MASK__SHIFT 0x9
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID10_MASK__SHIFT 0xa
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID11_MASK__SHIFT 0xb
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID12_MASK__SHIFT 0xc
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID13_MASK__SHIFT 0xd
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID14_MASK__SHIFT 0xe
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID15_MASK__SHIFT 0xf
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID16_MASK__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID17_MASK__SHIFT 0x11
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID18_MASK__SHIFT 0x12
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID19_MASK__SHIFT 0x13
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID20_MASK__SHIFT 0x14
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID21_MASK__SHIFT 0x15
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID22_MASK__SHIFT 0x16
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID23_MASK__SHIFT 0x17
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID24_MASK__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID25_MASK__SHIFT 0x19
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID26_MASK__SHIFT 0x1a
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID27_MASK__SHIFT 0x1b
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID28_MASK__SHIFT 0x1c
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID29_MASK__SHIFT 0x1d
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID30_MASK__SHIFT 0x1e
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID31_MASK__SHIFT 0x1f
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID0_MASK_MASK 0x00000001L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID1_MASK_MASK 0x00000002L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID2_MASK_MASK 0x00000004L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID3_MASK_MASK 0x00000008L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID4_MASK_MASK 0x00000010L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID5_MASK_MASK 0x00000020L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID6_MASK_MASK 0x00000040L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID7_MASK_MASK 0x00000080L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID8_MASK_MASK 0x00000100L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID9_MASK_MASK 0x00000200L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID10_MASK_MASK 0x00000400L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID11_MASK_MASK 0x00000800L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID12_MASK_MASK 0x00001000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID13_MASK_MASK 0x00002000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID14_MASK_MASK 0x00004000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID15_MASK_MASK 0x00008000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID16_MASK_MASK 0x00010000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID17_MASK_MASK 0x00020000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID18_MASK_MASK 0x00040000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID19_MASK_MASK 0x00080000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID20_MASK_MASK 0x00100000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID21_MASK_MASK 0x00200000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID22_MASK_MASK 0x00400000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID23_MASK_MASK 0x00800000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID24_MASK_MASK 0x01000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID25_MASK_MASK 0x02000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID26_MASK_MASK 0x04000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID27_MASK_MASK 0x08000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID28_MASK_MASK 0x10000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID29_MASK_MASK 0x20000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID30_MASK_MASK 0x40000000L
+#define MMEA1_IO_WR_PRI_URGENCY_MASK__CID31_MASK_MASK 0x80000000L
+//MMEA1_IO_RD_PRI_QUANT_PRI1
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_RD_PRI_QUANT_PRI2
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_RD_PRI_QUANT_PRI3
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_WR_PRI_QUANT_PRI1
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_WR_PRI_QUANT_PRI2
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_IO_WR_PRI_QUANT_PRI3
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//MMEA1_SDP_ARB_DRAM
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define MMEA1_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define MMEA1_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+//MMEA1_SDP_ARB_FINAL
+#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+//MMEA1_SDP_DRAM_PRIORITY
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA1_SDP_IO_PRIORITY
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define MMEA1_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define MMEA1_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//MMEA1_SDP_CREDITS
+#define MMEA1_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define MMEA1_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define MMEA1_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define MMEA1_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define MMEA1_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define MMEA1_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//MMEA1_SDP_TAG_RESERVE0
+#define MMEA1_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define MMEA1_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define MMEA1_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define MMEA1_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define MMEA1_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define MMEA1_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define MMEA1_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define MMEA1_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//MMEA1_SDP_TAG_RESERVE1
+#define MMEA1_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define MMEA1_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define MMEA1_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define MMEA1_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define MMEA1_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define MMEA1_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define MMEA1_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define MMEA1_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//MMEA1_SDP_VCC_RESERVE0
+#define MMEA1_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA1_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA1_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA1_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA1_SDP_VCC_RESERVE1
+#define MMEA1_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA1_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA1_SDP_VCD_RESERVE0
+#define MMEA1_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define MMEA1_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define MMEA1_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define MMEA1_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//MMEA1_SDP_VCD_RESERVE1
+#define MMEA1_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define MMEA1_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define MMEA1_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define MMEA1_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define MMEA1_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define MMEA1_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define MMEA1_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define MMEA1_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//MMEA1_SDP_REQ_CNTL
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define MMEA1_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x4
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define MMEA1_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000010L
+//MMEA1_MISC
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define MMEA1_MISC__RRET_SWAP_MODE__SHIFT 0x6
+#define MMEA1_MISC__EARLY_SDP_ORIGDATA__SHIFT 0x7
+#define MMEA1_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0x8
+#define MMEA1_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0xa
+#define MMEA1_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0xc
+#define MMEA1_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0xe
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x13
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x14
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x15
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x16
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x17
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x18
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define MMEA1_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define MMEA1_MISC__RRET_SWAP_MODE_MASK 0x00000040L
+#define MMEA1_MISC__EARLY_SDP_ORIGDATA_MASK 0x00000080L
+#define MMEA1_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00000300L
+#define MMEA1_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00000C00L
+#define MMEA1_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00003000L
+#define MMEA1_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x0007C000L
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x00080000L
+#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x00100000L
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x00200000L
+#define MMEA1_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x00400000L
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x00800000L
+#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x01000000L
+//MMEA1_LATENCY_SAMPLING
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define MMEA1_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//MMEA1_PERFCOUNTER_LO
+#define MMEA1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MMEA1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MMEA1_PERFCOUNTER_HI
+#define MMEA1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MMEA1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MMEA1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MMEA1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//MMEA1_PERFCOUNTER0_CFG
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MMEA1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MMEA1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MMEA1_PERFCOUNTER1_CFG
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MMEA1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MMEA1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MMEA1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MMEA1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MMEA1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MMEA1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MMEA1_PERFCOUNTER_RSLT_CNTL
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA1_EDC_CNT
+#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14
+#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16
+#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18
+#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a
+#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c
+#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L
+#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L
+#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L
+#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L
+#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L
+//MMEA1_EDC_CNT2
+#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+//MMEA1_DSM_CNTL
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA1_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA1_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//MMEA1_DSM_CNTLA
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define MMEA1_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//MMEA1_DSM_CNTLB
+//MMEA1_DSM_CNTL2
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define MMEA1_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA1_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define MMEA1_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//MMEA1_DSM_CNTL2A
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//MMEA1_DSM_CNTL2B
+//MMEA1_CGTT_CLK_CTRL
+#define MMEA1_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define MMEA1_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define MMEA1_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define MMEA1_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define MMEA1_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define MMEA1_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//MMEA1_EDC_MODE
+#define MMEA1_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define MMEA1_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define MMEA1_EDC_MODE__DED_MODE__SHIFT 0x14
+#define MMEA1_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define MMEA1_EDC_MODE__BYPASS__SHIFT 0x1f
+#define MMEA1_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define MMEA1_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define MMEA1_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define MMEA1_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define MMEA1_EDC_MODE__BYPASS_MASK 0x80000000L
+//MMEA1_ERR_STATUS
+#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define MMEA1_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define MMEA1_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define MMEA1_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define MMEA1_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//MMEA1_MISC2
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define MMEA1_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define MMEA1_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define MMEA1_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
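
Editorial note, not part of the patch: each register above pairs a __SHIFT with a _MASK macro per field, and drivers combine the two to extract or update one field inside the 32-bit register word. A minimal sketch of that pattern, using the MMEA1_SDP_CREDITS fields defined earlier; the rreg32/wreg32 helpers, the register offset and the new_credits value are placeholders for illustration, not definitions from this header:

	/* read-modify-write one field using the SHIFT/MASK pairs above */
	u32 v = rreg32(mmea1_sdp_credits_offset);                 /* assumed read helper */
	u32 tag_limit = (v & MMEA1_SDP_CREDITS__TAG_LIMIT_MASK)
			>> MMEA1_SDP_CREDITS__TAG_LIMIT__SHIFT;   /* extract a field */
	v &= ~MMEA1_SDP_CREDITS__WR_RESP_CREDITS_MASK;            /* clear the field */
	v |= (new_credits << MMEA1_SDP_CREDITS__WR_RESP_CREDITS__SHIFT)
	     & MMEA1_SDP_CREDITS__WR_RESP_CREDITS_MASK;           /* set the new value */
	wreg32(mmea1_sdp_credits_offset, v);                      /* assumed write helper */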
+
+
+// addressBlock: mmhub_pctldec
+//PCTL_MISC
+#define PCTL_MISC__ALLOW_DEEP_SLEEP_MODE__SHIFT 0x0
+#define PCTL_MISC__STCTRL_RSMU_IDLE_THRESHOLD__SHIFT 0x3
+#define PCTL_MISC__STCTRL_DAGB_IDLE_THRESHOLD__SHIFT 0x6
+#define PCTL_MISC__STCTRL_IGNORE_PROTECTION_FAULT__SHIFT 0xb
+#define PCTL_MISC__IGNORE_EA0_SDP_ACK__SHIFT 0xc
+#define PCTL_MISC__IGNORE_EA1_SDP_ACK__SHIFT 0xd
+#define PCTL_MISC__PGFSM_CMD_STATUS__SHIFT 0xe
+#define PCTL_MISC__ALLOW_DEEP_SLEEP_MODE_MASK 0x00000007L
+#define PCTL_MISC__STCTRL_RSMU_IDLE_THRESHOLD_MASK 0x00000038L
+#define PCTL_MISC__STCTRL_DAGB_IDLE_THRESHOLD_MASK 0x000007C0L
+#define PCTL_MISC__STCTRL_IGNORE_PROTECTION_FAULT_MASK 0x00000800L
+#define PCTL_MISC__IGNORE_EA0_SDP_ACK_MASK 0x00001000L
+#define PCTL_MISC__IGNORE_EA1_SDP_ACK_MASK 0x00002000L
+#define PCTL_MISC__PGFSM_CMD_STATUS_MASK 0x0000C000L
+//PCTL_MMHUB_DEEPSLEEP
+#define PCTL_MMHUB_DEEPSLEEP__DS0__SHIFT 0x0
+#define PCTL_MMHUB_DEEPSLEEP__DS1__SHIFT 0x1
+#define PCTL_MMHUB_DEEPSLEEP__DS2__SHIFT 0x2
+#define PCTL_MMHUB_DEEPSLEEP__DS3__SHIFT 0x3
+#define PCTL_MMHUB_DEEPSLEEP__DS4__SHIFT 0x4
+#define PCTL_MMHUB_DEEPSLEEP__DS5__SHIFT 0x5
+#define PCTL_MMHUB_DEEPSLEEP__DS6__SHIFT 0x6
+#define PCTL_MMHUB_DEEPSLEEP__DS7__SHIFT 0x7
+#define PCTL_MMHUB_DEEPSLEEP__DS8__SHIFT 0x8
+#define PCTL_MMHUB_DEEPSLEEP__DS9__SHIFT 0x9
+#define PCTL_MMHUB_DEEPSLEEP__DS10__SHIFT 0xa
+#define PCTL_MMHUB_DEEPSLEEP__DS11__SHIFT 0xb
+#define PCTL_MMHUB_DEEPSLEEP__DS12__SHIFT 0xc
+#define PCTL_MMHUB_DEEPSLEEP__DS13__SHIFT 0xd
+#define PCTL_MMHUB_DEEPSLEEP__DS14__SHIFT 0xe
+#define PCTL_MMHUB_DEEPSLEEP__DS15__SHIFT 0xf
+#define PCTL_MMHUB_DEEPSLEEP__DS16__SHIFT 0x10
+#define PCTL_MMHUB_DEEPSLEEP__SETCLEAR__SHIFT 0x1f
+#define PCTL_MMHUB_DEEPSLEEP__DS0_MASK 0x00000001L
+#define PCTL_MMHUB_DEEPSLEEP__DS1_MASK 0x00000002L
+#define PCTL_MMHUB_DEEPSLEEP__DS2_MASK 0x00000004L
+#define PCTL_MMHUB_DEEPSLEEP__DS3_MASK 0x00000008L
+#define PCTL_MMHUB_DEEPSLEEP__DS4_MASK 0x00000010L
+#define PCTL_MMHUB_DEEPSLEEP__DS5_MASK 0x00000020L
+#define PCTL_MMHUB_DEEPSLEEP__DS6_MASK 0x00000040L
+#define PCTL_MMHUB_DEEPSLEEP__DS7_MASK 0x00000080L
+#define PCTL_MMHUB_DEEPSLEEP__DS8_MASK 0x00000100L
+#define PCTL_MMHUB_DEEPSLEEP__DS9_MASK 0x00000200L
+#define PCTL_MMHUB_DEEPSLEEP__DS10_MASK 0x00000400L
+#define PCTL_MMHUB_DEEPSLEEP__DS11_MASK 0x00000800L
+#define PCTL_MMHUB_DEEPSLEEP__DS12_MASK 0x00001000L
+#define PCTL_MMHUB_DEEPSLEEP__DS13_MASK 0x00002000L
+#define PCTL_MMHUB_DEEPSLEEP__DS14_MASK 0x00004000L
+#define PCTL_MMHUB_DEEPSLEEP__DS15_MASK 0x00008000L
+#define PCTL_MMHUB_DEEPSLEEP__DS16_MASK 0x00010000L
+#define PCTL_MMHUB_DEEPSLEEP__SETCLEAR_MASK 0x80000000L
+//PCTL_MMHUB_DEEPSLEEP_OVERRIDE
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS0__SHIFT 0x0
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS1__SHIFT 0x1
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS2__SHIFT 0x2
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS3__SHIFT 0x3
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS4__SHIFT 0x4
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS5__SHIFT 0x5
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS6__SHIFT 0x6
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS7__SHIFT 0x7
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS8__SHIFT 0x8
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS9__SHIFT 0x9
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS10__SHIFT 0xa
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS11__SHIFT 0xb
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS12__SHIFT 0xc
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS13__SHIFT 0xd
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS14__SHIFT 0xe
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS15__SHIFT 0xf
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS16__SHIFT 0x10
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS0_MASK 0x00000001L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS1_MASK 0x00000002L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS2_MASK 0x00000004L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS3_MASK 0x00000008L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS4_MASK 0x00000010L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS5_MASK 0x00000020L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS6_MASK 0x00000040L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS7_MASK 0x00000080L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS8_MASK 0x00000100L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS9_MASK 0x00000200L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS10_MASK 0x00000400L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS11_MASK 0x00000800L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS12_MASK 0x00001000L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS13_MASK 0x00002000L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS14_MASK 0x00004000L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS15_MASK 0x00008000L
+#define PCTL_MMHUB_DEEPSLEEP_OVERRIDE__DS16_MASK 0x00010000L
+//PCTL_PG_IGNORE_DEEPSLEEP
+#define PCTL_PG_IGNORE_DEEPSLEEP__ALLIPS__SHIFT 0x0
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS0__SHIFT 0x1
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS1__SHIFT 0x2
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS2__SHIFT 0x3
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS3__SHIFT 0x4
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS4__SHIFT 0x5
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS5__SHIFT 0x6
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS6__SHIFT 0x7
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS7__SHIFT 0x8
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS8__SHIFT 0x9
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS9__SHIFT 0xa
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS10__SHIFT 0xb
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS11__SHIFT 0xc
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS12__SHIFT 0xd
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS13__SHIFT 0xe
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS14__SHIFT 0xf
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS15__SHIFT 0x10
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS16__SHIFT 0x11
+#define PCTL_PG_IGNORE_DEEPSLEEP__ALLIPS_MASK 0x00000001L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS0_MASK 0x00000002L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS1_MASK 0x00000004L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS2_MASK 0x00000008L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS3_MASK 0x00000010L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS4_MASK 0x00000020L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS5_MASK 0x00000040L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS6_MASK 0x00000080L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS7_MASK 0x00000100L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS8_MASK 0x00000200L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS9_MASK 0x00000400L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS10_MASK 0x00000800L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS11_MASK 0x00001000L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS12_MASK 0x00002000L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS13_MASK 0x00004000L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS14_MASK 0x00008000L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS15_MASK 0x00010000L
+#define PCTL_PG_IGNORE_DEEPSLEEP__DS16_MASK 0x00020000L
+//PCTL_PG_DAGB
+#define PCTL_PG_DAGB__DS0__SHIFT 0x0
+#define PCTL_PG_DAGB__DS1__SHIFT 0x1
+#define PCTL_PG_DAGB__DS2__SHIFT 0x2
+#define PCTL_PG_DAGB__DS3__SHIFT 0x3
+#define PCTL_PG_DAGB__DS4__SHIFT 0x4
+#define PCTL_PG_DAGB__DS5__SHIFT 0x5
+#define PCTL_PG_DAGB__DS6__SHIFT 0x6
+#define PCTL_PG_DAGB__DS7__SHIFT 0x7
+#define PCTL_PG_DAGB__DS8__SHIFT 0x8
+#define PCTL_PG_DAGB__DS9__SHIFT 0x9
+#define PCTL_PG_DAGB__DS10__SHIFT 0xa
+#define PCTL_PG_DAGB__DS11__SHIFT 0xb
+#define PCTL_PG_DAGB__DS12__SHIFT 0xc
+#define PCTL_PG_DAGB__DS13__SHIFT 0xd
+#define PCTL_PG_DAGB__DS14__SHIFT 0xe
+#define PCTL_PG_DAGB__DS15__SHIFT 0xf
+#define PCTL_PG_DAGB__DS16__SHIFT 0x10
+#define PCTL_PG_DAGB__DS0_MASK 0x00000001L
+#define PCTL_PG_DAGB__DS1_MASK 0x00000002L
+#define PCTL_PG_DAGB__DS2_MASK 0x00000004L
+#define PCTL_PG_DAGB__DS3_MASK 0x00000008L
+#define PCTL_PG_DAGB__DS4_MASK 0x00000010L
+#define PCTL_PG_DAGB__DS5_MASK 0x00000020L
+#define PCTL_PG_DAGB__DS6_MASK 0x00000040L
+#define PCTL_PG_DAGB__DS7_MASK 0x00000080L
+#define PCTL_PG_DAGB__DS8_MASK 0x00000100L
+#define PCTL_PG_DAGB__DS9_MASK 0x00000200L
+#define PCTL_PG_DAGB__DS10_MASK 0x00000400L
+#define PCTL_PG_DAGB__DS11_MASK 0x00000800L
+#define PCTL_PG_DAGB__DS12_MASK 0x00001000L
+#define PCTL_PG_DAGB__DS13_MASK 0x00002000L
+#define PCTL_PG_DAGB__DS14_MASK 0x00004000L
+#define PCTL_PG_DAGB__DS15_MASK 0x00008000L
+#define PCTL_PG_DAGB__DS16_MASK 0x00010000L
+//PCTL0_RENG_RAM_INDEX
+#define PCTL0_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL0_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000007FFL
+//PCTL0_RENG_RAM_DATA
+#define PCTL0_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL0_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL0_RENG_EXECUTE
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP__SHIFT 0x0
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x1
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x2
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x3
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xe
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x19
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK 0x00000001L
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000002L
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000004L
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00003FF8L
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x01FFC000L
+#define PCTL0_RENG_EXECUTE__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x02000000L
+//PCTL0_MISC
+#define PCTL0_MISC__CRITICAL_REGS_LOCK__SHIFT 0xb
+#define PCTL0_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xc
+#define PCTL0_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xf
+#define PCTL0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0x10
+#define PCTL0_MISC__CRITICAL_REGS_LOCK_MASK 0x00000800L
+#define PCTL0_MISC__TILE_IDLE_THRESHOLD_MASK 0x00007000L
+#define PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK 0x00008000L
+#define PCTL0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00010000L
+//PCTL0_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET
+#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2__SHIFT 0x0
+#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3__SHIFT 0x10
+#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2_MASK 0x0000FFFFL
+#define PCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3_MASK 0xFFFF0000L
+//PCTL1_RENG_RAM_INDEX
+#define PCTL1_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL1_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL1_RENG_RAM_DATA
+#define PCTL1_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL1_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL1_RENG_EXECUTE
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP__SHIFT 0x0
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x1
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x2
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x3
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xd
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x17
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK 0x00000001L
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000002L
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000004L
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00001FF8L
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x007FE000L
+#define PCTL1_RENG_EXECUTE__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00800000L
+//PCTL1_MISC
+#define PCTL1_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL1_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL1_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL1_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL1_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL1_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL1_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+//PCTL1_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET
+#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2__SHIFT 0x0
+#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3__SHIFT 0x10
+#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2_MASK 0x0000FFFFL
+#define PCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3_MASK 0xFFFF0000L
+//PCTL2_RENG_RAM_INDEX
+#define PCTL2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0
+#define PCTL2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL
+//PCTL2_RENG_RAM_DATA
+#define PCTL2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0
+#define PCTL2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL
+//PCTL2_RENG_EXECUTE
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP__SHIFT 0x0
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x1
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x2
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x3
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xd
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x17
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK 0x00000001L
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000002L
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000004L
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00001FF8L
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x007FE000L
+#define PCTL2_RENG_EXECUTE__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00800000L
+//PCTL2_MISC
+#define PCTL2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa
+#define PCTL2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb
+#define PCTL2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe
+#define PCTL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf
+#define PCTL2_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10
+#define PCTL2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L
+#define PCTL2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L
+#define PCTL2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L
+#define PCTL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L
+#define PCTL2_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L
+//PCTL2_STCTRL_REGISTER_SAVE_RANGE0
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL2_STCTRL_REGISTER_SAVE_RANGE1
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL2_STCTRL_REGISTER_SAVE_RANGE2
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL2_STCTRL_REGISTER_SAVE_RANGE3
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL2_STCTRL_REGISTER_SAVE_RANGE4
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL
+#define PCTL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L
+//PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET
+#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0
+#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10
+#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL
+#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L
+//PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1
+#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2__SHIFT 0x0
+#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3__SHIFT 0x10
+#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL2_MASK 0x0000FFFFL
+#define PCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL3_MASK 0xFFFF0000L
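
Editorial note, not part of the patch: because every definition follows the same <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK naming convention, a generic token-pasting helper can be layered on top instead of spelling out each pair by hand. A sketch under that assumption; the FIELD_GET_/FIELD_SET_ macro names are hypothetical and chosen only for illustration:

	/* build the macro names from the register and field tokens */
	#define FIELD_SHIFT_(reg, field)  reg##__##field##__SHIFT
	#define FIELD_MASK_(reg, field)   reg##__##field##_MASK
	#define FIELD_GET_(val, reg, field) \
		(((val) & FIELD_MASK_(reg, field)) >> FIELD_SHIFT_(reg, field))
	#define FIELD_SET_(val, reg, field, fv) \
		(((val) & ~FIELD_MASK_(reg, field)) | \
		 (((fv) << FIELD_SHIFT_(reg, field)) & FIELD_MASK_(reg, field)))

	/* e.g. pull the execute-now start pointer out of a PCTL2_RENG_EXECUTE value */
	u32 start = FIELD_GET_(v, PCTL2_RENG_EXECUTE, RENG_EXECUTE_NOW_START_PTR);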
+
+
+// addressBlock: mmhub_l1tlb_vml1dec
+//MC_VM_MX_L1_TLB0_STATUS
+#define MC_VM_MX_L1_TLB0_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB1_STATUS
+#define MC_VM_MX_L1_TLB1_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB1_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB2_STATUS
+#define MC_VM_MX_L1_TLB2_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB2_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB3_STATUS
+#define MC_VM_MX_L1_TLB3_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB3_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB4_STATUS
+#define MC_VM_MX_L1_TLB4_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB4_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB5_STATUS
+#define MC_VM_MX_L1_TLB5_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB5_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB6_STATUS
+#define MC_VM_MX_L1_TLB6_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB6_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+//MC_VM_MX_L1_TLB7_STATUS
+#define MC_VM_MX_L1_TLB7_STATUS__BUSY__SHIFT 0x0
+#define MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define MC_VM_MX_L1_TLB7_STATUS__BUSY_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+
+
+// addressBlock: mmhub_l1tlb_vml1pldec
+//MC_VM_MX_L1_PERFCOUNTER0_CFG
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_MX_L1_PERFCOUNTER1_CFG
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_MX_L1_PERFCOUNTER2_CFG
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_MX_L1_PERFCOUNTER3_CFG
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: mmhub_l1tlb_vml1prdec
+//MC_VM_MX_L1_PERFCOUNTER_LO
+#define MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MC_VM_MX_L1_PERFCOUNTER_HI
+#define MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: mmhub_utcl2_atcl2dec
+//ATC_L2_CNTL
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
+#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x8
+#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
+#define ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
+#define ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00000700L
+#define ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+//ATC_L2_CNTL2
+#define ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x8
+#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0x9
+#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xc
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0xf
+#define ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000100L
+#define ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00000E00L
+#define ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00007000L
+#define ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x001F8000L
+//ATC_L2_CACHE_DATA0
+#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
+#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
+#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
+#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17
+#define ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
+#define ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
+#define ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL
+#define ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L
+//ATC_L2_CACHE_DATA1
+#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
+#define ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//ATC_L2_CACHE_DATA2
+#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//ATC_L2_CNTL3
+#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x0
+#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3
+#define ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x9
+#define ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x00000007L
+#define ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x000001F8L
+#define ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x00000E00L
+//ATC_L2_STATUS
+#define ATC_L2_STATUS__BUSY__SHIFT 0x0
+#define ATC_L2_STATUS__PARITY_ERROR_INFO__SHIFT 0x1
+#define ATC_L2_STATUS__BUSY_MASK 0x00000001L
+#define ATC_L2_STATUS__PARITY_ERROR_INFO_MASK 0x3FFFFFFEL
+//ATC_L2_STATUS2
+#define ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
+#define ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
+#define ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
+#define ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
+//ATC_L2_MISC_CG
+#define ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
+#define ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
+#define ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
+#define ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
+#define ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
+#define ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+//ATC_L2_MEM_POWER_LS
+#define ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//ATC_L2_CGTT_CLK_CTRL
+#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+
+
+// addressBlock: mmhub_utcl2_vml2pfdec
+//VM_L2_CNTL
+#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
+#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
+#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
+#define VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
+#define VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+#define VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
+#define VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
+//VM_L2_CNTL2
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
+#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
+#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
+#define VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
+#define VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
+#define VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
+//VM_L2_CNTL3
+#define VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
+#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
+#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
+#define VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
+#define VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
+//VM_L2_STATUS
+#define VM_L2_STATUS__L2_BUSY__SHIFT 0x0
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
+#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
+#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
+#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
+#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
+#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
+#define VM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
+#define VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
+#define VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
+#define VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
+#define VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
+#define VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
+//VM_DUMMY_PAGE_FAULT_CNTL
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
+//VM_DUMMY_PAGE_FAULT_ADDR_LO32
+#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_DUMMY_PAGE_FAULT_ADDR_HI32
+#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_PROTECTION_FAULT_CNTL
+#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
+#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
+#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
+#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
+#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
+#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
+#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
+#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
+#define VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
+#define VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
+#define VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
+#define VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
+#define VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
+#define VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
+#define VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
+#define VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
+#define VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
+//VM_L2_PROTECTION_FAULT_CNTL2
+#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
+#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
+#define VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
+#define VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
+#define VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
+//VM_L2_PROTECTION_FAULT_MM_CNTL3
+#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_MM_CNTL4
+#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_STATUS
+#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
+#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
+#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
+#define VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
+#define VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
+#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
+#define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
+#define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
+#define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
+#define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
+#define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
+#define VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
+#define VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
+#define VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
+#define VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
+#define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+//VM_L2_PROTECTION_FAULT_ADDR_LO32
+#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_ADDR_HI32
+#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
+//VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
+#define VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
+//VM_L2_CNTL4
+#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
+#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
+#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
+#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
+#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
+#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
+#define VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
+#define VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
+#define VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
+#define VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
+#define VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
+#define VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
+//VM_L2_MM_GROUP_RT_CLASSES
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
+#define VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
+//VM_L2_BANK_SELECT_RESERVED_CID
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VM_L2_BANK_SELECT_RESERVED_CID2
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+//VM_L2_CACHE_PARITY_CNTL
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
+#define VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
+#define VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
+//VM_L2_CGTT_CLK_CTRL
+#define VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+
+
+// addressBlock: mmhub_utcl2_vml2vcdec
+//VM_CONTEXT0_CNTL
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT1_CNTL
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT2_CNTL
+#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT3_CNTL
+#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT4_CNTL
+#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT5_CNTL
+#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT6_CNTL
+#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT7_CNTL
+#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT8_CNTL
+#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT9_CNTL
+#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT10_CNTL
+#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT11_CNTL
+#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT12_CNTL
+#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT13_CNTL
+#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT14_CNTL
+#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXT15_CNTL
+#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+//VM_CONTEXTS_DISABLE
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+//VM_INVALIDATE_ENG0_SEM
+#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG1_SEM
+#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG2_SEM
+#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG3_SEM
+#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG4_SEM
+#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG5_SEM
+#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG6_SEM
+#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG7_SEM
+#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG8_SEM
+#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG9_SEM
+#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG10_SEM
+#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG11_SEM
+#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG12_SEM
+#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG13_SEM
+#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG14_SEM
+#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG15_SEM
+#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG16_SEM
+#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG17_SEM
+#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
+//VM_INVALIDATE_ENG0_REQ
+#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG1_REQ
+#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG2_REQ
+#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG3_REQ
+#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG4_REQ
+#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG5_REQ
+#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG6_REQ
+#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG7_REQ
+#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG8_REQ
+#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG9_REQ
+#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG10_REQ
+#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG11_REQ
+#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG12_REQ
+#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG13_REQ
+#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG14_REQ
+#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG15_REQ
+#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG16_REQ
+#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG17_REQ
+#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16
+#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17
+#define VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L
+#define VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L
+#define VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L
+//VM_INVALIDATE_ENG0_ACK
+#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG1_ACK
+#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG2_ACK
+#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG3_ACK
+#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG4_ACK
+#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG5_ACK
+#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG6_ACK
+#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG7_ACK
+#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG8_ACK
+#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG9_ACK
+#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG10_ACK
+#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG11_ACK
+#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG12_ACK
+#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG13_ACK
+#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG14_ACK
+#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG15_ACK
+#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG16_ACK
+#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG17_ACK
+#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
+#define VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
+//VM_INVALIDATE_ENG0_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG0_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG1_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG1_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG2_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG2_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG3_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG3_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG4_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG4_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG5_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG5_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG6_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG6_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG7_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG7_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG8_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG8_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG9_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG9_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG10_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG10_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG11_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG11_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG12_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG12_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG13_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG13_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG14_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG14_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG15_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG15_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG16_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG16_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_INVALIDATE_ENG17_ADDR_RANGE_LO32
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//VM_INVALIDATE_ENG17_ADDR_RANGE_HI32
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+
+
+// addressBlock: mmhub_utcl2_vml2pldec
+//MC_VM_L2_PERFCOUNTER0_CFG
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER1_CFG
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER2_CFG
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER3_CFG
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER4_CFG
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER5_CFG
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER6_CFG
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER7_CFG
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
+#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
+#define MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
+#define MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
+//MC_VM_L2_PERFCOUNTER_RSLT_CNTL
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: mmhub_utcl2_vml2prdec
+//MC_VM_L2_PERFCOUNTER_LO
+#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//MC_VM_L2_PERFCOUNTER_HI
+#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: mmhub_utcl2_vmsharedhvdec
+//MC_VM_FB_SIZE_OFFSET_VF0
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF1
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF2
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF3
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF4
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF5
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF6
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF7
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF8
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF9
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF10
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF11
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF12
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF13
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF14
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
+//MC_VM_FB_SIZE_OFFSET_VF15
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
+#define MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
+//VM_IOMMU_MMIO_CNTRL_1
+#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
+#define VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
+//MC_VM_MARC_BASE_LO_0
+#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_1
+#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_2
+#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_LO_3
+#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
+#define MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_BASE_HI_0
+#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_1
+#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_2
+#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_BASE_HI_3
+#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
+#define MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_LO_0
+#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_1
+#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_2
+#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_LO_3
+#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
+#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
+#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
+#define MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
+#define MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
+#define MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_RELOC_HI_0
+#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_1
+#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_2
+#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_RELOC_HI_3
+#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
+#define MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_LO_0
+#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_1
+#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_2
+#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_LO_3
+#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
+#define MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
+//MC_VM_MARC_LEN_HI_0
+#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_1
+#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_2
+#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
+//MC_VM_MARC_LEN_HI_3
+#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
+#define MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
+//VM_IOMMU_CONTROL_REGISTER
+#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
+#define VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
+//VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
+#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
+#define VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
+//VM_PCIE_ATS_CNTL
+#define VM_PCIE_ATS_CNTL__STU__SHIFT 0x10
+#define VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
+#define VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_0
+#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_1
+#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_2
+#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_3
+#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_4
+#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_5
+#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_6
+#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_7
+#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_8
+#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_9
+#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_10
+#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_11
+#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_12
+#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_13
+#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_14
+#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
+//VM_PCIE_ATS_CNTL_VF_15
+#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
+#define VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
+//UTCL2_CGTT_CLK_CTRL
+#define UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc
+#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18
+#define UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L
+#define UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
+#define UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
+//MC_SHARED_ACTIVE_FCN_ID
+#define MC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define MC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define MC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define MC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//MC_VM_XGMI_GPUIOV_ENABLE
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0__SHIFT 0x0
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1__SHIFT 0x1
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2__SHIFT 0x2
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3__SHIFT 0x3
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4__SHIFT 0x4
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5__SHIFT 0x5
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6__SHIFT 0x6
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7__SHIFT 0x7
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8__SHIFT 0x8
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9__SHIFT 0x9
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10__SHIFT 0xa
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11__SHIFT 0xb
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12__SHIFT 0xc
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13__SHIFT 0xd
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14__SHIFT 0xe
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15__SHIFT 0xf
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF__SHIFT 0x1f
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0_MASK 0x00000001L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1_MASK 0x00000002L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2_MASK 0x00000004L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3_MASK 0x00000008L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4_MASK 0x00000010L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5_MASK 0x00000020L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6_MASK 0x00000040L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7_MASK 0x00000080L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8_MASK 0x00000100L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9_MASK 0x00000200L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10_MASK 0x00000400L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11_MASK 0x00000800L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12_MASK 0x00001000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13_MASK 0x00002000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14_MASK 0x00004000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15_MASK 0x00008000L
+#define MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF_MASK 0x80000000L
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_NB_MMIOBASE
+#define MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//MC_VM_NB_MMIOLIMIT
+#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//MC_VM_NB_PCI_CTRL
+#define MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
+#define MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
+//MC_VM_NB_PCI_ARB
+#define MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
+#define MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
+//MC_VM_NB_TOP_OF_DRAM_SLOT1
+#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
+#define MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
+//MC_VM_NB_LOWER_TOP_OF_DRAM2
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//MC_VM_NB_UPPER_TOP_OF_DRAM2
+#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
+//MC_VM_FB_OFFSET
+#define MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
+//MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
+//MC_VM_STEERING
+#define MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
+#define MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
+//MC_SHARED_VIRT_RESET_REQ
+#define MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//MC_MEM_POWER_LS
+#define MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//MC_VM_CACHEABLE_DRAM_ADDRESS_START
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//MC_VM_CACHEABLE_DRAM_ADDRESS_END
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//MC_VM_APT_CNTL
+#define MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
+#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
+#define MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
+#define MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
+//MC_VM_LOCAL_HBM_ADDRESS_START
+#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//MC_VM_LOCAL_HBM_ADDRESS_END
+#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL
+#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x3
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000038L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
+
+
+// addressBlock: mmhub_utcl2_vmsharedvcdec
+//MC_VM_FB_LOCATION_BASE
+#define MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//MC_VM_FB_LOCATION_TOP
+#define MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//MC_VM_AGP_TOP
+#define MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//MC_VM_AGP_BOT
+#define MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//MC_VM_AGP_BASE
+#define MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//MC_VM_SYSTEM_APERTURE_LOW_ADDR
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//MC_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//MC_VM_MX_L1_TLB_CNTL
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
+#define MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
+#define MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L
+#define MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntrdec
+//ATC_L2_PERFCOUNTER_LO
+#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//ATC_L2_PERFCOUNTER_HI
+#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: mmhub_utcl2_atcl2pfcntldec
+//ATC_L2_PERFCOUNTER0_CFG
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//ATC_L2_PERFCOUNTER1_CFG
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//ATC_L2_PERFCOUNTER_RSLT_CNTL
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_default.h
index f087a2b..f087a2b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_default.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_offset.h
index 1063e5e..1063e5e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_sh_mask.h
index 9b0c8c5..9b0c8c5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/MP/mp_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_offset.h
new file mode 100644
index 0000000..299e526
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_offset.h
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mp_9_0_OFFSET_HEADER
+#define _mp_9_0_OFFSET_HEADER
+
+
+
+// addressBlock: mp_SmuMp0_SmnDec
+// base address: 0x0
+#define mmMP0_SMN_C2PMSG_32 0x0060
+#define mmMP0_SMN_C2PMSG_32_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_33 0x0061
+#define mmMP0_SMN_C2PMSG_33_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_34 0x0062
+#define mmMP0_SMN_C2PMSG_34_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_35 0x0063
+#define mmMP0_SMN_C2PMSG_35_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_36 0x0064
+#define mmMP0_SMN_C2PMSG_36_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_37 0x0065
+#define mmMP0_SMN_C2PMSG_37_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_38 0x0066
+#define mmMP0_SMN_C2PMSG_38_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_39 0x0067
+#define mmMP0_SMN_C2PMSG_39_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_40 0x0068
+#define mmMP0_SMN_C2PMSG_40_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_41 0x0069
+#define mmMP0_SMN_C2PMSG_41_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_42 0x006a
+#define mmMP0_SMN_C2PMSG_42_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_43 0x006b
+#define mmMP0_SMN_C2PMSG_43_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_44 0x006c
+#define mmMP0_SMN_C2PMSG_44_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_45 0x006d
+#define mmMP0_SMN_C2PMSG_45_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_46 0x006e
+#define mmMP0_SMN_C2PMSG_46_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_47 0x006f
+#define mmMP0_SMN_C2PMSG_47_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_48 0x0070
+#define mmMP0_SMN_C2PMSG_48_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_49 0x0071
+#define mmMP0_SMN_C2PMSG_49_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_50 0x0072
+#define mmMP0_SMN_C2PMSG_50_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_51 0x0073
+#define mmMP0_SMN_C2PMSG_51_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_52 0x0074
+#define mmMP0_SMN_C2PMSG_52_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_53 0x0075
+#define mmMP0_SMN_C2PMSG_53_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_54 0x0076
+#define mmMP0_SMN_C2PMSG_54_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_55 0x0077
+#define mmMP0_SMN_C2PMSG_55_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_56 0x0078
+#define mmMP0_SMN_C2PMSG_56_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_57 0x0079
+#define mmMP0_SMN_C2PMSG_57_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_58 0x007a
+#define mmMP0_SMN_C2PMSG_58_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_59 0x007b
+#define mmMP0_SMN_C2PMSG_59_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_60 0x007c
+#define mmMP0_SMN_C2PMSG_60_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_61 0x007d
+#define mmMP0_SMN_C2PMSG_61_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_62 0x007e
+#define mmMP0_SMN_C2PMSG_62_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_63 0x007f
+#define mmMP0_SMN_C2PMSG_63_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_64 0x0080
+#define mmMP0_SMN_C2PMSG_64_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_65 0x0081
+#define mmMP0_SMN_C2PMSG_65_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_66 0x0082
+#define mmMP0_SMN_C2PMSG_66_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_67 0x0083
+#define mmMP0_SMN_C2PMSG_67_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_68 0x0084
+#define mmMP0_SMN_C2PMSG_68_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_69 0x0085
+#define mmMP0_SMN_C2PMSG_69_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_70 0x0086
+#define mmMP0_SMN_C2PMSG_70_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_71 0x0087
+#define mmMP0_SMN_C2PMSG_71_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_72 0x0088
+#define mmMP0_SMN_C2PMSG_72_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_73 0x0089
+#define mmMP0_SMN_C2PMSG_73_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_74 0x008a
+#define mmMP0_SMN_C2PMSG_74_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_75 0x008b
+#define mmMP0_SMN_C2PMSG_75_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_76 0x008c
+#define mmMP0_SMN_C2PMSG_76_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_77 0x008d
+#define mmMP0_SMN_C2PMSG_77_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_78 0x008e
+#define mmMP0_SMN_C2PMSG_78_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_79 0x008f
+#define mmMP0_SMN_C2PMSG_79_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_80 0x0090
+#define mmMP0_SMN_C2PMSG_80_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_81 0x0091
+#define mmMP0_SMN_C2PMSG_81_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_82 0x0092
+#define mmMP0_SMN_C2PMSG_82_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_83 0x0093
+#define mmMP0_SMN_C2PMSG_83_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_84 0x0094
+#define mmMP0_SMN_C2PMSG_84_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_85 0x0095
+#define mmMP0_SMN_C2PMSG_85_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_86 0x0096
+#define mmMP0_SMN_C2PMSG_86_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_87 0x0097
+#define mmMP0_SMN_C2PMSG_87_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_88 0x0098
+#define mmMP0_SMN_C2PMSG_88_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_89 0x0099
+#define mmMP0_SMN_C2PMSG_89_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_90 0x009a
+#define mmMP0_SMN_C2PMSG_90_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_91 0x009b
+#define mmMP0_SMN_C2PMSG_91_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_92 0x009c
+#define mmMP0_SMN_C2PMSG_92_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_93 0x009d
+#define mmMP0_SMN_C2PMSG_93_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_94 0x009e
+#define mmMP0_SMN_C2PMSG_94_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_95 0x009f
+#define mmMP0_SMN_C2PMSG_95_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_96 0x00a0
+#define mmMP0_SMN_C2PMSG_96_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_97 0x00a1
+#define mmMP0_SMN_C2PMSG_97_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_98 0x00a2
+#define mmMP0_SMN_C2PMSG_98_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_99 0x00a3
+#define mmMP0_SMN_C2PMSG_99_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_100 0x00a4
+#define mmMP0_SMN_C2PMSG_100_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_101 0x00a5
+#define mmMP0_SMN_C2PMSG_101_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_102 0x00a6
+#define mmMP0_SMN_C2PMSG_102_BASE_IDX 0
+#define mmMP0_SMN_C2PMSG_103 0x00a7
+#define mmMP0_SMN_C2PMSG_103_BASE_IDX 0
+#define mmMP0_SMN_ACTIVE_FCN_ID 0x00c0
+#define mmMP0_SMN_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmMP0_SMN_IH_CREDIT 0x00c1
+#define mmMP0_SMN_IH_CREDIT_BASE_IDX 0
+#define mmMP0_SMN_IH_SW_INT 0x00c2
+#define mmMP0_SMN_IH_SW_INT_BASE_IDX 0
+#define mmMP0_SMN_IH_SW_INT_CTRL 0x00c3
+#define mmMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+// base address: 0x0
+#define mmMP1_SMN_ACP2MP_RESP 0x0240
+#define mmMP1_SMN_ACP2MP_RESP_BASE_IDX 0
+#define mmMP1_SMN_DC2MP_RESP 0x0241
+#define mmMP1_SMN_DC2MP_RESP_BASE_IDX 0
+#define mmMP1_SMN_UVD2MP_RESP 0x0242
+#define mmMP1_SMN_UVD2MP_RESP_BASE_IDX 0
+#define mmMP1_SMN_VCE2MP_RESP 0x0243
+#define mmMP1_SMN_VCE2MP_RESP_BASE_IDX 0
+#define mmMP1_SMN_RLC2MP_RESP 0x0244
+#define mmMP1_SMN_RLC2MP_RESP_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_32 0x0260
+#define mmMP1_SMN_C2PMSG_32_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_33 0x0261
+#define mmMP1_SMN_C2PMSG_33_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_34 0x0262
+#define mmMP1_SMN_C2PMSG_34_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_35 0x0263
+#define mmMP1_SMN_C2PMSG_35_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_36 0x0264
+#define mmMP1_SMN_C2PMSG_36_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_37 0x0265
+#define mmMP1_SMN_C2PMSG_37_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_38 0x0266
+#define mmMP1_SMN_C2PMSG_38_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_39 0x0267
+#define mmMP1_SMN_C2PMSG_39_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_40 0x0268
+#define mmMP1_SMN_C2PMSG_40_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_41 0x0269
+#define mmMP1_SMN_C2PMSG_41_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_42 0x026a
+#define mmMP1_SMN_C2PMSG_42_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_43 0x026b
+#define mmMP1_SMN_C2PMSG_43_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_44 0x026c
+#define mmMP1_SMN_C2PMSG_44_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_45 0x026d
+#define mmMP1_SMN_C2PMSG_45_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_46 0x026e
+#define mmMP1_SMN_C2PMSG_46_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_47 0x026f
+#define mmMP1_SMN_C2PMSG_47_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_48 0x0270
+#define mmMP1_SMN_C2PMSG_48_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_49 0x0271
+#define mmMP1_SMN_C2PMSG_49_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_50 0x0272
+#define mmMP1_SMN_C2PMSG_50_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_51 0x0273
+#define mmMP1_SMN_C2PMSG_51_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_52 0x0274
+#define mmMP1_SMN_C2PMSG_52_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_53 0x0275
+#define mmMP1_SMN_C2PMSG_53_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_54 0x0276
+#define mmMP1_SMN_C2PMSG_54_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_55 0x0277
+#define mmMP1_SMN_C2PMSG_55_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_56 0x0278
+#define mmMP1_SMN_C2PMSG_56_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_57 0x0279
+#define mmMP1_SMN_C2PMSG_57_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_58 0x027a
+#define mmMP1_SMN_C2PMSG_58_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_59 0x027b
+#define mmMP1_SMN_C2PMSG_59_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_60 0x027c
+#define mmMP1_SMN_C2PMSG_60_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_61 0x027d
+#define mmMP1_SMN_C2PMSG_61_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_62 0x027e
+#define mmMP1_SMN_C2PMSG_62_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_63 0x027f
+#define mmMP1_SMN_C2PMSG_63_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_64 0x0280
+#define mmMP1_SMN_C2PMSG_64_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_65 0x0281
+#define mmMP1_SMN_C2PMSG_65_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_67 0x0283
+#define mmMP1_SMN_C2PMSG_67_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_68 0x0284
+#define mmMP1_SMN_C2PMSG_68_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_69 0x0285
+#define mmMP1_SMN_C2PMSG_69_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_70 0x0286
+#define mmMP1_SMN_C2PMSG_70_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_71 0x0287
+#define mmMP1_SMN_C2PMSG_71_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_72 0x0288
+#define mmMP1_SMN_C2PMSG_72_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_73 0x0289
+#define mmMP1_SMN_C2PMSG_73_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_74 0x028a
+#define mmMP1_SMN_C2PMSG_74_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_75 0x028b
+#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_76 0x028c
+#define mmMP1_SMN_C2PMSG_76_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_77 0x028d
+#define mmMP1_SMN_C2PMSG_77_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_78 0x028e
+#define mmMP1_SMN_C2PMSG_78_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_79 0x028f
+#define mmMP1_SMN_C2PMSG_79_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_80 0x0290
+#define mmMP1_SMN_C2PMSG_80_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_81 0x0291
+#define mmMP1_SMN_C2PMSG_81_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_83 0x0293
+#define mmMP1_SMN_C2PMSG_83_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_84 0x0294
+#define mmMP1_SMN_C2PMSG_84_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_85 0x0295
+#define mmMP1_SMN_C2PMSG_85_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_86 0x0296
+#define mmMP1_SMN_C2PMSG_86_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_87 0x0297
+#define mmMP1_SMN_C2PMSG_87_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_88 0x0298
+#define mmMP1_SMN_C2PMSG_88_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_89 0x0299
+#define mmMP1_SMN_C2PMSG_89_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_91 0x029b
+#define mmMP1_SMN_C2PMSG_91_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_92 0x029c
+#define mmMP1_SMN_C2PMSG_92_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_93 0x029d
+#define mmMP1_SMN_C2PMSG_93_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_94 0x029e
+#define mmMP1_SMN_C2PMSG_94_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_95 0x029f
+#define mmMP1_SMN_C2PMSG_95_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_96 0x02a0
+#define mmMP1_SMN_C2PMSG_96_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_97 0x02a1
+#define mmMP1_SMN_C2PMSG_97_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_98 0x02a2
+#define mmMP1_SMN_C2PMSG_98_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_99 0x02a3
+#define mmMP1_SMN_C2PMSG_99_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_100 0x02a4
+#define mmMP1_SMN_C2PMSG_100_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_101 0x02a5
+#define mmMP1_SMN_C2PMSG_101_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_102 0x02a6
+#define mmMP1_SMN_C2PMSG_102_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_103 0x02a7
+#define mmMP1_SMN_C2PMSG_103_BASE_IDX 0
+#define mmMP1_SMN_ACTIVE_FCN_ID 0x02c0
+#define mmMP1_SMN_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmMP1_SMN_IH_CREDIT 0x02c1
+#define mmMP1_SMN_IH_CREDIT_BASE_IDX 0
+#define mmMP1_SMN_IH_SW_INT 0x02c2
+#define mmMP1_SMN_IH_SW_INT_BASE_IDX 0
+#define mmMP1_SMN_IH_SW_INT_CTRL 0x02c3
+#define mmMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+#define mmMP1_SMN_FPS_CNT 0x02c4
+#define mmMP1_SMN_FPS_CNT_BASE_IDX 0
+#define mmMP1_SMN_EXT_SCRATCH0 0x03c0
+#define mmMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
+#define mmMP1_SMN_EXT_SCRATCH1 0x03c1
+#define mmMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
+#define mmMP1_SMN_EXT_SCRATCH2 0x03c2
+#define mmMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
+#define mmMP1_SMN_EXT_SCRATCH3 0x03c3
+#define mmMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
+#define mmMP1_SMN_EXT_SCRATCH4 0x03c4
+#define mmMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
+#define mmMP1_SMN_EXT_SCRATCH5 0x03c5
+#define mmMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
+#define mmMP1_SMN_EXT_SCRATCH6 0x03c6
+#define mmMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
+#define mmMP1_SMN_EXT_SCRATCH7 0x03c7
+#define mmMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
+#define mmMP1_SMN_EXT_SCRATCH8 0x03c8
+#define mmMP1_SMN_EXT_SCRATCH8_BASE_IDX 0
+
+
+// addressBlock: mp_SmuMp1Pub_CruDec
+// base address: 0x0
+#define mmMP1_SMN_PUB_CTRL 0x02c5
+#define mmMP1_SMN_PUB_CTRL_BASE_IDX 0
+
+
+
+#endif
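/*
 * A minimal, self-contained sketch of how the generated macro pairs in these
 * headers are typically consumed: each register is published as a dword
 * offset plus a *_BASE_IDX that selects an entry in a per-IP base-address
 * table, and each field as a __SHIFT/_MASK pair used to extract bits from
 * the 32-bit register value.  The reg_base[] table, its aperture value, and
 * the read_reg32() helper below are hypothetical stand-ins for illustration
 * only (they are not amdgpu APIs); the macro values are copied from the
 * definitions in this patch so the example compiles on its own.
 */
#include <stdint.h>
#include <stdio.h>

#define mmMP1_SMN_C2PMSG_90                     0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX            0
#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT       0x0
#define MP1_SMN_C2PMSG_90__CONTENT_MASK         0xFFFFFFFFL

/* Hypothetical base-address table indexed by the *_BASE_IDX macros. */
static const uint32_t reg_base[] = { 0x00016000 };

/* Placeholder for an MMIO dword read at the given register offset. */
static uint32_t read_reg32(uint32_t offset)
{
	(void)offset;
	return 0x12345678;	/* dummy value for the sketch */
}

int main(void)
{
	uint32_t offset = reg_base[mmMP1_SMN_C2PMSG_90_BASE_IDX] +
			  mmMP1_SMN_C2PMSG_90;
	uint32_t val = read_reg32(offset);
	uint32_t content = (val & MP1_SMN_C2PMSG_90__CONTENT_MASK) >>
			   MP1_SMN_C2PMSG_90__CONTENT__SHIFT;

	printf("MP1_SMN_C2PMSG_90.CONTENT = 0x%08x\n", (unsigned int)content);
	return 0;
}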
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_sh_mask.h
new file mode 100644
index 0000000..d5a623d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_sh_mask.h
@@ -0,0 +1,1463 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mp_9_0_SH_MASK_HEADER
+#define _mp_9_0_SH_MASK_HEADER
+
+
+// addressBlock: mp_SmuMp0_SmnDec
+//MP0_SMN_C2PMSG_32
+#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_33
+#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_34
+#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_35
+#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_36
+#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_37
+#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_38
+#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_39
+#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_40
+#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_41
+#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_42
+#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_43
+#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_44
+#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_45
+#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_46
+#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_47
+#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_48
+#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_49
+#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_50
+#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_51
+#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_52
+#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_53
+#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_54
+#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_55
+#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_56
+#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_57
+#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_58
+#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_59
+#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_60
+#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_61
+#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_62
+#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_63
+#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_64
+#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_65
+#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_66
+#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_67
+#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_68
+#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_69
+#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_70
+#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_71
+#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_72
+#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_73
+#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_74
+#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_75
+#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_76
+#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_77
+#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_78
+#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_79
+#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_80
+#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_81
+#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_82
+#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_83
+#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_84
+#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_85
+#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_86
+#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_87
+#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_88
+#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_89
+#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_90
+#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_91
+#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_92
+#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_93
+#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_94
+#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_95
+#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_96
+#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_97
+#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_98
+#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_99
+#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_100
+#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_101
+#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_102
+#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_103
+#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_ACTIVE_FCN_ID
+#define MP0_SMN_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define MP0_SMN_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define MP0_SMN_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define MP0_SMN_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//MP0_SMN_IH_CREDIT
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP0_SMN_IH_SW_INT
+#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x1
+#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000001L
+#define MP0_SMN_IH_SW_INT__ID_MASK 0x000001FEL
+//MP0_SMN_IH_SW_INT_CTRL
+#define MP0_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT_CTRL__SW_INT_ACK__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK_MASK 0x00000001L
+#define MP0_SMN_IH_SW_INT_CTRL__SW_INT_ACK_MASK 0x00000100L
+
+
+// addressBlock: mp_SmuMp1_SmnDec
+//MP1_SMN_ACP2MP_RESP
+#define MP1_SMN_ACP2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_SMN_ACP2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_DC2MP_RESP
+#define MP1_SMN_DC2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_SMN_DC2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_UVD2MP_RESP
+#define MP1_SMN_UVD2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_SMN_UVD2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_VCE2MP_RESP
+#define MP1_SMN_VCE2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_SMN_VCE2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_RLC2MP_RESP
+#define MP1_SMN_RLC2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_SMN_RLC2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_32
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_33
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_34
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_35
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_36
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_37
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_38
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_39
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_40
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_41
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_42
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_43
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_44
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_45
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_46
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_47
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_48
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_49
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_50
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_51
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_52
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_53
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_54
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_55
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_56
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_57
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_58
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_59
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_60
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_61
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_62
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_63
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_64
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_65
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_66
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_67
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_68
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_69
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_70
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_71
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_72
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_73
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_74
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_75
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_76
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_77
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_78
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_79
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_80
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_81
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_82
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_83
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_84
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_85
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_86
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_87
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_88
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_89
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_90
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_91
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_92
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_93
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_94
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_95
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_96
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_97
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_98
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_99
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_100
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_101
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_102
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_103
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_ACTIVE_FCN_ID
+#define MP1_SMN_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define MP1_SMN_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define MP1_SMN_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define MP1_SMN_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//MP1_SMN_IH_CREDIT
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_SMN_IH_SW_INT
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x1
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000001FEL
+//MP1_SMN_IH_SW_INT_CTRL
+#define MP1_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT_CTRL__SW_INT_ACK__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT_CTRL__SW_INT_ACK_MASK 0x00000100L
+//MP1_SMN_FPS_CNT
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH0
+#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH1
+#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH2
+#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH3
+#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH4
+#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH5
+#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH6
+#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH7
+#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH8
+#define MP1_SMN_EXT_SCRATCH8__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+
+
+
+
+// addressBlock: mp_SmuMp0Pub_CruDec
+//MP0_SOC_INFO
+#define MP0_SOC_INFO__SOC_DIE_ID__SHIFT 0x0
+#define MP0_SOC_INFO__SOC_PKG_TYPE__SHIFT 0x2
+#define MP0_SOC_INFO__SOC_DIE_ID_MASK 0x00000003L
+#define MP0_SOC_INFO__SOC_PKG_TYPE_MASK 0x0000001CL
+//MP0_PUB_SCRATCH0
+#define MP0_PUB_SCRATCH0__DATA__SHIFT 0x0
+#define MP0_PUB_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP0_PUB_SCRATCH1
+#define MP0_PUB_SCRATCH1__DATA__SHIFT 0x0
+#define MP0_PUB_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP0_PUB_SCRATCH2
+#define MP0_PUB_SCRATCH2__DATA__SHIFT 0x0
+#define MP0_PUB_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP0_PUB_SCRATCH3
+#define MP0_PUB_SCRATCH3__DATA__SHIFT 0x0
+#define MP0_PUB_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP0_FW_INTF
+#define MP0_FW_INTF__SS_SECURE__SHIFT 0x13
+#define MP0_FW_INTF__SS_SECURE_MASK 0x00080000L
+//MP0_C2PMSG_0
+#define MP0_C2PMSG_0__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_1
+#define MP0_C2PMSG_1__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_2
+#define MP0_C2PMSG_2__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_3
+#define MP0_C2PMSG_3__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_4
+#define MP0_C2PMSG_4__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_5
+#define MP0_C2PMSG_5__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_6
+#define MP0_C2PMSG_6__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_7
+#define MP0_C2PMSG_7__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_8
+#define MP0_C2PMSG_8__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_9
+#define MP0_C2PMSG_9__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_10
+#define MP0_C2PMSG_10__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_11
+#define MP0_C2PMSG_11__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_12
+#define MP0_C2PMSG_12__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_13
+#define MP0_C2PMSG_13__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_14
+#define MP0_C2PMSG_14__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_15
+#define MP0_C2PMSG_15__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_16
+#define MP0_C2PMSG_16__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_17
+#define MP0_C2PMSG_17__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_18
+#define MP0_C2PMSG_18__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_19
+#define MP0_C2PMSG_19__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_20
+#define MP0_C2PMSG_20__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_21
+#define MP0_C2PMSG_21__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_22
+#define MP0_C2PMSG_22__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_23
+#define MP0_C2PMSG_23__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_24
+#define MP0_C2PMSG_24__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_25
+#define MP0_C2PMSG_25__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_26
+#define MP0_C2PMSG_26__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_27
+#define MP0_C2PMSG_27__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_28
+#define MP0_C2PMSG_28__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_29
+#define MP0_C2PMSG_29__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_30
+#define MP0_C2PMSG_30__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_31
+#define MP0_C2PMSG_31__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL
+//MP0_P2CMSG_0
+#define MP0_P2CMSG_0__CONTENT__SHIFT 0x0
+#define MP0_P2CMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP0_P2CMSG_1
+#define MP0_P2CMSG_1__CONTENT__SHIFT 0x0
+#define MP0_P2CMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP0_P2CMSG_2
+#define MP0_P2CMSG_2__CONTENT__SHIFT 0x0
+#define MP0_P2CMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP0_P2CMSG_3
+#define MP0_P2CMSG_3__CONTENT__SHIFT 0x0
+#define MP0_P2CMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP0_P2CMSG_INTEN
+#define MP0_P2CMSG_INTEN__INTEN__SHIFT 0x0
+#define MP0_P2CMSG_INTEN__INTEN_MASK 0x0000000FL
+//MP0_P2CMSG_INTSTS
+#define MP0_P2CMSG_INTSTS__INTSTS0__SHIFT 0x0
+#define MP0_P2CMSG_INTSTS__INTSTS1__SHIFT 0x1
+#define MP0_P2CMSG_INTSTS__INTSTS2__SHIFT 0x2
+#define MP0_P2CMSG_INTSTS__INTSTS3__SHIFT 0x3
+#define MP0_P2CMSG_INTSTS__INTSTS0_MASK 0x00000001L
+#define MP0_P2CMSG_INTSTS__INTSTS1_MASK 0x00000002L
+#define MP0_P2CMSG_INTSTS__INTSTS2_MASK 0x00000004L
+#define MP0_P2CMSG_INTSTS__INTSTS3_MASK 0x00000008L
+//MP0_C2PMSG_ATTR_0
+#define MP0_C2PMSG_ATTR_0__MSG_ATTR__SHIFT 0x0
+#define MP0_C2PMSG_ATTR_0__MSG_ATTR_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_ATTR_1
+#define MP0_C2PMSG_ATTR_1__MSG_ATTR__SHIFT 0x0
+#define MP0_C2PMSG_ATTR_1__MSG_ATTR_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_ATTR_2
+#define MP0_C2PMSG_ATTR_2__MSG_ATTR__SHIFT 0x0
+#define MP0_C2PMSG_ATTR_2__MSG_ATTR_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_ATTR_3
+#define MP0_C2PMSG_ATTR_3__MSG_ATTR__SHIFT 0x0
+#define MP0_C2PMSG_ATTR_3__MSG_ATTR_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_ATTR_4
+#define MP0_C2PMSG_ATTR_4__MSG_ATTR__SHIFT 0x0
+#define MP0_C2PMSG_ATTR_4__MSG_ATTR_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_ATTR_5
+#define MP0_C2PMSG_ATTR_5__MSG_ATTR__SHIFT 0x0
+#define MP0_C2PMSG_ATTR_5__MSG_ATTR_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_ATTR_6
+#define MP0_C2PMSG_ATTR_6__MSG_ATTR__SHIFT 0x0
+#define MP0_C2PMSG_ATTR_6__MSG_ATTR_MASK 0x0000FFFFL
+//MP0_P2CMSG_ATTR
+#define MP0_P2CMSG_ATTR__MSG_ATTR__SHIFT 0x0
+#define MP0_P2CMSG_ATTR__MSG_ATTR_MASK 0x000000FFL
+//MP0_P2SMSG_0
+#define MP0_P2SMSG_0__CONTENT__SHIFT 0x0
+#define MP0_P2SMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP0_P2SMSG_1
+#define MP0_P2SMSG_1__CONTENT__SHIFT 0x0
+#define MP0_P2SMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP0_P2SMSG_2
+#define MP0_P2SMSG_2__CONTENT__SHIFT 0x0
+#define MP0_P2SMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP0_P2SMSG_3
+#define MP0_P2SMSG_3__CONTENT__SHIFT 0x0
+#define MP0_P2SMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP0_P2SMSG_ATTR
+#define MP0_P2SMSG_ATTR__MSG_ATTR__SHIFT 0x0
+#define MP0_P2SMSG_ATTR__MSG_ATTR_MASK 0x000000FFL
+//MP0_S2PMSG_ATTR
+#define MP0_S2PMSG_ATTR__MSG_ATTR__SHIFT 0x0
+#define MP0_S2PMSG_ATTR__MSG_ATTR_MASK 0x00000003L
+//MP0_P2SMSG_INTSTS
+#define MP0_P2SMSG_INTSTS__INTSTS0__SHIFT 0x0
+#define MP0_P2SMSG_INTSTS__INTSTS1__SHIFT 0x1
+#define MP0_P2SMSG_INTSTS__INTSTS2__SHIFT 0x2
+#define MP0_P2SMSG_INTSTS__INTSTS3__SHIFT 0x3
+#define MP0_P2SMSG_INTSTS__INTSTS0_MASK 0x00000001L
+#define MP0_P2SMSG_INTSTS__INTSTS1_MASK 0x00000002L
+#define MP0_P2SMSG_INTSTS__INTSTS2_MASK 0x00000004L
+#define MP0_P2SMSG_INTSTS__INTSTS3_MASK 0x00000008L
+//MP0_S2PMSG_0
+#define MP0_S2PMSG_0__CONTENT__SHIFT 0x0
+#define MP0_S2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_32
+#define MP0_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_33
+#define MP0_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_34
+#define MP0_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_35
+#define MP0_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_36
+#define MP0_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_37
+#define MP0_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_38
+#define MP0_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_39
+#define MP0_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_40
+#define MP0_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_41
+#define MP0_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_42
+#define MP0_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_43
+#define MP0_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_44
+#define MP0_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_45
+#define MP0_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_46
+#define MP0_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_47
+#define MP0_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_48
+#define MP0_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_49
+#define MP0_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_50
+#define MP0_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_51
+#define MP0_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_52
+#define MP0_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_53
+#define MP0_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_54
+#define MP0_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_55
+#define MP0_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_56
+#define MP0_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_57
+#define MP0_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_58
+#define MP0_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_59
+#define MP0_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_60
+#define MP0_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_61
+#define MP0_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_62
+#define MP0_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_63
+#define MP0_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_64
+#define MP0_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_65
+#define MP0_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_66
+#define MP0_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_67
+#define MP0_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_68
+#define MP0_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_69
+#define MP0_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_70
+#define MP0_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_71
+#define MP0_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_72
+#define MP0_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_73
+#define MP0_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_74
+#define MP0_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_75
+#define MP0_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_76
+#define MP0_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_77
+#define MP0_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_78
+#define MP0_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_79
+#define MP0_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_80
+#define MP0_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_81
+#define MP0_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_82
+#define MP0_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_83
+#define MP0_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_84
+#define MP0_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_85
+#define MP0_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_86
+#define MP0_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_87
+#define MP0_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_88
+#define MP0_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_89
+#define MP0_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_90
+#define MP0_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_91
+#define MP0_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_92
+#define MP0_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_93
+#define MP0_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_94
+#define MP0_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_95
+#define MP0_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_96
+#define MP0_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_97
+#define MP0_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_98
+#define MP0_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_99
+#define MP0_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_100
+#define MP0_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_101
+#define MP0_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_102
+#define MP0_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP0_C2PMSG_103
+#define MP0_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP0_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP0_ACTIVE_FCN_ID
+#define MP0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define MP0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define MP0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define MP0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//MP0_IH_CREDIT
+#define MP0_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP0_IH_SW_INT
+#define MP0_IH_SW_INT__ID__SHIFT 0x0
+#define MP0_IH_SW_INT__VALID__SHIFT 0x8
+#define MP0_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP0_IH_SW_INT__VALID_MASK 0x00000100L
+//MP0_IH_SW_INT_CTRL
+#define MP0_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP0_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP0_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP0_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+
+
+//CGTT_DRM_CLK_CTRL0
+#define CGTT_DRM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_DRM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_DRM_CLK_CTRL0__DIV_ID__SHIFT 0xc
+#define CGTT_DRM_CLK_CTRL0__RAMP_DIS_CLK_0__SHIFT 0x15
+#define CGTT_DRM_CLK_CTRL0__RAMP_DIS_CLK_REG__SHIFT 0x16
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_DRM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_DRM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_DRM_CLK_CTRL0__DIV_ID_MASK 0x00007000L
+#define CGTT_DRM_CLK_CTRL0__RAMP_DIS_CLK_0_MASK 0x00200000L
+#define CGTT_DRM_CLK_CTRL0__RAMP_DIS_CLK_REG_MASK 0x00400000L
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+//DRM_LIGHT_SLEEP_CTRL
+#define DRM_LIGHT_SLEEP_CTRL__MEM_LIGHT_SLEEP_EN__SHIFT 0x0
+#define DRM_LIGHT_SLEEP_CTRL__MEM_LIGHT_SLEEP_EN_MASK 0x00000001L
+
+
+// addressBlock: mp_SmuMp1Pub_CruDec
+//MP1_SMN_PUB_CTRL
+#define MP1_SMN_PUB_CTRL__RESET__SHIFT 0x0
+#define MP1_SMN_PUB_CTRL__RESET_MASK 0x00000001L
+//MP1_FIRMWARE_FLAGS
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
+#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
+//MP1_PUB_SCRATCH0
+#define MP1_PUB_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_PUB_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP1_PUB_SCRATCH1
+#define MP1_PUB_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_PUB_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP1_PUB_SCRATCH2
+#define MP1_PUB_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_PUB_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP1_PUB_SCRATCH3
+#define MP1_PUB_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_PUB_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_0
+#define MP1_C2PMSG_0__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_1
+#define MP1_C2PMSG_1__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_2
+#define MP1_C2PMSG_2__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_3
+#define MP1_C2PMSG_3__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_4
+#define MP1_C2PMSG_4__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_5
+#define MP1_C2PMSG_5__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_6
+#define MP1_C2PMSG_6__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_7
+#define MP1_C2PMSG_7__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_8
+#define MP1_C2PMSG_8__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_9
+#define MP1_C2PMSG_9__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_10
+#define MP1_C2PMSG_10__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_11
+#define MP1_C2PMSG_11__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_12
+#define MP1_C2PMSG_12__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_13
+#define MP1_C2PMSG_13__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_14
+#define MP1_C2PMSG_14__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_15
+#define MP1_C2PMSG_15__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_16
+#define MP1_C2PMSG_16__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_17
+#define MP1_C2PMSG_17__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_18
+#define MP1_C2PMSG_18__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_19
+#define MP1_C2PMSG_19__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_20
+#define MP1_C2PMSG_20__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_21
+#define MP1_C2PMSG_21__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_22
+#define MP1_C2PMSG_22__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_23
+#define MP1_C2PMSG_23__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_24
+#define MP1_C2PMSG_24__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_25
+#define MP1_C2PMSG_25__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_26
+#define MP1_C2PMSG_26__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_27
+#define MP1_C2PMSG_27__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_28
+#define MP1_C2PMSG_28__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_29
+#define MP1_C2PMSG_29__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_30
+#define MP1_C2PMSG_30__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_31
+#define MP1_C2PMSG_31__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_0
+#define MP1_P2CMSG_0__CONTENT__SHIFT 0x0
+#define MP1_P2CMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_1
+#define MP1_P2CMSG_1__CONTENT__SHIFT 0x0
+#define MP1_P2CMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_2
+#define MP1_P2CMSG_2__CONTENT__SHIFT 0x0
+#define MP1_P2CMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_3
+#define MP1_P2CMSG_3__CONTENT__SHIFT 0x0
+#define MP1_P2CMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2CMSG_INTEN
+#define MP1_P2CMSG_INTEN__INTEN__SHIFT 0x0
+#define MP1_P2CMSG_INTEN__INTEN_MASK 0x0000000FL
+//MP1_P2CMSG_INTSTS
+#define MP1_P2CMSG_INTSTS__INTSTS0__SHIFT 0x0
+#define MP1_P2CMSG_INTSTS__INTSTS1__SHIFT 0x1
+#define MP1_P2CMSG_INTSTS__INTSTS2__SHIFT 0x2
+#define MP1_P2CMSG_INTSTS__INTSTS3__SHIFT 0x3
+#define MP1_P2CMSG_INTSTS__INTSTS0_MASK 0x00000001L
+#define MP1_P2CMSG_INTSTS__INTSTS1_MASK 0x00000002L
+#define MP1_P2CMSG_INTSTS__INTSTS2_MASK 0x00000004L
+#define MP1_P2CMSG_INTSTS__INTSTS3_MASK 0x00000008L
+//MP1_P2SMSG_0
+#define MP1_P2SMSG_0__CONTENT__SHIFT 0x0
+#define MP1_P2SMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2SMSG_1
+#define MP1_P2SMSG_1__CONTENT__SHIFT 0x0
+#define MP1_P2SMSG_1__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2SMSG_2
+#define MP1_P2SMSG_2__CONTENT__SHIFT 0x0
+#define MP1_P2SMSG_2__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2SMSG_3
+#define MP1_P2SMSG_3__CONTENT__SHIFT 0x0
+#define MP1_P2SMSG_3__CONTENT_MASK 0xFFFFFFFFL
+//MP1_P2SMSG_INTSTS
+#define MP1_P2SMSG_INTSTS__INTSTS0__SHIFT 0x0
+#define MP1_P2SMSG_INTSTS__INTSTS1__SHIFT 0x1
+#define MP1_P2SMSG_INTSTS__INTSTS2__SHIFT 0x2
+#define MP1_P2SMSG_INTSTS__INTSTS3__SHIFT 0x3
+#define MP1_P2SMSG_INTSTS__INTSTS0_MASK 0x00000001L
+#define MP1_P2SMSG_INTSTS__INTSTS1_MASK 0x00000002L
+#define MP1_P2SMSG_INTSTS__INTSTS2_MASK 0x00000004L
+#define MP1_P2SMSG_INTSTS__INTSTS3_MASK 0x00000008L
+//MP1_S2PMSG_0
+#define MP1_S2PMSG_0__CONTENT__SHIFT 0x0
+#define MP1_S2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
+//MP1_ACP2MP_RESP
+#define MP1_ACP2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_ACP2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_DC2MP_RESP
+#define MP1_DC2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_DC2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_UVD2MP_RESP
+#define MP1_UVD2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_UVD2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_VCE2MP_RESP
+#define MP1_VCE2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_VCE2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_RLC2MP_RESP
+#define MP1_RLC2MP_RESP__CONTENT__SHIFT 0x0
+#define MP1_RLC2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_32
+#define MP1_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_33
+#define MP1_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_34
+#define MP1_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_35
+#define MP1_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_36
+#define MP1_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_37
+#define MP1_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_38
+#define MP1_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_39
+#define MP1_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_40
+#define MP1_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_41
+#define MP1_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_42
+#define MP1_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_43
+#define MP1_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_44
+#define MP1_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_45
+#define MP1_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_46
+#define MP1_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_47
+#define MP1_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_48
+#define MP1_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_49
+#define MP1_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_50
+#define MP1_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_51
+#define MP1_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_52
+#define MP1_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_53
+#define MP1_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_54
+#define MP1_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_55
+#define MP1_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_56
+#define MP1_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_57
+#define MP1_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_58
+#define MP1_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_59
+#define MP1_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_60
+#define MP1_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_61
+#define MP1_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_62
+#define MP1_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_63
+#define MP1_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_64
+#define MP1_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_65
+#define MP1_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_66
+#define MP1_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_67
+#define MP1_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_68
+#define MP1_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_69
+#define MP1_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_70
+#define MP1_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_71
+#define MP1_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_72
+#define MP1_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_73
+#define MP1_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_74
+#define MP1_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_75
+#define MP1_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_76
+#define MP1_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_77
+#define MP1_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_78
+#define MP1_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_79
+#define MP1_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_80
+#define MP1_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_81
+#define MP1_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_82
+#define MP1_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_83
+#define MP1_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_84
+#define MP1_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_85
+#define MP1_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_86
+#define MP1_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_87
+#define MP1_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_88
+#define MP1_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_89
+#define MP1_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_90
+#define MP1_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_91
+#define MP1_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_92
+#define MP1_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_93
+#define MP1_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_94
+#define MP1_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_95
+#define MP1_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_96
+#define MP1_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_97
+#define MP1_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_98
+#define MP1_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_99
+#define MP1_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_100
+#define MP1_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_101
+#define MP1_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_102
+#define MP1_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_C2PMSG_103
+#define MP1_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_ACTIVE_FCN_ID
+#define MP1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define MP1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define MP1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define MP1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//MP1_IH_CREDIT
+#define MP1_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_IH_SW_INT
+#define MP1_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_IH_SW_INT__VALID_MASK 0x00000100L
+//MP1_IH_SW_INT_CTRL
+#define MP1_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+//MP1_FPS_CNT
+#define MP1_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+//MP1_PUB_CTRL
+#define MP1_PUB_CTRL__RESET__SHIFT 0x0
+#define MP1_PUB_CTRL__RESET_MASK 0x00000001L
+//MP1_EXT_SCRATCH0
+#define MP1_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP1_EXT_SCRATCH1
+#define MP1_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP1_EXT_SCRATCH2
+#define MP1_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP1_EXT_SCRATCH3
+#define MP1_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP1_EXT_SCRATCH4
+#define MP1_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//MP1_EXT_SCRATCH5
+#define MP1_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//MP1_EXT_SCRATCH6
+#define MP1_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//MP1_EXT_SCRATCH7
+#define MP1_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
index 68d0ffa..68d0ffa 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_sh_mask.h
index c7518b8..c7518b8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_default.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_default.h
index 8058796..8058796 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_default.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h
index 13d4de6..13d4de6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h
index a02b679..a02b679 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_default.h
index f5fc31f..f5fc31f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_default.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
index 4354622..4354622 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h
index 8860247..8860247 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/NBIO/nbio_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_offset.h
new file mode 100644
index 0000000..54503d2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_offset.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _osssys_4_0_1_OFFSET_HEADER
+#define _osssys_4_0_1_OFFSET_HEADER
+
+
+
+// addressBlock: osssys_osssysdec
+// base address: 0x4280
+#define mmIH_VMID_0_LUT 0x0000
+#define mmIH_VMID_0_LUT_BASE_IDX 0
+#define mmIH_VMID_1_LUT 0x0001
+#define mmIH_VMID_1_LUT_BASE_IDX 0
+#define mmIH_VMID_2_LUT 0x0002
+#define mmIH_VMID_2_LUT_BASE_IDX 0
+#define mmIH_VMID_3_LUT 0x0003
+#define mmIH_VMID_3_LUT_BASE_IDX 0
+#define mmIH_VMID_4_LUT 0x0004
+#define mmIH_VMID_4_LUT_BASE_IDX 0
+#define mmIH_VMID_5_LUT 0x0005
+#define mmIH_VMID_5_LUT_BASE_IDX 0
+#define mmIH_VMID_6_LUT 0x0006
+#define mmIH_VMID_6_LUT_BASE_IDX 0
+#define mmIH_VMID_7_LUT 0x0007
+#define mmIH_VMID_7_LUT_BASE_IDX 0
+#define mmIH_VMID_8_LUT 0x0008
+#define mmIH_VMID_8_LUT_BASE_IDX 0
+#define mmIH_VMID_9_LUT 0x0009
+#define mmIH_VMID_9_LUT_BASE_IDX 0
+#define mmIH_VMID_10_LUT 0x000a
+#define mmIH_VMID_10_LUT_BASE_IDX 0
+#define mmIH_VMID_11_LUT 0x000b
+#define mmIH_VMID_11_LUT_BASE_IDX 0
+#define mmIH_VMID_12_LUT 0x000c
+#define mmIH_VMID_12_LUT_BASE_IDX 0
+#define mmIH_VMID_13_LUT 0x000d
+#define mmIH_VMID_13_LUT_BASE_IDX 0
+#define mmIH_VMID_14_LUT 0x000e
+#define mmIH_VMID_14_LUT_BASE_IDX 0
+#define mmIH_VMID_15_LUT 0x000f
+#define mmIH_VMID_15_LUT_BASE_IDX 0
+#define mmIH_VMID_0_LUT_MM 0x0010
+#define mmIH_VMID_0_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_1_LUT_MM 0x0011
+#define mmIH_VMID_1_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_2_LUT_MM 0x0012
+#define mmIH_VMID_2_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_3_LUT_MM 0x0013
+#define mmIH_VMID_3_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_4_LUT_MM 0x0014
+#define mmIH_VMID_4_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_5_LUT_MM 0x0015
+#define mmIH_VMID_5_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_6_LUT_MM 0x0016
+#define mmIH_VMID_6_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_7_LUT_MM 0x0017
+#define mmIH_VMID_7_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_8_LUT_MM 0x0018
+#define mmIH_VMID_8_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_9_LUT_MM 0x0019
+#define mmIH_VMID_9_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_10_LUT_MM 0x001a
+#define mmIH_VMID_10_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_11_LUT_MM 0x001b
+#define mmIH_VMID_11_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_12_LUT_MM 0x001c
+#define mmIH_VMID_12_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_13_LUT_MM 0x001d
+#define mmIH_VMID_13_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_14_LUT_MM 0x001e
+#define mmIH_VMID_14_LUT_MM_BASE_IDX 0
+#define mmIH_VMID_15_LUT_MM 0x001f
+#define mmIH_VMID_15_LUT_MM_BASE_IDX 0
+#define mmIH_COOKIE_0 0x0020
+#define mmIH_COOKIE_0_BASE_IDX 0
+#define mmIH_COOKIE_1 0x0021
+#define mmIH_COOKIE_1_BASE_IDX 0
+#define mmIH_COOKIE_2 0x0022
+#define mmIH_COOKIE_2_BASE_IDX 0
+#define mmIH_COOKIE_3 0x0023
+#define mmIH_COOKIE_3_BASE_IDX 0
+#define mmIH_COOKIE_4 0x0024
+#define mmIH_COOKIE_4_BASE_IDX 0
+#define mmIH_COOKIE_5 0x0025
+#define mmIH_COOKIE_5_BASE_IDX 0
+#define mmIH_COOKIE_6 0x0026
+#define mmIH_COOKIE_6_BASE_IDX 0
+#define mmIH_COOKIE_7 0x0027
+#define mmIH_COOKIE_7_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART0 0x003f
+#define mmIH_REGISTER_LAST_PART0_BASE_IDX 0
+#define mmSEM_REQ_INPUT_0 0x0040
+#define mmSEM_REQ_INPUT_0_BASE_IDX 0
+#define mmSEM_REQ_INPUT_1 0x0041
+#define mmSEM_REQ_INPUT_1_BASE_IDX 0
+#define mmSEM_REQ_INPUT_2 0x0042
+#define mmSEM_REQ_INPUT_2_BASE_IDX 0
+#define mmSEM_REQ_INPUT_3 0x0043
+#define mmSEM_REQ_INPUT_3_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART0 0x007f
+#define mmSEM_REGISTER_LAST_PART0_BASE_IDX 0
+#define mmIH_RB_CNTL 0x0080
+#define mmIH_RB_CNTL_BASE_IDX 0
+#define mmIH_RB_BASE 0x0081
+#define mmIH_RB_BASE_BASE_IDX 0
+#define mmIH_RB_BASE_HI 0x0082
+#define mmIH_RB_BASE_HI_BASE_IDX 0
+#define mmIH_RB_RPTR 0x0083
+#define mmIH_RB_RPTR_BASE_IDX 0
+#define mmIH_RB_WPTR 0x0084
+#define mmIH_RB_WPTR_BASE_IDX 0
+#define mmIH_RB_WPTR_ADDR_HI 0x0085
+#define mmIH_RB_WPTR_ADDR_HI_BASE_IDX 0
+#define mmIH_RB_WPTR_ADDR_LO 0x0086
+#define mmIH_RB_WPTR_ADDR_LO_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR 0x0087
+#define mmIH_DOORBELL_RPTR_BASE_IDX 0
+#define mmIH_RB_CNTL_RING1 0x0088
+#define mmIH_RB_CNTL_RING1_BASE_IDX 0
+#define mmIH_RB_BASE_RING1 0x0089
+#define mmIH_RB_BASE_RING1_BASE_IDX 0
+#define mmIH_RB_BASE_HI_RING1 0x008a
+#define mmIH_RB_BASE_HI_RING1_BASE_IDX 0
+#define mmIH_RB_RPTR_RING1 0x008b
+#define mmIH_RB_RPTR_RING1_BASE_IDX 0
+#define mmIH_RB_WPTR_RING1 0x008c
+#define mmIH_RB_WPTR_RING1_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR_RING1 0x008f
+#define mmIH_DOORBELL_RPTR_RING1_BASE_IDX 0
+#define mmIH_RB_CNTL_RING2 0x0090
+#define mmIH_RB_CNTL_RING2_BASE_IDX 0
+#define mmIH_RB_BASE_RING2 0x0091
+#define mmIH_RB_BASE_RING2_BASE_IDX 0
+#define mmIH_RB_BASE_HI_RING2 0x0092
+#define mmIH_RB_BASE_HI_RING2_BASE_IDX 0
+#define mmIH_RB_RPTR_RING2 0x0093
+#define mmIH_RB_RPTR_RING2_BASE_IDX 0
+#define mmIH_RB_WPTR_RING2 0x0094
+#define mmIH_RB_WPTR_RING2_BASE_IDX 0
+#define mmIH_DOORBELL_RPTR_RING2 0x0097
+#define mmIH_DOORBELL_RPTR_RING2_BASE_IDX 0
+#define mmIH_VERSION 0x0098
+#define mmIH_VERSION_BASE_IDX 0
+#define mmIH_CNTL 0x00c0
+#define mmIH_CNTL_BASE_IDX 0
+#define mmIH_CNTL2 0x00c1
+#define mmIH_CNTL2_BASE_IDX 0
+#define mmIH_STATUS 0x00c2
+#define mmIH_STATUS_BASE_IDX 0
+#define mmIH_PERFMON_CNTL 0x00c3
+#define mmIH_PERFMON_CNTL_BASE_IDX 0
+#define mmIH_PERFCOUNTER0_RESULT 0x00c4
+#define mmIH_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmIH_PERFCOUNTER1_RESULT 0x00c5
+#define mmIH_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7
+#define mmIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8
+#define mmIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9
+#define mmIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0
+#define mmIH_DSM_MATCH_FIELD_CONTROL 0x00ca
+#define mmIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0
+#define mmIH_DSM_MATCH_DATA_CONTROL 0x00cb
+#define mmIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0
+#define mmIH_DSM_MATCH_FCN_ID 0x00cc
+#define mmIH_DSM_MATCH_FCN_ID_BASE_IDX 0
+#define mmIH_LIMIT_INT_RATE_CNTL 0x00cd
+#define mmIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0
+#define mmIH_VF_RB_STATUS 0x00ce
+#define mmIH_VF_RB_STATUS_BASE_IDX 0
+#define mmIH_VF_RB_STATUS2 0x00cf
+#define mmIH_VF_RB_STATUS2_BASE_IDX 0
+#define mmIH_VF_RB1_STATUS 0x00d0
+#define mmIH_VF_RB1_STATUS_BASE_IDX 0
+#define mmIH_VF_RB1_STATUS2 0x00d1
+#define mmIH_VF_RB1_STATUS2_BASE_IDX 0
+#define mmIH_VF_RB2_STATUS 0x00d2
+#define mmIH_VF_RB2_STATUS_BASE_IDX 0
+#define mmIH_VF_RB2_STATUS2 0x00d3
+#define mmIH_VF_RB2_STATUS2_BASE_IDX 0
+#define mmIH_INT_FLOOD_CNTL 0x00d5
+#define mmIH_INT_FLOOD_CNTL_BASE_IDX 0
+#define mmIH_RB0_INT_FLOOD_STATUS 0x00d6
+#define mmIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_RB1_INT_FLOOD_STATUS 0x00d7
+#define mmIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_RB2_INT_FLOOD_STATUS 0x00d8
+#define mmIH_RB2_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_INT_FLOOD_STATUS 0x00d9
+#define mmIH_INT_FLOOD_STATUS_BASE_IDX 0
+#define mmIH_STORM_CLIENT_LIST_CNTL 0x00da
+#define mmIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0
+#define mmIH_CLK_CTRL 0x00db
+#define mmIH_CLK_CTRL_BASE_IDX 0
+#define mmIH_INT_FLAGS 0x00dc
+#define mmIH_INT_FLAGS_BASE_IDX 0
+#define mmIH_LAST_INT_INFO0 0x00dd
+#define mmIH_LAST_INT_INFO0_BASE_IDX 0
+#define mmIH_LAST_INT_INFO1 0x00de
+#define mmIH_LAST_INT_INFO1_BASE_IDX 0
+#define mmIH_LAST_INT_INFO2 0x00df
+#define mmIH_LAST_INT_INFO2_BASE_IDX 0
+#define mmIH_SCRATCH 0x00e0
+#define mmIH_SCRATCH_BASE_IDX 0
+#define mmIH_CLIENT_CREDIT_ERROR 0x00e1
+#define mmIH_CLIENT_CREDIT_ERROR_BASE_IDX 0
+#define mmIH_GPU_IOV_VIOLATION_LOG 0x00e2
+#define mmIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmIH_COOKIE_REC_VIOLATION_LOG 0x00e3
+#define mmIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0
+#define mmIH_CREDIT_STATUS 0x00e4
+#define mmIH_CREDIT_STATUS_BASE_IDX 0
+#define mmIH_MMHUB_ERROR 0x00e5
+#define mmIH_MMHUB_ERROR_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART2 0x00ff
+#define mmIH_REGISTER_LAST_PART2_BASE_IDX 0
+#define mmSEM_CLK_CTRL 0x0100
+#define mmSEM_CLK_CTRL_BASE_IDX 0
+#define mmSEM_UTC_CREDIT 0x0101
+#define mmSEM_UTC_CREDIT_BASE_IDX 0
+#define mmSEM_UTC_CONFIG 0x0102
+#define mmSEM_UTC_CONFIG_BASE_IDX 0
+#define mmSEM_UTCL2_TRAN_EN_LUT 0x0103
+#define mmSEM_UTCL2_TRAN_EN_LUT_BASE_IDX 0
+#define mmSEM_MCIF_CONFIG 0x0104
+#define mmSEM_MCIF_CONFIG_BASE_IDX 0
+#define mmSEM_PERFMON_CNTL 0x0105
+#define mmSEM_PERFMON_CNTL_BASE_IDX 0
+#define mmSEM_PERFCOUNTER0_RESULT 0x0106
+#define mmSEM_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmSEM_PERFCOUNTER1_RESULT 0x0107
+#define mmSEM_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmSEM_STATUS 0x0108
+#define mmSEM_STATUS_BASE_IDX 0
+#define mmSEM_MAILBOX_CLIENTCONFIG 0x0109
+#define mmSEM_MAILBOX_CLIENTCONFIG_BASE_IDX 0
+#define mmSEM_MAILBOX 0x010a
+#define mmSEM_MAILBOX_BASE_IDX 0
+#define mmSEM_MAILBOX_CONTROL 0x010b
+#define mmSEM_MAILBOX_CONTROL_BASE_IDX 0
+#define mmSEM_CHICKEN_BITS 0x010c
+#define mmSEM_CHICKEN_BITS_BASE_IDX 0
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA 0x010d
+#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA_BASE_IDX 0
+#define mmSEM_GPU_IOV_VIOLATION_LOG 0x010e
+#define mmSEM_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmSEM_OUTSTANDING_THRESHOLD 0x010f
+#define mmSEM_OUTSTANDING_THRESHOLD_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART2 0x017f
+#define mmSEM_REGISTER_LAST_PART2_BASE_IDX 0
+#define mmIH_ACTIVE_FCN_ID 0x0180
+#define mmIH_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmIH_VIRT_RESET_REQ 0x0181
+#define mmIH_VIRT_RESET_REQ_BASE_IDX 0
+#define mmIH_CLIENT_CFG 0x0184
+#define mmIH_CLIENT_CFG_BASE_IDX 0
+#define mmIH_CLIENT_CFG_INDEX 0x0188
+#define mmIH_CLIENT_CFG_INDEX_BASE_IDX 0
+#define mmIH_CLIENT_CFG_DATA 0x0189
+#define mmIH_CLIENT_CFG_DATA_BASE_IDX 0
+#define mmIH_CID_REMAP_INDEX 0x018a
+#define mmIH_CID_REMAP_INDEX_BASE_IDX 0
+#define mmIH_CID_REMAP_DATA 0x018b
+#define mmIH_CID_REMAP_DATA_BASE_IDX 0
+#define mmIH_CHICKEN 0x018c
+#define mmIH_CHICKEN_BASE_IDX 0
+#define mmIH_MMHUB_CNTL 0x018d
+#define mmIH_MMHUB_CNTL_BASE_IDX 0
+#define mmIH_INT_DROP_CNTL 0x018e
+#define mmIH_INT_DROP_CNTL_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_VALUE0 0x018f
+#define mmIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_VALUE1 0x0190
+#define mmIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_MASK0 0x0191
+#define mmIH_INT_DROP_MATCH_MASK0_BASE_IDX 0
+#define mmIH_INT_DROP_MATCH_MASK1 0x0192
+#define mmIH_INT_DROP_MATCH_MASK1_BASE_IDX 0
+#define mmIH_REGISTER_LAST_PART1 0x019f
+#define mmIH_REGISTER_LAST_PART1_BASE_IDX 0
+#define mmSEM_ACTIVE_FCN_ID 0x01a0
+#define mmSEM_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmSEM_VIRT_RESET_REQ 0x01a1
+#define mmSEM_VIRT_RESET_REQ_BASE_IDX 0
+#define mmSEM_RESP_SDMA0 0x01a4
+#define mmSEM_RESP_SDMA0_BASE_IDX 0
+#define mmSEM_RESP_SDMA1 0x01a5
+#define mmSEM_RESP_SDMA1_BASE_IDX 0
+#define mmSEM_RESP_UVD 0x01a6
+#define mmSEM_RESP_UVD_BASE_IDX 0
+#define mmSEM_RESP_VCE_0 0x01a7
+#define mmSEM_RESP_VCE_0_BASE_IDX 0
+#define mmSEM_RESP_ACP 0x01a8
+#define mmSEM_RESP_ACP_BASE_IDX 0
+#define mmSEM_RESP_ISP 0x01a9
+#define mmSEM_RESP_ISP_BASE_IDX 0
+#define mmSEM_RESP_VCE_1 0x01aa
+#define mmSEM_RESP_VCE_1_BASE_IDX 0
+#define mmSEM_RESP_VP8 0x01ab
+#define mmSEM_RESP_VP8_BASE_IDX 0
+#define mmSEM_RESP_GC 0x01ac
+#define mmSEM_RESP_GC_BASE_IDX 0
+#define mmSEM_CID_REMAP_INDEX 0x01b0
+#define mmSEM_CID_REMAP_INDEX_BASE_IDX 0
+#define mmSEM_CID_REMAP_DATA 0x01b1
+#define mmSEM_CID_REMAP_DATA_BASE_IDX 0
+#define mmSEM_ATOMIC_OP_LUT 0x01b2
+#define mmSEM_ATOMIC_OP_LUT_BASE_IDX 0
+#define mmSEM_EDC_CONFIG 0x01b3
+#define mmSEM_EDC_CONFIG_BASE_IDX 0
+#define mmSEM_CHICKEN_BITS2 0x01b4
+#define mmSEM_CHICKEN_BITS2_BASE_IDX 0
+#define mmSEM_MMHUB_CNTL 0x01b5
+#define mmSEM_MMHUB_CNTL_BASE_IDX 0
+#define mmSEM_REGISTER_LAST_PART1 0x01bf
+#define mmSEM_REGISTER_LAST_PART1_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_sh_mask.h
new file mode 100644
index 0000000..19c4a40
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_sh_mask.h
@@ -0,0 +1,1249 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _osssys_4_0_1_SH_MASK_HEADER
+#define _osssys_4_0_1_SH_MASK_HEADER
+
+
+// addressBlock: osssys_osssysdec
+//IH_VMID_0_LUT
+#define IH_VMID_0_LUT__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT
+#define IH_VMID_1_LUT__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT
+#define IH_VMID_2_LUT__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT
+#define IH_VMID_3_LUT__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT
+#define IH_VMID_4_LUT__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT
+#define IH_VMID_5_LUT__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT
+#define IH_VMID_6_LUT__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT
+#define IH_VMID_7_LUT__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT
+#define IH_VMID_8_LUT__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT
+#define IH_VMID_9_LUT__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT
+#define IH_VMID_10_LUT__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT
+#define IH_VMID_11_LUT__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT
+#define IH_VMID_12_LUT__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT
+#define IH_VMID_13_LUT__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT
+#define IH_VMID_14_LUT__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT
+#define IH_VMID_15_LUT__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_0_LUT_MM
+#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT_MM
+#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT_MM
+#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT_MM
+#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT_MM
+#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT_MM
+#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT_MM
+#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT_MM
+#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT_MM
+#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT_MM
+#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT_MM
+#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT_MM
+#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT_MM
+#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT_MM
+#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT_MM
+#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT_MM
+#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_COOKIE_0
+#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0
+#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8
+#define IH_COOKIE_0__RING_ID__SHIFT 0x10
+#define IH_COOKIE_0__VM_ID__SHIFT 0x18
+#define IH_COOKIE_0__RESERVED__SHIFT 0x1c
+#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f
+#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL
+#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L
+#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L
+#define IH_COOKIE_0__RESERVED_MASK 0x70000000L
+#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L
+//IH_COOKIE_1
+#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0
+#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_2
+#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0
+#define IH_COOKIE_2__RESERVED__SHIFT 0x10
+#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f
+#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL
+#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L
+//IH_COOKIE_3
+#define IH_COOKIE_3__PAS_ID__SHIFT 0x0
+#define IH_COOKIE_3__RESERVED__SHIFT 0x10
+#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f
+#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL
+#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L
+//IH_COOKIE_4
+#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0
+#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_5
+#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0
+#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL
+//IH_COOKIE_6
+#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0
+#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL
+//IH_COOKIE_7
+#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0
+#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART0
+#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_0
+#define SEM_REQ_INPUT_0__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_0__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_1
+#define SEM_REQ_INPUT_1__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_1__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_2
+#define SEM_REQ_INPUT_2__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_2__DATA_MASK 0xFFFFFFFFL
+//SEM_REQ_INPUT_3
+#define SEM_REQ_INPUT_3__DATA__SHIFT 0x0
+#define SEM_REQ_INPUT_3__DATA_MASK 0xFFFFFFFFL
+//SEM_REGISTER_LAST_PART0
+#define SEM_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//IH_RB_CNTL
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15
+#define IH_RB_CNTL__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L
+#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L
+#define IH_RB_CNTL__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE
+#define IH_RB_BASE__ADDR__SHIFT 0x0
+#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI
+#define IH_RB_BASE_HI__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR
+#define IH_RB_RPTR__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_WPTR_ADDR_HI
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//IH_RB_WPTR_ADDR_LO
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//IH_DOORBELL_RPTR
+#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING1
+#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING1__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE_RING1
+#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING1
+#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR_RING1
+#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING1
+#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_DOORBELL_RPTR_RING1
+#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING2
+#define IH_RB_CNTL_RING2__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING2__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE__SHIFT 0x7
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING2__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING2__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING2__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING2__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING2__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING2__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING2__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING2__RB_GPU_TS_ENABLE_MASK 0x00000080L
+#define IH_RB_CNTL_RING2__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING2__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING2__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING2__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING2__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING2__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING2__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING2__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING2__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING2__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_BASE_RING2
+#define IH_RB_BASE_RING2__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING2__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING2
+#define IH_RB_BASE_HI_RING2__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING2__ADDR_MASK 0x000000FFL
+//IH_RB_RPTR_RING2
+#define IH_RB_RPTR_RING2__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING2__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING2
+#define IH_RB_WPTR_RING2__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING2__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING2__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING2__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING2__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING2__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_DOORBELL_RPTR_RING2
+#define IH_DOORBELL_RPTR_RING2__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING2__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING2__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING2__ENABLE_MASK 0x10000000L
+//IH_VERSION
+#define IH_VERSION__MINVER__SHIFT 0x0
+#define IH_VERSION__MAJVER__SHIFT 0x8
+#define IH_VERSION__REV__SHIFT 0x10
+#define IH_VERSION__MINVER_MASK 0x0000007FL
+#define IH_VERSION__MAJVER_MASK 0x00007F00L
+#define IH_VERSION__REV_MASK 0x003F0000L
+//IH_CNTL
+#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6
+#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14
+#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L
+#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L
+//IH_CNTL2
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L
+//IH_STATUS
+#define IH_STATUS__IDLE__SHIFT 0x0
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x1
+#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2
+#define IH_STATUS__RB_FULL__SHIFT 0x3
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x7
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa
+#define IH_STATUS__SWITCH_READY__SHIFT 0xb
+#define IH_STATUS__RB1_FULL__SHIFT 0xc
+#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd
+#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe
+#define IH_STATUS__RB2_FULL__SHIFT 0xf
+#define IH_STATUS__RB2_FULL_DRAIN__SHIFT 0x10
+#define IH_STATUS__RB2_OVERFLOW__SHIFT 0x11
+#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__SWITCH_READY_MASK 0x00000800L
+#define IH_STATUS__RB1_FULL_MASK 0x00001000L
+#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L
+#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L
+#define IH_STATUS__RB2_FULL_MASK 0x00008000L
+#define IH_STATUS__RB2_FULL_DRAIN_MASK 0x00010000L
+#define IH_STATUS__RB2_OVERFLOW_MASK 0x00020000L
+#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L
+//IH_PERFMON_CNTL
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x000007FCL
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x07FC0000L
+//IH_PERFCOUNTER0_RESULT
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_PERFCOUNTER1_RESULT
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_31_0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_63_32
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_95_64
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_FIELD_CONTROL
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L
+//IH_DSM_MATCH_DATA_CONTROL
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL
+//IH_DSM_MATCH_FCN_ID
+#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x0
+#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x1
+#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000001L
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000001EL
+//IH_LIMIT_INT_RATE_CNTL
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L
+//IH_VF_RB_STATUS
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB_STATUS2
+#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF__SHIFT 0x10
+#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+#define IH_VF_RB_STATUS2__BIF_INTERRUPT_LINE_VF_MASK 0xFFFF0000L
+//IH_VF_RB1_STATUS
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB1_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB1_STATUS2
+#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_VF_RB2_STATUS
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF__SHIFT 0x10
+#define IH_VF_RB2_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+#define IH_VF_RB2_STATUS__RB_OVERFLOW_VF_MASK 0xFFFF0000L
+//IH_VF_RB2_STATUS2
+#define IH_VF_RB2_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB2_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_INT_FLOOD_CNTL
+#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4
+#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L
+//IH_RB0_INT_FLOOD_STATUS
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB1_INT_FLOOD_STATUS
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB2_INT_FLOOD_STATUS
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB2_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_INT_FLOOD_STATUS
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1c
+#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x0F000000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x10000000L
+#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L
+//IH_STORM_CLIENT_LIST_CNTL
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
+//IH_CLK_CTRL
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//IH_INT_FLAGS
+#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0
+#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1
+#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2
+#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3
+#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4
+#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5
+#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6
+#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7
+#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8
+#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9
+#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa
+#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc
+#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd
+#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe
+#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf
+#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10
+#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11
+#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12
+#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13
+#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14
+#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16
+#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17
+#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18
+#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19
+#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a
+#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b
+#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c
+#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d
+#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e
+#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f
+#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L
+#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L
+#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L
+#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L
+#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L
+#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L
+#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L
+#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L
+#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L
+#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L
+#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L
+#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L
+#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L
+#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L
+#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L
+#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L
+#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L
+#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L
+#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L
+#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L
+#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L
+#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L
+#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L
+#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L
+#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L
+#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L
+#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L
+#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L
+#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L
+#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L
+#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L
+#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L
+//IH_LAST_INT_INFO0
+#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8
+#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18
+#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f
+#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL
+#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L
+#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L
+#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L
+//IH_LAST_INT_INFO1
+#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//IH_LAST_INT_INFO2
+#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO2__VF__SHIFT 0x14
+#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL
+#define IH_LAST_INT_INFO2__VF_ID_MASK 0x000F0000L
+#define IH_LAST_INT_INFO2__VF_MASK 0x00100000L
+//IH_SCRATCH
+#define IH_SCRATCH__DATA__SHIFT 0x0
+#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL
+//IH_CLIENT_CREDIT_ERROR
+#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f
+#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L
+//IH_GPU_IOV_VIOLATION_LOG
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x14
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x00F00000L
+#define IH_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//IH_COOKIE_REC_VIOLATION_LOG
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x10
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x00FF0000L
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//IH_CREDIT_STATUS
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 0x00000010L
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L
+//IH_MMHUB_ERROR
+#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1
+#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2
+#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7
+#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L
+#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L
+#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L
+//IH_REGISTER_LAST_PART2
+#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//SEM_CLK_CTRL
+#define SEM_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SEM_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SEM_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SEM_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SEM_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SEM_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SEM_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SEM_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SEM_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SEM_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SEM_UTC_CREDIT
+#define SEM_UTC_CREDIT__UTCL2_CREDIT__SHIFT 0x0
+#define SEM_UTC_CREDIT__WATERMARK__SHIFT 0x8
+#define SEM_UTC_CREDIT__UTCL2_CREDIT_MASK 0x0000001FL
+#define SEM_UTC_CREDIT__WATERMARK_MASK 0x00000F00L
+//SEM_UTC_CONFIG
+#define SEM_UTC_CONFIG__USE_MTYPE__SHIFT 0x0
+#define SEM_UTC_CONFIG__FORCE_SNOOP__SHIFT 0x3
+#define SEM_UTC_CONFIG__FORCE_GCC__SHIFT 0x4
+#define SEM_UTC_CONFIG__USE_PT_SNOOP__SHIFT 0x5
+#define SEM_UTC_CONFIG__USE_MTYPE_MASK 0x00000007L
+#define SEM_UTC_CONFIG__FORCE_SNOOP_MASK 0x00000008L
+#define SEM_UTC_CONFIG__FORCE_GCC_MASK 0x00000010L
+#define SEM_UTC_CONFIG__USE_PT_SNOOP_MASK 0x00000020L
+//SEM_UTCL2_TRAN_EN_LUT
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN__SHIFT 0x0
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN__SHIFT 0x1
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN__SHIFT 0x2
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN__SHIFT 0x3
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN__SHIFT 0x4
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN__SHIFT 0x5
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN__SHIFT 0x6
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN__SHIFT 0x7
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED__SHIFT 0x8
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN__SHIFT 0x1f
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA0_UTCL2_EN_MASK 0x00000001L
+#define SEM_UTCL2_TRAN_EN_LUT__SDMA1_UTCL2_EN_MASK 0x00000002L
+#define SEM_UTCL2_TRAN_EN_LUT__UVD_UTCL2_EN_MASK 0x00000004L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE0_UTCL2_EN_MASK 0x00000008L
+#define SEM_UTCL2_TRAN_EN_LUT__ACP_UTCL2_EN_MASK 0x00000010L
+#define SEM_UTCL2_TRAN_EN_LUT__ISP_UTCL2_EN_MASK 0x00000020L
+#define SEM_UTCL2_TRAN_EN_LUT__VCE1_UTCL2_EN_MASK 0x00000040L
+#define SEM_UTCL2_TRAN_EN_LUT__VP8_UTCL2_EN_MASK 0x00000080L
+#define SEM_UTCL2_TRAN_EN_LUT__RESERVED_MASK 0x7FFFFF00L
+#define SEM_UTCL2_TRAN_EN_LUT__CP_UTCL2_EN_MASK 0x80000000L
+//SEM_MCIF_CONFIG
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP__SHIFT 0x0
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT__SHIFT 0x2
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT__SHIFT 0x8
+#define SEM_MCIF_CONFIG__MC_REQ_SWAP_MASK 0x00000003L
+#define SEM_MCIF_CONFIG__MC_WRREQ_CREDIT_MASK 0x000000FCL
+#define SEM_MCIF_CONFIG__MC_RDREQ_CREDIT_MASK 0x00003F00L
+//SEM_PERFMON_CNTL
+#define SEM_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SEM_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SEM_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SEM_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SEM_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SEM_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SEM_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SEM_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SEM_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SEM_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SEM_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SEM_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SEM_PERFCOUNTER0_RESULT
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SEM_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SEM_PERFCOUNTER1_RESULT
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SEM_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SEM_STATUS
+#define SEM_STATUS__SEM_IDLE__SHIFT 0x0
+#define SEM_STATUS__SEM_INTERNAL_IDLE__SHIFT 0x1
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL__SHIFT 0x2
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL__SHIFT 0x3
+#define SEM_STATUS__WRITE1_FIFO_FULL__SHIFT 0x4
+#define SEM_STATUS__CHECK0_FIFO_FULL__SHIFT 0x5
+#define SEM_STATUS__MC_RDREQ_PENDING__SHIFT 0x6
+#define SEM_STATUS__MC_WRREQ_PENDING__SHIFT 0x7
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING__SHIFT 0x8
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING__SHIFT 0x9
+#define SEM_STATUS__UVD_MAILBOX_PENDING__SHIFT 0xa
+#define SEM_STATUS__VCE_MAILBOX_PENDING__SHIFT 0xb
+#define SEM_STATUS__CPG1_MAILBOX_PENDING__SHIFT 0xc
+#define SEM_STATUS__CPG2_MAILBOX_PENDING__SHIFT 0xd
+#define SEM_STATUS__VCE1_MAILBOX_PENDING__SHIFT 0xe
+#define SEM_STATUS__ATC_REQ_PENDING__SHIFT 0xf
+#define SEM_STATUS__OUTSTANDING_CLEAN__SHIFT 0x10
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH__SHIFT 0x11
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH__SHIFT 0x12
+#define SEM_STATUS__INVREQ_CNT_IDLE__SHIFT 0x13
+#define SEM_STATUS__ENTRYLIST_IDLE__SHIFT 0x14
+#define SEM_STATUS__MIF_IDLE__SHIFT 0x15
+#define SEM_STATUS__REGISTER_IDLE__SHIFT 0x16
+#define SEM_STATUS__ATCL2_INVREQ_IDLE__SHIFT 0x17
+#define SEM_STATUS__SWITCH_READY__SHIFT 0x1f
+#define SEM_STATUS__SEM_IDLE_MASK 0x00000001L
+#define SEM_STATUS__SEM_INTERNAL_IDLE_MASK 0x00000002L
+#define SEM_STATUS__MC_RDREQ_FIFO_FULL_MASK 0x00000004L
+#define SEM_STATUS__MC_WRREQ_FIFO_FULL_MASK 0x00000008L
+#define SEM_STATUS__WRITE1_FIFO_FULL_MASK 0x00000010L
+#define SEM_STATUS__CHECK0_FIFO_FULL_MASK 0x00000020L
+#define SEM_STATUS__MC_RDREQ_PENDING_MASK 0x00000040L
+#define SEM_STATUS__MC_WRREQ_PENDING_MASK 0x00000080L
+#define SEM_STATUS__SDMA0_MAILBOX_PENDING_MASK 0x00000100L
+#define SEM_STATUS__SDMA1_MAILBOX_PENDING_MASK 0x00000200L
+#define SEM_STATUS__UVD_MAILBOX_PENDING_MASK 0x00000400L
+#define SEM_STATUS__VCE_MAILBOX_PENDING_MASK 0x00000800L
+#define SEM_STATUS__CPG1_MAILBOX_PENDING_MASK 0x00001000L
+#define SEM_STATUS__CPG2_MAILBOX_PENDING_MASK 0x00002000L
+#define SEM_STATUS__VCE1_MAILBOX_PENDING_MASK 0x00004000L
+#define SEM_STATUS__ATC_REQ_PENDING_MASK 0x00008000L
+#define SEM_STATUS__OUTSTANDING_CLEAN_MASK 0x00010000L
+#define SEM_STATUS__INVREQ_FLUSH_VF_MISMATCH_MASK 0x00020000L
+#define SEM_STATUS__INVREQ_NONFLUSH_VF_MISMATCH_MASK 0x00040000L
+#define SEM_STATUS__INVREQ_CNT_IDLE_MASK 0x00080000L
+#define SEM_STATUS__ENTRYLIST_IDLE_MASK 0x00100000L
+#define SEM_STATUS__MIF_IDLE_MASK 0x00200000L
+#define SEM_STATUS__REGISTER_IDLE_MASK 0x00400000L
+#define SEM_STATUS__ATCL2_INVREQ_IDLE_MASK 0x00800000L
+#define SEM_STATUS__SWITCH_READY_MASK 0x80000000L
+//SEM_MAILBOX_CLIENTCONFIG
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x0
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1__SHIFT 0x3
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2__SHIFT 0x6
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3__SHIFT 0x9
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0__SHIFT 0xc
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0__SHIFT 0xf
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0__SHIFT 0x12
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0__SHIFT 0x15
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT2_MASK 0x000001C0L
+#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT3_MASK 0x00000E00L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA_CLIENT0_MASK 0x00007000L
+#define SEM_MAILBOX_CLIENTCONFIG__UVD_CLIENT0_MASK 0x00038000L
+#define SEM_MAILBOX_CLIENTCONFIG__SDMA1_CLIENT0_MASK 0x001C0000L
+#define SEM_MAILBOX_CLIENTCONFIG__VCE_CLIENT0_MASK 0x00E00000L
+//SEM_MAILBOX
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0
+#define SEM_MAILBOX__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL
+#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L
+//SEM_MAILBOX_CONTROL
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE__SHIFT 0x0
+#define SEM_MAILBOX_CONTROL__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX_CONTROL__HOSTPORT_ENABLE_MASK 0x0000FFFFL
+#define SEM_MAILBOX_CONTROL__RESERVED_MASK 0xFFFF0000L
+//SEM_CHICKEN_BITS
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN__SHIFT 0x0
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN__SHIFT 0x1
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN__SHIFT 0x2
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR__SHIFT 0x3
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN__SHIFT 0x6
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN__SHIFT 0x7
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX__SHIFT 0x8
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX__SHIFT 0xa
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID__SHIFT 0xc
+#define SEM_CHICKEN_BITS__ATOMIC_EN__SHIFT 0xe
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK__SHIFT 0xf
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX__SHIFT 0x10
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN__SHIFT 0x12
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK__SHIFT 0x13
+#define SEM_CHICKEN_BITS__VMID_PIPELINE_EN_MASK 0x00000001L
+#define SEM_CHICKEN_BITS__ENTRY_PIPELINE_EN_MASK 0x00000002L
+#define SEM_CHICKEN_BITS__CHECK_COUNTER_EN_MASK 0x00000004L
+#define SEM_CHICKEN_BITS__ECC_BEHAVIOR_MASK 0x00000018L
+#define SEM_CHICKEN_BITS__PHY_TRAN_EN_MASK 0x00000040L
+#define SEM_CHICKEN_BITS__ADDR_CMP_UNTRAN_EN_MASK 0x00000080L
+#define SEM_CHICKEN_BITS__IDLE_COUNTER_INDEX_MASK 0x00000300L
+#define SEM_CHICKEN_BITS__OUTSTANDING_CLEAN_COUNTER_INDEX_MASK 0x00000C00L
+#define SEM_CHICKEN_BITS__ATCL2_BUS_ID_MASK 0x00003000L
+#define SEM_CHICKEN_BITS__ATOMIC_EN_MASK 0x00004000L
+#define SEM_CHICKEN_BITS__EXTERNAL_ATOMIC_CHECK_MASK 0x00008000L
+#define SEM_CHICKEN_BITS__CLEAR_MAILBOX_MASK 0x00030000L
+#define SEM_CHICKEN_BITS__INVACK_AFTER_OUTSTANDING_CLEAN_MASK 0x00040000L
+#define SEM_CHICKEN_BITS__UTC_TAG_CONFLICT_CHECK_MASK 0x00080000L
+//SEM_MAILBOX_CLIENTCONFIG_EXTRA
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0__SHIFT 0x0
+#define SEM_MAILBOX_CLIENTCONFIG_EXTRA__VCE1_CLIENT0_MASK 0x0000000FL
+//SEM_GPU_IOV_VIOLATION_LOG
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define SEM_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x14
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define SEM_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SEM_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SEM_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define SEM_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define SEM_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x00F00000L
+#define SEM_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//SEM_OUTSTANDING_THRESHOLD
+#define SEM_OUTSTANDING_THRESHOLD__VALUE__SHIFT 0x0
+#define SEM_OUTSTANDING_THRESHOLD__VALUE_MASK 0x000000FFL
+//SEM_REGISTER_LAST_PART2
+#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//IH_ACTIVE_FCN_ID
+#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//IH_VIRT_RESET_REQ
+#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define IH_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//IH_CLIENT_CFG
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000001FL
+//IH_CLIENT_CFG_INDEX
+#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
+//IH_CLIENT_CFG_DATA
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR__SHIFT 0x0
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x12
+#define IH_CLIENT_CFG_DATA__RING_ID__SHIFT 0x14
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18
+#define IH_CLIENT_CFG_DATA__CREDIT_RETURN_ADDR_MASK 0x0001FFFFL
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x000C0000L
+#define IH_CLIENT_CFG_DATA__RING_ID_MASK 0x00300000L
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L
+//IH_CID_REMAP_INDEX
+#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//IH_CID_REMAP_DATA
+#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x10
+#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0000FF00L
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0x00FF0000L
+//IH_CHICKEN
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+//IH_MMHUB_CNTL
+#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0
+#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc
+#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL
+#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000700L
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x00007000L
+//IH_INT_DROP_CNTL
+#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3
+#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5
+#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8
+#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10
+#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L
+#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L
+#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L
+#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L
+//IH_INT_DROP_MATCH_VALUE0
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_VALUE1
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL
+//IH_INT_DROP_MATCH_MASK0
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x000F0000L
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_MASK1
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART1
+#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+//SEM_ACTIVE_FCN_ID
+#define SEM_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SEM_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SEM_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SEM_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SEM_VIRT_RESET_REQ
+#define SEM_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SEM_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SEM_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SEM_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SEM_RESP_SDMA0
+#define SEM_RESP_SDMA0__ADDR__SHIFT 0x2
+#define SEM_RESP_SDMA0__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_SDMA1
+#define SEM_RESP_SDMA1__ADDR__SHIFT 0x2
+#define SEM_RESP_SDMA1__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_UVD
+#define SEM_RESP_UVD__ADDR__SHIFT 0x2
+#define SEM_RESP_UVD__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VCE_0
+#define SEM_RESP_VCE_0__ADDR__SHIFT 0x2
+#define SEM_RESP_VCE_0__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_ACP
+#define SEM_RESP_ACP__ADDR__SHIFT 0x2
+#define SEM_RESP_ACP__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_ISP
+#define SEM_RESP_ISP__ADDR__SHIFT 0x2
+#define SEM_RESP_ISP__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VCE_1
+#define SEM_RESP_VCE_1__ADDR__SHIFT 0x2
+#define SEM_RESP_VCE_1__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_VP8
+#define SEM_RESP_VP8__ADDR__SHIFT 0x2
+#define SEM_RESP_VP8__ADDR_MASK 0x000FFFFCL
+//SEM_RESP_GC
+#define SEM_RESP_GC__ADDR__SHIFT 0x2
+#define SEM_RESP_GC__ADDR_MASK 0x000FFFFCL
+//SEM_CID_REMAP_INDEX
+#define SEM_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define SEM_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//SEM_CID_REMAP_DATA
+#define SEM_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define SEM_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x10
+#define SEM_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define SEM_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0000FF00L
+#define SEM_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0x00FF0000L
+//SEM_ATOMIC_OP_LUT
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL__SHIFT 0x0
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1__SHIFT 0x7
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL__SHIFT 0xe
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0__SHIFT 0x15
+#define SEM_ATOMIC_OP_LUT__SIGNAL_NORMAL_MASK 0x0000007FL
+#define SEM_ATOMIC_OP_LUT__SIGNAL_WRITE1_MASK 0x00003F80L
+#define SEM_ATOMIC_OP_LUT__WAIT_NORMAL_MASK 0x001FC000L
+#define SEM_ATOMIC_OP_LUT__WAIT_CHECK0_MASK 0x0FE00000L
+//SEM_EDC_CONFIG
+#define SEM_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SEM_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//SEM_CHICKEN_BITS2
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID__SHIFT 0x1
+#define SEM_CHICKEN_BITS2__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define SEM_CHICKEN_BITS2__MM_CLIENT_USE_CONFIG_VFID_MASK 0x00000002L
+//SEM_MMHUB_CNTL
+#define SEM_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SEM_MMHUB_CNTL__TLVL_VALUE__SHIFT 0x8
+#define SEM_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+#define SEM_MMHUB_CNTL__TLVL_VALUE_MASK 0x00000700L
+//SEM_REGISTER_LAST_PART1
+#define SEM_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+
+#endif
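
The __SHIFT/_MASK pairs in the sh_mask header above come one pair per bit field; a consumer ANDs the register value with the mask and shifts down to read a field, or shifts up and masks to write one. Below is a minimal, self-contained sketch (not part of this patch) showing how such pairs are typically consumed, using the IH_LAST_INT_INFO0 definitions from the header; the GET_FIELD/SET_FIELD helpers and the example field values are illustrative assumptions, not the driver's own macros.

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from the sh_mask header above. */
#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT  0x0
#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT  0x8
#define IH_LAST_INT_INFO0__RING_ID__SHIFT    0x10
#define IH_LAST_INT_INFO0__VM_ID__SHIFT      0x18
#define IH_LAST_INT_INFO0__CLIENT_ID_MASK    0x000000FFL
#define IH_LAST_INT_INFO0__SOURCE_ID_MASK    0x0000FF00L
#define IH_LAST_INT_INFO0__RING_ID_MASK      0x00FF0000L
#define IH_LAST_INT_INFO0__VM_ID_MASK        0x0F000000L

/* Illustrative helpers built from the <reg>__<field>__SHIFT / <reg>__<field>_MASK naming. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t info0 = 0;

	/* Pack a hypothetical "last interrupt" record: client 0x1e, source 0x92, ring 0, vmid 8. */
	info0 = SET_FIELD(info0, IH_LAST_INT_INFO0, CLIENT_ID, 0x1e);
	info0 = SET_FIELD(info0, IH_LAST_INT_INFO0, SOURCE_ID, 0x92);
	info0 = SET_FIELD(info0, IH_LAST_INT_INFO0, RING_ID, 0);
	info0 = SET_FIELD(info0, IH_LAST_INT_INFO0, VM_ID, 8);

	/* Unpack the same word again. */
	printf("client=0x%02x source=0x%02x ring=%u vmid=%u (raw=0x%08x)\n",
	       (unsigned)GET_FIELD(info0, IH_LAST_INT_INFO0, CLIENT_ID),
	       (unsigned)GET_FIELD(info0, IH_LAST_INT_INFO0, SOURCE_ID),
	       (unsigned)GET_FIELD(info0, IH_LAST_INT_INFO0, RING_ID),
	       (unsigned)GET_FIELD(info0, IH_LAST_INT_INFO0, VM_ID),
	       info0);
	return 0;
}
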
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_offset.h
index 96ab3fe..96ab3fe 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
index 1ee3a23..1ee3a23 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_default.h
deleted file mode 100644
index eac125c..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_default.h
+++ /dev/null
@@ -1,7988 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _dcn_1_0_DEFAULT_HEADER
-#define _dcn_1_0_DEFAULT_HEADER
-
-
-// addressBlock: dce_dc_hda_azcontroller_azdec
-#define smnAZCONTROLLER0_GLOBAL_CAPABILITIES_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_MINOR_VERSION_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_MAJOR_VERSION_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_OUTPUT_PAYLOAD_CAPABILITY_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_INPUT_PAYLOAD_CAPABILITY_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_GLOBAL_CONTROL_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_WAKE_ENABLE_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_STATE_CHANGE_STATUS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_GLOBAL_STATUS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_OUTPUT_STREAM_PAYLOAD_CAPABILITY_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_INPUT_STREAM_PAYLOAD_CAPABILITY_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_WALL_CLOCK_COUNTER_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_STREAM_SYNCHRONIZATION_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_CORB_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_CORB_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_CORB_WRITE_POINTER_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_CORB_READ_POINTER_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_CORB_CONTROL_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_CORB_STATUS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_CORB_SIZE_DEFAULT 0x00000002
-#define smnAZCONTROLLER0_RIRB_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_RIRB_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_RIRB_WRITE_POINTER_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_RESPONSE_INTERRUPT_COUNT_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_RIRB_CONTROL_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_RIRB_STATUS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_RIRB_SIZE_DEFAULT 0x00000002
-#define smnAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_IMMEDIATE_RESPONSE_INPUT_INTERFACE_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_IMMEDIATE_COMMAND_STATUS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_DMA_POSITION_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_DMA_POSITION_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZCONTROLLER0_WALL_CLOCK_COUNTER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azendpoint_azdec
-#define smnAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define smnAZENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azinputendpoint_azdec
-#define smnAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define smnAZINPUTENDPOINT0_AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azroot_azdec
-#define smnAZROOT0_AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define smnAZROOT0_AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream0_azdec
-#define smnAZSTREAM0_0_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define smnAZSTREAM0_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define smnAZSTREAM0_0_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define smnAZSTREAM0_0_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define smnAZSTREAM0_0_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define smnAZSTREAM0_0_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define smnAZSTREAM0_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM0_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM0_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream1_azdec
-#define smnAZSTREAM1_0_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define smnAZSTREAM1_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define smnAZSTREAM1_0_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define smnAZSTREAM1_0_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define smnAZSTREAM1_0_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define smnAZSTREAM1_0_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define smnAZSTREAM1_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM1_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM1_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream2_azdec
-#define smnAZSTREAM2_0_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define smnAZSTREAM2_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define smnAZSTREAM2_0_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define smnAZSTREAM2_0_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define smnAZSTREAM2_0_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define smnAZSTREAM2_0_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define smnAZSTREAM2_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM2_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM2_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream3_azdec
-#define smnAZSTREAM3_0_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define smnAZSTREAM3_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define smnAZSTREAM3_0_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define smnAZSTREAM3_0_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define smnAZSTREAM3_0_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define smnAZSTREAM3_0_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define smnAZSTREAM3_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM3_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM3_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream4_azdec
-#define smnAZSTREAM4_0_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define smnAZSTREAM4_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define smnAZSTREAM4_0_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define smnAZSTREAM4_0_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define smnAZSTREAM4_0_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define smnAZSTREAM4_0_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define smnAZSTREAM4_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM4_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM4_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream5_azdec
-#define smnAZSTREAM5_0_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define smnAZSTREAM5_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define smnAZSTREAM5_0_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define smnAZSTREAM5_0_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define smnAZSTREAM5_0_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define smnAZSTREAM5_0_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define smnAZSTREAM5_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM5_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM5_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream6_azdec
-#define smnAZSTREAM6_0_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define smnAZSTREAM6_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define smnAZSTREAM6_0_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define smnAZSTREAM6_0_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define smnAZSTREAM6_0_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define smnAZSTREAM6_0_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define smnAZSTREAM6_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM6_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM6_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream7_azdec
-#define smnAZSTREAM7_0_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define smnAZSTREAM7_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define smnAZSTREAM7_0_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define smnAZSTREAM7_0_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define smnAZSTREAM7_0_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define smnAZSTREAM7_0_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define smnAZSTREAM7_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM7_0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define smnAZSTREAM7_0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mmhubbub_vga_dispdec[72..76]
-#define mmVGA_MEM_WRITE_PAGE_ADDR_DEFAULT 0x00000000
-#define mmVGA_MEM_READ_PAGE_ADDR_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mmhubbub_vga_dispdec[948..986]
-#define mmCRTC8_IDX_DEFAULT 0x00000000
-#define mmCRTC8_DATA_DEFAULT 0x00000000
-#define mmGENFC_WT_DEFAULT 0x00000000
-#define mmGENS1_DEFAULT 0x00000000
-#define mmATTRDW_DEFAULT 0x00000000
-#define mmATTRX_DEFAULT 0x00000000
-#define mmATTRDR_DEFAULT 0x00000000
-#define mmGENMO_WT_DEFAULT 0x00000000
-#define mmGENS0_DEFAULT 0x00000000
-#define mmGENENB_DEFAULT 0x00000000
-#define mmSEQ8_IDX_DEFAULT 0x00000000
-#define mmSEQ8_DATA_DEFAULT 0x00000000
-#define mmDAC_MASK_DEFAULT 0x00000000
-#define mmDAC_R_INDEX_DEFAULT 0x00000000
-#define mmDAC_W_INDEX_DEFAULT 0x00000000
-#define mmDAC_DATA_DEFAULT 0x00000000
-#define mmGENFC_RD_DEFAULT 0x00000000
-#define mmGENMO_RD_DEFAULT 0x00000000
-#define mmGRPH8_IDX_DEFAULT 0x00000000
-#define mmGRPH8_DATA_DEFAULT 0x00000000
-#define mmCRTC8_IDX_1_DEFAULT 0x00000000
-#define mmCRTC8_DATA_1_DEFAULT 0x00000000
-#define mmGENFC_WT_1_DEFAULT 0x00000000
-#define mmGENS1_1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azcontroller_azdec
-#define mmCORB_WRITE_POINTER_DEFAULT 0x00000000
-#define mmCORB_READ_POINTER_DEFAULT 0x00000000
-#define mmCORB_CONTROL_DEFAULT 0x00000000
-#define mmCORB_STATUS_DEFAULT 0x00000000
-#define mmCORB_SIZE_DEFAULT 0x00000002
-#define mmRIRB_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmRIRB_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmRIRB_WRITE_POINTER_DEFAULT 0x00000000
-#define mmRESPONSE_INTERRUPT_COUNT_DEFAULT 0x00000000
-#define mmRIRB_CONTROL_DEFAULT 0x00000000
-#define mmRIRB_STATUS_DEFAULT 0x00000000
-#define mmRIRB_SIZE_DEFAULT 0x00000002
-#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_DEFAULT 0x00000000
-#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-#define mmIMMEDIATE_RESPONSE_INPUT_INTERFACE_DEFAULT 0x00000000
-#define mmIMMEDIATE_COMMAND_STATUS_DEFAULT 0x00000000
-#define mmDMA_POSITION_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmDMA_POSITION_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmWALL_CLOCK_COUNTER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azendpoint_azdec
-#define mmAZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define mmAZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azinputendpoint_azdec
-#define mmAZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define mmAZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azroot_azdec
-#define mmAZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define mmAZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream0_azdec
-#define mmAZSTREAM0_1_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM0_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM0_1_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM0_1_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM0_1_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM0_1_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM0_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM0_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM0_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream1_azdec
-#define mmAZSTREAM1_1_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM1_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM1_1_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM1_1_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM1_1_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM1_1_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM1_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM1_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM1_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream2_azdec
-#define mmAZSTREAM2_1_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM2_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM2_1_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM2_1_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM2_1_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM2_1_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM2_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM2_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM2_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream3_azdec
-#define mmAZSTREAM3_1_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM3_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM3_1_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM3_1_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM3_1_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM3_1_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM3_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM3_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM3_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream4_azdec
-#define mmAZSTREAM4_1_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM4_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM4_1_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM4_1_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM4_1_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM4_1_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM4_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM4_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM4_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream5_azdec
-#define mmAZSTREAM5_1_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM5_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM5_1_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM5_1_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM5_1_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM5_1_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM5_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM5_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM5_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream6_azdec
-#define mmAZSTREAM6_1_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM6_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM6_1_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM6_1_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM6_1_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM6_1_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM6_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM6_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM6_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azstream7_azdec
-#define mmAZSTREAM7_1_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM7_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM7_1_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM7_1_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM7_1_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM7_1_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM7_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM7_1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM7_1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mmhubbub_vga_dispdec[72..76]
-
-
-// addressBlock: dce_dc_mmhubbub_vga_dispdec
-#define mmVGA_RENDER_CONTROL_DEFAULT 0x0000000f
-#define mmVGA_SEQUENCER_RESET_CONTROL_DEFAULT 0x00003f3f
-#define mmVGA_MODE_CONTROL_DEFAULT 0x00000000
-#define mmVGA_SURFACE_PITCH_SELECT_DEFAULT 0x00000002
-#define mmVGA_MEMORY_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmVGA_DISPBUF1_SURFACE_ADDR_DEFAULT 0x00000000
-#define mmVGA_DISPBUF2_SURFACE_ADDR_DEFAULT 0x00000000
-#define mmVGA_MEMORY_BASE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmVGA_HDP_CONTROL_DEFAULT 0x00000000
-#define mmVGA_CACHE_CONTROL_DEFAULT 0x00000000
-#define mmD1VGA_CONTROL_DEFAULT 0x00000000
-#define mmD2VGA_CONTROL_DEFAULT 0x00000000
-#define mmVGA_STATUS_DEFAULT 0x00000000
-#define mmVGA_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmVGA_STATUS_CLEAR_DEFAULT 0x00000000
-#define mmVGA_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmVGA_MAIN_CONTROL_DEFAULT 0x00005018
-#define mmVGA_TEST_CONTROL_DEFAULT 0x00000000
-#define mmVGA_QOS_CTRL_DEFAULT 0x00000000
-#define mmD3VGA_CONTROL_DEFAULT 0x00000000
-#define mmD4VGA_CONTROL_DEFAULT 0x00000000
-#define mmD5VGA_CONTROL_DEFAULT 0x00000000
-#define mmD6VGA_CONTROL_DEFAULT 0x00000000
-#define mmVGA_SOURCE_SELECT_DEFAULT 0x00000100
-
-
-// addressBlock: dce_dc_dccg_dccg_dispdec
-#define mmPHYPLLA_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPHYPLLB_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPHYPLLC_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPHYPLLD_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO_DBUF_EN_DEFAULT 0x00000000
-#define mmDPREFCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmREFCLK_CNTL_DEFAULT 0x00000000
-#define mmMIPI_CLK_CNTL_DEFAULT 0x00000000
-#define mmREFCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmPHYPLLE_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmDCCG_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDSICLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmDCCG_CBUS_WRCMD_DELAY_DEFAULT 0x00000003
-#define mmDCCG_DS_DTO_INCR_DEFAULT 0x00000000
-#define mmDCCG_DS_DTO_MODULO_DEFAULT 0x00000000
-#define mmDCCG_DS_CNTL_DEFAULT 0x00000000
-#define mmDCCG_DS_HW_CAL_INTERVAL_DEFAULT 0x00989680
-#define mmSYMCLKG_CLOCK_ENABLE_DEFAULT 0x00000600
-#define mmDPREFCLK_CNTL_DEFAULT 0x00000000
-#define mmAOMCLK0_CNTL_DEFAULT 0x00000000
-#define mmAOMCLK1_CNTL_DEFAULT 0x00000000
-#define mmAOMCLK2_CNTL_DEFAULT 0x00000000
-#define mmDCCG_AUDIO_DTO2_PHASE_DEFAULT 0x00000000
-#define mmDCCG_AUDIO_DTO2_MODULO_DEFAULT 0x00000001
-#define mmDCE_VERSION_DEFAULT 0x00000000
-#define mmPHYPLLG_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmDCCG_GTC_CNTL_DEFAULT 0x00000000
-#define mmDCCG_GTC_DTO_INCR_DEFAULT 0x00000000
-#define mmDCCG_GTC_DTO_MODULO_DEFAULT 0x00000000
-#define mmDCCG_GTC_CURRENT_DEFAULT 0x00000000
-#define mmMIPI_DTO_CNTL_DEFAULT 0x00000000
-#define mmMIPI_DTO_PHASE_DEFAULT 0x00000000
-#define mmMIPI_DTO_MODULO_DEFAULT 0x00000000
-#define mmDAC_CLK_ENABLE_DEFAULT 0x00000000
-#define mmDVO_CLK_ENABLE_DEFAULT 0x00000000
-#define mmAVSYNC_COUNTER_WRITE_DEFAULT 0x00000000
-#define mmAVSYNC_COUNTER_CONTROL_DEFAULT 0x00000000
-#define mmAVSYNC_COUNTER_READ_DEFAULT 0x00000000
-#define mmMILLISECOND_TIME_BASE_DIV_DEFAULT 0x001186a0
-#define mmDISPCLK_FREQ_CHANGE_CNTL_DEFAULT 0x08010028
-#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL_DEFAULT 0x00000001
-#define mmDCCG_PERFMON_CNTL_DEFAULT 0xfffff800
-#define mmDCCG_GATE_DISABLE_CNTL_DEFAULT 0x74ee02dd
-#define mmDISPCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmSOCCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmDCCG_CAC_STATUS_DEFAULT 0x00000000
-#define mmPIXCLK1_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPIXCLK2_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPIXCLK0_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmMICROSECOND_TIME_BASE_DIV_DEFAULT 0x00120464
-#define mmDCCG_GATE_DISABLE_CNTL2_DEFAULT 0x007f007f
-#define mmSYMCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmPHYPLLF_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmDCCG_DISP_CNTL_REG_DEFAULT 0x00000000
-#define mmOTG0_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO0_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO0_MODULO_DEFAULT 0x00000000
-#define mmOTG0_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmOTG1_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO1_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO1_MODULO_DEFAULT 0x00000000
-#define mmOTG1_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmOTG2_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO2_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO2_MODULO_DEFAULT 0x00000000
-#define mmOTG2_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmOTG3_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO3_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO3_MODULO_DEFAULT 0x00000000
-#define mmOTG3_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmOTG4_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO4_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO4_MODULO_DEFAULT 0x00000000
-#define mmOTG4_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmOTG5_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO5_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO5_MODULO_DEFAULT 0x00000000
-#define mmOTG5_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDPPCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmSYMCLKA_CLOCK_ENABLE_DEFAULT 0x00000000
-#define mmSYMCLKB_CLOCK_ENABLE_DEFAULT 0x00000100
-#define mmSYMCLKC_CLOCK_ENABLE_DEFAULT 0x00000200
-#define mmSYMCLKD_CLOCK_ENABLE_DEFAULT 0x00000300
-#define mmSYMCLKE_CLOCK_ENABLE_DEFAULT 0x00000400
-#define mmSYMCLKF_CLOCK_ENABLE_DEFAULT 0x00000500
-#define mmDCCG_SOFT_RESET_DEFAULT 0x00000000
-#define mmDVOACLKD_CNTL_DEFAULT 0x00070000
-#define mmDVOACLKC_MVP_CNTL_DEFAULT 0x00030000
-#define mmDVOACLKC_CNTL_DEFAULT 0x00030000
-#define mmDCCG_AUDIO_DTO_SOURCE_DEFAULT 0x00000030
-#define mmDCCG_AUDIO_DTO0_PHASE_DEFAULT 0x00000000
-#define mmDCCG_AUDIO_DTO0_MODULE_DEFAULT 0x00000001
-#define mmDCCG_AUDIO_DTO1_PHASE_DEFAULT 0x00000000
-#define mmDCCG_AUDIO_DTO1_MODULE_DEFAULT 0x00000001
-#define mmDCCG_VSYNC_OTG0_LATCH_VALUE_DEFAULT 0x00000000
-#define mmDCCG_VSYNC_OTG1_LATCH_VALUE_DEFAULT 0x00000000
-#define mmDCCG_VSYNC_OTG2_LATCH_VALUE_DEFAULT 0x00000000
-#define mmDCCG_VSYNC_OTG3_LATCH_VALUE_DEFAULT 0x00000000
-#define mmDCCG_VSYNC_OTG4_LATCH_VALUE_DEFAULT 0x00000000
-#define mmDCCG_VSYNC_OTG5_LATCH_VALUE_DEFAULT 0x00000000
-#define mmDCCG_VSYNC_CNT_CTRL_DEFAULT 0x00000000
-#define mmDCCG_VSYNC_CNT_INT_CTRL_DEFAULT 0x00000000
-#define mmDCCG_TEST_CLK_SEL_DEFAULT 0x01ff01ff
-
-
-// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
-#define mmDENTIST_DISPCLK_CNTL_DEFAULT 0x64010064
-
-
-// addressBlock: dce_dc_dccg_dccg_dcperfmon0_dc_perfmon_dispdec
-#define mmDC_PERFMON0_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON0_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dccg_dccg_dcperfmon1_dc_perfmon_dispdec
-#define mmDC_PERFMON1_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON1_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dccg_dccg_pll_dispdec
-#define mmPLL_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmu_rbbmif_dispdec
-#define mmRBBMIF_TIMEOUT_DEFAULT 0x20000a00
-#define mmRBBMIF_STATUS_DEFAULT 0x00000000
-#define mmRBBMIF_INT_STATUS_DEFAULT 0x80000000
-#define mmRBBMIF_TIMEOUT_DIS_DEFAULT 0x00000000
-#define mmRBBMIF_STATUS_FLAG_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmu_dc_pg_dispdec
-#define mmDOMAIN0_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN0_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN1_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN1_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN2_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN2_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN3_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN3_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN4_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN4_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN5_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN5_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN6_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN6_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN7_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN7_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN8_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN8_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN9_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN9_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN10_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN10_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN11_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN11_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN12_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN12_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN13_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN13_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN14_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN14_PG_STATUS_DEFAULT 0x00000000
-#define mmDOMAIN15_PG_CONFIG_DEFAULT 0x00000001
-#define mmDOMAIN15_PG_STATUS_DEFAULT 0x00000000
-#define mmDCPG_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDCPG_INTERRUPT_CONTROL_1_DEFAULT 0x00000000
-#define mmDCPG_INTERRUPT_CONTROL_2_DEFAULT 0x00000000
-#define mmDC_IP_REQUEST_CNTL_DEFAULT 0x00000000
-#define mmDC_PGCNTL_STATUS_REG_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmu_dmu_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON2_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON2_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmu_dmu_misc_dispdec
-#define mmCC_DC_PIPE_DIS_DEFAULT 0x00000000
-#define mmDMU_CLK_CNTL_DEFAULT 0x00000000
-#define mmDMU_MEM_PWR_CNTL_DEFAULT 0x00000000
-#define mmDMCU_SMU_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmSMU_INTERRUPT_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmu_dmcu_dispdec
-#define mmDMCU_CTRL_DEFAULT 0xffff0101
-#define mmDMCU_STATUS_DEFAULT 0x00000001
-#define mmDMCU_PC_START_ADDR_DEFAULT 0x00000000
-#define mmDMCU_FW_START_ADDR_DEFAULT 0x00000000
-#define mmDMCU_FW_END_ADDR_DEFAULT 0x00000000
-#define mmDMCU_FW_ISR_START_ADDR_DEFAULT 0x00000004
-#define mmDMCU_FW_CS_HI_DEFAULT 0x00000000
-#define mmDMCU_FW_CS_LO_DEFAULT 0x00000000
-#define mmDMCU_RAM_ACCESS_CTRL_DEFAULT 0x00000000
-#define mmDMCU_ERAM_WR_CTRL_DEFAULT 0x000f0000
-#define mmDMCU_ERAM_WR_DATA_DEFAULT 0x00000000
-#define mmDMCU_ERAM_RD_CTRL_DEFAULT 0x000f0000
-#define mmDMCU_ERAM_RD_DATA_DEFAULT 0x00000000
-#define mmDMCU_IRAM_WR_CTRL_DEFAULT 0x00000000
-#define mmDMCU_IRAM_WR_DATA_DEFAULT 0x00000000
-#define mmDMCU_IRAM_RD_CTRL_DEFAULT 0x00000000
-#define mmDMCU_IRAM_RD_DATA_DEFAULT 0x00000000
-#define mmDMCU_EVENT_TRIGGER_DEFAULT 0x00000000
-#define mmDMCU_UC_INTERNAL_INT_STATUS_DEFAULT 0x00000000
-#define mmDMCU_SS_INTERRUPT_CNTL_STATUS_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_STATUS_1_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_HOST_EN_MASK_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_1_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1_DEFAULT 0x00000000
-#define mmDC_DMCU_SCRATCH_DEFAULT 0x00000000
-#define mmDMCU_INT_CNT_DEFAULT 0x00000000
-#define mmDMCU_FW_CHECKSUM_SMPL_BYTE_POS_DEFAULT 0x00000000
-#define mmDMCU_UC_CLK_GATING_CNTL_DEFAULT 0x00010102
-#define mmMASTER_COMM_DATA_REG1_DEFAULT 0x00000000
-#define mmMASTER_COMM_DATA_REG2_DEFAULT 0x00000000
-#define mmMASTER_COMM_DATA_REG3_DEFAULT 0x00000000
-#define mmMASTER_COMM_CMD_REG_DEFAULT 0x00000000
-#define mmMASTER_COMM_CNTL_REG_DEFAULT 0x00000000
-#define mmSLAVE_COMM_DATA_REG1_DEFAULT 0x00000000
-#define mmSLAVE_COMM_DATA_REG2_DEFAULT 0x00000000
-#define mmSLAVE_COMM_DATA_REG3_DEFAULT 0x00000000
-#define mmSLAVE_COMM_CMD_REG_DEFAULT 0x00000000
-#define mmSLAVE_COMM_CNTL_REG_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS1_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS2_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS3_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS4_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS5_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5_DEFAULT 0x00000000
-#define mmDMCU_DPRX_INTERRUPT_STATUS1_DEFAULT 0x00000000
-#define mmDMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1_DEFAULT 0x00000000
-#define mmDMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_STATUS_CONTINUE_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_CONTINUE_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_CONTINUE_DEFAULT 0x00000000
-#define mmDMCU_INT_CNT_CONTINUE_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmu_ihc_dispdec
-#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_START_POSITION_VSTARTUP_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_READ_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_READ_CNTL_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE2_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE3_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE4_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE5_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE6_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE7_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE8_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE9_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE10_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE11_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE12_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE13_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE14_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE15_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE16_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE17_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE18_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE19_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE20_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE21_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE22_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_START_POSITION_VREADY_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_START_POSITION_FLIP_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE_NO_LOCK_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_START_POSITION_FLIP_AWAY_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_wb0_dispdec_cnv_dispdec
-#define mmCNV0_WB_ENABLE_DEFAULT 0x00000000
-#define mmCNV0_WB_EC_CONFIG_DEFAULT 0x55000000
-#define mmCNV0_CNV_MODE_DEFAULT 0x00000000
-#define mmCNV0_CNV_WINDOW_START_DEFAULT 0x00000000
-#define mmCNV0_CNV_WINDOW_SIZE_DEFAULT 0x00100010
-#define mmCNV0_CNV_UPDATE_DEFAULT 0x00000000
-#define mmCNV0_CNV_SOURCE_SIZE_DEFAULT 0x00100010
-#define mmCNV0_CNV_CSC_CONTROL_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_C11_C12_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_C13_C14_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_C21_C22_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_C23_C24_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_C31_C32_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_C33_C34_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_ROUND_OFFSET_R_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_ROUND_OFFSET_G_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_ROUND_OFFSET_B_DEFAULT 0x00000000
-#define mmCNV0_CNV_CSC_CLAMP_R_DEFAULT 0x00000fff
-#define mmCNV0_CNV_CSC_CLAMP_G_DEFAULT 0x00000fff
-#define mmCNV0_CNV_CSC_CLAMP_B_DEFAULT 0x00000fff
-#define mmCNV0_CNV_TEST_CNTL_DEFAULT 0x00000000
-#define mmCNV0_CNV_TEST_CRC_RED_DEFAULT 0x0000fff0
-#define mmCNV0_CNV_TEST_CRC_GREEN_DEFAULT 0x0000fff0
-#define mmCNV0_CNV_TEST_CRC_BLUE_DEFAULT 0x0000fff0
-#define mmCNV0_CNV_INPUT_SELECT_DEFAULT 0x00000001
-#define mmCNV0_WB_SOFT_RESET_DEFAULT 0x00000000
-#define mmCNV0_WB_WARM_UP_MODE_CTL1_DEFAULT 0x88700100
-#define mmCNV0_WB_WARM_UP_MODE_CTL2_DEFAULT 0x00000100
-
-
-// addressBlock: dce_dc_wb0_dispdec_wbscl_dispdec
-#define mmWBSCL0_WBSCL_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmWBSCL0_WBSCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmWBSCL0_WBSCL_MODE_DEFAULT 0x00000000
-#define mmWBSCL0_WBSCL_TAP_CONTROL_DEFAULT 0x00001111
-#define mmWBSCL0_WBSCL_DEST_SIZE_DEFAULT 0x00010001
-#define mmWBSCL0_WBSCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00080000
-#define mmWBSCL0_WBSCL_HORZ_FILTER_INIT_Y_RGB_DEFAULT 0x01000000
-#define mmWBSCL0_WBSCL_HORZ_FILTER_INIT_CBCR_DEFAULT 0x01000000
-#define mmWBSCL0_WBSCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00080000
-#define mmWBSCL0_WBSCL_VERT_FILTER_INIT_Y_RGB_DEFAULT 0x01000000
-#define mmWBSCL0_WBSCL_VERT_FILTER_INIT_CBCR_DEFAULT 0x01000000
-#define mmWBSCL0_WBSCL_ROUND_OFFSET_DEFAULT 0x00800010
-#define mmWBSCL0_WBSCL_CLAMP_DEFAULT 0x01fe01fe
-#define mmWBSCL0_WBSCL_OVERFLOW_STATUS_DEFAULT 0x00000000
-#define mmWBSCL0_WBSCL_COEF_RAM_CONFLICT_STATUS_DEFAULT 0x00000000
-#define mmWBSCL0_WBSCL_OUTSIDE_PIX_STRATEGY_DEFAULT 0x80108000
-#define mmWBSCL0_WBSCL_TEST_CNTL_DEFAULT 0x00000000
-#define mmWBSCL0_WBSCL_TEST_CRC_RED_DEFAULT 0x0000ff00
-#define mmWBSCL0_WBSCL_TEST_CRC_GREEN_DEFAULT 0x0000ffff
-#define mmWBSCL0_WBSCL_TEST_CRC_BLUE_DEFAULT 0x0000ff00
-#define mmWBSCL0_WBSCL_BACKPRESSURE_CNT_EN_DEFAULT 0x00000000
-#define mmWBSCL0_WB_MCIF_BACKPRESSURE_CNT_DEFAULT 0x00000000
-#define mmWBSCL0_WBSCL_RAM_SHUTDOWN_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_wb0_dispdec_wb_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON3_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON3_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_wb1_dispdec_cnv_dispdec
-#define mmCNV1_WB_ENABLE_DEFAULT 0x00000000
-#define mmCNV1_WB_EC_CONFIG_DEFAULT 0x55000000
-#define mmCNV1_CNV_MODE_DEFAULT 0x00000000
-#define mmCNV1_CNV_WINDOW_START_DEFAULT 0x00000000
-#define mmCNV1_CNV_WINDOW_SIZE_DEFAULT 0x00100010
-#define mmCNV1_CNV_UPDATE_DEFAULT 0x00000000
-#define mmCNV1_CNV_SOURCE_SIZE_DEFAULT 0x00100010
-#define mmCNV1_CNV_CSC_CONTROL_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_C11_C12_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_C13_C14_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_C21_C22_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_C23_C24_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_C31_C32_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_C33_C34_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_ROUND_OFFSET_R_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_ROUND_OFFSET_G_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_ROUND_OFFSET_B_DEFAULT 0x00000000
-#define mmCNV1_CNV_CSC_CLAMP_R_DEFAULT 0x00000fff
-#define mmCNV1_CNV_CSC_CLAMP_G_DEFAULT 0x00000fff
-#define mmCNV1_CNV_CSC_CLAMP_B_DEFAULT 0x00000fff
-#define mmCNV1_CNV_TEST_CNTL_DEFAULT 0x00000000
-#define mmCNV1_CNV_TEST_CRC_RED_DEFAULT 0x0000fff0
-#define mmCNV1_CNV_TEST_CRC_GREEN_DEFAULT 0x0000fff0
-#define mmCNV1_CNV_TEST_CRC_BLUE_DEFAULT 0x0000fff0
-#define mmCNV1_CNV_INPUT_SELECT_DEFAULT 0x00000001
-#define mmCNV1_WB_SOFT_RESET_DEFAULT 0x00000000
-#define mmCNV1_WB_WARM_UP_MODE_CTL1_DEFAULT 0x88700100
-#define mmCNV1_WB_WARM_UP_MODE_CTL2_DEFAULT 0x00000100
-
-
-// addressBlock: dce_dc_wb1_dispdec_wbscl_dispdec
-#define mmWBSCL1_WBSCL_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmWBSCL1_WBSCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmWBSCL1_WBSCL_MODE_DEFAULT 0x00000000
-#define mmWBSCL1_WBSCL_TAP_CONTROL_DEFAULT 0x00001111
-#define mmWBSCL1_WBSCL_DEST_SIZE_DEFAULT 0x00010001
-#define mmWBSCL1_WBSCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00080000
-#define mmWBSCL1_WBSCL_HORZ_FILTER_INIT_Y_RGB_DEFAULT 0x01000000
-#define mmWBSCL1_WBSCL_HORZ_FILTER_INIT_CBCR_DEFAULT 0x01000000
-#define mmWBSCL1_WBSCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00080000
-#define mmWBSCL1_WBSCL_VERT_FILTER_INIT_Y_RGB_DEFAULT 0x01000000
-#define mmWBSCL1_WBSCL_VERT_FILTER_INIT_CBCR_DEFAULT 0x01000000
-#define mmWBSCL1_WBSCL_ROUND_OFFSET_DEFAULT 0x00800010
-#define mmWBSCL1_WBSCL_CLAMP_DEFAULT 0x01fe01fe
-#define mmWBSCL1_WBSCL_OVERFLOW_STATUS_DEFAULT 0x00000000
-#define mmWBSCL1_WBSCL_COEF_RAM_CONFLICT_STATUS_DEFAULT 0x00000000
-#define mmWBSCL1_WBSCL_OUTSIDE_PIX_STRATEGY_DEFAULT 0x80108000
-#define mmWBSCL1_WBSCL_TEST_CNTL_DEFAULT 0x00000000
-#define mmWBSCL1_WBSCL_TEST_CRC_RED_DEFAULT 0x0000ff00
-#define mmWBSCL1_WBSCL_TEST_CRC_GREEN_DEFAULT 0x0000ffff
-#define mmWBSCL1_WBSCL_TEST_CRC_BLUE_DEFAULT 0x0000ff00
-#define mmWBSCL1_WBSCL_BACKPRESSURE_CNT_EN_DEFAULT 0x00000000
-#define mmWBSCL1_WB_MCIF_BACKPRESSURE_CNT_DEFAULT 0x00000000
-#define mmWBSCL1_WBSCL_RAM_SHUTDOWN_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_wb1_dispdec_wb_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON4_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON4_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mmhubbub_mcif_wb0_dispdec
-#define mmMCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUFMGR_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_PITCH_DEFAULT 0x04000400
-#define mmMCIF_WB0_MCIF_WB_BUF_1_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_1_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_ARBITRATION_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_SCLK_CHANGE_DEFAULT 0x00000008
-#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL_DEFAULT 0x000f0000
-#define mmMCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL_DEFAULT 0x00000040
-#define mmMCIF_WB0_MCIF_WB_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_WARM_UP_CNTL_DEFAULT 0x00001000
-#define mmMCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL_DEFAULT 0x00000002
-#define mmMCIF_WB0_MULTI_LEVEL_QOS_CTRL_DEFAULT 0x00000080
-#define mmMCIF_WB0_MCIF_WB_BUF_LUMA_SIZE_DEFAULT 0x000fffff
-#define mmMCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE_DEFAULT 0x000fffff
-
-
-// addressBlock: dce_dc_mmhubbub_mcif_wb1_dispdec
-#define mmMCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUFMGR_CUR_LINE_R_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUFMGR_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_PITCH_DEFAULT 0x04000400
-#define mmMCIF_WB1_MCIF_WB_BUF_1_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_1_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_ARBITRATION_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_SCLK_CHANGE_DEFAULT 0x00000008
-#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL_DEFAULT 0x000f0000
-#define mmMCIF_WB1_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL_DEFAULT 0x00000040
-#define mmMCIF_WB1_MCIF_WB_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_CLOCK_GATER_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_WARM_UP_CNTL_DEFAULT 0x00001000
-#define mmMCIF_WB1_MCIF_WB_SELF_REFRESH_CONTROL_DEFAULT 0x00000002
-#define mmMCIF_WB1_MULTI_LEVEL_QOS_CTRL_DEFAULT 0x00000080
-#define mmMCIF_WB1_MCIF_WB_BUF_LUMA_SIZE_DEFAULT 0x000fffff
-#define mmMCIF_WB1_MCIF_WB_BUF_CHROMA_SIZE_DEFAULT 0x000fffff
-
-
-// addressBlock: dce_dc_mmhubbub_mmhubbub_dispdec
-#define mmWBIF0_MISC_CTRL_DEFAULT 0x00010001
-#define mmWBIF0_SMU_WM_CONTROL_DEFAULT 0x00000000
-#define mmWBIF0_PHASE0_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmWBIF0_PHASE1_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmWBIF1_MISC_CTRL_DEFAULT 0x00010001
-#define mmWBIF1_SMU_WM_CONTROL_DEFAULT 0x00000000
-#define mmWBIF1_PHASE0_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmWBIF1_PHASE1_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmVGA_SRC_SPLIT_CNTL_DEFAULT 0x00000000
-#define mmMMHUBBUB_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmMMHUBBUB_MEM_PWR_CNTL_DEFAULT 0x0000c180
-#define mmMMHUBBUB_CLOCK_CNTL_DEFAULT 0x00000000
-#define mmMMHUBBUB_SOFT_RESET_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mmhubbub_vgaif_dispdec
-#define mmMCIF_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WRITE_COMBINE_CONTROL_DEFAULT 0x00000080
-#define mmMCIF_PHASE0_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmMCIF_PHASE1_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmMCIF_PHASE2_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mmhubbub_mmhubbub_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON5_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON5_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream0_dispdec
-#define mmAZF0STREAM0_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM0_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream1_dispdec
-#define mmAZF0STREAM1_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM1_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream2_dispdec
-#define mmAZF0STREAM2_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM2_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream3_dispdec
-#define mmAZF0STREAM3_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM3_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream4_dispdec
-#define mmAZF0STREAM4_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM4_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream5_dispdec
-#define mmAZF0STREAM5_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM5_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream6_dispdec
-#define mmAZF0STREAM6_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM6_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream7_dispdec
-#define mmAZF0STREAM7_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM7_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_az_misc_dispdec
-#define mmAZ_CLOCK_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_az_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON6_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON6_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
-#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
-#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0endpoint2_dispdec
-#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0endpoint3_dispdec
-#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0endpoint4_dispdec
-#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0endpoint5_dispdec
-#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0endpoint6_dispdec
-#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0endpoint7_dispdec
-#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0controller_dispdec
-#define mmAZALIA_CONTROLLER_CLOCK_GATING_DEFAULT 0x00000000
-#define mmAZALIA_AUDIO_DTO_DEFAULT 0x00300018
-#define mmAZALIA_AUDIO_DTO_CONTROL_DEFAULT 0x00000000
-#define mmAZALIA_SOCCLK_CONTROL_DEFAULT 0x00000001
-#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE_DEFAULT 0x00000000
-#define mmAZALIA_DATA_DMA_CONTROL_DEFAULT 0x0000000a
-#define mmAZALIA_BDL_DMA_CONTROL_DEFAULT 0x0000000a
-#define mmAZALIA_RIRB_AND_DP_CONTROL_DEFAULT 0x00000000
-#define mmAZALIA_CORB_DMA_CONTROL_DEFAULT 0x00000000
-#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER_DEFAULT 0x00000000
-#define mmAZALIA_CYCLIC_BUFFER_SYNC_DEFAULT 0x00000000
-#define mmAZALIA_GLOBAL_CAPABILITIES_DEFAULT 0x00000000
-#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY_DEFAULT 0x00000060
-#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL_DEFAULT 0x00080008
-#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY_DEFAULT 0x00000080
-#define mmAZALIA_INPUT_CRC0_CONTROL0_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC0_CONTROL1_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC0_CONTROL2_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC0_CONTROL3_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC0_RESULT_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_CONTROL0_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_CONTROL1_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_CONTROL2_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_CONTROL3_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_RESULT_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_CONTROL0_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_CONTROL1_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_CONTROL2_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_CONTROL3_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_RESULT_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_CONTROL0_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_CONTROL1_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_CONTROL2_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_CONTROL3_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_RESULT_DEFAULT 0x00000000
-#define mmAZALIA_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmAZALIA_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0root_dispdec
-#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_DEFAULT 0x1002aa01
-#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID_DEFAULT 0x00100700
-#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL_DEFAULT 0x0000000d
-#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_DEFAULT 0x00000001
-#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES_DEFAULT 0xc0000009
-#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE_DEFAULT 0x00000200
-#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET_DEFAULT 0x00000000
-#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_DEFAULT 0x00aa0100
-#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_DEFAULT 0x00000000
-#define mmCC_RCU_DC_AUDIO_PORT_CONNECTIVITY_DEFAULT 0x00000000
-#define mmCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET0_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET1_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET2_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET3_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET4_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET5_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET6_DEFAULT 0x00000000
-#define mmREG_DC_AUDIO_PORT_CONNECTIVITY_DEFAULT 0x00000000
-#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream8_dispdec
-#define mmAZF0STREAM8_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM8_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream9_dispdec
-#define mmAZF0STREAM9_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM9_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream10_dispdec
-#define mmAZF0STREAM10_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM10_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream11_dispdec
-#define mmAZF0STREAM11_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM11_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream12_dispdec
-#define mmAZF0STREAM12_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM12_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream13_dispdec
-#define mmAZF0STREAM13_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM13_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream14_dispdec
-#define mmAZF0STREAM14_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM14_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0stream15_dispdec
-#define mmAZF0STREAM15_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM15_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
-#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
-#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
-#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0inputendpoint3_dispdec
-#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0inputendpoint4_dispdec
-#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0inputendpoint5_dispdec
-#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0inputendpoint6_dispdec
-#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hda_azf0inputendpoint7_dispdec
-#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dchubbub_hubbub_sdpif_dispdec
-#define mmDCHUBBUB_SDPIF_CFG0_DEFAULT 0x00cd3001
-#define mmDCHUBBUB_SDPIF_CFG1_DEFAULT 0x0000005c
-#define mmDCHUBBUB_FORCE_IO_STATUS_0_DEFAULT 0x00000002
-#define mmDCHUBBUB_FORCE_IO_STATUS_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_FB_BASE_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_FB_TOP_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_FB_OFFSET_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_AGP_BOT_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_AGP_TOP_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_AGP_BASE_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_APER_BASE_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_APER_TOP_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_APER_DEF_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_APER_DEF_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_W_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_BASE_LO_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_BASE_HI_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_RELOC_LO_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_RELOC_HI_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_LENGTH_LO_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_LENGTH_HI_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_BASE_LO_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_BASE_HI_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_RELOC_LO_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_RELOC_HI_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_LENGTH_LO_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_LENGTH_HI_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_BASE_LO_2_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_BASE_HI_2_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_RELOC_LO_2_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_RELOC_HI_2_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_LENGTH_LO_2_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_LENGTH_HI_2_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_BASE_LO_3_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_BASE_HI_3_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_RELOC_LO_3_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_RELOC_HI_3_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_LENGTH_LO_3_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MARC_LENGTH_HI_3_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_PIPE_SEC_LVL_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCHUBBUB_SDPIF_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dchubbub_hubbub_ret_path_dispdec
-#define mmDCHUBBUB_RET_PATH_DCC_CFG_DEFAULT 0x00000001
-#define mmDCHUBBUB_RET_PATH_DCC_CFG0_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG0_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG1_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG1_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG2_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG2_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG3_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG3_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG4_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG4_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG5_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG5_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG6_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG6_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG7_0_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_DCC_CFG7_1_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCHUBBUB_RET_PATH_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCHUBBUB_CRC_CTRL_DEFAULT 0x00000000
-#define mmDCHUBBUB_CRC0_VAL_R_G_DEFAULT 0x00000000
-#define mmDCHUBBUB_CRC0_VAL_B_A_DEFAULT 0x00000000
-#define mmDCHUBBUB_CRC1_VAL_R_G_DEFAULT 0x00000000
-#define mmDCHUBBUB_CRC1_VAL_B_A_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dchubbub_hubbub_dispdec
-#define mmDCHUBBUB_ARB_DF_REQ_OUTSTAND_DEFAULT 0x01000100
-#define mmDCHUBBUB_ARB_SAT_LEVEL_DEFAULT 0xffffffff
-#define mmDCHUBBUB_ARB_QOS_FORCE_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_DRAM_STATE_CNTL_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_DEFAULT 0x00000000
-#define mmDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL_DEFAULT 0x00000010
-#define mmDCHUBBUB_ARB_TIMEOUT_ENABLE_DEFAULT 0x00000000
-#define mmDCHUBBUB_GLOBAL_TIMER_CNTL_DEFAULT 0x00000000
-#define mmSURFACE_CHECK0_ADDRESS_LSB_DEFAULT 0x00000000
-#define mmSURFACE_CHECK0_ADDRESS_MSB_DEFAULT 0x00000000
-#define mmSURFACE_CHECK1_ADDRESS_LSB_DEFAULT 0x00000000
-#define mmSURFACE_CHECK1_ADDRESS_MSB_DEFAULT 0x00000000
-#define mmSURFACE_CHECK2_ADDRESS_LSB_DEFAULT 0x00000000
-#define mmSURFACE_CHECK2_ADDRESS_MSB_DEFAULT 0x00000000
-#define mmSURFACE_CHECK3_ADDRESS_LSB_DEFAULT 0x00000000
-#define mmSURFACE_CHECK3_ADDRESS_MSB_DEFAULT 0x00000000
-#define mmVTG0_CONTROL_DEFAULT 0x00000000
-#define mmVTG1_CONTROL_DEFAULT 0x00000000
-#define mmVTG2_CONTROL_DEFAULT 0x00000000
-#define mmVTG3_CONTROL_DEFAULT 0x00000000
-#define mmVTG4_CONTROL_DEFAULT 0x00000000
-#define mmVTG5_CONTROL_DEFAULT 0x00000000
-#define mmDCHUBBUB_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCHUBBUB_CLOCK_CNTL_DEFAULT 0x00000000
-#define mmDCFCLK_CNTL_DEFAULT 0x80000200
-#define mmDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL_DEFAULT 0x00000000
-#define mmDCHUBBUB_PERFORMANCE_MEASUREMENT_CNTL2_DEFAULT 0x00000000
-#define mmDCHUBBUB_VLINE_SNAPSHOT_DEFAULT 0x00000000
-#define mmDCHUBBUB_SPARE_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dchubbub_dchubbub_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON7_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON7_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
-#define mmHUBP0_DCSURF_SURFACE_CONFIG_DEFAULT 0x00000008
-#define mmHUBP0_DCSURF_ADDR_CONFIG_DEFAULT 0x00000000
-#define mmHUBP0_DCSURF_TILING_CONFIG_DEFAULT 0x00000080
-#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_DEFAULT 0x00000000
-#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DEFAULT 0x00000000
-#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C_DEFAULT 0x00000000
-#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_DEFAULT 0x00000000
-#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_DEFAULT 0x00000000
-#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C_DEFAULT 0x00000000
-#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_DEFAULT 0x00000000
-#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_C_DEFAULT 0x00000000
-#define mmHUBP0_DCHUBP_CNTL_DEFAULT 0x00001001
-#define mmHUBP0_HUBP_CLK_CNTL_DEFAULT 0x00000000
-#define mmHUBP0_DCHUBP_VMPG_CONFIG_DEFAULT 0x00000000
-#define mmHUBP0_HUBPREQ_DEBUG_DB_DEFAULT 0x00000000
-#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_DEFAULT 0x00000000
-#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
-#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_CONTROL_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_FLIP_CONTROL_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_FLIP_CONTROL2_DEFAULT 0x00003040
-#define mmHUBPREQ0_DCSURF_FRAME_PACING_CONTROL_DEFAULT 0x04000000
-#define mmHUBPREQ0_DCSURF_FRAME_PACING_TIME_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_EXPANSION_MODE_DEFAULT 0x00000055
-#define mmHUBPREQ0_DCN_TTU_QOS_WM_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_GLOBAL_TTU_CNTL_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_STATUS_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ0_DCN_VM_CONTEXT0_CNTL_DEFAULT 0x00012010
-#define mmHUBPREQ0_DCN_VM_MX_L1_TLB_CNTL_DEFAULT 0x00000000
-#define mmHUBPREQ0_BLANK_OFFSET_0_DEFAULT 0x00000000
-#define mmHUBPREQ0_BLANK_OFFSET_1_DEFAULT 0x00000000
-#define mmHUBPREQ0_DST_DIMENSIONS_DEFAULT 0x00000000
-#define mmHUBPREQ0_DST_AFTER_SCALER_DEFAULT 0x00000000
-#define mmHUBPREQ0_PREFETCH_SETTINS_DEFAULT 0x00000000
-#define mmHUBPREQ0_PREFETCH_SETTINS_C_DEFAULT 0x00000000
-#define mmHUBPREQ0_VBLANK_PARAMETERS_0_DEFAULT 0x00000000
-#define mmHUBPREQ0_VBLANK_PARAMETERS_1_DEFAULT 0x00000000
-#define mmHUBPREQ0_VBLANK_PARAMETERS_2_DEFAULT 0x00000000
-#define mmHUBPREQ0_VBLANK_PARAMETERS_3_DEFAULT 0x00000000
-#define mmHUBPREQ0_VBLANK_PARAMETERS_4_DEFAULT 0x00000000
-#define mmHUBPREQ0_NOM_PARAMETERS_0_DEFAULT 0x00000000
-#define mmHUBPREQ0_NOM_PARAMETERS_1_DEFAULT 0x00000000
-#define mmHUBPREQ0_NOM_PARAMETERS_2_DEFAULT 0x00000000
-#define mmHUBPREQ0_NOM_PARAMETERS_3_DEFAULT 0x00000000
-#define mmHUBPREQ0_NOM_PARAMETERS_4_DEFAULT 0x00000000
-#define mmHUBPREQ0_NOM_PARAMETERS_5_DEFAULT 0x00000000
-#define mmHUBPREQ0_NOM_PARAMETERS_6_DEFAULT 0x00000000
-#define mmHUBPREQ0_NOM_PARAMETERS_7_DEFAULT 0x00000000
-#define mmHUBPREQ0_PER_LINE_DELIVERY_PRE_DEFAULT 0x00000000
-#define mmHUBPREQ0_PER_LINE_DELIVERY_DEFAULT 0x00000000
-#define mmHUBPREQ0_CURSOR_SETTINS_DEFAULT 0x00000000
-#define mmHUBPREQ0_REF_FREQ_TO_PIX_FREQ_DEFAULT 0x00000000
-#define mmHUBPREQ0_HUBPREQ_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmHUBPREQ0_HUBPREQ_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
-#define mmHUBPRET0_HUBPRET_CONTROL_DEFAULT 0x00e40000
-#define mmHUBPRET0_HUBPRET_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmHUBPRET0_HUBPRET_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL0_DEFAULT 0x00000000
-#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL1_DEFAULT 0x00000000
-#define mmHUBPRET0_HUBPRET_READ_LINE0_DEFAULT 0x00000000
-#define mmHUBPRET0_HUBPRET_READ_LINE1_DEFAULT 0x00000000
-#define mmHUBPRET0_HUBPRET_INTERRUPT_DEFAULT 0x00000000
-#define mmHUBPRET0_HUBPRET_READ_LINE_VALUE_DEFAULT 0x00000000
-#define mmHUBPRET0_HUBPRET_READ_LINE_STATUS_DEFAULT 0x00000421
-
-
-// addressBlock: dce_dc_dcbubp0_dispdec_cursor_dispdec
-#define mmCURSOR0_CURSOR_CONTROL_DEFAULT 0x01000000
-#define mmCURSOR0_CURSOR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmCURSOR0_CURSOR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmCURSOR0_CURSOR_SIZE_DEFAULT 0x00000000
-#define mmCURSOR0_CURSOR_POSITION_DEFAULT 0x00000000
-#define mmCURSOR0_CURSOR_HOT_SPOT_DEFAULT 0x00000000
-#define mmCURSOR0_CURSOR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCURSOR0_CURSOR_DST_OFFSET_DEFAULT 0x00000000
-#define mmCURSOR0_CURSOR_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmCURSOR0_CURSOR_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON8_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON8_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
-#define mmHUBP1_DCSURF_SURFACE_CONFIG_DEFAULT 0x00000008
-#define mmHUBP1_DCSURF_ADDR_CONFIG_DEFAULT 0x00000000
-#define mmHUBP1_DCSURF_TILING_CONFIG_DEFAULT 0x00000080
-#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_DEFAULT 0x00000000
-#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_DEFAULT 0x00000000
-#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C_DEFAULT 0x00000000
-#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_DEFAULT 0x00000000
-#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_DEFAULT 0x00000000
-#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C_DEFAULT 0x00000000
-#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_DEFAULT 0x00000000
-#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_C_DEFAULT 0x00000000
-#define mmHUBP1_DCHUBP_CNTL_DEFAULT 0x00001001
-#define mmHUBP1_HUBP_CLK_CNTL_DEFAULT 0x00000000
-#define mmHUBP1_DCHUBP_VMPG_CONFIG_DEFAULT 0x00000000
-#define mmHUBP1_HUBPREQ_DEBUG_DB_DEFAULT 0x00000000
-#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_DEFAULT 0x00000000
-#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
-#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_CONTROL_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_FLIP_CONTROL_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_FLIP_CONTROL2_DEFAULT 0x00003040
-#define mmHUBPREQ1_DCSURF_FRAME_PACING_CONTROL_DEFAULT 0x04000000
-#define mmHUBPREQ1_DCSURF_FRAME_PACING_TIME_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_EXPANSION_MODE_DEFAULT 0x00000055
-#define mmHUBPREQ1_DCN_TTU_QOS_WM_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_GLOBAL_TTU_CNTL_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_STATUS_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_PROTECTION_FAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ1_DCN_VM_CONTEXT0_CNTL_DEFAULT 0x00012010
-#define mmHUBPREQ1_DCN_VM_MX_L1_TLB_CNTL_DEFAULT 0x00000000
-#define mmHUBPREQ1_BLANK_OFFSET_0_DEFAULT 0x00000000
-#define mmHUBPREQ1_BLANK_OFFSET_1_DEFAULT 0x00000000
-#define mmHUBPREQ1_DST_DIMENSIONS_DEFAULT 0x00000000
-#define mmHUBPREQ1_DST_AFTER_SCALER_DEFAULT 0x00000000
-#define mmHUBPREQ1_PREFETCH_SETTINS_DEFAULT 0x00000000
-#define mmHUBPREQ1_PREFETCH_SETTINS_C_DEFAULT 0x00000000
-#define mmHUBPREQ1_VBLANK_PARAMETERS_0_DEFAULT 0x00000000
-#define mmHUBPREQ1_VBLANK_PARAMETERS_1_DEFAULT 0x00000000
-#define mmHUBPREQ1_VBLANK_PARAMETERS_2_DEFAULT 0x00000000
-#define mmHUBPREQ1_VBLANK_PARAMETERS_3_DEFAULT 0x00000000
-#define mmHUBPREQ1_VBLANK_PARAMETERS_4_DEFAULT 0x00000000
-#define mmHUBPREQ1_NOM_PARAMETERS_0_DEFAULT 0x00000000
-#define mmHUBPREQ1_NOM_PARAMETERS_1_DEFAULT 0x00000000
-#define mmHUBPREQ1_NOM_PARAMETERS_2_DEFAULT 0x00000000
-#define mmHUBPREQ1_NOM_PARAMETERS_3_DEFAULT 0x00000000
-#define mmHUBPREQ1_NOM_PARAMETERS_4_DEFAULT 0x00000000
-#define mmHUBPREQ1_NOM_PARAMETERS_5_DEFAULT 0x00000000
-#define mmHUBPREQ1_NOM_PARAMETERS_6_DEFAULT 0x00000000
-#define mmHUBPREQ1_NOM_PARAMETERS_7_DEFAULT 0x00000000
-#define mmHUBPREQ1_PER_LINE_DELIVERY_PRE_DEFAULT 0x00000000
-#define mmHUBPREQ1_PER_LINE_DELIVERY_DEFAULT 0x00000000
-#define mmHUBPREQ1_CURSOR_SETTINS_DEFAULT 0x00000000
-#define mmHUBPREQ1_REF_FREQ_TO_PIX_FREQ_DEFAULT 0x00000000
-#define mmHUBPREQ1_HUBPREQ_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmHUBPREQ1_HUBPREQ_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
-#define mmHUBPRET1_HUBPRET_CONTROL_DEFAULT 0x00e40000
-#define mmHUBPRET1_HUBPRET_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmHUBPRET1_HUBPRET_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL0_DEFAULT 0x00000000
-#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL1_DEFAULT 0x00000000
-#define mmHUBPRET1_HUBPRET_READ_LINE0_DEFAULT 0x00000000
-#define mmHUBPRET1_HUBPRET_READ_LINE1_DEFAULT 0x00000000
-#define mmHUBPRET1_HUBPRET_INTERRUPT_DEFAULT 0x00000000
-#define mmHUBPRET1_HUBPRET_READ_LINE_VALUE_DEFAULT 0x00000000
-#define mmHUBPRET1_HUBPRET_READ_LINE_STATUS_DEFAULT 0x00000421
-
-
-// addressBlock: dce_dc_dcbubp1_dispdec_cursor_dispdec
-#define mmCURSOR1_CURSOR_CONTROL_DEFAULT 0x01000000
-#define mmCURSOR1_CURSOR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmCURSOR1_CURSOR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmCURSOR1_CURSOR_SIZE_DEFAULT 0x00000000
-#define mmCURSOR1_CURSOR_POSITION_DEFAULT 0x00000000
-#define mmCURSOR1_CURSOR_HOT_SPOT_DEFAULT 0x00000000
-#define mmCURSOR1_CURSOR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCURSOR1_CURSOR_DST_OFFSET_DEFAULT 0x00000000
-#define mmCURSOR1_CURSOR_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmCURSOR1_CURSOR_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON9_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON9_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
-#define mmHUBP2_DCSURF_SURFACE_CONFIG_DEFAULT 0x00000008
-#define mmHUBP2_DCSURF_ADDR_CONFIG_DEFAULT 0x00000000
-#define mmHUBP2_DCSURF_TILING_CONFIG_DEFAULT 0x00000080
-#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_DEFAULT 0x00000000
-#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_DEFAULT 0x00000000
-#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C_DEFAULT 0x00000000
-#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_DEFAULT 0x00000000
-#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_DEFAULT 0x00000000
-#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C_DEFAULT 0x00000000
-#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_DEFAULT 0x00000000
-#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_C_DEFAULT 0x00000000
-#define mmHUBP2_DCHUBP_CNTL_DEFAULT 0x00001001
-#define mmHUBP2_HUBP_CLK_CNTL_DEFAULT 0x00000000
-#define mmHUBP2_DCHUBP_VMPG_CONFIG_DEFAULT 0x00000000
-#define mmHUBP2_HUBPREQ_DEBUG_DB_DEFAULT 0x00000000
-#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_DEFAULT 0x00000000
-#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
-#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_CONTROL_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_FLIP_CONTROL_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_FLIP_CONTROL2_DEFAULT 0x00003040
-#define mmHUBPREQ2_DCSURF_FRAME_PACING_CONTROL_DEFAULT 0x04000000
-#define mmHUBPREQ2_DCSURF_FRAME_PACING_TIME_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_EXPANSION_MODE_DEFAULT 0x00000055
-#define mmHUBPREQ2_DCN_TTU_QOS_WM_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_GLOBAL_TTU_CNTL_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_STATUS_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_PROTECTION_FAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ2_DCN_VM_CONTEXT0_CNTL_DEFAULT 0x00012010
-#define mmHUBPREQ2_DCN_VM_MX_L1_TLB_CNTL_DEFAULT 0x00000000
-#define mmHUBPREQ2_BLANK_OFFSET_0_DEFAULT 0x00000000
-#define mmHUBPREQ2_BLANK_OFFSET_1_DEFAULT 0x00000000
-#define mmHUBPREQ2_DST_DIMENSIONS_DEFAULT 0x00000000
-#define mmHUBPREQ2_DST_AFTER_SCALER_DEFAULT 0x00000000
-#define mmHUBPREQ2_PREFETCH_SETTINS_DEFAULT 0x00000000
-#define mmHUBPREQ2_PREFETCH_SETTINS_C_DEFAULT 0x00000000
-#define mmHUBPREQ2_VBLANK_PARAMETERS_0_DEFAULT 0x00000000
-#define mmHUBPREQ2_VBLANK_PARAMETERS_1_DEFAULT 0x00000000
-#define mmHUBPREQ2_VBLANK_PARAMETERS_2_DEFAULT 0x00000000
-#define mmHUBPREQ2_VBLANK_PARAMETERS_3_DEFAULT 0x00000000
-#define mmHUBPREQ2_VBLANK_PARAMETERS_4_DEFAULT 0x00000000
-#define mmHUBPREQ2_NOM_PARAMETERS_0_DEFAULT 0x00000000
-#define mmHUBPREQ2_NOM_PARAMETERS_1_DEFAULT 0x00000000
-#define mmHUBPREQ2_NOM_PARAMETERS_2_DEFAULT 0x00000000
-#define mmHUBPREQ2_NOM_PARAMETERS_3_DEFAULT 0x00000000
-#define mmHUBPREQ2_NOM_PARAMETERS_4_DEFAULT 0x00000000
-#define mmHUBPREQ2_NOM_PARAMETERS_5_DEFAULT 0x00000000
-#define mmHUBPREQ2_NOM_PARAMETERS_6_DEFAULT 0x00000000
-#define mmHUBPREQ2_NOM_PARAMETERS_7_DEFAULT 0x00000000
-#define mmHUBPREQ2_PER_LINE_DELIVERY_PRE_DEFAULT 0x00000000
-#define mmHUBPREQ2_PER_LINE_DELIVERY_DEFAULT 0x00000000
-#define mmHUBPREQ2_CURSOR_SETTINS_DEFAULT 0x00000000
-#define mmHUBPREQ2_REF_FREQ_TO_PIX_FREQ_DEFAULT 0x00000000
-#define mmHUBPREQ2_HUBPREQ_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmHUBPREQ2_HUBPREQ_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
-#define mmHUBPRET2_HUBPRET_CONTROL_DEFAULT 0x00e40000
-#define mmHUBPRET2_HUBPRET_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmHUBPRET2_HUBPRET_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL0_DEFAULT 0x00000000
-#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL1_DEFAULT 0x00000000
-#define mmHUBPRET2_HUBPRET_READ_LINE0_DEFAULT 0x00000000
-#define mmHUBPRET2_HUBPRET_READ_LINE1_DEFAULT 0x00000000
-#define mmHUBPRET2_HUBPRET_INTERRUPT_DEFAULT 0x00000000
-#define mmHUBPRET2_HUBPRET_READ_LINE_VALUE_DEFAULT 0x00000000
-#define mmHUBPRET2_HUBPRET_READ_LINE_STATUS_DEFAULT 0x00000421
-
-
-// addressBlock: dce_dc_dcbubp2_dispdec_cursor_dispdec
-#define mmCURSOR2_CURSOR_CONTROL_DEFAULT 0x01000000
-#define mmCURSOR2_CURSOR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmCURSOR2_CURSOR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmCURSOR2_CURSOR_SIZE_DEFAULT 0x00000000
-#define mmCURSOR2_CURSOR_POSITION_DEFAULT 0x00000000
-#define mmCURSOR2_CURSOR_HOT_SPOT_DEFAULT 0x00000000
-#define mmCURSOR2_CURSOR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCURSOR2_CURSOR_DST_OFFSET_DEFAULT 0x00000000
-#define mmCURSOR2_CURSOR_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmCURSOR2_CURSOR_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON10_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON10_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
-#define mmHUBP3_DCSURF_SURFACE_CONFIG_DEFAULT 0x00000008
-#define mmHUBP3_DCSURF_ADDR_CONFIG_DEFAULT 0x00000000
-#define mmHUBP3_DCSURF_TILING_CONFIG_DEFAULT 0x00000080
-#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_DEFAULT 0x00000000
-#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_DEFAULT 0x00000000
-#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C_DEFAULT 0x00000000
-#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_DEFAULT 0x00000000
-#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_DEFAULT 0x00000000
-#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C_DEFAULT 0x00000000
-#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_DEFAULT 0x00000000
-#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_C_DEFAULT 0x00000000
-#define mmHUBP3_DCHUBP_CNTL_DEFAULT 0x00001001
-#define mmHUBP3_HUBP_CLK_CNTL_DEFAULT 0x00000000
-#define mmHUBP3_DCHUBP_VMPG_CONFIG_DEFAULT 0x00000000
-#define mmHUBP3_HUBPREQ_DEBUG_DB_DEFAULT 0x00000000
-#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_DEFAULT 0x00000000
-#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
-#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_CONTROL_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_FLIP_CONTROL_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_FLIP_CONTROL2_DEFAULT 0x00003040
-#define mmHUBPREQ3_DCSURF_FRAME_PACING_CONTROL_DEFAULT 0x04000000
-#define mmHUBPREQ3_DCSURF_FRAME_PACING_TIME_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_EXPANSION_MODE_DEFAULT 0x00000055
-#define mmHUBPREQ3_DCN_TTU_QOS_WM_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_GLOBAL_TTU_CNTL_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL0_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL1_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_STATUS_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_PROTECTION_FAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmHUBPREQ3_DCN_VM_CONTEXT0_CNTL_DEFAULT 0x00012010
-#define mmHUBPREQ3_DCN_VM_MX_L1_TLB_CNTL_DEFAULT 0x00000000
-#define mmHUBPREQ3_BLANK_OFFSET_0_DEFAULT 0x00000000
-#define mmHUBPREQ3_BLANK_OFFSET_1_DEFAULT 0x00000000
-#define mmHUBPREQ3_DST_DIMENSIONS_DEFAULT 0x00000000
-#define mmHUBPREQ3_DST_AFTER_SCALER_DEFAULT 0x00000000
-#define mmHUBPREQ3_PREFETCH_SETTINS_DEFAULT 0x00000000
-#define mmHUBPREQ3_PREFETCH_SETTINS_C_DEFAULT 0x00000000
-#define mmHUBPREQ3_VBLANK_PARAMETERS_0_DEFAULT 0x00000000
-#define mmHUBPREQ3_VBLANK_PARAMETERS_1_DEFAULT 0x00000000
-#define mmHUBPREQ3_VBLANK_PARAMETERS_2_DEFAULT 0x00000000
-#define mmHUBPREQ3_VBLANK_PARAMETERS_3_DEFAULT 0x00000000
-#define mmHUBPREQ3_VBLANK_PARAMETERS_4_DEFAULT 0x00000000
-#define mmHUBPREQ3_NOM_PARAMETERS_0_DEFAULT 0x00000000
-#define mmHUBPREQ3_NOM_PARAMETERS_1_DEFAULT 0x00000000
-#define mmHUBPREQ3_NOM_PARAMETERS_2_DEFAULT 0x00000000
-#define mmHUBPREQ3_NOM_PARAMETERS_3_DEFAULT 0x00000000
-#define mmHUBPREQ3_NOM_PARAMETERS_4_DEFAULT 0x00000000
-#define mmHUBPREQ3_NOM_PARAMETERS_5_DEFAULT 0x00000000
-#define mmHUBPREQ3_NOM_PARAMETERS_6_DEFAULT 0x00000000
-#define mmHUBPREQ3_NOM_PARAMETERS_7_DEFAULT 0x00000000
-#define mmHUBPREQ3_PER_LINE_DELIVERY_PRE_DEFAULT 0x00000000
-#define mmHUBPREQ3_PER_LINE_DELIVERY_DEFAULT 0x00000000
-#define mmHUBPREQ3_CURSOR_SETTINS_DEFAULT 0x00000000
-#define mmHUBPREQ3_REF_FREQ_TO_PIX_FREQ_DEFAULT 0x00000000
-#define mmHUBPREQ3_HUBPREQ_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmHUBPREQ3_HUBPREQ_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
-#define mmHUBPRET3_HUBPRET_CONTROL_DEFAULT 0x00e40000
-#define mmHUBPRET3_HUBPRET_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmHUBPRET3_HUBPRET_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL0_DEFAULT 0x00000000
-#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL1_DEFAULT 0x00000000
-#define mmHUBPRET3_HUBPRET_READ_LINE0_DEFAULT 0x00000000
-#define mmHUBPRET3_HUBPRET_READ_LINE1_DEFAULT 0x00000000
-#define mmHUBPRET3_HUBPRET_INTERRUPT_DEFAULT 0x00000000
-#define mmHUBPRET3_HUBPRET_READ_LINE_VALUE_DEFAULT 0x00000000
-#define mmHUBPRET3_HUBPRET_READ_LINE_STATUS_DEFAULT 0x00000421
-
-
-// addressBlock: dce_dc_dcbubp3_dispdec_cursor_dispdec
-#define mmCURSOR3_CURSOR_CONTROL_DEFAULT 0x01000000
-#define mmCURSOR3_CURSOR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmCURSOR3_CURSOR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmCURSOR3_CURSOR_SIZE_DEFAULT 0x00000000
-#define mmCURSOR3_CURSOR_POSITION_DEFAULT 0x00000000
-#define mmCURSOR3_CURSOR_HOT_SPOT_DEFAULT 0x00000000
-#define mmCURSOR3_CURSOR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCURSOR3_CURSOR_DST_OFFSET_DEFAULT 0x00000000
-#define mmCURSOR3_CURSOR_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmCURSOR3_CURSOR_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON11_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON11_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
-#define mmDPP_TOP0_DPP_CONTROL_DEFAULT 0x70000000
-#define mmDPP_TOP0_DPP_SOFT_RESET_DEFAULT 0x00000000
-#define mmDPP_TOP0_DPP_CRC_VAL_R_G_DEFAULT 0x00000000
-#define mmDPP_TOP0_DPP_CRC_VAL_B_A_DEFAULT 0x00000000
-#define mmDPP_TOP0_DPP_CRC_CTRL_DEFAULT 0x00000000
-#define mmDPP_TOP0_HOST_READ_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
-#define mmCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT_DEFAULT 0x00000008
-#define mmCNVC_CFG0_FORMAT_CONTROL_DEFAULT 0x00000000
-#define mmCNVC_CFG0_FCNV_FP_SCALE_BIAS_DEFAULT 0x00003c00
-#define mmCNVC_CFG0_DENORM_CONTROL_DEFAULT 0x00002000
-#define mmCNVC_CFG0_COLOR_KEYER_CONTROL_DEFAULT 0x00000000
-#define mmCNVC_CFG0_COLOR_KEYER_ALPHA_DEFAULT 0x00000000
-#define mmCNVC_CFG0_COLOR_KEYER_RED_DEFAULT 0x00000000
-#define mmCNVC_CFG0_COLOR_KEYER_GREEN_DEFAULT 0x00000000
-#define mmCNVC_CFG0_COLOR_KEYER_BLUE_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
-#define mmCNVC_CUR0_CURSOR0_CONTROL_DEFAULT 0x0003ff00
-#define mmCNVC_CUR0_CURSOR0_COLOR0_DEFAULT 0x00000000
-#define mmCNVC_CUR0_CURSOR0_COLOR1_DEFAULT 0x00000000
-#define mmCNVC_CUR0_CURSOR0_FP_SCALE_BIAS_DEFAULT 0x00003c00
-
-
-// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
-#define mmDSCL0_SCL_COEF_RAM_TAP_SELECT_DEFAULT 0x00000000
-#define mmDSCL0_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmDSCL0_SCL_MODE_DEFAULT 0x00000000
-#define mmDSCL0_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmDSCL0_DSCL_CONTROL_DEFAULT 0x00000000
-#define mmDSCL0_DSCL_2TAP_CONTROL_DEFAULT 0x01000100
-#define mmDSCL0_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmDSCL0_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmDSCL0_SCL_HORZ_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmDSCL0_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmDSCL0_SCL_VERT_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_C_DEFAULT 0x01000000
-#define mmDSCL0_SCL_BLACK_OFFSET_DEFAULT 0x80000000
-#define mmDSCL0_DSCL_UPDATE_DEFAULT 0x00000000
-#define mmDSCL0_DSCL_AUTOCAL_DEFAULT 0x00000000
-#define mmDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmDSCL0_OTG_H_BLANK_DEFAULT 0x00000000
-#define mmDSCL0_OTG_V_BLANK_DEFAULT 0x00000000
-#define mmDSCL0_RECOUT_START_DEFAULT 0x00000000
-#define mmDSCL0_RECOUT_SIZE_DEFAULT 0x00000000
-#define mmDSCL0_MPC_SIZE_DEFAULT 0x00000000
-#define mmDSCL0_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmDSCL0_LB_MEMORY_CTRL_DEFAULT 0x00003f00
-#define mmDSCL0_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmDSCL0_DSCL_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDSCL0_DSCL_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDSCL0_OBUF_CONTROL_DEFAULT 0xe0000000
-#define mmDSCL0_OBUF_MEM_PWR_CTRL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
-#define mmCM0_CM_CONTROL_DEFAULT 0x00000000
-#define mmCM0_CM_COMA_C11_C12_DEFAULT 0x00002000
-#define mmCM0_CM_COMA_C13_C14_DEFAULT 0x00000000
-#define mmCM0_CM_COMA_C21_C22_DEFAULT 0x20000000
-#define mmCM0_CM_COMA_C23_C24_DEFAULT 0x00000000
-#define mmCM0_CM_COMA_C31_C32_DEFAULT 0x00000000
-#define mmCM0_CM_COMA_C33_C34_DEFAULT 0x00002000
-#define mmCM0_CM_COMB_C11_C12_DEFAULT 0x00002000
-#define mmCM0_CM_COMB_C13_C14_DEFAULT 0x00000000
-#define mmCM0_CM_COMB_C21_C22_DEFAULT 0x20000000
-#define mmCM0_CM_COMB_C23_C24_DEFAULT 0x00000000
-#define mmCM0_CM_COMB_C31_C32_DEFAULT 0x00000000
-#define mmCM0_CM_COMB_C33_C34_DEFAULT 0x00002000
-#define mmCM0_CM_IGAM_CONTROL_DEFAULT 0x08000002
-#define mmCM0_CM_IGAM_LUT_RW_CONTROL_DEFAULT 0x00011070
-#define mmCM0_CM_IGAM_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmCM0_CM_IGAM_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmCM0_CM_IGAM_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmCM0_CM_IGAM_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmCM0_CM_IGAM_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmCM0_CM_IGAM_LUT_BW_OFFSET_BLUE_DEFAULT 0xffff0000
-#define mmCM0_CM_IGAM_LUT_BW_OFFSET_GREEN_DEFAULT 0xffff0000
-#define mmCM0_CM_IGAM_LUT_BW_OFFSET_RED_DEFAULT 0xffff0000
-#define mmCM0_CM_ICSC_CONTROL_DEFAULT 0x00000000
-#define mmCM0_CM_ICSC_C11_C12_DEFAULT 0x00002000
-#define mmCM0_CM_ICSC_C13_C14_DEFAULT 0x00000000
-#define mmCM0_CM_ICSC_C21_C22_DEFAULT 0x20000000
-#define mmCM0_CM_ICSC_C23_C24_DEFAULT 0x00000000
-#define mmCM0_CM_ICSC_C31_C32_DEFAULT 0x00000000
-#define mmCM0_CM_ICSC_C33_C34_DEFAULT 0x00002000
-#define mmCM0_CM_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmCM0_CM_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmCM0_CM_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmCM0_CM_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmCM0_CM_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmCM0_CM_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmCM0_CM_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmCM0_CM_OCSC_CONTROL_DEFAULT 0x00000000
-#define mmCM0_CM_OCSC_C11_C12_DEFAULT 0x00002000
-#define mmCM0_CM_OCSC_C13_C14_DEFAULT 0x00000000
-#define mmCM0_CM_OCSC_C21_C22_DEFAULT 0x20000000
-#define mmCM0_CM_OCSC_C23_C24_DEFAULT 0x00000000
-#define mmCM0_CM_OCSC_C31_C32_DEFAULT 0x00000000
-#define mmCM0_CM_OCSC_C33_C34_DEFAULT 0x00002000
-#define mmCM0_CM_BNS_VALUES_R_DEFAULT 0x20000000
-#define mmCM0_CM_BNS_VALUES_G_DEFAULT 0x20000000
-#define mmCM0_CM_BNS_VALUES_B_DEFAULT 0x20000000
-#define mmCM0_CM_DGAM_CONTROL_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_LUT_INDEX_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_LUT_DATA_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCM0_CM_DGAM_RAMA_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_REGION_0_1_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_REGION_2_3_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_REGION_4_5_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_REGION_6_7_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_REGION_8_9_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_REGION_10_11_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_REGION_12_13_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMA_REGION_14_15_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_REGION_0_1_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_REGION_2_3_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_REGION_4_5_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_REGION_6_7_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_REGION_8_9_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_REGION_10_11_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_REGION_12_13_DEFAULT 0x00000000
-#define mmCM0_CM_DGAM_RAMB_REGION_14_15_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_CONTROL_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_LUT_INDEX_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_LUT_DATA_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCM0_CM_RGAM_RAMA_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_0_1_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_2_3_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_4_5_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_6_7_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_8_9_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_10_11_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_12_13_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_14_15_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_16_17_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_18_19_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_20_21_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_22_23_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_24_25_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_26_27_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_28_29_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_30_31_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMA_REGION_32_33_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_0_1_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_2_3_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_4_5_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_6_7_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_8_9_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_10_11_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_12_13_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_14_15_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_16_17_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_18_19_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_20_21_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_22_23_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_24_25_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_26_27_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_28_29_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_30_31_DEFAULT 0x00000000
-#define mmCM0_CM_RGAM_RAMB_REGION_32_33_DEFAULT 0x00000000
-#define mmCM0_CM_HDR_MULT_COEF_DEFAULT 0x0001f000
-#define mmCM0_CM_RANGE_CLAMP_CONTROL_R_DEFAULT 0xfbff7bff
-#define mmCM0_CM_RANGE_CLAMP_CONTROL_G_DEFAULT 0xfbff7bff
-#define mmCM0_CM_RANGE_CLAMP_CONTROL_B_DEFAULT 0xfbff7bff
-#define mmCM0_CM_DENORM_CONTROL_DEFAULT 0x00000000
-#define mmCM0_CM_CMOUT_CONTROL_DEFAULT 0x0000000a
-#define mmCM0_CM_CMOUT_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmCM0_CM_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmCM0_CM_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON12_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON12_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
-#define mmDPP_TOP1_DPP_CONTROL_DEFAULT 0x70000000
-#define mmDPP_TOP1_DPP_SOFT_RESET_DEFAULT 0x00000000
-#define mmDPP_TOP1_DPP_CRC_VAL_R_G_DEFAULT 0x00000000
-#define mmDPP_TOP1_DPP_CRC_VAL_B_A_DEFAULT 0x00000000
-#define mmDPP_TOP1_DPP_CRC_CTRL_DEFAULT 0x00000000
-#define mmDPP_TOP1_HOST_READ_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
-#define mmCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT_DEFAULT 0x00000008
-#define mmCNVC_CFG1_FORMAT_CONTROL_DEFAULT 0x00000000
-#define mmCNVC_CFG1_FCNV_FP_SCALE_BIAS_DEFAULT 0x00003c00
-#define mmCNVC_CFG1_DENORM_CONTROL_DEFAULT 0x00002000
-#define mmCNVC_CFG1_COLOR_KEYER_CONTROL_DEFAULT 0x00000000
-#define mmCNVC_CFG1_COLOR_KEYER_ALPHA_DEFAULT 0x00000000
-#define mmCNVC_CFG1_COLOR_KEYER_RED_DEFAULT 0x00000000
-#define mmCNVC_CFG1_COLOR_KEYER_GREEN_DEFAULT 0x00000000
-#define mmCNVC_CFG1_COLOR_KEYER_BLUE_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
-#define mmCNVC_CUR1_CURSOR0_CONTROL_DEFAULT 0x0003ff00
-#define mmCNVC_CUR1_CURSOR0_COLOR0_DEFAULT 0x00000000
-#define mmCNVC_CUR1_CURSOR0_COLOR1_DEFAULT 0x00000000
-#define mmCNVC_CUR1_CURSOR0_FP_SCALE_BIAS_DEFAULT 0x00003c00
-
-
-// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
-#define mmDSCL1_SCL_COEF_RAM_TAP_SELECT_DEFAULT 0x00000000
-#define mmDSCL1_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmDSCL1_SCL_MODE_DEFAULT 0x00000000
-#define mmDSCL1_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmDSCL1_DSCL_CONTROL_DEFAULT 0x00000000
-#define mmDSCL1_DSCL_2TAP_CONTROL_DEFAULT 0x01000100
-#define mmDSCL1_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmDSCL1_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmDSCL1_SCL_HORZ_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmDSCL1_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmDSCL1_SCL_VERT_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_C_DEFAULT 0x01000000
-#define mmDSCL1_SCL_BLACK_OFFSET_DEFAULT 0x80000000
-#define mmDSCL1_DSCL_UPDATE_DEFAULT 0x00000000
-#define mmDSCL1_DSCL_AUTOCAL_DEFAULT 0x00000000
-#define mmDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmDSCL1_OTG_H_BLANK_DEFAULT 0x00000000
-#define mmDSCL1_OTG_V_BLANK_DEFAULT 0x00000000
-#define mmDSCL1_RECOUT_START_DEFAULT 0x00000000
-#define mmDSCL1_RECOUT_SIZE_DEFAULT 0x00000000
-#define mmDSCL1_MPC_SIZE_DEFAULT 0x00000000
-#define mmDSCL1_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmDSCL1_LB_MEMORY_CTRL_DEFAULT 0x00003f00
-#define mmDSCL1_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmDSCL1_DSCL_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDSCL1_DSCL_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDSCL1_OBUF_CONTROL_DEFAULT 0xe0000000
-#define mmDSCL1_OBUF_MEM_PWR_CTRL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
-#define mmCM1_CM_CONTROL_DEFAULT 0x00000000
-#define mmCM1_CM_COMA_C11_C12_DEFAULT 0x00002000
-#define mmCM1_CM_COMA_C13_C14_DEFAULT 0x00000000
-#define mmCM1_CM_COMA_C21_C22_DEFAULT 0x20000000
-#define mmCM1_CM_COMA_C23_C24_DEFAULT 0x00000000
-#define mmCM1_CM_COMA_C31_C32_DEFAULT 0x00000000
-#define mmCM1_CM_COMA_C33_C34_DEFAULT 0x00002000
-#define mmCM1_CM_COMB_C11_C12_DEFAULT 0x00002000
-#define mmCM1_CM_COMB_C13_C14_DEFAULT 0x00000000
-#define mmCM1_CM_COMB_C21_C22_DEFAULT 0x20000000
-#define mmCM1_CM_COMB_C23_C24_DEFAULT 0x00000000
-#define mmCM1_CM_COMB_C31_C32_DEFAULT 0x00000000
-#define mmCM1_CM_COMB_C33_C34_DEFAULT 0x00002000
-#define mmCM1_CM_IGAM_CONTROL_DEFAULT 0x08000002
-#define mmCM1_CM_IGAM_LUT_RW_CONTROL_DEFAULT 0x00011070
-#define mmCM1_CM_IGAM_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmCM1_CM_IGAM_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmCM1_CM_IGAM_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmCM1_CM_IGAM_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmCM1_CM_IGAM_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmCM1_CM_IGAM_LUT_BW_OFFSET_BLUE_DEFAULT 0xffff0000
-#define mmCM1_CM_IGAM_LUT_BW_OFFSET_GREEN_DEFAULT 0xffff0000
-#define mmCM1_CM_IGAM_LUT_BW_OFFSET_RED_DEFAULT 0xffff0000
-#define mmCM1_CM_ICSC_CONTROL_DEFAULT 0x00000000
-#define mmCM1_CM_ICSC_C11_C12_DEFAULT 0x00002000
-#define mmCM1_CM_ICSC_C13_C14_DEFAULT 0x00000000
-#define mmCM1_CM_ICSC_C21_C22_DEFAULT 0x20000000
-#define mmCM1_CM_ICSC_C23_C24_DEFAULT 0x00000000
-#define mmCM1_CM_ICSC_C31_C32_DEFAULT 0x00000000
-#define mmCM1_CM_ICSC_C33_C34_DEFAULT 0x00002000
-#define mmCM1_CM_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmCM1_CM_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmCM1_CM_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmCM1_CM_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmCM1_CM_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmCM1_CM_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmCM1_CM_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmCM1_CM_OCSC_CONTROL_DEFAULT 0x00000000
-#define mmCM1_CM_OCSC_C11_C12_DEFAULT 0x00002000
-#define mmCM1_CM_OCSC_C13_C14_DEFAULT 0x00000000
-#define mmCM1_CM_OCSC_C21_C22_DEFAULT 0x20000000
-#define mmCM1_CM_OCSC_C23_C24_DEFAULT 0x00000000
-#define mmCM1_CM_OCSC_C31_C32_DEFAULT 0x00000000
-#define mmCM1_CM_OCSC_C33_C34_DEFAULT 0x00002000
-#define mmCM1_CM_BNS_VALUES_R_DEFAULT 0x20000000
-#define mmCM1_CM_BNS_VALUES_G_DEFAULT 0x20000000
-#define mmCM1_CM_BNS_VALUES_B_DEFAULT 0x20000000
-#define mmCM1_CM_DGAM_CONTROL_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_LUT_INDEX_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_LUT_DATA_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCM1_CM_DGAM_RAMA_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_REGION_0_1_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_REGION_2_3_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_REGION_4_5_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_REGION_6_7_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_REGION_8_9_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_REGION_10_11_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_REGION_12_13_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMA_REGION_14_15_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_REGION_0_1_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_REGION_2_3_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_REGION_4_5_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_REGION_6_7_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_REGION_8_9_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_REGION_10_11_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_REGION_12_13_DEFAULT 0x00000000
-#define mmCM1_CM_DGAM_RAMB_REGION_14_15_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_CONTROL_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_LUT_INDEX_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_LUT_DATA_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCM1_CM_RGAM_RAMA_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_0_1_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_2_3_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_4_5_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_6_7_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_8_9_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_10_11_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_12_13_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_14_15_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_16_17_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_18_19_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_20_21_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_22_23_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_24_25_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_26_27_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_28_29_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_30_31_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMA_REGION_32_33_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_0_1_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_2_3_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_4_5_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_6_7_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_8_9_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_10_11_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_12_13_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_14_15_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_16_17_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_18_19_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_20_21_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_22_23_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_24_25_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_26_27_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_28_29_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_30_31_DEFAULT 0x00000000
-#define mmCM1_CM_RGAM_RAMB_REGION_32_33_DEFAULT 0x00000000
-#define mmCM1_CM_HDR_MULT_COEF_DEFAULT 0x0001f000
-#define mmCM1_CM_RANGE_CLAMP_CONTROL_R_DEFAULT 0xfbff7bff
-#define mmCM1_CM_RANGE_CLAMP_CONTROL_G_DEFAULT 0xfbff7bff
-#define mmCM1_CM_RANGE_CLAMP_CONTROL_B_DEFAULT 0xfbff7bff
-#define mmCM1_CM_DENORM_CONTROL_DEFAULT 0x00000000
-#define mmCM1_CM_CMOUT_CONTROL_DEFAULT 0x0000000a
-#define mmCM1_CM_CMOUT_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmCM1_CM_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmCM1_CM_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON13_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON13_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
-#define mmDPP_TOP2_DPP_CONTROL_DEFAULT 0x70000000
-#define mmDPP_TOP2_DPP_SOFT_RESET_DEFAULT 0x00000000
-#define mmDPP_TOP2_DPP_CRC_VAL_R_G_DEFAULT 0x00000000
-#define mmDPP_TOP2_DPP_CRC_VAL_B_A_DEFAULT 0x00000000
-#define mmDPP_TOP2_DPP_CRC_CTRL_DEFAULT 0x00000000
-#define mmDPP_TOP2_HOST_READ_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
-#define mmCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT_DEFAULT 0x00000008
-#define mmCNVC_CFG2_FORMAT_CONTROL_DEFAULT 0x00000000
-#define mmCNVC_CFG2_FCNV_FP_SCALE_BIAS_DEFAULT 0x00003c00
-#define mmCNVC_CFG2_DENORM_CONTROL_DEFAULT 0x00002000
-#define mmCNVC_CFG2_COLOR_KEYER_CONTROL_DEFAULT 0x00000000
-#define mmCNVC_CFG2_COLOR_KEYER_ALPHA_DEFAULT 0x00000000
-#define mmCNVC_CFG2_COLOR_KEYER_RED_DEFAULT 0x00000000
-#define mmCNVC_CFG2_COLOR_KEYER_GREEN_DEFAULT 0x00000000
-#define mmCNVC_CFG2_COLOR_KEYER_BLUE_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
-#define mmCNVC_CUR2_CURSOR0_CONTROL_DEFAULT 0x0003ff00
-#define mmCNVC_CUR2_CURSOR0_COLOR0_DEFAULT 0x00000000
-#define mmCNVC_CUR2_CURSOR0_COLOR1_DEFAULT 0x00000000
-#define mmCNVC_CUR2_CURSOR0_FP_SCALE_BIAS_DEFAULT 0x00003c00
-
-
-// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
-#define mmDSCL2_SCL_COEF_RAM_TAP_SELECT_DEFAULT 0x00000000
-#define mmDSCL2_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmDSCL2_SCL_MODE_DEFAULT 0x00000000
-#define mmDSCL2_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmDSCL2_DSCL_CONTROL_DEFAULT 0x00000000
-#define mmDSCL2_DSCL_2TAP_CONTROL_DEFAULT 0x01000100
-#define mmDSCL2_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmDSCL2_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmDSCL2_SCL_HORZ_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmDSCL2_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmDSCL2_SCL_VERT_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_C_DEFAULT 0x01000000
-#define mmDSCL2_SCL_BLACK_OFFSET_DEFAULT 0x80000000
-#define mmDSCL2_DSCL_UPDATE_DEFAULT 0x00000000
-#define mmDSCL2_DSCL_AUTOCAL_DEFAULT 0x00000000
-#define mmDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmDSCL2_OTG_H_BLANK_DEFAULT 0x00000000
-#define mmDSCL2_OTG_V_BLANK_DEFAULT 0x00000000
-#define mmDSCL2_RECOUT_START_DEFAULT 0x00000000
-#define mmDSCL2_RECOUT_SIZE_DEFAULT 0x00000000
-#define mmDSCL2_MPC_SIZE_DEFAULT 0x00000000
-#define mmDSCL2_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmDSCL2_LB_MEMORY_CTRL_DEFAULT 0x00003f00
-#define mmDSCL2_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmDSCL2_DSCL_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDSCL2_DSCL_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDSCL2_OBUF_CONTROL_DEFAULT 0xe0000000
-#define mmDSCL2_OBUF_MEM_PWR_CTRL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
-#define mmCM2_CM_CONTROL_DEFAULT 0x00000000
-#define mmCM2_CM_COMA_C11_C12_DEFAULT 0x00002000
-#define mmCM2_CM_COMA_C13_C14_DEFAULT 0x00000000
-#define mmCM2_CM_COMA_C21_C22_DEFAULT 0x20000000
-#define mmCM2_CM_COMA_C23_C24_DEFAULT 0x00000000
-#define mmCM2_CM_COMA_C31_C32_DEFAULT 0x00000000
-#define mmCM2_CM_COMA_C33_C34_DEFAULT 0x00002000
-#define mmCM2_CM_COMB_C11_C12_DEFAULT 0x00002000
-#define mmCM2_CM_COMB_C13_C14_DEFAULT 0x00000000
-#define mmCM2_CM_COMB_C21_C22_DEFAULT 0x20000000
-#define mmCM2_CM_COMB_C23_C24_DEFAULT 0x00000000
-#define mmCM2_CM_COMB_C31_C32_DEFAULT 0x00000000
-#define mmCM2_CM_COMB_C33_C34_DEFAULT 0x00002000
-#define mmCM2_CM_IGAM_CONTROL_DEFAULT 0x08000002
-#define mmCM2_CM_IGAM_LUT_RW_CONTROL_DEFAULT 0x00011070
-#define mmCM2_CM_IGAM_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmCM2_CM_IGAM_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmCM2_CM_IGAM_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmCM2_CM_IGAM_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmCM2_CM_IGAM_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmCM2_CM_IGAM_LUT_BW_OFFSET_BLUE_DEFAULT 0xffff0000
-#define mmCM2_CM_IGAM_LUT_BW_OFFSET_GREEN_DEFAULT 0xffff0000
-#define mmCM2_CM_IGAM_LUT_BW_OFFSET_RED_DEFAULT 0xffff0000
-#define mmCM2_CM_ICSC_CONTROL_DEFAULT 0x00000000
-#define mmCM2_CM_ICSC_C11_C12_DEFAULT 0x00002000
-#define mmCM2_CM_ICSC_C13_C14_DEFAULT 0x00000000
-#define mmCM2_CM_ICSC_C21_C22_DEFAULT 0x20000000
-#define mmCM2_CM_ICSC_C23_C24_DEFAULT 0x00000000
-#define mmCM2_CM_ICSC_C31_C32_DEFAULT 0x00000000
-#define mmCM2_CM_ICSC_C33_C34_DEFAULT 0x00002000
-#define mmCM2_CM_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmCM2_CM_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmCM2_CM_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmCM2_CM_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmCM2_CM_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmCM2_CM_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmCM2_CM_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmCM2_CM_OCSC_CONTROL_DEFAULT 0x00000000
-#define mmCM2_CM_OCSC_C11_C12_DEFAULT 0x00002000
-#define mmCM2_CM_OCSC_C13_C14_DEFAULT 0x00000000
-#define mmCM2_CM_OCSC_C21_C22_DEFAULT 0x20000000
-#define mmCM2_CM_OCSC_C23_C24_DEFAULT 0x00000000
-#define mmCM2_CM_OCSC_C31_C32_DEFAULT 0x00000000
-#define mmCM2_CM_OCSC_C33_C34_DEFAULT 0x00002000
-#define mmCM2_CM_BNS_VALUES_R_DEFAULT 0x20000000
-#define mmCM2_CM_BNS_VALUES_G_DEFAULT 0x20000000
-#define mmCM2_CM_BNS_VALUES_B_DEFAULT 0x20000000
-#define mmCM2_CM_DGAM_CONTROL_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_LUT_INDEX_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_LUT_DATA_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCM2_CM_DGAM_RAMA_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_REGION_0_1_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_REGION_2_3_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_REGION_4_5_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_REGION_6_7_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_REGION_8_9_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_REGION_10_11_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_REGION_12_13_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMA_REGION_14_15_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_REGION_0_1_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_REGION_2_3_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_REGION_4_5_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_REGION_6_7_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_REGION_8_9_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_REGION_10_11_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_REGION_12_13_DEFAULT 0x00000000
-#define mmCM2_CM_DGAM_RAMB_REGION_14_15_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_CONTROL_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_LUT_INDEX_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_LUT_DATA_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCM2_CM_RGAM_RAMA_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_0_1_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_2_3_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_4_5_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_6_7_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_8_9_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_10_11_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_12_13_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_14_15_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_16_17_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_18_19_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_20_21_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_22_23_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_24_25_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_26_27_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_28_29_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_30_31_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMA_REGION_32_33_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_0_1_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_2_3_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_4_5_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_6_7_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_8_9_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_10_11_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_12_13_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_14_15_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_16_17_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_18_19_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_20_21_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_22_23_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_24_25_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_26_27_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_28_29_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_30_31_DEFAULT 0x00000000
-#define mmCM2_CM_RGAM_RAMB_REGION_32_33_DEFAULT 0x00000000
-#define mmCM2_CM_HDR_MULT_COEF_DEFAULT 0x0001f000
-#define mmCM2_CM_RANGE_CLAMP_CONTROL_R_DEFAULT 0xfbff7bff
-#define mmCM2_CM_RANGE_CLAMP_CONTROL_G_DEFAULT 0xfbff7bff
-#define mmCM2_CM_RANGE_CLAMP_CONTROL_B_DEFAULT 0xfbff7bff
-#define mmCM2_CM_DENORM_CONTROL_DEFAULT 0x00000000
-#define mmCM2_CM_CMOUT_CONTROL_DEFAULT 0x0000000a
-#define mmCM2_CM_CMOUT_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmCM2_CM_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmCM2_CM_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON14_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON14_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON14_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON14_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON14_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON14_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON14_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON14_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON14_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
-#define mmDPP_TOP3_DPP_CONTROL_DEFAULT 0x70000000
-#define mmDPP_TOP3_DPP_SOFT_RESET_DEFAULT 0x00000000
-#define mmDPP_TOP3_DPP_CRC_VAL_R_G_DEFAULT 0x00000000
-#define mmDPP_TOP3_DPP_CRC_VAL_B_A_DEFAULT 0x00000000
-#define mmDPP_TOP3_DPP_CRC_CTRL_DEFAULT 0x00000000
-#define mmDPP_TOP3_HOST_READ_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
-#define mmCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT_DEFAULT 0x00000008
-#define mmCNVC_CFG3_FORMAT_CONTROL_DEFAULT 0x00000000
-#define mmCNVC_CFG3_FCNV_FP_SCALE_BIAS_DEFAULT 0x00003c00
-#define mmCNVC_CFG3_DENORM_CONTROL_DEFAULT 0x00002000
-#define mmCNVC_CFG3_COLOR_KEYER_CONTROL_DEFAULT 0x00000000
-#define mmCNVC_CFG3_COLOR_KEYER_ALPHA_DEFAULT 0x00000000
-#define mmCNVC_CFG3_COLOR_KEYER_RED_DEFAULT 0x00000000
-#define mmCNVC_CFG3_COLOR_KEYER_GREEN_DEFAULT 0x00000000
-#define mmCNVC_CFG3_COLOR_KEYER_BLUE_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
-#define mmCNVC_CUR3_CURSOR0_CONTROL_DEFAULT 0x0003ff00
-#define mmCNVC_CUR3_CURSOR0_COLOR0_DEFAULT 0x00000000
-#define mmCNVC_CUR3_CURSOR0_COLOR1_DEFAULT 0x00000000
-#define mmCNVC_CUR3_CURSOR0_FP_SCALE_BIAS_DEFAULT 0x00003c00
-
-
-// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
-#define mmDSCL3_SCL_COEF_RAM_TAP_SELECT_DEFAULT 0x00000000
-#define mmDSCL3_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmDSCL3_SCL_MODE_DEFAULT 0x00000000
-#define mmDSCL3_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmDSCL3_DSCL_CONTROL_DEFAULT 0x00000000
-#define mmDSCL3_DSCL_2TAP_CONTROL_DEFAULT 0x01000100
-#define mmDSCL3_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmDSCL3_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmDSCL3_SCL_HORZ_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmDSCL3_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmDSCL3_SCL_VERT_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_C_DEFAULT 0x01000000
-#define mmDSCL3_SCL_BLACK_OFFSET_DEFAULT 0x80000000
-#define mmDSCL3_DSCL_UPDATE_DEFAULT 0x00000000
-#define mmDSCL3_DSCL_AUTOCAL_DEFAULT 0x00000000
-#define mmDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmDSCL3_OTG_H_BLANK_DEFAULT 0x00000000
-#define mmDSCL3_OTG_V_BLANK_DEFAULT 0x00000000
-#define mmDSCL3_RECOUT_START_DEFAULT 0x00000000
-#define mmDSCL3_RECOUT_SIZE_DEFAULT 0x00000000
-#define mmDSCL3_MPC_SIZE_DEFAULT 0x00000000
-#define mmDSCL3_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmDSCL3_LB_MEMORY_CTRL_DEFAULT 0x00003f00
-#define mmDSCL3_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmDSCL3_DSCL_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDSCL3_DSCL_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDSCL3_OBUF_CONTROL_DEFAULT 0xe0000000
-#define mmDSCL3_OBUF_MEM_PWR_CTRL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
-#define mmCM3_CM_CONTROL_DEFAULT 0x00000000
-#define mmCM3_CM_COMA_C11_C12_DEFAULT 0x00002000
-#define mmCM3_CM_COMA_C13_C14_DEFAULT 0x00000000
-#define mmCM3_CM_COMA_C21_C22_DEFAULT 0x20000000
-#define mmCM3_CM_COMA_C23_C24_DEFAULT 0x00000000
-#define mmCM3_CM_COMA_C31_C32_DEFAULT 0x00000000
-#define mmCM3_CM_COMA_C33_C34_DEFAULT 0x00002000
-#define mmCM3_CM_COMB_C11_C12_DEFAULT 0x00002000
-#define mmCM3_CM_COMB_C13_C14_DEFAULT 0x00000000
-#define mmCM3_CM_COMB_C21_C22_DEFAULT 0x20000000
-#define mmCM3_CM_COMB_C23_C24_DEFAULT 0x00000000
-#define mmCM3_CM_COMB_C31_C32_DEFAULT 0x00000000
-#define mmCM3_CM_COMB_C33_C34_DEFAULT 0x00002000
-#define mmCM3_CM_IGAM_CONTROL_DEFAULT 0x08000002
-#define mmCM3_CM_IGAM_LUT_RW_CONTROL_DEFAULT 0x00011070
-#define mmCM3_CM_IGAM_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmCM3_CM_IGAM_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmCM3_CM_IGAM_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmCM3_CM_IGAM_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmCM3_CM_IGAM_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmCM3_CM_IGAM_LUT_BW_OFFSET_BLUE_DEFAULT 0xffff0000
-#define mmCM3_CM_IGAM_LUT_BW_OFFSET_GREEN_DEFAULT 0xffff0000
-#define mmCM3_CM_IGAM_LUT_BW_OFFSET_RED_DEFAULT 0xffff0000
-#define mmCM3_CM_ICSC_CONTROL_DEFAULT 0x00000000
-#define mmCM3_CM_ICSC_C11_C12_DEFAULT 0x00002000
-#define mmCM3_CM_ICSC_C13_C14_DEFAULT 0x00000000
-#define mmCM3_CM_ICSC_C21_C22_DEFAULT 0x20000000
-#define mmCM3_CM_ICSC_C23_C24_DEFAULT 0x00000000
-#define mmCM3_CM_ICSC_C31_C32_DEFAULT 0x00000000
-#define mmCM3_CM_ICSC_C33_C34_DEFAULT 0x00002000
-#define mmCM3_CM_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmCM3_CM_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmCM3_CM_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmCM3_CM_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmCM3_CM_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmCM3_CM_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmCM3_CM_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmCM3_CM_OCSC_CONTROL_DEFAULT 0x00000000
-#define mmCM3_CM_OCSC_C11_C12_DEFAULT 0x00002000
-#define mmCM3_CM_OCSC_C13_C14_DEFAULT 0x00000000
-#define mmCM3_CM_OCSC_C21_C22_DEFAULT 0x20000000
-#define mmCM3_CM_OCSC_C23_C24_DEFAULT 0x00000000
-#define mmCM3_CM_OCSC_C31_C32_DEFAULT 0x00000000
-#define mmCM3_CM_OCSC_C33_C34_DEFAULT 0x00002000
-#define mmCM3_CM_BNS_VALUES_R_DEFAULT 0x20000000
-#define mmCM3_CM_BNS_VALUES_G_DEFAULT 0x20000000
-#define mmCM3_CM_BNS_VALUES_B_DEFAULT 0x20000000
-#define mmCM3_CM_DGAM_CONTROL_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_LUT_INDEX_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_LUT_DATA_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCM3_CM_DGAM_RAMA_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_REGION_0_1_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_REGION_2_3_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_REGION_4_5_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_REGION_6_7_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_REGION_8_9_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_REGION_10_11_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_REGION_12_13_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMA_REGION_14_15_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_REGION_0_1_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_REGION_2_3_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_REGION_4_5_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_REGION_6_7_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_REGION_8_9_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_REGION_10_11_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_REGION_12_13_DEFAULT 0x00000000
-#define mmCM3_CM_DGAM_RAMB_REGION_14_15_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_CONTROL_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_LUT_INDEX_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_LUT_DATA_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCM3_CM_RGAM_RAMA_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_0_1_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_2_3_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_4_5_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_6_7_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_8_9_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_10_11_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_12_13_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_14_15_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_16_17_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_18_19_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_20_21_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_22_23_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_24_25_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_26_27_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_28_29_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_30_31_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMA_REGION_32_33_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_START_CNTL_B_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_START_CNTL_G_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_START_CNTL_R_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_SLOPE_CNTL_B_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_SLOPE_CNTL_G_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_SLOPE_CNTL_R_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_END_CNTL1_B_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_END_CNTL2_B_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_END_CNTL1_G_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_END_CNTL2_G_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_END_CNTL1_R_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_END_CNTL2_R_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_0_1_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_2_3_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_4_5_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_6_7_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_8_9_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_10_11_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_12_13_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_14_15_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_16_17_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_18_19_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_20_21_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_22_23_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_24_25_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_26_27_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_28_29_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_30_31_DEFAULT 0x00000000
-#define mmCM3_CM_RGAM_RAMB_REGION_32_33_DEFAULT 0x00000000
-#define mmCM3_CM_HDR_MULT_COEF_DEFAULT 0x0001f000
-#define mmCM3_CM_RANGE_CLAMP_CONTROL_R_DEFAULT 0xfbff7bff
-#define mmCM3_CM_RANGE_CLAMP_CONTROL_G_DEFAULT 0xfbff7bff
-#define mmCM3_CM_RANGE_CLAMP_CONTROL_B_DEFAULT 0xfbff7bff
-#define mmCM3_CM_DENORM_CONTROL_DEFAULT 0x00000000
-#define mmCM3_CM_CMOUT_CONTROL_DEFAULT 0x0000000a
-#define mmCM3_CM_CMOUT_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmCM3_CM_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmCM3_CM_MEM_PWR_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON15_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON15_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON15_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON15_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON15_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON15_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON15_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON15_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON15_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mpc_mpcc0_dispdec
-#define mmMPCC0_MPCC_TOP_SEL_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_BOT_SEL_DEFAULT 0x0000000f
-#define mmMPCC0_MPCC_OPP_ID_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_CONTROL_DEFAULT 0xffff0061
-#define mmMPCC0_MPCC_SM_CONTROL_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_UPDATE_LOCK_SEL_DEFAULT 0x0000000f
-#define mmMPCC0_MPCC_TOP_OFFSET_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_BOT_OFFSET_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_OFFSET_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_BG_R_CR_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_BG_G_Y_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_BG_B_CB_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_STALL_STATUS_DEFAULT 0x00000000
-#define mmMPCC0_MPCC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mpc_mpcc1_dispdec
-#define mmMPCC1_MPCC_TOP_SEL_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_BOT_SEL_DEFAULT 0x0000000f
-#define mmMPCC1_MPCC_OPP_ID_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_CONTROL_DEFAULT 0xffff0061
-#define mmMPCC1_MPCC_SM_CONTROL_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_UPDATE_LOCK_SEL_DEFAULT 0x0000000f
-#define mmMPCC1_MPCC_TOP_OFFSET_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_BOT_OFFSET_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_OFFSET_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_BG_R_CR_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_BG_G_Y_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_BG_B_CB_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_STALL_STATUS_DEFAULT 0x00000000
-#define mmMPCC1_MPCC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mpc_mpcc2_dispdec
-#define mmMPCC2_MPCC_TOP_SEL_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_BOT_SEL_DEFAULT 0x0000000f
-#define mmMPCC2_MPCC_OPP_ID_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_CONTROL_DEFAULT 0xffff0061
-#define mmMPCC2_MPCC_SM_CONTROL_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_UPDATE_LOCK_SEL_DEFAULT 0x0000000f
-#define mmMPCC2_MPCC_TOP_OFFSET_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_BOT_OFFSET_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_OFFSET_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_BG_R_CR_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_BG_G_Y_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_BG_B_CB_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_STALL_STATUS_DEFAULT 0x00000000
-#define mmMPCC2_MPCC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mpc_mpcc3_dispdec
-#define mmMPCC3_MPCC_TOP_SEL_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_BOT_SEL_DEFAULT 0x0000000f
-#define mmMPCC3_MPCC_OPP_ID_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_CONTROL_DEFAULT 0xffff0061
-#define mmMPCC3_MPCC_SM_CONTROL_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_UPDATE_LOCK_SEL_DEFAULT 0x0000000f
-#define mmMPCC3_MPCC_TOP_OFFSET_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_BOT_OFFSET_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_OFFSET_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_BG_R_CR_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_BG_G_Y_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_BG_B_CB_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_STALL_STATUS_DEFAULT 0x00000000
-#define mmMPCC3_MPCC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
-#define mmMPC_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmMPC_SOFT_RESET_DEFAULT 0x00000000
-#define mmMPC_CRC_CTRL_DEFAULT 0x00000000
-#define mmMPC_CRC_SEL_CONTROL_DEFAULT 0x00000000
-#define mmMPC_CRC_RESULT_AR_DEFAULT 0x00000000
-#define mmMPC_CRC_RESULT_GB_DEFAULT 0x00000000
-#define mmMPC_CRC_RESULT_C_DEFAULT 0x00000000
-#define mmMPC_PERFMON_EVENT_CTRL_DEFAULT 0x00000000
-#define mmMPC_BYPASS_BG_AR_DEFAULT 0x00000000
-#define mmMPC_BYPASS_BG_GB_DEFAULT 0x00000000
-#define mmMPC_OUT0_MUX_DEFAULT 0x0000000f
-#define mmMPC_OUT1_MUX_DEFAULT 0x0000000f
-#define mmMPC_OUT2_MUX_DEFAULT 0x0000000f
-#define mmMPC_OUT3_MUX_DEFAULT 0x0000000f
-#define mmMPC_STALL_GRACE_WINDOW_DEFAULT 0x00000000
-#define mmADR_CFG_VUPDATE_LOCK_SET0_DEFAULT 0x00000000
-#define mmADR_VUPDATE_LOCK_SET0_DEFAULT 0x00000000
-#define mmCUR0_VUPDATE_LOCK_SET0_DEFAULT 0x00000000
-#define mmCUR1_VUPDATE_LOCK_SET0_DEFAULT 0x00000000
-#define mmADR_CFG_VUPDATE_LOCK_SET1_DEFAULT 0x00000000
-#define mmADR_VUPDATE_LOCK_SET1_DEFAULT 0x00000000
-#define mmCUR0_VUPDATE_LOCK_SET1_DEFAULT 0x00000000
-#define mmCUR1_VUPDATE_LOCK_SET1_DEFAULT 0x00000000
-#define mmADR_CFG_VUPDATE_LOCK_SET2_DEFAULT 0x00000000
-#define mmADR_VUPDATE_LOCK_SET2_DEFAULT 0x00000000
-#define mmCUR0_VUPDATE_LOCK_SET2_DEFAULT 0x00000000
-#define mmCUR1_VUPDATE_LOCK_SET2_DEFAULT 0x00000000
-#define mmADR_CFG_VUPDATE_LOCK_SET3_DEFAULT 0x00000000
-#define mmADR_VUPDATE_LOCK_SET3_DEFAULT 0x00000000
-#define mmCUR0_VUPDATE_LOCK_SET3_DEFAULT 0x00000000
-#define mmCUR1_VUPDATE_LOCK_SET3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mpc_mpc_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON16_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON16_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON16_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON16_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON16_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON16_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON16_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON16_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON16_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_abm0_dispdec
-#define mmABM0_BL1_PWM_AMBIENT_LIGHT_LEVEL_DEFAULT 0x00000000
-#define mmABM0_BL1_PWM_USER_LEVEL_DEFAULT 0x00000000
-#define mmABM0_BL1_PWM_TARGET_ABM_LEVEL_DEFAULT 0x00000000
-#define mmABM0_BL1_PWM_CURRENT_ABM_LEVEL_DEFAULT 0x00000000
-#define mmABM0_BL1_PWM_FINAL_DUTY_CYCLE_DEFAULT 0x00000000
-#define mmABM0_BL1_PWM_MINIMUM_DUTY_CYCLE_DEFAULT 0x00000000
-#define mmABM0_BL1_PWM_ABM_CNTL_DEFAULT 0x00000000
-#define mmABM0_BL1_PWM_BL_UPDATE_SAMPLE_RATE_DEFAULT 0x00000000
-#define mmABM0_BL1_PWM_GRP2_REG_LOCK_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_CNTL_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_IPCSC_COEFF_SEL_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_ACE_OFFSET_SLOPE_0_DEFAULT 0x00000400
-#define mmABM0_DC_ABM1_ACE_OFFSET_SLOPE_1_DEFAULT 0x00000400
-#define mmABM0_DC_ABM1_ACE_OFFSET_SLOPE_2_DEFAULT 0x00000400
-#define mmABM0_DC_ABM1_ACE_OFFSET_SLOPE_3_DEFAULT 0x00000400
-#define mmABM0_DC_ABM1_ACE_OFFSET_SLOPE_4_DEFAULT 0x00000400
-#define mmABM0_DC_ABM1_ACE_THRES_12_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_ACE_THRES_34_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_ACE_CNTL_MISC_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HGLS_REG_READ_PROGRESS_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_MISC_CTRL_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_LS_SUM_OF_LUMA_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_LS_MIN_MAX_LUMA_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_LS_PIXEL_COUNT_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_SAMPLE_RATE_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_LS_SAMPLE_RATE_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_1_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_2_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_3_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_4_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_5_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_6_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_7_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_8_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_9_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_10_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_11_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_12_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_13_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_14_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_15_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_16_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_17_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_18_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_19_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_20_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_21_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_22_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_23_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_HG_RESULT_24_DEFAULT 0x00000000
-#define mmABM0_DC_ABM1_BL_MASTER_LOCK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_abm1_dispdec
-#define mmABM1_BL1_PWM_AMBIENT_LIGHT_LEVEL_DEFAULT 0x00000000
-#define mmABM1_BL1_PWM_USER_LEVEL_DEFAULT 0x00000000
-#define mmABM1_BL1_PWM_TARGET_ABM_LEVEL_DEFAULT 0x00000000
-#define mmABM1_BL1_PWM_CURRENT_ABM_LEVEL_DEFAULT 0x00000000
-#define mmABM1_BL1_PWM_FINAL_DUTY_CYCLE_DEFAULT 0x00000000
-#define mmABM1_BL1_PWM_MINIMUM_DUTY_CYCLE_DEFAULT 0x00000000
-#define mmABM1_BL1_PWM_ABM_CNTL_DEFAULT 0x00000000
-#define mmABM1_BL1_PWM_BL_UPDATE_SAMPLE_RATE_DEFAULT 0x00000000
-#define mmABM1_BL1_PWM_GRP2_REG_LOCK_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_CNTL_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_IPCSC_COEFF_SEL_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_ACE_OFFSET_SLOPE_0_DEFAULT 0x00000400
-#define mmABM1_DC_ABM1_ACE_OFFSET_SLOPE_1_DEFAULT 0x00000400
-#define mmABM1_DC_ABM1_ACE_OFFSET_SLOPE_2_DEFAULT 0x00000400
-#define mmABM1_DC_ABM1_ACE_OFFSET_SLOPE_3_DEFAULT 0x00000400
-#define mmABM1_DC_ABM1_ACE_OFFSET_SLOPE_4_DEFAULT 0x00000400
-#define mmABM1_DC_ABM1_ACE_THRES_12_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_ACE_THRES_34_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_ACE_CNTL_MISC_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HGLS_REG_READ_PROGRESS_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_MISC_CTRL_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_LS_SUM_OF_LUMA_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_LS_MIN_MAX_LUMA_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_LS_FILTERED_MIN_MAX_LUMA_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_LS_PIXEL_COUNT_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_SAMPLE_RATE_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_LS_SAMPLE_RATE_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_BIN_1_32_SHIFT_FLAG_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_BIN_1_8_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_BIN_9_16_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_BIN_17_24_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_BIN_25_32_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_1_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_2_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_3_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_4_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_5_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_6_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_7_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_8_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_9_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_10_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_11_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_12_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_13_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_14_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_15_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_16_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_17_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_18_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_19_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_20_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_21_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_22_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_23_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_HG_RESULT_24_DEFAULT 0x00000000
-#define mmABM1_DC_ABM1_BL_MASTER_LOCK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_fmt0_dispdec
-#define mmFMT0_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT0_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT0_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT0_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT0_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT0_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT0_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT0_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT0_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT0_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT0_FMT_MAP420_MEMORY_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_oppbuf0_dispdec
-#define mmOPPBUF0_OPPBUF_CONTROL_DEFAULT 0x00000000
-#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_0_DEFAULT 0x00000000
-#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe0_dispdec
-#define mmOPP_PIPE0_OPP_PIPE_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
-#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK_DEFAULT 0x0000ffff
-#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_fmt1_dispdec
-#define mmFMT1_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT1_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT1_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT1_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT1_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT1_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT1_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT1_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT1_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT1_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT1_FMT_MAP420_MEMORY_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_oppbuf1_dispdec
-#define mmOPPBUF1_OPPBUF_CONTROL_DEFAULT 0x00000000
-#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_0_DEFAULT 0x00000000
-#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe1_dispdec
-#define mmOPP_PIPE1_OPP_PIPE_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
-#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK_DEFAULT 0x0000ffff
-#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_fmt2_dispdec
-#define mmFMT2_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT2_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT2_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT2_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT2_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT2_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT2_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT2_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT2_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT2_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT2_FMT_MAP420_MEMORY_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_oppbuf2_dispdec
-#define mmOPPBUF2_OPPBUF_CONTROL_DEFAULT 0x00000000
-#define mmOPPBUF2_OPPBUF_3D_PARAMETERS_0_DEFAULT 0x00000000
-#define mmOPPBUF2_OPPBUF_3D_PARAMETERS_1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe2_dispdec
-#define mmOPP_PIPE2_OPP_PIPE_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe_crc2_dispdec
-#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_CONTROL_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_MASK_DEFAULT 0x0000ffff
-#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT0_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT1_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC2_OPP_PIPE_CRC_RESULT2_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_fmt3_dispdec
-#define mmFMT3_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT3_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT3_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT3_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT3_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT3_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT3_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT3_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT3_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT3_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT3_FMT_MAP420_MEMORY_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_oppbuf3_dispdec
-#define mmOPPBUF3_OPPBUF_CONTROL_DEFAULT 0x00000000
-#define mmOPPBUF3_OPPBUF_3D_PARAMETERS_0_DEFAULT 0x00000000
-#define mmOPPBUF3_OPPBUF_3D_PARAMETERS_1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe3_dispdec
-#define mmOPP_PIPE3_OPP_PIPE_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe_crc3_dispdec
-#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_CONTROL_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_MASK_DEFAULT 0x0000ffff
-#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT0_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT1_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC3_OPP_PIPE_CRC_RESULT2_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_fmt4_dispdec
-#define mmFMT4_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT4_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT4_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT4_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT4_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT4_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT4_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT4_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT4_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT4_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT4_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT4_FMT_MAP420_MEMORY_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_oppbuf4_dispdec
-#define mmOPPBUF4_OPPBUF_CONTROL_DEFAULT 0x00000000
-#define mmOPPBUF4_OPPBUF_3D_PARAMETERS_0_DEFAULT 0x00000000
-#define mmOPPBUF4_OPPBUF_3D_PARAMETERS_1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe4_dispdec
-#define mmOPP_PIPE4_OPP_PIPE_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe_crc4_dispdec
-#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_CONTROL_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_MASK_DEFAULT 0x0000ffff
-#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT0_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT1_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC4_OPP_PIPE_CRC_RESULT2_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_fmt5_dispdec
-#define mmFMT5_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT5_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT5_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT5_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT5_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT5_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT5_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT5_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT5_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT5_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT5_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT5_FMT_MAP420_MEMORY_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_oppbuf5_dispdec
-#define mmOPPBUF5_OPPBUF_CONTROL_DEFAULT 0x00000000
-#define mmOPPBUF5_OPPBUF_3D_PARAMETERS_0_DEFAULT 0x00000000
-#define mmOPPBUF5_OPPBUF_3D_PARAMETERS_1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe5_dispdec
-#define mmOPP_PIPE5_OPP_PIPE_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_pipe_crc5_dispdec
-#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_CONTROL_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_MASK_DEFAULT 0x0000ffff
-#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT0_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT1_DEFAULT 0x00000000
-#define mmOPP_PIPE_CRC5_OPP_PIPE_CRC_RESULT2_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_top_dispdec
-#define mmOPP_TOP_CLK_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_opp_opp_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON17_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON17_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON17_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON17_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON17_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON17_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON17_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON17_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON17_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_odm0_dispdec
-#define mmODM0_OPTC_INPUT_GLOBAL_CONTROL_DEFAULT 0x00000000
-#define mmODM0_OPTC_DATA_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmODM0_OPTC_INPUT_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmODM0_OPTC_INPUT_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_odm1_dispdec
-#define mmODM1_OPTC_INPUT_GLOBAL_CONTROL_DEFAULT 0x00000000
-#define mmODM1_OPTC_DATA_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmODM1_OPTC_INPUT_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmODM1_OPTC_INPUT_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_odm2_dispdec
-#define mmODM2_OPTC_INPUT_GLOBAL_CONTROL_DEFAULT 0x00000000
-#define mmODM2_OPTC_DATA_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmODM2_OPTC_INPUT_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmODM2_OPTC_INPUT_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_odm3_dispdec
-#define mmODM3_OPTC_INPUT_GLOBAL_CONTROL_DEFAULT 0x00000000
-#define mmODM3_OPTC_DATA_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmODM3_OPTC_INPUT_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmODM3_OPTC_INPUT_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_odm4_dispdec
-#define mmODM4_OPTC_INPUT_GLOBAL_CONTROL_DEFAULT 0x00000000
-#define mmODM4_OPTC_DATA_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmODM4_OPTC_INPUT_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmODM4_OPTC_INPUT_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_odm5_dispdec
-#define mmODM5_OPTC_INPUT_GLOBAL_CONTROL_DEFAULT 0x00000000
-#define mmODM5_OPTC_DATA_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmODM5_OPTC_INPUT_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmODM5_OPTC_INPUT_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_otg0_dispdec
-#define mmOTG0_OTG_H_TOTAL_DEFAULT 0x00000000
-#define mmOTG0_OTG_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG0_OTG_H_SYNC_A_DEFAULT 0x00000000
-#define mmOTG0_OTG_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG0_OTG_H_TIMING_CNTL_DEFAULT 0x00000000
-#define mmOTG0_OTG_V_TOTAL_DEFAULT 0x00000000
-#define mmOTG0_OTG_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmOTG0_OTG_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmOTG0_OTG_V_TOTAL_MID_DEFAULT 0x00000000
-#define mmOTG0_OTG_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG0_OTG_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG0_OTG_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG0_OTG_V_SYNC_A_DEFAULT 0x00000000
-#define mmOTG0_OTG_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG0_OTG_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmOTG0_OTG_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG0_OTG_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmOTG0_OTG_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG0_OTG_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmOTG0_OTG_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmOTG0_OTG_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmOTG0_OTG_CONTROL_DEFAULT 0x80000110
-#define mmOTG0_OTG_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_PIPE_ABORT_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmOTG0_OTG_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmOTG0_OTG_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmOTG0_OTG_STATUS_DEFAULT 0x00000000
-#define mmOTG0_OTG_STATUS_POSITION_DEFAULT 0x00000000
-#define mmOTG0_OTG_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmOTG0_OTG_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmOTG0_OTG_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmOTG0_OTG_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmOTG0_OTG_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_COUNT_RESET_DEFAULT 0x00000000
-#define mmOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmOTG0_OTG_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_STEREO_STATUS_DEFAULT 0x00000000
-#define mmOTG0_OTG_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmOTG0_OTG_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmOTG0_OTG_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmOTG0_OTG_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG0_OTG_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmOTG0_OTG_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmOTG0_OTG_MASTER_EN_DEFAULT 0x00000000
-#define mmOTG0_OTG_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmOTG0_OTG_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG0_OTG_BLACK_COLOR_DEFAULT 0x00000000
-#define mmOTG0_OTG_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG0_OTG_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC_CNTL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC2_DATA_RG_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC2_DATA_B_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC3_DATA_RG_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC3_DATA_B_DEFAULT 0x00000000
-#define mmOTG0_OTG_CRC_SIG_RED_GREEN_MASK_DEFAULT 0xffffffff
-#define mmOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0xffffffff
-#define mmOTG0_OTG_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmOTG0_OTG_3D_STRUCTURE_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmOTG0_OTG_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmOTG0_OTG_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_VSTARTUP_PARAM_DEFAULT 0x00000000
-#define mmOTG0_OTG_VUPDATE_PARAM_DEFAULT 0x00010000
-#define mmOTG0_OTG_VREADY_PARAM_DEFAULT 0x00000000
-#define mmOTG0_OTG_GLOBAL_SYNC_STATUS_DEFAULT 0x00000000
-#define mmOTG0_OTG_MASTER_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG0_OTG_GSL_CONTROL_DEFAULT 0x00020000
-#define mmOTG0_OTG_GSL_WINDOW_X_DEFAULT 0x00000000
-#define mmOTG0_OTG_GSL_WINDOW_Y_DEFAULT 0x00000000
-#define mmOTG0_OTG_VUPDATE_KEEPOUT_DEFAULT 0x00000000
-#define mmOTG0_OTG_GLOBAL_CONTROL0_DEFAULT 0x00000000
-#define mmOTG0_OTG_GLOBAL_CONTROL1_DEFAULT 0x00000000
-#define mmOTG0_OTG_GLOBAL_CONTROL2_DEFAULT 0x00000000
-#define mmOTG0_OTG_GLOBAL_CONTROL3_DEFAULT 0x00000000
-#define mmOTG0_OTG_TRIG_MANUAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_MANUAL_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG0_OTG_DRR_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_REQUEST_CONTROL_DEFAULT 0x00000000
-#define mmOTG0_OTG_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_otg1_dispdec
-#define mmOTG1_OTG_H_TOTAL_DEFAULT 0x00000000
-#define mmOTG1_OTG_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG1_OTG_H_SYNC_A_DEFAULT 0x00000000
-#define mmOTG1_OTG_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG1_OTG_H_TIMING_CNTL_DEFAULT 0x00000000
-#define mmOTG1_OTG_V_TOTAL_DEFAULT 0x00000000
-#define mmOTG1_OTG_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmOTG1_OTG_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmOTG1_OTG_V_TOTAL_MID_DEFAULT 0x00000000
-#define mmOTG1_OTG_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG1_OTG_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG1_OTG_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG1_OTG_V_SYNC_A_DEFAULT 0x00000000
-#define mmOTG1_OTG_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG1_OTG_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmOTG1_OTG_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG1_OTG_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmOTG1_OTG_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG1_OTG_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmOTG1_OTG_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmOTG1_OTG_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmOTG1_OTG_CONTROL_DEFAULT 0x80000110
-#define mmOTG1_OTG_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_PIPE_ABORT_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmOTG1_OTG_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmOTG1_OTG_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmOTG1_OTG_STATUS_DEFAULT 0x00000000
-#define mmOTG1_OTG_STATUS_POSITION_DEFAULT 0x00000000
-#define mmOTG1_OTG_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmOTG1_OTG_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmOTG1_OTG_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmOTG1_OTG_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmOTG1_OTG_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_COUNT_RESET_DEFAULT 0x00000000
-#define mmOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmOTG1_OTG_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_STEREO_STATUS_DEFAULT 0x00000000
-#define mmOTG1_OTG_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmOTG1_OTG_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmOTG1_OTG_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmOTG1_OTG_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG1_OTG_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmOTG1_OTG_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmOTG1_OTG_MASTER_EN_DEFAULT 0x00000000
-#define mmOTG1_OTG_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmOTG1_OTG_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG1_OTG_BLACK_COLOR_DEFAULT 0x00000000
-#define mmOTG1_OTG_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG1_OTG_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC_CNTL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC2_DATA_RG_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC2_DATA_B_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC3_DATA_RG_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC3_DATA_B_DEFAULT 0x00000000
-#define mmOTG1_OTG_CRC_SIG_RED_GREEN_MASK_DEFAULT 0xffffffff
-#define mmOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0xffffffff
-#define mmOTG1_OTG_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmOTG1_OTG_3D_STRUCTURE_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmOTG1_OTG_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmOTG1_OTG_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_VSTARTUP_PARAM_DEFAULT 0x00000000
-#define mmOTG1_OTG_VUPDATE_PARAM_DEFAULT 0x00010000
-#define mmOTG1_OTG_VREADY_PARAM_DEFAULT 0x00000000
-#define mmOTG1_OTG_GLOBAL_SYNC_STATUS_DEFAULT 0x00000000
-#define mmOTG1_OTG_MASTER_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG1_OTG_GSL_CONTROL_DEFAULT 0x00020000
-#define mmOTG1_OTG_GSL_WINDOW_X_DEFAULT 0x00000000
-#define mmOTG1_OTG_GSL_WINDOW_Y_DEFAULT 0x00000000
-#define mmOTG1_OTG_VUPDATE_KEEPOUT_DEFAULT 0x00000000
-#define mmOTG1_OTG_GLOBAL_CONTROL0_DEFAULT 0x00000000
-#define mmOTG1_OTG_GLOBAL_CONTROL1_DEFAULT 0x00000000
-#define mmOTG1_OTG_GLOBAL_CONTROL2_DEFAULT 0x00000000
-#define mmOTG1_OTG_GLOBAL_CONTROL3_DEFAULT 0x00000000
-#define mmOTG1_OTG_TRIG_MANUAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_MANUAL_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG1_OTG_DRR_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_REQUEST_CONTROL_DEFAULT 0x00000000
-#define mmOTG1_OTG_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_otg2_dispdec
-#define mmOTG2_OTG_H_TOTAL_DEFAULT 0x00000000
-#define mmOTG2_OTG_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG2_OTG_H_SYNC_A_DEFAULT 0x00000000
-#define mmOTG2_OTG_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG2_OTG_H_TIMING_CNTL_DEFAULT 0x00000000
-#define mmOTG2_OTG_V_TOTAL_DEFAULT 0x00000000
-#define mmOTG2_OTG_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmOTG2_OTG_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmOTG2_OTG_V_TOTAL_MID_DEFAULT 0x00000000
-#define mmOTG2_OTG_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG2_OTG_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG2_OTG_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG2_OTG_V_SYNC_A_DEFAULT 0x00000000
-#define mmOTG2_OTG_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG2_OTG_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmOTG2_OTG_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG2_OTG_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmOTG2_OTG_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG2_OTG_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmOTG2_OTG_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmOTG2_OTG_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmOTG2_OTG_CONTROL_DEFAULT 0x80000110
-#define mmOTG2_OTG_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_PIPE_ABORT_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmOTG2_OTG_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmOTG2_OTG_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmOTG2_OTG_STATUS_DEFAULT 0x00000000
-#define mmOTG2_OTG_STATUS_POSITION_DEFAULT 0x00000000
-#define mmOTG2_OTG_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmOTG2_OTG_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmOTG2_OTG_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmOTG2_OTG_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmOTG2_OTG_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_COUNT_RESET_DEFAULT 0x00000000
-#define mmOTG2_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmOTG2_OTG_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_STEREO_STATUS_DEFAULT 0x00000000
-#define mmOTG2_OTG_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmOTG2_OTG_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmOTG2_OTG_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmOTG2_OTG_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG2_OTG_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmOTG2_OTG_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmOTG2_OTG_MASTER_EN_DEFAULT 0x00000000
-#define mmOTG2_OTG_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmOTG2_OTG_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG2_OTG_BLACK_COLOR_DEFAULT 0x00000000
-#define mmOTG2_OTG_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG2_OTG_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmOTG2_OTG_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmOTG2_OTG_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmOTG2_OTG_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC_CNTL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC2_DATA_RG_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC2_DATA_B_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC3_DATA_RG_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC3_DATA_B_DEFAULT 0x00000000
-#define mmOTG2_OTG_CRC_SIG_RED_GREEN_MASK_DEFAULT 0xffffffff
-#define mmOTG2_OTG_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0xffffffff
-#define mmOTG2_OTG_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmOTG2_OTG_3D_STRUCTURE_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmOTG2_OTG_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmOTG2_OTG_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_VSTARTUP_PARAM_DEFAULT 0x00000000
-#define mmOTG2_OTG_VUPDATE_PARAM_DEFAULT 0x00010000
-#define mmOTG2_OTG_VREADY_PARAM_DEFAULT 0x00000000
-#define mmOTG2_OTG_GLOBAL_SYNC_STATUS_DEFAULT 0x00000000
-#define mmOTG2_OTG_MASTER_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG2_OTG_GSL_CONTROL_DEFAULT 0x00020000
-#define mmOTG2_OTG_GSL_WINDOW_X_DEFAULT 0x00000000
-#define mmOTG2_OTG_GSL_WINDOW_Y_DEFAULT 0x00000000
-#define mmOTG2_OTG_VUPDATE_KEEPOUT_DEFAULT 0x00000000
-#define mmOTG2_OTG_GLOBAL_CONTROL0_DEFAULT 0x00000000
-#define mmOTG2_OTG_GLOBAL_CONTROL1_DEFAULT 0x00000000
-#define mmOTG2_OTG_GLOBAL_CONTROL2_DEFAULT 0x00000000
-#define mmOTG2_OTG_GLOBAL_CONTROL3_DEFAULT 0x00000000
-#define mmOTG2_OTG_TRIG_MANUAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_MANUAL_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG2_OTG_DRR_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_REQUEST_CONTROL_DEFAULT 0x00000000
-#define mmOTG2_OTG_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_otg3_dispdec
-#define mmOTG3_OTG_H_TOTAL_DEFAULT 0x00000000
-#define mmOTG3_OTG_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG3_OTG_H_SYNC_A_DEFAULT 0x00000000
-#define mmOTG3_OTG_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG3_OTG_H_TIMING_CNTL_DEFAULT 0x00000000
-#define mmOTG3_OTG_V_TOTAL_DEFAULT 0x00000000
-#define mmOTG3_OTG_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmOTG3_OTG_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmOTG3_OTG_V_TOTAL_MID_DEFAULT 0x00000000
-#define mmOTG3_OTG_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG3_OTG_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG3_OTG_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG3_OTG_V_SYNC_A_DEFAULT 0x00000000
-#define mmOTG3_OTG_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG3_OTG_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmOTG3_OTG_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG3_OTG_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmOTG3_OTG_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG3_OTG_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmOTG3_OTG_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmOTG3_OTG_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmOTG3_OTG_CONTROL_DEFAULT 0x80000110
-#define mmOTG3_OTG_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_PIPE_ABORT_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmOTG3_OTG_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmOTG3_OTG_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmOTG3_OTG_STATUS_DEFAULT 0x00000000
-#define mmOTG3_OTG_STATUS_POSITION_DEFAULT 0x00000000
-#define mmOTG3_OTG_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmOTG3_OTG_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmOTG3_OTG_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmOTG3_OTG_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmOTG3_OTG_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_COUNT_RESET_DEFAULT 0x00000000
-#define mmOTG3_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmOTG3_OTG_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_STEREO_STATUS_DEFAULT 0x00000000
-#define mmOTG3_OTG_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmOTG3_OTG_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmOTG3_OTG_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmOTG3_OTG_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG3_OTG_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmOTG3_OTG_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmOTG3_OTG_MASTER_EN_DEFAULT 0x00000000
-#define mmOTG3_OTG_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmOTG3_OTG_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG3_OTG_BLACK_COLOR_DEFAULT 0x00000000
-#define mmOTG3_OTG_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG3_OTG_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmOTG3_OTG_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmOTG3_OTG_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmOTG3_OTG_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC_CNTL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC2_DATA_RG_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC2_DATA_B_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC3_DATA_RG_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC3_DATA_B_DEFAULT 0x00000000
-#define mmOTG3_OTG_CRC_SIG_RED_GREEN_MASK_DEFAULT 0xffffffff
-#define mmOTG3_OTG_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0xffffffff
-#define mmOTG3_OTG_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmOTG3_OTG_3D_STRUCTURE_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmOTG3_OTG_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmOTG3_OTG_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_VSTARTUP_PARAM_DEFAULT 0x00000000
-#define mmOTG3_OTG_VUPDATE_PARAM_DEFAULT 0x00010000
-#define mmOTG3_OTG_VREADY_PARAM_DEFAULT 0x00000000
-#define mmOTG3_OTG_GLOBAL_SYNC_STATUS_DEFAULT 0x00000000
-#define mmOTG3_OTG_MASTER_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG3_OTG_GSL_CONTROL_DEFAULT 0x00020000
-#define mmOTG3_OTG_GSL_WINDOW_X_DEFAULT 0x00000000
-#define mmOTG3_OTG_GSL_WINDOW_Y_DEFAULT 0x00000000
-#define mmOTG3_OTG_VUPDATE_KEEPOUT_DEFAULT 0x00000000
-#define mmOTG3_OTG_GLOBAL_CONTROL0_DEFAULT 0x00000000
-#define mmOTG3_OTG_GLOBAL_CONTROL1_DEFAULT 0x00000000
-#define mmOTG3_OTG_GLOBAL_CONTROL2_DEFAULT 0x00000000
-#define mmOTG3_OTG_GLOBAL_CONTROL3_DEFAULT 0x00000000
-#define mmOTG3_OTG_TRIG_MANUAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_MANUAL_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG3_OTG_DRR_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_REQUEST_CONTROL_DEFAULT 0x00000000
-#define mmOTG3_OTG_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_otg4_dispdec
-#define mmOTG4_OTG_H_TOTAL_DEFAULT 0x00000000
-#define mmOTG4_OTG_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG4_OTG_H_SYNC_A_DEFAULT 0x00000000
-#define mmOTG4_OTG_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG4_OTG_H_TIMING_CNTL_DEFAULT 0x00000000
-#define mmOTG4_OTG_V_TOTAL_DEFAULT 0x00000000
-#define mmOTG4_OTG_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmOTG4_OTG_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmOTG4_OTG_V_TOTAL_MID_DEFAULT 0x00000000
-#define mmOTG4_OTG_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG4_OTG_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG4_OTG_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG4_OTG_V_SYNC_A_DEFAULT 0x00000000
-#define mmOTG4_OTG_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG4_OTG_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmOTG4_OTG_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG4_OTG_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmOTG4_OTG_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG4_OTG_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmOTG4_OTG_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmOTG4_OTG_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmOTG4_OTG_CONTROL_DEFAULT 0x80000110
-#define mmOTG4_OTG_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_PIPE_ABORT_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmOTG4_OTG_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmOTG4_OTG_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmOTG4_OTG_STATUS_DEFAULT 0x00000000
-#define mmOTG4_OTG_STATUS_POSITION_DEFAULT 0x00000000
-#define mmOTG4_OTG_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmOTG4_OTG_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmOTG4_OTG_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmOTG4_OTG_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmOTG4_OTG_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_COUNT_RESET_DEFAULT 0x00000000
-#define mmOTG4_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmOTG4_OTG_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_STEREO_STATUS_DEFAULT 0x00000000
-#define mmOTG4_OTG_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmOTG4_OTG_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmOTG4_OTG_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmOTG4_OTG_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG4_OTG_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmOTG4_OTG_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmOTG4_OTG_MASTER_EN_DEFAULT 0x00000000
-#define mmOTG4_OTG_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmOTG4_OTG_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG4_OTG_BLACK_COLOR_DEFAULT 0x00000000
-#define mmOTG4_OTG_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG4_OTG_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmOTG4_OTG_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmOTG4_OTG_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmOTG4_OTG_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC_CNTL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC2_DATA_RG_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC2_DATA_B_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC3_DATA_RG_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC3_DATA_B_DEFAULT 0x00000000
-#define mmOTG4_OTG_CRC_SIG_RED_GREEN_MASK_DEFAULT 0xffffffff
-#define mmOTG4_OTG_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0xffffffff
-#define mmOTG4_OTG_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmOTG4_OTG_3D_STRUCTURE_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmOTG4_OTG_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmOTG4_OTG_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_VSTARTUP_PARAM_DEFAULT 0x00000000
-#define mmOTG4_OTG_VUPDATE_PARAM_DEFAULT 0x00010000
-#define mmOTG4_OTG_VREADY_PARAM_DEFAULT 0x00000000
-#define mmOTG4_OTG_GLOBAL_SYNC_STATUS_DEFAULT 0x00000000
-#define mmOTG4_OTG_MASTER_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG4_OTG_GSL_CONTROL_DEFAULT 0x00020000
-#define mmOTG4_OTG_GSL_WINDOW_X_DEFAULT 0x00000000
-#define mmOTG4_OTG_GSL_WINDOW_Y_DEFAULT 0x00000000
-#define mmOTG4_OTG_VUPDATE_KEEPOUT_DEFAULT 0x00000000
-#define mmOTG4_OTG_GLOBAL_CONTROL0_DEFAULT 0x00000000
-#define mmOTG4_OTG_GLOBAL_CONTROL1_DEFAULT 0x00000000
-#define mmOTG4_OTG_GLOBAL_CONTROL2_DEFAULT 0x00000000
-#define mmOTG4_OTG_GLOBAL_CONTROL3_DEFAULT 0x00000000
-#define mmOTG4_OTG_TRIG_MANUAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_MANUAL_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG4_OTG_DRR_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_REQUEST_CONTROL_DEFAULT 0x00000000
-#define mmOTG4_OTG_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_otg5_dispdec
-#define mmOTG5_OTG_H_TOTAL_DEFAULT 0x00000000
-#define mmOTG5_OTG_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG5_OTG_H_SYNC_A_DEFAULT 0x00000000
-#define mmOTG5_OTG_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG5_OTG_H_TIMING_CNTL_DEFAULT 0x00000000
-#define mmOTG5_OTG_V_TOTAL_DEFAULT 0x00000000
-#define mmOTG5_OTG_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmOTG5_OTG_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmOTG5_OTG_V_TOTAL_MID_DEFAULT 0x00000000
-#define mmOTG5_OTG_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG5_OTG_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG5_OTG_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmOTG5_OTG_V_SYNC_A_DEFAULT 0x00000000
-#define mmOTG5_OTG_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmOTG5_OTG_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmOTG5_OTG_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG5_OTG_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmOTG5_OTG_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmOTG5_OTG_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmOTG5_OTG_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmOTG5_OTG_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmOTG5_OTG_CONTROL_DEFAULT 0x80000110
-#define mmOTG5_OTG_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_PIPE_ABORT_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmOTG5_OTG_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmOTG5_OTG_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmOTG5_OTG_STATUS_DEFAULT 0x00000000
-#define mmOTG5_OTG_STATUS_POSITION_DEFAULT 0x00000000
-#define mmOTG5_OTG_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmOTG5_OTG_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmOTG5_OTG_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmOTG5_OTG_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmOTG5_OTG_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_COUNT_RESET_DEFAULT 0x00000000
-#define mmOTG5_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmOTG5_OTG_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_STEREO_STATUS_DEFAULT 0x00000000
-#define mmOTG5_OTG_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmOTG5_OTG_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmOTG5_OTG_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmOTG5_OTG_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG5_OTG_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmOTG5_OTG_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmOTG5_OTG_MASTER_EN_DEFAULT 0x00000000
-#define mmOTG5_OTG_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmOTG5_OTG_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG5_OTG_BLACK_COLOR_DEFAULT 0x00000000
-#define mmOTG5_OTG_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmOTG5_OTG_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmOTG5_OTG_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmOTG5_OTG_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmOTG5_OTG_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC_CNTL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC2_DATA_RG_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC2_DATA_B_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC3_DATA_RG_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC3_DATA_B_DEFAULT 0x00000000
-#define mmOTG5_OTG_CRC_SIG_RED_GREEN_MASK_DEFAULT 0xffffffff
-#define mmOTG5_OTG_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0xffffffff
-#define mmOTG5_OTG_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmOTG5_OTG_3D_STRUCTURE_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmOTG5_OTG_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmOTG5_OTG_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_VSTARTUP_PARAM_DEFAULT 0x00000000
-#define mmOTG5_OTG_VUPDATE_PARAM_DEFAULT 0x00010000
-#define mmOTG5_OTG_VREADY_PARAM_DEFAULT 0x00000000
-#define mmOTG5_OTG_GLOBAL_SYNC_STATUS_DEFAULT 0x00000000
-#define mmOTG5_OTG_MASTER_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmOTG5_OTG_GSL_CONTROL_DEFAULT 0x00020000
-#define mmOTG5_OTG_GSL_WINDOW_X_DEFAULT 0x00000000
-#define mmOTG5_OTG_GSL_WINDOW_Y_DEFAULT 0x00000000
-#define mmOTG5_OTG_VUPDATE_KEEPOUT_DEFAULT 0x00000000
-#define mmOTG5_OTG_GLOBAL_CONTROL0_DEFAULT 0x00000000
-#define mmOTG5_OTG_GLOBAL_CONTROL1_DEFAULT 0x00000000
-#define mmOTG5_OTG_GLOBAL_CONTROL2_DEFAULT 0x00000000
-#define mmOTG5_OTG_GLOBAL_CONTROL3_DEFAULT 0x00000000
-#define mmOTG5_OTG_TRIG_MANUAL_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_MANUAL_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmOTG5_OTG_DRR_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_REQUEST_CONTROL_DEFAULT 0x00000000
-#define mmOTG5_OTG_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_optc_misc_dispdec
-#define mmDWB_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmGSL_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmOPTC_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmOPTC_MISC_SPARE_REGISTER_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_optc_optc_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON18_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON18_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON18_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON18_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON18_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON18_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON18_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON18_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON18_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dac_dispdec
-#define mmDAC_ENABLE_DEFAULT 0x00000004
-#define mmDAC_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmDAC_CRC_EN_DEFAULT 0x00000000
-#define mmDAC_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDAC_CRC_SIG_RGB_MASK_DEFAULT 0x3fffffff
-#define mmDAC_CRC_SIG_CONTROL_MASK_DEFAULT 0x0000003f
-#define mmDAC_CRC_SIG_RGB_DEFAULT 0x3fffffff
-#define mmDAC_CRC_SIG_CONTROL_DEFAULT 0x0000003f
-#define mmDAC_SYNC_TRISTATE_CONTROL_DEFAULT 0x00000000
-#define mmDAC_STEREOSYNC_SELECT_DEFAULT 0x00000000
-#define mmDAC_AUTODETECT_CONTROL_DEFAULT 0x00070000
-#define mmDAC_AUTODETECT_CONTROL2_DEFAULT 0x0000000b
-#define mmDAC_AUTODETECT_CONTROL3_DEFAULT 0x00000519
-#define mmDAC_AUTODETECT_STATUS_DEFAULT 0x00000000
-#define mmDAC_AUTODETECT_INT_CONTROL_DEFAULT 0x00000000
-#define mmDAC_FORCE_OUTPUT_CNTL_DEFAULT 0x00000000
-#define mmDAC_FORCE_DATA_DEFAULT 0x000001e6
-#define mmDAC_POWERDOWN_DEFAULT 0x01010100
-#define mmDAC_CONTROL_DEFAULT 0x00000000
-#define mmDAC_COMPARATOR_ENABLE_DEFAULT 0x00000000
-#define mmDAC_COMPARATOR_OUTPUT_DEFAULT 0x00000000
-#define mmDAC_PWR_CNTL_DEFAULT 0x00000000
-#define mmDAC_DFT_CONFIG_DEFAULT 0x00000000
-#define mmDAC_FIFO_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dout_i2c_dispdec
-#define mmDC_I2C_CONTROL_DEFAULT 0x00000000
-#define mmDC_I2C_ARBITRATION_DEFAULT 0x00000001
-#define mmDC_I2C_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDC_I2C_SW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC1_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC2_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC3_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC4_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC5_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC6_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC1_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC1_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC2_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC2_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC3_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC3_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC4_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC4_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC5_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC5_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC6_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC6_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_TRANSACTION0_DEFAULT 0x00000000
-#define mmDC_I2C_TRANSACTION1_DEFAULT 0x00000000
-#define mmDC_I2C_TRANSACTION2_DEFAULT 0x00000000
-#define mmDC_I2C_TRANSACTION3_DEFAULT 0x00000000
-#define mmDC_I2C_DATA_DEFAULT 0x00000000
-#define mmDC_I2C_DDCVGA_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDCVGA_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDCVGA_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_EDID_DETECT_CTRL_DEFAULT 0x004001f4
-#define mmDC_I2C_READ_REQUEST_INTERRUPT_DEFAULT 0x40000000
-
-
-// addressBlock: dce_dc_dio_generic_i2c_dispdec
-#define mmGENERIC_I2C_CONTROL_DEFAULT 0x00000000
-#define mmGENERIC_I2C_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmGENERIC_I2C_STATUS_DEFAULT 0x00000000
-#define mmGENERIC_I2C_SPEED_DEFAULT 0x00000002
-#define mmGENERIC_I2C_SETUP_DEFAULT 0x00000000
-#define mmGENERIC_I2C_TRANSACTION_DEFAULT 0x00000000
-#define mmGENERIC_I2C_DATA_DEFAULT 0x00000000
-#define mmGENERIC_I2C_PIN_SELECTION_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dio_misc_dispdec
-#define mmDIO_SCRATCH0_DEFAULT 0x00000000
-#define mmDIO_SCRATCH1_DEFAULT 0x00000000
-#define mmDIO_SCRATCH2_DEFAULT 0x00000000
-#define mmDIO_SCRATCH3_DEFAULT 0x00000000
-#define mmDIO_SCRATCH4_DEFAULT 0x00000000
-#define mmDIO_SCRATCH5_DEFAULT 0x00000000
-#define mmDIO_SCRATCH6_DEFAULT 0x00000000
-#define mmDIO_SCRATCH7_DEFAULT 0x00000000
-#define mmDCE_VCE_CONTROL_DEFAULT 0x00000000
-#define mmDIO_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDIO_MEM_PWR_CTRL_DEFAULT 0x6db6d800
-#define mmDIO_MEM_PWR_CTRL2_DEFAULT 0x00000000
-#define mmDIO_CLK_CNTL_DEFAULT 0x00000000
-#define mmDIO_POWER_MANAGEMENT_CNTL_DEFAULT 0x00000000
-#define mmDIO_STEREOSYNC_SEL_DEFAULT 0x00000000
-#define mmDIO_SOFT_RESET_DEFAULT 0x00000000
-#define mmDIG_SOFT_RESET_DEFAULT 0x00000000
-#define mmDIO_MEM_PWR_STATUS1_DEFAULT 0x00000000
-#define mmDIO_CLK_CNTL2_DEFAULT 0x00000000
-#define mmDIO_CLK_CNTL3_DEFAULT 0x00000000
-#define mmDIO_HDMI_RXSTATUS_TIMER_CONTROL_DEFAULT 0x00000000
-#define mmDIO_PSP_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIO_PSP_INTERRUPT_CLEAR_DEFAULT 0x00000000
-#define mmDIO_GENERIC_INTERRUPT_MESSAGE_DEFAULT 0x00000000
-#define mmDIO_GENERIC_INTERRUPT_CLEAR_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_hpd0_dispdec
-#define mmHPD0_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD0_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD0_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_hpd1_dispdec
-#define mmHPD1_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD1_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD1_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_hpd2_dispdec
-#define mmHPD2_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD2_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD2_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD2_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD2_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_hpd3_dispdec
-#define mmHPD3_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD3_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD3_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD3_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD3_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_hpd4_dispdec
-#define mmHPD4_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD4_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD4_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD4_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD4_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_hpd5_dispdec
-#define mmHPD5_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD5_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD5_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD5_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD5_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dio_dcperfmon_dc_perfmon_dispdec
-#define mmDC_PERFMON19_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON19_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON19_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON19_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON19_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON19_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON19_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON19_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON19_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp_aux0_dispdec
-#define mmDP_AUX0_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX0_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX0_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp_aux1_dispdec
-#define mmDP_AUX1_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX1_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX1_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp_aux2_dispdec
-#define mmDP_AUX2_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX2_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX2_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX2_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX2_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp_aux3_dispdec
-#define mmDP_AUX3_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX3_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX3_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX3_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX3_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp_aux4_dispdec
-#define mmDP_AUX4_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX4_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX4_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX4_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX4_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp_aux5_dispdec
-#define mmDP_AUX5_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX5_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX5_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX5_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX5_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX5_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp_aux6_dispdec
-#define mmDP_AUX6_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX6_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX6_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX6_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX6_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX6_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX6_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dig0_dispdec
-#define mmDIG0_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG0_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG0_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG0_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG0_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG0_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG0_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG0_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG0_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG0_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG0_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG0_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG0_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL3_DEFAULT 0x00000000
-#define mmDIG0_HDMI_DB_CONTROL_DEFAULT 0x00000000
-#define mmDIG0_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG0_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG0_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG0_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG0_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG0_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG0_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG0_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG0_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG0_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG0_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG0_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG0_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG0_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG0_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG0_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG0_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG0_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG0_AFMT_CNTL_DEFAULT 0x00000000
-#define mmDIG0_AFMT_VBI_PACKET_CONTROL1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp0_dispdec
-#define mmDP0_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP0_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP0_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP0_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP0_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP0_DP_VID_N_DEFAULT 0x00002000
-#define mmDP0_DP_VID_M_DEFAULT 0x00000000
-#define mmDP0_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP0_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP0_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP0_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP0_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP0_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP0_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP0_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP0_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP0_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP0_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_TIMING_PARAM1_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_TIMING_PARAM2_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_TIMING_PARAM3_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_TIMING_PARAM4_DEFAULT 0x00000000
-#define mmDP0_DP_MSO_CNTL_DEFAULT 0xfffffff0
-#define mmDP0_DP_MSO_CNTL1_DEFAULT 0xffffffff
-#define mmDP0_DP_DSC_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL2_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL3_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL4_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL5_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL6_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL7_DEFAULT 0x00000000
-#define mmDP0_DP_DB_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_VBID_MISC_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dig1_dispdec
-#define mmDIG1_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG1_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG1_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG1_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG1_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG1_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG1_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG1_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG1_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG1_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG1_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG1_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG1_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL3_DEFAULT 0x00000000
-#define mmDIG1_HDMI_DB_CONTROL_DEFAULT 0x00000000
-#define mmDIG1_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG1_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG1_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG1_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG1_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG1_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG1_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG1_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG1_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG1_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG1_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG1_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG1_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG1_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG1_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG1_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG1_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG1_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG1_AFMT_CNTL_DEFAULT 0x00000000
-#define mmDIG1_AFMT_VBI_PACKET_CONTROL1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp1_dispdec
-#define mmDP1_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP1_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP1_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP1_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP1_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP1_DP_VID_N_DEFAULT 0x00002000
-#define mmDP1_DP_VID_M_DEFAULT 0x00000000
-#define mmDP1_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP1_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP1_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP1_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP1_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP1_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP1_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP1_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP1_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP1_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP1_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_TIMING_PARAM1_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_TIMING_PARAM2_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_TIMING_PARAM3_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_TIMING_PARAM4_DEFAULT 0x00000000
-#define mmDP1_DP_MSO_CNTL_DEFAULT 0xfffffff0
-#define mmDP1_DP_MSO_CNTL1_DEFAULT 0xffffffff
-#define mmDP1_DP_DSC_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL2_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL3_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL4_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL5_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL6_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL7_DEFAULT 0x00000000
-#define mmDP1_DP_DB_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_VBID_MISC_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dig2_dispdec
-#define mmDIG2_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG2_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG2_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG2_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG2_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG2_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG2_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG2_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG2_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG2_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG2_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG2_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG2_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG2_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL3_DEFAULT 0x00000000
-#define mmDIG2_HDMI_DB_CONTROL_DEFAULT 0x00000000
-#define mmDIG2_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG2_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG2_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG2_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG2_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG2_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG2_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG2_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG2_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG2_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG2_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG2_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG2_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG2_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG2_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG2_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG2_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG2_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG2_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG2_AFMT_CNTL_DEFAULT 0x00000000
-#define mmDIG2_AFMT_VBI_PACKET_CONTROL1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp2_dispdec
-#define mmDP2_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP2_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP2_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP2_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP2_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP2_DP_VID_N_DEFAULT 0x00002000
-#define mmDP2_DP_VID_M_DEFAULT 0x00000000
-#define mmDP2_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP2_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP2_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP2_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP2_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP2_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP2_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP2_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP2_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP2_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP2_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP2_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP2_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_TIMING_PARAM1_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_TIMING_PARAM2_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_TIMING_PARAM3_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_TIMING_PARAM4_DEFAULT 0x00000000
-#define mmDP2_DP_MSO_CNTL_DEFAULT 0xfffffff0
-#define mmDP2_DP_MSO_CNTL1_DEFAULT 0xffffffff
-#define mmDP2_DP_DSC_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL2_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL3_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL4_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL5_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL6_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL7_DEFAULT 0x00000000
-#define mmDP2_DP_DB_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_VBID_MISC_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dig3_dispdec
-#define mmDIG3_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG3_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG3_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG3_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG3_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG3_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG3_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG3_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG3_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG3_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG3_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG3_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG3_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG3_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL3_DEFAULT 0x00000000
-#define mmDIG3_HDMI_DB_CONTROL_DEFAULT 0x00000000
-#define mmDIG3_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG3_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG3_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG3_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG3_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG3_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG3_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG3_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG3_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG3_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG3_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG3_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG3_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG3_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG3_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG3_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG3_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG3_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG3_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG3_AFMT_CNTL_DEFAULT 0x00000000
-#define mmDIG3_AFMT_VBI_PACKET_CONTROL1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp3_dispdec
-#define mmDP3_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP3_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP3_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP3_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP3_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP3_DP_VID_N_DEFAULT 0x00002000
-#define mmDP3_DP_VID_M_DEFAULT 0x00000000
-#define mmDP3_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP3_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP3_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP3_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP3_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP3_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP3_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP3_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP3_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP3_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP3_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP3_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP3_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_TIMING_PARAM1_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_TIMING_PARAM2_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_TIMING_PARAM3_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_TIMING_PARAM4_DEFAULT 0x00000000
-#define mmDP3_DP_MSO_CNTL_DEFAULT 0xfffffff0
-#define mmDP3_DP_MSO_CNTL1_DEFAULT 0xffffffff
-#define mmDP3_DP_DSC_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL2_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL3_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL4_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL5_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL6_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL7_DEFAULT 0x00000000
-#define mmDP3_DP_DB_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_VBID_MISC_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dig4_dispdec
-#define mmDIG4_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG4_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG4_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG4_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG4_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG4_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG4_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG4_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG4_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG4_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG4_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG4_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG4_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG4_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL3_DEFAULT 0x00000000
-#define mmDIG4_HDMI_DB_CONTROL_DEFAULT 0x00000000
-#define mmDIG4_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG4_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG4_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG4_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG4_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG4_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG4_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG4_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG4_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG4_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG4_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG4_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG4_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG4_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG4_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG4_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG4_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG4_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG4_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG4_AFMT_CNTL_DEFAULT 0x00000000
-#define mmDIG4_AFMT_VBI_PACKET_CONTROL1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp4_dispdec
-#define mmDP4_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP4_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP4_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP4_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP4_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP4_DP_VID_N_DEFAULT 0x00002000
-#define mmDP4_DP_VID_M_DEFAULT 0x00000000
-#define mmDP4_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP4_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP4_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP4_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP4_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP4_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP4_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP4_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP4_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP4_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP4_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP4_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP4_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_TIMING_PARAM1_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_TIMING_PARAM2_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_TIMING_PARAM3_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_TIMING_PARAM4_DEFAULT 0x00000000
-#define mmDP4_DP_MSO_CNTL_DEFAULT 0xfffffff0
-#define mmDP4_DP_MSO_CNTL1_DEFAULT 0xffffffff
-#define mmDP4_DP_DSC_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL2_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL3_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL4_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL5_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL6_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL7_DEFAULT 0x00000000
-#define mmDP4_DP_DB_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_VBID_MISC_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dig5_dispdec
-#define mmDIG5_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG5_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG5_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG5_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG5_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG5_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG5_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG5_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG5_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG5_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG5_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG5_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG5_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG5_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL3_DEFAULT 0x00000000
-#define mmDIG5_HDMI_DB_CONTROL_DEFAULT 0x00000000
-#define mmDIG5_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG5_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG5_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG5_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG5_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG5_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG5_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG5_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG5_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG5_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG5_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG5_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG5_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG5_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG5_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG5_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG5_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG5_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG5_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG5_AFMT_CNTL_DEFAULT 0x00000000
-#define mmDIG5_AFMT_VBI_PACKET_CONTROL1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp5_dispdec
-#define mmDP5_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP5_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP5_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP5_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP5_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP5_DP_VID_N_DEFAULT 0x00002000
-#define mmDP5_DP_VID_M_DEFAULT 0x00000000
-#define mmDP5_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP5_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP5_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP5_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP5_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP5_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP5_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP5_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP5_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP5_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP5_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP5_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP5_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_TIMING_PARAM1_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_TIMING_PARAM2_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_TIMING_PARAM3_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_TIMING_PARAM4_DEFAULT 0x00000000
-#define mmDP5_DP_MSO_CNTL_DEFAULT 0xfffffff0
-#define mmDP5_DP_MSO_CNTL1_DEFAULT 0xffffffff
-#define mmDP5_DP_DSC_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL2_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL3_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL4_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL5_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL6_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL7_DEFAULT 0x00000000
-#define mmDP5_DP_DB_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_VBID_MISC_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dig6_dispdec
-#define mmDIG6_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG6_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG6_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG6_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG6_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG6_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG6_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG6_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG6_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG6_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG6_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG6_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG6_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG6_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG6_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL3_DEFAULT 0x00000000
-#define mmDIG6_HDMI_DB_CONTROL_DEFAULT 0x00000000
-#define mmDIG6_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG6_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG6_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG6_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG6_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG6_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG6_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG6_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG6_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG6_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG6_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG6_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG6_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG6_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG6_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG6_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG6_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG6_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG6_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG6_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG6_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG6_AFMT_CNTL_DEFAULT 0x00000000
-#define mmDIG6_AFMT_VBI_PACKET_CONTROL1_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dio_dp6_dispdec
-#define mmDP6_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP6_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP6_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP6_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP6_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP6_DP_VID_N_DEFAULT 0x00002000
-#define mmDP6_DP_VID_M_DEFAULT 0x00000000
-#define mmDP6_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP6_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP6_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP6_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP6_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP6_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP6_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP6_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP6_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP6_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP6_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP6_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP6_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_TIMING_PARAM1_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_TIMING_PARAM2_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_TIMING_PARAM3_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_TIMING_PARAM4_DEFAULT 0x00000000
-#define mmDP6_DP_MSO_CNTL_DEFAULT 0xfffffff0
-#define mmDP6_DP_MSO_CNTL1_DEFAULT 0xffffffff
-#define mmDP6_DP_DSC_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL2_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL3_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL4_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL5_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL6_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL7_DEFAULT 0x00000000
-#define mmDP6_DP_DB_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_VBID_MISC_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_dcio_dispdec
-#define mmDC_GENERICA_DEFAULT 0x00000000
-#define mmDC_GENERICB_DEFAULT 0x00000000
-#define mmDC_REF_CLK_CNTL_DEFAULT 0x00000000
-#define mmDC_GPIO_DEBUG_DEFAULT 0x00000101
-#define mmUNIPHYA_LINK_CNTL_DEFAULT 0x01000100
-#define mmUNIPHYA_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYB_LINK_CNTL_DEFAULT 0x01000100
-#define mmUNIPHYB_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYC_LINK_CNTL_DEFAULT 0x01000100
-#define mmUNIPHYC_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYD_LINK_CNTL_DEFAULT 0x01000100
-#define mmUNIPHYD_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYE_LINK_CNTL_DEFAULT 0x01000100
-#define mmUNIPHYE_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYF_LINK_CNTL_DEFAULT 0x01000100
-#define mmUNIPHYF_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYG_LINK_CNTL_DEFAULT 0x01000100
-#define mmUNIPHYG_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmDCIO_WRCMD_DELAY_DEFAULT 0x00033333
-#define mmDC_DVODATA_CONFIG_DEFAULT 0x00000000
-#define mmLVTMA_PWRSEQ_CNTL_DEFAULT 0x00000000
-#define mmLVTMA_PWRSEQ_STATE_DEFAULT 0x00000000
-#define mmLVTMA_PWRSEQ_REF_DIV_DEFAULT 0x00010000
-#define mmLVTMA_PWRSEQ_DELAY1_DEFAULT 0x00000000
-#define mmLVTMA_PWRSEQ_DELAY2_DEFAULT 0x00000000
-#define mmBL_PWM_CNTL_DEFAULT 0x00000000
-#define mmBL_PWM_CNTL2_DEFAULT 0x00000000
-#define mmBL_PWM_PERIOD_CNTL_DEFAULT 0x00000001
-#define mmBL_PWM_GRP1_REG_LOCK_DEFAULT 0x00000000
-#define mmDCIO_GSL_GENLK_PAD_CNTL_DEFAULT 0x00000000
-#define mmDCIO_GSL_SWAPLOCK_PAD_CNTL_DEFAULT 0x00000000
-#define mmDCIO_CLOCK_CNTL_DEFAULT 0x00000000
-#define mmDIO_OTG_EXT_VSYNC_CNTL_DEFAULT 0x00000000
-#define mmDCIO_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCIO_DPHY_SEL_DEFAULT 0x000000e4
-#define mmUNIPHY_IMPCAL_LINKA_DEFAULT 0x0f000000
-#define mmUNIPHY_IMPCAL_LINKB_DEFAULT 0x0f000000
-#define mmUNIPHY_IMPCAL_PERIOD_DEFAULT 0x00000000
-#define mmAUXP_IMPCAL_DEFAULT 0x0a000000
-#define mmAUXN_IMPCAL_DEFAULT 0x04000000
-#define mmDCIO_IMPCAL_CNTL_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_PSW_AB_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_LINKC_DEFAULT 0x0f000000
-#define mmUNIPHY_IMPCAL_LINKD_DEFAULT 0x0f000000
-#define mmDCIO_IMPCAL_CNTL_CD_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_PSW_CD_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_LINKE_DEFAULT 0x0f000000
-#define mmUNIPHY_IMPCAL_LINKF_DEFAULT 0x0f000000
-#define mmDCIO_IMPCAL_CNTL_EF_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_PSW_EF_DEFAULT 0x00000000
-#define mmDCIO_DPCS_TX_INTERRUPT_DEFAULT 0x00000000
-#define mmDCIO_DPCS_RX_INTERRUPT_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE0_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE1_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE2_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE3_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE4_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE5_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE6_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE7_DEFAULT 0x00000000
-#define mmDCIO_USBC_FLIP_EN_SEL_DEFAULT 0x00543210
-
-
-// addressBlock: dce_dc_dcio_dcio_chip_dispdec
-#define mmDC_GPIO_GENERIC_MASK_DEFAULT 0x04444444
-#define mmDC_GPIO_GENERIC_A_DEFAULT 0x00000000
-#define mmDC_GPIO_GENERIC_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_GENERIC_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DVODATA_MASK_DEFAULT 0x00000000
-#define mmDC_GPIO_DVODATA_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DVODATA_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DVODATA_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC1_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC1_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC1_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC1_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC2_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC2_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC2_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC2_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC3_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC3_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC3_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC3_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC4_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC4_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC4_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC4_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC5_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC5_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC5_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC5_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC6_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC6_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC6_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC6_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDCVGA_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDCVGA_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDCVGA_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDCVGA_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_SYNCA_MASK_DEFAULT 0x00004040
-#define mmDC_GPIO_SYNCA_A_DEFAULT 0x00000000
-#define mmDC_GPIO_SYNCA_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_SYNCA_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_GENLK_MASK_DEFAULT 0x10101a10
-#define mmDC_GPIO_GENLK_A_DEFAULT 0x00000000
-#define mmDC_GPIO_GENLK_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_GENLK_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_HPD_MASK_DEFAULT 0x44440440
-#define mmDC_GPIO_HPD_A_DEFAULT 0x00000000
-#define mmDC_GPIO_HPD_EN_DEFAULT 0x22220202
-#define mmDC_GPIO_HPD_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_PWRSEQ_MASK_DEFAULT 0x66404040
-#define mmDC_GPIO_PWRSEQ_A_DEFAULT 0x00000000
-#define mmDC_GPIO_PWRSEQ_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_PWRSEQ_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_PAD_STRENGTH_1_DEFAULT 0x47fc470f
-#define mmDC_GPIO_PAD_STRENGTH_2_DEFAULT 0x00472147
-#define mmPHY_AUX_CNTL_DEFAULT 0x00010001
-#define mmDC_GPIO_I2CPAD_MASK_DEFAULT 0x00000000
-#define mmDC_GPIO_I2CPAD_A_DEFAULT 0x00000000
-#define mmDC_GPIO_I2CPAD_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_I2CPAD_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_I2CPAD_STRENGTH_DEFAULT 0x0000004c
-#define mmDVO_STRENGTH_CONTROL_DEFAULT 0x31116060
-#define mmDVO_VREF_CONTROL_DEFAULT 0x00000000
-#define mmDVO_SKEW_ADJUST_DEFAULT 0x00000000
-#define mmDC_GPIO_I2S_SPDIF_MASK_DEFAULT 0x00000000
-#define mmDC_GPIO_I2S_SPDIF_A_DEFAULT 0x00000000
-#define mmDC_GPIO_I2S_SPDIF_EN_DEFAULT 0x00008000
-#define mmDC_GPIO_I2S_SPDIF_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_I2S_SPDIF_STRENGTH_DEFAULT 0x01021202
-#define mmDC_GPIO_TX12_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_AUX_CTRL_0_DEFAULT 0x00000000
-#define mmDC_GPIO_AUX_CTRL_1_DEFAULT 0x00500000
-#define mmDC_GPIO_AUX_CTRL_2_DEFAULT 0x00000000
-#define mmDC_GPIO_RXEN_DEFAULT 0x007fff7f
-#define mmDC_GPIO_PULLUPEN_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_dcio_dac_dispdec
-#define mmDAC_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDAC_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDAC_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDAC_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_dcio_uniphy0_dispdec
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophycmregs0_dispdec
-#define mmDC_COMBOPHYCMREGS0_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_FUSE3_DEFAULT 0x1c010000
-#define mmDC_COMBOPHYCMREGS0_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS0_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS0_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS0_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS0_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophytxregs0_dispdec
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophypllregs0_dispdec
-#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS0_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS0_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS0_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS0_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS0_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_DFT_OUT_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_PLL_WRAP_CNTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_PLL_WRAP_CNTRL_DEFAULT 0x00010520
-
-
-// addressBlock: dce_dc_dcio_dcio_uniphy1_dispdec
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophycmregs1_dispdec
-#define mmDC_COMBOPHYCMREGS1_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_FUSE3_DEFAULT 0x1c010000
-#define mmDC_COMBOPHYCMREGS1_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS1_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS1_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS1_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS1_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophytxregs1_dispdec
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophypllregs1_dispdec
-#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS1_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS1_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS1_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS1_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS1_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_DFT_OUT_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_PLL_WRAP_CNTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_PLL_WRAP_CNTRL_DEFAULT 0x00010520
-
-
-// addressBlock: dce_dc_dcio_dcio_uniphy2_dispdec
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophycmregs2_dispdec
-#define mmDC_COMBOPHYCMREGS2_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_FUSE3_DEFAULT 0x1c010000
-#define mmDC_COMBOPHYCMREGS2_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS2_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS2_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS2_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS2_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophytxregs2_dispdec
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophypllregs2_dispdec
-#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS2_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS2_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS2_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS2_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS2_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_DFT_OUT_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_PLL_WRAP_CNTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_PLL_WRAP_CNTRL_DEFAULT 0x00010520
-
-
-// addressBlock: dce_dc_dcio_dcio_uniphy3_dispdec
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophycmregs3_dispdec
-#define mmDC_COMBOPHYCMREGS3_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_FUSE3_DEFAULT 0x1c010000
-#define mmDC_COMBOPHYCMREGS3_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS3_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS3_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS3_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS3_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophytxregs3_dispdec
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_combophy_dc_combophypllregs3_dispdec
-#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS3_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS3_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS3_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS3_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS3_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_DFT_OUT_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_PLL_WRAP_CNTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_PLL_WRAP_CNTRL_DEFAULT 0x00010520
-
-
-// addressBlock: dce_dc_dcio_dcio_zcal_dispdec
-#define mmZCAL_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmZCAL_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmZCAL_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmZCAL_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmZCAL_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_zcal_dc_zcalregs_dispdec
-#define mmCOMP_EN_CTL_DEFAULT 0x00080000
-#define mmCOMP_EN_DFX_DEFAULT 0x00000000
-#define mmZCAL_FUSES_DEFAULT 0x00000000
-
-
-// addressBlock: vga_vgaseqind
-#define ixSEQ00_DEFAULT 0x00000003
-#define ixSEQ01_DEFAULT 0x00000021
-#define ixSEQ02_DEFAULT 0x00000000
-#define ixSEQ03_DEFAULT 0x00000000
-#define ixSEQ04_DEFAULT 0x00000000
-
-
-// addressBlock: vga_vgacrtind
-#define ixCRT00_DEFAULT 0x00000000
-#define ixCRT01_DEFAULT 0x00000000
-#define ixCRT02_DEFAULT 0x00000000
-#define ixCRT03_DEFAULT 0x00000000
-#define ixCRT04_DEFAULT 0x00000000
-#define ixCRT05_DEFAULT 0x00000000
-#define ixCRT06_DEFAULT 0x00000000
-#define ixCRT07_DEFAULT 0x00000000
-#define ixCRT08_DEFAULT 0x00000000
-#define ixCRT09_DEFAULT 0x00000000
-#define ixCRT0A_DEFAULT 0x00000000
-#define ixCRT0B_DEFAULT 0x00000000
-#define ixCRT0C_DEFAULT 0x00000000
-#define ixCRT0D_DEFAULT 0x00000000
-#define ixCRT0E_DEFAULT 0x00000000
-#define ixCRT0F_DEFAULT 0x00000000
-#define ixCRT10_DEFAULT 0x00000000
-#define ixCRT11_DEFAULT 0x00000000
-#define ixCRT12_DEFAULT 0x00000000
-#define ixCRT13_DEFAULT 0x00000000
-#define ixCRT14_DEFAULT 0x00000000
-#define ixCRT15_DEFAULT 0x00000000
-#define ixCRT16_DEFAULT 0x00000000
-#define ixCRT17_DEFAULT 0x00000000
-#define ixCRT18_DEFAULT 0x00000000
-#define ixCRT1E_DEFAULT 0x00000000
-#define ixCRT1F_DEFAULT 0x00000000
-#define ixCRT22_DEFAULT 0x00000000
-
-
-// addressBlock: vga_vgagrphind
-#define ixGRA00_DEFAULT 0x00000000
-#define ixGRA01_DEFAULT 0x00000000
-#define ixGRA02_DEFAULT 0x00000000
-#define ixGRA03_DEFAULT 0x00000000
-#define ixGRA04_DEFAULT 0x00000000
-#define ixGRA05_DEFAULT 0x00000000
-#define ixGRA06_DEFAULT 0x00000000
-#define ixGRA07_DEFAULT 0x00000000
-#define ixGRA08_DEFAULT 0x00000000
-
-
-// addressBlock: vga_vgaattrind
-#define ixATTR00_DEFAULT 0x00000000
-#define ixATTR01_DEFAULT 0x00000000
-#define ixATTR02_DEFAULT 0x00000000
-#define ixATTR03_DEFAULT 0x00000000
-#define ixATTR04_DEFAULT 0x00000000
-#define ixATTR05_DEFAULT 0x00000000
-#define ixATTR06_DEFAULT 0x00000000
-#define ixATTR07_DEFAULT 0x00000000
-#define ixATTR08_DEFAULT 0x00000000
-#define ixATTR09_DEFAULT 0x00000000
-#define ixATTR0A_DEFAULT 0x00000000
-#define ixATTR0B_DEFAULT 0x00000000
-#define ixATTR0C_DEFAULT 0x00000000
-#define ixATTR0D_DEFAULT 0x00000000
-#define ixATTR0E_DEFAULT 0x00000000
-#define ixATTR0F_DEFAULT 0x00000000
-#define ixATTR10_DEFAULT 0x00000000
-#define ixATTR11_DEFAULT 0x00000000
-#define ixATTR12_DEFAULT 0x00000000
-#define ixATTR13_DEFAULT 0x00000000
-#define ixATTR14_DEFAULT 0x00000000
-
-
-// addressBlock: azendpoint_f2codecind
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x000000b4
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000020
-#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000040
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x00000010
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3_DEFAULT 0x00000056
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4_DEFAULT 0x00000018
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH_DEFAULT 0x00000000
-
-
-// addressBlock: azendpoint_descriptorind
-#define ixAUDIO_DESCRIPTOR0_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-
-
-// addressBlock: azendpoint_sinkinfoind
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION0_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION1_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION2_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION3_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION4_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION5_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION6_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION7_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION8_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION9_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION10_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION11_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION12_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION13_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION14_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION15_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION16_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION17_DEFAULT 0x00000000
-
-
-// addressBlock: azf0controller_azinputcrc0resultind
-#define ixAZALIA_INPUT_CRC0_CHANNEL0_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL1_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL2_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL3_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL4_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL5_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL6_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL7_DEFAULT 0x00000000
-
-
-// addressBlock: azf0controller_azinputcrc1resultind
-#define ixAZALIA_INPUT_CRC1_CHANNEL0_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL1_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL2_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL3_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL4_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL5_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL6_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL7_DEFAULT 0x00000000
-
-
-// addressBlock: azf0controller_azcrc0resultind
-#define ixAZALIA_CRC0_CHANNEL0_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL1_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL2_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL3_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL4_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL5_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL6_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL7_DEFAULT 0x00000000
-
-
-// addressBlock: azf0controller_azcrc1resultind
-#define ixAZALIA_CRC1_CHANNEL0_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL1_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL2_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL3_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL4_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL5_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL6_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL7_DEFAULT 0x00000000
-
-
-// addressBlock: azinputendpoint_f2codecind
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000020
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x000000f0
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3_DEFAULT 0x000000d6
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4_DEFAULT 0x00000018
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000010
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000000
-
-
-// addressBlock: azroot_f2codecind
-#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE_DEFAULT 0x00000003
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2_DEFAULT 0x00000001
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3_DEFAULT 0x000000aa
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream0_streamind
-#define ixAZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream1_streamind
-#define ixAZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream2_streamind
-#define ixAZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream3_streamind
-#define ixAZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream4_streamind
-#define ixAZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream5_streamind
-#define ixAZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream6_streamind
-#define ixAZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream7_streamind
-#define ixAZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream8_streamind
-#define ixAZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream9_streamind
-#define ixAZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream10_streamind
-#define ixAZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream11_streamind
-#define ixAZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream12_streamind
-#define ixAZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream13_streamind
-#define ixAZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream14_streamind
-#define ixAZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream15_streamind
-#define ixAZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint0_endpointind
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint1_endpointind
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint2_endpointind
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint3_endpointind
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint4_endpointind
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint5_endpointind
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint6_endpointind
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint7_endpointind
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint0_inputendpointind
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint1_inputendpointind
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint2_inputendpointind
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint3_inputendpointind
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint4_inputendpointind
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint5_inputendpointind
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint6_inputendpointind
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint7_inputendpointind
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_default.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_default.h
deleted file mode 100644
index 582f1a6..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/GC/gc_9_1_default.h
+++ /dev/null
@@ -1,4005 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _gc_9_1_DEFAULT_HEADER
-#define _gc_9_1_DEFAULT_HEADER
-
-
-// addressBlock: gc_grbmdec
-#define mmGRBM_CNTL_DEFAULT 0x00000018
-#define mmGRBM_SKEW_CNTL_DEFAULT 0x00000020
-#define mmGRBM_STATUS2_DEFAULT 0x00000000
-#define mmGRBM_PWR_CNTL_DEFAULT 0x00000000
-#define mmGRBM_STATUS_DEFAULT 0x00000000
-#define mmGRBM_STATUS_SE0_DEFAULT 0x00000000
-#define mmGRBM_STATUS_SE1_DEFAULT 0x00000000
-#define mmGRBM_SOFT_RESET_DEFAULT 0x00000000
-#define mmGRBM_CGTT_CLK_CNTL_DEFAULT 0x00000100
-#define mmGRBM_GFX_CLKEN_CNTL_DEFAULT 0x00001008
-#define mmGRBM_WAIT_IDLE_CLOCKS_DEFAULT 0x00000030
-#define mmGRBM_STATUS_SE2_DEFAULT 0x00000000
-#define mmGRBM_STATUS_SE3_DEFAULT 0x00000000
-#define mmGRBM_READ_ERROR_DEFAULT 0x00000000
-#define mmGRBM_READ_ERROR2_DEFAULT 0x00000000
-#define mmGRBM_INT_CNTL_DEFAULT 0x00000000
-#define mmGRBM_TRAP_OP_DEFAULT 0x00000000
-#define mmGRBM_TRAP_ADDR_DEFAULT 0x00000000
-#define mmGRBM_TRAP_ADDR_MSK_DEFAULT 0x0003ffff
-#define mmGRBM_TRAP_WD_DEFAULT 0x00000000
-#define mmGRBM_TRAP_WD_MSK_DEFAULT 0xffffffff
-#define mmGRBM_DSM_BYPASS_DEFAULT 0x00000000
-#define mmGRBM_WRITE_ERROR_DEFAULT 0x00000000
-#define mmGRBM_IOV_ERROR_DEFAULT 0x00000000
-#define mmGRBM_CHIP_REVISION_DEFAULT 0x00000000
-#define mmGRBM_GFX_CNTL_DEFAULT 0x00000000
-#define mmGRBM_RSMU_CFG_DEFAULT 0x00011000
-#define mmGRBM_IH_CREDIT_DEFAULT 0x00010000
-#define mmGRBM_PWR_CNTL2_DEFAULT 0x00010000
-#define mmGRBM_UTCL2_INVAL_RANGE_START_DEFAULT 0x00002891
-#define mmGRBM_UTCL2_INVAL_RANGE_END_DEFAULT 0x000028ea
-#define mmGRBM_RSMU_READ_ERROR_DEFAULT 0x00000000
-#define mmGRBM_CHICKEN_BITS_DEFAULT 0x00000000
-#define mmGRBM_NOWHERE_DEFAULT 0x00000000
-#define mmGRBM_SCRATCH_REG0_DEFAULT 0x00000000
-#define mmGRBM_SCRATCH_REG1_DEFAULT 0x00000000
-#define mmGRBM_SCRATCH_REG2_DEFAULT 0x00000000
-#define mmGRBM_SCRATCH_REG3_DEFAULT 0x00000000
-#define mmGRBM_SCRATCH_REG4_DEFAULT 0x00000000
-#define mmGRBM_SCRATCH_REG5_DEFAULT 0x00000000
-#define mmGRBM_SCRATCH_REG6_DEFAULT 0x00000000
-#define mmGRBM_SCRATCH_REG7_DEFAULT 0x00000000
-
-
-// addressBlock: gc_cpdec
-#define mmCP_CPC_STATUS_DEFAULT 0x00000000
-#define mmCP_CPC_BUSY_STAT_DEFAULT 0x00000000
-#define mmCP_CPC_STALLED_STAT1_DEFAULT 0x00000000
-#define mmCP_CPF_STATUS_DEFAULT 0x00000000
-#define mmCP_CPF_BUSY_STAT_DEFAULT 0x00000000
-#define mmCP_CPF_STALLED_STAT1_DEFAULT 0x00000000
-#define mmCP_CPC_GRBM_FREE_COUNT_DEFAULT 0x00000008
-#define mmCP_MEC_CNTL_DEFAULT 0x50000000
-#define mmCP_MEC_ME1_HEADER_DUMP_DEFAULT 0x00000000
-#define mmCP_MEC_ME2_HEADER_DUMP_DEFAULT 0x00000000
-#define mmCP_CPC_SCRATCH_INDEX_DEFAULT 0x00000000
-#define mmCP_CPC_SCRATCH_DATA_DEFAULT 0x00000000
-#define mmCP_CPF_GRBM_FREE_COUNT_DEFAULT 0x00000004
-#define mmCP_CPC_HALT_HYST_COUNT_DEFAULT 0x00000002
-#define mmCP_PRT_LOD_STATS_CNTL0_DEFAULT 0x00000000
-#define mmCP_PRT_LOD_STATS_CNTL1_DEFAULT 0x00000000
-#define mmCP_PRT_LOD_STATS_CNTL2_DEFAULT 0x00000000
-#define mmCP_PRT_LOD_STATS_CNTL3_DEFAULT 0x00000000
-#define mmCP_CE_COMPARE_COUNT_DEFAULT 0x00000000
-#define mmCP_CE_DE_COUNT_DEFAULT 0x00000000
-#define mmCP_DE_CE_COUNT_DEFAULT 0x00000000
-#define mmCP_DE_LAST_INVAL_COUNT_DEFAULT 0x00000000
-#define mmCP_DE_DE_COUNT_DEFAULT 0x00000000
-#define mmCP_STALLED_STAT3_DEFAULT 0x00000000
-#define mmCP_STALLED_STAT1_DEFAULT 0x00000000
-#define mmCP_STALLED_STAT2_DEFAULT 0x00000000
-#define mmCP_BUSY_STAT_DEFAULT 0x00000000
-#define mmCP_STAT_DEFAULT 0x00000000
-#define mmCP_ME_HEADER_DUMP_DEFAULT 0x00000000
-#define mmCP_PFP_HEADER_DUMP_DEFAULT 0x00000000
-#define mmCP_GRBM_FREE_COUNT_DEFAULT 0x00080808
-#define mmCP_CE_HEADER_DUMP_DEFAULT 0x00000000
-#define mmCP_PFP_INSTR_PNTR_DEFAULT 0x00000000
-#define mmCP_ME_INSTR_PNTR_DEFAULT 0x00000000
-#define mmCP_CE_INSTR_PNTR_DEFAULT 0x00000000
-#define mmCP_MEC1_INSTR_PNTR_DEFAULT 0x00000000
-#define mmCP_MEC2_INSTR_PNTR_DEFAULT 0x00000000
-#define mmCP_CSF_STAT_DEFAULT 0x00000000
-#define mmCP_ME_CNTL_DEFAULT 0x15000000
-#define mmCP_CNTX_STAT_DEFAULT 0x00000000
-#define mmCP_ME_PREEMPTION_DEFAULT 0x00000000
-#define mmCP_ROQ_THRESHOLDS_DEFAULT 0x00003010
-#define mmCP_MEQ_STQ_THRESHOLD_DEFAULT 0x00000010
-#define mmCP_RB2_RPTR_DEFAULT 0x00000000
-#define mmCP_RB1_RPTR_DEFAULT 0x00000000
-#define mmCP_RB0_RPTR_DEFAULT 0x00000000
-#define mmCP_RB_RPTR_DEFAULT 0x00000000
-#define mmCP_RB_WPTR_DELAY_DEFAULT 0x00000000
-#define mmCP_RB_WPTR_POLL_CNTL_DEFAULT 0x00400100
-#define mmCP_ROQ1_THRESHOLDS_DEFAULT 0x30101010
-#define mmCP_ROQ2_THRESHOLDS_DEFAULT 0x40403030
-#define mmCP_STQ_THRESHOLDS_DEFAULT 0x00804000
-#define mmCP_QUEUE_THRESHOLDS_DEFAULT 0x00002b16
-#define mmCP_MEQ_THRESHOLDS_DEFAULT 0x00008040
-#define mmCP_ROQ_AVAIL_DEFAULT 0x00000000
-#define mmCP_STQ_AVAIL_DEFAULT 0x00000000
-#define mmCP_ROQ2_AVAIL_DEFAULT 0x00000000
-#define mmCP_MEQ_AVAIL_DEFAULT 0x00000000
-#define mmCP_CMD_INDEX_DEFAULT 0x00000000
-#define mmCP_CMD_DATA_DEFAULT 0x00000000
-#define mmCP_ROQ_RB_STAT_DEFAULT 0x00000000
-#define mmCP_ROQ_IB1_STAT_DEFAULT 0x00000000
-#define mmCP_ROQ_IB2_STAT_DEFAULT 0x00000000
-#define mmCP_STQ_STAT_DEFAULT 0x00000000
-#define mmCP_STQ_WR_STAT_DEFAULT 0x00000000
-#define mmCP_MEQ_STAT_DEFAULT 0x00000000
-#define mmCP_CEQ1_AVAIL_DEFAULT 0x00000000
-#define mmCP_CEQ2_AVAIL_DEFAULT 0x00000000
-#define mmCP_CE_ROQ_RB_STAT_DEFAULT 0x00000000
-#define mmCP_CE_ROQ_IB1_STAT_DEFAULT 0x00000000
-#define mmCP_CE_ROQ_IB2_STAT_DEFAULT 0x00000000
-
-
-// addressBlock: gc_padec
-#define mmVGT_VTX_VECT_EJECT_REG_DEFAULT 0x0000007d
-#define mmVGT_DMA_DATA_FIFO_DEPTH_DEFAULT 0x00040180
-#define mmVGT_DMA_REQ_FIFO_DEPTH_DEFAULT 0x00000020
-#define mmVGT_DRAW_INIT_FIFO_DEPTH_DEFAULT 0x00000020
-#define mmVGT_LAST_COPY_STATE_DEFAULT 0x00000000
-#define mmVGT_CACHE_INVALIDATION_DEFAULT 0x09000000
-#define mmVGT_STRMOUT_DELAY_DEFAULT 0x00092410
-#define mmVGT_FIFO_DEPTHS_DEFAULT 0x08000040
-#define mmVGT_GS_VERTEX_REUSE_DEFAULT 0x00000010
-#define mmVGT_MC_LAT_CNTL_DEFAULT 0x000000fe
-#define mmIA_CNTL_STATUS_DEFAULT 0x00000000
-#define mmVGT_CNTL_STATUS_DEFAULT 0x00000000
-#define mmWD_CNTL_STATUS_DEFAULT 0x00000000
-#define mmCC_GC_PRIM_CONFIG_DEFAULT 0x0e020000
-#define mmGC_USER_PRIM_CONFIG_DEFAULT 0x00000000
-#define mmWD_QOS_DEFAULT 0x00000000
-#define mmWD_UTCL1_CNTL_DEFAULT 0x00000080
-#define mmWD_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmIA_UTCL1_CNTL_DEFAULT 0x00000080
-#define mmIA_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmVGT_SYS_CONFIG_DEFAULT 0x00000011
-#define mmVGT_VS_MAX_WAVE_ID_DEFAULT 0x0000007f
-#define mmVGT_GS_MAX_WAVE_ID_DEFAULT 0x000000ff
-#define mmGFX_PIPE_CONTROL_DEFAULT 0x00000000
-#define mmCC_GC_SHADER_ARRAY_CONFIG_DEFAULT 0xf8000000
-#define mmGC_USER_SHADER_ARRAY_CONFIG_DEFAULT 0x00000000
-#define mmVGT_DMA_PRIMITIVE_TYPE_DEFAULT 0x00000000
-#define mmVGT_DMA_CONTROL_DEFAULT 0x000000ff
-#define mmVGT_DMA_LS_HS_CONFIG_DEFAULT 0x00000000
-#define mmWD_BUF_RESOURCE_1_DEFAULT 0x00000000
-#define mmWD_BUF_RESOURCE_2_DEFAULT 0x00000000
-#define mmPA_CL_CNTL_STATUS_DEFAULT 0x00000000
-#define mmPA_CL_ENHANCE_DEFAULT 0x00000007
-#define mmPA_SU_CNTL_STATUS_DEFAULT 0x00000000
-#define mmPA_SC_FIFO_DEPTH_CNTL_DEFAULT 0x00000018
-#define mmPA_SC_P3D_TRAP_SCREEN_HV_LOCK_DEFAULT 0x00000000
-#define mmPA_SC_HP3D_TRAP_SCREEN_HV_LOCK_DEFAULT 0x00000000
-#define mmPA_SC_TRAP_SCREEN_HV_LOCK_DEFAULT 0x00000000
-#define mmPA_SC_FORCE_EOV_MAX_CNTS_DEFAULT 0x00ffffff
-#define mmPA_SC_BINNER_EVENT_CNTL_0_DEFAULT 0x842a4402
-#define mmPA_SC_BINNER_EVENT_CNTL_1_DEFAULT 0x8a000008
-#define mmPA_SC_BINNER_EVENT_CNTL_2_DEFAULT 0x9118aaa8
-#define mmPA_SC_BINNER_EVENT_CNTL_3_DEFAULT 0x82400025
-#define mmPA_SC_BINNER_TIMEOUT_COUNTER_DEFAULT 0x00000000
-#define mmPA_SC_BINNER_PERF_CNTL_0_DEFAULT 0x00000000
-#define mmPA_SC_BINNER_PERF_CNTL_1_DEFAULT 0x00000000
-#define mmPA_SC_BINNER_PERF_CNTL_2_DEFAULT 0x00000000
-#define mmPA_SC_BINNER_PERF_CNTL_3_DEFAULT 0x00000000
-#define mmPA_SC_FIFO_SIZE_DEFAULT 0x00000000
-#define mmPA_SC_IF_FIFO_SIZE_DEFAULT 0x00000000
-#define mmPA_SC_PKR_WAVE_TABLE_CNTL_DEFAULT 0x00000000
-#define mmPA_UTCL1_CNTL1_DEFAULT 0x00000600
-#define mmPA_UTCL1_CNTL2_DEFAULT 0x00000000
-#define mmPA_SIDEBAND_REQUEST_DELAYS_DEFAULT 0x08000020
-#define mmPA_SC_ENHANCE_DEFAULT 0x00000001
-#define mmPA_SC_ENHANCE_1_DEFAULT 0x00040000
-#define mmPA_SC_DSM_CNTL_DEFAULT 0x00000000
-#define mmPA_SC_TILE_STEERING_CREST_OVERRIDE_DEFAULT 0x00000000
-
-
-// addressBlock: gc_sqdec
-#define mmSQ_CONFIG_DEFAULT 0x01180000
-#define mmSQC_CONFIG_DEFAULT 0x010a2000
-#define mmLDS_CONFIG_DEFAULT 0x00000000
-#define mmSQ_RANDOM_WAVE_PRI_DEFAULT 0x0000007f
-#define mmSQ_REG_CREDITS_DEFAULT 0x00000820
-#define mmSQ_FIFO_SIZES_DEFAULT 0x00000f01
-#define mmSQ_DSM_CNTL_DEFAULT 0x00000000
-#define mmSQ_DSM_CNTL2_DEFAULT 0x00000000
-#define mmSQ_RUNTIME_CONFIG_DEFAULT 0x00000000
-#define mmSH_MEM_BASES_DEFAULT 0x00000000
-#define mmSH_MEM_CONFIG_DEFAULT 0x00000000
-#define mmCC_GC_SHADER_RATE_CONFIG_DEFAULT 0x00000000
-#define mmGC_USER_SHADER_RATE_CONFIG_DEFAULT 0x00000000
-#define mmSQ_INTERRUPT_AUTO_MASK_DEFAULT 0x00ffffff
-#define mmSQ_INTERRUPT_MSG_CTRL_DEFAULT 0x00000000
-#define mmSQ_UTCL1_CNTL1_DEFAULT 0x00000580
-#define mmSQ_UTCL1_CNTL2_DEFAULT 0x00000000
-#define mmSQ_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmSQ_SHADER_TBA_LO_DEFAULT 0x00000000
-#define mmSQ_SHADER_TBA_HI_DEFAULT 0x00000000
-#define mmSQ_SHADER_TMA_LO_DEFAULT 0x00000000
-#define mmSQ_SHADER_TMA_HI_DEFAULT 0x00000000
-#define mmSQC_DSM_CNTL_DEFAULT 0x00000000
-#define mmSQC_DSM_CNTLA_DEFAULT 0x00000000
-#define mmSQC_DSM_CNTLB_DEFAULT 0x00000000
-#define mmSQC_DSM_CNTL2_DEFAULT 0x00000000
-#define mmSQC_DSM_CNTL2A_DEFAULT 0x00000000
-#define mmSQC_DSM_CNTL2B_DEFAULT 0x00000000
-#define mmSQC_EDC_FUE_CNTL_DEFAULT 0x00000000
-#define mmSQC_EDC_CNT2_DEFAULT 0x00000000
-#define mmSQC_EDC_CNT3_DEFAULT 0x00000000
-#define mmSQ_REG_TIMESTAMP_DEFAULT 0x00000000
-#define mmSQ_CMD_TIMESTAMP_DEFAULT 0x00000000
-#define mmSQ_IND_INDEX_DEFAULT 0x00000000
-#define mmSQ_IND_DATA_DEFAULT 0x00000000
-#define mmSQ_CMD_DEFAULT 0x00000000
-#define mmSQ_TIME_HI_DEFAULT 0x00000000
-#define mmSQ_TIME_LO_DEFAULT 0x00000000
-#define mmSQ_DS_0_DEFAULT 0x00000000
-#define mmSQ_DS_1_DEFAULT 0x00000000
-#define mmSQ_EXP_0_DEFAULT 0x00000000
-#define mmSQ_EXP_1_DEFAULT 0x00000000
-#define mmSQ_FLAT_0_DEFAULT 0x00000000
-#define mmSQ_FLAT_1_DEFAULT 0x00000000
-#define mmSQ_GLBL_0_DEFAULT 0x00000000
-#define mmSQ_GLBL_1_DEFAULT 0x00000000
-#define mmSQ_INST_DEFAULT 0x00000000
-#define mmSQ_MIMG_0_DEFAULT 0x00000000
-#define mmSQ_MIMG_1_DEFAULT 0x00000000
-#define mmSQ_MTBUF_0_DEFAULT 0x00000000
-#define mmSQ_MTBUF_1_DEFAULT 0x00000000
-#define mmSQ_MUBUF_0_DEFAULT 0x00000000
-#define mmSQ_MUBUF_1_DEFAULT 0x00000000
-#define mmSQ_SCRATCH_0_DEFAULT 0x00000000
-#define mmSQ_SCRATCH_1_DEFAULT 0x00000000
-#define mmSQ_SMEM_0_DEFAULT 0x00000000
-#define mmSQ_SMEM_1_DEFAULT 0x00000000
-#define mmSQ_SOP1_DEFAULT 0x00000000
-#define mmSQ_SOP2_DEFAULT 0x00000000
-#define mmSQ_SOPC_DEFAULT 0x00000000
-#define mmSQ_SOPK_DEFAULT 0x00000000
-#define mmSQ_SOPP_DEFAULT 0x00000000
-#define mmSQ_VINTRP_DEFAULT 0x00000000
-#define mmSQ_VOP1_DEFAULT 0x00000000
-#define mmSQ_VOP2_DEFAULT 0x00000000
-#define mmSQ_VOP3P_0_DEFAULT 0x00000000
-#define mmSQ_VOP3P_1_DEFAULT 0x00000000
-#define mmSQ_VOP3_0_DEFAULT 0x00000000
-#define mmSQ_VOP3_0_SDST_ENC_DEFAULT 0x00000000
-#define mmSQ_VOP3_1_DEFAULT 0x00000000
-#define mmSQ_VOPC_DEFAULT 0x00000000
-#define mmSQ_VOP_DPP_DEFAULT 0x00000000
-#define mmSQ_VOP_SDWA_DEFAULT 0x00000000
-#define mmSQ_VOP_SDWA_SDST_ENC_DEFAULT 0x00000000
-#define mmSQ_LB_CTR_CTRL_DEFAULT 0x00000000
-#define mmSQ_LB_DATA0_DEFAULT 0x00000000
-#define mmSQ_LB_DATA1_DEFAULT 0x00000000
-#define mmSQ_LB_DATA2_DEFAULT 0x00000000
-#define mmSQ_LB_DATA3_DEFAULT 0x00000000
-#define mmSQ_LB_CTR_SEL_DEFAULT 0x00000000
-#define mmSQ_LB_CTR0_CU_DEFAULT 0xffffffff
-#define mmSQ_LB_CTR1_CU_DEFAULT 0xffffffff
-#define mmSQ_LB_CTR2_CU_DEFAULT 0xffffffff
-#define mmSQ_LB_CTR3_CU_DEFAULT 0xffffffff
-#define mmSQC_EDC_CNT_DEFAULT 0x00000000
-#define mmSQ_EDC_SEC_CNT_DEFAULT 0x00000000
-#define mmSQ_EDC_DED_CNT_DEFAULT 0x00000000
-#define mmSQ_EDC_INFO_DEFAULT 0x00000000
-#define mmSQ_EDC_CNT_DEFAULT 0x00000000
-#define mmSQ_EDC_FUE_CNTL_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_CMN_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_EVENT_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_INST_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_INST_PC_1_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_1_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_ISSUE_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_MISC_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_PERF_1_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_REG_1_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_REG_2_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_REG_CS_1_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_REG_CS_2_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_1_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_WAVE_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_WAVE_START_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_INST_PC_2_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_INST_USERDATA_2_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_PERF_2_OF_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_WORD_TIMESTAMP_2_OF_2_DEFAULT 0x00000000
-#define mmSQ_WREXEC_EXEC_HI_DEFAULT 0x00000000
-#define mmSQ_WREXEC_EXEC_LO_DEFAULT 0x00000000
-#define mmSQ_BUF_RSRC_WORD0_DEFAULT 0x00000000
-#define mmSQ_BUF_RSRC_WORD1_DEFAULT 0x00000000
-#define mmSQ_BUF_RSRC_WORD2_DEFAULT 0x00000000
-#define mmSQ_BUF_RSRC_WORD3_DEFAULT 0x00000000
-#define mmSQ_IMG_RSRC_WORD0_DEFAULT 0x00000000
-#define mmSQ_IMG_RSRC_WORD1_DEFAULT 0x00000000
-#define mmSQ_IMG_RSRC_WORD2_DEFAULT 0x00000000
-#define mmSQ_IMG_RSRC_WORD3_DEFAULT 0x00000000
-#define mmSQ_IMG_RSRC_WORD4_DEFAULT 0x00000000
-#define mmSQ_IMG_RSRC_WORD5_DEFAULT 0x00000000
-#define mmSQ_IMG_RSRC_WORD6_DEFAULT 0x00000000
-#define mmSQ_IMG_RSRC_WORD7_DEFAULT 0x00000000
-#define mmSQ_IMG_SAMP_WORD0_DEFAULT 0x00000000
-#define mmSQ_IMG_SAMP_WORD1_DEFAULT 0x00000000
-#define mmSQ_IMG_SAMP_WORD2_DEFAULT 0x00000000
-#define mmSQ_IMG_SAMP_WORD3_DEFAULT 0x00000000
-#define mmSQ_FLAT_SCRATCH_WORD0_DEFAULT 0x00000000
-#define mmSQ_FLAT_SCRATCH_WORD1_DEFAULT 0x00000000
-#define mmSQ_M0_GPR_IDX_WORD_DEFAULT 0x00000000
-#define mmSQC_ICACHE_UTCL1_CNTL1_DEFAULT 0x00000480
-#define mmSQC_ICACHE_UTCL1_CNTL2_DEFAULT 0x00000000
-#define mmSQC_DCACHE_UTCL1_CNTL1_DEFAULT 0x00000500
-#define mmSQC_DCACHE_UTCL1_CNTL2_DEFAULT 0x00000000
-#define mmSQC_ICACHE_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmSQC_DCACHE_UTCL1_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: gc_shsdec
-#define mmSX_DEBUG_1_DEFAULT 0x00000020
-#define mmSPI_PS_MAX_WAVE_ID_DEFAULT 0x020000ff
-#define mmSPI_START_PHASE_DEFAULT 0x00000000
-#define mmSPI_GFX_CNTL_DEFAULT 0x00000000
-#define mmSPI_DSM_CNTL_DEFAULT 0x00000000
-#define mmSPI_DSM_CNTL2_DEFAULT 0x00000000
-#define mmSPI_EDC_CNT_DEFAULT 0x00000000
-#define mmSPI_CONFIG_PS_CU_EN_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_CNTL_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_LIMIT_0_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_LIMIT_1_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_LIMIT_2_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_LIMIT_3_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_LIMIT_4_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_LIMIT_5_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_LIMIT_6_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_LIMIT_7_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_LIMIT_8_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_LIMIT_9_DEFAULT 0x00000100
-#define mmSPI_WF_LIFETIME_STATUS_0_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_1_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_2_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_3_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_4_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_5_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_6_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_7_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_8_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_9_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_10_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_11_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_12_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_13_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_14_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_15_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_16_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_17_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_18_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_19_DEFAULT 0x00000000
-#define mmSPI_WF_LIFETIME_STATUS_20_DEFAULT 0x00000000
-#define mmSPI_LB_CTR_CTRL_DEFAULT 0x00000000
-#define mmSPI_LB_CU_MASK_DEFAULT 0x0000ffff
-#define mmSPI_LB_DATA_REG_DEFAULT 0x00000000
-#define mmSPI_PG_ENABLE_STATIC_CU_MASK_DEFAULT 0x0000ffff
-#define mmSPI_GDS_CREDITS_DEFAULT 0x00001080
-#define mmSPI_SX_EXPORT_BUFFER_SIZES_DEFAULT 0x08000800
-#define mmSPI_SX_SCOREBOARD_BUFFER_SIZES_DEFAULT 0x00200040
-#define mmSPI_CSQ_WF_ACTIVE_STATUS_DEFAULT 0x00000000
-#define mmSPI_CSQ_WF_ACTIVE_COUNT_0_DEFAULT 0x00000000
-#define mmSPI_CSQ_WF_ACTIVE_COUNT_1_DEFAULT 0x00000000
-#define mmSPI_CSQ_WF_ACTIVE_COUNT_2_DEFAULT 0x00000000
-#define mmSPI_CSQ_WF_ACTIVE_COUNT_3_DEFAULT 0x00000000
-#define mmSPI_CSQ_WF_ACTIVE_COUNT_4_DEFAULT 0x00000000
-#define mmSPI_CSQ_WF_ACTIVE_COUNT_5_DEFAULT 0x00000000
-#define mmSPI_CSQ_WF_ACTIVE_COUNT_6_DEFAULT 0x00000000
-#define mmSPI_CSQ_WF_ACTIVE_COUNT_7_DEFAULT 0x00000000
-#define mmSPI_LB_DATA_WAVES_DEFAULT 0x00000000
-#define mmSPI_LB_DATA_PERCU_WAVE_HSGS_DEFAULT 0x00000000
-#define mmSPI_LB_DATA_PERCU_WAVE_VSPS_DEFAULT 0x00000000
-#define mmSPI_LB_DATA_PERCU_WAVE_CS_DEFAULT 0x00000000
-#define mmSPI_P0_TRAP_SCREEN_PSBA_LO_DEFAULT 0x00000000
-#define mmSPI_P0_TRAP_SCREEN_PSBA_HI_DEFAULT 0x00000000
-#define mmSPI_P0_TRAP_SCREEN_PSMA_LO_DEFAULT 0x00000000
-#define mmSPI_P0_TRAP_SCREEN_PSMA_HI_DEFAULT 0x00000000
-#define mmSPI_P0_TRAP_SCREEN_GPR_MIN_DEFAULT 0x00000000
-#define mmSPI_P1_TRAP_SCREEN_PSBA_LO_DEFAULT 0x00000000
-#define mmSPI_P1_TRAP_SCREEN_PSBA_HI_DEFAULT 0x00000000
-#define mmSPI_P1_TRAP_SCREEN_PSMA_LO_DEFAULT 0x00000000
-#define mmSPI_P1_TRAP_SCREEN_PSMA_HI_DEFAULT 0x00000000
-#define mmSPI_P1_TRAP_SCREEN_GPR_MIN_DEFAULT 0x00000000
-
-
-// addressBlock: gc_tpdec
-#define mmTD_CNTL_DEFAULT 0x00000000
-#define mmTD_STATUS_DEFAULT 0x00000000
-#define mmTD_DSM_CNTL_DEFAULT 0x00000000
-#define mmTD_DSM_CNTL2_DEFAULT 0x00000000
-#define mmTD_SCRATCH_DEFAULT 0x00000000
-#define mmTA_CNTL_DEFAULT 0x8004d850
-#define mmTA_CNTL_AUX_DEFAULT 0x00000000
-#define mmTA_RESERVED_010C_DEFAULT 0x00000000
-#define mmTA_GRAD_ADJ_DEFAULT 0x40000040
-#define mmTA_STATUS_DEFAULT 0x00000000
-#define mmTA_SCRATCH_DEFAULT 0x00000000
-
-
-// addressBlock: gc_gdsdec
-#define mmGDS_CONFIG_DEFAULT 0x00000000
-#define mmGDS_CNTL_STATUS_DEFAULT 0x00000000
-#define mmGDS_ENHANCE2_DEFAULT 0x00000000
-#define mmGDS_PROTECTION_FAULT_DEFAULT 0x00000000
-#define mmGDS_VM_PROTECTION_FAULT_DEFAULT 0x00000000
-#define mmGDS_EDC_CNT_DEFAULT 0x00000000
-#define mmGDS_EDC_GRBM_CNT_DEFAULT 0x00000000
-#define mmGDS_EDC_OA_DED_DEFAULT 0x00000000
-#define mmGDS_DSM_CNTL_DEFAULT 0x00000000
-#define mmGDS_EDC_OA_PHY_CNT_DEFAULT 0x00000000
-#define mmGDS_EDC_OA_PIPE_CNT_DEFAULT 0x00000000
-#define mmGDS_DSM_CNTL2_DEFAULT 0x00000000
-#define mmGDS_WD_GDS_CSB_DEFAULT 0x00000000
-
-
-// addressBlock: gc_rbdec
-#define mmDB_DEBUG_DEFAULT 0x00000000
-#define mmDB_DEBUG2_DEFAULT 0x00000000
-#define mmDB_DEBUG3_DEFAULT 0x00000000
-#define mmDB_DEBUG4_DEFAULT 0x00000000
-#define mmDB_CREDIT_LIMIT_DEFAULT 0x00000000
-#define mmDB_WATERMARKS_DEFAULT 0x01020204
-#define mmDB_SUBTILE_CONTROL_DEFAULT 0x00000000
-#define mmDB_FREE_CACHELINES_DEFAULT 0x00000000
-#define mmDB_FIFO_DEPTH1_DEFAULT 0x00000000
-#define mmDB_FIFO_DEPTH2_DEFAULT 0x00000000
-#define mmDB_EXCEPTION_CONTROL_DEFAULT 0x00000000
-#define mmDB_RING_CONTROL_DEFAULT 0x00000001
-#define mmDB_MEM_ARB_WATERMARKS_DEFAULT 0x04040404
-#define mmDB_RMI_CACHE_POLICY_DEFAULT 0x0f0f0f07
-#define mmDB_DFSM_CONFIG_DEFAULT 0x00007f00
-#define mmDB_DFSM_WATERMARK_DEFAULT 0x00640064
-#define mmDB_DFSM_TILES_IN_FLIGHT_DEFAULT 0x05dc03e8
-#define mmDB_DFSM_PRIMS_IN_FLIGHT_DEFAULT 0x00fa00c8
-#define mmDB_DFSM_WATCHDOG_DEFAULT 0x000f4240
-#define mmDB_DFSM_FLUSH_ENABLE_DEFAULT 0x000003ff
-#define mmDB_DFSM_FLUSH_AUX_EVENT_DEFAULT 0x00000000
-#define mmCC_RB_REDUNDANCY_DEFAULT 0x00000000
-#define mmCC_RB_BACKEND_DISABLE_DEFAULT 0x00000000
-#define mmGB_ADDR_CONFIG_DEFAULT 0x26010011
-#define mmGB_BACKEND_MAP_DEFAULT 0x33221100
-#define mmGB_GPU_ID_DEFAULT 0x00000000
-#define mmCC_RB_DAISY_CHAIN_DEFAULT 0x76543210
-#define mmGB_ADDR_CONFIG_READ_DEFAULT 0x26010011
-#define mmGB_TILE_MODE0_DEFAULT 0x00000000
-#define mmGB_TILE_MODE1_DEFAULT 0x00000000
-#define mmGB_TILE_MODE2_DEFAULT 0x00000000
-#define mmGB_TILE_MODE3_DEFAULT 0x00000000
-#define mmGB_TILE_MODE4_DEFAULT 0x00000000
-#define mmGB_TILE_MODE5_DEFAULT 0x00000000
-#define mmGB_TILE_MODE6_DEFAULT 0x00000000
-#define mmGB_TILE_MODE7_DEFAULT 0x00000000
-#define mmGB_TILE_MODE8_DEFAULT 0x00000000
-#define mmGB_TILE_MODE9_DEFAULT 0x00000000
-#define mmGB_TILE_MODE10_DEFAULT 0x00000000
-#define mmGB_TILE_MODE11_DEFAULT 0x00000000
-#define mmGB_TILE_MODE12_DEFAULT 0x00000000
-#define mmGB_TILE_MODE13_DEFAULT 0x00000000
-#define mmGB_TILE_MODE14_DEFAULT 0x00000000
-#define mmGB_TILE_MODE15_DEFAULT 0x00000000
-#define mmGB_TILE_MODE16_DEFAULT 0x00000000
-#define mmGB_TILE_MODE17_DEFAULT 0x00000000
-#define mmGB_TILE_MODE18_DEFAULT 0x00000000
-#define mmGB_TILE_MODE19_DEFAULT 0x00000000
-#define mmGB_TILE_MODE20_DEFAULT 0x00000000
-#define mmGB_TILE_MODE21_DEFAULT 0x00000000
-#define mmGB_TILE_MODE22_DEFAULT 0x00000000
-#define mmGB_TILE_MODE23_DEFAULT 0x00000000
-#define mmGB_TILE_MODE24_DEFAULT 0x00000000
-#define mmGB_TILE_MODE25_DEFAULT 0x00000000
-#define mmGB_TILE_MODE26_DEFAULT 0x00000000
-#define mmGB_TILE_MODE27_DEFAULT 0x00000000
-#define mmGB_TILE_MODE28_DEFAULT 0x00000000
-#define mmGB_TILE_MODE29_DEFAULT 0x00000000
-#define mmGB_TILE_MODE30_DEFAULT 0x00000000
-#define mmGB_TILE_MODE31_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE0_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE1_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE2_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE3_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE4_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE5_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE6_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE7_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE8_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE9_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE10_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE11_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE12_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE13_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE14_DEFAULT 0x00000000
-#define mmGB_MACROTILE_MODE15_DEFAULT 0x00000000
-#define mmCB_HW_CONTROL_DEFAULT 0x00014107
-#define mmCB_HW_CONTROL_1_DEFAULT 0x10000000
-#define mmCB_HW_CONTROL_2_DEFAULT 0x00000000
-#define mmCB_HW_CONTROL_3_DEFAULT 0x00000000
-#define mmCB_HW_MEM_ARBITER_RD_DEFAULT 0x00029000
-#define mmCB_HW_MEM_ARBITER_WR_DEFAULT 0x00029000
-#define mmCB_DCC_CONFIG_DEFAULT 0x04000000
-#define mmGC_USER_RB_REDUNDANCY_DEFAULT 0x00000000
-#define mmGC_USER_RB_BACKEND_DISABLE_DEFAULT 0x00000000
-
-
-// addressBlock: gc_ea_gceadec2
-#define mmGCEA_EDC_CNT_DEFAULT 0x00000000
-#define mmGCEA_EDC_CNT2_DEFAULT 0x00000000
-#define mmGCEA_DSM_CNTL_DEFAULT 0x00000000
-#define mmGCEA_DSM_CNTLA_DEFAULT 0x00000000
-#define mmGCEA_DSM_CNTLB_DEFAULT 0x00000000
-#define mmGCEA_DSM_CNTL2_DEFAULT 0x00000000
-#define mmGCEA_DSM_CNTL2A_DEFAULT 0x00000000
-#define mmGCEA_DSM_CNTL2B_DEFAULT 0x00000000
-#define mmGCEA_TCC_XBR_CREDITS_DEFAULT 0x637f637f
-#define mmGCEA_TCC_XBR_MAXBURST_DEFAULT 0x00003333
-#define mmGCEA_PROBE_CNTL_DEFAULT 0x00000000
-#define mmGCEA_PROBE_MAP_DEFAULT 0x0000aaaa
-#define mmGCEA_ERR_STATUS_DEFAULT 0x00000000
-#define mmGCEA_MISC2_DEFAULT 0x00000000
-#define mmGCEA_SDP_BACKDOOR_CMDCREDITS0_DEFAULT 0x00000000
-#define mmGCEA_SDP_BACKDOOR_CMDCREDITS1_DEFAULT 0x00000000
-#define mmGCEA_SDP_BACKDOOR_DATACREDITS0_DEFAULT 0x00000000
-#define mmGCEA_SDP_BACKDOOR_DATACREDITS1_DEFAULT 0x00000000
-#define mmGCEA_SDP_BACKDOOR_MISCCREDITS_DEFAULT 0x00000000
-#define mmGCEA_SDP_ENABLE_DEFAULT 0x00000000
-
-
-// addressBlock: gc_rmi_rmidec
-#define mmRMI_GENERAL_CNTL_DEFAULT 0x00000000
-#define mmRMI_GENERAL_CNTL1_DEFAULT 0x00001a03
-#define mmRMI_GENERAL_STATUS_DEFAULT 0x00000000
-#define mmRMI_SUBBLOCK_STATUS0_DEFAULT 0x00000000
-#define mmRMI_SUBBLOCK_STATUS1_DEFAULT 0x00000000
-#define mmRMI_SUBBLOCK_STATUS2_DEFAULT 0x00000000
-#define mmRMI_SUBBLOCK_STATUS3_DEFAULT 0x00000000
-#define mmRMI_XBAR_CONFIG_DEFAULT 0x00000f00
-#define mmRMI_PROBE_POP_LOGIC_CNTL_DEFAULT 0x000300c0
-#define mmRMI_UTC_XNACK_N_MISC_CNTL_DEFAULT 0x00000564
-#define mmRMI_DEMUX_CNTL_DEFAULT 0x02000200
-#define mmRMI_UTCL1_CNTL1_DEFAULT 0x00020000
-#define mmRMI_UTCL1_CNTL2_DEFAULT 0x00010000
-#define mmRMI_UTC_UNIT_CONFIG_DEFAULT 0x00000000
-#define mmRMI_TCIW_FORMATTER0_CNTL_DEFAULT 0x4404001e
-#define mmRMI_TCIW_FORMATTER1_CNTL_DEFAULT 0x4404001e
-#define mmRMI_SCOREBOARD_CNTL_DEFAULT 0x001ffe00
-#define mmRMI_SCOREBOARD_STATUS0_DEFAULT 0x00000000
-#define mmRMI_SCOREBOARD_STATUS1_DEFAULT 0x00000000
-#define mmRMI_SCOREBOARD_STATUS2_DEFAULT 0x00000000
-#define mmRMI_XBAR_ARBITER_CONFIG_DEFAULT 0x08000800
-#define mmRMI_XBAR_ARBITER_CONFIG_1_DEFAULT 0xffffffff
-#define mmRMI_CLOCK_CNTRL_DEFAULT 0x04208822
-#define mmRMI_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmRMI_SPARE_DEFAULT 0x00000001
-#define mmRMI_SPARE_1_DEFAULT 0x00000000
-#define mmRMI_SPARE_2_DEFAULT 0x00000000
-
-
-// addressBlock: gc_dbgu_gfx_dbgudec
-#define mmport_a_addr_DEFAULT 0x00000000
-#define mmport_a_data_lo_DEFAULT 0x00000000
-#define mmport_a_data_hi_DEFAULT 0x00000000
-#define mmport_b_addr_DEFAULT 0x00000000
-#define mmport_b_data_lo_DEFAULT 0x00000000
-#define mmport_b_data_hi_DEFAULT 0x00000000
-#define mmport_c_addr_DEFAULT 0x00000000
-#define mmport_c_data_lo_DEFAULT 0x00000000
-#define mmport_c_data_hi_DEFAULT 0x00000000
-#define mmport_d_addr_DEFAULT 0x00000000
-#define mmport_d_data_lo_DEFAULT 0x00000000
-#define mmport_d_data_hi_DEFAULT 0x00000000
-
-
-// addressBlock: gc_utcl2_atcl2dec
-#define mmATC_L2_CNTL_DEFAULT 0x000001c9
-#define mmATC_L2_CNTL2_DEFAULT 0x00000100
-#define mmATC_L2_CACHE_DATA0_DEFAULT 0x00000000
-#define mmATC_L2_CACHE_DATA1_DEFAULT 0x00000000
-#define mmATC_L2_CACHE_DATA2_DEFAULT 0x00000000
-#define mmATC_L2_CNTL3_DEFAULT 0x000001f8
-#define mmATC_L2_STATUS_DEFAULT 0x00000000
-#define mmATC_L2_STATUS2_DEFAULT 0x00000000
-#define mmATC_L2_MISC_CG_DEFAULT 0x00000200
-#define mmATC_L2_MEM_POWER_LS_DEFAULT 0x00000208
-#define mmATC_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080
-
-
-// addressBlock: gc_utcl2_vml2pfdec
-#define mmVM_L2_CNTL_DEFAULT 0x00080602
-#define mmVM_L2_CNTL2_DEFAULT 0x00000000
-#define mmVM_L2_CNTL3_DEFAULT 0x80100007
-#define mmVM_L2_STATUS_DEFAULT 0x00000000
-#define mmVM_DUMMY_PAGE_FAULT_CNTL_DEFAULT 0x00000090
-#define mmVM_DUMMY_PAGE_FAULT_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_DUMMY_PAGE_FAULT_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_CNTL_DEFAULT 0x3ffffffc
-#define mmVM_L2_PROTECTION_FAULT_CNTL2_DEFAULT 0x000a0000
-#define mmVM_L2_PROTECTION_FAULT_MM_CNTL3_DEFAULT 0xffffffff
-#define mmVM_L2_PROTECTION_FAULT_MM_CNTL4_DEFAULT 0xffffffff
-#define mmVM_L2_PROTECTION_FAULT_STATUS_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_DEFAULT 0x00000000
-#define mmVM_L2_CNTL4_DEFAULT 0x000000c1
-#define mmVM_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000000
-#define mmVM_L2_BANK_SELECT_RESERVED_CID_DEFAULT 0x00000000
-#define mmVM_L2_BANK_SELECT_RESERVED_CID2_DEFAULT 0x00000000
-#define mmVM_L2_CACHE_PARITY_CNTL_DEFAULT 0x00000000
-#define mmVM_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080
-
-
-// addressBlock: gc_utcl2_vml2vcdec
-#define mmVM_CONTEXT0_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT1_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT2_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT3_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT4_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT5_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT6_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT7_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT8_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT9_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT10_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT11_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT12_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT13_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT14_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT15_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXTS_DISABLE_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG0_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG1_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG2_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG3_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG4_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG5_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG6_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG7_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG8_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG9_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG10_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG11_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG12_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG13_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG14_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG15_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG16_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG17_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG0_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG1_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG2_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG3_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG4_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG5_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG6_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG7_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG8_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG9_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG10_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG11_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG12_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG13_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG14_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG15_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG16_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG17_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG0_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG1_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG2_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG3_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG4_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG5_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG6_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG7_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG8_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG9_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG10_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG11_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG12_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG13_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG14_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG15_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG16_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG17_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-
-
-// addressBlock: gc_utcl2_vmsharedpfdec
-#define mmMC_VM_NB_MMIOBASE_DEFAULT 0x00000000
-#define mmMC_VM_NB_MMIOLIMIT_DEFAULT 0x00000000
-#define mmMC_VM_NB_PCI_CTRL_DEFAULT 0x00000000
-#define mmMC_VM_NB_PCI_ARB_DEFAULT 0x00000008
-#define mmMC_VM_NB_TOP_OF_DRAM_SLOT1_DEFAULT 0x00000000
-#define mmMC_VM_NB_LOWER_TOP_OF_DRAM2_DEFAULT 0x00000000
-#define mmMC_VM_NB_UPPER_TOP_OF_DRAM2_DEFAULT 0x00000000
-#define mmMC_VM_FB_OFFSET_DEFAULT 0x00000000
-#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmMC_VM_STEERING_DEFAULT 0x00000001
-#define mmMC_SHARED_VIRT_RESET_REQ_DEFAULT 0x00000000
-#define mmMC_MEM_POWER_LS_DEFAULT 0x00000208
-#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_START_DEFAULT 0x00000000
-#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_END_DEFAULT 0x00000000
-#define mmMC_VM_APT_CNTL_DEFAULT 0x00000000
-#define mmMC_VM_LOCAL_HBM_ADDRESS_START_DEFAULT 0x00000000
-#define mmMC_VM_LOCAL_HBM_ADDRESS_END_DEFAULT 0x000fffff
-#define mmMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: gc_utcl2_vmsharedvcdec
-#define mmMC_VM_FB_LOCATION_BASE_DEFAULT 0x00000000
-#define mmMC_VM_FB_LOCATION_TOP_DEFAULT 0x00000000
-#define mmMC_VM_AGP_TOP_DEFAULT 0x00000000
-#define mmMC_VM_AGP_BOT_DEFAULT 0x00000000
-#define mmMC_VM_AGP_BASE_DEFAULT 0x00000000
-#define mmMC_VM_SYSTEM_APERTURE_LOW_ADDR_DEFAULT 0x00000000
-#define mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_TLB_CNTL_DEFAULT 0x00002501
-
-
-// addressBlock: gc_ea_gceadec
-#define mmGCEA_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0xeaaa9580
-#define mmGCEA_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0xeaaa9580
-#define mmGCEA_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0xeaaa9580
-#define mmGCEA_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0xeaaa9580
-#define mmGCEA_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000924
-#define mmGCEA_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000324
-#define mmGCEA_DRAM_RD_LAZY_DEFAULT 0x00000924
-#define mmGCEA_DRAM_WR_LAZY_DEFAULT 0x00000924
-#define mmGCEA_DRAM_RD_CAM_CNTL_DEFAULT 0x06db3333
-#define mmGCEA_DRAM_WR_CAM_CNTL_DEFAULT 0x06db3333
-#define mmGCEA_DRAM_PAGE_BURST_DEFAULT 0x20082008
-#define mmGCEA_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
-#define mmGCEA_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
-#define mmGCEA_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmGCEA_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmGCEA_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
-#define mmGCEA_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
-#define mmGCEA_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
-#define mmGCEA_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
-#define mmGCEA_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmGCEA_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmGCEA_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmGCEA_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmGCEA_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmGCEA_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmGCEA_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
-#define mmGCEA_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
-#define mmGCEA_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
-#define mmGCEA_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
-#define mmGCEA_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
-#define mmGCEA_ADDRNORM_HOLE_CNTL_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC_BANK_CFG_DEFAULT 0x000001ef
-#define mmGCEA_ADDRDEC_MISC_CFG_DEFAULT 0x3ffff000
-#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
-#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
-#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
-#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
-#define mmGCEA_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
-#define mmGCEA_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
-#define mmGCEA_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
-#define mmGCEA_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
-#define mmGCEA_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
-#define mmGCEA_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
-#define mmGCEA_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
-#define mmGCEA_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
-#define mmGCEA_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
-#define mmGCEA_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
-#define mmGCEA_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
-#define mmGCEA_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
-#define mmGCEA_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
-#define mmGCEA_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
-#define mmGCEA_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
-#define mmGCEA_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
-#define mmGCEA_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
-#define mmGCEA_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
-#define mmGCEA_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
-#define mmGCEA_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
-#define mmGCEA_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
-#define mmGCEA_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
-#define mmGCEA_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
-#define mmGCEA_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
-#define mmGCEA_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
-#define mmGCEA_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
-#define mmGCEA_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
-#define mmGCEA_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
-#define mmGCEA_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
-#define mmGCEA_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
-#define mmGCEA_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
-#define mmGCEA_IO_RD_CLI2GRP_MAP0_DEFAULT 0xeaaa9580
-#define mmGCEA_IO_RD_CLI2GRP_MAP1_DEFAULT 0xeaaa9580
-#define mmGCEA_IO_WR_CLI2GRP_MAP0_DEFAULT 0xeaaa9580
-#define mmGCEA_IO_WR_CLI2GRP_MAP1_DEFAULT 0xeaaa9580
-#define mmGCEA_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
-#define mmGCEA_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
-#define mmGCEA_IO_GROUP_BURST_DEFAULT 0x1f031f03
-#define mmGCEA_IO_RD_PRI_AGE_DEFAULT 0x00db6249
-#define mmGCEA_IO_WR_PRI_AGE_DEFAULT 0x00db6249
-#define mmGCEA_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmGCEA_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmGCEA_IO_RD_PRI_FIXED_DEFAULT 0x00000924
-#define mmGCEA_IO_WR_PRI_FIXED_DEFAULT 0x00000924
-#define mmGCEA_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
-#define mmGCEA_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
-#define mmGCEA_IO_RD_PRI_URGENCY_MASK_DEFAULT 0xffffffff
-#define mmGCEA_IO_WR_PRI_URGENCY_MASK_DEFAULT 0xffffffff
-#define mmGCEA_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmGCEA_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmGCEA_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmGCEA_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmGCEA_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmGCEA_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmGCEA_SDP_ARB_DRAM_DEFAULT 0x00102040
-#define mmGCEA_SDP_ARB_FINAL_DEFAULT 0x00007fff
-#define mmGCEA_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
-#define mmGCEA_SDP_IO_PRIORITY_DEFAULT 0x00000000
-#define mmGCEA_SDP_CREDITS_DEFAULT 0x000100bf
-#define mmGCEA_SDP_TAG_RESERVE0_DEFAULT 0x00000000
-#define mmGCEA_SDP_TAG_RESERVE1_DEFAULT 0x00000000
-#define mmGCEA_SDP_VCC_RESERVE0_DEFAULT 0x00000000
-#define mmGCEA_SDP_VCC_RESERVE1_DEFAULT 0x00000000
-#define mmGCEA_SDP_VCD_RESERVE0_DEFAULT 0x00000000
-#define mmGCEA_SDP_VCD_RESERVE1_DEFAULT 0x00000000
-#define mmGCEA_SDP_REQ_CNTL_DEFAULT 0x0000000f
-#define mmGCEA_MISC_DEFAULT 0x0de03ff0
-#define mmGCEA_LATENCY_SAMPLING_DEFAULT 0x00000000
-#define mmGCEA_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmGCEA_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmGCEA_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmGCEA_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmGCEA_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-
-
-// addressBlock: gc_tcdec
-#define mmTCP_INVALIDATE_DEFAULT 0x00000000
-#define mmTCP_STATUS_DEFAULT 0x00000000
-#define mmTCP_CNTL_DEFAULT 0x2f9c0000
-#define mmTCP_CHAN_STEER_LO_DEFAULT 0x76543210
-#define mmTCP_CHAN_STEER_HI_DEFAULT 0xfedcba98
-#define mmTCP_ADDR_CONFIG_DEFAULT 0x000000f3
-#define mmTCP_CREDIT_DEFAULT 0x804001c0
-#define mmTCP_BUFFER_ADDR_HASH_CNTL_DEFAULT 0x00000000
-#define mmTCP_EDC_CNT_DEFAULT 0x00000000
-#define mmTC_CFG_L1_LOAD_POLICY0_DEFAULT 0x00000000
-#define mmTC_CFG_L1_LOAD_POLICY1_DEFAULT 0x00000000
-#define mmTC_CFG_L1_STORE_POLICY_DEFAULT 0x00000000
-#define mmTC_CFG_L2_LOAD_POLICY0_DEFAULT 0x00000000
-#define mmTC_CFG_L2_LOAD_POLICY1_DEFAULT 0x00000000
-#define mmTC_CFG_L2_STORE_POLICY0_DEFAULT 0x00000000
-#define mmTC_CFG_L2_STORE_POLICY1_DEFAULT 0x00000000
-#define mmTC_CFG_L2_ATOMIC_POLICY_DEFAULT 0x00000000
-#define mmTC_CFG_L1_VOLATILE_DEFAULT 0x00000000
-#define mmTC_CFG_L2_VOLATILE_DEFAULT 0x00000000
-#define mmTCI_STATUS_DEFAULT 0x00000000
-#define mmTCI_CNTL_1_DEFAULT 0x40080022
-#define mmTCI_CNTL_2_DEFAULT 0x00000041
-#define mmTCC_CTRL_DEFAULT 0xf30fff7f
-#define mmTCC_CTRL2_DEFAULT 0x0000000f
-#define mmTCC_EDC_CNT_DEFAULT 0x00000000
-#define mmTCC_EDC_CNT2_DEFAULT 0x00000000
-#define mmTCC_REDUNDANCY_DEFAULT 0x00000000
-#define mmTCC_EXE_DISABLE_DEFAULT 0x00000000
-#define mmTCC_DSM_CNTL_DEFAULT 0x00000000
-#define mmTCC_DSM_CNTLA_DEFAULT 0x00000000
-#define mmTCC_DSM_CNTL2_DEFAULT 0x00000000
-#define mmTCC_DSM_CNTL2A_DEFAULT 0x00000000
-#define mmTCC_DSM_CNTL2B_DEFAULT 0x00000000
-#define mmTCC_WBINVL2_DEFAULT 0x00000010
-#define mmTCC_SOFT_RESET_DEFAULT 0x00000000
-#define mmTCA_CTRL_DEFAULT 0x00000088
-#define mmTCA_BURST_MASK_DEFAULT 0xffffffff
-#define mmTCA_BURST_CTRL_DEFAULT 0x00000007
-#define mmTCA_DSM_CNTL_DEFAULT 0x00000000
-#define mmTCA_DSM_CNTL2_DEFAULT 0x00000000
-#define mmTCA_EDC_CNT_DEFAULT 0x00000000
-
-
-// addressBlock: gc_shdec
-#define mmSPI_SHADER_PGM_RSRC3_PS_DEFAULT 0x0000ffff
-#define mmSPI_SHADER_PGM_LO_PS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_HI_PS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC1_PS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC2_PS_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_0_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_1_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_2_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_3_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_4_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_5_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_6_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_7_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_8_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_9_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_10_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_11_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_12_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_13_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_14_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_15_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_16_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_17_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_18_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_19_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_20_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_21_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_22_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_23_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_24_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_25_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_26_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_27_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_28_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_29_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_30_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_PS_31_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC3_VS_DEFAULT 0x0000ffff
-#define mmSPI_SHADER_LATE_ALLOC_VS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_LO_VS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_HI_VS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC1_VS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC2_VS_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_0_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_1_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_2_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_3_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_4_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_5_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_6_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_7_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_8_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_9_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_10_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_11_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_12_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_13_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_14_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_15_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_16_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_17_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_18_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_19_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_20_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_21_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_22_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_23_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_24_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_25_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_26_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_27_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_28_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_29_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_30_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_VS_31_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC2_GS_VS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC4_GS_DEFAULT 0x00000800
-#define mmSPI_SHADER_USER_DATA_ADDR_LO_GS_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ADDR_HI_GS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_LO_ES_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_HI_ES_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC3_GS_DEFAULT 0x0000fffe
-#define mmSPI_SHADER_PGM_LO_GS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_HI_GS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC1_GS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC2_GS_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_0_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_1_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_2_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_3_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_4_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_5_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_6_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_7_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_8_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_9_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_10_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_11_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_12_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_13_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_14_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_15_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_16_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_17_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_18_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_19_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_20_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_21_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_22_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_23_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_24_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_25_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_26_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_27_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_28_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_29_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_30_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ES_31_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC4_HS_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ADDR_LO_HS_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_ADDR_HI_HS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_LO_LS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_HI_LS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC3_HS_DEFAULT 0xffff0000
-#define mmSPI_SHADER_PGM_LO_HS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_HI_HS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC1_HS_DEFAULT 0x00000000
-#define mmSPI_SHADER_PGM_RSRC2_HS_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_0_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_1_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_2_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_3_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_4_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_5_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_6_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_7_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_8_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_9_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_10_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_11_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_12_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_13_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_14_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_15_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_16_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_17_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_18_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_19_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_20_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_21_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_22_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_23_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_24_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_25_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_26_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_27_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_28_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_29_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_30_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_LS_31_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_0_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_1_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_2_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_3_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_4_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_5_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_6_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_7_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_8_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_9_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_10_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_11_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_12_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_13_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_14_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_15_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_16_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_17_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_18_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_19_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_20_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_21_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_22_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_23_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_24_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_25_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_26_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_27_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_28_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_29_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_30_DEFAULT 0x00000000
-#define mmSPI_SHADER_USER_DATA_COMMON_31_DEFAULT 0x00000000
-#define mmCOMPUTE_DISPATCH_INITIATOR_DEFAULT 0x00000000
-#define mmCOMPUTE_DIM_X_DEFAULT 0x00000000
-#define mmCOMPUTE_DIM_Y_DEFAULT 0x00000000
-#define mmCOMPUTE_DIM_Z_DEFAULT 0x00000000
-#define mmCOMPUTE_START_X_DEFAULT 0x00000000
-#define mmCOMPUTE_START_Y_DEFAULT 0x00000000
-#define mmCOMPUTE_START_Z_DEFAULT 0x00000000
-#define mmCOMPUTE_NUM_THREAD_X_DEFAULT 0x00000000
-#define mmCOMPUTE_NUM_THREAD_Y_DEFAULT 0x00000000
-#define mmCOMPUTE_NUM_THREAD_Z_DEFAULT 0x00000000
-#define mmCOMPUTE_PIPELINESTAT_ENABLE_DEFAULT 0x00000001
-#define mmCOMPUTE_PERFCOUNT_ENABLE_DEFAULT 0x00000000
-#define mmCOMPUTE_PGM_LO_DEFAULT 0x00000000
-#define mmCOMPUTE_PGM_HI_DEFAULT 0x00000000
-#define mmCOMPUTE_DISPATCH_PKT_ADDR_LO_DEFAULT 0x00000000
-#define mmCOMPUTE_DISPATCH_PKT_ADDR_HI_DEFAULT 0x00000000
-#define mmCOMPUTE_DISPATCH_SCRATCH_BASE_LO_DEFAULT 0x00000000
-#define mmCOMPUTE_DISPATCH_SCRATCH_BASE_HI_DEFAULT 0x00000000
-#define mmCOMPUTE_PGM_RSRC1_DEFAULT 0x00000000
-#define mmCOMPUTE_PGM_RSRC2_DEFAULT 0x00000000
-#define mmCOMPUTE_VMID_DEFAULT 0x00000000
-#define mmCOMPUTE_RESOURCE_LIMITS_DEFAULT 0x00000000
-#define mmCOMPUTE_STATIC_THREAD_MGMT_SE0_DEFAULT 0xffffffff
-#define mmCOMPUTE_STATIC_THREAD_MGMT_SE1_DEFAULT 0xffffffff
-#define mmCOMPUTE_TMPRING_SIZE_DEFAULT 0x00000000
-#define mmCOMPUTE_STATIC_THREAD_MGMT_SE2_DEFAULT 0xffffffff
-#define mmCOMPUTE_STATIC_THREAD_MGMT_SE3_DEFAULT 0xffffffff
-#define mmCOMPUTE_RESTART_X_DEFAULT 0x00000000
-#define mmCOMPUTE_RESTART_Y_DEFAULT 0x00000000
-#define mmCOMPUTE_RESTART_Z_DEFAULT 0x00000000
-#define mmCOMPUTE_THREAD_TRACE_ENABLE_DEFAULT 0x00000000
-#define mmCOMPUTE_MISC_RESERVED_DEFAULT 0x00000002
-#define mmCOMPUTE_DISPATCH_ID_DEFAULT 0x00000000
-#define mmCOMPUTE_THREADGROUP_ID_DEFAULT 0x00000000
-#define mmCOMPUTE_RELAUNCH_DEFAULT 0x00000000
-#define mmCOMPUTE_WAVE_RESTORE_ADDR_LO_DEFAULT 0x00000000
-#define mmCOMPUTE_WAVE_RESTORE_ADDR_HI_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_0_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_1_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_2_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_3_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_4_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_5_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_6_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_7_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_8_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_9_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_10_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_11_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_12_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_13_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_14_DEFAULT 0x00000000
-#define mmCOMPUTE_USER_DATA_15_DEFAULT 0x00000000
-#define mmCOMPUTE_NOWHERE_DEFAULT 0x00000000
-
-
-// addressBlock: gc_cppdec
-#define mmCP_DFY_CNTL_DEFAULT 0x00000000
-#define mmCP_DFY_STAT_DEFAULT 0x00000000
-#define mmCP_DFY_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_DFY_ADDR_LO_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_0_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_1_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_2_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_3_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_4_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_5_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_6_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_7_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_8_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_9_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_10_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_11_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_12_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_13_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_14_DEFAULT 0x00000000
-#define mmCP_DFY_DATA_15_DEFAULT 0x00000000
-#define mmCP_DFY_CMD_DEFAULT 0x00000000
-#define mmCP_EOPQ_WAIT_TIME_DEFAULT 0x0000052c
-#define mmCP_CPC_MGCG_SYNC_CNTL_DEFAULT 0x00001020
-#define mmCPC_INT_INFO_DEFAULT 0x00000000
-#define mmCP_VIRT_STATUS_DEFAULT 0x00000000
-#define mmCPC_INT_ADDR_DEFAULT 0x00000000
-#define mmCPC_INT_PASID_DEFAULT 0x00000000
-#define mmCP_GFX_ERROR_DEFAULT 0x00000000
-#define mmCPG_UTCL1_CNTL_DEFAULT 0x00000080
-#define mmCPC_UTCL1_CNTL_DEFAULT 0x00000080
-#define mmCPF_UTCL1_CNTL_DEFAULT 0x00000080
-#define mmCP_AQL_SMM_STATUS_DEFAULT 0x00000000
-#define mmCP_RB0_BASE_DEFAULT 0x00000000
-#define mmCP_RB_BASE_DEFAULT 0x00000000
-#define mmCP_RB0_CNTL_DEFAULT 0x00400000
-#define mmCP_RB_CNTL_DEFAULT 0x00400000
-#define mmCP_RB_RPTR_WR_DEFAULT 0x00000000
-#define mmCP_RB0_RPTR_ADDR_DEFAULT 0x00000000
-#define mmCP_RB_RPTR_ADDR_DEFAULT 0x00000000
-#define mmCP_RB0_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_RB0_BUFSZ_MASK_DEFAULT 0x00000000
-#define mmCP_RB_BUFSZ_MASK_DEFAULT 0x00000000
-#define mmCP_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
-#define mmCP_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmGC_PRIV_MODE_DEFAULT 0x00000000
-#define mmCP_INT_CNTL_DEFAULT 0x00000000
-#define mmCP_INT_STATUS_DEFAULT 0x00000000
-#define mmCP_DEVICE_ID_DEFAULT 0x00000000
-#define mmCP_ME0_PIPE_PRIORITY_CNTS_DEFAULT 0x08081020
-#define mmCP_RING_PRIORITY_CNTS_DEFAULT 0x08081020
-#define mmCP_ME0_PIPE0_PRIORITY_DEFAULT 0x00000002
-#define mmCP_RING0_PRIORITY_DEFAULT 0x00000002
-#define mmCP_ME0_PIPE1_PRIORITY_DEFAULT 0x00000002
-#define mmCP_RING1_PRIORITY_DEFAULT 0x00000002
-#define mmCP_ME0_PIPE2_PRIORITY_DEFAULT 0x00000002
-#define mmCP_RING2_PRIORITY_DEFAULT 0x00000002
-#define mmCP_FATAL_ERROR_DEFAULT 0x00000000
-#define mmCP_RB_VMID_DEFAULT 0x00000000
-#define mmCP_ME0_PIPE0_VMID_DEFAULT 0x00000000
-#define mmCP_ME0_PIPE1_VMID_DEFAULT 0x00000000
-#define mmCP_RB0_WPTR_DEFAULT 0x00000000
-#define mmCP_RB_WPTR_DEFAULT 0x00000000
-#define mmCP_RB0_WPTR_HI_DEFAULT 0x00000000
-#define mmCP_RB_WPTR_HI_DEFAULT 0x00000000
-#define mmCP_RB1_WPTR_DEFAULT 0x00000000
-#define mmCP_RB1_WPTR_HI_DEFAULT 0x00000000
-#define mmCP_RB2_WPTR_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_RANGE_LOWER_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_RANGE_UPPER_DEFAULT 0x00000044
-#define mmCP_MEC_DOORBELL_RANGE_LOWER_DEFAULT 0x00000048
-#define mmCP_MEC_DOORBELL_RANGE_UPPER_DEFAULT 0x0ffffffc
-#define mmCPG_UTCL1_ERROR_DEFAULT 0x00000000
-#define mmCPC_UTCL1_ERROR_DEFAULT 0x00000000
-#define mmCP_RB1_BASE_DEFAULT 0x00000000
-#define mmCP_RB1_CNTL_DEFAULT 0x00400000
-#define mmCP_RB1_RPTR_ADDR_DEFAULT 0x00000000
-#define mmCP_RB1_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_RB2_BASE_DEFAULT 0x00000000
-#define mmCP_RB2_CNTL_DEFAULT 0x00400000
-#define mmCP_RB2_RPTR_ADDR_DEFAULT 0x00000000
-#define mmCP_RB2_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_RB0_ACTIVE_DEFAULT 0x00000001
-#define mmCP_RB_ACTIVE_DEFAULT 0x00000001
-#define mmCP_INT_CNTL_RING0_DEFAULT 0x00000000
-#define mmCP_INT_CNTL_RING1_DEFAULT 0x00000000
-#define mmCP_INT_CNTL_RING2_DEFAULT 0x00000000
-#define mmCP_INT_STATUS_RING0_DEFAULT 0x00000000
-#define mmCP_INT_STATUS_RING1_DEFAULT 0x00000000
-#define mmCP_INT_STATUS_RING2_DEFAULT 0x00000000
-#define mmCP_PWR_CNTL_DEFAULT 0x00000000
-#define mmCP_MEM_SLP_CNTL_DEFAULT 0x00020200
-#define mmCP_ECC_FIRSTOCCURRENCE_DEFAULT 0x00000000
-#define mmCP_ECC_FIRSTOCCURRENCE_RING0_DEFAULT 0x00000000
-#define mmCP_ECC_FIRSTOCCURRENCE_RING1_DEFAULT 0x00000000
-#define mmCP_ECC_FIRSTOCCURRENCE_RING2_DEFAULT 0x00000000
-#define mmGB_EDC_MODE_DEFAULT 0x00000000
-#define mmCP_PQ_WPTR_POLL_CNTL_DEFAULT 0x00000001
-#define mmCP_PQ_WPTR_POLL_CNTL1_DEFAULT 0x00000000
-#define mmCP_ME1_PIPE0_INT_CNTL_DEFAULT 0x00000000
-#define mmCP_ME1_PIPE1_INT_CNTL_DEFAULT 0x00000000
-#define mmCP_ME1_PIPE2_INT_CNTL_DEFAULT 0x00000000
-#define mmCP_ME1_PIPE3_INT_CNTL_DEFAULT 0x00000000
-#define mmCP_ME2_PIPE0_INT_CNTL_DEFAULT 0x00000000
-#define mmCP_ME2_PIPE1_INT_CNTL_DEFAULT 0x00000000
-#define mmCP_ME2_PIPE2_INT_CNTL_DEFAULT 0x00000000
-#define mmCP_ME2_PIPE3_INT_CNTL_DEFAULT 0x00000000
-#define mmCP_ME1_PIPE0_INT_STATUS_DEFAULT 0x00000000
-#define mmCP_ME1_PIPE1_INT_STATUS_DEFAULT 0x00000000
-#define mmCP_ME1_PIPE2_INT_STATUS_DEFAULT 0x00000000
-#define mmCP_ME1_PIPE3_INT_STATUS_DEFAULT 0x00000000
-#define mmCP_ME2_PIPE0_INT_STATUS_DEFAULT 0x00000000
-#define mmCP_ME2_PIPE1_INT_STATUS_DEFAULT 0x00000000
-#define mmCP_ME2_PIPE2_INT_STATUS_DEFAULT 0x00000000
-#define mmCP_ME2_PIPE3_INT_STATUS_DEFAULT 0x00000000
-#define mmCC_GC_EDC_CONFIG_DEFAULT 0x00000000
-#define mmCP_ME1_PIPE_PRIORITY_CNTS_DEFAULT 0x08081020
-#define mmCP_ME1_PIPE0_PRIORITY_DEFAULT 0x00000002
-#define mmCP_ME1_PIPE1_PRIORITY_DEFAULT 0x00000002
-#define mmCP_ME1_PIPE2_PRIORITY_DEFAULT 0x00000002
-#define mmCP_ME1_PIPE3_PRIORITY_DEFAULT 0x00000002
-#define mmCP_ME2_PIPE_PRIORITY_CNTS_DEFAULT 0x08081020
-#define mmCP_ME2_PIPE0_PRIORITY_DEFAULT 0x00000002
-#define mmCP_ME2_PIPE1_PRIORITY_DEFAULT 0x00000002
-#define mmCP_ME2_PIPE2_PRIORITY_DEFAULT 0x00000002
-#define mmCP_ME2_PIPE3_PRIORITY_DEFAULT 0x00000002
-#define mmCP_CE_PRGRM_CNTR_START_DEFAULT 0x00000000
-#define mmCP_PFP_PRGRM_CNTR_START_DEFAULT 0x00000000
-#define mmCP_ME_PRGRM_CNTR_START_DEFAULT 0x00000000
-#define mmCP_MEC1_PRGRM_CNTR_START_DEFAULT 0x00000000
-#define mmCP_MEC2_PRGRM_CNTR_START_DEFAULT 0x00000000
-#define mmCP_CE_INTR_ROUTINE_START_DEFAULT 0x00000002
-#define mmCP_PFP_INTR_ROUTINE_START_DEFAULT 0x00000002
-#define mmCP_ME_INTR_ROUTINE_START_DEFAULT 0x00000002
-#define mmCP_MEC1_INTR_ROUTINE_START_DEFAULT 0x00000002
-#define mmCP_MEC2_INTR_ROUTINE_START_DEFAULT 0x00000002
-#define mmCP_CONTEXT_CNTL_DEFAULT 0x00750075
-#define mmCP_MAX_CONTEXT_DEFAULT 0x00000007
-#define mmCP_IQ_WAIT_TIME1_DEFAULT 0x40404040
-#define mmCP_IQ_WAIT_TIME2_DEFAULT 0x40404040
-#define mmCP_RB0_BASE_HI_DEFAULT 0x00000000
-#define mmCP_RB1_BASE_HI_DEFAULT 0x00000000
-#define mmCP_VMID_RESET_DEFAULT 0x00000000
-#define mmCPC_INT_CNTL_DEFAULT 0x00000000
-#define mmCPC_INT_STATUS_DEFAULT 0x00000000
-#define mmCP_VMID_PREEMPT_DEFAULT 0x00000000
-#define mmCPC_INT_CNTX_ID_DEFAULT 0x00000000
-#define mmCP_PQ_STATUS_DEFAULT 0x00000000
-#define mmCP_CPC_IC_BASE_LO_DEFAULT 0x00000000
-#define mmCP_CPC_IC_BASE_HI_DEFAULT 0x00000000
-#define mmCP_CPC_IC_BASE_CNTL_DEFAULT 0x00000000
-#define mmCP_CPC_IC_OP_CNTL_DEFAULT 0x00000000
-#define mmCP_MEC1_F32_INT_DIS_DEFAULT 0x00000000
-#define mmCP_MEC2_F32_INT_DIS_DEFAULT 0x00000000
-#define mmCP_VMID_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: gc_cppdec2
-#define mmCP_RB_DOORBELL_CONTROL_SCH_0_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_CONTROL_SCH_1_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_CONTROL_SCH_2_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_CONTROL_SCH_3_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_CONTROL_SCH_4_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_CONTROL_SCH_5_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_CONTROL_SCH_6_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_CONTROL_SCH_7_DEFAULT 0x00000000
-#define mmCP_RB_DOORBELL_CLEAR_DEFAULT 0x00000000
-#define mmCP_GFX_MQD_CONTROL_DEFAULT 0x00000100
-#define mmCP_GFX_MQD_BASE_ADDR_DEFAULT 0x00000000
-#define mmCP_GFX_MQD_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_RB_STATUS_DEFAULT 0x00000000
-#define mmCPG_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmCPC_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmCPF_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmCP_SD_CNTL_DEFAULT 0x0000001f
-#define mmCP_SOFT_RESET_CNTL_DEFAULT 0x00000000
-#define mmCP_CPC_GFX_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: gc_spipdec
-#define mmSPI_ARB_PRIORITY_DEFAULT 0x00000000
-#define mmSPI_ARB_CYCLES_0_DEFAULT 0x00000000
-#define mmSPI_ARB_CYCLES_1_DEFAULT 0x00000000
-#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT 0x07ffffff
-#define mmSPI_WCL_PIPE_PERCENT_HP3D_DEFAULT 0x07c1f07f
-#define mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT 0x0000007f
-#define mmSPI_WCL_PIPE_PERCENT_CS1_DEFAULT 0x0000007f
-#define mmSPI_WCL_PIPE_PERCENT_CS2_DEFAULT 0x0000007f
-#define mmSPI_WCL_PIPE_PERCENT_CS3_DEFAULT 0x0000007f
-#define mmSPI_WCL_PIPE_PERCENT_CS4_DEFAULT 0x0000007f
-#define mmSPI_WCL_PIPE_PERCENT_CS5_DEFAULT 0x0000007f
-#define mmSPI_WCL_PIPE_PERCENT_CS6_DEFAULT 0x0000007f
-#define mmSPI_WCL_PIPE_PERCENT_CS7_DEFAULT 0x0000007f
-#define mmSPI_COMPUTE_QUEUE_RESET_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_0_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_1_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_2_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_3_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_4_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_5_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_6_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_7_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_8_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_9_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_0_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_1_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_2_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_3_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_4_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_5_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_6_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_7_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_8_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_9_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_10_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_11_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_10_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_11_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_12_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_13_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_14_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_CU_15_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_12_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_13_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_14_DEFAULT 0x00000000
-#define mmSPI_RESOURCE_RESERVE_EN_CU_15_DEFAULT 0x00000000
-#define mmSPI_COMPUTE_WF_CTX_SAVE_DEFAULT 0x00000000
-#define mmSPI_ARB_CNTL_0_DEFAULT 0x00000000
-
-
-// addressBlock: gc_cpphqddec
-#define mmCP_HQD_GFX_CONTROL_DEFAULT 0x00000000
-#define mmCP_HQD_GFX_STATUS_DEFAULT 0x00000000
-#define mmCP_HPD_ROQ_OFFSETS_DEFAULT 0x00200604
-#define mmCP_HPD_STATUS0_DEFAULT 0x01000000
-#define mmCP_HPD_UTCL1_CNTL_DEFAULT 0x00000000
-#define mmCP_HPD_UTCL1_ERROR_DEFAULT 0x00000000
-#define mmCP_HPD_UTCL1_ERROR_ADDR_DEFAULT 0x00000000
-#define mmCP_MQD_BASE_ADDR_DEFAULT 0x00000000
-#define mmCP_MQD_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_HQD_ACTIVE_DEFAULT 0x00000000
-#define mmCP_HQD_VMID_DEFAULT 0x00000000
-#define mmCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05301
-#define mmCP_HQD_PIPE_PRIORITY_DEFAULT 0x00000000
-#define mmCP_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000
-#define mmCP_HQD_QUANTUM_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_BASE_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_BASE_HI_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_RPTR_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_RPTR_REPORT_ADDR_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_WPTR_POLL_ADDR_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_CONTROL_DEFAULT 0x00308509
-#define mmCP_HQD_IB_BASE_ADDR_DEFAULT 0x00000000
-#define mmCP_HQD_IB_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_HQD_IB_RPTR_DEFAULT 0x00000000
-#define mmCP_HQD_IB_CONTROL_DEFAULT 0x00300000
-#define mmCP_HQD_IQ_TIMER_DEFAULT 0x00000000
-#define mmCP_HQD_IQ_RPTR_DEFAULT 0x00000000
-#define mmCP_HQD_DEQUEUE_REQUEST_DEFAULT 0x00000000
-#define mmCP_HQD_DMA_OFFLOAD_DEFAULT 0x00000000
-#define mmCP_HQD_OFFLOAD_DEFAULT 0x00000000
-#define mmCP_HQD_SEMA_CMD_DEFAULT 0x00000000
-#define mmCP_HQD_MSG_TYPE_DEFAULT 0x00000000
-#define mmCP_HQD_ATOMIC0_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_HQD_ATOMIC0_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_HQD_ATOMIC1_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_HQD_ATOMIC1_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_HQD_HQ_SCHEDULER0_DEFAULT 0x00000000
-#define mmCP_HQD_HQ_STATUS0_DEFAULT 0x40000000
-#define mmCP_HQD_HQ_CONTROL0_DEFAULT 0x00000000
-#define mmCP_HQD_HQ_SCHEDULER1_DEFAULT 0x00000000
-#define mmCP_MQD_CONTROL_DEFAULT 0x00000100
-#define mmCP_HQD_HQ_STATUS1_DEFAULT 0x00000000
-#define mmCP_HQD_HQ_CONTROL1_DEFAULT 0x00000000
-#define mmCP_HQD_EOP_BASE_ADDR_DEFAULT 0x00000000
-#define mmCP_HQD_EOP_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_HQD_EOP_CONTROL_DEFAULT 0x00000006
-#define mmCP_HQD_EOP_RPTR_DEFAULT 0x40000000
-#define mmCP_HQD_EOP_WPTR_DEFAULT 0x007f8000
-#define mmCP_HQD_EOP_EVENTS_DEFAULT 0x00000000
-#define mmCP_HQD_CTX_SAVE_BASE_ADDR_LO_DEFAULT 0x00000000
-#define mmCP_HQD_CTX_SAVE_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_HQD_CTX_SAVE_CONTROL_DEFAULT 0x00000000
-#define mmCP_HQD_CNTL_STACK_OFFSET_DEFAULT 0x00000000
-#define mmCP_HQD_CNTL_STACK_SIZE_DEFAULT 0x00000000
-#define mmCP_HQD_WG_STATE_OFFSET_DEFAULT 0x00000000
-#define mmCP_HQD_CTX_SAVE_SIZE_DEFAULT 0x00000000
-#define mmCP_HQD_GDS_RESOURCE_STATE_DEFAULT 0x00000000
-#define mmCP_HQD_ERROR_DEFAULT 0x00000000
-#define mmCP_HQD_EOP_WPTR_MEM_DEFAULT 0x00000000
-#define mmCP_HQD_AQL_CONTROL_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_WPTR_LO_DEFAULT 0x00000000
-#define mmCP_HQD_PQ_WPTR_HI_DEFAULT 0x00000000
-
-
-// addressBlock: gc_didtdec
-#define mmDIDT_IND_INDEX_DEFAULT 0x00000000
-#define mmDIDT_IND_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: gc_gccacdec
-#define mmGC_CAC_CTRL_1_DEFAULT 0x01000000
-#define mmGC_CAC_CTRL_2_DEFAULT 0x00000000
-#define mmGC_CAC_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmGC_CAC_AGGR_LOWER_DEFAULT 0x00000000
-#define mmGC_CAC_AGGR_UPPER_DEFAULT 0x00000000
-#define mmGC_CAC_PG_AGGR_LOWER_DEFAULT 0x00000000
-#define mmGC_CAC_PG_AGGR_UPPER_DEFAULT 0x00000000
-#define mmGC_CAC_SOFT_CTRL_DEFAULT 0x00000000
-#define mmGC_DIDT_CTRL0_DEFAULT 0x00000000
-#define mmGC_DIDT_CTRL1_DEFAULT 0xffff0000
-#define mmGC_DIDT_CTRL2_DEFAULT 0x1880000f
-#define mmGC_DIDT_WEIGHT_DEFAULT 0x00000000
-#define mmGC_EDC_CTRL_DEFAULT 0x00000000
-#define mmGC_EDC_THRESHOLD_DEFAULT 0x00000000
-#define mmGC_EDC_STATUS_DEFAULT 0x00000000
-#define mmGC_EDC_OVERFLOW_DEFAULT 0x00000000
-#define mmGC_EDC_ROLLING_POWER_DELTA_DEFAULT 0x00000000
-#define mmGC_DIDT_DROOP_CTRL_DEFAULT 0x00000000
-#define mmGC_EDC_DROOP_CTRL_DEFAULT 0x00100000
-#define mmGC_CAC_IND_INDEX_DEFAULT 0x00000000
-#define mmGC_CAC_IND_DATA_DEFAULT 0x00000000
-#define mmSE_CAC_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmSE_CAC_IND_INDEX_DEFAULT 0x00000000
-#define mmSE_CAC_IND_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: gc_tcpdec
-#define mmTCP_WATCH0_ADDR_H_DEFAULT 0x00000000
-#define mmTCP_WATCH0_ADDR_L_DEFAULT 0x00000000
-#define mmTCP_WATCH0_CNTL_DEFAULT 0x00000000
-#define mmTCP_WATCH1_ADDR_H_DEFAULT 0x00000000
-#define mmTCP_WATCH1_ADDR_L_DEFAULT 0x00000000
-#define mmTCP_WATCH1_CNTL_DEFAULT 0x00000000
-#define mmTCP_WATCH2_ADDR_H_DEFAULT 0x00000000
-#define mmTCP_WATCH2_ADDR_L_DEFAULT 0x00000000
-#define mmTCP_WATCH2_CNTL_DEFAULT 0x00000000
-#define mmTCP_WATCH3_ADDR_H_DEFAULT 0x00000000
-#define mmTCP_WATCH3_ADDR_L_DEFAULT 0x00000000
-#define mmTCP_WATCH3_CNTL_DEFAULT 0x00000000
-#define mmTCP_GATCL1_CNTL_DEFAULT 0x00000000
-#define mmTCP_ATC_EDC_GATCL1_CNT_DEFAULT 0x00000000
-#define mmTCP_GATCL1_DSM_CNTL_DEFAULT 0x00000000
-#define mmTCP_CNTL2_DEFAULT 0x0000000a
-#define mmTCP_UTCL1_CNTL1_DEFAULT 0x00800400
-#define mmTCP_UTCL1_CNTL2_DEFAULT 0x00000000
-#define mmTCP_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER_FILTER_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER_FILTER_EN_DEFAULT 0x00000000
-
-
-// addressBlock: gc_gdspdec
-#define mmGDS_VMID0_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID0_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID1_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID1_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID2_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID2_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID3_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID3_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID4_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID4_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID5_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID5_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID6_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID6_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID7_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID7_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID8_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID8_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID9_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID9_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID10_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID10_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID11_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID11_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID12_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID12_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID13_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID13_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID14_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID14_SIZE_DEFAULT 0x00010000
-#define mmGDS_VMID15_BASE_DEFAULT 0x00000000
-#define mmGDS_VMID15_SIZE_DEFAULT 0x00010000
-#define mmGDS_GWS_VMID0_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID1_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID2_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID3_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID4_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID5_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID6_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID7_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID8_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID9_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID10_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID11_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID12_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID13_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID14_DEFAULT 0x00400000
-#define mmGDS_GWS_VMID15_DEFAULT 0x00400000
-#define mmGDS_OA_VMID0_DEFAULT 0x00000000
-#define mmGDS_OA_VMID1_DEFAULT 0x00000000
-#define mmGDS_OA_VMID2_DEFAULT 0x00000000
-#define mmGDS_OA_VMID3_DEFAULT 0x00000000
-#define mmGDS_OA_VMID4_DEFAULT 0x00000000
-#define mmGDS_OA_VMID5_DEFAULT 0x00000000
-#define mmGDS_OA_VMID6_DEFAULT 0x00000000
-#define mmGDS_OA_VMID7_DEFAULT 0x00000000
-#define mmGDS_OA_VMID8_DEFAULT 0x00000000
-#define mmGDS_OA_VMID9_DEFAULT 0x00000000
-#define mmGDS_OA_VMID10_DEFAULT 0x00000000
-#define mmGDS_OA_VMID11_DEFAULT 0x00000000
-#define mmGDS_OA_VMID12_DEFAULT 0x00000000
-#define mmGDS_OA_VMID13_DEFAULT 0x00000000
-#define mmGDS_OA_VMID14_DEFAULT 0x00000000
-#define mmGDS_OA_VMID15_DEFAULT 0x00000000
-#define mmGDS_GWS_RESET0_DEFAULT 0x00000000
-#define mmGDS_GWS_RESET1_DEFAULT 0x00000000
-#define mmGDS_GWS_RESOURCE_RESET_DEFAULT 0x00000000
-#define mmGDS_COMPUTE_MAX_WAVE_ID_DEFAULT 0x0000015f
-#define mmGDS_OA_RESET_MASK_DEFAULT 0x00000000
-#define mmGDS_OA_RESET_DEFAULT 0x00000000
-#define mmGDS_ENHANCE_DEFAULT 0x00000000
-#define mmGDS_OA_CGPG_RESTORE_DEFAULT 0x00000000
-#define mmGDS_CS_CTXSW_STATUS_DEFAULT 0x00000000
-#define mmGDS_CS_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_CS_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_CS_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_CS_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_GFX_CTXSW_STATUS_DEFAULT 0x00000000
-#define mmGDS_VS_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_VS_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_VS_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_VS_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_PS0_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_PS0_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_PS0_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_PS0_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_PS1_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_PS1_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_PS1_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_PS1_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_PS2_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_PS2_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_PS2_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_PS2_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_PS3_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_PS3_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_PS3_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_PS3_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_PS4_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_PS4_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_PS4_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_PS4_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_PS5_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_PS5_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_PS5_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_PS5_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_PS6_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_PS6_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_PS6_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_PS6_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_PS7_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_PS7_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_PS7_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_PS7_CTXSW_CNT3_DEFAULT 0x00000000
-#define mmGDS_GS_CTXSW_CNT0_DEFAULT 0x00000000
-#define mmGDS_GS_CTXSW_CNT1_DEFAULT 0x00000000
-#define mmGDS_GS_CTXSW_CNT2_DEFAULT 0x00000000
-#define mmGDS_GS_CTXSW_CNT3_DEFAULT 0x00000000
-
-
-// addressBlock: gc_rasdec
-#define mmRAS_SIGNATURE_CONTROL_DEFAULT 0x00000000
-#define mmRAS_SIGNATURE_MASK_DEFAULT 0x00000000
-#define mmRAS_SX_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_SX_SIGNATURE1_DEFAULT 0x00000000
-#define mmRAS_SX_SIGNATURE2_DEFAULT 0x00000000
-#define mmRAS_SX_SIGNATURE3_DEFAULT 0x00000000
-#define mmRAS_DB_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_PA_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_VGT_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_SQ_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_SC_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_SC_SIGNATURE1_DEFAULT 0x00000000
-#define mmRAS_SC_SIGNATURE2_DEFAULT 0x00000000
-#define mmRAS_SC_SIGNATURE3_DEFAULT 0x00000000
-#define mmRAS_SC_SIGNATURE4_DEFAULT 0x00000000
-#define mmRAS_SC_SIGNATURE5_DEFAULT 0x00000000
-#define mmRAS_SC_SIGNATURE6_DEFAULT 0x00000000
-#define mmRAS_SC_SIGNATURE7_DEFAULT 0x00000000
-#define mmRAS_IA_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_IA_SIGNATURE1_DEFAULT 0x00000000
-#define mmRAS_SPI_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_SPI_SIGNATURE1_DEFAULT 0x00000000
-#define mmRAS_TA_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_TD_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_CB_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_BCI_SIGNATURE0_DEFAULT 0x00000000
-#define mmRAS_BCI_SIGNATURE1_DEFAULT 0x00000000
-#define mmRAS_TA_SIGNATURE1_DEFAULT 0x00000000
-
-
-// addressBlock: gc_gfxdec0
-#define mmDB_RENDER_CONTROL_DEFAULT 0x00000000
-#define mmDB_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmDB_DEPTH_VIEW_DEFAULT 0x00000000
-#define mmDB_RENDER_OVERRIDE_DEFAULT 0x00000000
-#define mmDB_RENDER_OVERRIDE2_DEFAULT 0x00000000
-#define mmDB_HTILE_DATA_BASE_DEFAULT 0x00000000
-#define mmDB_HTILE_DATA_BASE_HI_DEFAULT 0x00000000
-#define mmDB_DEPTH_SIZE_DEFAULT 0x00000000
-#define mmDB_DEPTH_BOUNDS_MIN_DEFAULT 0x00000000
-#define mmDB_DEPTH_BOUNDS_MAX_DEFAULT 0x00000000
-#define mmDB_STENCIL_CLEAR_DEFAULT 0x00000000
-#define mmDB_DEPTH_CLEAR_DEFAULT 0x00000000
-#define mmPA_SC_SCREEN_SCISSOR_TL_DEFAULT 0x00000000
-#define mmPA_SC_SCREEN_SCISSOR_BR_DEFAULT 0x00000000
-#define mmDB_Z_INFO_DEFAULT 0x00000000
-#define mmDB_STENCIL_INFO_DEFAULT 0x00000000
-#define mmDB_Z_READ_BASE_DEFAULT 0x00000000
-#define mmDB_Z_READ_BASE_HI_DEFAULT 0x00000000
-#define mmDB_STENCIL_READ_BASE_DEFAULT 0x00000000
-#define mmDB_STENCIL_READ_BASE_HI_DEFAULT 0x00000000
-#define mmDB_Z_WRITE_BASE_DEFAULT 0x00000000
-#define mmDB_Z_WRITE_BASE_HI_DEFAULT 0x00000000
-#define mmDB_STENCIL_WRITE_BASE_DEFAULT 0x00000000
-#define mmDB_STENCIL_WRITE_BASE_HI_DEFAULT 0x00000000
-#define mmDB_DFSM_CONTROL_DEFAULT 0x00000000
-#define mmDB_RENDER_FILTER_DEFAULT 0x00000000
-#define mmDB_Z_INFO2_DEFAULT 0x00000000
-#define mmDB_STENCIL_INFO2_DEFAULT 0x00000000
-#define mmTA_BC_BASE_ADDR_DEFAULT 0x00000000
-#define mmTA_BC_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmCOHER_DEST_BASE_HI_0_DEFAULT 0x00000000
-#define mmCOHER_DEST_BASE_HI_1_DEFAULT 0x00000000
-#define mmCOHER_DEST_BASE_HI_2_DEFAULT 0x00000000
-#define mmCOHER_DEST_BASE_HI_3_DEFAULT 0x00000000
-#define mmCOHER_DEST_BASE_2_DEFAULT 0x00000000
-#define mmCOHER_DEST_BASE_3_DEFAULT 0x00000000
-#define mmPA_SC_WINDOW_OFFSET_DEFAULT 0x00000000
-#define mmPA_SC_WINDOW_SCISSOR_TL_DEFAULT 0x00000000
-#define mmPA_SC_WINDOW_SCISSOR_BR_DEFAULT 0x00000000
-#define mmPA_SC_CLIPRECT_RULE_DEFAULT 0x00000000
-#define mmPA_SC_CLIPRECT_0_TL_DEFAULT 0x00000000
-#define mmPA_SC_CLIPRECT_0_BR_DEFAULT 0x00000000
-#define mmPA_SC_CLIPRECT_1_TL_DEFAULT 0x00000000
-#define mmPA_SC_CLIPRECT_1_BR_DEFAULT 0x00000000
-#define mmPA_SC_CLIPRECT_2_TL_DEFAULT 0x00000000
-#define mmPA_SC_CLIPRECT_2_BR_DEFAULT 0x00000000
-#define mmPA_SC_CLIPRECT_3_TL_DEFAULT 0x00000000
-#define mmPA_SC_CLIPRECT_3_BR_DEFAULT 0x00000000
-#define mmPA_SC_EDGERULE_DEFAULT 0x00000000
-#define mmPA_SU_HARDWARE_SCREEN_OFFSET_DEFAULT 0x00000000
-#define mmCB_TARGET_MASK_DEFAULT 0x00000000
-#define mmCB_SHADER_MASK_DEFAULT 0x00000000
-#define mmPA_SC_GENERIC_SCISSOR_TL_DEFAULT 0x00000000
-#define mmPA_SC_GENERIC_SCISSOR_BR_DEFAULT 0x00000000
-#define mmCOHER_DEST_BASE_0_DEFAULT 0x00000000
-#define mmCOHER_DEST_BASE_1_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_0_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_0_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_1_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_1_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_2_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_2_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_3_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_3_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_4_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_4_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_5_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_5_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_6_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_6_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_7_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_7_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_8_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_8_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_9_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_9_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_10_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_10_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_11_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_11_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_12_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_12_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_13_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_13_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_14_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_14_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_15_TL_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_SCISSOR_15_BR_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_0_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_0_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_1_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_1_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_2_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_2_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_3_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_3_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_4_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_4_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_5_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_5_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_6_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_6_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_7_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_7_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_8_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_8_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_9_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_9_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_10_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_10_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_11_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_11_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_12_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_12_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_13_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_13_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_14_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_14_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMIN_15_DEFAULT 0x00000000
-#define mmPA_SC_VPORT_ZMAX_15_DEFAULT 0x00000000
-#define mmPA_SC_RASTER_CONFIG_DEFAULT 0x00000000
-#define mmPA_SC_RASTER_CONFIG_1_DEFAULT 0x00000000
-#define mmPA_SC_SCREEN_EXTENT_CONTROL_DEFAULT 0x00000000
-#define mmPA_SC_TILE_STEERING_OVERRIDE_DEFAULT 0x00000000
-#define mmCP_PERFMON_CNTX_CNTL_DEFAULT 0x00000000
-#define mmCP_PIPEID_DEFAULT 0x00000000
-#define mmCP_RINGID_DEFAULT 0x00000000
-#define mmCP_VMID_DEFAULT 0x00000000
-#define mmPA_SC_RIGHT_VERT_GRID_DEFAULT 0x00000000
-#define mmPA_SC_LEFT_VERT_GRID_DEFAULT 0x00000000
-#define mmPA_SC_HORIZ_GRID_DEFAULT 0x00000000
-#define mmPA_SC_FOV_WINDOW_LR_DEFAULT 0x00000000
-#define mmPA_SC_FOV_WINDOW_TB_DEFAULT 0x00000000
-#define mmVGT_MULTI_PRIM_IB_RESET_INDX_DEFAULT 0x00000000
-#define mmCB_BLEND_RED_DEFAULT 0x00000000
-#define mmCB_BLEND_GREEN_DEFAULT 0x00000000
-#define mmCB_BLEND_BLUE_DEFAULT 0x00000000
-#define mmCB_BLEND_ALPHA_DEFAULT 0x00000000
-#define mmCB_DCC_CONTROL_DEFAULT 0x00000000
-#define mmDB_STENCIL_CONTROL_DEFAULT 0x00000000
-#define mmDB_STENCILREFMASK_DEFAULT 0x00000000
-#define mmDB_STENCILREFMASK_BF_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_1_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_1_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_1_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_1_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_1_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_1_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_2_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_2_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_2_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_2_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_2_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_2_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_3_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_3_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_3_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_3_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_3_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_3_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_4_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_4_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_4_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_4_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_4_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_4_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_5_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_5_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_5_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_5_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_5_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_5_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_6_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_6_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_6_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_6_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_6_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_6_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_7_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_7_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_7_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_7_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_7_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_7_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_8_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_8_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_8_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_8_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_8_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_8_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_9_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_9_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_9_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_9_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_9_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_9_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_10_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_10_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_10_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_10_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_10_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_10_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_11_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_11_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_11_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_11_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_11_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_11_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_12_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_12_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_12_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_12_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_12_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_12_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_13_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_13_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_13_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_13_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_13_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_13_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_14_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_14_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_14_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_14_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_14_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_14_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XSCALE_15_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_XOFFSET_15_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YSCALE_15_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_YOFFSET_15_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZSCALE_15_DEFAULT 0x00000000
-#define mmPA_CL_VPORT_ZOFFSET_15_DEFAULT 0x00000000
-#define mmPA_CL_UCP_0_X_DEFAULT 0x00000000
-#define mmPA_CL_UCP_0_Y_DEFAULT 0x00000000
-#define mmPA_CL_UCP_0_Z_DEFAULT 0x00000000
-#define mmPA_CL_UCP_0_W_DEFAULT 0x00000000
-#define mmPA_CL_UCP_1_X_DEFAULT 0x00000000
-#define mmPA_CL_UCP_1_Y_DEFAULT 0x00000000
-#define mmPA_CL_UCP_1_Z_DEFAULT 0x00000000
-#define mmPA_CL_UCP_1_W_DEFAULT 0x00000000
-#define mmPA_CL_UCP_2_X_DEFAULT 0x00000000
-#define mmPA_CL_UCP_2_Y_DEFAULT 0x00000000
-#define mmPA_CL_UCP_2_Z_DEFAULT 0x00000000
-#define mmPA_CL_UCP_2_W_DEFAULT 0x00000000
-#define mmPA_CL_UCP_3_X_DEFAULT 0x00000000
-#define mmPA_CL_UCP_3_Y_DEFAULT 0x00000000
-#define mmPA_CL_UCP_3_Z_DEFAULT 0x00000000
-#define mmPA_CL_UCP_3_W_DEFAULT 0x00000000
-#define mmPA_CL_UCP_4_X_DEFAULT 0x00000000
-#define mmPA_CL_UCP_4_Y_DEFAULT 0x00000000
-#define mmPA_CL_UCP_4_Z_DEFAULT 0x00000000
-#define mmPA_CL_UCP_4_W_DEFAULT 0x00000000
-#define mmPA_CL_UCP_5_X_DEFAULT 0x00000000
-#define mmPA_CL_UCP_5_Y_DEFAULT 0x00000000
-#define mmPA_CL_UCP_5_Z_DEFAULT 0x00000000
-#define mmPA_CL_UCP_5_W_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_0_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_1_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_2_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_3_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_4_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_5_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_6_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_7_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_8_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_9_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_10_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_11_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_12_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_13_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_14_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_15_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_16_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_17_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_18_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_19_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_20_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_21_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_22_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_23_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_24_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_25_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_26_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_27_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_28_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_29_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_30_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_CNTL_31_DEFAULT 0x00000000
-#define mmSPI_VS_OUT_CONFIG_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_ENA_DEFAULT 0x00000000
-#define mmSPI_PS_INPUT_ADDR_DEFAULT 0x00000000
-#define mmSPI_INTERP_CONTROL_0_DEFAULT 0x00000000
-#define mmSPI_PS_IN_CONTROL_DEFAULT 0x00000000
-#define mmSPI_BARYC_CNTL_DEFAULT 0x00000000
-#define mmSPI_TMPRING_SIZE_DEFAULT 0x00000000
-#define mmSPI_SHADER_POS_FORMAT_DEFAULT 0x00000000
-#define mmSPI_SHADER_Z_FORMAT_DEFAULT 0x00000000
-#define mmSPI_SHADER_COL_FORMAT_DEFAULT 0x00000000
-#define mmSX_PS_DOWNCONVERT_DEFAULT 0x00000000
-#define mmSX_BLEND_OPT_EPSILON_DEFAULT 0x00000000
-#define mmSX_BLEND_OPT_CONTROL_DEFAULT 0x00000000
-#define mmSX_MRT0_BLEND_OPT_DEFAULT 0x00000000
-#define mmSX_MRT1_BLEND_OPT_DEFAULT 0x00000000
-#define mmSX_MRT2_BLEND_OPT_DEFAULT 0x00000000
-#define mmSX_MRT3_BLEND_OPT_DEFAULT 0x00000000
-#define mmSX_MRT4_BLEND_OPT_DEFAULT 0x00000000
-#define mmSX_MRT5_BLEND_OPT_DEFAULT 0x00000000
-#define mmSX_MRT6_BLEND_OPT_DEFAULT 0x00000000
-#define mmSX_MRT7_BLEND_OPT_DEFAULT 0x00000000
-#define mmCB_BLEND0_CONTROL_DEFAULT 0x00000000
-#define mmCB_BLEND1_CONTROL_DEFAULT 0x00000000
-#define mmCB_BLEND2_CONTROL_DEFAULT 0x00000000
-#define mmCB_BLEND3_CONTROL_DEFAULT 0x00000000
-#define mmCB_BLEND4_CONTROL_DEFAULT 0x00000000
-#define mmCB_BLEND5_CONTROL_DEFAULT 0x00000000
-#define mmCB_BLEND6_CONTROL_DEFAULT 0x00000000
-#define mmCB_BLEND7_CONTROL_DEFAULT 0x00000000
-#define mmCB_MRT0_EPITCH_DEFAULT 0x00000000
-#define mmCB_MRT1_EPITCH_DEFAULT 0x00000000
-#define mmCB_MRT2_EPITCH_DEFAULT 0x00000000
-#define mmCB_MRT3_EPITCH_DEFAULT 0x00000000
-#define mmCB_MRT4_EPITCH_DEFAULT 0x00000000
-#define mmCB_MRT5_EPITCH_DEFAULT 0x00000000
-#define mmCB_MRT6_EPITCH_DEFAULT 0x00000000
-#define mmCB_MRT7_EPITCH_DEFAULT 0x00000000
-#define mmCS_COPY_STATE_DEFAULT 0x00000000
-#define mmGFX_COPY_STATE_DEFAULT 0x00000000
-#define mmPA_CL_POINT_X_RAD_DEFAULT 0x00000000
-#define mmPA_CL_POINT_Y_RAD_DEFAULT 0x00000000
-#define mmPA_CL_POINT_SIZE_DEFAULT 0x00000000
-#define mmPA_CL_POINT_CULL_RAD_DEFAULT 0x00000000
-#define mmVGT_DMA_BASE_HI_DEFAULT 0x00000000
-#define mmVGT_DMA_BASE_DEFAULT 0x00000000
-#define mmVGT_DRAW_INITIATOR_DEFAULT 0x00000000
-#define mmVGT_IMMED_DATA_DEFAULT 0x00000000
-#define mmVGT_EVENT_ADDRESS_REG_DEFAULT 0x00000000
-#define mmDB_DEPTH_CONTROL_DEFAULT 0x00000000
-#define mmDB_EQAA_DEFAULT 0x00000000
-#define mmCB_COLOR_CONTROL_DEFAULT 0x00000000
-#define mmDB_SHADER_CONTROL_DEFAULT 0x00000000
-#define mmPA_CL_CLIP_CNTL_DEFAULT 0x00000000
-#define mmPA_SU_SC_MODE_CNTL_DEFAULT 0x00000000
-#define mmPA_CL_VTE_CNTL_DEFAULT 0x00000000
-#define mmPA_CL_VS_OUT_CNTL_DEFAULT 0x00000000
-#define mmPA_CL_NANINF_CNTL_DEFAULT 0x00000000
-#define mmPA_SU_LINE_STIPPLE_CNTL_DEFAULT 0x00000000
-#define mmPA_SU_LINE_STIPPLE_SCALE_DEFAULT 0x00000000
-#define mmPA_SU_PRIM_FILTER_CNTL_DEFAULT 0x00000000
-#define mmPA_SU_SMALL_PRIM_FILTER_CNTL_DEFAULT 0x00000000
-#define mmPA_CL_OBJPRIM_ID_CNTL_DEFAULT 0x00000000
-#define mmPA_CL_NGG_CNTL_DEFAULT 0x00000000
-#define mmPA_SU_OVER_RASTERIZATION_CNTL_DEFAULT 0x00000000
-#define mmPA_SU_POINT_SIZE_DEFAULT 0x00000000
-#define mmPA_SU_POINT_MINMAX_DEFAULT 0x00000000
-#define mmPA_SU_LINE_CNTL_DEFAULT 0x00000000
-#define mmPA_SC_LINE_STIPPLE_DEFAULT 0x00000000
-#define mmVGT_OUTPUT_PATH_CNTL_DEFAULT 0x00000000
-#define mmVGT_HOS_CNTL_DEFAULT 0x00000000
-#define mmVGT_HOS_MAX_TESS_LEVEL_DEFAULT 0x00000000
-#define mmVGT_HOS_MIN_TESS_LEVEL_DEFAULT 0x00000000
-#define mmVGT_HOS_REUSE_DEPTH_DEFAULT 0x00000000
-#define mmVGT_GROUP_PRIM_TYPE_DEFAULT 0x00000000
-#define mmVGT_GROUP_FIRST_DECR_DEFAULT 0x00000000
-#define mmVGT_GROUP_DECR_DEFAULT 0x00000000
-#define mmVGT_GROUP_VECT_0_CNTL_DEFAULT 0x00000000
-#define mmVGT_GROUP_VECT_1_CNTL_DEFAULT 0x00000000
-#define mmVGT_GROUP_VECT_0_FMT_CNTL_DEFAULT 0x00000000
-#define mmVGT_GROUP_VECT_1_FMT_CNTL_DEFAULT 0x00000000
-#define mmVGT_GS_MODE_DEFAULT 0x00000000
-#define mmVGT_GS_ONCHIP_CNTL_DEFAULT 0x00000000
-#define mmPA_SC_MODE_CNTL_0_DEFAULT 0x00000000
-#define mmPA_SC_MODE_CNTL_1_DEFAULT 0x06000000
-#define mmVGT_ENHANCE_DEFAULT 0x00000000
-#define mmVGT_GS_PER_ES_DEFAULT 0x00000000
-#define mmVGT_ES_PER_GS_DEFAULT 0x00000000
-#define mmVGT_GS_PER_VS_DEFAULT 0x00000000
-#define mmVGT_GSVS_RING_OFFSET_1_DEFAULT 0x00000000
-#define mmVGT_GSVS_RING_OFFSET_2_DEFAULT 0x00000000
-#define mmVGT_GSVS_RING_OFFSET_3_DEFAULT 0x00000000
-#define mmVGT_GS_OUT_PRIM_TYPE_DEFAULT 0x00000000
-#define mmIA_ENHANCE_DEFAULT 0x00000000
-#define mmVGT_DMA_SIZE_DEFAULT 0x00000000
-#define mmVGT_DMA_MAX_SIZE_DEFAULT 0x00000000
-#define mmVGT_DMA_INDEX_TYPE_DEFAULT 0x00000000
-#define mmWD_ENHANCE_DEFAULT 0x00000000
-#define mmVGT_PRIMITIVEID_EN_DEFAULT 0x00000000
-#define mmVGT_DMA_NUM_INSTANCES_DEFAULT 0x00000000
-#define mmVGT_PRIMITIVEID_RESET_DEFAULT 0x00000000
-#define mmVGT_EVENT_INITIATOR_DEFAULT 0x00000000
-#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_DEFAULT 0x00000000
-#define mmVGT_DRAW_PAYLOAD_CNTL_DEFAULT 0x00000000
-#define mmVGT_INDEX_PAYLOAD_CNTL_DEFAULT 0x00000000
-#define mmVGT_INSTANCE_STEP_RATE_0_DEFAULT 0x00000000
-#define mmVGT_INSTANCE_STEP_RATE_1_DEFAULT 0x00000000
-#define mmVGT_ESGS_RING_ITEMSIZE_DEFAULT 0x00000000
-#define mmVGT_GSVS_RING_ITEMSIZE_DEFAULT 0x00000000
-#define mmVGT_REUSE_OFF_DEFAULT 0x00000000
-#define mmVGT_VTX_CNT_EN_DEFAULT 0x00000000
-#define mmDB_HTILE_SURFACE_DEFAULT 0x00000000
-#define mmDB_SRESULTS_COMPARE_STATE0_DEFAULT 0x00000000
-#define mmDB_SRESULTS_COMPARE_STATE1_DEFAULT 0x00000000
-#define mmDB_PRELOAD_CONTROL_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_SIZE_0_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_VTX_STRIDE_0_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_OFFSET_0_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_SIZE_1_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_VTX_STRIDE_1_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_OFFSET_1_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_SIZE_2_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_VTX_STRIDE_2_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_OFFSET_2_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_SIZE_3_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_VTX_STRIDE_3_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_OFFSET_3_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_DRAW_OPAQUE_OFFSET_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE_DEFAULT 0x00000000
-#define mmVGT_GS_MAX_VERT_OUT_DEFAULT 0x00000000
-#define mmVGT_TESS_DISTRIBUTION_DEFAULT 0x00000000
-#define mmVGT_SHADER_STAGES_EN_DEFAULT 0x00000000
-#define mmVGT_LS_HS_CONFIG_DEFAULT 0x00000000
-#define mmVGT_GS_VERT_ITEMSIZE_DEFAULT 0x00000000
-#define mmVGT_GS_VERT_ITEMSIZE_1_DEFAULT 0x00000000
-#define mmVGT_GS_VERT_ITEMSIZE_2_DEFAULT 0x00000000
-#define mmVGT_GS_VERT_ITEMSIZE_3_DEFAULT 0x00000000
-#define mmVGT_TF_PARAM_DEFAULT 0x00000000
-#define mmDB_ALPHA_TO_MASK_DEFAULT 0x00000000
-#define mmVGT_DISPATCH_DRAW_INDEX_DEFAULT 0x00000000
-#define mmPA_SU_POLY_OFFSET_DB_FMT_CNTL_DEFAULT 0x00000000
-#define mmPA_SU_POLY_OFFSET_CLAMP_DEFAULT 0x00000000
-#define mmPA_SU_POLY_OFFSET_FRONT_SCALE_DEFAULT 0x00000000
-#define mmPA_SU_POLY_OFFSET_FRONT_OFFSET_DEFAULT 0x00000000
-#define mmPA_SU_POLY_OFFSET_BACK_SCALE_DEFAULT 0x00000000
-#define mmPA_SU_POLY_OFFSET_BACK_OFFSET_DEFAULT 0x00000000
-#define mmVGT_GS_INSTANCE_CNT_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_CONFIG_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_CONFIG_DEFAULT 0x00000000
-#define mmVGT_DMA_EVENT_INITIATOR_DEFAULT 0x00000000
-#define mmPA_SC_CENTROID_PRIORITY_0_DEFAULT 0x00000000
-#define mmPA_SC_CENTROID_PRIORITY_1_DEFAULT 0x00000000
-#define mmPA_SC_LINE_CNTL_DEFAULT 0x00000000
-#define mmPA_SC_AA_CONFIG_DEFAULT 0x00000000
-#define mmPA_SU_VTX_CNTL_DEFAULT 0x00000000
-#define mmPA_CL_GB_VERT_CLIP_ADJ_DEFAULT 0x00000000
-#define mmPA_CL_GB_VERT_DISC_ADJ_DEFAULT 0x00000000
-#define mmPA_CL_GB_HORZ_CLIP_ADJ_DEFAULT 0x00000000
-#define mmPA_CL_GB_HORZ_DISC_ADJ_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2_DEFAULT 0x00000000
-#define mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3_DEFAULT 0x00000000
-#define mmPA_SC_AA_MASK_X0Y0_X1Y0_DEFAULT 0x00000000
-#define mmPA_SC_AA_MASK_X0Y1_X1Y1_DEFAULT 0x00000000
-#define mmPA_SC_SHADER_CONTROL_DEFAULT 0x00000000
-#define mmPA_SC_BINNER_CNTL_0_DEFAULT 0x00000000
-#define mmPA_SC_BINNER_CNTL_1_DEFAULT 0x00000000
-#define mmPA_SC_CONSERVATIVE_RASTERIZATION_CNTL_DEFAULT 0x00000000
-#define mmPA_SC_NGG_MODE_CNTL_DEFAULT 0x00000000
-#define mmVGT_VERTEX_REUSE_BLOCK_CNTL_DEFAULT 0x00000000
-#define mmVGT_OUT_DEALLOC_CNTL_DEFAULT 0x00000000
-#define mmCB_COLOR0_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR0_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR0_ATTRIB2_DEFAULT 0x00000000
-#define mmCB_COLOR0_VIEW_DEFAULT 0x00000000
-#define mmCB_COLOR0_INFO_DEFAULT 0x00000000
-#define mmCB_COLOR0_ATTRIB_DEFAULT 0x00000000
-#define mmCB_COLOR0_DCC_CONTROL_DEFAULT 0x00000000
-#define mmCB_COLOR0_CMASK_DEFAULT 0x00000000
-#define mmCB_COLOR0_CMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR0_FMASK_DEFAULT 0x00000000
-#define mmCB_COLOR0_FMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR0_CLEAR_WORD0_DEFAULT 0x00000000
-#define mmCB_COLOR0_CLEAR_WORD1_DEFAULT 0x00000000
-#define mmCB_COLOR0_DCC_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR0_DCC_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR1_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR1_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR1_ATTRIB2_DEFAULT 0x00000000
-#define mmCB_COLOR1_VIEW_DEFAULT 0x00000000
-#define mmCB_COLOR1_INFO_DEFAULT 0x00000000
-#define mmCB_COLOR1_ATTRIB_DEFAULT 0x00000000
-#define mmCB_COLOR1_DCC_CONTROL_DEFAULT 0x00000000
-#define mmCB_COLOR1_CMASK_DEFAULT 0x00000000
-#define mmCB_COLOR1_CMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR1_FMASK_DEFAULT 0x00000000
-#define mmCB_COLOR1_FMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR1_CLEAR_WORD0_DEFAULT 0x00000000
-#define mmCB_COLOR1_CLEAR_WORD1_DEFAULT 0x00000000
-#define mmCB_COLOR1_DCC_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR1_DCC_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR2_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR2_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR2_ATTRIB2_DEFAULT 0x00000000
-#define mmCB_COLOR2_VIEW_DEFAULT 0x00000000
-#define mmCB_COLOR2_INFO_DEFAULT 0x00000000
-#define mmCB_COLOR2_ATTRIB_DEFAULT 0x00000000
-#define mmCB_COLOR2_DCC_CONTROL_DEFAULT 0x00000000
-#define mmCB_COLOR2_CMASK_DEFAULT 0x00000000
-#define mmCB_COLOR2_CMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR2_FMASK_DEFAULT 0x00000000
-#define mmCB_COLOR2_FMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR2_CLEAR_WORD0_DEFAULT 0x00000000
-#define mmCB_COLOR2_CLEAR_WORD1_DEFAULT 0x00000000
-#define mmCB_COLOR2_DCC_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR2_DCC_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR3_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR3_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR3_ATTRIB2_DEFAULT 0x00000000
-#define mmCB_COLOR3_VIEW_DEFAULT 0x00000000
-#define mmCB_COLOR3_INFO_DEFAULT 0x00000000
-#define mmCB_COLOR3_ATTRIB_DEFAULT 0x00000000
-#define mmCB_COLOR3_DCC_CONTROL_DEFAULT 0x00000000
-#define mmCB_COLOR3_CMASK_DEFAULT 0x00000000
-#define mmCB_COLOR3_CMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR3_FMASK_DEFAULT 0x00000000
-#define mmCB_COLOR3_FMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR3_CLEAR_WORD0_DEFAULT 0x00000000
-#define mmCB_COLOR3_CLEAR_WORD1_DEFAULT 0x00000000
-#define mmCB_COLOR3_DCC_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR3_DCC_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR4_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR4_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR4_ATTRIB2_DEFAULT 0x00000000
-#define mmCB_COLOR4_VIEW_DEFAULT 0x00000000
-#define mmCB_COLOR4_INFO_DEFAULT 0x00000000
-#define mmCB_COLOR4_ATTRIB_DEFAULT 0x00000000
-#define mmCB_COLOR4_DCC_CONTROL_DEFAULT 0x00000000
-#define mmCB_COLOR4_CMASK_DEFAULT 0x00000000
-#define mmCB_COLOR4_CMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR4_FMASK_DEFAULT 0x00000000
-#define mmCB_COLOR4_FMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR4_CLEAR_WORD0_DEFAULT 0x00000000
-#define mmCB_COLOR4_CLEAR_WORD1_DEFAULT 0x00000000
-#define mmCB_COLOR4_DCC_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR4_DCC_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR5_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR5_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR5_ATTRIB2_DEFAULT 0x00000000
-#define mmCB_COLOR5_VIEW_DEFAULT 0x00000000
-#define mmCB_COLOR5_INFO_DEFAULT 0x00000000
-#define mmCB_COLOR5_ATTRIB_DEFAULT 0x00000000
-#define mmCB_COLOR5_DCC_CONTROL_DEFAULT 0x00000000
-#define mmCB_COLOR5_CMASK_DEFAULT 0x00000000
-#define mmCB_COLOR5_CMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR5_FMASK_DEFAULT 0x00000000
-#define mmCB_COLOR5_FMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR5_CLEAR_WORD0_DEFAULT 0x00000000
-#define mmCB_COLOR5_CLEAR_WORD1_DEFAULT 0x00000000
-#define mmCB_COLOR5_DCC_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR5_DCC_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR6_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR6_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR6_ATTRIB2_DEFAULT 0x00000000
-#define mmCB_COLOR6_VIEW_DEFAULT 0x00000000
-#define mmCB_COLOR6_INFO_DEFAULT 0x00000000
-#define mmCB_COLOR6_ATTRIB_DEFAULT 0x00000000
-#define mmCB_COLOR6_DCC_CONTROL_DEFAULT 0x00000000
-#define mmCB_COLOR6_CMASK_DEFAULT 0x00000000
-#define mmCB_COLOR6_CMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR6_FMASK_DEFAULT 0x00000000
-#define mmCB_COLOR6_FMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR6_CLEAR_WORD0_DEFAULT 0x00000000
-#define mmCB_COLOR6_CLEAR_WORD1_DEFAULT 0x00000000
-#define mmCB_COLOR6_DCC_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR6_DCC_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR7_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR7_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR7_ATTRIB2_DEFAULT 0x00000000
-#define mmCB_COLOR7_VIEW_DEFAULT 0x00000000
-#define mmCB_COLOR7_INFO_DEFAULT 0x00000000
-#define mmCB_COLOR7_ATTRIB_DEFAULT 0x00000000
-#define mmCB_COLOR7_DCC_CONTROL_DEFAULT 0x00000000
-#define mmCB_COLOR7_CMASK_DEFAULT 0x00000000
-#define mmCB_COLOR7_CMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR7_FMASK_DEFAULT 0x00000000
-#define mmCB_COLOR7_FMASK_BASE_EXT_DEFAULT 0x00000000
-#define mmCB_COLOR7_CLEAR_WORD0_DEFAULT 0x00000000
-#define mmCB_COLOR7_CLEAR_WORD1_DEFAULT 0x00000000
-#define mmCB_COLOR7_DCC_BASE_DEFAULT 0x00000000
-#define mmCB_COLOR7_DCC_BASE_EXT_DEFAULT 0x00000000
-
-
-// addressBlock: gc_gfxudec
-#define mmCP_EOP_DONE_ADDR_LO_DEFAULT 0x00000000
-#define mmCP_EOP_DONE_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_EOP_DONE_DATA_LO_DEFAULT 0x00000000
-#define mmCP_EOP_DONE_DATA_HI_DEFAULT 0x00000000
-#define mmCP_EOP_LAST_FENCE_LO_DEFAULT 0x00000000
-#define mmCP_EOP_LAST_FENCE_HI_DEFAULT 0x00000000
-#define mmCP_STREAM_OUT_ADDR_LO_DEFAULT 0x00000000
-#define mmCP_STREAM_OUT_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_WRITTEN_COUNT0_LO_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_WRITTEN_COUNT0_HI_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_NEEDED_COUNT0_LO_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_NEEDED_COUNT0_HI_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_WRITTEN_COUNT1_LO_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_WRITTEN_COUNT1_HI_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_NEEDED_COUNT1_LO_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_NEEDED_COUNT1_HI_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_WRITTEN_COUNT2_LO_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_WRITTEN_COUNT2_HI_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_NEEDED_COUNT2_LO_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_NEEDED_COUNT2_HI_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_WRITTEN_COUNT3_LO_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_WRITTEN_COUNT3_HI_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_NEEDED_COUNT3_LO_DEFAULT 0x00000000
-#define mmCP_NUM_PRIM_NEEDED_COUNT3_HI_DEFAULT 0x00000000
-#define mmCP_PIPE_STATS_ADDR_LO_DEFAULT 0x00000000
-#define mmCP_PIPE_STATS_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_VGT_IAVERT_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_VGT_IAVERT_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_VGT_IAPRIM_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_VGT_IAPRIM_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_VGT_GSPRIM_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_VGT_GSPRIM_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_VGT_VSINVOC_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_VGT_VSINVOC_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_VGT_GSINVOC_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_VGT_GSINVOC_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_VGT_HSINVOC_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_VGT_HSINVOC_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_VGT_DSINVOC_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_VGT_DSINVOC_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_PA_CINVOC_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_PA_CINVOC_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_PA_CPRIM_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_PA_CPRIM_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_SC_PSINVOC_COUNT0_LO_DEFAULT 0x00000000
-#define mmCP_SC_PSINVOC_COUNT0_HI_DEFAULT 0x00000000
-#define mmCP_SC_PSINVOC_COUNT1_LO_DEFAULT 0x00000000
-#define mmCP_SC_PSINVOC_COUNT1_HI_DEFAULT 0x00000000
-#define mmCP_VGT_CSINVOC_COUNT_LO_DEFAULT 0x00000000
-#define mmCP_VGT_CSINVOC_COUNT_HI_DEFAULT 0x00000000
-#define mmCP_PIPE_STATS_CONTROL_DEFAULT 0x00000000
-#define mmCP_STREAM_OUT_CONTROL_DEFAULT 0x00000000
-#define mmCP_STRMOUT_CNTL_DEFAULT 0x00000000
-#define mmSCRATCH_REG0_DEFAULT 0x00000000
-#define mmSCRATCH_REG1_DEFAULT 0x00000000
-#define mmSCRATCH_REG2_DEFAULT 0x00000000
-#define mmSCRATCH_REG3_DEFAULT 0x00000000
-#define mmSCRATCH_REG4_DEFAULT 0x00000000
-#define mmSCRATCH_REG5_DEFAULT 0x00000000
-#define mmSCRATCH_REG6_DEFAULT 0x00000000
-#define mmSCRATCH_REG7_DEFAULT 0x00000000
-#define mmCP_APPEND_DATA_HI_DEFAULT 0x00000000
-#define mmCP_APPEND_LAST_CS_FENCE_HI_DEFAULT 0x00000000
-#define mmCP_APPEND_LAST_PS_FENCE_HI_DEFAULT 0x00000000
-#define mmSCRATCH_UMSK_DEFAULT 0x00000000
-#define mmSCRATCH_ADDR_DEFAULT 0x00000000
-#define mmCP_PFP_ATOMIC_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_PFP_ATOMIC_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_PFP_GDS_ATOMIC0_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_PFP_GDS_ATOMIC0_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_PFP_GDS_ATOMIC1_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_PFP_GDS_ATOMIC1_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_APPEND_ADDR_LO_DEFAULT 0x00000000
-#define mmCP_APPEND_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_APPEND_DATA_LO_DEFAULT 0x00000000
-#define mmCP_APPEND_LAST_CS_FENCE_LO_DEFAULT 0x00000000
-#define mmCP_APPEND_LAST_PS_FENCE_LO_DEFAULT 0x00000000
-#define mmCP_ATOMIC_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_ME_ATOMIC_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_ATOMIC_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_ME_ATOMIC_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_GDS_ATOMIC0_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_ME_GDS_ATOMIC0_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_GDS_ATOMIC0_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_ME_GDS_ATOMIC0_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_GDS_ATOMIC1_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_ME_GDS_ATOMIC1_PREOP_LO_DEFAULT 0x00000000
-#define mmCP_GDS_ATOMIC1_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_ME_GDS_ATOMIC1_PREOP_HI_DEFAULT 0x00000000
-#define mmCP_ME_MC_WADDR_LO_DEFAULT 0x00000000
-#define mmCP_ME_MC_WADDR_HI_DEFAULT 0x00000000
-#define mmCP_ME_MC_WDATA_LO_DEFAULT 0x00000000
-#define mmCP_ME_MC_WDATA_HI_DEFAULT 0x00000000
-#define mmCP_ME_MC_RADDR_LO_DEFAULT 0x00000000
-#define mmCP_ME_MC_RADDR_HI_DEFAULT 0x00000000
-#define mmCP_SEM_WAIT_TIMER_DEFAULT 0x00000000
-#define mmCP_SIG_SEM_ADDR_LO_DEFAULT 0x00000000
-#define mmCP_SIG_SEM_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_WAIT_REG_MEM_TIMEOUT_DEFAULT 0x00000000
-#define mmCP_WAIT_SEM_ADDR_LO_DEFAULT 0x00000000
-#define mmCP_WAIT_SEM_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_DMA_PFP_CONTROL_DEFAULT 0x00000000
-#define mmCP_DMA_ME_CONTROL_DEFAULT 0x00000000
-#define mmCP_COHER_BASE_HI_DEFAULT 0x00000000
-#define mmCP_COHER_START_DELAY_DEFAULT 0x00000020
-#define mmCP_COHER_CNTL_DEFAULT 0x00000000
-#define mmCP_COHER_SIZE_DEFAULT 0x00000000
-#define mmCP_COHER_BASE_DEFAULT 0x00000000
-#define mmCP_COHER_STATUS_DEFAULT 0x00000000
-#define mmCP_DMA_ME_SRC_ADDR_DEFAULT 0x00000000
-#define mmCP_DMA_ME_SRC_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_DMA_ME_DST_ADDR_DEFAULT 0x00000000
-#define mmCP_DMA_ME_DST_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_DMA_ME_COMMAND_DEFAULT 0x00000000
-#define mmCP_DMA_PFP_SRC_ADDR_DEFAULT 0x00000000
-#define mmCP_DMA_PFP_SRC_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_DMA_PFP_DST_ADDR_DEFAULT 0x00000000
-#define mmCP_DMA_PFP_DST_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_DMA_PFP_COMMAND_DEFAULT 0x00000000
-#define mmCP_DMA_CNTL_DEFAULT 0x00080030
-#define mmCP_DMA_READ_TAGS_DEFAULT 0x00000000
-#define mmCP_COHER_SIZE_HI_DEFAULT 0x00000000
-#define mmCP_PFP_IB_CONTROL_DEFAULT 0x00000000
-#define mmCP_PFP_LOAD_CONTROL_DEFAULT 0x00000000
-#define mmCP_SCRATCH_INDEX_DEFAULT 0x00000000
-#define mmCP_SCRATCH_DATA_DEFAULT 0x00000000
-#define mmCP_RB_OFFSET_DEFAULT 0x00000000
-#define mmCP_IB1_OFFSET_DEFAULT 0x00000000
-#define mmCP_IB2_OFFSET_DEFAULT 0x00000000
-#define mmCP_IB1_PREAMBLE_BEGIN_DEFAULT 0x00000000
-#define mmCP_IB1_PREAMBLE_END_DEFAULT 0x00000000
-#define mmCP_IB2_PREAMBLE_BEGIN_DEFAULT 0x00000000
-#define mmCP_IB2_PREAMBLE_END_DEFAULT 0x00000000
-#define mmCP_CE_IB1_OFFSET_DEFAULT 0x00000000
-#define mmCP_CE_IB2_OFFSET_DEFAULT 0x00000000
-#define mmCP_CE_COUNTER_DEFAULT 0x00000000
-#define mmCP_CE_RB_OFFSET_DEFAULT 0x00000000
-#define mmCP_CE_INIT_CMD_BUFSZ_DEFAULT 0x00000000
-#define mmCP_CE_IB1_CMD_BUFSZ_DEFAULT 0x00000000
-#define mmCP_CE_IB2_CMD_BUFSZ_DEFAULT 0x00000000
-#define mmCP_IB1_CMD_BUFSZ_DEFAULT 0x00000000
-#define mmCP_IB2_CMD_BUFSZ_DEFAULT 0x00000000
-#define mmCP_ST_CMD_BUFSZ_DEFAULT 0x00000000
-#define mmCP_CE_INIT_BASE_LO_DEFAULT 0x00000000
-#define mmCP_CE_INIT_BASE_HI_DEFAULT 0x00000000
-#define mmCP_CE_INIT_BUFSZ_DEFAULT 0x00000000
-#define mmCP_CE_IB1_BASE_LO_DEFAULT 0x00000000
-#define mmCP_CE_IB1_BASE_HI_DEFAULT 0x00000000
-#define mmCP_CE_IB1_BUFSZ_DEFAULT 0x00000000
-#define mmCP_CE_IB2_BASE_LO_DEFAULT 0x00000000
-#define mmCP_CE_IB2_BASE_HI_DEFAULT 0x00000000
-#define mmCP_CE_IB2_BUFSZ_DEFAULT 0x00000000
-#define mmCP_IB1_BASE_LO_DEFAULT 0x00000000
-#define mmCP_IB1_BASE_HI_DEFAULT 0x00000000
-#define mmCP_IB1_BUFSZ_DEFAULT 0x00000000
-#define mmCP_IB2_BASE_LO_DEFAULT 0x00000000
-#define mmCP_IB2_BASE_HI_DEFAULT 0x00000000
-#define mmCP_IB2_BUFSZ_DEFAULT 0x00000000
-#define mmCP_ST_BASE_LO_DEFAULT 0x00000000
-#define mmCP_ST_BASE_HI_DEFAULT 0x00000000
-#define mmCP_ST_BUFSZ_DEFAULT 0x00000000
-#define mmCP_EOP_DONE_EVENT_CNTL_DEFAULT 0x00000000
-#define mmCP_EOP_DONE_DATA_CNTL_DEFAULT 0x00000000
-#define mmCP_EOP_DONE_CNTX_ID_DEFAULT 0x00000000
-#define mmCP_PFP_COMPLETION_STATUS_DEFAULT 0x00000000
-#define mmCP_CE_COMPLETION_STATUS_DEFAULT 0x00000000
-#define mmCP_PRED_NOT_VISIBLE_DEFAULT 0x00000000
-#define mmCP_PFP_METADATA_BASE_ADDR_DEFAULT 0x00000000
-#define mmCP_PFP_METADATA_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_CE_METADATA_BASE_ADDR_DEFAULT 0x00000000
-#define mmCP_CE_METADATA_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_DRAW_INDX_INDR_ADDR_DEFAULT 0x00000000
-#define mmCP_DRAW_INDX_INDR_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_DISPATCH_INDR_ADDR_DEFAULT 0x00000000
-#define mmCP_DISPATCH_INDR_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_INDEX_BASE_ADDR_DEFAULT 0x00000000
-#define mmCP_INDEX_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_INDEX_TYPE_DEFAULT 0x00000000
-#define mmCP_GDS_BKUP_ADDR_DEFAULT 0x00000000
-#define mmCP_GDS_BKUP_ADDR_HI_DEFAULT 0x00000000
-#define mmCP_SAMPLE_STATUS_DEFAULT 0x00000000
-#define mmCP_ME_COHER_CNTL_DEFAULT 0x00000000
-#define mmCP_ME_COHER_SIZE_DEFAULT 0x00000000
-#define mmCP_ME_COHER_SIZE_HI_DEFAULT 0x00000000
-#define mmCP_ME_COHER_BASE_DEFAULT 0x00000000
-#define mmCP_ME_COHER_BASE_HI_DEFAULT 0x00000000
-#define mmCP_ME_COHER_STATUS_DEFAULT 0x00000000
-#define mmRLC_GPM_PERF_COUNT_0_DEFAULT 0x00000000
-#define mmRLC_GPM_PERF_COUNT_1_DEFAULT 0x00000000
-#define mmGRBM_GFX_INDEX_DEFAULT 0xe0000000
-#define mmVGT_GSVS_RING_SIZE_DEFAULT 0x00000000
-#define mmVGT_PRIMITIVE_TYPE_DEFAULT 0x00000000
-#define mmVGT_INDEX_TYPE_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_0_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_1_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_2_DEFAULT 0x00000000
-#define mmVGT_STRMOUT_BUFFER_FILLED_SIZE_3_DEFAULT 0x00000000
-#define mmVGT_MAX_VTX_INDX_DEFAULT 0x00000000
-#define mmVGT_MIN_VTX_INDX_DEFAULT 0x00000000
-#define mmVGT_INDX_OFFSET_DEFAULT 0x00000000
-#define mmVGT_MULTI_PRIM_IB_RESET_EN_DEFAULT 0x00000000
-#define mmVGT_NUM_INDICES_DEFAULT 0x00000000
-#define mmVGT_NUM_INSTANCES_DEFAULT 0x00000000
-#define mmVGT_TF_RING_SIZE_DEFAULT 0x00002000
-#define mmVGT_HS_OFFCHIP_PARAM_DEFAULT 0x00000000
-#define mmVGT_TF_MEMORY_BASE_DEFAULT 0x00000000
-#define mmVGT_TF_MEMORY_BASE_HI_DEFAULT 0x00000000
-#define mmWD_POS_BUF_BASE_DEFAULT 0x00000000
-#define mmWD_POS_BUF_BASE_HI_DEFAULT 0x00000000
-#define mmWD_CNTL_SB_BUF_BASE_DEFAULT 0x00000000
-#define mmWD_CNTL_SB_BUF_BASE_HI_DEFAULT 0x00000000
-#define mmWD_INDEX_BUF_BASE_DEFAULT 0x00000000
-#define mmWD_INDEX_BUF_BASE_HI_DEFAULT 0x00000000
-#define mmIA_MULTI_VGT_PARAM_DEFAULT 0x006000ff
-#define mmVGT_OBJECT_ID_DEFAULT 0x00000000
-#define mmVGT_INSTANCE_BASE_ID_DEFAULT 0x00000000
-#define mmPA_SU_LINE_STIPPLE_VALUE_DEFAULT 0x00000000
-#define mmPA_SC_LINE_STIPPLE_STATE_DEFAULT 0x00000000
-#define mmPA_SC_SCREEN_EXTENT_MIN_0_DEFAULT 0x7fff7fff
-#define mmPA_SC_SCREEN_EXTENT_MAX_0_DEFAULT 0x80008000
-#define mmPA_SC_SCREEN_EXTENT_MIN_1_DEFAULT 0x7fff7fff
-#define mmPA_SC_SCREEN_EXTENT_MAX_1_DEFAULT 0x80008000
-#define mmPA_SC_P3D_TRAP_SCREEN_HV_EN_DEFAULT 0x00000000
-#define mmPA_SC_P3D_TRAP_SCREEN_H_DEFAULT 0x00000000
-#define mmPA_SC_P3D_TRAP_SCREEN_V_DEFAULT 0x00000000
-#define mmPA_SC_P3D_TRAP_SCREEN_OCCURRENCE_DEFAULT 0x00000000
-#define mmPA_SC_P3D_TRAP_SCREEN_COUNT_DEFAULT 0x00000000
-#define mmPA_SC_HP3D_TRAP_SCREEN_HV_EN_DEFAULT 0x00000000
-#define mmPA_SC_HP3D_TRAP_SCREEN_H_DEFAULT 0x00000000
-#define mmPA_SC_HP3D_TRAP_SCREEN_V_DEFAULT 0x00000000
-#define mmPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE_DEFAULT 0x00000000
-#define mmPA_SC_HP3D_TRAP_SCREEN_COUNT_DEFAULT 0x00000000
-#define mmPA_SC_TRAP_SCREEN_HV_EN_DEFAULT 0x00000000
-#define mmPA_SC_TRAP_SCREEN_H_DEFAULT 0x00000000
-#define mmPA_SC_TRAP_SCREEN_V_DEFAULT 0x00000000
-#define mmPA_SC_TRAP_SCREEN_OCCURRENCE_DEFAULT 0x00000000
-#define mmPA_SC_TRAP_SCREEN_COUNT_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_BASE_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_SIZE_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_MASK_DEFAULT 0x0000cf80
-#define mmSQ_THREAD_TRACE_TOKEN_MASK_DEFAULT 0x00ffffff
-#define mmSQ_THREAD_TRACE_PERF_MASK_DEFAULT 0xffffffff
-#define mmSQ_THREAD_TRACE_CTRL_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_MODE_DEFAULT 0x02049249
-#define mmSQ_THREAD_TRACE_BASE2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_TOKEN_MASK2_DEFAULT 0xffffffff
-#define mmSQ_THREAD_TRACE_WPTR_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_STATUS_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_HIWATER_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_CNTR_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_USERDATA_0_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_USERDATA_1_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_USERDATA_2_DEFAULT 0x00000000
-#define mmSQ_THREAD_TRACE_USERDATA_3_DEFAULT 0x00000000
-#define mmSQC_CACHES_DEFAULT 0x00000000
-#define mmSQC_WRITEBACK_DEFAULT 0x00000000
-#define mmTA_CS_BC_BASE_ADDR_DEFAULT 0x00000000
-#define mmTA_CS_BC_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmTA_GRAD_ADJ_UCONFIG_DEFAULT 0x40000040
-#define mmDB_OCCLUSION_COUNT0_LOW_DEFAULT 0x00000000
-#define mmDB_OCCLUSION_COUNT0_HI_DEFAULT 0x00000000
-#define mmDB_OCCLUSION_COUNT1_LOW_DEFAULT 0x00000000
-#define mmDB_OCCLUSION_COUNT1_HI_DEFAULT 0x00000000
-#define mmDB_OCCLUSION_COUNT2_LOW_DEFAULT 0x00000000
-#define mmDB_OCCLUSION_COUNT2_HI_DEFAULT 0x00000000
-#define mmDB_OCCLUSION_COUNT3_LOW_DEFAULT 0x00000000
-#define mmDB_OCCLUSION_COUNT3_HI_DEFAULT 0x00000000
-#define mmDB_ZPASS_COUNT_LOW_DEFAULT 0x00000000
-#define mmDB_ZPASS_COUNT_HI_DEFAULT 0x00000000
-#define mmGDS_RD_ADDR_DEFAULT 0x00000000
-#define mmGDS_RD_DATA_DEFAULT 0x00000000
-#define mmGDS_RD_BURST_ADDR_DEFAULT 0x00000000
-#define mmGDS_RD_BURST_COUNT_DEFAULT 0x00000000
-#define mmGDS_RD_BURST_DATA_DEFAULT 0x00000000
-#define mmGDS_WR_ADDR_DEFAULT 0x00000000
-#define mmGDS_WR_DATA_DEFAULT 0x00000000
-#define mmGDS_WR_BURST_ADDR_DEFAULT 0x00000000
-#define mmGDS_WR_BURST_DATA_DEFAULT 0x00000000
-#define mmGDS_WRITE_COMPLETE_DEFAULT 0x00000000
-#define mmGDS_ATOM_CNTL_DEFAULT 0x00000000
-#define mmGDS_ATOM_COMPLETE_DEFAULT 0x00000001
-#define mmGDS_ATOM_BASE_DEFAULT 0x00000000
-#define mmGDS_ATOM_SIZE_DEFAULT 0x00000000
-#define mmGDS_ATOM_OFFSET0_DEFAULT 0x00000000
-#define mmGDS_ATOM_OFFSET1_DEFAULT 0x00000000
-#define mmGDS_ATOM_DST_DEFAULT 0x00000000
-#define mmGDS_ATOM_OP_DEFAULT 0x00000000
-#define mmGDS_ATOM_SRC0_DEFAULT 0x00000000
-#define mmGDS_ATOM_SRC0_U_DEFAULT 0x00000000
-#define mmGDS_ATOM_SRC1_DEFAULT 0x00000000
-#define mmGDS_ATOM_SRC1_U_DEFAULT 0x00000000
-#define mmGDS_ATOM_READ0_DEFAULT 0x00000000
-#define mmGDS_ATOM_READ0_U_DEFAULT 0x00000000
-#define mmGDS_ATOM_READ1_DEFAULT 0x00000000
-#define mmGDS_ATOM_READ1_U_DEFAULT 0x00000000
-#define mmGDS_GWS_RESOURCE_CNTL_DEFAULT 0x00000000
-#define mmGDS_GWS_RESOURCE_DEFAULT 0x00000000
-#define mmGDS_GWS_RESOURCE_CNT_DEFAULT 0x00000000
-#define mmGDS_OA_CNTL_DEFAULT 0x00000000
-#define mmGDS_OA_COUNTER_DEFAULT 0x00000000
-#define mmGDS_OA_ADDRESS_DEFAULT 0x00000000
-#define mmGDS_OA_INCDEC_DEFAULT 0x00000000
-#define mmGDS_OA_RING_SIZE_DEFAULT 0x00000000
-#define mmSPI_CONFIG_CNTL_DEFAULT 0x0062c688
-#define mmSPI_CONFIG_CNTL_1_DEFAULT 0x01000104
-#define mmSPI_CONFIG_CNTL_2_DEFAULT 0x00000011
-
-
-// addressBlock: gc_perfddec
-#define mmCPG_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmCPG_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmCPG_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmCPG_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmCPC_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmCPC_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmCPC_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmCPC_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmCPF_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmCPF_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmCPF_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmCPF_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmCPF_LATENCY_STATS_DATA_DEFAULT 0x00000000
-#define mmCPG_LATENCY_STATS_DATA_DEFAULT 0x00000000
-#define mmCPC_LATENCY_STATS_DATA_DEFAULT 0x00000000
-#define mmGRBM_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmGRBM_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmGRBM_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmGRBM_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmGRBM_SE0_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmGRBM_SE0_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmGRBM_SE1_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmGRBM_SE1_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmGRBM_SE2_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmGRBM_SE2_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmGRBM_SE3_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmGRBM_SE3_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER4_LO_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER4_HI_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER5_LO_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER5_HI_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER6_LO_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER6_HI_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER7_LO_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER7_HI_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER4_HI_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER4_LO_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER5_HI_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER5_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER4_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER4_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER5_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER5_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER6_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER6_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER7_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER7_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER8_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER8_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER9_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER9_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER10_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER10_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER11_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER11_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER12_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER12_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER13_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER13_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER14_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER14_HI_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER15_LO_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER15_HI_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmTA_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmTA_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmTA_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmTA_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmTD_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmTD_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmTD_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmTD_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmTCC_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmTCC_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmTCC_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmTCC_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmTCC_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmTCC_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmTCC_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmTCC_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmTCA_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmTCA_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmTCA_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmTCA_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmTCA_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmTCA_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmTCA_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmTCA_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER3_HI_DEFAULT 0x00000000
-#define mmRLC_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmRLC_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmRLC_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmRLC_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER0_LO_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER0_HI_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER1_LO_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER1_HI_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER2_LO_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER2_HI_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER3_LO_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER3_HI_DEFAULT 0x00000000
-
-
-// addressBlock: gc_utcl2_atcl2pfcntrdec
-#define mmATC_L2_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmATC_L2_PERFCOUNTER_HI_DEFAULT 0x00000000
-
-
-// addressBlock: gc_utcl2_vml2prdec
-#define mmMC_VM_L2_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER_HI_DEFAULT 0x00000000
-
-
-// addressBlock: gc_perfsdec
-#define mmCPG_PERFCOUNTER1_SELECT_DEFAULT 0x11000401
-#define mmCPG_PERFCOUNTER0_SELECT1_DEFAULT 0x11000401
-#define mmCPG_PERFCOUNTER0_SELECT_DEFAULT 0x11000401
-#define mmCPC_PERFCOUNTER1_SELECT_DEFAULT 0x11000401
-#define mmCPC_PERFCOUNTER0_SELECT1_DEFAULT 0x11000401
-#define mmCPF_PERFCOUNTER1_SELECT_DEFAULT 0x11000401
-#define mmCPF_PERFCOUNTER0_SELECT1_DEFAULT 0x11000401
-#define mmCPF_PERFCOUNTER0_SELECT_DEFAULT 0x11000401
-#define mmCP_PERFMON_CNTL_DEFAULT 0x00000000
-#define mmCPC_PERFCOUNTER0_SELECT_DEFAULT 0x11000401
-#define mmCPF_TC_PERF_COUNTER_WINDOW_SELECT_DEFAULT 0x00000000
-#define mmCPG_TC_PERF_COUNTER_WINDOW_SELECT_DEFAULT 0x00000000
-#define mmCPF_LATENCY_STATS_SELECT_DEFAULT 0x00000000
-#define mmCPG_LATENCY_STATS_SELECT_DEFAULT 0x00000000
-#define mmCPC_LATENCY_STATS_SELECT_DEFAULT 0x00000000
-#define mmCP_DRAW_OBJECT_DEFAULT 0x00000000
-#define mmCP_DRAW_OBJECT_COUNTER_DEFAULT 0x00000000
-#define mmCP_DRAW_WINDOW_MASK_HI_DEFAULT 0x00000000
-#define mmCP_DRAW_WINDOW_HI_DEFAULT 0x00000000
-#define mmCP_DRAW_WINDOW_LO_DEFAULT 0x00000000
-#define mmCP_DRAW_WINDOW_CNTL_DEFAULT 0x00000007
-#define mmGRBM_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmGRBM_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmGRBM_SE0_PERFCOUNTER_SELECT_DEFAULT 0x00000000
-#define mmGRBM_SE1_PERFCOUNTER_SELECT_DEFAULT 0x00000000
-#define mmGRBM_SE2_PERFCOUNTER_SELECT_DEFAULT 0x00000000
-#define mmGRBM_SE3_PERFCOUNTER_SELECT_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmWD_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmIA_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER1_SELECT1_DEFAULT 0x00000000
-#define mmVGT_PERFCOUNTER_SEID_MASK_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER1_SELECT1_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmPA_SU_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER4_SELECT_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER5_SELECT_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER6_SELECT_DEFAULT 0x00000000
-#define mmPA_SC_PERFCOUNTER7_SELECT_DEFAULT 0x00000000
-#define mmSPI_PERFCOUNTER0_SELECT_DEFAULT 0x000fffff
-#define mmSPI_PERFCOUNTER1_SELECT_DEFAULT 0x000fffff
-#define mmSPI_PERFCOUNTER2_SELECT_DEFAULT 0x000fffff
-#define mmSPI_PERFCOUNTER3_SELECT_DEFAULT 0x000fffff
-#define mmSPI_PERFCOUNTER0_SELECT1_DEFAULT 0x000fffff
-#define mmSPI_PERFCOUNTER1_SELECT1_DEFAULT 0x000fffff
-#define mmSPI_PERFCOUNTER2_SELECT1_DEFAULT 0x000fffff
-#define mmSPI_PERFCOUNTER3_SELECT1_DEFAULT 0x000fffff
-#define mmSPI_PERFCOUNTER4_SELECT_DEFAULT 0x000000ff
-#define mmSPI_PERFCOUNTER5_SELECT_DEFAULT 0x000000ff
-#define mmSPI_PERFCOUNTER_BINS_DEFAULT 0xfcb87430
-#define mmSQ_PERFCOUNTER0_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER1_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER2_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER3_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER4_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER5_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER6_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER7_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER8_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER9_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER10_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER11_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER12_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER13_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER14_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER15_SELECT_DEFAULT 0x0f0ff000
-#define mmSQ_PERFCOUNTER_CTRL_DEFAULT 0x00000000
-#define mmSQ_PERFCOUNTER_MASK_DEFAULT 0xffffffff
-#define mmSQ_PERFCOUNTER_CTRL2_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmSX_PERFCOUNTER1_SELECT1_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmGDS_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmTA_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmTA_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmTA_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmTD_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmTD_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmTD_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmTCP_PERFCOUNTER0_SELECT_DEFAULT 0x000fffff
-#define mmTCP_PERFCOUNTER0_SELECT1_DEFAULT 0x000fffff
-#define mmTCP_PERFCOUNTER1_SELECT_DEFAULT 0x000fffff
-#define mmTCP_PERFCOUNTER1_SELECT1_DEFAULT 0x000fffff
-#define mmTCP_PERFCOUNTER2_SELECT_DEFAULT 0x000003ff
-#define mmTCP_PERFCOUNTER3_SELECT_DEFAULT 0x000003ff
-#define mmTCC_PERFCOUNTER0_SELECT_DEFAULT 0x000fffff
-#define mmTCC_PERFCOUNTER0_SELECT1_DEFAULT 0x000fffff
-#define mmTCC_PERFCOUNTER1_SELECT_DEFAULT 0x000fffff
-#define mmTCC_PERFCOUNTER1_SELECT1_DEFAULT 0x000fffff
-#define mmTCC_PERFCOUNTER2_SELECT_DEFAULT 0x000003ff
-#define mmTCC_PERFCOUNTER3_SELECT_DEFAULT 0x000003ff
-#define mmTCA_PERFCOUNTER0_SELECT_DEFAULT 0x000fffff
-#define mmTCA_PERFCOUNTER0_SELECT1_DEFAULT 0x000fffff
-#define mmTCA_PERFCOUNTER1_SELECT_DEFAULT 0x000fffff
-#define mmTCA_PERFCOUNTER1_SELECT1_DEFAULT 0x000fffff
-#define mmTCA_PERFCOUNTER2_SELECT_DEFAULT 0x000003ff
-#define mmTCA_PERFCOUNTER3_SELECT_DEFAULT 0x000003ff
-#define mmCB_PERFCOUNTER_FILTER_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmCB_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER1_SELECT1_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmDB_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmRLC_SPM_PERFMON_CNTL_DEFAULT 0x00000000
-#define mmRLC_SPM_PERFMON_RING_BASE_LO_DEFAULT 0x00000000
-#define mmRLC_SPM_PERFMON_RING_BASE_HI_DEFAULT 0x00000000
-#define mmRLC_SPM_PERFMON_RING_SIZE_DEFAULT 0x00000000
-#define mmRLC_SPM_PERFMON_SEGMENT_SIZE_DEFAULT 0x00000000
-#define mmRLC_SPM_SE_MUXSEL_ADDR_DEFAULT 0x00000000
-#define mmRLC_SPM_SE_MUXSEL_DATA_DEFAULT 0x00000000
-#define mmRLC_SPM_CPG_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_CPC_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_CPF_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_CB_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_DB_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_PA_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_GDS_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_IA_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_SC_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_TCC_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_TCA_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_TCP_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_TA_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_TD_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_VGT_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_SPI_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_SQG_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_SX_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_SPM_GLOBAL_MUXSEL_ADDR_DEFAULT 0x00000000
-#define mmRLC_SPM_GLOBAL_MUXSEL_DATA_DEFAULT 0x00000000
-#define mmRLC_SPM_RING_RDPTR_DEFAULT 0x00000000
-#define mmRLC_SPM_SEGMENT_THRESHOLD_DEFAULT 0x00000000
-#define mmRLC_SPM_RMI_PERFMON_SAMPLE_DELAY_DEFAULT 0x00000000
-#define mmRLC_PERFMON_CLK_CNTL_DEFAULT 0x00000001
-#define mmRLC_PERFMON_CNTL_DEFAULT 0x00000000
-#define mmRLC_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmRLC_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_PERF_CNT_CNTL_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_PERF_CNT_WR_ADDR_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_PERF_CNT_WR_DATA_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_PERF_CNT_RD_ADDR_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_PERF_CNT_RD_DATA_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER0_SELECT_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER0_SELECT1_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER1_SELECT_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER2_SELECT_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER2_SELECT1_DEFAULT 0x00000000
-#define mmRMI_PERFCOUNTER3_SELECT_DEFAULT 0x00000000
-#define mmRMI_PERF_COUNTER_CNTL_DEFAULT 0x00080240
-
-
-// addressBlock: gc_utcl2_atcl2pfcntldec
-#define mmATC_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmATC_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmATC_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-
-
-// addressBlock: gc_utcl2_vml2pldec
-#define mmMC_VM_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER2_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER3_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER4_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER5_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER6_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER7_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-
-
-// addressBlock: gc_rlcpdec
-#define mmRLC_CNTL_DEFAULT 0x00000001
-#define mmRLC_STAT_DEFAULT 0x00000000
-#define mmRLC_SAFE_MODE_DEFAULT 0x00000000
-#define mmRLC_MEM_SLP_CNTL_DEFAULT 0x00020200
-#define mmSMU_RLC_RESPONSE_DEFAULT 0x00000000
-#define mmRLC_RLCV_SAFE_MODE_DEFAULT 0x00000000
-#define mmRLC_SMU_SAFE_MODE_DEFAULT 0x00000000
-#define mmRLC_RLCV_COMMAND_DEFAULT 0x00000000
-#define mmRLC_REFCLOCK_TIMESTAMP_LSB_DEFAULT 0x00000000
-#define mmRLC_REFCLOCK_TIMESTAMP_MSB_DEFAULT 0x00000000
-#define mmRLC_GPM_TIMER_INT_0_DEFAULT 0x00000000
-#define mmRLC_GPM_TIMER_INT_1_DEFAULT 0x00000000
-#define mmRLC_GPM_TIMER_INT_2_DEFAULT 0x00000000
-#define mmRLC_GPM_TIMER_CTRL_DEFAULT 0x00000000
-#define mmRLC_LB_CNTR_MAX_DEFAULT 0xffffffff
-#define mmRLC_GPM_TIMER_STAT_DEFAULT 0x00000000
-#define mmRLC_GPM_TIMER_INT_3_DEFAULT 0x00000000
-#define mmRLC_SERDES_WR_NONCU_MASTER_MASK_1_DEFAULT 0x00000000
-#define mmRLC_SERDES_NONCU_MASTER_BUSY_1_DEFAULT 0x00000000
-#define mmRLC_INT_STAT_DEFAULT 0x00000000
-#define mmRLC_LB_CNTL_DEFAULT 0x00000010
-#define mmRLC_MGCG_CTRL_DEFAULT 0x00018800
-#define mmRLC_LB_CNTR_INIT_DEFAULT 0x00000000
-#define mmRLC_LOAD_BALANCE_CNTR_DEFAULT 0x00000000
-#define mmRLC_JUMP_TABLE_RESTORE_DEFAULT 0x00000000
-#define mmRLC_PG_DELAY_2_DEFAULT 0x00000004
-#define mmRLC_GPU_CLOCK_COUNT_LSB_DEFAULT 0x00000000
-#define mmRLC_GPU_CLOCK_COUNT_MSB_DEFAULT 0x00000000
-#define mmRLC_CAPTURE_GPU_CLOCK_COUNT_DEFAULT 0x00000000
-#define mmRLC_UCODE_CNTL_DEFAULT 0x00000000
-#define mmRLC_GPM_THREAD_RESET_DEFAULT 0x0000000f
-#define mmRLC_GPM_CP_DMA_COMPLETE_T0_DEFAULT 0x00000000
-#define mmRLC_GPM_CP_DMA_COMPLETE_T1_DEFAULT 0x00000000
-#define mmRLC_FIREWALL_VIOLATION_DEFAULT 0x00000000
-#define mmRLC_GPM_STAT_DEFAULT 0x00100016
-#define mmRLC_GPU_CLOCK_32_RES_SEL_DEFAULT 0x00000000
-#define mmRLC_GPU_CLOCK_32_DEFAULT 0x00000000
-#define mmRLC_PG_CNTL_DEFAULT 0x00000000
-#define mmRLC_GPM_THREAD_PRIORITY_DEFAULT 0x08080808
-#define mmRLC_GPM_THREAD_ENABLE_DEFAULT 0x00000001
-#define mmRLC_CGTT_MGCG_OVERRIDE_DEFAULT 0xffffffff
-#define mmRLC_CGCG_CGLS_CTRL_DEFAULT 0x0001003c
-#define mmRLC_CGCG_RAMP_CTRL_DEFAULT 0x00021711
-#define mmRLC_DYN_PG_STATUS_DEFAULT 0xffffffff
-#define mmRLC_DYN_PG_REQUEST_DEFAULT 0xffffffff
-#define mmRLC_PG_DELAY_DEFAULT 0x00101010
-#define mmRLC_CU_STATUS_DEFAULT 0x00000000
-#define mmRLC_LB_INIT_CU_MASK_DEFAULT 0xffffffff
-#define mmRLC_LB_ALWAYS_ACTIVE_CU_MASK_DEFAULT 0x00000001
-#define mmRLC_LB_PARAMS_DEFAULT 0x00601008
-#define mmRLC_THREAD1_DELAY_DEFAULT 0x00400401
-#define mmRLC_PG_ALWAYS_ON_CU_MASK_DEFAULT 0x00000003
-#define mmRLC_MAX_PG_CU_DEFAULT 0x0000000b
-#define mmRLC_AUTO_PG_CTRL_DEFAULT 0x00000000
-#define mmRLC_SMU_GRBM_REG_SAVE_CTRL_DEFAULT 0x00000000
-#define mmRLC_SERDES_RD_MASTER_INDEX_DEFAULT 0x00000000
-#define mmRLC_SERDES_RD_DATA_0_DEFAULT 0x00000000
-#define mmRLC_SERDES_RD_DATA_1_DEFAULT 0x00000000
-#define mmRLC_SERDES_RD_DATA_2_DEFAULT 0x00000000
-#define mmRLC_SERDES_WR_CU_MASTER_MASK_DEFAULT 0x00000000
-#define mmRLC_SERDES_WR_NONCU_MASTER_MASK_DEFAULT 0x00000000
-#define mmRLC_SERDES_WR_CTRL_DEFAULT 0x00000000
-#define mmRLC_SERDES_WR_DATA_DEFAULT 0x00000000
-#define mmRLC_SERDES_CU_MASTER_BUSY_DEFAULT 0x00000000
-#define mmRLC_SERDES_NONCU_MASTER_BUSY_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_0_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_1_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_2_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_3_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_4_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_5_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_6_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_7_DEFAULT 0x00000000
-#define mmRLC_GPM_SCRATCH_ADDR_DEFAULT 0x00000000
-#define mmRLC_GPM_SCRATCH_DATA_DEFAULT 0x00000000
-#define mmRLC_STATIC_PG_STATUS_DEFAULT 0xffffffff
-#define mmRLC_SPM_MC_CNTL_DEFAULT 0x00000000
-#define mmRLC_SPM_INT_CNTL_DEFAULT 0x00000000
-#define mmRLC_SPM_INT_STATUS_DEFAULT 0x00000000
-#define mmRLC_SMU_MESSAGE_DEFAULT 0x00000000
-#define mmRLC_GPM_LOG_SIZE_DEFAULT 0x00000000
-#define mmRLC_PG_DELAY_3_DEFAULT 0x00000000
-#define mmRLC_GPR_REG1_DEFAULT 0x00000000
-#define mmRLC_GPR_REG2_DEFAULT 0x00000000
-#define mmRLC_GPM_LOG_CONT_DEFAULT 0x00000000
-#define mmRLC_GPM_INT_DISABLE_TH0_DEFAULT 0x00000000
-#define mmRLC_GPM_INT_DISABLE_TH1_DEFAULT 0x00000000
-#define mmRLC_GPM_INT_FORCE_TH0_DEFAULT 0x00000000
-#define mmRLC_GPM_INT_FORCE_TH1_DEFAULT 0x00000000
-#define mmRLC_SRM_CNTL_DEFAULT 0x00000002
-#define mmRLC_SRM_ARAM_ADDR_DEFAULT 0x00000000
-#define mmRLC_SRM_ARAM_DATA_DEFAULT 0x00000000
-#define mmRLC_SRM_DRAM_ADDR_DEFAULT 0x00000000
-#define mmRLC_SRM_DRAM_DATA_DEFAULT 0x00000000
-#define mmRLC_SRM_GPM_COMMAND_DEFAULT 0x00000000
-#define mmRLC_SRM_GPM_COMMAND_STATUS_DEFAULT 0x00000000
-#define mmRLC_SRM_RLCV_COMMAND_DEFAULT 0x00000000
-#define mmRLC_SRM_RLCV_COMMAND_STATUS_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_ADDR_0_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_ADDR_1_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_ADDR_2_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_ADDR_3_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_ADDR_4_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_ADDR_5_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_ADDR_6_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_ADDR_7_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_DATA_0_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_DATA_1_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_DATA_2_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_DATA_3_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_DATA_4_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_DATA_5_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_DATA_6_DEFAULT 0x00000000
-#define mmRLC_SRM_INDEX_CNTL_DATA_7_DEFAULT 0x00000000
-#define mmRLC_SRM_STAT_DEFAULT 0x00000000
-#define mmRLC_SRM_GPM_ABORT_DEFAULT 0x00000000
-#define mmRLC_CSIB_ADDR_LO_DEFAULT 0x00000000
-#define mmRLC_CSIB_ADDR_HI_DEFAULT 0x00000000
-#define mmRLC_CSIB_LENGTH_DEFAULT 0x00000000
-#define mmRLC_SMU_COMMAND_DEFAULT 0x00000000
-#define mmRLC_CP_SCHEDULERS_DEFAULT 0x58504840
-#define mmRLC_SMU_ARGUMENT_1_DEFAULT 0x00000000
-#define mmRLC_SMU_ARGUMENT_2_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_8_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_9_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_10_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_11_DEFAULT 0x00000000
-#define mmRLC_GPM_GENERAL_12_DEFAULT 0x00000000
-#define mmRLC_GPM_UTCL1_CNTL_0_DEFAULT 0x00000080
-#define mmRLC_GPM_UTCL1_CNTL_1_DEFAULT 0x00000080
-#define mmRLC_GPM_UTCL1_CNTL_2_DEFAULT 0x00000080
-#define mmRLC_SPM_UTCL1_CNTL_DEFAULT 0x00000080
-#define mmRLC_UTCL1_STATUS_2_DEFAULT 0x00000000
-#define mmRLC_LB_THR_CONFIG_2_DEFAULT 0x00000000
-#define mmRLC_LB_THR_CONFIG_3_DEFAULT 0x00000000
-#define mmRLC_LB_THR_CONFIG_4_DEFAULT 0x00000000
-#define mmRLC_SPM_UTCL1_ERROR_1_DEFAULT 0x00000000
-#define mmRLC_SPM_UTCL1_ERROR_2_DEFAULT 0x00000000
-#define mmRLC_GPM_UTCL1_TH0_ERROR_1_DEFAULT 0x00000000
-#define mmRLC_LB_THR_CONFIG_1_DEFAULT 0x00000000
-#define mmRLC_GPM_UTCL1_TH0_ERROR_2_DEFAULT 0x00000000
-#define mmRLC_GPM_UTCL1_TH1_ERROR_1_DEFAULT 0x00000000
-#define mmRLC_GPM_UTCL1_TH1_ERROR_2_DEFAULT 0x00000000
-#define mmRLC_GPM_UTCL1_TH2_ERROR_1_DEFAULT 0x00000000
-#define mmRLC_GPM_UTCL1_TH2_ERROR_2_DEFAULT 0x00000000
-#define mmRLC_CGCG_CGLS_CTRL_3D_DEFAULT 0x0001003c
-#define mmRLC_CGCG_RAMP_CTRL_3D_DEFAULT 0x00021711
-#define mmRLC_SEMAPHORE_0_DEFAULT 0x00000000
-#define mmRLC_SEMAPHORE_1_DEFAULT 0x00000000
-#define mmRLC_CP_EOF_INT_DEFAULT 0x00000000
-#define mmRLC_CP_EOF_INT_CNT_DEFAULT 0x00000000
-#define mmRLC_SPARE_INT_DEFAULT 0x00000000
-#define mmRLC_PREWALKER_UTCL1_CNTL_DEFAULT 0x00000080
-#define mmRLC_PREWALKER_UTCL1_TRIG_DEFAULT 0x00000000
-#define mmRLC_PREWALKER_UTCL1_ADDR_LSB_DEFAULT 0x00000000
-#define mmRLC_PREWALKER_UTCL1_ADDR_MSB_DEFAULT 0x00000000
-#define mmRLC_PREWALKER_UTCL1_SIZE_LSB_DEFAULT 0x00000000
-#define mmRLC_PREWALKER_UTCL1_SIZE_MSB_DEFAULT 0x00000000
-#define mmRLC_DSM_TRIG_DEFAULT 0x00000000
-#define mmRLC_UTCL1_STATUS_DEFAULT 0x00000000
-#define mmRLC_R2I_CNTL_0_DEFAULT 0x00000000
-#define mmRLC_R2I_CNTL_1_DEFAULT 0x00000000
-#define mmRLC_R2I_CNTL_2_DEFAULT 0x00000000
-#define mmRLC_R2I_CNTL_3_DEFAULT 0x00000000
-#define mmRLC_UTCL2_CNTL_DEFAULT 0x00000000
-#define mmRLC_LBPW_CU_STAT_DEFAULT 0x00000000
-#define mmRLC_DS_CNTL_DEFAULT 0x00030003
-#define mmRLC_RLCV_SPARE_INT_DEFAULT 0x00000000
-
-
-// addressBlock: gc_pwrdec
-#define mmCGTS_SM_CTRL_REG_DEFAULT 0x00600200
-#define mmCGTS_RD_CTRL_REG_DEFAULT 0x00000000
-#define mmCGTS_RD_REG_DEFAULT 0x00000000
-#define mmCGTS_TCC_DISABLE_DEFAULT 0x00000000
-#define mmCGTS_USER_TCC_DISABLE_DEFAULT 0x00000000
-#define mmCGTS_CU0_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU0_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU0_TA_SQC_CTRL_REG_DEFAULT 0x00040007
-#define mmCGTS_CU0_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU0_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU1_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU1_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU1_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU1_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU1_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU2_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU2_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU2_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU2_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU2_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU3_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU3_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU3_TA_SQC_CTRL_REG_DEFAULT 0x00040007
-#define mmCGTS_CU3_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU3_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU4_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU4_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU4_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU4_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU4_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU5_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU5_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU5_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU5_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU5_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU6_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU6_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU6_TA_SQC_CTRL_REG_DEFAULT 0x00040007
-#define mmCGTS_CU6_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU6_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU7_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU7_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU7_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU7_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU7_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU8_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU8_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU8_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU8_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU8_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU9_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU9_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU9_TA_SQC_CTRL_REG_DEFAULT 0x00040007
-#define mmCGTS_CU9_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU9_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU10_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU10_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU10_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU10_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU10_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU11_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU11_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU11_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU11_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU11_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU12_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU12_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU12_TA_SQC_CTRL_REG_DEFAULT 0x00040007
-#define mmCGTS_CU12_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU12_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU13_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU13_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU13_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU13_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU13_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU14_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU14_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU14_TA_SQC_CTRL_REG_DEFAULT 0x00000007
-#define mmCGTS_CU14_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU14_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU15_SP0_CTRL_REG_DEFAULT 0x00010000
-#define mmCGTS_CU15_LDS_SQ_CTRL_REG_DEFAULT 0x00030002
-#define mmCGTS_CU15_TA_SQC_CTRL_REG_DEFAULT 0x00040007
-#define mmCGTS_CU15_SP1_CTRL_REG_DEFAULT 0x00060005
-#define mmCGTS_CU15_TD_TCP_CTRL_REG_DEFAULT 0x00090008
-#define mmCGTS_CU0_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU1_TCPI_CTRL_REG_DEFAULT 0x00000001
-#define mmCGTS_CU2_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU3_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU4_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU5_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU6_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU7_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU8_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU9_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU10_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU11_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU12_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU13_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU14_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTS_CU15_TCPI_CTRL_REG_DEFAULT 0x0000000a
-#define mmCGTT_SPI_CLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_PC_CLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_BCI_CLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_VGT_CLK_CTRL_DEFAULT 0x00018100
-#define mmCGTT_IA_CLK_CTRL_DEFAULT 0x06000100
-#define mmCGTT_WD_CLK_CTRL_DEFAULT 0x00018100
-#define mmCGTT_PA_CLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_SC_CLK_CTRL0_DEFAULT 0x00000100
-#define mmCGTT_SC_CLK_CTRL1_DEFAULT 0x00000100
-#define mmCGTT_SQ_CLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_SQG_CLK_CTRL_DEFAULT 0x00000100
-#define mmSQ_ALU_CLK_CTRL_DEFAULT 0x00000000
-#define mmSQ_TEX_CLK_CTRL_DEFAULT 0x00000000
-#define mmSQ_LDS_CLK_CTRL_DEFAULT 0x00000000
-#define mmSQ_POWER_THROTTLE_DEFAULT 0x3fff3fff
-#define mmSQ_POWER_THROTTLE2_DEFAULT 0x18800004
-#define mmCGTT_SX_CLK_CTRL0_DEFAULT 0x00000100
-#define mmCGTT_SX_CLK_CTRL1_DEFAULT 0x00000100
-#define mmCGTT_SX_CLK_CTRL2_DEFAULT 0x00000100
-#define mmCGTT_SX_CLK_CTRL3_DEFAULT 0x00000100
-#define mmCGTT_SX_CLK_CTRL4_DEFAULT 0x00000100
-#define mmTD_CGTT_CTRL_DEFAULT 0x00000100
-#define mmTA_CGTT_CTRL_DEFAULT 0x00000100
-#define mmCGTT_TCPI_CLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_TCI_CLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_GDS_CLK_CTRL_DEFAULT 0x00000100
-#define mmDB_CGTT_CLK_CTRL_0_DEFAULT 0x00000100
-#define mmCB_CGTT_SCLK_CTRL_DEFAULT 0x00000100
-#define mmTCC_CGTT_SCLK_CTRL_DEFAULT 0x00000100
-#define mmTCA_CGTT_SCLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_CP_CLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_CPF_CLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_CPC_CLK_CTRL_DEFAULT 0x00000100
-#define mmRLC_PWR_CTRL_DEFAULT 0x00000000
-#define mmCGTT_RLC_CLK_CTRL_DEFAULT 0x00000100
-#define mmRLC_GFX_RM_CNTL_DEFAULT 0x00000000
-#define mmRMI_CGTT_SCLK_CTRL_DEFAULT 0x00000100
-#define mmCGTT_TCPF_CLK_CTRL_DEFAULT 0x00000100
-
-
-// addressBlock: gc_ea_pwrdec
-#define mmGCEA_CGTT_CLK_CTRL_DEFAULT 0x00000100
-
-
-// addressBlock: gc_utcl2_vmsharedhvdec
-#define mmMC_VM_FB_SIZE_OFFSET_VF0_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF1_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF2_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF3_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF4_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF5_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF6_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF7_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF8_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF9_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF10_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF11_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF12_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF13_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF14_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF15_DEFAULT 0x00000000
-#define mmVM_IOMMU_MMIO_CNTRL_1_DEFAULT 0x00000100
-#define mmMC_VM_MARC_BASE_LO_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_LO_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_LO_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_LO_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_HI_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_HI_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_HI_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_HI_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_LO_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_LO_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_LO_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_LO_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_HI_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_HI_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_HI_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_HI_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_LO_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_LO_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_LO_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_LO_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_HI_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_HI_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_HI_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_HI_3_DEFAULT 0x00000000
-#define mmVM_IOMMU_CONTROL_REGISTER_DEFAULT 0x00000000
-#define mmVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_0_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_1_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_2_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_3_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_4_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_5_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_6_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_7_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_8_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_9_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_10_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_11_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_12_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_13_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_14_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_15_DEFAULT 0x00000000
-#define mmUTCL2_CGTT_CLK_CTRL_DEFAULT 0x00000080
-
-
-// addressBlock: gc_hypdec
-#define mmCP_HYP_PFP_UCODE_ADDR_DEFAULT 0x00000000
-#define mmCP_PFP_UCODE_ADDR_DEFAULT 0x00000000
-#define mmCP_HYP_PFP_UCODE_DATA_DEFAULT 0x00000000
-#define mmCP_PFP_UCODE_DATA_DEFAULT 0x00000000
-#define mmCP_HYP_ME_UCODE_ADDR_DEFAULT 0x00000000
-#define mmCP_ME_RAM_RADDR_DEFAULT 0x00000000
-#define mmCP_ME_RAM_WADDR_DEFAULT 0x00000000
-#define mmCP_HYP_ME_UCODE_DATA_DEFAULT 0x00000000
-#define mmCP_ME_RAM_DATA_DEFAULT 0x00000000
-#define mmCP_CE_UCODE_ADDR_DEFAULT 0x00000000
-#define mmCP_HYP_CE_UCODE_ADDR_DEFAULT 0x00000000
-#define mmCP_CE_UCODE_DATA_DEFAULT 0x00000000
-#define mmCP_HYP_CE_UCODE_DATA_DEFAULT 0x00000000
-#define mmCP_HYP_MEC1_UCODE_ADDR_DEFAULT 0x00000000
-#define mmCP_MEC_ME1_UCODE_ADDR_DEFAULT 0x00000000
-#define mmCP_HYP_MEC1_UCODE_DATA_DEFAULT 0x00000000
-#define mmCP_MEC_ME1_UCODE_DATA_DEFAULT 0x00000000
-#define mmCP_HYP_MEC2_UCODE_ADDR_DEFAULT 0x00000000
-#define mmCP_MEC_ME2_UCODE_ADDR_DEFAULT 0x00000000
-#define mmCP_HYP_MEC2_UCODE_DATA_DEFAULT 0x00000000
-#define mmCP_MEC_ME2_UCODE_DATA_DEFAULT 0x00000000
-#define mmRLC_GPM_UCODE_ADDR_DEFAULT 0x00000000
-#define mmRLC_GPM_UCODE_DATA_DEFAULT 0x00000000
-#define mmGRBM_GFX_INDEX_SR_SELECT_DEFAULT 0x00000000
-#define mmGRBM_GFX_INDEX_SR_DATA_DEFAULT 0xe0000000
-#define mmGRBM_GFX_CNTL_SR_SELECT_DEFAULT 0x00000000
-#define mmGRBM_GFX_CNTL_SR_DATA_DEFAULT 0x00000000
-#define mmGRBM_CAM_INDEX_DEFAULT 0x00000000
-#define mmGRBM_HYP_CAM_INDEX_DEFAULT 0x00000000
-#define mmGRBM_CAM_DATA_DEFAULT 0x00000000
-#define mmGRBM_HYP_CAM_DATA_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_VF_ENABLE_DEFAULT 0x00000000
-#define mmRLC_GFX_RM_CNTL_ADJ_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_CFG_REG6_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_CFG_REG8_DEFAULT 0x00000000
-#define mmRLC_RLCV_TIMER_INT_0_DEFAULT 0x00000000
-#define mmRLC_RLCV_TIMER_CTRL_DEFAULT 0x00000000
-#define mmRLC_RLCV_TIMER_STAT_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_VF_DOORBELL_STATUS_DEFAULT 0x0000ffff
-#define mmRLC_GPU_IOV_VF_DOORBELL_STATUS_SET_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_VF_MASK_DEFAULT 0x00010001
-#define mmRLC_HYP_SEMAPHORE_2_DEFAULT 0x00000000
-#define mmRLC_HYP_SEMAPHORE_3_DEFAULT 0x00000000
-#define mmRLC_CLK_CNTL_DEFAULT 0x00000003
-#define mmRLC_GPU_IOV_SCH_BLOCK_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_CFG_REG1_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_CFG_REG2_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_VM_BUSY_STATUS_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SCH_0_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_ACTIVE_FCN_ID_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SCH_3_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SCH_1_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SCH_2_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_UCODE_ADDR_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_UCODE_DATA_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SCRATCH_ADDR_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SCRATCH_DATA_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_F32_CNTL_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_F32_RESET_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SDMA0_STATUS_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SDMA1_STATUS_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SMU_RESPONSE_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_VIRT_RESET_REQ_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_RLC_RESPONSE_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_INT_DISABLE_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_INT_FORCE_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SDMA0_BUSY_STATUS_DEFAULT 0x00000000
-#define mmRLC_GPU_IOV_SDMA1_BUSY_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: gccacind
-#define ixGC_CAC_CNTL_DEFAULT 0x000001fe
-#define ixGC_CAC_OVR_SEL_DEFAULT 0x00000000
-#define ixGC_CAC_OVR_VAL_DEFAULT 0x00000000
-#define ixGC_CAC_WEIGHT_BCI_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_CB_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_CB_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_CP_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_CP_1_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_DB_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_DB_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_GDS_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_GDS_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_IA_0_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_LDS_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_LDS_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_PA_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_PC_0_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_SC_0_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_SPI_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_SPI_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_SPI_2_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_SQ_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_SQ_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_SQ_2_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_SQ_3_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_SQ_4_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_SX_0_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_SXRB_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_TA_0_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_TCC_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_TCC_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_TCC_2_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_TCP_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_TCP_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_TCP_2_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_TD_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_TD_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_TD_2_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_VGT_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_VGT_1_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_WD_0_DEFAULT 0x00000001
-#define ixGC_CAC_WEIGHT_CU_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_CU_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_CU_2_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_CU_3_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_CU_4_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_CU_5_DEFAULT 0x00010001
-#define ixGC_CAC_ACC_BCI0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CB0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CB1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CB2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CB3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CP0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CP1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CP2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_DB0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_DB1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_DB2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_DB3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_GDS0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_GDS1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_GDS2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_GDS3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_IA0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_LDS0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_LDS1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_LDS2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_LDS3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_PA0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_PA1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_PC0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SC0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SPI0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SPI1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SPI2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SPI3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SPI4_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SPI5_DEFAULT 0x00000000
-#define ixGC_CAC_WEIGHT_PG_0_DEFAULT 0x00000001
-#define ixGC_CAC_ACC_PG0_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_PG_DEFAULT 0x00000000
-#define ixGC_CAC_WEIGHT_UTCL2_ATCL2_0_DEFAULT 0x00010001
-#define ixGC_CAC_ACC_EA0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_EA1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_EA2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_EA3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ATCL20_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_EA_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_UTCL2_ATCL2_DEFAULT 0x00000000
-#define ixGC_CAC_WEIGHT_EA_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_EA_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_RMI_0_DEFAULT 0x00000001
-#define ixGC_CAC_ACC_RMI0_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_RMI_DEFAULT 0x00000000
-#define ixGC_CAC_WEIGHT_UTCL2_ATCL2_1_DEFAULT 0x00010001
-#define ixGC_CAC_ACC_UTCL2_ATCL21_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ATCL22_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ATCL23_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_EA4_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_EA5_DEFAULT 0x00000000
-#define ixGC_CAC_WEIGHT_EA_2_DEFAULT 0x00010001
-#define ixGC_CAC_ACC_SQ0_LOWER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ0_UPPER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ1_LOWER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ1_UPPER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ2_LOWER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ2_UPPER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ3_LOWER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ3_UPPER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ4_LOWER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ4_UPPER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ5_LOWER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ5_UPPER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ6_LOWER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ6_UPPER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ7_LOWER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ7_UPPER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ8_LOWER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SQ8_UPPER_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SX0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SXRB0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_SXRB1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TA0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCC0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCC1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCC2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCC3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCC4_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCP0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCP1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCP2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCP3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TCP4_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TD0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TD1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TD2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TD3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TD4_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_TD5_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_VGT0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_VGT1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_VGT2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_WD0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU4_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU5_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU6_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU7_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU8_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU9_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_CU10_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_BCI_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_CB_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_CP_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_DB_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_GDS_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_IA_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_LDS_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_PA_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_PC_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_SC_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_SPI_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_CU_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_SQ_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_SX_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_SXRB_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_TA_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_TCC_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_TCP_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_TD_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_VGT_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_WD_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_BCI1_DEFAULT 0x00000000
-#define ixGC_CAC_WEIGHT_UTCL2_ATCL2_2_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_2_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_3_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_ROUTER_4_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_VML2_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_VML2_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_VML2_2_DEFAULT 0x00010001
-#define ixGC_CAC_ACC_UTCL2_ATCL24_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER4_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER5_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER6_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER7_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER8_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_ROUTER9_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_VML20_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_VML21_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_VML22_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_VML23_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_VML24_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_UTCL2_ROUTER_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_UTCL2_VML2_DEFAULT 0x00000000
-#define ixGC_CAC_WEIGHT_UTCL2_WALKER_0_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_WALKER_1_DEFAULT 0x00010001
-#define ixGC_CAC_WEIGHT_UTCL2_WALKER_2_DEFAULT 0x00010001
-#define ixGC_CAC_ACC_UTCL2_WALKER0_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_WALKER1_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_WALKER2_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_WALKER3_DEFAULT 0x00000000
-#define ixGC_CAC_ACC_UTCL2_WALKER4_DEFAULT 0x00000000
-#define ixGC_CAC_OVRD_UTCL2_WALKER_DEFAULT 0x00000000
-
-
-// addressBlock: secacind
-#define ixSE_CAC_CNTL_DEFAULT 0x000001fe
-#define ixSE_CAC_OVR_SEL_DEFAULT 0x00000000
-#define ixSE_CAC_OVR_VAL_DEFAULT 0x00000000
-
-
-// addressBlock: sqind
-#define ixSQ_WAVE_MODE_DEFAULT 0x00000000
-#define ixSQ_WAVE_STATUS_DEFAULT 0x00000000
-#define ixSQ_WAVE_TRAPSTS_DEFAULT 0x00000000
-#define ixSQ_WAVE_HW_ID_DEFAULT 0x00000000
-#define ixSQ_WAVE_GPR_ALLOC_DEFAULT 0x00000000
-#define ixSQ_WAVE_LDS_ALLOC_DEFAULT 0x00000000
-#define ixSQ_WAVE_IB_STS_DEFAULT 0x00000000
-#define ixSQ_WAVE_PC_LO_DEFAULT 0x00000000
-#define ixSQ_WAVE_PC_HI_DEFAULT 0x00000000
-#define ixSQ_WAVE_INST_DW0_DEFAULT 0x00000000
-#define ixSQ_WAVE_INST_DW1_DEFAULT 0x00000000
-#define ixSQ_WAVE_IB_DBG0_DEFAULT 0x00000000
-#define ixSQ_WAVE_IB_DBG1_DEFAULT 0x00000000
-#define ixSQ_WAVE_FLUSH_IB_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP0_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP1_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP2_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP3_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP4_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP5_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP6_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP7_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP8_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP9_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP10_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP11_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP12_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP13_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP14_DEFAULT 0x00000000
-#define ixSQ_WAVE_TTMP15_DEFAULT 0x00000000
-#define ixSQ_WAVE_M0_DEFAULT 0x00000000
-#define ixSQ_WAVE_EXEC_LO_DEFAULT 0x00000000
-#define ixSQ_WAVE_EXEC_HI_DEFAULT 0x00000000
-#define ixSQ_INTERRUPT_WORD_AUTO_CTXID_DEFAULT 0x00000000
-#define ixSQ_INTERRUPT_WORD_AUTO_HI_DEFAULT 0x00000000
-#define ixSQ_INTERRUPT_WORD_AUTO_LO_DEFAULT 0x00000000
-#define ixSQ_INTERRUPT_WORD_CMN_CTXID_DEFAULT 0x00000000
-#define ixSQ_INTERRUPT_WORD_CMN_HI_DEFAULT 0x00000000
-#define ixSQ_INTERRUPT_WORD_WAVE_CTXID_DEFAULT 0x00000000
-#define ixSQ_INTERRUPT_WORD_WAVE_HI_DEFAULT 0x00000000
-#define ixSQ_INTERRUPT_WORD_WAVE_LO_DEFAULT 0x00000000
-
-
-
-
-
-
-
-
-// addressBlock: didtind
-#define ixDIDT_SQ_CTRL0_DEFAULT 0x0000ff00
-#define ixDIDT_SQ_CTRL1_DEFAULT 0x00ff00ff
-#define ixDIDT_SQ_CTRL2_DEFAULT 0x18800004
-#define ixDIDT_SQ_STALL_CTRL_DEFAULT 0x00fff000
-#define ixDIDT_SQ_TUNING_CTRL_DEFAULT 0x00010004
-#define ixDIDT_SQ_STALL_AUTO_RELEASE_CTRL_DEFAULT 0x00ffffff
-#define ixDIDT_SQ_CTRL3_DEFAULT 0x00038000
-#define ixDIDT_SQ_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_SQ_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_SQ_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_SQ_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_SQ_WEIGHT0_3_DEFAULT 0x00000000
-#define ixDIDT_SQ_WEIGHT4_7_DEFAULT 0x00000000
-#define ixDIDT_SQ_WEIGHT8_11_DEFAULT 0x00000000
-#define ixDIDT_SQ_EDC_CTRL_DEFAULT 0x00001c00
-#define ixDIDT_SQ_EDC_THRESHOLD_DEFAULT 0x00000000
-#define ixDIDT_SQ_EDC_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_SQ_EDC_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_SQ_EDC_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_SQ_EDC_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_SQ_EDC_STATUS_DEFAULT 0x00000000
-#define ixDIDT_SQ_EDC_STALL_DELAY_1_DEFAULT 0x00000000
-#define ixDIDT_SQ_EDC_STALL_DELAY_2_DEFAULT 0x00000000
-#define ixDIDT_SQ_EDC_STALL_DELAY_3_DEFAULT 0x00000000
-#define ixDIDT_SQ_EDC_OVERFLOW_DEFAULT 0x00000000
-#define ixDIDT_SQ_EDC_ROLLING_POWER_DELTA_DEFAULT 0x00000000
-#define ixDIDT_DB_CTRL0_DEFAULT 0x0000ff00
-#define ixDIDT_DB_CTRL1_DEFAULT 0x00ff00ff
-#define ixDIDT_DB_CTRL2_DEFAULT 0x18800004
-#define ixDIDT_DB_STALL_CTRL_DEFAULT 0x00fff000
-#define ixDIDT_DB_TUNING_CTRL_DEFAULT 0x00010004
-#define ixDIDT_DB_STALL_AUTO_RELEASE_CTRL_DEFAULT 0x00ffffff
-#define ixDIDT_DB_CTRL3_DEFAULT 0x00038000
-#define ixDIDT_DB_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_DB_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_DB_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_DB_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_DB_WEIGHT0_3_DEFAULT 0x00000000
-#define ixDIDT_DB_WEIGHT4_7_DEFAULT 0x00000000
-#define ixDIDT_DB_WEIGHT8_11_DEFAULT 0x00000000
-#define ixDIDT_DB_EDC_CTRL_DEFAULT 0x00001c00
-#define ixDIDT_DB_EDC_THRESHOLD_DEFAULT 0x00000000
-#define ixDIDT_DB_EDC_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_DB_EDC_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_DB_EDC_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_DB_EDC_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_DB_EDC_STATUS_DEFAULT 0x00000000
-#define ixDIDT_DB_EDC_STALL_DELAY_1_DEFAULT 0x00000000
-#define ixDIDT_DB_EDC_OVERFLOW_DEFAULT 0x00000000
-#define ixDIDT_DB_EDC_ROLLING_POWER_DELTA_DEFAULT 0x00000000
-#define ixDIDT_TD_CTRL0_DEFAULT 0x0000ff00
-#define ixDIDT_TD_CTRL1_DEFAULT 0x00ff00ff
-#define ixDIDT_TD_CTRL2_DEFAULT 0x18800004
-#define ixDIDT_TD_STALL_CTRL_DEFAULT 0x00fff000
-#define ixDIDT_TD_TUNING_CTRL_DEFAULT 0x00010004
-#define ixDIDT_TD_STALL_AUTO_RELEASE_CTRL_DEFAULT 0x00ffffff
-#define ixDIDT_TD_CTRL3_DEFAULT 0x00038000
-#define ixDIDT_TD_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_TD_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_TD_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_TD_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_TD_WEIGHT0_3_DEFAULT 0x00000000
-#define ixDIDT_TD_WEIGHT4_7_DEFAULT 0x00000000
-#define ixDIDT_TD_WEIGHT8_11_DEFAULT 0x00000000
-#define ixDIDT_TD_EDC_CTRL_DEFAULT 0x00001c00
-#define ixDIDT_TD_EDC_THRESHOLD_DEFAULT 0x00000000
-#define ixDIDT_TD_EDC_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_TD_EDC_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_TD_EDC_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_TD_EDC_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_TD_EDC_STATUS_DEFAULT 0x00000000
-#define ixDIDT_TD_EDC_STALL_DELAY_1_DEFAULT 0x00000000
-#define ixDIDT_TD_EDC_STALL_DELAY_2_DEFAULT 0x00000000
-#define ixDIDT_TD_EDC_STALL_DELAY_3_DEFAULT 0x00000000
-#define ixDIDT_TD_EDC_OVERFLOW_DEFAULT 0x00000000
-#define ixDIDT_TD_EDC_ROLLING_POWER_DELTA_DEFAULT 0x00000000
-#define ixDIDT_TCP_CTRL0_DEFAULT 0x0000ff00
-#define ixDIDT_TCP_CTRL1_DEFAULT 0x00ff00ff
-#define ixDIDT_TCP_CTRL2_DEFAULT 0x18800004
-#define ixDIDT_TCP_STALL_CTRL_DEFAULT 0x00fff000
-#define ixDIDT_TCP_TUNING_CTRL_DEFAULT 0x00010004
-#define ixDIDT_TCP_STALL_AUTO_RELEASE_CTRL_DEFAULT 0x00ffffff
-#define ixDIDT_TCP_CTRL3_DEFAULT 0x00038000
-#define ixDIDT_TCP_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_TCP_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_TCP_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_TCP_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_TCP_WEIGHT0_3_DEFAULT 0x00000000
-#define ixDIDT_TCP_WEIGHT4_7_DEFAULT 0x00000000
-#define ixDIDT_TCP_WEIGHT8_11_DEFAULT 0x00000000
-#define ixDIDT_TCP_EDC_CTRL_DEFAULT 0x00001c00
-#define ixDIDT_TCP_EDC_THRESHOLD_DEFAULT 0x00000000
-#define ixDIDT_TCP_EDC_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_TCP_EDC_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_TCP_EDC_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_TCP_EDC_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_TCP_EDC_STATUS_DEFAULT 0x00000000
-#define ixDIDT_TCP_EDC_STALL_DELAY_1_DEFAULT 0x00000000
-#define ixDIDT_TCP_EDC_STALL_DELAY_2_DEFAULT 0x00000000
-#define ixDIDT_TCP_EDC_STALL_DELAY_3_DEFAULT 0x00000000
-#define ixDIDT_TCP_EDC_OVERFLOW_DEFAULT 0x00000000
-#define ixDIDT_TCP_EDC_ROLLING_POWER_DELTA_DEFAULT 0x00000000
-#define ixDIDT_DBR_CTRL0_DEFAULT 0x0000ff00
-#define ixDIDT_DBR_CTRL1_DEFAULT 0x00ff00ff
-#define ixDIDT_DBR_CTRL2_DEFAULT 0x18800004
-#define ixDIDT_DBR_STALL_CTRL_DEFAULT 0x00fff000
-#define ixDIDT_DBR_TUNING_CTRL_DEFAULT 0x00010004
-#define ixDIDT_DBR_STALL_AUTO_RELEASE_CTRL_DEFAULT 0x00ffffff
-#define ixDIDT_DBR_CTRL3_DEFAULT 0x00038000
-#define ixDIDT_DBR_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_DBR_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_DBR_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_DBR_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_DBR_WEIGHT0_3_DEFAULT 0x00000000
-#define ixDIDT_DBR_WEIGHT4_7_DEFAULT 0x00000000
-#define ixDIDT_DBR_WEIGHT8_11_DEFAULT 0x00000000
-#define ixDIDT_DBR_EDC_CTRL_DEFAULT 0x00001c00
-#define ixDIDT_DBR_EDC_THRESHOLD_DEFAULT 0x00000000
-#define ixDIDT_DBR_EDC_STALL_PATTERN_1_2_DEFAULT 0x01010001
-#define ixDIDT_DBR_EDC_STALL_PATTERN_3_4_DEFAULT 0x11110421
-#define ixDIDT_DBR_EDC_STALL_PATTERN_5_6_DEFAULT 0x25291249
-#define ixDIDT_DBR_EDC_STALL_PATTERN_7_DEFAULT 0x00002aaa
-#define ixDIDT_DBR_EDC_STATUS_DEFAULT 0x00000000
-#define ixDIDT_DBR_EDC_STALL_DELAY_1_DEFAULT 0x00000000
-#define ixDIDT_DBR_EDC_OVERFLOW_DEFAULT 0x00000000
-#define ixDIDT_DBR_EDC_ROLLING_POWER_DELTA_DEFAULT 0x00000000
-#define ixDIDT_SQ_STALL_EVENT_COUNTER_DEFAULT 0x00000000
-#define ixDIDT_DB_STALL_EVENT_COUNTER_DEFAULT 0x00000000
-#define ixDIDT_TD_STALL_EVENT_COUNTER_DEFAULT 0x00000000
-#define ixDIDT_TCP_STALL_EVENT_COUNTER_DEFAULT 0x00000000
-#define ixDIDT_DBR_STALL_EVENT_COUNTER_DEFAULT 0x00000000
-
-
-
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_default.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_default.h
deleted file mode 100644
index 392ef77..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/MMHUB/mmhub_9_1_default.h
+++ /dev/null
@@ -1,1028 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _mmhub_9_1_DEFAULT_HEADER
-#define _mmhub_9_1_DEFAULT_HEADER
-
-
-// addressBlock: mmhub_dagbdec
-#define mmDAGB0_RDCLI0_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI1_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI2_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI3_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI4_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI5_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI6_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI7_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI8_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI9_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI10_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI11_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI12_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI13_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI14_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI15_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI16_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI17_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI18_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI19_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI20_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI21_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI22_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI23_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI24_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI25_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI26_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI27_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI28_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI29_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI30_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RDCLI31_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_RD_CNTL_DEFAULT 0x03527df8
-#define mmDAGB0_RD_GMI_CNTL_DEFAULT 0x0000304f
-#define mmDAGB0_RD_ADDR_DAGB_DEFAULT 0x00000039
-#define mmDAGB0_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
-#define mmDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
-#define mmDAGB0_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmDAGB0_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmDAGB0_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
-#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
-#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
-#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
-#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST2_DEFAULT 0x88888888
-#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER2_DEFAULT 0x11111111
-#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST3_DEFAULT 0x88888888
-#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER3_DEFAULT 0x11111111
-#define mmDAGB0_RD_VC0_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_RD_VC1_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_RD_VC2_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_RD_VC3_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_RD_VC4_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_RD_VC5_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_RD_VC6_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_RD_VC7_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_RD_CNTL_MISC_DEFAULT 0x01a10408
-#define mmDAGB0_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7
-#define mmDAGB0_RDCLI_ASK_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_RDCLI_GO_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_RDCLI_TLB_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_RDCLI_OARB_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_RDCLI_OSD_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_WRCLI0_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI1_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI2_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI3_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI4_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI5_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI6_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI7_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI8_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI9_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI10_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI11_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI12_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI13_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI14_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI15_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI16_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI17_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI18_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI19_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI20_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI21_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI22_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI23_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI24_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI25_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI26_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI27_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI28_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI29_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI30_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WRCLI31_DEFAULT 0xfe5fe0f9
-#define mmDAGB0_WR_CNTL_DEFAULT 0x03527df8
-#define mmDAGB0_WR_GMI_CNTL_DEFAULT 0x0000304f
-#define mmDAGB0_WR_ADDR_DAGB_DEFAULT 0x00000039
-#define mmDAGB0_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888
-#define mmDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111
-#define mmDAGB0_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmDAGB0_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmDAGB0_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888
-#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111
-#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888
-#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111
-#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST2_DEFAULT 0x88888888
-#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER2_DEFAULT 0x11111111
-#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST3_DEFAULT 0x88888888
-#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER3_DEFAULT 0x11111111
-#define mmDAGB0_WR_DATA_DAGB_DEFAULT 0x00000001
-#define mmDAGB0_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111
-#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000
-#define mmDAGB0_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111
-#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000
-#define mmDAGB0_WR_DATA_DAGB_MAX_BURST2_DEFAULT 0x11111111
-#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER2_DEFAULT 0x00000000
-#define mmDAGB0_WR_DATA_DAGB_MAX_BURST3_DEFAULT 0x11111111
-#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER3_DEFAULT 0x00000000
-#define mmDAGB0_WR_VC0_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_WR_VC1_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_WR_VC2_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_WR_VC3_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_WR_VC4_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_WR_VC5_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_WR_VC6_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_WR_VC7_CNTL_DEFAULT 0xff2ff082
-#define mmDAGB0_WR_CNTL_MISC_DEFAULT 0x01a10408
-#define mmDAGB0_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7
-#define mmDAGB0_WR_DATA_CREDIT_DEFAULT 0x5c626870
-#define mmDAGB0_WR_MISC_CREDIT_DEFAULT 0x0078dc88
-#define mmDAGB0_WRCLI_ASK_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_WRCLI_GO_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_WRCLI_TLB_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_WRCLI_OARB_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_WRCLI_OSD_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000
-#define mmDAGB0_DAGB_DLY_DEFAULT 0x00000000
-#define mmDAGB0_CNTL_MISC_DEFAULT 0xcf7c1ffa
-#define mmDAGB0_CNTL_MISC2_DEFAULT 0x00000000
-#define mmDAGB0_FIFO_EMPTY_DEFAULT 0x00ffffff
-#define mmDAGB0_FIFO_FULL_DEFAULT 0x00000000
-#define mmDAGB0_WR_CREDITS_FULL_DEFAULT 0x0007ffff
-#define mmDAGB0_RD_CREDITS_FULL_DEFAULT 0x0003ffff
-#define mmDAGB0_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmDAGB0_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmDAGB0_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmDAGB0_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmDAGB0_PERFCOUNTER2_CFG_DEFAULT 0x00000000
-#define mmDAGB0_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-#define mmDAGB0_RESERVE0_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE1_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE2_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE3_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE4_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE5_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE6_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE7_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE8_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE9_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE10_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE11_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE12_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE13_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE14_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE15_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE16_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE17_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE18_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE19_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE20_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE21_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE22_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE23_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE24_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE25_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE26_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE27_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE28_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE29_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE30_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE31_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE32_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE33_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE34_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE35_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE36_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE37_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE38_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE39_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE40_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE41_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE42_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE43_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE44_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE45_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE46_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE47_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE48_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE49_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE50_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE51_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE52_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE53_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE54_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE55_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE56_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE57_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE58_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE59_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE60_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE61_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE62_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE63_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE64_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE65_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE66_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE67_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE68_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE69_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE70_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE71_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE72_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE73_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE74_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE75_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE76_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE77_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE78_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE79_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE80_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE81_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE82_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE83_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE84_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE85_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE86_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE87_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE88_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE89_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE90_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE91_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE92_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE93_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE94_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE95_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE96_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE97_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE98_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE99_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE100_DEFAULT 0x00000000
-#define mmDAGB0_RESERVE101_DEFAULT 0x00000000
-
-
-// addressBlock: mmhub_ea_mmeadec
-#define mmMMEA0_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
-#define mmMMEA0_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
-#define mmMMEA0_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
-#define mmMMEA0_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
-#define mmMMEA0_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
-#define mmMMEA0_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
-#define mmMMEA0_DRAM_RD_LAZY_DEFAULT 0x00000924
-#define mmMMEA0_DRAM_WR_LAZY_DEFAULT 0x00000924
-#define mmMMEA0_DRAM_RD_CAM_CNTL_DEFAULT 0x06db3333
-#define mmMMEA0_DRAM_WR_CAM_CNTL_DEFAULT 0x06db3333
-#define mmMMEA0_DRAM_PAGE_BURST_DEFAULT 0x20002000
-#define mmMMEA0_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
-#define mmMMEA0_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
-#define mmMMEA0_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmMMEA0_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmMMEA0_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
-#define mmMMEA0_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
-#define mmMMEA0_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
-#define mmMMEA0_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
-#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmMMEA0_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
-#define mmMMEA0_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
-#define mmMMEA0_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
-#define mmMMEA0_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
-#define mmMMEA0_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
-#define mmMMEA0_ADDRNORM_HOLE_CNTL_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC_BANK_CFG_DEFAULT 0x000001ef
-#define mmMMEA0_ADDRDEC_MISC_CFG_DEFAULT 0x3ffff000
-#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
-#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
-#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
-#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
-#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
-#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
-#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
-#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
-#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
-#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
-#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
-#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
-#define mmMMEA0_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
-#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
-#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
-#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
-#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
-#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
-#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
-#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
-#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
-#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
-#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
-#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
-#define mmMMEA0_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
-#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
-#define mmMMEA0_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
-#define mmMMEA0_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
-#define mmMMEA0_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
-#define mmMMEA0_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
-#define mmMMEA0_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
-#define mmMMEA0_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
-#define mmMMEA0_IO_GROUP_BURST_DEFAULT 0x1f031f03
-#define mmMMEA0_IO_RD_PRI_AGE_DEFAULT 0x00db6249
-#define mmMMEA0_IO_WR_PRI_AGE_DEFAULT 0x00db6249
-#define mmMMEA0_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmMMEA0_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmMMEA0_IO_RD_PRI_FIXED_DEFAULT 0x00000924
-#define mmMMEA0_IO_WR_PRI_FIXED_DEFAULT 0x00000924
-#define mmMMEA0_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
-#define mmMMEA0_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
-#define mmMMEA0_IO_RD_PRI_URGENCY_MASK_DEFAULT 0xffffffff
-#define mmMMEA0_IO_WR_PRI_URGENCY_MASK_DEFAULT 0xffffffff
-#define mmMMEA0_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmMMEA0_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmMMEA0_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmMMEA0_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmMMEA0_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmMMEA0_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmMMEA0_SDP_ARB_DRAM_DEFAULT 0x00102040
-#define mmMMEA0_SDP_ARB_FINAL_DEFAULT 0x00007fff
-#define mmMMEA0_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
-#define mmMMEA0_SDP_IO_PRIORITY_DEFAULT 0x00000000
-#define mmMMEA0_SDP_CREDITS_DEFAULT 0x000100bf
-#define mmMMEA0_SDP_TAG_RESERVE0_DEFAULT 0x00000000
-#define mmMMEA0_SDP_TAG_RESERVE1_DEFAULT 0x00000000
-#define mmMMEA0_SDP_VCC_RESERVE0_DEFAULT 0x00000000
-#define mmMMEA0_SDP_VCC_RESERVE1_DEFAULT 0x00000000
-#define mmMMEA0_SDP_VCD_RESERVE0_DEFAULT 0x00000000
-#define mmMMEA0_SDP_VCD_RESERVE1_DEFAULT 0x00000000
-#define mmMMEA0_SDP_REQ_CNTL_DEFAULT 0x0000000f
-#define mmMMEA0_MISC_DEFAULT 0x00180130
-#define mmMMEA0_LATENCY_SAMPLING_DEFAULT 0x00000000
-#define mmMMEA0_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmMMEA0_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmMMEA0_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmMMEA0_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmMMEA0_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-#define mmMMEA0_EDC_CNT_DEFAULT 0x00000000
-#define mmMMEA0_EDC_CNT2_DEFAULT 0x00000000
-#define mmMMEA0_DSM_CNTL_DEFAULT 0x00000000
-#define mmMMEA0_DSM_CNTLA_DEFAULT 0x00000000
-#define mmMMEA0_DSM_CNTLB_DEFAULT 0x00000000
-#define mmMMEA0_DSM_CNTL2_DEFAULT 0x00000000
-#define mmMMEA0_DSM_CNTL2A_DEFAULT 0x00000000
-#define mmMMEA0_DSM_CNTL2B_DEFAULT 0x00000000
-#define mmMMEA0_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmMMEA0_EDC_MODE_DEFAULT 0x00000000
-#define mmMMEA0_ERR_STATUS_DEFAULT 0x00000000
-#define mmMMEA0_MISC2_DEFAULT 0x00000000
-#define mmMMEA1_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555
-#define mmMMEA1_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555
-#define mmMMEA1_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555
-#define mmMMEA1_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555
-#define mmMMEA1_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25
-#define mmMMEA1_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25
-#define mmMMEA1_DRAM_RD_LAZY_DEFAULT 0x00000924
-#define mmMMEA1_DRAM_WR_LAZY_DEFAULT 0x00000924
-#define mmMMEA1_DRAM_RD_CAM_CNTL_DEFAULT 0x06db3333
-#define mmMMEA1_DRAM_WR_CAM_CNTL_DEFAULT 0x06db3333
-#define mmMMEA1_DRAM_PAGE_BURST_DEFAULT 0x20002000
-#define mmMMEA1_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249
-#define mmMMEA1_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249
-#define mmMMEA1_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmMMEA1_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmMMEA1_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924
-#define mmMMEA1_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924
-#define mmMMEA1_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6
-#define mmMMEA1_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6
-#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmMMEA1_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000
-#define mmMMEA1_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000
-#define mmMMEA1_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000
-#define mmMMEA1_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000
-#define mmMMEA1_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000
-#define mmMMEA1_ADDRNORM_HOLE_CNTL_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC_BANK_CFG_DEFAULT 0x000001ef
-#define mmMMEA1_ADDRDEC_MISC_CFG_DEFAULT 0x3ffff000
-#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe
-#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe
-#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
-#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
-#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408
-#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408
-#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543
-#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543
-#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321
-#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321
-#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543
-#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543
-#define mmMMEA1_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe
-#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe
-#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe
-#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe
-#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408
-#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408
-#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543
-#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543
-#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321
-#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321
-#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543
-#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543
-#define mmMMEA1_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000
-#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000
-#define mmMMEA1_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
-#define mmMMEA1_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
-#define mmMMEA1_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4
-#define mmMMEA1_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4
-#define mmMMEA1_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777
-#define mmMMEA1_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777
-#define mmMMEA1_IO_GROUP_BURST_DEFAULT 0x1f031f03
-#define mmMMEA1_IO_RD_PRI_AGE_DEFAULT 0x00db6249
-#define mmMMEA1_IO_WR_PRI_AGE_DEFAULT 0x00db6249
-#define mmMMEA1_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmMMEA1_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6
-#define mmMMEA1_IO_RD_PRI_FIXED_DEFAULT 0x00000924
-#define mmMMEA1_IO_WR_PRI_FIXED_DEFAULT 0x00000924
-#define mmMMEA1_IO_RD_PRI_URGENCY_DEFAULT 0x00000492
-#define mmMMEA1_IO_WR_PRI_URGENCY_DEFAULT 0x00000492
-#define mmMMEA1_IO_RD_PRI_URGENCY_MASK_DEFAULT 0xffffffff
-#define mmMMEA1_IO_WR_PRI_URGENCY_MASK_DEFAULT 0xffffffff
-#define mmMMEA1_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmMMEA1_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmMMEA1_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmMMEA1_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f
-#define mmMMEA1_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f
-#define mmMMEA1_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff
-#define mmMMEA1_SDP_ARB_DRAM_DEFAULT 0x00102040
-#define mmMMEA1_SDP_ARB_FINAL_DEFAULT 0x00007fff
-#define mmMMEA1_SDP_DRAM_PRIORITY_DEFAULT 0x00000000
-#define mmMMEA1_SDP_IO_PRIORITY_DEFAULT 0x00000000
-#define mmMMEA1_SDP_CREDITS_DEFAULT 0x000100bf
-#define mmMMEA1_SDP_TAG_RESERVE0_DEFAULT 0x00000000
-#define mmMMEA1_SDP_TAG_RESERVE1_DEFAULT 0x00000000
-#define mmMMEA1_SDP_VCC_RESERVE0_DEFAULT 0x00000000
-#define mmMMEA1_SDP_VCC_RESERVE1_DEFAULT 0x00000000
-#define mmMMEA1_SDP_VCD_RESERVE0_DEFAULT 0x00000000
-#define mmMMEA1_SDP_VCD_RESERVE1_DEFAULT 0x00000000
-#define mmMMEA1_SDP_REQ_CNTL_DEFAULT 0x0000000f
-#define mmMMEA1_MISC_DEFAULT 0x00180130
-#define mmMMEA1_LATENCY_SAMPLING_DEFAULT 0x00000000
-#define mmMMEA1_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmMMEA1_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmMMEA1_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmMMEA1_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmMMEA1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-#define mmMMEA1_EDC_CNT_DEFAULT 0x00000000
-#define mmMMEA1_EDC_CNT2_DEFAULT 0x00000000
-#define mmMMEA1_DSM_CNTL_DEFAULT 0x00000000
-#define mmMMEA1_DSM_CNTLA_DEFAULT 0x00000000
-#define mmMMEA1_DSM_CNTLB_DEFAULT 0x00000000
-#define mmMMEA1_DSM_CNTL2_DEFAULT 0x00000000
-#define mmMMEA1_DSM_CNTL2A_DEFAULT 0x00000000
-#define mmMMEA1_DSM_CNTL2B_DEFAULT 0x00000000
-#define mmMMEA1_CGTT_CLK_CTRL_DEFAULT 0x00000100
-#define mmMMEA1_EDC_MODE_DEFAULT 0x00000000
-#define mmMMEA1_ERR_STATUS_DEFAULT 0x00000000
-#define mmMMEA1_MISC2_DEFAULT 0x00000000
-
-
-// addressBlock: mmhub_pctldec
-#define mmPCTL_MISC_DEFAULT 0x00000889
-#define mmPCTL_MMHUB_DEEPSLEEP_DEFAULT 0x00000000
-#define mmPCTL_MMHUB_DEEPSLEEP_OVERRIDE_DEFAULT 0x00000000
-#define mmPCTL_PG_IGNORE_DEEPSLEEP_DEFAULT 0x00000000
-#define mmPCTL_PG_DAGB_DEFAULT 0x00000000
-#define mmPCTL0_RENG_RAM_INDEX_DEFAULT 0x00000000
-#define mmPCTL0_RENG_RAM_DATA_DEFAULT 0x00000000
-#define mmPCTL0_RENG_EXECUTE_DEFAULT 0x00000000
-#define mmPCTL0_MISC_DEFAULT 0x00001000
-#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000
-#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000
-#define mmPCTL0_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
-#define mmPCTL0_STCTRL_REGISTER_SAVE_EXCL_SET_DEFAULT 0xffffffff
-#define mmPCTL0_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
-#define mmPCTL1_RENG_RAM_INDEX_DEFAULT 0x00000000
-#define mmPCTL1_RENG_RAM_DATA_DEFAULT 0x00000000
-#define mmPCTL1_RENG_EXECUTE_DEFAULT 0x00000000
-#define mmPCTL1_MISC_DEFAULT 0x00000800
-#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x061f05a0
-#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x08590800
-#define mmPCTL1_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
-#define mmPCTL1_STCTRL_REGISTER_SAVE_EXCL_SET_DEFAULT 0xffffffff
-#define mmPCTL1_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
-#define mmPCTL2_RENG_RAM_INDEX_DEFAULT 0x00000000
-#define mmPCTL2_RENG_RAM_DATA_DEFAULT 0x00000000
-#define mmPCTL2_RENG_EXECUTE_DEFAULT 0x00000000
-#define mmPCTL2_MISC_DEFAULT 0x00000800
-#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x069f0620
-#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x08b3085a
-#define mmPCTL2_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000
-#define mmPCTL2_STCTRL_REGISTER_SAVE_EXCL_SET_DEFAULT 0xffffffff
-#define mmPCTL2_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff
-
-
-// addressBlock: mmhub_l1tlb_vml1dec
-#define mmMC_VM_MX_L1_TLB0_STATUS_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_TLB1_STATUS_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_TLB2_STATUS_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_TLB3_STATUS_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_TLB4_STATUS_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_TLB5_STATUS_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_TLB6_STATUS_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_TLB7_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: mmhub_l1tlb_vml1pldec
-#define mmMC_VM_MX_L1_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_PERFCOUNTER2_CFG_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_PERFCOUNTER3_CFG_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-
-
-// addressBlock: mmhub_l1tlb_vml1prdec
-#define mmMC_VM_MX_L1_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_PERFCOUNTER_HI_DEFAULT 0x00000000
-
-
-// addressBlock: mmhub_l1tlb_vmtlspfdec
-#define mmVM_L2_SAW_CNTL_DEFAULT 0x0c0b8602
-#define mmVM_L2_SAW_CNTL2_DEFAULT 0x00000000
-#define mmVM_L2_SAW_CNTL3_DEFAULT 0x80100004
-#define mmVM_L2_SAW_CNTL4_DEFAULT 0x00000001
-#define mmVM_L2_SAW_CONTEXT0_CNTL_DEFAULT 0x00fffed8
-#define mmVM_L2_SAW_CONTEXT0_CNTL2_DEFAULT 0x00000000
-#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_SAW_CONTEXTS_DISABLE_DEFAULT 0x00000000
-#define mmVM_L2_SAW_PIPES_BUSY_DEFAULT 0x00000000
-
-
-// addressBlock: mmhub_utcl2_atcl2dec
-#define mmATC_L2_CNTL_DEFAULT 0x000001c9
-#define mmATC_L2_CNTL2_DEFAULT 0x00000100
-#define mmATC_L2_CACHE_DATA0_DEFAULT 0x00000000
-#define mmATC_L2_CACHE_DATA1_DEFAULT 0x00000000
-#define mmATC_L2_CACHE_DATA2_DEFAULT 0x00000000
-#define mmATC_L2_CNTL3_DEFAULT 0x000001f8
-#define mmATC_L2_STATUS_DEFAULT 0x00000000
-#define mmATC_L2_STATUS2_DEFAULT 0x00000000
-#define mmATC_L2_MISC_CG_DEFAULT 0x00000200
-#define mmATC_L2_MEM_POWER_LS_DEFAULT 0x00000208
-#define mmATC_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080
-
-
-// addressBlock: mmhub_utcl2_vml2pfdec
-#define mmVM_L2_CNTL_DEFAULT 0x00080602
-#define mmVM_L2_CNTL2_DEFAULT 0x00000000
-#define mmVM_L2_CNTL3_DEFAULT 0x80100007
-#define mmVM_L2_STATUS_DEFAULT 0x00000000
-#define mmVM_DUMMY_PAGE_FAULT_CNTL_DEFAULT 0x00000090
-#define mmVM_DUMMY_PAGE_FAULT_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_DUMMY_PAGE_FAULT_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_CNTL_DEFAULT 0x3ffffffc
-#define mmVM_L2_PROTECTION_FAULT_CNTL2_DEFAULT 0x000a0000
-#define mmVM_L2_PROTECTION_FAULT_MM_CNTL3_DEFAULT 0xffffffff
-#define mmVM_L2_PROTECTION_FAULT_MM_CNTL4_DEFAULT 0xffffffff
-#define mmVM_L2_PROTECTION_FAULT_STATUS_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_DEFAULT 0x00000000
-#define mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_DEFAULT 0x00000000
-#define mmVM_L2_CNTL4_DEFAULT 0x000000c1
-#define mmVM_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000000
-#define mmVM_L2_BANK_SELECT_RESERVED_CID_DEFAULT 0x00000000
-#define mmVM_L2_BANK_SELECT_RESERVED_CID2_DEFAULT 0x00000000
-#define mmVM_L2_CACHE_PARITY_CNTL_DEFAULT 0x00000000
-#define mmVM_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080
-
-
-// addressBlock: mmhub_utcl2_vml2vcdec
-#define mmVM_CONTEXT0_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT1_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT2_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT3_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT4_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT5_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT6_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT7_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT8_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT9_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT10_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT11_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT12_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT13_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT14_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXT15_CNTL_DEFAULT 0x007ffe80
-#define mmVM_CONTEXTS_DISABLE_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG0_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG1_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG2_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG3_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG4_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG5_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG6_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG7_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG8_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG9_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG10_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG11_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG12_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG13_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG14_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG15_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG16_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG17_SEM_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG0_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG1_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG2_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG3_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG4_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG5_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG6_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG7_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG8_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG9_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG10_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG11_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG12_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG13_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG14_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG15_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG16_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG17_REQ_DEFAULT 0x017c0000
-#define mmVM_INVALIDATE_ENG0_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG1_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG2_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG3_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG4_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG5_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG6_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG7_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG8_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG9_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG10_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG11_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG12_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG13_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG14_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG15_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG16_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG17_ACK_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG1_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG2_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG3_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG4_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG5_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG6_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG7_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG8_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG9_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG10_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG11_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG12_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG13_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG14_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG15_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_LO32_DEFAULT 0x00000000
-#define mmVM_INVALIDATE_ENG17_ADDR_RANGE_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000
-#define mmVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000
-
-
-// addressBlock: mmhub_utcl2_vml2pldec
-#define mmMC_VM_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER2_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER3_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER4_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER5_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER6_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER7_CFG_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-
-
-// addressBlock: mmhub_utcl2_vml2prdec
-#define mmMC_VM_L2_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmMC_VM_L2_PERFCOUNTER_HI_DEFAULT 0x00000000
-
-
-// addressBlock: mmhub_utcl2_vmsharedhvdec
-#define mmMC_VM_FB_SIZE_OFFSET_VF0_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF1_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF2_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF3_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF4_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF5_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF6_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF7_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF8_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF9_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF10_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF11_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF12_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF13_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF14_DEFAULT 0x00000000
-#define mmMC_VM_FB_SIZE_OFFSET_VF15_DEFAULT 0x00000000
-#define mmVM_IOMMU_MMIO_CNTRL_1_DEFAULT 0x00000100
-#define mmMC_VM_MARC_BASE_LO_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_LO_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_LO_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_LO_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_HI_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_HI_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_HI_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_BASE_HI_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_LO_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_LO_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_LO_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_LO_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_HI_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_HI_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_HI_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_RELOC_HI_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_LO_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_LO_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_LO_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_LO_3_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_HI_0_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_HI_1_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_HI_2_DEFAULT 0x00000000
-#define mmMC_VM_MARC_LEN_HI_3_DEFAULT 0x00000000
-#define mmVM_IOMMU_CONTROL_REGISTER_DEFAULT 0x00000000
-#define mmVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_0_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_1_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_2_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_3_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_4_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_5_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_6_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_7_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_8_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_9_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_10_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_11_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_12_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_13_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_14_DEFAULT 0x00000000
-#define mmVM_PCIE_ATS_CNTL_VF_15_DEFAULT 0x00000000
-#define mmUTCL2_CGTT_CLK_CTRL_DEFAULT 0x00000080
-
-
-// addressBlock: mmhub_utcl2_vmsharedpfdec
-#define mmMC_VM_NB_MMIOBASE_DEFAULT 0x00000000
-#define mmMC_VM_NB_MMIOLIMIT_DEFAULT 0x00000000
-#define mmMC_VM_NB_PCI_CTRL_DEFAULT 0x00000000
-#define mmMC_VM_NB_PCI_ARB_DEFAULT 0x00000008
-#define mmMC_VM_NB_TOP_OF_DRAM_SLOT1_DEFAULT 0x00000000
-#define mmMC_VM_NB_LOWER_TOP_OF_DRAM2_DEFAULT 0x00000000
-#define mmMC_VM_NB_UPPER_TOP_OF_DRAM2_DEFAULT 0x00000000
-#define mmMC_VM_FB_OFFSET_DEFAULT 0x00000000
-#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000
-#define mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000
-#define mmMC_VM_STEERING_DEFAULT 0x00000001
-#define mmMC_SHARED_VIRT_RESET_REQ_DEFAULT 0x00000000
-#define mmMC_MEM_POWER_LS_DEFAULT 0x00000208
-#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_START_DEFAULT 0x00000000
-#define mmMC_VM_CACHEABLE_DRAM_ADDRESS_END_DEFAULT 0x00000000
-#define mmMC_VM_APT_CNTL_DEFAULT 0x00000000
-#define mmMC_VM_LOCAL_HBM_ADDRESS_START_DEFAULT 0x00000000
-#define mmMC_VM_LOCAL_HBM_ADDRESS_END_DEFAULT 0x000fffff
-#define mmMC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: mmhub_utcl2_vmsharedvcdec
-#define mmMC_VM_FB_LOCATION_BASE_DEFAULT 0x00000000
-#define mmMC_VM_FB_LOCATION_TOP_DEFAULT 0x00000000
-#define mmMC_VM_AGP_TOP_DEFAULT 0x00000000
-#define mmMC_VM_AGP_BOT_DEFAULT 0x00000000
-#define mmMC_VM_AGP_BASE_DEFAULT 0x00000000
-#define mmMC_VM_SYSTEM_APERTURE_LOW_ADDR_DEFAULT 0x00000000
-#define mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR_DEFAULT 0x00000000
-#define mmMC_VM_MX_L1_TLB_CNTL_DEFAULT 0x00002501
-
-
-// addressBlock: mmhub_utcl2_atcl2pfcntrdec
-#define mmATC_L2_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmATC_L2_PERFCOUNTER_HI_DEFAULT 0x00000000
-
-
-// addressBlock: mmhub_utcl2_atcl2pfcntldec
-#define mmATC_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmATC_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmATC_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_default.h
deleted file mode 100644
index 5793a10..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_default.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _vcn_1_0_DEFAULT_HEADER
-#define _vcn_1_0_DEFAULT_HEADER
-
-
-// addressBlock: uvd_uvd_pg_dec
-#define mmUVD_PGFSM_CONFIG_DEFAULT 0x00000000
-#define mmUVD_PGFSM_STATUS_DEFAULT 0x002aaaaa
-#define mmUVD_POWER_STATUS_DEFAULT 0x00000801
-#define mmCC_UVD_HARVESTING_DEFAULT 0x00000000
-#define mmUVD_SCRATCH1_DEFAULT 0x00000000
-#define mmUVD_SCRATCH2_DEFAULT 0x00000000
-#define mmUVD_SCRATCH3_DEFAULT 0x00000000
-#define mmUVD_SCRATCH4_DEFAULT 0x00000000
-#define mmUVD_SCRATCH5_DEFAULT 0x00000000
-#define mmUVD_SCRATCH6_DEFAULT 0x00000000
-#define mmUVD_SCRATCH7_DEFAULT 0x00000000
-#define mmUVD_SCRATCH8_DEFAULT 0x00000000
-#define mmUVD_SCRATCH9_DEFAULT 0x00000000
-#define mmUVD_SCRATCH10_DEFAULT 0x00000000
-#define mmUVD_SCRATCH11_DEFAULT 0x00000000
-#define mmUVD_SCRATCH12_DEFAULT 0x00000000
-#define mmUVD_SCRATCH13_DEFAULT 0x00000000
-#define mmUVD_SCRATCH14_DEFAULT 0x00000000
-#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_DPG_VCPU_CACHE_OFFSET0_DEFAULT 0x00000000
-
-
-// addressBlock: uvd_uvdgendec
-#define mmUVD_LCM_CGC_CNTRL_DEFAULT 0xa0f00000
-
-
-// addressBlock: uvd_uvdnpdec
-#define mmUVD_JPEG_CNTL_DEFAULT 0x00000004
-#define mmUVD_JPEG_RB_BASE_DEFAULT 0x00000000
-#define mmUVD_JPEG_RB_WPTR_DEFAULT 0x00000000
-#define mmUVD_JPEG_RB_RPTR_DEFAULT 0x00000000
-#define mmUVD_JPEG_RB_SIZE_DEFAULT 0x00000000
-#define mmUVD_JPEG_UV_TILING_CTRL_DEFAULT 0x02104800
-#define mmUVD_JPEG_TILING_CTRL_DEFAULT 0x02104800
-#define mmUVD_JPEG_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_JPEG_GPCOM_CMD_DEFAULT 0x00000000
-#define mmUVD_JPEG_GPCOM_DATA0_DEFAULT 0x00000000
-#define mmUVD_JPEG_GPCOM_DATA1_DEFAULT 0x00000000
-#define mmUVD_JPEG_JRB_BASE_LO_DEFAULT 0x00000000
-#define mmUVD_JPEG_JRB_BASE_HI_DEFAULT 0x00000000
-#define mmUVD_JPEG_JRB_SIZE_DEFAULT 0x00000000
-#define mmUVD_JPEG_JRB_RPTR_DEFAULT 0x00000000
-#define mmUVD_JPEG_JRB_WPTR_DEFAULT 0x00000000
-#define mmUVD_JPEG_UV_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_SEMA_ADDR_LOW_DEFAULT 0x00000000
-#define mmUVD_SEMA_ADDR_HIGH_DEFAULT 0x00000000
-#define mmUVD_SEMA_CMD_DEFAULT 0x00000080
-#define mmUVD_GPCOM_VCPU_CMD_DEFAULT 0x00000000
-#define mmUVD_GPCOM_VCPU_DATA0_DEFAULT 0x00000000
-#define mmUVD_GPCOM_VCPU_DATA1_DEFAULT 0x00000000
-#define mmUVD_UDEC_DBW_UV_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_UDEC_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_UDEC_DB_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_UDEC_DBW_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_SUVD_CGC_GATE_DEFAULT 0x00000000
-#define mmUVD_SUVD_CGC_STATUS_DEFAULT 0x00000000
-#define mmUVD_SUVD_CGC_CTRL_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_NO_OP_DEFAULT 0x00000000
-#define mmUVD_JPEG_CNTL2_DEFAULT 0x00000000
-#define mmUVD_VERSION_DEFAULT 0x00010000
-#define mmUVD_GP_SCRATCH8_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH9_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH10_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH11_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH12_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH13_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH14_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH15_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH16_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH17_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH18_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH19_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH20_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH21_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH22_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH23_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_LO2_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_HI2_DEFAULT 0x00000000
-#define mmUVD_RB_SIZE2_DEFAULT 0x00000000
-#define mmUVD_RB_RPTR2_DEFAULT 0x00000000
-#define mmUVD_RB_WPTR2_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_LO_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_HI_DEFAULT 0x00000000
-#define mmUVD_RB_SIZE_DEFAULT 0x00000000
-#define mmUVD_RB_RPTR_DEFAULT 0x00000000
-#define mmUVD_RB_WPTR_DEFAULT 0x00000000
-#define mmUVD_RB_WPTR4_DEFAULT 0x00000000
-#define mmUVD_JRBC_RB_RPTR_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: uvd_uvddec
-#define mmUVD_SEMA_CNTL_DEFAULT 0x00000003
-#define mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_LMI_JRBC_IB_VMID_DEFAULT 0x00000000
-#define mmUVD_JRBC_RB_WPTR_DEFAULT 0x00000000
-#define mmUVD_JRBC_RB_CNTL_DEFAULT 0x00000100
-#define mmUVD_JRBC_IB_SIZE_DEFAULT 0x00000000
-#define mmUVD_JRBC_LMI_SWAP_CNTL_DEFAULT 0x00000000
-#define mmUVD_JRBC_SOFT_RESET_DEFAULT 0x00000000
-#define mmUVD_JRBC_STATUS_DEFAULT 0x00000003
-#define mmUVD_RB_RPTR3_DEFAULT 0x00000000
-#define mmUVD_RB_WPTR3_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_LO3_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_HI3_DEFAULT 0x00000000
-#define mmUVD_RB_SIZE3_DEFAULT 0x00000000
-#define mmJPEG_CGC_GATE_DEFAULT 0x00300000
-#define mmUVD_CTX_INDEX_DEFAULT 0x00000000
-#define mmUVD_CTX_DATA_DEFAULT 0x00000000
-#define mmUVD_CGC_GATE_DEFAULT 0x000fffff
-#define mmUVD_CGC_STATUS_DEFAULT 0x00000000
-#define mmUVD_CGC_CTRL_DEFAULT 0x1fff018d
-#define mmUVD_GP_SCRATCH0_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH1_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH2_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH3_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH4_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH5_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH6_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH7_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE_VMID_DEFAULT 0x00000000
-#define mmUVD_LMI_CTRL2_DEFAULT 0x003e0000
-#define mmUVD_MASTINT_EN_DEFAULT 0x00000000
-#define mmJPEG_CGC_CTRL_DEFAULT 0x0000018d
-#define mmUVD_LMI_CTRL_DEFAULT 0x00104340
-#define mmUVD_LMI_STATUS_DEFAULT 0x003fff7f
-#define mmUVD_LMI_VM_CTRL_DEFAULT 0x00000000
-#define mmUVD_LMI_SWAP_CNTL_DEFAULT 0x00000000
-#define mmUVD_MPC_SET_MUXA0_DEFAULT 0x00002040
-#define mmUVD_MPC_SET_MUXA1_DEFAULT 0x00000000
-#define mmUVD_MPC_SET_MUXB0_DEFAULT 0x00002040
-#define mmUVD_MPC_SET_MUXB1_DEFAULT 0x00000000
-#define mmUVD_MPC_SET_MUX_DEFAULT 0x00000088
-#define mmUVD_MPC_SET_ALU_DEFAULT 0x00000000
-#define mmUVD_GPCOM_SYS_CMD_DEFAULT 0x00000000
-#define mmUVD_GPCOM_SYS_DATA0_DEFAULT 0x00000000
-#define mmUVD_GPCOM_SYS_DATA1_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_OFFSET0_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_SIZE0_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_OFFSET1_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_SIZE1_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_OFFSET2_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_SIZE2_DEFAULT 0x00000000
-#define mmUVD_VCPU_CNTL_DEFAULT 0x0ff20000
-#define mmUVD_SOFT_RESET_DEFAULT 0x00000008
-#define mmUVD_LMI_RBC_IB_VMID_DEFAULT 0x00000000
-#define mmUVD_RBC_IB_SIZE_DEFAULT 0x00000000
-#define mmUVD_RBC_RB_RPTR_DEFAULT 0x00000000
-#define mmUVD_RBC_RB_WPTR_DEFAULT 0x00000000
-#define mmUVD_RBC_RB_WPTR_CNTL_DEFAULT 0x00000000
-#define mmUVD_RBC_RB_CNTL_DEFAULT 0x01000101
-#define mmUVD_RBC_RB_RPTR_ADDR_DEFAULT 0x00000000
-#define mmUVD_STATUS_DEFAULT 0x00000000
-#define mmUVD_SEMA_TIMEOUT_STATUS_DEFAULT 0x00000000
-#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL_DEFAULT 0x02000000
-#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL_DEFAULT 0x02000000
-#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL_DEFAULT 0x02000000
-#define mmUVD_CONTEXT_ID_DEFAULT 0x00000000
-#define mmUVD_CONTEXT_ID2_DEFAULT 0x00000000
-#define mmUVD_RBC_WPTR_POLL_CNTL_DEFAULT 0x00400100
-#define mmUVD_RBC_WPTR_POLL_ADDR_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_LO4_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_HI4_DEFAULT 0x00000000
-#define mmUVD_RB_SIZE4_DEFAULT 0x00000000
-#define mmUVD_RB_RPTR4_DEFAULT 0x00000000
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_default.h
new file mode 100644
index 0000000..4be3cb5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_default.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma0_4_0_DEFAULT_HEADER
+#define _sdma0_4_0_DEFAULT_HEADER
+
+
+// addressBlock: sdma0_sdma0dec
+#define mmSDMA0_UCODE_ADDR_DEFAULT 0x00000000
+#define mmSDMA0_UCODE_DATA_DEFAULT 0x00000000
+#define mmSDMA0_VM_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_VM_CTX_LO_DEFAULT 0x00000000
+#define mmSDMA0_VM_CTX_HI_DEFAULT 0x00000000
+#define mmSDMA0_ACTIVE_FCN_ID_DEFAULT 0x00000000
+#define mmSDMA0_VM_CTX_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_VIRT_RESET_REQ_DEFAULT 0x00000000
+#define mmSDMA0_VF_ENABLE_DEFAULT 0x00000000
+#define mmSDMA0_CONTEXT_REG_TYPE0_DEFAULT 0xfffdf79f
+#define mmSDMA0_CONTEXT_REG_TYPE1_DEFAULT 0x003fbcff
+#define mmSDMA0_CONTEXT_REG_TYPE2_DEFAULT 0x000003ff
+#define mmSDMA0_CONTEXT_REG_TYPE3_DEFAULT 0x00000000
+#define mmSDMA0_PUB_REG_TYPE0_DEFAULT 0x3c000000
+#define mmSDMA0_PUB_REG_TYPE1_DEFAULT 0x30003882
+#define mmSDMA0_PUB_REG_TYPE2_DEFAULT 0x0fc6e880
+#define mmSDMA0_PUB_REG_TYPE3_DEFAULT 0x00000000
+#define mmSDMA0_MMHUB_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_CONTEXT_GROUP_BOUNDARY_DEFAULT 0x00000000
+#define mmSDMA0_POWER_CNTL_DEFAULT 0x0003c000
+#define mmSDMA0_CLK_CTRL_DEFAULT 0xff000100
+#define mmSDMA0_CNTL_DEFAULT 0x00000002
+#define mmSDMA0_CHICKEN_BITS_DEFAULT 0x00831f07
+#define mmSDMA0_GB_ADDR_CONFIG_DEFAULT 0x00100012
+#define mmSDMA0_GB_ADDR_CONFIG_READ_DEFAULT 0x00100012
+#define mmSDMA0_RB_RPTR_FETCH_HI_DEFAULT 0x00000000
+#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_RB_RPTR_FETCH_DEFAULT 0x00000000
+#define mmSDMA0_IB_OFFSET_FETCH_DEFAULT 0x00000000
+#define mmSDMA0_PROGRAM_DEFAULT 0x00000000
+#define mmSDMA0_STATUS_REG_DEFAULT 0x46dee557
+#define mmSDMA0_STATUS1_REG_DEFAULT 0x000003ff
+#define mmSDMA0_RD_BURST_CNTL_DEFAULT 0x00000003
+#define mmSDMA0_HBM_PAGE_CONFIG_DEFAULT 0x00000000
+#define mmSDMA0_UCODE_CHECKSUM_DEFAULT 0x00000000
+#define mmSDMA0_F32_CNTL_DEFAULT 0x00000001
+#define mmSDMA0_FREEZE_DEFAULT 0x00000000
+#define mmSDMA0_PHASE0_QUANTUM_DEFAULT 0x00010002
+#define mmSDMA0_PHASE1_QUANTUM_DEFAULT 0x00010002
+#define mmSDMA_POWER_GATING_DEFAULT 0x00000000
+#define mmSDMA_PGFSM_CONFIG_DEFAULT 0x00000000
+#define mmSDMA_PGFSM_WRITE_DEFAULT 0x00000000
+#define mmSDMA_PGFSM_READ_DEFAULT 0x00000000
+#define mmSDMA0_EDC_CONFIG_DEFAULT 0x00000002
+#define mmSDMA0_BA_THRESHOLD_DEFAULT 0x03ff03ff
+#define mmSDMA0_ID_DEFAULT 0x00000001
+#define mmSDMA0_VERSION_DEFAULT 0x00000400
+#define mmSDMA0_EDC_COUNTER_DEFAULT 0x00000000
+#define mmSDMA0_EDC_COUNTER_CLEAR_DEFAULT 0x00000000
+#define mmSDMA0_STATUS2_REG_DEFAULT 0x00000000
+#define mmSDMA0_ATOMIC_CNTL_DEFAULT 0x00000200
+#define mmSDMA0_ATOMIC_PREOP_LO_DEFAULT 0x00000000
+#define mmSDMA0_ATOMIC_PREOP_HI_DEFAULT 0x00000000
+#define mmSDMA0_UTCL1_CNTL_DEFAULT 0xd0003019
+#define mmSDMA0_UTCL1_WATERMK_DEFAULT 0xfffbe1fe
+#define mmSDMA0_UTCL1_RD_STATUS_DEFAULT 0x201001ff
+#define mmSDMA0_UTCL1_WR_STATUS_DEFAULT 0x503001ff
+#define mmSDMA0_UTCL1_INV0_DEFAULT 0x00000600
+#define mmSDMA0_UTCL1_INV1_DEFAULT 0x00000000
+#define mmSDMA0_UTCL1_INV2_DEFAULT 0x00000000
+#define mmSDMA0_UTCL1_RD_XNACK0_DEFAULT 0x00000000
+#define mmSDMA0_UTCL1_RD_XNACK1_DEFAULT 0x00000000
+#define mmSDMA0_UTCL1_WR_XNACK0_DEFAULT 0x00000000
+#define mmSDMA0_UTCL1_WR_XNACK1_DEFAULT 0x00000000
+#define mmSDMA0_UTCL1_TIMEOUT_DEFAULT 0x00010001
+#define mmSDMA0_UTCL1_PAGE_DEFAULT 0x000003e0
+#define mmSDMA0_POWER_CNTL_IDLE_DEFAULT 0x06060200
+#define mmSDMA0_RELAX_ORDERING_LUT_DEFAULT 0xc0000006
+#define mmSDMA0_CHICKEN_BITS_2_DEFAULT 0x00000005
+#define mmSDMA0_STATUS3_REG_DEFAULT 0x00100000
+#define mmSDMA0_PHYSICAL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_PHYSICAL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_PHASE2_QUANTUM_DEFAULT 0x00010002
+#define mmSDMA0_ERROR_LOG_DEFAULT 0x0000000f
+#define mmSDMA0_PUB_DUMMY_REG0_DEFAULT 0x00000000
+#define mmSDMA0_PUB_DUMMY_REG1_DEFAULT 0x00000000
+#define mmSDMA0_PUB_DUMMY_REG2_DEFAULT 0x00000000
+#define mmSDMA0_PUB_DUMMY_REG3_DEFAULT 0x00000000
+#define mmSDMA0_F32_COUNTER_DEFAULT 0x00000000
+#define mmSDMA0_UNBREAKABLE_DEFAULT 0x00000000
+#define mmSDMA0_PERFMON_CNTL_DEFAULT 0x000ff7fd
+#define mmSDMA0_PERFCOUNTER0_RESULT_DEFAULT 0x00000000
+#define mmSDMA0_PERFCOUNTER1_RESULT_DEFAULT 0x00000000
+#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE_DEFAULT 0x00640000
+#define mmSDMA0_CRD_CNTL_DEFAULT 0x000085c0
+#define mmSDMA0_MMHUB_TRUSTLVL_DEFAULT 0x00000000
+#define mmSDMA0_GPU_IOV_VIOLATION_LOG_DEFAULT 0x00000000
+#define mmSDMA0_ULV_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_EA_DBIT_ADDR_DATA_DEFAULT 0x00000000
+#define mmSDMA0_EA_DBIT_ADDR_INDEX_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_CNTL_DEFAULT 0x00040000
+#define mmSDMA0_GFX_RB_BASE_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_RPTR_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_RPTR_HI_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_WPTR_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_WPTR_HI_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
+#define mmSDMA0_GFX_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_GFX_IB_CNTL_DEFAULT 0x00000100
+#define mmSDMA0_GFX_IB_RPTR_DEFAULT 0x00000000
+#define mmSDMA0_GFX_IB_OFFSET_DEFAULT 0x00000000
+#define mmSDMA0_GFX_IB_BASE_LO_DEFAULT 0x00000000
+#define mmSDMA0_GFX_IB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA0_GFX_IB_SIZE_DEFAULT 0x00000000
+#define mmSDMA0_GFX_SKIP_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_GFX_CONTEXT_STATUS_DEFAULT 0x00000005
+#define mmSDMA0_GFX_DOORBELL_DEFAULT 0x00000000
+#define mmSDMA0_GFX_CONTEXT_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_GFX_STATUS_DEFAULT 0x00000000
+#define mmSDMA0_GFX_DOORBELL_LOG_DEFAULT 0x00000000
+#define mmSDMA0_GFX_WATERMARK_DEFAULT 0x00000000
+#define mmSDMA0_GFX_DOORBELL_OFFSET_DEFAULT 0x00000000
+#define mmSDMA0_GFX_CSA_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_GFX_CSA_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_GFX_IB_SUB_REMAIN_DEFAULT 0x00000000
+#define mmSDMA0_GFX_PREEMPT_DEFAULT 0x00000000
+#define mmSDMA0_GFX_DUMMY_REG_DEFAULT 0x0000000f
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_GFX_RB_AQL_CNTL_DEFAULT 0x00004000
+#define mmSDMA0_GFX_MINOR_PTR_UPDATE_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_DATA0_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_DATA1_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_DATA2_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_DATA3_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_DATA4_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_DATA5_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_DATA6_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_DATA7_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_DATA8_DEFAULT 0x00000000
+#define mmSDMA0_GFX_MIDCMD_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_CNTL_DEFAULT 0x00040000
+#define mmSDMA0_PAGE_RB_BASE_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_RPTR_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_RPTR_HI_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_WPTR_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_WPTR_HI_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_IB_CNTL_DEFAULT 0x00000100
+#define mmSDMA0_PAGE_IB_RPTR_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_IB_OFFSET_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_IB_BASE_LO_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_IB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_IB_SIZE_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_SKIP_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_CONTEXT_STATUS_DEFAULT 0x00000004
+#define mmSDMA0_PAGE_DOORBELL_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_STATUS_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_DOORBELL_LOG_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_WATERMARK_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_DOORBELL_OFFSET_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_CSA_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_CSA_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_IB_SUB_REMAIN_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_PREEMPT_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_DUMMY_REG_DEFAULT 0x0000000f
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_RB_AQL_CNTL_DEFAULT 0x00004000
+#define mmSDMA0_PAGE_MINOR_PTR_UPDATE_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_DATA0_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_DATA1_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_DATA2_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_DATA3_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_DATA4_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_DATA5_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_DATA6_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_DATA7_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_DATA8_DEFAULT 0x00000000
+#define mmSDMA0_PAGE_MIDCMD_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_CNTL_DEFAULT 0x00040000
+#define mmSDMA0_RLC0_RB_BASE_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_RPTR_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_RPTR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_WPTR_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_WPTR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_IB_CNTL_DEFAULT 0x00000100
+#define mmSDMA0_RLC0_IB_RPTR_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_IB_OFFSET_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_IB_BASE_LO_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_IB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_IB_SIZE_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_SKIP_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_CONTEXT_STATUS_DEFAULT 0x00000004
+#define mmSDMA0_RLC0_DOORBELL_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_STATUS_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_DOORBELL_LOG_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_WATERMARK_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_DOORBELL_OFFSET_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_CSA_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_CSA_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_IB_SUB_REMAIN_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_PREEMPT_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_DUMMY_REG_DEFAULT 0x0000000f
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_RB_AQL_CNTL_DEFAULT 0x00004000
+#define mmSDMA0_RLC0_MINOR_PTR_UPDATE_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_DATA0_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_DATA1_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_DATA2_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_DATA3_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_DATA4_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_DATA5_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_DATA6_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_DATA7_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_DATA8_DEFAULT 0x00000000
+#define mmSDMA0_RLC0_MIDCMD_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_CNTL_DEFAULT 0x00040000
+#define mmSDMA0_RLC1_RB_BASE_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_RPTR_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_RPTR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_WPTR_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_WPTR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_IB_CNTL_DEFAULT 0x00000100
+#define mmSDMA0_RLC1_IB_RPTR_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_IB_OFFSET_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_IB_BASE_LO_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_IB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_IB_SIZE_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_SKIP_CNTL_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_CONTEXT_STATUS_DEFAULT 0x00000004
+#define mmSDMA0_RLC1_DOORBELL_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_STATUS_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_DOORBELL_LOG_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_WATERMARK_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_DOORBELL_OFFSET_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_CSA_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_CSA_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_IB_SUB_REMAIN_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_PREEMPT_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_DUMMY_REG_DEFAULT 0x0000000f
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_RB_AQL_CNTL_DEFAULT 0x00004000
+#define mmSDMA0_RLC1_MINOR_PTR_UPDATE_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_DATA0_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_DATA1_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_DATA2_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_DATA3_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_DATA4_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_DATA5_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_DATA6_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_DATA7_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_DATA8_DEFAULT 0x00000000
+#define mmSDMA0_RLC1_MIDCMD_CNTL_DEFAULT 0x00000000
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_offset.h
new file mode 100644
index 0000000..9975869
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_offset.h
@@ -0,0 +1,547 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma0_4_0_OFFSET_HEADER
+#define _sdma0_4_0_OFFSET_HEADER
+
+
+
+// addressBlock: sdma0_sdma0dec
+// base address: 0x4980
+#define mmSDMA0_UCODE_ADDR 0x0000
+#define mmSDMA0_UCODE_ADDR_BASE_IDX 0
+#define mmSDMA0_UCODE_DATA 0x0001
+#define mmSDMA0_UCODE_DATA_BASE_IDX 0
+#define mmSDMA0_VM_CNTL 0x0004
+#define mmSDMA0_VM_CNTL_BASE_IDX 0
+#define mmSDMA0_VM_CTX_LO 0x0005
+#define mmSDMA0_VM_CTX_LO_BASE_IDX 0
+#define mmSDMA0_VM_CTX_HI 0x0006
+#define mmSDMA0_VM_CTX_HI_BASE_IDX 0
+#define mmSDMA0_ACTIVE_FCN_ID 0x0007
+#define mmSDMA0_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmSDMA0_VM_CTX_CNTL 0x0008
+#define mmSDMA0_VM_CTX_CNTL_BASE_IDX 0
+#define mmSDMA0_VIRT_RESET_REQ 0x0009
+#define mmSDMA0_VIRT_RESET_REQ_BASE_IDX 0
+#define mmSDMA0_VF_ENABLE 0x000a
+#define mmSDMA0_VF_ENABLE_BASE_IDX 0
+#define mmSDMA0_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 0
+#define mmSDMA0_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 0
+#define mmSDMA0_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 0
+#define mmSDMA0_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA0_CONTEXT_REG_TYPE3_BASE_IDX 0
+#define mmSDMA0_PUB_REG_TYPE0 0x000f
+#define mmSDMA0_PUB_REG_TYPE0_BASE_IDX 0
+#define mmSDMA0_PUB_REG_TYPE1 0x0010
+#define mmSDMA0_PUB_REG_TYPE1_BASE_IDX 0
+#define mmSDMA0_PUB_REG_TYPE2 0x0011
+#define mmSDMA0_PUB_REG_TYPE2_BASE_IDX 0
+#define mmSDMA0_PUB_REG_TYPE3 0x0012
+#define mmSDMA0_PUB_REG_TYPE3_BASE_IDX 0
+#define mmSDMA0_MMHUB_CNTL 0x0013
+#define mmSDMA0_MMHUB_CNTL_BASE_IDX 0
+#define mmSDMA0_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA0_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
+#define mmSDMA0_POWER_CNTL 0x001a
+#define mmSDMA0_POWER_CNTL_BASE_IDX 0
+#define mmSDMA0_CLK_CTRL 0x001b
+#define mmSDMA0_CLK_CTRL_BASE_IDX 0
+#define mmSDMA0_CNTL 0x001c
+#define mmSDMA0_CNTL_BASE_IDX 0
+#define mmSDMA0_CHICKEN_BITS 0x001d
+#define mmSDMA0_CHICKEN_BITS_BASE_IDX 0
+#define mmSDMA0_GB_ADDR_CONFIG 0x001e
+#define mmSDMA0_GB_ADDR_CONFIG_BASE_IDX 0
+#define mmSDMA0_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define mmSDMA0_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define mmSDMA0_RB_RPTR_FETCH 0x0022
+#define mmSDMA0_RB_RPTR_FETCH_BASE_IDX 0
+#define mmSDMA0_IB_OFFSET_FETCH 0x0023
+#define mmSDMA0_IB_OFFSET_FETCH_BASE_IDX 0
+#define mmSDMA0_PROGRAM 0x0024
+#define mmSDMA0_PROGRAM_BASE_IDX 0
+#define mmSDMA0_STATUS_REG 0x0025
+#define mmSDMA0_STATUS_REG_BASE_IDX 0
+#define mmSDMA0_STATUS1_REG 0x0026
+#define mmSDMA0_STATUS1_REG_BASE_IDX 0
+#define mmSDMA0_RD_BURST_CNTL 0x0027
+#define mmSDMA0_RD_BURST_CNTL_BASE_IDX 0
+#define mmSDMA0_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0
+#define mmSDMA0_UCODE_CHECKSUM 0x0029
+#define mmSDMA0_UCODE_CHECKSUM_BASE_IDX 0
+#define mmSDMA0_F32_CNTL 0x002a
+#define mmSDMA0_F32_CNTL_BASE_IDX 0
+#define mmSDMA0_FREEZE 0x002b
+#define mmSDMA0_FREEZE_BASE_IDX 0
+#define mmSDMA0_PHASE0_QUANTUM 0x002c
+#define mmSDMA0_PHASE0_QUANTUM_BASE_IDX 0
+#define mmSDMA0_PHASE1_QUANTUM 0x002d
+#define mmSDMA0_PHASE1_QUANTUM_BASE_IDX 0
+#define mmSDMA_POWER_GATING 0x002e
+#define mmSDMA_POWER_GATING_BASE_IDX 0
+#define mmSDMA_PGFSM_CONFIG 0x002f
+#define mmSDMA_PGFSM_CONFIG_BASE_IDX 0
+#define mmSDMA_PGFSM_WRITE 0x0030
+#define mmSDMA_PGFSM_WRITE_BASE_IDX 0
+#define mmSDMA_PGFSM_READ 0x0031
+#define mmSDMA_PGFSM_READ_BASE_IDX 0
+#define mmSDMA0_EDC_CONFIG 0x0032
+#define mmSDMA0_EDC_CONFIG_BASE_IDX 0
+#define mmSDMA0_BA_THRESHOLD 0x0033
+#define mmSDMA0_BA_THRESHOLD_BASE_IDX 0
+#define mmSDMA0_ID 0x0034
+#define mmSDMA0_ID_BASE_IDX 0
+#define mmSDMA0_VERSION 0x0035
+#define mmSDMA0_VERSION_BASE_IDX 0
+#define mmSDMA0_EDC_COUNTER 0x0036
+#define mmSDMA0_EDC_COUNTER_BASE_IDX 0
+#define mmSDMA0_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define mmSDMA0_STATUS2_REG 0x0038
+#define mmSDMA0_STATUS2_REG_BASE_IDX 0
+#define mmSDMA0_ATOMIC_CNTL 0x0039
+#define mmSDMA0_ATOMIC_CNTL_BASE_IDX 0
+#define mmSDMA0_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0
+#define mmSDMA0_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0
+#define mmSDMA0_UTCL1_CNTL 0x003c
+#define mmSDMA0_UTCL1_CNTL_BASE_IDX 0
+#define mmSDMA0_UTCL1_WATERMK 0x003d
+#define mmSDMA0_UTCL1_WATERMK_BASE_IDX 0
+#define mmSDMA0_UTCL1_RD_STATUS 0x003e
+#define mmSDMA0_UTCL1_RD_STATUS_BASE_IDX 0
+#define mmSDMA0_UTCL1_WR_STATUS 0x003f
+#define mmSDMA0_UTCL1_WR_STATUS_BASE_IDX 0
+#define mmSDMA0_UTCL1_INV0 0x0040
+#define mmSDMA0_UTCL1_INV0_BASE_IDX 0
+#define mmSDMA0_UTCL1_INV1 0x0041
+#define mmSDMA0_UTCL1_INV1_BASE_IDX 0
+#define mmSDMA0_UTCL1_INV2 0x0042
+#define mmSDMA0_UTCL1_INV2_BASE_IDX 0
+#define mmSDMA0_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0
+#define mmSDMA0_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0
+#define mmSDMA0_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0
+#define mmSDMA0_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0
+#define mmSDMA0_UTCL1_TIMEOUT 0x0047
+#define mmSDMA0_UTCL1_TIMEOUT_BASE_IDX 0
+#define mmSDMA0_UTCL1_PAGE 0x0048
+#define mmSDMA0_UTCL1_PAGE_BASE_IDX 0
+#define mmSDMA0_POWER_CNTL_IDLE 0x0049
+#define mmSDMA0_POWER_CNTL_IDLE_BASE_IDX 0
+#define mmSDMA0_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0
+#define mmSDMA0_CHICKEN_BITS_2 0x004b
+#define mmSDMA0_CHICKEN_BITS_2_BASE_IDX 0
+#define mmSDMA0_STATUS3_REG 0x004c
+#define mmSDMA0_STATUS3_REG_BASE_IDX 0
+#define mmSDMA0_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_PHASE2_QUANTUM 0x004f
+#define mmSDMA0_PHASE2_QUANTUM_BASE_IDX 0
+#define mmSDMA0_ERROR_LOG 0x0050
+#define mmSDMA0_ERROR_LOG_BASE_IDX 0
+#define mmSDMA0_PUB_DUMMY_REG0 0x0051
+#define mmSDMA0_PUB_DUMMY_REG0_BASE_IDX 0
+#define mmSDMA0_PUB_DUMMY_REG1 0x0052
+#define mmSDMA0_PUB_DUMMY_REG1_BASE_IDX 0
+#define mmSDMA0_PUB_DUMMY_REG2 0x0053
+#define mmSDMA0_PUB_DUMMY_REG2_BASE_IDX 0
+#define mmSDMA0_PUB_DUMMY_REG3 0x0054
+#define mmSDMA0_PUB_DUMMY_REG3_BASE_IDX 0
+#define mmSDMA0_F32_COUNTER 0x0055
+#define mmSDMA0_F32_COUNTER_BASE_IDX 0
+#define mmSDMA0_UNBREAKABLE 0x0056
+#define mmSDMA0_UNBREAKABLE_BASE_IDX 0
+#define mmSDMA0_PERFMON_CNTL 0x0057
+#define mmSDMA0_PERFMON_CNTL_BASE_IDX 0
+#define mmSDMA0_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA0_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmSDMA0_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA0_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0
+#define mmSDMA0_CRD_CNTL 0x005b
+#define mmSDMA0_CRD_CNTL_BASE_IDX 0
+#define mmSDMA0_MMHUB_TRUSTLVL 0x005c
+#define mmSDMA0_MMHUB_TRUSTLVL_BASE_IDX 0
+#define mmSDMA0_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmSDMA0_ULV_CNTL 0x005e
+#define mmSDMA0_ULV_CNTL_BASE_IDX 0
+#define mmSDMA0_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define mmSDMA0_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define mmSDMA0_GFX_RB_CNTL 0x0080
+#define mmSDMA0_GFX_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_RB_BASE 0x0081
+#define mmSDMA0_GFX_RB_BASE_BASE_IDX 0
+#define mmSDMA0_GFX_RB_BASE_HI 0x0082
+#define mmSDMA0_GFX_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_RPTR 0x0083
+#define mmSDMA0_GFX_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA0_GFX_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR 0x0085
+#define mmSDMA0_GFX_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA0_GFX_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA0_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA0_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_GFX_IB_CNTL 0x008a
+#define mmSDMA0_GFX_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_IB_RPTR 0x008b
+#define mmSDMA0_GFX_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_GFX_IB_OFFSET 0x008c
+#define mmSDMA0_GFX_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_GFX_IB_BASE_LO 0x008d
+#define mmSDMA0_GFX_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_GFX_IB_BASE_HI 0x008e
+#define mmSDMA0_GFX_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_GFX_IB_SIZE 0x008f
+#define mmSDMA0_GFX_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_GFX_SKIP_CNTL 0x0090
+#define mmSDMA0_GFX_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA0_GFX_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_GFX_DOORBELL 0x0092
+#define mmSDMA0_GFX_DOORBELL_BASE_IDX 0
+#define mmSDMA0_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA0_GFX_CONTEXT_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_STATUS 0x00a8
+#define mmSDMA0_GFX_STATUS_BASE_IDX 0
+#define mmSDMA0_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA0_GFX_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_GFX_WATERMARK 0x00aa
+#define mmSDMA0_GFX_WATERMARK_BASE_IDX 0
+#define mmSDMA0_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA0_GFX_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA0_GFX_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA0_GFX_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA0_GFX_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_GFX_PREEMPT 0x00b0
+#define mmSDMA0_GFX_PREEMPT_BASE_IDX 0
+#define mmSDMA0_GFX_DUMMY_REG 0x00b1
+#define mmSDMA0_GFX_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA0_GFX_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA0_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA0_GFX_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA0_GFX_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA0_GFX_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA0_GFX_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA0_GFX_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA0_GFX_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA0_GFX_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA0_GFX_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA0_GFX_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA0_GFX_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_CNTL 0x00e0
+#define mmSDMA0_PAGE_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_BASE 0x00e1
+#define mmSDMA0_PAGE_RB_BASE_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_BASE_HI 0x00e2
+#define mmSDMA0_PAGE_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_RPTR 0x00e3
+#define mmSDMA0_PAGE_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_RPTR_HI 0x00e4
+#define mmSDMA0_PAGE_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR 0x00e5
+#define mmSDMA0_PAGE_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR_HI 0x00e6
+#define mmSDMA0_PAGE_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL 0x00e7
+#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI 0x00e8
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO 0x00e9
+#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_CNTL 0x00ea
+#define mmSDMA0_PAGE_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_RPTR 0x00eb
+#define mmSDMA0_PAGE_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_OFFSET 0x00ec
+#define mmSDMA0_PAGE_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_BASE_LO 0x00ed
+#define mmSDMA0_PAGE_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_BASE_HI 0x00ee
+#define mmSDMA0_PAGE_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_SIZE 0x00ef
+#define mmSDMA0_PAGE_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_PAGE_SKIP_CNTL 0x00f0
+#define mmSDMA0_PAGE_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_CONTEXT_STATUS 0x00f1
+#define mmSDMA0_PAGE_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_PAGE_DOORBELL 0x00f2
+#define mmSDMA0_PAGE_DOORBELL_BASE_IDX 0
+#define mmSDMA0_PAGE_STATUS 0x0108
+#define mmSDMA0_PAGE_STATUS_BASE_IDX 0
+#define mmSDMA0_PAGE_DOORBELL_LOG 0x0109
+#define mmSDMA0_PAGE_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_PAGE_WATERMARK 0x010a
+#define mmSDMA0_PAGE_WATERMARK_BASE_IDX 0
+#define mmSDMA0_PAGE_DOORBELL_OFFSET 0x010b
+#define mmSDMA0_PAGE_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_PAGE_CSA_ADDR_LO 0x010c
+#define mmSDMA0_PAGE_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_PAGE_CSA_ADDR_HI 0x010d
+#define mmSDMA0_PAGE_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_IB_SUB_REMAIN 0x010f
+#define mmSDMA0_PAGE_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_PAGE_PREEMPT 0x0110
+#define mmSDMA0_PAGE_PREEMPT_BASE_IDX 0
+#define mmSDMA0_PAGE_DUMMY_REG 0x0111
+#define mmSDMA0_PAGE_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI 0x0112
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO 0x0113
+#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_PAGE_RB_AQL_CNTL 0x0114
+#define mmSDMA0_PAGE_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_PAGE_MINOR_PTR_UPDATE 0x0115
+#define mmSDMA0_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA0 0x0120
+#define mmSDMA0_PAGE_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA1 0x0121
+#define mmSDMA0_PAGE_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA2 0x0122
+#define mmSDMA0_PAGE_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA3 0x0123
+#define mmSDMA0_PAGE_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA4 0x0124
+#define mmSDMA0_PAGE_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA5 0x0125
+#define mmSDMA0_PAGE_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA6 0x0126
+#define mmSDMA0_PAGE_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA7 0x0127
+#define mmSDMA0_PAGE_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_DATA8 0x0128
+#define mmSDMA0_PAGE_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_PAGE_MIDCMD_CNTL 0x0129
+#define mmSDMA0_PAGE_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_CNTL 0x0140
+#define mmSDMA0_RLC0_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_BASE 0x0141
+#define mmSDMA0_RLC0_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_BASE_HI 0x0142
+#define mmSDMA0_RLC0_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_RPTR 0x0143
+#define mmSDMA0_RLC0_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_RPTR_HI 0x0144
+#define mmSDMA0_RLC0_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR 0x0145
+#define mmSDMA0_RLC0_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR_HI 0x0146
+#define mmSDMA0_RLC0_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL 0x0147
+#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI 0x0148
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO 0x0149
+#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_CNTL 0x014a
+#define mmSDMA0_RLC0_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_RPTR 0x014b
+#define mmSDMA0_RLC0_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_OFFSET 0x014c
+#define mmSDMA0_RLC0_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_BASE_LO 0x014d
+#define mmSDMA0_RLC0_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_BASE_HI 0x014e
+#define mmSDMA0_RLC0_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_SIZE 0x014f
+#define mmSDMA0_RLC0_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC0_SKIP_CNTL 0x0150
+#define mmSDMA0_RLC0_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_CONTEXT_STATUS 0x0151
+#define mmSDMA0_RLC0_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC0_DOORBELL 0x0152
+#define mmSDMA0_RLC0_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC0_STATUS 0x0168
+#define mmSDMA0_RLC0_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC0_DOORBELL_LOG 0x0169
+#define mmSDMA0_RLC0_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC0_WATERMARK 0x016a
+#define mmSDMA0_RLC0_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC0_DOORBELL_OFFSET 0x016b
+#define mmSDMA0_RLC0_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC0_CSA_ADDR_LO 0x016c
+#define mmSDMA0_RLC0_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC0_CSA_ADDR_HI 0x016d
+#define mmSDMA0_RLC0_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_IB_SUB_REMAIN 0x016f
+#define mmSDMA0_RLC0_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC0_PREEMPT 0x0170
+#define mmSDMA0_RLC0_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC0_DUMMY_REG 0x0171
+#define mmSDMA0_RLC0_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0x0172
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0x0173
+#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC0_RB_AQL_CNTL 0x0174
+#define mmSDMA0_RLC0_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC0_MINOR_PTR_UPDATE 0x0175
+#define mmSDMA0_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA0 0x0180
+#define mmSDMA0_RLC0_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA1 0x0181
+#define mmSDMA0_RLC0_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA2 0x0182
+#define mmSDMA0_RLC0_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA3 0x0183
+#define mmSDMA0_RLC0_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA4 0x0184
+#define mmSDMA0_RLC0_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA5 0x0185
+#define mmSDMA0_RLC0_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA6 0x0186
+#define mmSDMA0_RLC0_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA7 0x0187
+#define mmSDMA0_RLC0_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_DATA8 0x0188
+#define mmSDMA0_RLC0_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC0_MIDCMD_CNTL 0x0189
+#define mmSDMA0_RLC0_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_CNTL 0x01a0
+#define mmSDMA0_RLC1_RB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_BASE 0x01a1
+#define mmSDMA0_RLC1_RB_BASE_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_BASE_HI 0x01a2
+#define mmSDMA0_RLC1_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_RPTR 0x01a3
+#define mmSDMA0_RLC1_RB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_RPTR_HI 0x01a4
+#define mmSDMA0_RLC1_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR 0x01a5
+#define mmSDMA0_RLC1_RB_WPTR_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR_HI 0x01a6
+#define mmSDMA0_RLC1_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL 0x01a7
+#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI 0x01a8
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO 0x01a9
+#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_CNTL 0x01aa
+#define mmSDMA0_RLC1_IB_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_RPTR 0x01ab
+#define mmSDMA0_RLC1_IB_RPTR_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_OFFSET 0x01ac
+#define mmSDMA0_RLC1_IB_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_BASE_LO 0x01ad
+#define mmSDMA0_RLC1_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_BASE_HI 0x01ae
+#define mmSDMA0_RLC1_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_SIZE 0x01af
+#define mmSDMA0_RLC1_IB_SIZE_BASE_IDX 0
+#define mmSDMA0_RLC1_SKIP_CNTL 0x01b0
+#define mmSDMA0_RLC1_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_CONTEXT_STATUS 0x01b1
+#define mmSDMA0_RLC1_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC1_DOORBELL 0x01b2
+#define mmSDMA0_RLC1_DOORBELL_BASE_IDX 0
+#define mmSDMA0_RLC1_STATUS 0x01c8
+#define mmSDMA0_RLC1_STATUS_BASE_IDX 0
+#define mmSDMA0_RLC1_DOORBELL_LOG 0x01c9
+#define mmSDMA0_RLC1_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA0_RLC1_WATERMARK 0x01ca
+#define mmSDMA0_RLC1_WATERMARK_BASE_IDX 0
+#define mmSDMA0_RLC1_DOORBELL_OFFSET 0x01cb
+#define mmSDMA0_RLC1_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA0_RLC1_CSA_ADDR_LO 0x01cc
+#define mmSDMA0_RLC1_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC1_CSA_ADDR_HI 0x01cd
+#define mmSDMA0_RLC1_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_IB_SUB_REMAIN 0x01cf
+#define mmSDMA0_RLC1_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA0_RLC1_PREEMPT 0x01d0
+#define mmSDMA0_RLC1_PREEMPT_BASE_IDX 0
+#define mmSDMA0_RLC1_DUMMY_REG 0x01d1
+#define mmSDMA0_RLC1_DUMMY_REG_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI 0x01d2
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO 0x01d3
+#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA0_RLC1_RB_AQL_CNTL 0x01d4
+#define mmSDMA0_RLC1_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA0_RLC1_MINOR_PTR_UPDATE 0x01d5
+#define mmSDMA0_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA0 0x01e0
+#define mmSDMA0_RLC1_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA1 0x01e1
+#define mmSDMA0_RLC1_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA2 0x01e2
+#define mmSDMA0_RLC1_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA3 0x01e3
+#define mmSDMA0_RLC1_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA4 0x01e4
+#define mmSDMA0_RLC1_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA5 0x01e5
+#define mmSDMA0_RLC1_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA6 0x01e6
+#define mmSDMA0_RLC1_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA7 0x01e7
+#define mmSDMA0_RLC1_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_DATA8 0x01e8
+#define mmSDMA0_RLC1_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC1_MIDCMD_CNTL 0x01e9
+#define mmSDMA0_RLC1_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
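
[Illustrative sketch, not part of the patch: the generated offset header above pairs each mmSDMA0_* register with a *_BASE_IDX value, and the sh_mask header added below pairs every register field with a __SHIFT/_MASK definition. A minimal, self-contained C example of how such a shift/mask pair is typically consumed when updating one field of a 32-bit register value follows; the helper names here are made up for the sketch, while in the amdgpu driver the same pattern is wrapped by its REG_SET_FIELD()/REG_GET_FIELD() helpers and the SOC15 register-offset macros.]

/* Illustrative only -- not from this patch. The two #defines are repeated
 * from the sh_mask header below so the sketch compiles on its own.
 */
#include <stdint.h>

#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
#define SDMA0_CNTL__TRAP_ENABLE_MASK   0x00000001L

/* Set a field: clear the bits covered by the mask, then OR in the new
 * value shifted into position.  This is the generic pattern behind the
 * generated __SHIFT/_MASK pairs.
 */
static inline uint32_t set_field(uint32_t reg, uint32_t mask,
				 uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: enable the SDMA0 trap interrupt bit in a SDMA0_CNTL value. */
static inline uint32_t sdma0_cntl_enable_trap(uint32_t cntl)
{
	return set_field(cntl, SDMA0_CNTL__TRAP_ENABLE_MASK,
			 SDMA0_CNTL__TRAP_ENABLE__SHIFT, 1);
}
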
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_sh_mask.h
new file mode 100644
index 0000000..f846cc8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_sh_mask.h
@@ -0,0 +1,1852 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma0_4_0_SH_MASK_HEADER
+#define _sdma0_4_0_SH_MASK_HEADER
+
+
+// addressBlock: sdma0_sdma0dec
+//SDMA0_UCODE_ADDR
+#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA0_UCODE_DATA
+#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_VM_CNTL
+#define SDMA0_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA0_VM_CTX_LO
+#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_VM_CTX_HI
+#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_ACTIVE_FCN_ID
+#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA0_VM_CTX_CNTL
+#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA0_VIRT_RESET_REQ
+#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA0_VF_ENABLE
+#define SDMA0_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA0_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA0_CONTEXT_REG_TYPE0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA0_CONTEXT_REG_TYPE1
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA0_CONTEXT_REG_TYPE2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA0_CONTEXT_REG_TYPE3
+#define SDMA0_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA0_PUB_REG_TYPE0
+#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE2__SDMA0_MMHUB_TRUSTLVL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_MMHUB_TRUSTLVL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE3
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL
+//SDMA0_MMHUB_CNTL
+#define SDMA0_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA0_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA0_CONTEXT_GROUP_BOUNDARY
+#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA0_POWER_CNTL
+#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
+#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
+#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA0_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L
+#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L
+#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA0_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+//SDMA0_CLK_CTRL
+#define SDMA0_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA0_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA0_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA0_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA0_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA0_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA0_CNTL
+#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA0_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA0_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA0_CHICKEN_BITS
+#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA0_GB_ADDR_CONFIG
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA0_GB_ADDR_CONFIG_READ
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA0_RB_RPTR_FETCH_HI
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA0_RB_RPTR_FETCH
+#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA0_IB_OFFSET_FETCH
+#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA0_PROGRAM
+#define SDMA0_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA0_STATUS_REG
+#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA0_STATUS1_REG
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA0_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA0_RD_BURST_CNTL
+#define SDMA0_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA0_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+//SDMA0_HBM_PAGE_CONFIG
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA0_UCODE_CHECKSUM
+#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA0_F32_CNTL
+#define SDMA0_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA0_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA0_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA0_FREEZE
+#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA0_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA0_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA0_PHASE0_QUANTUM
+#define SDMA0_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA0_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA0_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA0_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA0_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA0_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA0_PHASE1_QUANTUM
+#define SDMA0_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA0_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA0_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA0_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA0_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA0_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_POWER_GATING
+#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION__SHIFT 0x0
+#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION__SHIFT 0x1
+#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ__SHIFT 0x3
+#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
+#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION_MASK 0x00000001L
+#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION_MASK 0x00000002L
+#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ_MASK 0x00000008L
+#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
+//SDMA_PGFSM_CONFIG
+#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
+#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
+#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
+#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
+#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
+#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
+#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
+#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
+#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
+#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
+#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
+#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
+#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
+#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
+#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
+#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
+//SDMA_PGFSM_WRITE
+#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PGFSM_READ
+#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
+//SDMA0_EDC_CONFIG
+#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA0_BA_THRESHOLD
+#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA0_ID
+#define SDMA0_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA0_VERSION
+#define SDMA0_VERSION__MINVER__SHIFT 0x0
+#define SDMA0_VERSION__MAJVER__SHIFT 0x8
+#define SDMA0_VERSION__REV__SHIFT 0x10
+#define SDMA0_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA0_VERSION__REV_MASK 0x003F0000L
+//SDMA0_EDC_COUNTER
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
+//SDMA0_EDC_COUNTER_CLEAR
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA0_STATUS2_REG
+#define SDMA0_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA0_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2
+#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA0_STATUS2_REG__ID_MASK 0x00000003L
+#define SDMA0_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL
+#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA0_ATOMIC_CNTL
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA0_ATOMIC_PREOP_LO
+#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA0_ATOMIC_PREOP_HI
+#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_CNTL
+#define SDMA0_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA0_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA0_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA0_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA0_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA0_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA0_UTCL1_WATERMK
+#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0xa
+#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x12
+#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x1a
+#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000003FFL
+#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0003FC00L
+#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x03FC0000L
+#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFC000000L
+//SDMA0_UTCL1_RD_STATUS
+#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA0_UTCL1_WR_STATUS
+#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA0_UTCL1_INV0
+#define SDMA0_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA0_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA0_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA0_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA0_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA0_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA0_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA0_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA0_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA0_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA0_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA0_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA0_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA0_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA0_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA0_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA0_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA0_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA0_UTCL1_INV1
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_INV2
+#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_RD_XNACK0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_RD_XNACK1
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA0_UTCL1_WR_XNACK0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_WR_XNACK1
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA0_UTCL1_TIMEOUT
+#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA0_UTCL1_PAGE
+#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA0_POWER_CNTL_IDLE
+#define SDMA0_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA0_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA0_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA0_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA0_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA0_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA0_RELAX_ORDERING_LUT
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA0_CHICKEN_BITS_2
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA0_STATUS3_REG
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+//SDMA0_PHYSICAL_ADDR_LO
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA0_PHYSICAL_ADDR_HI
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA0_PHASE2_QUANTUM
+#define SDMA0_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA0_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA0_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA0_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA0_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA0_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA0_ERROR_LOG
+#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA0_PUB_DUMMY_REG0
+#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG1
+#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG2
+#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG3
+#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_F32_COUNTER
+#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_UNBREAKABLE
+#define SDMA0_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA0_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA0_PERFMON_CNTL
+#define SDMA0_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA0_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA0_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA0_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA0_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA0_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA0_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA0_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA0_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA0_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA0_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA0_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA0_PERFCOUNTER0_RESULT
+#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER1_RESULT
+#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA0_CRD_CNTL
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA0_MMHUB_TRUSTLVL
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x3
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x6
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0x9
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0xc
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0xf
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x12
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x15
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x00000007L
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x00000038L
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x000001C0L
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x00000E00L
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x00007000L
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00038000L
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x001C0000L
+#define SDMA0_MMHUB_TRUSTLVL__SECFLAG7_MASK 0x00E00000L
+//SDMA0_GPU_IOV_VIOLATION_LOG
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
+#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//SDMA0_ULV_CNTL
+#define SDMA0_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA0_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA0_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA0_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA0_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA0_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA0_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA0_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA0_EA_DBIT_ADDR_DATA
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_EA_DBIT_ADDR_INDEX
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA0_GFX_RB_CNTL
+#define SDMA0_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_GFX_RB_CNTL__RB_SIZE_MASK 0x0000007EL
+#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_GFX_RB_BASE
+#define SDMA0_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_BASE_HI
+#define SDMA0_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_GFX_RB_RPTR
+#define SDMA0_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_RPTR_HI
+#define SDMA0_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR
+#define SDMA0_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR_HI
+#define SDMA0_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR_POLL_CNTL
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_GFX_RB_RPTR_ADDR_HI
+#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_RPTR_ADDR_LO
+#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_GFX_IB_CNTL
+#define SDMA0_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_GFX_IB_RPTR
+#define SDMA0_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_GFX_IB_OFFSET
+#define SDMA0_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_GFX_IB_BASE_LO
+#define SDMA0_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_GFX_IB_BASE_HI
+#define SDMA0_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_IB_SIZE
+#define SDMA0_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_GFX_SKIP_CNTL
+#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
+//SDMA0_GFX_CONTEXT_STATUS
+#define SDMA0_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_GFX_DOORBELL
+#define SDMA0_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_GFX_CONTEXT_CNTL
+#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA0_GFX_STATUS
+#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_GFX_DOORBELL_LOG
+#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_GFX_WATERMARK
+#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_GFX_DOORBELL_OFFSET
+#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_GFX_CSA_ADDR_LO
+#define SDMA0_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_GFX_CSA_ADDR_HI
+#define SDMA0_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_IB_SUB_REMAIN
+#define SDMA0_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_GFX_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_GFX_PREEMPT
+#define SDMA0_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_GFX_DUMMY_REG
+#define SDMA0_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_GFX_RB_AQL_CNTL
+#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_GFX_MINOR_PTR_UPDATE
+#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_GFX_MIDCMD_DATA0
+#define SDMA0_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA1
+#define SDMA0_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA2
+#define SDMA0_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA3
+#define SDMA0_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA4
+#define SDMA0_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA5
+#define SDMA0_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA6
+#define SDMA0_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA7
+#define SDMA0_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_DATA8
+#define SDMA0_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_GFX_MIDCMD_CNTL
+#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_PAGE_RB_CNTL
+#define SDMA0_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000007EL
+#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_PAGE_RB_BASE
+#define SDMA0_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_BASE_HI
+#define SDMA0_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_PAGE_RB_RPTR
+#define SDMA0_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_RPTR_HI
+#define SDMA0_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR
+#define SDMA0_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR_HI
+#define SDMA0_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_PAGE_RB_RPTR_ADDR_HI
+#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_RPTR_ADDR_LO
+#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_PAGE_IB_CNTL
+#define SDMA0_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_PAGE_IB_RPTR
+#define SDMA0_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_PAGE_IB_OFFSET
+#define SDMA0_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_PAGE_IB_BASE_LO
+#define SDMA0_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_PAGE_IB_BASE_HI
+#define SDMA0_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_IB_SIZE
+#define SDMA0_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_PAGE_SKIP_CNTL
+#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
+//SDMA0_PAGE_CONTEXT_STATUS
+#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_PAGE_DOORBELL
+#define SDMA0_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_PAGE_STATUS
+#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_PAGE_DOORBELL_LOG
+#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_PAGE_WATERMARK
+#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_PAGE_DOORBELL_OFFSET
+#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_PAGE_CSA_ADDR_LO
+#define SDMA0_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_PAGE_CSA_ADDR_HI
+#define SDMA0_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_IB_SUB_REMAIN
+#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_PAGE_PREEMPT
+#define SDMA0_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_PAGE_DUMMY_REG
+#define SDMA0_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_PAGE_RB_AQL_CNTL
+#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_PAGE_MINOR_PTR_UPDATE
+#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_PAGE_MIDCMD_DATA0
+#define SDMA0_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA1
+#define SDMA0_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA2
+#define SDMA0_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA3
+#define SDMA0_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA4
+#define SDMA0_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA5
+#define SDMA0_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA6
+#define SDMA0_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA7
+#define SDMA0_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_DATA8
+#define SDMA0_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_PAGE_MIDCMD_CNTL
+#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC0_RB_CNTL
+#define SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000007EL
+#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC0_RB_BASE
+#define SDMA0_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_BASE_HI
+#define SDMA0_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC0_RB_RPTR
+#define SDMA0_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_RPTR_HI
+#define SDMA0_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR
+#define SDMA0_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR_HI
+#define SDMA0_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC0_RB_RPTR_ADDR_HI
+#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_RPTR_ADDR_LO
+#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC0_IB_CNTL
+#define SDMA0_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC0_IB_RPTR
+#define SDMA0_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC0_IB_OFFSET
+#define SDMA0_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC0_IB_BASE_LO
+#define SDMA0_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC0_IB_BASE_HI
+#define SDMA0_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_IB_SIZE
+#define SDMA0_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC0_SKIP_CNTL
+#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
+//SDMA0_RLC0_CONTEXT_STATUS
+#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC0_DOORBELL
+#define SDMA0_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC0_STATUS
+#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC0_DOORBELL_LOG
+#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC0_WATERMARK
+#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC0_DOORBELL_OFFSET
+#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC0_CSA_ADDR_LO
+#define SDMA0_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC0_CSA_ADDR_HI
+#define SDMA0_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_IB_SUB_REMAIN
+#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_RLC0_PREEMPT
+#define SDMA0_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC0_DUMMY_REG
+#define SDMA0_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC0_RB_AQL_CNTL
+#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC0_MINOR_PTR_UPDATE
+#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC0_MIDCMD_DATA0
+#define SDMA0_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA1
+#define SDMA0_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA2
+#define SDMA0_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA3
+#define SDMA0_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA4
+#define SDMA0_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA5
+#define SDMA0_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA6
+#define SDMA0_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA7
+#define SDMA0_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_DATA8
+#define SDMA0_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC0_MIDCMD_CNTL
+#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_RLC1_RB_CNTL
+#define SDMA0_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000007EL
+#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_RLC1_RB_BASE
+#define SDMA0_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_BASE_HI
+#define SDMA0_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_RLC1_RB_RPTR
+#define SDMA0_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_RPTR_HI
+#define SDMA0_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR
+#define SDMA0_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR_HI
+#define SDMA0_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA0_RLC1_RB_RPTR_ADDR_HI
+#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_RPTR_ADDR_LO
+#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC1_IB_CNTL
+#define SDMA0_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA0_RLC1_IB_RPTR
+#define SDMA0_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC1_IB_OFFSET
+#define SDMA0_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_RLC1_IB_BASE_LO
+#define SDMA0_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_RLC1_IB_BASE_HI
+#define SDMA0_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_IB_SIZE
+#define SDMA0_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_RLC1_SKIP_CNTL
+#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
+//SDMA0_RLC1_CONTEXT_STATUS
+#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA0_RLC1_DOORBELL
+#define SDMA0_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_RLC1_STATUS
+#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA0_RLC1_DOORBELL_LOG
+#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_RLC1_WATERMARK
+#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA0_RLC1_DOORBELL_OFFSET
+#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_RLC1_CSA_ADDR_LO
+#define SDMA0_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC1_CSA_ADDR_HI
+#define SDMA0_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_IB_SUB_REMAIN
+#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_RLC1_PREEMPT
+#define SDMA0_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_RLC1_DUMMY_REG
+#define SDMA0_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_RLC1_RB_AQL_CNTL
+#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA0_RLC1_MINOR_PTR_UPDATE
+#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_RLC1_MIDCMD_DATA0
+#define SDMA0_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA1
+#define SDMA0_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA2
+#define SDMA0_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA3
+#define SDMA0_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA4
+#define SDMA0_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA5
+#define SDMA0_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA6
+#define SDMA0_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA7
+#define SDMA0_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_DATA8
+#define SDMA0_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_RLC1_MIDCMD_CNTL
+#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_default.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_default.h
index bafcecb..bafcecb 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_default.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_offset.h
index 1544af6..1544af6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h
index 1445bba..1445bba 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/SDMA0/sdma0_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_default.h
new file mode 100644
index 0000000..9347337
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_default.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma1_4_0_DEFAULT_HEADER
+#define _sdma1_4_0_DEFAULT_HEADER
+
+
+// addressBlock: sdma1_sdma1dec
+#define mmSDMA1_UCODE_ADDR_DEFAULT 0x00000000
+#define mmSDMA1_UCODE_DATA_DEFAULT 0x00000000
+#define mmSDMA1_VM_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_VM_CTX_LO_DEFAULT 0x00000000
+#define mmSDMA1_VM_CTX_HI_DEFAULT 0x00000000
+#define mmSDMA1_ACTIVE_FCN_ID_DEFAULT 0x00000000
+#define mmSDMA1_VM_CTX_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_VIRT_RESET_REQ_DEFAULT 0x00000000
+#define mmSDMA1_VF_ENABLE_DEFAULT 0x00000000
+#define mmSDMA1_CONTEXT_REG_TYPE0_DEFAULT 0xfffdf79f
+#define mmSDMA1_CONTEXT_REG_TYPE1_DEFAULT 0x003fbcff
+#define mmSDMA1_CONTEXT_REG_TYPE2_DEFAULT 0x000003ff
+#define mmSDMA1_CONTEXT_REG_TYPE3_DEFAULT 0x00000000
+#define mmSDMA1_PUB_REG_TYPE0_DEFAULT 0x3c000000
+#define mmSDMA1_PUB_REG_TYPE1_DEFAULT 0x30003882
+#define mmSDMA1_PUB_REG_TYPE2_DEFAULT 0x0fc6e880
+#define mmSDMA1_PUB_REG_TYPE3_DEFAULT 0x00000000
+#define mmSDMA1_MMHUB_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_CONTEXT_GROUP_BOUNDARY_DEFAULT 0x00000000
+#define mmSDMA1_POWER_CNTL_DEFAULT 0x0003c000
+#define mmSDMA1_CLK_CTRL_DEFAULT 0xff000100
+#define mmSDMA1_CNTL_DEFAULT 0x00000002
+#define mmSDMA1_CHICKEN_BITS_DEFAULT 0x00831f07
+#define mmSDMA1_GB_ADDR_CONFIG_DEFAULT 0x00100012
+#define mmSDMA1_GB_ADDR_CONFIG_READ_DEFAULT 0x00100012
+#define mmSDMA1_RB_RPTR_FETCH_HI_DEFAULT 0x00000000
+#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_RB_RPTR_FETCH_DEFAULT 0x00000000
+#define mmSDMA1_IB_OFFSET_FETCH_DEFAULT 0x00000000
+#define mmSDMA1_PROGRAM_DEFAULT 0x00000000
+#define mmSDMA1_STATUS_REG_DEFAULT 0x46dee557
+#define mmSDMA1_STATUS1_REG_DEFAULT 0x000003ff
+#define mmSDMA1_RD_BURST_CNTL_DEFAULT 0x00000003
+#define mmSDMA1_HBM_PAGE_CONFIG_DEFAULT 0x00000000
+#define mmSDMA1_UCODE_CHECKSUM_DEFAULT 0x00000000
+#define mmSDMA1_F32_CNTL_DEFAULT 0x00000001
+#define mmSDMA1_FREEZE_DEFAULT 0x00000000
+#define mmSDMA1_PHASE0_QUANTUM_DEFAULT 0x00010002
+#define mmSDMA1_PHASE1_QUANTUM_DEFAULT 0x00010002
+#define mmSDMA1_EDC_CONFIG_DEFAULT 0x00000002
+#define mmSDMA1_BA_THRESHOLD_DEFAULT 0x03ff03ff
+#define mmSDMA1_ID_DEFAULT 0x00000001
+#define mmSDMA1_VERSION_DEFAULT 0x00000400
+#define mmSDMA1_EDC_COUNTER_DEFAULT 0x00000000
+#define mmSDMA1_EDC_COUNTER_CLEAR_DEFAULT 0x00000000
+#define mmSDMA1_STATUS2_REG_DEFAULT 0x00000001
+#define mmSDMA1_ATOMIC_CNTL_DEFAULT 0x00000200
+#define mmSDMA1_ATOMIC_PREOP_LO_DEFAULT 0x00000000
+#define mmSDMA1_ATOMIC_PREOP_HI_DEFAULT 0x00000000
+#define mmSDMA1_UTCL1_CNTL_DEFAULT 0xd0003019
+#define mmSDMA1_UTCL1_WATERMK_DEFAULT 0xfffbe1fe
+#define mmSDMA1_UTCL1_RD_STATUS_DEFAULT 0x201001ff
+#define mmSDMA1_UTCL1_WR_STATUS_DEFAULT 0x503001ff
+#define mmSDMA1_UTCL1_INV0_DEFAULT 0x00000600
+#define mmSDMA1_UTCL1_INV1_DEFAULT 0x00000000
+#define mmSDMA1_UTCL1_INV2_DEFAULT 0x00000000
+#define mmSDMA1_UTCL1_RD_XNACK0_DEFAULT 0x00000000
+#define mmSDMA1_UTCL1_RD_XNACK1_DEFAULT 0x00000000
+#define mmSDMA1_UTCL1_WR_XNACK0_DEFAULT 0x00000000
+#define mmSDMA1_UTCL1_WR_XNACK1_DEFAULT 0x00000000
+#define mmSDMA1_UTCL1_TIMEOUT_DEFAULT 0x00010001
+#define mmSDMA1_UTCL1_PAGE_DEFAULT 0x000003e0
+#define mmSDMA1_POWER_CNTL_IDLE_DEFAULT 0x06060200
+#define mmSDMA1_RELAX_ORDERING_LUT_DEFAULT 0xc0000006
+#define mmSDMA1_CHICKEN_BITS_2_DEFAULT 0x00000005
+#define mmSDMA1_STATUS3_REG_DEFAULT 0x00100000
+#define mmSDMA1_PHYSICAL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_PHYSICAL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_PHASE2_QUANTUM_DEFAULT 0x00010002
+#define mmSDMA1_ERROR_LOG_DEFAULT 0x0000000f
+#define mmSDMA1_PUB_DUMMY_REG0_DEFAULT 0x00000000
+#define mmSDMA1_PUB_DUMMY_REG1_DEFAULT 0x00000000
+#define mmSDMA1_PUB_DUMMY_REG2_DEFAULT 0x00000000
+#define mmSDMA1_PUB_DUMMY_REG3_DEFAULT 0x00000000
+#define mmSDMA1_F32_COUNTER_DEFAULT 0x00000000
+#define mmSDMA1_UNBREAKABLE_DEFAULT 0x00000000
+#define mmSDMA1_PERFMON_CNTL_DEFAULT 0x000ff7fd
+#define mmSDMA1_PERFCOUNTER0_RESULT_DEFAULT 0x00000000
+#define mmSDMA1_PERFCOUNTER1_RESULT_DEFAULT 0x00000000
+#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE_DEFAULT 0x00640000
+#define mmSDMA1_CRD_CNTL_DEFAULT 0x000085c0
+#define mmSDMA1_MMHUB_TRUSTLVL_DEFAULT 0x00000000
+#define mmSDMA1_GPU_IOV_VIOLATION_LOG_DEFAULT 0x00000000
+#define mmSDMA1_ULV_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_EA_DBIT_ADDR_DATA_DEFAULT 0x00000000
+#define mmSDMA1_EA_DBIT_ADDR_INDEX_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_CNTL_DEFAULT 0x00040000
+#define mmSDMA1_GFX_RB_BASE_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_RPTR_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_RPTR_HI_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_WPTR_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_WPTR_HI_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
+#define mmSDMA1_GFX_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_GFX_IB_CNTL_DEFAULT 0x00000100
+#define mmSDMA1_GFX_IB_RPTR_DEFAULT 0x00000000
+#define mmSDMA1_GFX_IB_OFFSET_DEFAULT 0x00000000
+#define mmSDMA1_GFX_IB_BASE_LO_DEFAULT 0x00000000
+#define mmSDMA1_GFX_IB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA1_GFX_IB_SIZE_DEFAULT 0x00000000
+#define mmSDMA1_GFX_SKIP_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_GFX_CONTEXT_STATUS_DEFAULT 0x00000005
+#define mmSDMA1_GFX_DOORBELL_DEFAULT 0x00000000
+#define mmSDMA1_GFX_CONTEXT_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_GFX_STATUS_DEFAULT 0x00000000
+#define mmSDMA1_GFX_DOORBELL_LOG_DEFAULT 0x00000000
+#define mmSDMA1_GFX_WATERMARK_DEFAULT 0x00000000
+#define mmSDMA1_GFX_DOORBELL_OFFSET_DEFAULT 0x00000000
+#define mmSDMA1_GFX_CSA_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_GFX_CSA_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_GFX_IB_SUB_REMAIN_DEFAULT 0x00000000
+#define mmSDMA1_GFX_PREEMPT_DEFAULT 0x00000000
+#define mmSDMA1_GFX_DUMMY_REG_DEFAULT 0x0000000f
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_GFX_RB_AQL_CNTL_DEFAULT 0x00004000
+#define mmSDMA1_GFX_MINOR_PTR_UPDATE_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_DATA0_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_DATA1_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_DATA2_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_DATA3_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_DATA4_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_DATA5_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_DATA6_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_DATA7_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_DATA8_DEFAULT 0x00000000
+#define mmSDMA1_GFX_MIDCMD_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_CNTL_DEFAULT 0x00040000
+#define mmSDMA1_PAGE_RB_BASE_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_RPTR_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_RPTR_HI_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_WPTR_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_WPTR_HI_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_IB_CNTL_DEFAULT 0x00000100
+#define mmSDMA1_PAGE_IB_RPTR_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_IB_OFFSET_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_IB_BASE_LO_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_IB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_IB_SIZE_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_SKIP_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_CONTEXT_STATUS_DEFAULT 0x00000004
+#define mmSDMA1_PAGE_DOORBELL_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_STATUS_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_DOORBELL_LOG_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_WATERMARK_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_DOORBELL_OFFSET_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_CSA_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_CSA_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_IB_SUB_REMAIN_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_PREEMPT_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_DUMMY_REG_DEFAULT 0x0000000f
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_RB_AQL_CNTL_DEFAULT 0x00004000
+#define mmSDMA1_PAGE_MINOR_PTR_UPDATE_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_DATA0_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_DATA1_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_DATA2_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_DATA3_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_DATA4_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_DATA5_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_DATA6_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_DATA7_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_DATA8_DEFAULT 0x00000000
+#define mmSDMA1_PAGE_MIDCMD_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_CNTL_DEFAULT 0x00040000
+#define mmSDMA1_RLC0_RB_BASE_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_RPTR_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_RPTR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_WPTR_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_WPTR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_IB_CNTL_DEFAULT 0x00000100
+#define mmSDMA1_RLC0_IB_RPTR_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_IB_OFFSET_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_IB_BASE_LO_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_IB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_IB_SIZE_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_SKIP_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_CONTEXT_STATUS_DEFAULT 0x00000004
+#define mmSDMA1_RLC0_DOORBELL_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_STATUS_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_DOORBELL_LOG_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_WATERMARK_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_DOORBELL_OFFSET_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_CSA_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_CSA_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_IB_SUB_REMAIN_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_PREEMPT_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_DUMMY_REG_DEFAULT 0x0000000f
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_RB_AQL_CNTL_DEFAULT 0x00004000
+#define mmSDMA1_RLC0_MINOR_PTR_UPDATE_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_DATA0_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_DATA1_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_DATA2_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_DATA3_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_DATA4_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_DATA5_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_DATA6_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_DATA7_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_DATA8_DEFAULT 0x00000000
+#define mmSDMA1_RLC0_MIDCMD_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_CNTL_DEFAULT 0x00040000
+#define mmSDMA1_RLC1_RB_BASE_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_RPTR_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_RPTR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_WPTR_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_WPTR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_IB_CNTL_DEFAULT 0x00000100
+#define mmSDMA1_RLC1_IB_RPTR_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_IB_OFFSET_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_IB_BASE_LO_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_IB_BASE_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_IB_SIZE_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_SKIP_CNTL_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_CONTEXT_STATUS_DEFAULT 0x00000004
+#define mmSDMA1_RLC1_DOORBELL_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_STATUS_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_DOORBELL_LOG_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_WATERMARK_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_DOORBELL_OFFSET_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_CSA_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_CSA_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_IB_SUB_REMAIN_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_PREEMPT_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_DUMMY_REG_DEFAULT 0x0000000f
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_RB_AQL_CNTL_DEFAULT 0x00004000
+#define mmSDMA1_RLC1_MINOR_PTR_UPDATE_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_DATA0_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_DATA1_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_DATA2_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_DATA3_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_DATA4_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_DATA5_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_DATA6_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_DATA7_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_DATA8_DEFAULT 0x00000000
+#define mmSDMA1_RLC1_MIDCMD_CNTL_DEFAULT 0x00000000
+
+#endif
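[Not part of the patch — an illustrative note.] The *_DEFAULT values above record the documented power-on value of each SDMA1 register. A minimal sketch of one way such a table could be consumed, assuming a caller-supplied MMIO write callback; the names write_reg_t, sdma1_rlc0_defaults and sdma1_rlc0_reset_defaults() are hypothetical and only the register offsets/default values themselves come from this patch:

/* Illustrative sketch only, not driver code. */
#include <stdint.h>

typedef void (*write_reg_t)(uint32_t reg, uint32_t val);

static const struct { uint32_t reg; uint32_t def; } sdma1_rlc0_defaults[] = {
	{ 0x0140 /* mmSDMA1_RLC0_RB_CNTL */,           0x00040000 /* mmSDMA1_RLC0_RB_CNTL_DEFAULT */ },
	{ 0x0147 /* mmSDMA1_RLC0_RB_WPTR_POLL_CNTL */, 0x00401000 /* mmSDMA1_RLC0_RB_WPTR_POLL_CNTL_DEFAULT */ },
	{ 0x014a /* mmSDMA1_RLC0_IB_CNTL */,           0x00000100 /* mmSDMA1_RLC0_IB_CNTL_DEFAULT */ },
};

/* Program the RLC0 queue registers back to their documented defaults,
 * using whatever register index the caller's accessor expects. */
static void sdma1_rlc0_reset_defaults(write_reg_t write_reg)
{
	unsigned int i;

	for (i = 0; i < sizeof(sdma1_rlc0_defaults) / sizeof(sdma1_rlc0_defaults[0]); i++)
		write_reg(sdma1_rlc0_defaults[i].reg, sdma1_rlc0_defaults[i].def);
}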
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_offset.h
new file mode 100644
index 0000000..f2c151a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_offset.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma1_4_0_OFFSET_HEADER
+#define _sdma1_4_0_OFFSET_HEADER
+
+
+
+// addressBlock: sdma1_sdma1dec
+// base address: 0x5180
+#define mmSDMA1_UCODE_ADDR 0x0000
+#define mmSDMA1_UCODE_ADDR_BASE_IDX 0
+#define mmSDMA1_UCODE_DATA 0x0001
+#define mmSDMA1_UCODE_DATA_BASE_IDX 0
+#define mmSDMA1_VM_CNTL 0x0004
+#define mmSDMA1_VM_CNTL_BASE_IDX 0
+#define mmSDMA1_VM_CTX_LO 0x0005
+#define mmSDMA1_VM_CTX_LO_BASE_IDX 0
+#define mmSDMA1_VM_CTX_HI 0x0006
+#define mmSDMA1_VM_CTX_HI_BASE_IDX 0
+#define mmSDMA1_ACTIVE_FCN_ID 0x0007
+#define mmSDMA1_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmSDMA1_VM_CTX_CNTL 0x0008
+#define mmSDMA1_VM_CTX_CNTL_BASE_IDX 0
+#define mmSDMA1_VIRT_RESET_REQ 0x0009
+#define mmSDMA1_VIRT_RESET_REQ_BASE_IDX 0
+#define mmSDMA1_VF_ENABLE 0x000a
+#define mmSDMA1_VF_ENABLE_BASE_IDX 0
+#define mmSDMA1_CONTEXT_REG_TYPE0 0x000b
+#define mmSDMA1_CONTEXT_REG_TYPE0_BASE_IDX 0
+#define mmSDMA1_CONTEXT_REG_TYPE1 0x000c
+#define mmSDMA1_CONTEXT_REG_TYPE1_BASE_IDX 0
+#define mmSDMA1_CONTEXT_REG_TYPE2 0x000d
+#define mmSDMA1_CONTEXT_REG_TYPE2_BASE_IDX 0
+#define mmSDMA1_CONTEXT_REG_TYPE3 0x000e
+#define mmSDMA1_CONTEXT_REG_TYPE3_BASE_IDX 0
+#define mmSDMA1_PUB_REG_TYPE0 0x000f
+#define mmSDMA1_PUB_REG_TYPE0_BASE_IDX 0
+#define mmSDMA1_PUB_REG_TYPE1 0x0010
+#define mmSDMA1_PUB_REG_TYPE1_BASE_IDX 0
+#define mmSDMA1_PUB_REG_TYPE2 0x0011
+#define mmSDMA1_PUB_REG_TYPE2_BASE_IDX 0
+#define mmSDMA1_PUB_REG_TYPE3 0x0012
+#define mmSDMA1_PUB_REG_TYPE3_BASE_IDX 0
+#define mmSDMA1_MMHUB_CNTL 0x0013
+#define mmSDMA1_MMHUB_CNTL_BASE_IDX 0
+#define mmSDMA1_CONTEXT_GROUP_BOUNDARY 0x0019
+#define mmSDMA1_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
+#define mmSDMA1_POWER_CNTL 0x001a
+#define mmSDMA1_POWER_CNTL_BASE_IDX 0
+#define mmSDMA1_CLK_CTRL 0x001b
+#define mmSDMA1_CLK_CTRL_BASE_IDX 0
+#define mmSDMA1_CNTL 0x001c
+#define mmSDMA1_CNTL_BASE_IDX 0
+#define mmSDMA1_CHICKEN_BITS 0x001d
+#define mmSDMA1_CHICKEN_BITS_BASE_IDX 0
+#define mmSDMA1_GB_ADDR_CONFIG 0x001e
+#define mmSDMA1_GB_ADDR_CONFIG_BASE_IDX 0
+#define mmSDMA1_GB_ADDR_CONFIG_READ 0x001f
+#define mmSDMA1_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define mmSDMA1_RB_RPTR_FETCH_HI 0x0020
+#define mmSDMA1_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define mmSDMA1_RB_RPTR_FETCH 0x0022
+#define mmSDMA1_RB_RPTR_FETCH_BASE_IDX 0
+#define mmSDMA1_IB_OFFSET_FETCH 0x0023
+#define mmSDMA1_IB_OFFSET_FETCH_BASE_IDX 0
+#define mmSDMA1_PROGRAM 0x0024
+#define mmSDMA1_PROGRAM_BASE_IDX 0
+#define mmSDMA1_STATUS_REG 0x0025
+#define mmSDMA1_STATUS_REG_BASE_IDX 0
+#define mmSDMA1_STATUS1_REG 0x0026
+#define mmSDMA1_STATUS1_REG_BASE_IDX 0
+#define mmSDMA1_RD_BURST_CNTL 0x0027
+#define mmSDMA1_RD_BURST_CNTL_BASE_IDX 0
+#define mmSDMA1_HBM_PAGE_CONFIG 0x0028
+#define mmSDMA1_HBM_PAGE_CONFIG_BASE_IDX 0
+#define mmSDMA1_UCODE_CHECKSUM 0x0029
+#define mmSDMA1_UCODE_CHECKSUM_BASE_IDX 0
+#define mmSDMA1_F32_CNTL 0x002a
+#define mmSDMA1_F32_CNTL_BASE_IDX 0
+#define mmSDMA1_FREEZE 0x002b
+#define mmSDMA1_FREEZE_BASE_IDX 0
+#define mmSDMA1_PHASE0_QUANTUM 0x002c
+#define mmSDMA1_PHASE0_QUANTUM_BASE_IDX 0
+#define mmSDMA1_PHASE1_QUANTUM 0x002d
+#define mmSDMA1_PHASE1_QUANTUM_BASE_IDX 0
+#define mmSDMA1_EDC_CONFIG 0x0032
+#define mmSDMA1_EDC_CONFIG_BASE_IDX 0
+#define mmSDMA1_BA_THRESHOLD 0x0033
+#define mmSDMA1_BA_THRESHOLD_BASE_IDX 0
+#define mmSDMA1_ID 0x0034
+#define mmSDMA1_ID_BASE_IDX 0
+#define mmSDMA1_VERSION 0x0035
+#define mmSDMA1_VERSION_BASE_IDX 0
+#define mmSDMA1_EDC_COUNTER 0x0036
+#define mmSDMA1_EDC_COUNTER_BASE_IDX 0
+#define mmSDMA1_EDC_COUNTER_CLEAR 0x0037
+#define mmSDMA1_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define mmSDMA1_STATUS2_REG 0x0038
+#define mmSDMA1_STATUS2_REG_BASE_IDX 0
+#define mmSDMA1_ATOMIC_CNTL 0x0039
+#define mmSDMA1_ATOMIC_CNTL_BASE_IDX 0
+#define mmSDMA1_ATOMIC_PREOP_LO 0x003a
+#define mmSDMA1_ATOMIC_PREOP_LO_BASE_IDX 0
+#define mmSDMA1_ATOMIC_PREOP_HI 0x003b
+#define mmSDMA1_ATOMIC_PREOP_HI_BASE_IDX 0
+#define mmSDMA1_UTCL1_CNTL 0x003c
+#define mmSDMA1_UTCL1_CNTL_BASE_IDX 0
+#define mmSDMA1_UTCL1_WATERMK 0x003d
+#define mmSDMA1_UTCL1_WATERMK_BASE_IDX 0
+#define mmSDMA1_UTCL1_RD_STATUS 0x003e
+#define mmSDMA1_UTCL1_RD_STATUS_BASE_IDX 0
+#define mmSDMA1_UTCL1_WR_STATUS 0x003f
+#define mmSDMA1_UTCL1_WR_STATUS_BASE_IDX 0
+#define mmSDMA1_UTCL1_INV0 0x0040
+#define mmSDMA1_UTCL1_INV0_BASE_IDX 0
+#define mmSDMA1_UTCL1_INV1 0x0041
+#define mmSDMA1_UTCL1_INV1_BASE_IDX 0
+#define mmSDMA1_UTCL1_INV2 0x0042
+#define mmSDMA1_UTCL1_INV2_BASE_IDX 0
+#define mmSDMA1_UTCL1_RD_XNACK0 0x0043
+#define mmSDMA1_UTCL1_RD_XNACK0_BASE_IDX 0
+#define mmSDMA1_UTCL1_RD_XNACK1 0x0044
+#define mmSDMA1_UTCL1_RD_XNACK1_BASE_IDX 0
+#define mmSDMA1_UTCL1_WR_XNACK0 0x0045
+#define mmSDMA1_UTCL1_WR_XNACK0_BASE_IDX 0
+#define mmSDMA1_UTCL1_WR_XNACK1 0x0046
+#define mmSDMA1_UTCL1_WR_XNACK1_BASE_IDX 0
+#define mmSDMA1_UTCL1_TIMEOUT 0x0047
+#define mmSDMA1_UTCL1_TIMEOUT_BASE_IDX 0
+#define mmSDMA1_UTCL1_PAGE 0x0048
+#define mmSDMA1_UTCL1_PAGE_BASE_IDX 0
+#define mmSDMA1_POWER_CNTL_IDLE 0x0049
+#define mmSDMA1_POWER_CNTL_IDLE_BASE_IDX 0
+#define mmSDMA1_RELAX_ORDERING_LUT 0x004a
+#define mmSDMA1_RELAX_ORDERING_LUT_BASE_IDX 0
+#define mmSDMA1_CHICKEN_BITS_2 0x004b
+#define mmSDMA1_CHICKEN_BITS_2_BASE_IDX 0
+#define mmSDMA1_STATUS3_REG 0x004c
+#define mmSDMA1_STATUS3_REG_BASE_IDX 0
+#define mmSDMA1_PHYSICAL_ADDR_LO 0x004d
+#define mmSDMA1_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_PHYSICAL_ADDR_HI 0x004e
+#define mmSDMA1_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_PHASE2_QUANTUM 0x004f
+#define mmSDMA1_PHASE2_QUANTUM_BASE_IDX 0
+#define mmSDMA1_ERROR_LOG 0x0050
+#define mmSDMA1_ERROR_LOG_BASE_IDX 0
+#define mmSDMA1_PUB_DUMMY_REG0 0x0051
+#define mmSDMA1_PUB_DUMMY_REG0_BASE_IDX 0
+#define mmSDMA1_PUB_DUMMY_REG1 0x0052
+#define mmSDMA1_PUB_DUMMY_REG1_BASE_IDX 0
+#define mmSDMA1_PUB_DUMMY_REG2 0x0053
+#define mmSDMA1_PUB_DUMMY_REG2_BASE_IDX 0
+#define mmSDMA1_PUB_DUMMY_REG3 0x0054
+#define mmSDMA1_PUB_DUMMY_REG3_BASE_IDX 0
+#define mmSDMA1_F32_COUNTER 0x0055
+#define mmSDMA1_F32_COUNTER_BASE_IDX 0
+#define mmSDMA1_UNBREAKABLE 0x0056
+#define mmSDMA1_UNBREAKABLE_BASE_IDX 0
+#define mmSDMA1_PERFMON_CNTL 0x0057
+#define mmSDMA1_PERFMON_CNTL_BASE_IDX 0
+#define mmSDMA1_PERFCOUNTER0_RESULT 0x0058
+#define mmSDMA1_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define mmSDMA1_PERFCOUNTER1_RESULT 0x0059
+#define mmSDMA1_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
+#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0
+#define mmSDMA1_CRD_CNTL 0x005b
+#define mmSDMA1_CRD_CNTL_BASE_IDX 0
+#define mmSDMA1_MMHUB_TRUSTLVL 0x005c
+#define mmSDMA1_MMHUB_TRUSTLVL_BASE_IDX 0
+#define mmSDMA1_GPU_IOV_VIOLATION_LOG 0x005d
+#define mmSDMA1_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define mmSDMA1_ULV_CNTL 0x005e
+#define mmSDMA1_ULV_CNTL_BASE_IDX 0
+#define mmSDMA1_EA_DBIT_ADDR_DATA 0x0060
+#define mmSDMA1_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define mmSDMA1_EA_DBIT_ADDR_INDEX 0x0061
+#define mmSDMA1_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define mmSDMA1_GFX_RB_CNTL 0x0080
+#define mmSDMA1_GFX_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_RB_BASE 0x0081
+#define mmSDMA1_GFX_RB_BASE_BASE_IDX 0
+#define mmSDMA1_GFX_RB_BASE_HI 0x0082
+#define mmSDMA1_GFX_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_RPTR 0x0083
+#define mmSDMA1_GFX_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_GFX_RB_RPTR_HI 0x0084
+#define mmSDMA1_GFX_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR 0x0085
+#define mmSDMA1_GFX_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR_HI 0x0086
+#define mmSDMA1_GFX_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_RB_RPTR_ADDR_HI 0x0088
+#define mmSDMA1_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_RPTR_ADDR_LO 0x0089
+#define mmSDMA1_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_GFX_IB_CNTL 0x008a
+#define mmSDMA1_GFX_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_IB_RPTR 0x008b
+#define mmSDMA1_GFX_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_GFX_IB_OFFSET 0x008c
+#define mmSDMA1_GFX_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_GFX_IB_BASE_LO 0x008d
+#define mmSDMA1_GFX_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_GFX_IB_BASE_HI 0x008e
+#define mmSDMA1_GFX_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_GFX_IB_SIZE 0x008f
+#define mmSDMA1_GFX_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_GFX_SKIP_CNTL 0x0090
+#define mmSDMA1_GFX_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_CONTEXT_STATUS 0x0091
+#define mmSDMA1_GFX_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_GFX_DOORBELL 0x0092
+#define mmSDMA1_GFX_DOORBELL_BASE_IDX 0
+#define mmSDMA1_GFX_CONTEXT_CNTL 0x0093
+#define mmSDMA1_GFX_CONTEXT_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_STATUS 0x00a8
+#define mmSDMA1_GFX_STATUS_BASE_IDX 0
+#define mmSDMA1_GFX_DOORBELL_LOG 0x00a9
+#define mmSDMA1_GFX_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_GFX_WATERMARK 0x00aa
+#define mmSDMA1_GFX_WATERMARK_BASE_IDX 0
+#define mmSDMA1_GFX_DOORBELL_OFFSET 0x00ab
+#define mmSDMA1_GFX_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_GFX_CSA_ADDR_LO 0x00ac
+#define mmSDMA1_GFX_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_GFX_CSA_ADDR_HI 0x00ad
+#define mmSDMA1_GFX_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_IB_SUB_REMAIN 0x00af
+#define mmSDMA1_GFX_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_GFX_PREEMPT 0x00b0
+#define mmSDMA1_GFX_PREEMPT_BASE_IDX 0
+#define mmSDMA1_GFX_DUMMY_REG 0x00b1
+#define mmSDMA1_GFX_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_GFX_RB_AQL_CNTL 0x00b4
+#define mmSDMA1_GFX_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_GFX_MINOR_PTR_UPDATE 0x00b5
+#define mmSDMA1_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA0 0x00c0
+#define mmSDMA1_GFX_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA1 0x00c1
+#define mmSDMA1_GFX_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA2 0x00c2
+#define mmSDMA1_GFX_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA3 0x00c3
+#define mmSDMA1_GFX_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA4 0x00c4
+#define mmSDMA1_GFX_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA5 0x00c5
+#define mmSDMA1_GFX_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA6 0x00c6
+#define mmSDMA1_GFX_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA7 0x00c7
+#define mmSDMA1_GFX_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_DATA8 0x00c8
+#define mmSDMA1_GFX_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_GFX_MIDCMD_CNTL 0x00c9
+#define mmSDMA1_GFX_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_CNTL 0x00e0
+#define mmSDMA1_PAGE_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_BASE 0x00e1
+#define mmSDMA1_PAGE_RB_BASE_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_BASE_HI 0x00e2
+#define mmSDMA1_PAGE_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_RPTR 0x00e3
+#define mmSDMA1_PAGE_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_RPTR_HI 0x00e4
+#define mmSDMA1_PAGE_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR 0x00e5
+#define mmSDMA1_PAGE_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR_HI 0x00e6
+#define mmSDMA1_PAGE_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL 0x00e7
+#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI 0x00e8
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO 0x00e9
+#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_CNTL 0x00ea
+#define mmSDMA1_PAGE_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_RPTR 0x00eb
+#define mmSDMA1_PAGE_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_OFFSET 0x00ec
+#define mmSDMA1_PAGE_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_BASE_LO 0x00ed
+#define mmSDMA1_PAGE_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_BASE_HI 0x00ee
+#define mmSDMA1_PAGE_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_SIZE 0x00ef
+#define mmSDMA1_PAGE_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_PAGE_SKIP_CNTL 0x00f0
+#define mmSDMA1_PAGE_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_CONTEXT_STATUS 0x00f1
+#define mmSDMA1_PAGE_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_PAGE_DOORBELL 0x00f2
+#define mmSDMA1_PAGE_DOORBELL_BASE_IDX 0
+#define mmSDMA1_PAGE_STATUS 0x0108
+#define mmSDMA1_PAGE_STATUS_BASE_IDX 0
+#define mmSDMA1_PAGE_DOORBELL_LOG 0x0109
+#define mmSDMA1_PAGE_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_PAGE_WATERMARK 0x010a
+#define mmSDMA1_PAGE_WATERMARK_BASE_IDX 0
+#define mmSDMA1_PAGE_DOORBELL_OFFSET 0x010b
+#define mmSDMA1_PAGE_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_PAGE_CSA_ADDR_LO 0x010c
+#define mmSDMA1_PAGE_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_PAGE_CSA_ADDR_HI 0x010d
+#define mmSDMA1_PAGE_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_IB_SUB_REMAIN 0x010f
+#define mmSDMA1_PAGE_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_PAGE_PREEMPT 0x0110
+#define mmSDMA1_PAGE_PREEMPT_BASE_IDX 0
+#define mmSDMA1_PAGE_DUMMY_REG 0x0111
+#define mmSDMA1_PAGE_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI 0x0112
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO 0x0113
+#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_PAGE_RB_AQL_CNTL 0x0114
+#define mmSDMA1_PAGE_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_PAGE_MINOR_PTR_UPDATE 0x0115
+#define mmSDMA1_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA0 0x0120
+#define mmSDMA1_PAGE_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA1 0x0121
+#define mmSDMA1_PAGE_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA2 0x0122
+#define mmSDMA1_PAGE_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA3 0x0123
+#define mmSDMA1_PAGE_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA4 0x0124
+#define mmSDMA1_PAGE_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA5 0x0125
+#define mmSDMA1_PAGE_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA6 0x0126
+#define mmSDMA1_PAGE_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA7 0x0127
+#define mmSDMA1_PAGE_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_DATA8 0x0128
+#define mmSDMA1_PAGE_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_PAGE_MIDCMD_CNTL 0x0129
+#define mmSDMA1_PAGE_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_CNTL 0x0140
+#define mmSDMA1_RLC0_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_BASE 0x0141
+#define mmSDMA1_RLC0_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_BASE_HI 0x0142
+#define mmSDMA1_RLC0_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_RPTR 0x0143
+#define mmSDMA1_RLC0_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_RPTR_HI 0x0144
+#define mmSDMA1_RLC0_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR 0x0145
+#define mmSDMA1_RLC0_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR_HI 0x0146
+#define mmSDMA1_RLC0_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL 0x0147
+#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI 0x0148
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO 0x0149
+#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_CNTL 0x014a
+#define mmSDMA1_RLC0_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_RPTR 0x014b
+#define mmSDMA1_RLC0_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_OFFSET 0x014c
+#define mmSDMA1_RLC0_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_BASE_LO 0x014d
+#define mmSDMA1_RLC0_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_BASE_HI 0x014e
+#define mmSDMA1_RLC0_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_SIZE 0x014f
+#define mmSDMA1_RLC0_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC0_SKIP_CNTL 0x0150
+#define mmSDMA1_RLC0_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_CONTEXT_STATUS 0x0151
+#define mmSDMA1_RLC0_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC0_DOORBELL 0x0152
+#define mmSDMA1_RLC0_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC0_STATUS 0x0168
+#define mmSDMA1_RLC0_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC0_DOORBELL_LOG 0x0169
+#define mmSDMA1_RLC0_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC0_WATERMARK 0x016a
+#define mmSDMA1_RLC0_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC0_DOORBELL_OFFSET 0x016b
+#define mmSDMA1_RLC0_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC0_CSA_ADDR_LO 0x016c
+#define mmSDMA1_RLC0_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC0_CSA_ADDR_HI 0x016d
+#define mmSDMA1_RLC0_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_IB_SUB_REMAIN 0x016f
+#define mmSDMA1_RLC0_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC0_PREEMPT 0x0170
+#define mmSDMA1_RLC0_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC0_DUMMY_REG 0x0171
+#define mmSDMA1_RLC0_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI 0x0172
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO 0x0173
+#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC0_RB_AQL_CNTL 0x0174
+#define mmSDMA1_RLC0_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC0_MINOR_PTR_UPDATE 0x0175
+#define mmSDMA1_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA0 0x0180
+#define mmSDMA1_RLC0_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA1 0x0181
+#define mmSDMA1_RLC0_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA2 0x0182
+#define mmSDMA1_RLC0_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA3 0x0183
+#define mmSDMA1_RLC0_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA4 0x0184
+#define mmSDMA1_RLC0_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA5 0x0185
+#define mmSDMA1_RLC0_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA6 0x0186
+#define mmSDMA1_RLC0_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA7 0x0187
+#define mmSDMA1_RLC0_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_DATA8 0x0188
+#define mmSDMA1_RLC0_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC0_MIDCMD_CNTL 0x0189
+#define mmSDMA1_RLC0_MIDCMD_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_CNTL 0x01a0
+#define mmSDMA1_RLC1_RB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_BASE 0x01a1
+#define mmSDMA1_RLC1_RB_BASE_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_BASE_HI 0x01a2
+#define mmSDMA1_RLC1_RB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_RPTR 0x01a3
+#define mmSDMA1_RLC1_RB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_RPTR_HI 0x01a4
+#define mmSDMA1_RLC1_RB_RPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR 0x01a5
+#define mmSDMA1_RLC1_RB_WPTR_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR_HI 0x01a6
+#define mmSDMA1_RLC1_RB_WPTR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL 0x01a7
+#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI 0x01a8
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO 0x01a9
+#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_CNTL 0x01aa
+#define mmSDMA1_RLC1_IB_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_RPTR 0x01ab
+#define mmSDMA1_RLC1_IB_RPTR_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_OFFSET 0x01ac
+#define mmSDMA1_RLC1_IB_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_BASE_LO 0x01ad
+#define mmSDMA1_RLC1_IB_BASE_LO_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_BASE_HI 0x01ae
+#define mmSDMA1_RLC1_IB_BASE_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_SIZE 0x01af
+#define mmSDMA1_RLC1_IB_SIZE_BASE_IDX 0
+#define mmSDMA1_RLC1_SKIP_CNTL 0x01b0
+#define mmSDMA1_RLC1_SKIP_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_CONTEXT_STATUS 0x01b1
+#define mmSDMA1_RLC1_CONTEXT_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC1_DOORBELL 0x01b2
+#define mmSDMA1_RLC1_DOORBELL_BASE_IDX 0
+#define mmSDMA1_RLC1_STATUS 0x01c8
+#define mmSDMA1_RLC1_STATUS_BASE_IDX 0
+#define mmSDMA1_RLC1_DOORBELL_LOG 0x01c9
+#define mmSDMA1_RLC1_DOORBELL_LOG_BASE_IDX 0
+#define mmSDMA1_RLC1_WATERMARK 0x01ca
+#define mmSDMA1_RLC1_WATERMARK_BASE_IDX 0
+#define mmSDMA1_RLC1_DOORBELL_OFFSET 0x01cb
+#define mmSDMA1_RLC1_DOORBELL_OFFSET_BASE_IDX 0
+#define mmSDMA1_RLC1_CSA_ADDR_LO 0x01cc
+#define mmSDMA1_RLC1_CSA_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC1_CSA_ADDR_HI 0x01cd
+#define mmSDMA1_RLC1_CSA_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_IB_SUB_REMAIN 0x01cf
+#define mmSDMA1_RLC1_IB_SUB_REMAIN_BASE_IDX 0
+#define mmSDMA1_RLC1_PREEMPT 0x01d0
+#define mmSDMA1_RLC1_PREEMPT_BASE_IDX 0
+#define mmSDMA1_RLC1_DUMMY_REG 0x01d1
+#define mmSDMA1_RLC1_DUMMY_REG_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI 0x01d2
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO 0x01d3
+#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define mmSDMA1_RLC1_RB_AQL_CNTL 0x01d4
+#define mmSDMA1_RLC1_RB_AQL_CNTL_BASE_IDX 0
+#define mmSDMA1_RLC1_MINOR_PTR_UPDATE 0x01d5
+#define mmSDMA1_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA0 0x01e0
+#define mmSDMA1_RLC1_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA1 0x01e1
+#define mmSDMA1_RLC1_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA2 0x01e2
+#define mmSDMA1_RLC1_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA3 0x01e3
+#define mmSDMA1_RLC1_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA4 0x01e4
+#define mmSDMA1_RLC1_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA5 0x01e5
+#define mmSDMA1_RLC1_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA6 0x01e6
+#define mmSDMA1_RLC1_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA7 0x01e7
+#define mmSDMA1_RLC1_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_DATA8 0x01e8
+#define mmSDMA1_RLC1_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA1_RLC1_MIDCMD_CNTL 0x01e9
+#define mmSDMA1_RLC1_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
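[Not part of the patch — an illustrative note.] The offset header above pairs each mmSDMA1_* DWORD offset with a *_BASE_IDX selecting an entry in a per-ASIC base-address table; the block comment gives the sdma1_sdma1dec base address as 0x5180. A minimal sketch of that arithmetic, assuming the base table holds DWORD addresses; the names sdma1_base and sdma1_reg_index() are hypothetical:

/* Illustrative sketch only, not driver code. */
#include <stdint.h>

static const uint32_t sdma1_base[] = {
	0x5180 / 4,	/* BASE_IDX 0: "base address: 0x5180" from the header, assumed to be a byte address */
};

/* Absolute DWORD register index = base[BASE_IDX] + block-relative offset. */
static inline uint32_t sdma1_reg_index(uint32_t offset, uint32_t base_idx)
{
	return sdma1_base[base_idx] + offset;
}

/* e.g. sdma1_reg_index(mmSDMA1_GFX_RB_CNTL, mmSDMA1_GFX_RB_CNTL_BASE_IDX) */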
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_sh_mask.h
new file mode 100644
index 0000000..99849e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_sh_mask.h
@@ -0,0 +1,1810 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _sdma1_4_0_SH_MASK_HEADER
+#define _sdma1_4_0_SH_MASK_HEADER
+
+
+// addressBlock: sdma1_sdma1dec
+//SDMA1_UCODE_ADDR
+#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+//SDMA1_UCODE_DATA
+#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_VM_CNTL
+#define SDMA1_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA1_VM_CTX_LO
+#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_VM_CTX_HI
+#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_ACTIVE_FCN_ID
+#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA1_VM_CTX_CNTL
+#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA1_VIRT_RESET_REQ
+#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA1_VF_ENABLE
+#define SDMA1_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA1_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA1_CONTEXT_REG_TYPE0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA1_CONTEXT_REG_TYPE1
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA1_CONTEXT_REG_TYPE2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
+//SDMA1_CONTEXT_REG_TYPE3
+#define SDMA1_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA1_PUB_REG_TYPE0
+#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE2__SDMA1_MMHUB_TRUSTLVL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_MMHUB_TRUSTLVL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE3
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL
+//SDMA1_MMHUB_CNTL
+#define SDMA1_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA1_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA1_CONTEXT_GROUP_BOUNDARY
+#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA1_POWER_CNTL
+#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA1_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA1_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+//SDMA1_CLK_CTRL
+#define SDMA1_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA1_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA1_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA1_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA1_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA1_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA1_CNTL
+#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA1_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA1_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA1_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA1_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+//SDMA1_CHICKEN_BITS
+#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
+#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
+#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
+#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
+#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
+#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
+#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
+#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
+//SDMA1_GB_ADDR_CONFIG
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA1_GB_ADDR_CONFIG_READ
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA1_RB_RPTR_FETCH_HI
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA1_RB_RPTR_FETCH
+#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA1_IB_OFFSET_FETCH
+#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA1_PROGRAM
+#define SDMA1_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA1_STATUS_REG
+#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA1_STATUS1_REG
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA1_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA1_RD_BURST_CNTL
+#define SDMA1_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA1_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+//SDMA1_HBM_PAGE_CONFIG
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
+//SDMA1_UCODE_CHECKSUM
+#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA1_F32_CNTL
+#define SDMA1_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA1_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA1_F32_CNTL__STEP_MASK 0x00000002L
+//SDMA1_FREEZE
+#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA1_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA1_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA1_PHASE0_QUANTUM
+#define SDMA1_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA1_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA1_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA1_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA1_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA1_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA1_PHASE1_QUANTUM
+#define SDMA1_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA1_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA1_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA1_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA1_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA1_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA1_EDC_CONFIG
+#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA1_BA_THRESHOLD
+#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA1_ID
+#define SDMA1_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA1_VERSION
+#define SDMA1_VERSION__MINVER__SHIFT 0x0
+#define SDMA1_VERSION__MAJVER__SHIFT 0x8
+#define SDMA1_VERSION__REV__SHIFT 0x10
+#define SDMA1_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA1_VERSION__REV_MASK 0x003F0000L
+//SDMA1_EDC_COUNTER
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
+//SDMA1_EDC_COUNTER_CLEAR
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA1_STATUS2_REG
+#define SDMA1_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA1_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2
+#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA1_STATUS2_REG__ID_MASK 0x00000003L
+#define SDMA1_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL
+#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA1_ATOMIC_CNTL
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA1_ATOMIC_PREOP_LO
+#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA1_ATOMIC_PREOP_HI
+#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_CNTL
+#define SDMA1_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA1_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA1_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA1_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA1_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA1_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA1_UTCL1_WATERMK
+#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
+#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0xa
+#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x12
+#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x1a
+#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000003FFL
+#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0003FC00L
+#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x03FC0000L
+#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFC000000L
+//SDMA1_UTCL1_RD_STATUS
+#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA1_UTCL1_WR_STATUS
+#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA1_UTCL1_INV0
+#define SDMA1_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA1_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA1_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA1_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA1_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA1_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA1_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA1_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA1_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA1_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA1_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA1_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA1_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA1_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA1_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA1_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA1_UTCL1_INV1
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_INV2
+#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_RD_XNACK0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_RD_XNACK1
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA1_UTCL1_WR_XNACK0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_WR_XNACK1
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA1_UTCL1_TIMEOUT
+#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA1_UTCL1_PAGE
+#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+//SDMA1_POWER_CNTL_IDLE
+#define SDMA1_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA1_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA1_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA1_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA1_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA1_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA1_RELAX_ORDERING_LUT
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA1_CHICKEN_BITS_2
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+//SDMA1_STATUS3_REG
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+//SDMA1_PHYSICAL_ADDR_LO
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA1_PHYSICAL_ADDR_HI
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA1_PHASE2_QUANTUM
+#define SDMA1_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA1_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA1_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA1_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA1_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA1_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA1_ERROR_LOG
+#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA1_PUB_DUMMY_REG0
+#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG1
+#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG2
+#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG3
+#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_F32_COUNTER
+#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_UNBREAKABLE
+#define SDMA1_UNBREAKABLE__VALUE__SHIFT 0x0
+#define SDMA1_UNBREAKABLE__VALUE_MASK 0x00000001L
+//SDMA1_PERFMON_CNTL
+#define SDMA1_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
+#define SDMA1_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
+#define SDMA1_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define SDMA1_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
+#define SDMA1_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
+#define SDMA1_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
+#define SDMA1_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
+#define SDMA1_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
+#define SDMA1_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
+#define SDMA1_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
+#define SDMA1_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
+#define SDMA1_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
+//SDMA1_PERFCOUNTER0_RESULT
+#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER1_RESULT
+#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER_TAG_DELAY_RANGE
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
+#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
+//SDMA1_CRD_CNTL
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA1_MMHUB_TRUSTLVL
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x3
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x6
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0x9
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0xc
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0xf
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x12
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x15
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x00000007L
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x00000038L
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x000001C0L
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x00000E00L
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x00007000L
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00038000L
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x001C0000L
+#define SDMA1_MMHUB_TRUSTLVL__SECFLAG7_MASK 0x00E00000L
+//SDMA1_GPU_IOV_VIOLATION_LOG
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
+#define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
+//SDMA1_ULV_CNTL
+#define SDMA1_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA1_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA1_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA1_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA1_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA1_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA1_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA1_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA1_EA_DBIT_ADDR_DATA
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_EA_DBIT_ADDR_INDEX
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA1_GFX_RB_CNTL
+#define SDMA1_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_GFX_RB_CNTL__RB_SIZE_MASK 0x0000007EL
+#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_GFX_RB_BASE
+#define SDMA1_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_BASE_HI
+#define SDMA1_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_GFX_RB_RPTR
+#define SDMA1_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_RPTR_HI
+#define SDMA1_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR
+#define SDMA1_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR_HI
+#define SDMA1_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR_POLL_CNTL
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_GFX_RB_RPTR_ADDR_HI
+#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_RPTR_ADDR_LO
+#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_GFX_IB_CNTL
+#define SDMA1_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_GFX_IB_RPTR
+#define SDMA1_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_GFX_IB_OFFSET
+#define SDMA1_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_GFX_IB_BASE_LO
+#define SDMA1_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_GFX_IB_BASE_HI
+#define SDMA1_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_IB_SIZE
+#define SDMA1_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_GFX_SKIP_CNTL
+#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
+//SDMA1_GFX_CONTEXT_STATUS
+#define SDMA1_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_GFX_DOORBELL
+#define SDMA1_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_GFX_CONTEXT_CNTL
+#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+//SDMA1_GFX_STATUS
+#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_GFX_DOORBELL_LOG
+#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_GFX_WATERMARK
+#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_GFX_DOORBELL_OFFSET
+#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_GFX_CSA_ADDR_LO
+#define SDMA1_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_GFX_CSA_ADDR_HI
+#define SDMA1_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_IB_SUB_REMAIN
+#define SDMA1_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_GFX_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_GFX_PREEMPT
+#define SDMA1_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_GFX_DUMMY_REG
+#define SDMA1_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_GFX_RB_AQL_CNTL
+#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_GFX_MINOR_PTR_UPDATE
+#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_GFX_MIDCMD_DATA0
+#define SDMA1_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA1
+#define SDMA1_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA2
+#define SDMA1_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA3
+#define SDMA1_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA4
+#define SDMA1_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA5
+#define SDMA1_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA6
+#define SDMA1_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA7
+#define SDMA1_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_DATA8
+#define SDMA1_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_GFX_MIDCMD_CNTL
+#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_PAGE_RB_CNTL
+#define SDMA1_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000007EL
+#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_PAGE_RB_BASE
+#define SDMA1_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_BASE_HI
+#define SDMA1_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_PAGE_RB_RPTR
+#define SDMA1_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_RPTR_HI
+#define SDMA1_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR
+#define SDMA1_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR_HI
+#define SDMA1_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_PAGE_RB_RPTR_ADDR_HI
+#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_RPTR_ADDR_LO
+#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_PAGE_IB_CNTL
+#define SDMA1_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_PAGE_IB_RPTR
+#define SDMA1_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_PAGE_IB_OFFSET
+#define SDMA1_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_PAGE_IB_BASE_LO
+#define SDMA1_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_PAGE_IB_BASE_HI
+#define SDMA1_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_IB_SIZE
+#define SDMA1_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_PAGE_SKIP_CNTL
+#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
+//SDMA1_PAGE_CONTEXT_STATUS
+#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_PAGE_DOORBELL
+#define SDMA1_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_PAGE_STATUS
+#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_PAGE_DOORBELL_LOG
+#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_PAGE_WATERMARK
+#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_PAGE_DOORBELL_OFFSET
+#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_PAGE_CSA_ADDR_LO
+#define SDMA1_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_PAGE_CSA_ADDR_HI
+#define SDMA1_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_IB_SUB_REMAIN
+#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_PAGE_PREEMPT
+#define SDMA1_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_PAGE_DUMMY_REG
+#define SDMA1_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_PAGE_RB_AQL_CNTL
+#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_PAGE_MINOR_PTR_UPDATE
+#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_PAGE_MIDCMD_DATA0
+#define SDMA1_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA1
+#define SDMA1_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA2
+#define SDMA1_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA3
+#define SDMA1_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA4
+#define SDMA1_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA5
+#define SDMA1_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA6
+#define SDMA1_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA7
+#define SDMA1_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_DATA8
+#define SDMA1_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_PAGE_MIDCMD_CNTL
+#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC0_RB_CNTL
+#define SDMA1_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000007EL
+#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC0_RB_BASE
+#define SDMA1_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_BASE_HI
+#define SDMA1_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC0_RB_RPTR
+#define SDMA1_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_RPTR_HI
+#define SDMA1_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR
+#define SDMA1_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR_HI
+#define SDMA1_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC0_RB_RPTR_ADDR_HI
+#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_RPTR_ADDR_LO
+#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC0_IB_CNTL
+#define SDMA1_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC0_IB_RPTR
+#define SDMA1_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC0_IB_OFFSET
+#define SDMA1_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC0_IB_BASE_LO
+#define SDMA1_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC0_IB_BASE_HI
+#define SDMA1_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_IB_SIZE
+#define SDMA1_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC0_SKIP_CNTL
+#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
+//SDMA1_RLC0_CONTEXT_STATUS
+#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC0_DOORBELL
+#define SDMA1_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC0_STATUS
+#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC0_DOORBELL_LOG
+#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC0_WATERMARK
+#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC0_DOORBELL_OFFSET
+#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC0_CSA_ADDR_LO
+#define SDMA1_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC0_CSA_ADDR_HI
+#define SDMA1_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_IB_SUB_REMAIN
+#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_RLC0_PREEMPT
+#define SDMA1_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC0_DUMMY_REG
+#define SDMA1_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC0_RB_AQL_CNTL
+#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC0_MINOR_PTR_UPDATE
+#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC0_MIDCMD_DATA0
+#define SDMA1_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA1
+#define SDMA1_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA2
+#define SDMA1_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA3
+#define SDMA1_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA4
+#define SDMA1_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA5
+#define SDMA1_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA6
+#define SDMA1_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA7
+#define SDMA1_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_DATA8
+#define SDMA1_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC0_MIDCMD_CNTL
+#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_RLC1_RB_CNTL
+#define SDMA1_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000007EL
+#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_RLC1_RB_BASE
+#define SDMA1_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_BASE_HI
+#define SDMA1_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_RLC1_RB_RPTR
+#define SDMA1_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_RPTR_HI
+#define SDMA1_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR
+#define SDMA1_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR_HI
+#define SDMA1_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA1_RLC1_RB_RPTR_ADDR_HI
+#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_RPTR_ADDR_LO
+#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC1_IB_CNTL
+#define SDMA1_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+//SDMA1_RLC1_IB_RPTR
+#define SDMA1_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC1_IB_OFFSET
+#define SDMA1_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_RLC1_IB_BASE_LO
+#define SDMA1_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_RLC1_IB_BASE_HI
+#define SDMA1_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_IB_SIZE
+#define SDMA1_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_RLC1_SKIP_CNTL
+#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
+//SDMA1_RLC1_CONTEXT_STATUS
+#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA1_RLC1_DOORBELL
+#define SDMA1_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_RLC1_STATUS
+#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA1_RLC1_DOORBELL_LOG
+#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_RLC1_WATERMARK
+#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA1_RLC1_DOORBELL_OFFSET
+#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_RLC1_CSA_ADDR_LO
+#define SDMA1_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC1_CSA_ADDR_HI
+#define SDMA1_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_IB_SUB_REMAIN
+#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_RLC1_PREEMPT
+#define SDMA1_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_RLC1_DUMMY_REG
+#define SDMA1_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_RLC1_RB_AQL_CNTL
+#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA1_RLC1_MINOR_PTR_UPDATE
+#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_RLC1_MIDCMD_DATA0
+#define SDMA1_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA1
+#define SDMA1_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA2
+#define SDMA1_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA3
+#define SDMA1_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA4
+#define SDMA1_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA5
+#define SDMA1_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA6
+#define SDMA1_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA7
+#define SDMA1_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_DATA8
+#define SDMA1_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_RLC1_MIDCMD_CNTL
+#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index b89347e..f35aba7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -1246,5 +1246,6 @@
#define ixGC_CAC_OVRD_CU 0xe7
#define ixCURRENT_PG_STATUS 0xc020029c
#define ixCURRENT_PG_STATUS_APU 0xd020029c
+#define ixPWR_SVI2_STATUS 0xC0200294
#endif /* SMU_7_1_3_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
index 654c109..481ee65 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -6078,6 +6078,8 @@
#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
-
-
+#define PWR_SVI2_STATUS__PLANE1_VID_MASK 0x000000ff
+#define PWR_SVI2_STATUS__PLANE1_VID__SHIFT 0x00000000
+#define PWR_SVI2_STATUS__PLANE2_VID_MASK 0x0000ff00
+#define PWR_SVI2_STATUS__PLANE2_VID__SHIFT 0x00000008
#endif /* SMU_7_1_3_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
index c1006fe..efd2704 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h
@@ -172,4 +172,7 @@
#define mmROM_SW_DATA_64 0x006d
#define mmROM_SW_DATA_64_BASE_IDX 0
+#define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 0
+#define mmSMUSVI0_PLANE0_CURRENTVID 0x0013
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
index a0be5c9..2487ab9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h
@@ -254,5 +254,8 @@
//ROM_SW_DATA_64
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xFFFFFFFFL
+//SMUSVI0_PLANE0_CURRENTVID
+#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18
+#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_default.h
index 1a3c486..1a3c486 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_default.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_offset.h
index 6af3e6f..6af3e6f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_sh_mask.h
index b8cadcf..b8cadcf 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/THM/thm_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_default.h
index 0cbae8b..0cbae8b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_default.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_offset.h
index 3053fd3..3053fd3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_sh_mask.h
index f0306c5..f0306c5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_default.h
new file mode 100644
index 0000000..128a18f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_default.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _umc_6_0_DEFAULT_HEADER
+#define _umc_6_0_DEFAULT_HEADER
+
+#define mmUMCCH0_0_EccCtrl_DEFAULT 0x00000000
+
+#define mmUMCCH0_0_UMC_CONFIG_DEFAULT 0x00000203
+
+#define mmUMCCH0_0_UmcLocalCap_DEFAULT 0x00000000
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_offset.h
new file mode 100644
index 0000000..6985dbb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_offset.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _umc_6_0_OFFSET_H_
+#define _umc_6_0_OFFSET_H_
+
+#define mmUMCCH0_0_EccCtrl 0x0053
+#define mmUMCCH0_0_EccCtrl_BASE_IDX 0
+#define mmUMCCH1_0_EccCtrl 0x0853
+#define mmUMCCH1_0_EccCtrl_BASE_IDX 0
+#define mmUMCCH2_0_EccCtrl 0x1053
+#define mmUMCCH2_0_EccCtrl_BASE_IDX 0
+#define mmUMCCH3_0_EccCtrl 0x1853
+#define mmUMCCH3_0_EccCtrl_BASE_IDX 0
+
+#define mmUMCCH0_0_UMC_CONFIG 0x0040
+#define mmUMCCH0_0_UMC_CONFIG_BASE_IDX 0
+#define mmUMCCH1_0_UMC_CONFIG 0x0840
+#define mmUMCCH1_0_UMC_CONFIG_BASE_IDX 0
+#define mmUMCCH2_0_UMC_CONFIG 0x1040
+#define mmUMCCH2_0_UMC_CONFIG_BASE_IDX 0
+#define mmUMCCH3_0_UMC_CONFIG 0x1840
+#define mmUMCCH3_0_UMC_CONFIG_BASE_IDX 0
+
+#define mmUMCCH0_0_UmcLocalCap 0x0306
+#define mmUMCCH0_0_UmcLocalCap_BASE_IDX 0
+#define mmUMCCH1_0_UmcLocalCap 0x0b06
+#define mmUMCCH1_0_UmcLocalCap_BASE_IDX 0
+#define mmUMCCH2_0_UmcLocalCap 0x1306
+#define mmUMCCH2_0_UmcLocalCap_BASE_IDX 0
+#define mmUMCCH3_0_UmcLocalCap 0x1b06
+#define mmUMCCH3_0_UmcLocalCap_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_sh_mask.h
new file mode 100644
index 0000000..3e857d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_sh_mask.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _umc_6_0_SH_MASK_HEADER
+#define _umc_6_0_SH_MASK_HEADER
+
+#define UMCCH0_0_EccCtrl__RdEccEn_MASK 0x00000400L
+#define UMCCH0_0_EccCtrl__RdEccEn__SHIFT 0xa
+#define UMCCH0_0_EccCtrl__WrEccEn_MASK 0x00000001L
+#define UMCCH0_0_EccCtrl__WrEccEn__SHIFT 0x0
+
+#define UMCCH0_0_UMC_CONFIG__DramReady_MASK 0x80000000L
+#define UMCCH0_0_UMC_CONFIG__DramReady__SHIFT 0x1f
+
+#define UMCCH0_0_UmcLocalCap__EccDis_MASK 0x00000001L
+#define UMCCH0_0_UmcLocalCap__EccDis__SHIFT 0x0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h
index 07aceff..07aceff 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h
index b427f73..b427f73 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_default.h
index c2a46c7..c2a46c7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_default.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_offset.h
index 109303e..109303e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_sh_mask.h
index 4cf6e44..4cf6e44 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
index 18a3247..18a3247 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
index d6ba269..d6ba269 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/VCN/vcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_default.h
deleted file mode 100644
index 1650dc3..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_default.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _athub_1_0_DEFAULT_HEADER
-#define _athub_1_0_DEFAULT_HEADER
-
-
-// addressBlock: athub_atsdec
-#define mmATC_ATS_CNTL_DEFAULT 0x009a0800
-#define mmATC_ATS_STATUS_DEFAULT 0x00000000
-#define mmATC_ATS_FAULT_CNTL_DEFAULT 0x000001ff
-#define mmATC_ATS_FAULT_STATUS_INFO_DEFAULT 0x00000000
-#define mmATC_ATS_FAULT_STATUS_ADDR_DEFAULT 0x00000000
-#define mmATC_ATS_DEFAULT_PAGE_LOW_DEFAULT 0x00000000
-#define mmATC_TRANS_FAULT_RSPCNTRL_DEFAULT 0xffffffff
-#define mmATC_ATS_FAULT_STATUS_INFO2_DEFAULT 0x00000000
-#define mmATHUB_MISC_CNTL_DEFAULT 0x00040200
-#define mmATC_VMID_PASID_MAPPING_UPDATE_STATUS_DEFAULT 0x00000000
-#define mmATC_VMID0_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID1_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID2_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID3_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID4_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID5_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID6_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID7_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID8_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID9_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID10_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID11_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID12_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID13_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID14_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID15_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_ATS_VMID_STATUS_DEFAULT 0x00000000
-#define mmATC_ATS_GFX_ATCL2_STATUS_DEFAULT 0x00000000
-#define mmATC_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmATC_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmATC_PERFCOUNTER2_CFG_DEFAULT 0x00000000
-#define mmATC_PERFCOUNTER3_CFG_DEFAULT 0x00000000
-#define mmATC_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-#define mmATC_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmATC_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_DEFAULT 0x00000000
-#define mmATHUB_PCIE_PASID_CNTL_DEFAULT 0x00000000
-#define mmATHUB_PCIE_PAGE_REQ_CNTL_DEFAULT 0x00000000
-#define mmATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC_DEFAULT 0x00000000
-#define mmATHUB_COMMAND_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_0_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_1_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_2_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_3_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_4_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_5_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_6_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_7_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_8_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_9_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_10_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_11_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_12_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_13_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_14_DEFAULT 0x00000000
-#define mmATHUB_PCIE_ATS_CNTL_VF_15_DEFAULT 0x00000000
-#define mmATHUB_MEM_POWER_LS_DEFAULT 0x00000208
-#define mmATS_IH_CREDIT_DEFAULT 0x00150002
-#define mmATHUB_IH_CREDIT_DEFAULT 0x00020002
-#define mmATC_VMID16_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID17_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID18_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID19_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID20_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID21_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID22_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID23_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID24_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID25_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID26_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID27_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID28_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID29_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID30_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_VMID31_PASID_MAPPING_DEFAULT 0x00000000
-#define mmATC_ATS_MMHUB_ATCL2_STATUS_DEFAULT 0x00000000
-#define mmATHUB_SHARED_VIRT_RESET_REQ_DEFAULT 0x00000000
-#define mmATHUB_SHARED_ACTIVE_FCN_ID_DEFAULT 0x00000000
-#define mmATC_ATS_SDPPORT_CNTL_DEFAULT 0x03ffa210
-#define mmATC_ATS_VMID_SNAPSHOT_GFX_STAT_DEFAULT 0x00000000
-#define mmATC_ATS_VMID_SNAPSHOT_MMHUB_STAT_DEFAULT 0x00000000
-
-
-// addressBlock: athub_xpbdec
-#define mmXPB_RTR_SRC_APRTR0_DEFAULT 0x00000000
-#define mmXPB_RTR_SRC_APRTR1_DEFAULT 0x00000000
-#define mmXPB_RTR_SRC_APRTR2_DEFAULT 0x00000000
-#define mmXPB_RTR_SRC_APRTR3_DEFAULT 0x00000000
-#define mmXPB_RTR_SRC_APRTR4_DEFAULT 0x00000000
-#define mmXPB_RTR_SRC_APRTR5_DEFAULT 0x00000000
-#define mmXPB_RTR_SRC_APRTR6_DEFAULT 0x00000000
-#define mmXPB_RTR_SRC_APRTR7_DEFAULT 0x00000000
-#define mmXPB_RTR_SRC_APRTR8_DEFAULT 0x00000000
-#define mmXPB_RTR_SRC_APRTR9_DEFAULT 0x00000000
-#define mmXPB_XDMA_RTR_SRC_APRTR0_DEFAULT 0x00000000
-#define mmXPB_XDMA_RTR_SRC_APRTR1_DEFAULT 0x00000000
-#define mmXPB_XDMA_RTR_SRC_APRTR2_DEFAULT 0x00000000
-#define mmXPB_XDMA_RTR_SRC_APRTR3_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP0_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP1_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP2_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP3_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP4_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP5_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP6_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP7_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP8_DEFAULT 0x00000000
-#define mmXPB_RTR_DEST_MAP9_DEFAULT 0x00000000
-#define mmXPB_XDMA_RTR_DEST_MAP0_DEFAULT 0x00000000
-#define mmXPB_XDMA_RTR_DEST_MAP1_DEFAULT 0x00000000
-#define mmXPB_XDMA_RTR_DEST_MAP2_DEFAULT 0x00000000
-#define mmXPB_XDMA_RTR_DEST_MAP3_DEFAULT 0x00000000
-#define mmXPB_CLG_CFG0_DEFAULT 0x00000000
-#define mmXPB_CLG_CFG1_DEFAULT 0x00000000
-#define mmXPB_CLG_CFG2_DEFAULT 0x00000000
-#define mmXPB_CLG_CFG3_DEFAULT 0x00000000
-#define mmXPB_CLG_CFG4_DEFAULT 0x00000000
-#define mmXPB_CLG_CFG5_DEFAULT 0x00000000
-#define mmXPB_CLG_CFG6_DEFAULT 0x00000000
-#define mmXPB_CLG_CFG7_DEFAULT 0x00000000
-#define mmXPB_CLG_EXTRA_DEFAULT 0x00000000
-#define mmXPB_CLG_EXTRA_MSK_DEFAULT 0x00000000
-#define mmXPB_LB_ADDR_DEFAULT 0x00000000
-#define mmXPB_WCB_STS_DEFAULT 0x00000000
-#define mmXPB_HST_CFG_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR_CFG_DEFAULT 0x0000000f
-#define mmXPB_P2P_BAR0_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR1_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR2_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR3_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR4_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR5_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR6_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR7_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR_SETUP_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR_DELTA_ABOVE_DEFAULT 0x00000000
-#define mmXPB_P2P_BAR_DELTA_BELOW_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR0_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR1_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR2_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR3_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR4_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR5_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR6_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR7_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR8_DEFAULT 0x00000000
-#define mmXPB_PEER_SYS_BAR9_DEFAULT 0x00000000
-#define mmXPB_XDMA_PEER_SYS_BAR0_DEFAULT 0x00000000
-#define mmXPB_XDMA_PEER_SYS_BAR1_DEFAULT 0x00000000
-#define mmXPB_XDMA_PEER_SYS_BAR2_DEFAULT 0x00000000
-#define mmXPB_XDMA_PEER_SYS_BAR3_DEFAULT 0x00000000
-#define mmXPB_CLK_GAT_DEFAULT 0x00040400
-#define mmXPB_INTF_CFG_DEFAULT 0x000f1040
-#define mmXPB_INTF_STS_DEFAULT 0x00000000
-#define mmXPB_PIPE_STS_DEFAULT 0x00000000
-#define mmXPB_SUB_CTRL_DEFAULT 0x00000000
-#define mmXPB_MAP_INVERT_FLUSH_NUM_LSB_DEFAULT 0x00000000
-#define mmXPB_PERF_KNOBS_DEFAULT 0x00000000
-#define mmXPB_STICKY_DEFAULT 0x00000000
-#define mmXPB_STICKY_W1C_DEFAULT 0x00000000
-#define mmXPB_MISC_CFG_DEFAULT 0x4d585042
-#define mmXPB_INTF_CFG2_DEFAULT 0x00000040
-#define mmXPB_CLG_EXTRA_RD_DEFAULT 0x00000000
-#define mmXPB_CLG_EXTRA_MSK_RD_DEFAULT 0x00000000
-#define mmXPB_CLG_GFX_MATCH_DEFAULT 0x03000000
-#define mmXPB_CLG_GFX_MATCH_MSK_DEFAULT 0x00000000
-#define mmXPB_CLG_MM_MATCH_DEFAULT 0x03000000
-#define mmXPB_CLG_MM_MATCH_MSK_DEFAULT 0x00000000
-#define mmXPB_CLG_GFX_UNITID_MAPPING0_DEFAULT 0x00000000
-#define mmXPB_CLG_GFX_UNITID_MAPPING1_DEFAULT 0x00000040
-#define mmXPB_CLG_GFX_UNITID_MAPPING2_DEFAULT 0x00000080
-#define mmXPB_CLG_GFX_UNITID_MAPPING3_DEFAULT 0x000000c0
-#define mmXPB_CLG_GFX_UNITID_MAPPING4_DEFAULT 0x00000100
-#define mmXPB_CLG_GFX_UNITID_MAPPING5_DEFAULT 0x00000140
-#define mmXPB_CLG_GFX_UNITID_MAPPING6_DEFAULT 0x00000000
-#define mmXPB_CLG_GFX_UNITID_MAPPING7_DEFAULT 0x000001c0
-#define mmXPB_CLG_MM_UNITID_MAPPING0_DEFAULT 0x00000000
-#define mmXPB_CLG_MM_UNITID_MAPPING1_DEFAULT 0x00000040
-#define mmXPB_CLG_MM_UNITID_MAPPING2_DEFAULT 0x00000080
-#define mmXPB_CLG_MM_UNITID_MAPPING3_DEFAULT 0x000000c0
-
-
-// addressBlock: athub_rpbdec
-#define mmRPB_PASSPW_CONF_DEFAULT 0x00000230
-#define mmRPB_BLOCKLEVEL_CONF_DEFAULT 0x000000f0
-#define mmRPB_TAG_CONF_DEFAULT 0x00204020
-#define mmRPB_EFF_CNTL_DEFAULT 0x00001010
-#define mmRPB_ARB_CNTL_DEFAULT 0x00040404
-#define mmRPB_ARB_CNTL2_DEFAULT 0x00040104
-#define mmRPB_BIF_CNTL_DEFAULT 0x01000404
-#define mmRPB_WR_SWITCH_CNTL_DEFAULT 0x02040810
-#define mmRPB_RD_SWITCH_CNTL_DEFAULT 0x02040810
-#define mmRPB_CID_QUEUE_WR_DEFAULT 0x00000000
-#define mmRPB_CID_QUEUE_RD_DEFAULT 0x00000000
-#define mmRPB_CID_QUEUE_EX_DEFAULT 0x00000000
-#define mmRPB_CID_QUEUE_EX_DATA_DEFAULT 0x00000000
-#define mmRPB_SWITCH_CNTL2_DEFAULT 0x02040810
-#define mmRPB_DEINTRLV_COMBINE_CNTL_DEFAULT 0x00000004
-#define mmRPB_VC_SWITCH_RDWR_DEFAULT 0x00004040
-#define mmRPB_PERFCOUNTER_LO_DEFAULT 0x00000000
-#define mmRPB_PERFCOUNTER_HI_DEFAULT 0x00000000
-#define mmRPB_PERFCOUNTER0_CFG_DEFAULT 0x00000000
-#define mmRPB_PERFCOUNTER1_CFG_DEFAULT 0x00000000
-#define mmRPB_PERFCOUNTER2_CFG_DEFAULT 0x00000000
-#define mmRPB_PERFCOUNTER3_CFG_DEFAULT 0x00000000
-#define mmRPB_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
-#define mmRPB_RD_QUEUE_CNTL_DEFAULT 0x00000000
-#define mmRPB_RD_QUEUE_CNTL2_DEFAULT 0x00000000
-#define mmRPB_WR_QUEUE_CNTL_DEFAULT 0x00000000
-#define mmRPB_WR_QUEUE_CNTL2_DEFAULT 0x00000000
-#define mmRPB_EA_QUEUE_WR_DEFAULT 0x00000000
-#define mmRPB_ATS_CNTL_DEFAULT 0x58088422
-#define mmRPB_ATS_CNTL2_DEFAULT 0x00050b13
-#define mmRPB_SDPPORT_CNTL_DEFAULT 0x0fd14814
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_offset.h
deleted file mode 100644
index 80042e1..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_offset.h
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _athub_1_0_OFFSET_HEADER
-#define _athub_1_0_OFFSET_HEADER
-
-
-
-// addressBlock: athub_atsdec
-// base address: 0x3080
-#define mmATC_ATS_CNTL 0x0000
-#define mmATC_ATS_CNTL_BASE_IDX 0
-#define mmATC_ATS_STATUS 0x0003
-#define mmATC_ATS_STATUS_BASE_IDX 0
-#define mmATC_ATS_FAULT_CNTL 0x0004
-#define mmATC_ATS_FAULT_CNTL_BASE_IDX 0
-#define mmATC_ATS_FAULT_STATUS_INFO 0x0005
-#define mmATC_ATS_FAULT_STATUS_INFO_BASE_IDX 0
-#define mmATC_ATS_FAULT_STATUS_ADDR 0x0006
-#define mmATC_ATS_FAULT_STATUS_ADDR_BASE_IDX 0
-#define mmATC_ATS_DEFAULT_PAGE_LOW 0x0007
-#define mmATC_ATS_DEFAULT_PAGE_LOW_BASE_IDX 0
-#define mmATC_TRANS_FAULT_RSPCNTRL 0x0008
-#define mmATC_TRANS_FAULT_RSPCNTRL_BASE_IDX 0
-#define mmATC_ATS_FAULT_STATUS_INFO2 0x0009
-#define mmATC_ATS_FAULT_STATUS_INFO2_BASE_IDX 0
-#define mmATHUB_MISC_CNTL 0x000a
-#define mmATHUB_MISC_CNTL_BASE_IDX 0
-#define mmATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x000b
-#define mmATC_VMID_PASID_MAPPING_UPDATE_STATUS_BASE_IDX 0
-#define mmATC_VMID0_PASID_MAPPING 0x000c
-#define mmATC_VMID0_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID1_PASID_MAPPING 0x000d
-#define mmATC_VMID1_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID2_PASID_MAPPING 0x000e
-#define mmATC_VMID2_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID3_PASID_MAPPING 0x000f
-#define mmATC_VMID3_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID4_PASID_MAPPING 0x0010
-#define mmATC_VMID4_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID5_PASID_MAPPING 0x0011
-#define mmATC_VMID5_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID6_PASID_MAPPING 0x0012
-#define mmATC_VMID6_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID7_PASID_MAPPING 0x0013
-#define mmATC_VMID7_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID8_PASID_MAPPING 0x0014
-#define mmATC_VMID8_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID9_PASID_MAPPING 0x0015
-#define mmATC_VMID9_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID10_PASID_MAPPING 0x0016
-#define mmATC_VMID10_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID11_PASID_MAPPING 0x0017
-#define mmATC_VMID11_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID12_PASID_MAPPING 0x0018
-#define mmATC_VMID12_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID13_PASID_MAPPING 0x0019
-#define mmATC_VMID13_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID14_PASID_MAPPING 0x001a
-#define mmATC_VMID14_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID15_PASID_MAPPING 0x001b
-#define mmATC_VMID15_PASID_MAPPING_BASE_IDX 0
-#define mmATC_ATS_VMID_STATUS 0x001c
-#define mmATC_ATS_VMID_STATUS_BASE_IDX 0
-#define mmATC_ATS_GFX_ATCL2_STATUS 0x001d
-#define mmATC_ATS_GFX_ATCL2_STATUS_BASE_IDX 0
-#define mmATC_PERFCOUNTER0_CFG 0x001e
-#define mmATC_PERFCOUNTER0_CFG_BASE_IDX 0
-#define mmATC_PERFCOUNTER1_CFG 0x001f
-#define mmATC_PERFCOUNTER1_CFG_BASE_IDX 0
-#define mmATC_PERFCOUNTER2_CFG 0x0020
-#define mmATC_PERFCOUNTER2_CFG_BASE_IDX 0
-#define mmATC_PERFCOUNTER3_CFG 0x0021
-#define mmATC_PERFCOUNTER3_CFG_BASE_IDX 0
-#define mmATC_PERFCOUNTER_RSLT_CNTL 0x0022
-#define mmATC_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
-#define mmATC_PERFCOUNTER_LO 0x0023
-#define mmATC_PERFCOUNTER_LO_BASE_IDX 0
-#define mmATC_PERFCOUNTER_HI 0x0024
-#define mmATC_PERFCOUNTER_HI_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL 0x0025
-#define mmATHUB_PCIE_ATS_CNTL_BASE_IDX 0
-#define mmATHUB_PCIE_PASID_CNTL 0x0026
-#define mmATHUB_PCIE_PASID_CNTL_BASE_IDX 0
-#define mmATHUB_PCIE_PAGE_REQ_CNTL 0x0027
-#define mmATHUB_PCIE_PAGE_REQ_CNTL_BASE_IDX 0
-#define mmATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC 0x0028
-#define mmATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC_BASE_IDX 0
-#define mmATHUB_COMMAND 0x0029
-#define mmATHUB_COMMAND_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_0 0x002a
-#define mmATHUB_PCIE_ATS_CNTL_VF_0_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_1 0x002b
-#define mmATHUB_PCIE_ATS_CNTL_VF_1_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_2 0x002c
-#define mmATHUB_PCIE_ATS_CNTL_VF_2_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_3 0x002d
-#define mmATHUB_PCIE_ATS_CNTL_VF_3_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_4 0x002e
-#define mmATHUB_PCIE_ATS_CNTL_VF_4_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_5 0x002f
-#define mmATHUB_PCIE_ATS_CNTL_VF_5_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_6 0x0030
-#define mmATHUB_PCIE_ATS_CNTL_VF_6_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_7 0x0031
-#define mmATHUB_PCIE_ATS_CNTL_VF_7_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_8 0x0032
-#define mmATHUB_PCIE_ATS_CNTL_VF_8_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_9 0x0033
-#define mmATHUB_PCIE_ATS_CNTL_VF_9_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_10 0x0034
-#define mmATHUB_PCIE_ATS_CNTL_VF_10_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_11 0x0035
-#define mmATHUB_PCIE_ATS_CNTL_VF_11_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_12 0x0036
-#define mmATHUB_PCIE_ATS_CNTL_VF_12_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_13 0x0037
-#define mmATHUB_PCIE_ATS_CNTL_VF_13_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_14 0x0038
-#define mmATHUB_PCIE_ATS_CNTL_VF_14_BASE_IDX 0
-#define mmATHUB_PCIE_ATS_CNTL_VF_15 0x0039
-#define mmATHUB_PCIE_ATS_CNTL_VF_15_BASE_IDX 0
-#define mmATHUB_MEM_POWER_LS 0x003a
-#define mmATHUB_MEM_POWER_LS_BASE_IDX 0
-#define mmATS_IH_CREDIT 0x003b
-#define mmATS_IH_CREDIT_BASE_IDX 0
-#define mmATHUB_IH_CREDIT 0x003c
-#define mmATHUB_IH_CREDIT_BASE_IDX 0
-#define mmATC_VMID16_PASID_MAPPING 0x003d
-#define mmATC_VMID16_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID17_PASID_MAPPING 0x003e
-#define mmATC_VMID17_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID18_PASID_MAPPING 0x003f
-#define mmATC_VMID18_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID19_PASID_MAPPING 0x0040
-#define mmATC_VMID19_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID20_PASID_MAPPING 0x0041
-#define mmATC_VMID20_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID21_PASID_MAPPING 0x0042
-#define mmATC_VMID21_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID22_PASID_MAPPING 0x0043
-#define mmATC_VMID22_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID23_PASID_MAPPING 0x0044
-#define mmATC_VMID23_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID24_PASID_MAPPING 0x0045
-#define mmATC_VMID24_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID25_PASID_MAPPING 0x0046
-#define mmATC_VMID25_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID26_PASID_MAPPING 0x0047
-#define mmATC_VMID26_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID27_PASID_MAPPING 0x0048
-#define mmATC_VMID27_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID28_PASID_MAPPING 0x0049
-#define mmATC_VMID28_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID29_PASID_MAPPING 0x004a
-#define mmATC_VMID29_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID30_PASID_MAPPING 0x004b
-#define mmATC_VMID30_PASID_MAPPING_BASE_IDX 0
-#define mmATC_VMID31_PASID_MAPPING 0x004c
-#define mmATC_VMID31_PASID_MAPPING_BASE_IDX 0
-#define mmATC_ATS_MMHUB_ATCL2_STATUS 0x004d
-#define mmATC_ATS_MMHUB_ATCL2_STATUS_BASE_IDX 0
-#define mmATHUB_SHARED_VIRT_RESET_REQ 0x004e
-#define mmATHUB_SHARED_VIRT_RESET_REQ_BASE_IDX 0
-#define mmATHUB_SHARED_ACTIVE_FCN_ID 0x004f
-#define mmATHUB_SHARED_ACTIVE_FCN_ID_BASE_IDX 0
-#define mmATC_ATS_SDPPORT_CNTL 0x0050
-#define mmATC_ATS_SDPPORT_CNTL_BASE_IDX 0
-#define mmATC_ATS_VMID_SNAPSHOT_GFX_STAT 0x0052
-#define mmATC_ATS_VMID_SNAPSHOT_GFX_STAT_BASE_IDX 0
-#define mmATC_ATS_VMID_SNAPSHOT_MMHUB_STAT 0x0053
-#define mmATC_ATS_VMID_SNAPSHOT_MMHUB_STAT_BASE_IDX 0
-
-
-// addressBlock: athub_xpbdec
-// base address: 0x31f0
-#define mmXPB_RTR_SRC_APRTR0 0x005c
-#define mmXPB_RTR_SRC_APRTR0_BASE_IDX 0
-#define mmXPB_RTR_SRC_APRTR1 0x005d
-#define mmXPB_RTR_SRC_APRTR1_BASE_IDX 0
-#define mmXPB_RTR_SRC_APRTR2 0x005e
-#define mmXPB_RTR_SRC_APRTR2_BASE_IDX 0
-#define mmXPB_RTR_SRC_APRTR3 0x005f
-#define mmXPB_RTR_SRC_APRTR3_BASE_IDX 0
-#define mmXPB_RTR_SRC_APRTR4 0x0060
-#define mmXPB_RTR_SRC_APRTR4_BASE_IDX 0
-#define mmXPB_RTR_SRC_APRTR5 0x0061
-#define mmXPB_RTR_SRC_APRTR5_BASE_IDX 0
-#define mmXPB_RTR_SRC_APRTR6 0x0062
-#define mmXPB_RTR_SRC_APRTR6_BASE_IDX 0
-#define mmXPB_RTR_SRC_APRTR7 0x0063
-#define mmXPB_RTR_SRC_APRTR7_BASE_IDX 0
-#define mmXPB_RTR_SRC_APRTR8 0x0064
-#define mmXPB_RTR_SRC_APRTR8_BASE_IDX 0
-#define mmXPB_RTR_SRC_APRTR9 0x0065
-#define mmXPB_RTR_SRC_APRTR9_BASE_IDX 0
-#define mmXPB_XDMA_RTR_SRC_APRTR0 0x0066
-#define mmXPB_XDMA_RTR_SRC_APRTR0_BASE_IDX 0
-#define mmXPB_XDMA_RTR_SRC_APRTR1 0x0067
-#define mmXPB_XDMA_RTR_SRC_APRTR1_BASE_IDX 0
-#define mmXPB_XDMA_RTR_SRC_APRTR2 0x0068
-#define mmXPB_XDMA_RTR_SRC_APRTR2_BASE_IDX 0
-#define mmXPB_XDMA_RTR_SRC_APRTR3 0x0069
-#define mmXPB_XDMA_RTR_SRC_APRTR3_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP0 0x006a
-#define mmXPB_RTR_DEST_MAP0_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP1 0x006b
-#define mmXPB_RTR_DEST_MAP1_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP2 0x006c
-#define mmXPB_RTR_DEST_MAP2_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP3 0x006d
-#define mmXPB_RTR_DEST_MAP3_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP4 0x006e
-#define mmXPB_RTR_DEST_MAP4_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP5 0x006f
-#define mmXPB_RTR_DEST_MAP5_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP6 0x0070
-#define mmXPB_RTR_DEST_MAP6_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP7 0x0071
-#define mmXPB_RTR_DEST_MAP7_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP8 0x0072
-#define mmXPB_RTR_DEST_MAP8_BASE_IDX 0
-#define mmXPB_RTR_DEST_MAP9 0x0073
-#define mmXPB_RTR_DEST_MAP9_BASE_IDX 0
-#define mmXPB_XDMA_RTR_DEST_MAP0 0x0074
-#define mmXPB_XDMA_RTR_DEST_MAP0_BASE_IDX 0
-#define mmXPB_XDMA_RTR_DEST_MAP1 0x0075
-#define mmXPB_XDMA_RTR_DEST_MAP1_BASE_IDX 0
-#define mmXPB_XDMA_RTR_DEST_MAP2 0x0076
-#define mmXPB_XDMA_RTR_DEST_MAP2_BASE_IDX 0
-#define mmXPB_XDMA_RTR_DEST_MAP3 0x0077
-#define mmXPB_XDMA_RTR_DEST_MAP3_BASE_IDX 0
-#define mmXPB_CLG_CFG0 0x0078
-#define mmXPB_CLG_CFG0_BASE_IDX 0
-#define mmXPB_CLG_CFG1 0x0079
-#define mmXPB_CLG_CFG1_BASE_IDX 0
-#define mmXPB_CLG_CFG2 0x007a
-#define mmXPB_CLG_CFG2_BASE_IDX 0
-#define mmXPB_CLG_CFG3 0x007b
-#define mmXPB_CLG_CFG3_BASE_IDX 0
-#define mmXPB_CLG_CFG4 0x007c
-#define mmXPB_CLG_CFG4_BASE_IDX 0
-#define mmXPB_CLG_CFG5 0x007d
-#define mmXPB_CLG_CFG5_BASE_IDX 0
-#define mmXPB_CLG_CFG6 0x007e
-#define mmXPB_CLG_CFG6_BASE_IDX 0
-#define mmXPB_CLG_CFG7 0x007f
-#define mmXPB_CLG_CFG7_BASE_IDX 0
-#define mmXPB_CLG_EXTRA 0x0080
-#define mmXPB_CLG_EXTRA_BASE_IDX 0
-#define mmXPB_CLG_EXTRA_MSK 0x0081
-#define mmXPB_CLG_EXTRA_MSK_BASE_IDX 0
-#define mmXPB_LB_ADDR 0x0082
-#define mmXPB_LB_ADDR_BASE_IDX 0
-#define mmXPB_WCB_STS 0x0083
-#define mmXPB_WCB_STS_BASE_IDX 0
-#define mmXPB_HST_CFG 0x0084
-#define mmXPB_HST_CFG_BASE_IDX 0
-#define mmXPB_P2P_BAR_CFG 0x0085
-#define mmXPB_P2P_BAR_CFG_BASE_IDX 0
-#define mmXPB_P2P_BAR0 0x0086
-#define mmXPB_P2P_BAR0_BASE_IDX 0
-#define mmXPB_P2P_BAR1 0x0087
-#define mmXPB_P2P_BAR1_BASE_IDX 0
-#define mmXPB_P2P_BAR2 0x0088
-#define mmXPB_P2P_BAR2_BASE_IDX 0
-#define mmXPB_P2P_BAR3 0x0089
-#define mmXPB_P2P_BAR3_BASE_IDX 0
-#define mmXPB_P2P_BAR4 0x008a
-#define mmXPB_P2P_BAR4_BASE_IDX 0
-#define mmXPB_P2P_BAR5 0x008b
-#define mmXPB_P2P_BAR5_BASE_IDX 0
-#define mmXPB_P2P_BAR6 0x008c
-#define mmXPB_P2P_BAR6_BASE_IDX 0
-#define mmXPB_P2P_BAR7 0x008d
-#define mmXPB_P2P_BAR7_BASE_IDX 0
-#define mmXPB_P2P_BAR_SETUP 0x008e
-#define mmXPB_P2P_BAR_SETUP_BASE_IDX 0
-#define mmXPB_P2P_BAR_DELTA_ABOVE 0x0090
-#define mmXPB_P2P_BAR_DELTA_ABOVE_BASE_IDX 0
-#define mmXPB_P2P_BAR_DELTA_BELOW 0x0091
-#define mmXPB_P2P_BAR_DELTA_BELOW_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR0 0x0092
-#define mmXPB_PEER_SYS_BAR0_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR1 0x0093
-#define mmXPB_PEER_SYS_BAR1_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR2 0x0094
-#define mmXPB_PEER_SYS_BAR2_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR3 0x0095
-#define mmXPB_PEER_SYS_BAR3_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR4 0x0096
-#define mmXPB_PEER_SYS_BAR4_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR5 0x0097
-#define mmXPB_PEER_SYS_BAR5_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR6 0x0098
-#define mmXPB_PEER_SYS_BAR6_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR7 0x0099
-#define mmXPB_PEER_SYS_BAR7_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR8 0x009a
-#define mmXPB_PEER_SYS_BAR8_BASE_IDX 0
-#define mmXPB_PEER_SYS_BAR9 0x009b
-#define mmXPB_PEER_SYS_BAR9_BASE_IDX 0
-#define mmXPB_XDMA_PEER_SYS_BAR0 0x009c
-#define mmXPB_XDMA_PEER_SYS_BAR0_BASE_IDX 0
-#define mmXPB_XDMA_PEER_SYS_BAR1 0x009d
-#define mmXPB_XDMA_PEER_SYS_BAR1_BASE_IDX 0
-#define mmXPB_XDMA_PEER_SYS_BAR2 0x009e
-#define mmXPB_XDMA_PEER_SYS_BAR2_BASE_IDX 0
-#define mmXPB_XDMA_PEER_SYS_BAR3 0x009f
-#define mmXPB_XDMA_PEER_SYS_BAR3_BASE_IDX 0
-#define mmXPB_CLK_GAT 0x00a0
-#define mmXPB_CLK_GAT_BASE_IDX 0
-#define mmXPB_INTF_CFG 0x00a1
-#define mmXPB_INTF_CFG_BASE_IDX 0
-#define mmXPB_INTF_STS 0x00a2
-#define mmXPB_INTF_STS_BASE_IDX 0
-#define mmXPB_PIPE_STS 0x00a3
-#define mmXPB_PIPE_STS_BASE_IDX 0
-#define mmXPB_SUB_CTRL 0x00a4
-#define mmXPB_SUB_CTRL_BASE_IDX 0
-#define mmXPB_MAP_INVERT_FLUSH_NUM_LSB 0x00a5
-#define mmXPB_MAP_INVERT_FLUSH_NUM_LSB_BASE_IDX 0
-#define mmXPB_PERF_KNOBS 0x00a6
-#define mmXPB_PERF_KNOBS_BASE_IDX 0
-#define mmXPB_STICKY 0x00a7
-#define mmXPB_STICKY_BASE_IDX 0
-#define mmXPB_STICKY_W1C 0x00a8
-#define mmXPB_STICKY_W1C_BASE_IDX 0
-#define mmXPB_MISC_CFG 0x00a9
-#define mmXPB_MISC_CFG_BASE_IDX 0
-#define mmXPB_INTF_CFG2 0x00aa
-#define mmXPB_INTF_CFG2_BASE_IDX 0
-#define mmXPB_CLG_EXTRA_RD 0x00ab
-#define mmXPB_CLG_EXTRA_RD_BASE_IDX 0
-#define mmXPB_CLG_EXTRA_MSK_RD 0x00ac
-#define mmXPB_CLG_EXTRA_MSK_RD_BASE_IDX 0
-#define mmXPB_CLG_GFX_MATCH 0x00ad
-#define mmXPB_CLG_GFX_MATCH_BASE_IDX 0
-#define mmXPB_CLG_GFX_MATCH_MSK 0x00ae
-#define mmXPB_CLG_GFX_MATCH_MSK_BASE_IDX 0
-#define mmXPB_CLG_MM_MATCH 0x00af
-#define mmXPB_CLG_MM_MATCH_BASE_IDX 0
-#define mmXPB_CLG_MM_MATCH_MSK 0x00b0
-#define mmXPB_CLG_MM_MATCH_MSK_BASE_IDX 0
-#define mmXPB_CLG_GFX_UNITID_MAPPING0 0x00b1
-#define mmXPB_CLG_GFX_UNITID_MAPPING0_BASE_IDX 0
-#define mmXPB_CLG_GFX_UNITID_MAPPING1 0x00b2
-#define mmXPB_CLG_GFX_UNITID_MAPPING1_BASE_IDX 0
-#define mmXPB_CLG_GFX_UNITID_MAPPING2 0x00b3
-#define mmXPB_CLG_GFX_UNITID_MAPPING2_BASE_IDX 0
-#define mmXPB_CLG_GFX_UNITID_MAPPING3 0x00b4
-#define mmXPB_CLG_GFX_UNITID_MAPPING3_BASE_IDX 0
-#define mmXPB_CLG_GFX_UNITID_MAPPING4 0x00b5
-#define mmXPB_CLG_GFX_UNITID_MAPPING4_BASE_IDX 0
-#define mmXPB_CLG_GFX_UNITID_MAPPING5 0x00b6
-#define mmXPB_CLG_GFX_UNITID_MAPPING5_BASE_IDX 0
-#define mmXPB_CLG_GFX_UNITID_MAPPING6 0x00b7
-#define mmXPB_CLG_GFX_UNITID_MAPPING6_BASE_IDX 0
-#define mmXPB_CLG_GFX_UNITID_MAPPING7 0x00b8
-#define mmXPB_CLG_GFX_UNITID_MAPPING7_BASE_IDX 0
-#define mmXPB_CLG_MM_UNITID_MAPPING0 0x00b9
-#define mmXPB_CLG_MM_UNITID_MAPPING0_BASE_IDX 0
-#define mmXPB_CLG_MM_UNITID_MAPPING1 0x00ba
-#define mmXPB_CLG_MM_UNITID_MAPPING1_BASE_IDX 0
-#define mmXPB_CLG_MM_UNITID_MAPPING2 0x00bb
-#define mmXPB_CLG_MM_UNITID_MAPPING2_BASE_IDX 0
-#define mmXPB_CLG_MM_UNITID_MAPPING3 0x00bc
-#define mmXPB_CLG_MM_UNITID_MAPPING3_BASE_IDX 0
-
-
-// addressBlock: athub_rpbdec
-// base address: 0x33b0
-#define mmRPB_PASSPW_CONF 0x00cc
-#define mmRPB_PASSPW_CONF_BASE_IDX 0
-#define mmRPB_BLOCKLEVEL_CONF 0x00cd
-#define mmRPB_BLOCKLEVEL_CONF_BASE_IDX 0
-#define mmRPB_TAG_CONF 0x00cf
-#define mmRPB_TAG_CONF_BASE_IDX 0
-#define mmRPB_EFF_CNTL 0x00d1
-#define mmRPB_EFF_CNTL_BASE_IDX 0
-#define mmRPB_ARB_CNTL 0x00d2
-#define mmRPB_ARB_CNTL_BASE_IDX 0
-#define mmRPB_ARB_CNTL2 0x00d3
-#define mmRPB_ARB_CNTL2_BASE_IDX 0
-#define mmRPB_BIF_CNTL 0x00d4
-#define mmRPB_BIF_CNTL_BASE_IDX 0
-#define mmRPB_WR_SWITCH_CNTL 0x00d5
-#define mmRPB_WR_SWITCH_CNTL_BASE_IDX 0
-#define mmRPB_RD_SWITCH_CNTL 0x00d7
-#define mmRPB_RD_SWITCH_CNTL_BASE_IDX 0
-#define mmRPB_CID_QUEUE_WR 0x00d8
-#define mmRPB_CID_QUEUE_WR_BASE_IDX 0
-#define mmRPB_CID_QUEUE_RD 0x00d9
-#define mmRPB_CID_QUEUE_RD_BASE_IDX 0
-#define mmRPB_CID_QUEUE_EX 0x00dc
-#define mmRPB_CID_QUEUE_EX_BASE_IDX 0
-#define mmRPB_CID_QUEUE_EX_DATA 0x00dd
-#define mmRPB_CID_QUEUE_EX_DATA_BASE_IDX 0
-#define mmRPB_SWITCH_CNTL2 0x00de
-#define mmRPB_SWITCH_CNTL2_BASE_IDX 0
-#define mmRPB_DEINTRLV_COMBINE_CNTL 0x00df
-#define mmRPB_DEINTRLV_COMBINE_CNTL_BASE_IDX 0
-#define mmRPB_VC_SWITCH_RDWR 0x00e0
-#define mmRPB_VC_SWITCH_RDWR_BASE_IDX 0
-#define mmRPB_PERFCOUNTER_LO 0x00e1
-#define mmRPB_PERFCOUNTER_LO_BASE_IDX 0
-#define mmRPB_PERFCOUNTER_HI 0x00e2
-#define mmRPB_PERFCOUNTER_HI_BASE_IDX 0
-#define mmRPB_PERFCOUNTER0_CFG 0x00e3
-#define mmRPB_PERFCOUNTER0_CFG_BASE_IDX 0
-#define mmRPB_PERFCOUNTER1_CFG 0x00e4
-#define mmRPB_PERFCOUNTER1_CFG_BASE_IDX 0
-#define mmRPB_PERFCOUNTER2_CFG 0x00e5
-#define mmRPB_PERFCOUNTER2_CFG_BASE_IDX 0
-#define mmRPB_PERFCOUNTER3_CFG 0x00e6
-#define mmRPB_PERFCOUNTER3_CFG_BASE_IDX 0
-#define mmRPB_PERFCOUNTER_RSLT_CNTL 0x00e7
-#define mmRPB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
-#define mmRPB_RD_QUEUE_CNTL 0x00e9
-#define mmRPB_RD_QUEUE_CNTL_BASE_IDX 0
-#define mmRPB_RD_QUEUE_CNTL2 0x00ea
-#define mmRPB_RD_QUEUE_CNTL2_BASE_IDX 0
-#define mmRPB_WR_QUEUE_CNTL 0x00eb
-#define mmRPB_WR_QUEUE_CNTL_BASE_IDX 0
-#define mmRPB_WR_QUEUE_CNTL2 0x00ec
-#define mmRPB_WR_QUEUE_CNTL2_BASE_IDX 0
-#define mmRPB_EA_QUEUE_WR 0x00ed
-#define mmRPB_EA_QUEUE_WR_BASE_IDX 0
-#define mmRPB_ATS_CNTL 0x00ee
-#define mmRPB_ATS_CNTL_BASE_IDX 0
-#define mmRPB_ATS_CNTL2 0x00ef
-#define mmRPB_ATS_CNTL2_BASE_IDX 0
-#define mmRPB_SDPPORT_CNTL 0x00f0
-#define mmRPB_SDPPORT_CNTL_BASE_IDX 0
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_sh_mask.h
deleted file mode 100644
index 777b05c..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_sh_mask.h
+++ /dev/null
@@ -1,2045 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _athub_1_0_SH_MASK_HEADER
-#define _athub_1_0_SH_MASK_HEADER
-
-
-// addressBlock: athub_atsdec
-//ATC_ATS_CNTL
-#define ATC_ATS_CNTL__DISABLE_ATC__SHIFT 0x0
-#define ATC_ATS_CNTL__DISABLE_PRI__SHIFT 0x1
-#define ATC_ATS_CNTL__DISABLE_PASID__SHIFT 0x2
-#define ATC_ATS_CNTL__CREDITS_ATS_RPB__SHIFT 0x8
-#define ATC_ATS_CNTL__INVALIDATION_LOG_KEEP_ORDER__SHIFT 0x14
-#define ATC_ATS_CNTL__TRANS_LOG_KEEP_ORDER__SHIFT 0x15
-#define ATC_ATS_CNTL__TRANS_EXE_RETURN__SHIFT 0x16
-#define ATC_ATS_CNTL__DISABLE_ATC_MASK 0x00000001L
-#define ATC_ATS_CNTL__DISABLE_PRI_MASK 0x00000002L
-#define ATC_ATS_CNTL__DISABLE_PASID_MASK 0x00000004L
-#define ATC_ATS_CNTL__CREDITS_ATS_RPB_MASK 0x00003F00L
-#define ATC_ATS_CNTL__INVALIDATION_LOG_KEEP_ORDER_MASK 0x00100000L
-#define ATC_ATS_CNTL__TRANS_LOG_KEEP_ORDER_MASK 0x00200000L
-#define ATC_ATS_CNTL__TRANS_EXE_RETURN_MASK 0x00C00000L
-//ATC_ATS_STATUS
-#define ATC_ATS_STATUS__BUSY__SHIFT 0x0
-#define ATC_ATS_STATUS__CRASHED__SHIFT 0x1
-#define ATC_ATS_STATUS__DEADLOCK_DETECTION__SHIFT 0x2
-#define ATC_ATS_STATUS__FLUSH_INVALIDATION_OUTSTANDING__SHIFT 0x3
-#define ATC_ATS_STATUS__NONFLUSH_INVALIDATION_OUTSTANDING__SHIFT 0x6
-#define ATC_ATS_STATUS__BUSY_MASK 0x00000001L
-#define ATC_ATS_STATUS__CRASHED_MASK 0x00000002L
-#define ATC_ATS_STATUS__DEADLOCK_DETECTION_MASK 0x00000004L
-#define ATC_ATS_STATUS__FLUSH_INVALIDATION_OUTSTANDING_MASK 0x00000038L
-#define ATC_ATS_STATUS__NONFLUSH_INVALIDATION_OUTSTANDING_MASK 0x000001C0L
-//ATC_ATS_FAULT_CNTL
-#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG__SHIFT 0x0
-#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE__SHIFT 0xa
-#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE__SHIFT 0x14
-#define ATC_ATS_FAULT_CNTL__FAULT_REGISTER_LOG_MASK 0x000001FFL
-#define ATC_ATS_FAULT_CNTL__FAULT_INTERRUPT_TABLE_MASK 0x0007FC00L
-#define ATC_ATS_FAULT_CNTL__FAULT_CRASH_TABLE_MASK 0x1FF00000L
-//ATC_ATS_FAULT_STATUS_INFO
-#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE__SHIFT 0x0
-#define ATC_ATS_FAULT_STATUS_INFO__VMID__SHIFT 0xa
-#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO__SHIFT 0xf
-#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2__SHIFT 0x10
-#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION__SHIFT 0x11
-#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST__SHIFT 0x12
-#define ATC_ATS_FAULT_STATUS_INFO__STATUS__SHIFT 0x13
-#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH__SHIFT 0x18
-#define ATC_ATS_FAULT_STATUS_INFO__FAULT_TYPE_MASK 0x000001FFL
-#define ATC_ATS_FAULT_STATUS_INFO__VMID_MASK 0x00007C00L
-#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO_MASK 0x00008000L
-#define ATC_ATS_FAULT_STATUS_INFO__EXTRA_INFO2_MASK 0x00010000L
-#define ATC_ATS_FAULT_STATUS_INFO__INVALIDATION_MASK 0x00020000L
-#define ATC_ATS_FAULT_STATUS_INFO__PAGE_REQUEST_MASK 0x00040000L
-#define ATC_ATS_FAULT_STATUS_INFO__STATUS_MASK 0x00F80000L
-#define ATC_ATS_FAULT_STATUS_INFO__PAGE_ADDR_HIGH_MASK 0x0F000000L
-//ATC_ATS_FAULT_STATUS_ADDR
-#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR__SHIFT 0x0
-#define ATC_ATS_FAULT_STATUS_ADDR__PAGE_ADDR_MASK 0xFFFFFFFFL
-//ATC_ATS_DEFAULT_PAGE_LOW
-#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE__SHIFT 0x0
-#define ATC_ATS_DEFAULT_PAGE_LOW__DEFAULT_PAGE_MASK 0xFFFFFFFFL
-//ATC_TRANS_FAULT_RSPCNTRL
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID0__SHIFT 0x0
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID1__SHIFT 0x1
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID2__SHIFT 0x2
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID3__SHIFT 0x3
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID4__SHIFT 0x4
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID5__SHIFT 0x5
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID6__SHIFT 0x6
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID7__SHIFT 0x7
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID8__SHIFT 0x8
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID9__SHIFT 0x9
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID10__SHIFT 0xa
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID11__SHIFT 0xb
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID12__SHIFT 0xc
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID13__SHIFT 0xd
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID14__SHIFT 0xe
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID15__SHIFT 0xf
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID16__SHIFT 0x10
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID17__SHIFT 0x11
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID18__SHIFT 0x12
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID19__SHIFT 0x13
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID20__SHIFT 0x14
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID21__SHIFT 0x15
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID22__SHIFT 0x16
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID23__SHIFT 0x17
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID24__SHIFT 0x18
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID25__SHIFT 0x19
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID26__SHIFT 0x1a
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID27__SHIFT 0x1b
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID28__SHIFT 0x1c
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID29__SHIFT 0x1d
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID30__SHIFT 0x1e
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID31__SHIFT 0x1f
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID0_MASK 0x00000001L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID1_MASK 0x00000002L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID2_MASK 0x00000004L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID3_MASK 0x00000008L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID4_MASK 0x00000010L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID5_MASK 0x00000020L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID6_MASK 0x00000040L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID7_MASK 0x00000080L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID8_MASK 0x00000100L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID9_MASK 0x00000200L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID10_MASK 0x00000400L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID11_MASK 0x00000800L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID12_MASK 0x00001000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID13_MASK 0x00002000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID14_MASK 0x00004000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID15_MASK 0x00008000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID16_MASK 0x00010000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID17_MASK 0x00020000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID18_MASK 0x00040000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID19_MASK 0x00080000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID20_MASK 0x00100000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID21_MASK 0x00200000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID22_MASK 0x00400000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID23_MASK 0x00800000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID24_MASK 0x01000000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID25_MASK 0x02000000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID26_MASK 0x04000000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID27_MASK 0x08000000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID28_MASK 0x10000000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID29_MASK 0x20000000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID30_MASK 0x40000000L
-#define ATC_TRANS_FAULT_RSPCNTRL__VMID31_MASK 0x80000000L
-//ATC_ATS_FAULT_STATUS_INFO2
-#define ATC_ATS_FAULT_STATUS_INFO2__VF__SHIFT 0x0
-#define ATC_ATS_FAULT_STATUS_INFO2__VFID__SHIFT 0x1
-#define ATC_ATS_FAULT_STATUS_INFO2__MMHUB_INV_VMID__SHIFT 0x9
-#define ATC_ATS_FAULT_STATUS_INFO2__VF_MASK 0x00000001L
-#define ATC_ATS_FAULT_STATUS_INFO2__VFID_MASK 0x0000001EL
-#define ATC_ATS_FAULT_STATUS_INFO2__MMHUB_INV_VMID_MASK 0x00003E00L
-//ATHUB_MISC_CNTL
-#define ATHUB_MISC_CNTL__CG_OFFDLY__SHIFT 0x6
-#define ATHUB_MISC_CNTL__CG_ENABLE__SHIFT 0x12
-#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE__SHIFT 0x13
-#define ATHUB_MISC_CNTL__PG_ENABLE__SHIFT 0x14
-#define ATHUB_MISC_CNTL__PG_OFFDLY__SHIFT 0x15
-#define ATHUB_MISC_CNTL__CG_STATUS__SHIFT 0x1b
-#define ATHUB_MISC_CNTL__PG_STATUS__SHIFT 0x1c
-#define ATHUB_MISC_CNTL__CG_OFFDLY_MASK 0x00000FC0L
-#define ATHUB_MISC_CNTL__CG_ENABLE_MASK 0x00040000L
-#define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK 0x00080000L
-#define ATHUB_MISC_CNTL__PG_ENABLE_MASK 0x00100000L
-#define ATHUB_MISC_CNTL__PG_OFFDLY_MASK 0x07E00000L
-#define ATHUB_MISC_CNTL__CG_STATUS_MASK 0x08000000L
-#define ATHUB_MISC_CNTL__PG_STATUS_MASK 0x10000000L
-//ATC_VMID_PASID_MAPPING_UPDATE_STATUS
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED__SHIFT 0x0
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED__SHIFT 0x1
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED__SHIFT 0x2
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED__SHIFT 0x3
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED__SHIFT 0x4
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED__SHIFT 0x5
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED__SHIFT 0x6
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED__SHIFT 0x7
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED__SHIFT 0x8
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED__SHIFT 0x9
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED__SHIFT 0xa
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED__SHIFT 0xb
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED__SHIFT 0xc
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED__SHIFT 0xd
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED__SHIFT 0xe
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED__SHIFT 0xf
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID16_REMAPPING_FINISHED__SHIFT 0x10
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID17_REMAPPING_FINISHED__SHIFT 0x11
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID18_REMAPPING_FINISHED__SHIFT 0x12
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID19_REMAPPING_FINISHED__SHIFT 0x13
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID20_REMAPPING_FINISHED__SHIFT 0x14
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID21_REMAPPING_FINISHED__SHIFT 0x15
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID22_REMAPPING_FINISHED__SHIFT 0x16
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID23_REMAPPING_FINISHED__SHIFT 0x17
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID24_REMAPPING_FINISHED__SHIFT 0x18
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID25_REMAPPING_FINISHED__SHIFT 0x19
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID26_REMAPPING_FINISHED__SHIFT 0x1a
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID27_REMAPPING_FINISHED__SHIFT 0x1b
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID28_REMAPPING_FINISHED__SHIFT 0x1c
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID29_REMAPPING_FINISHED__SHIFT 0x1d
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID30_REMAPPING_FINISHED__SHIFT 0x1e
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID31_REMAPPING_FINISHED__SHIFT 0x1f
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID0_REMAPPING_FINISHED_MASK 0x00000001L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID1_REMAPPING_FINISHED_MASK 0x00000002L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID2_REMAPPING_FINISHED_MASK 0x00000004L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID3_REMAPPING_FINISHED_MASK 0x00000008L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID4_REMAPPING_FINISHED_MASK 0x00000010L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID5_REMAPPING_FINISHED_MASK 0x00000020L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID6_REMAPPING_FINISHED_MASK 0x00000040L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID7_REMAPPING_FINISHED_MASK 0x00000080L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID8_REMAPPING_FINISHED_MASK 0x00000100L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID9_REMAPPING_FINISHED_MASK 0x00000200L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID10_REMAPPING_FINISHED_MASK 0x00000400L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID11_REMAPPING_FINISHED_MASK 0x00000800L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID12_REMAPPING_FINISHED_MASK 0x00001000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID13_REMAPPING_FINISHED_MASK 0x00002000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID14_REMAPPING_FINISHED_MASK 0x00004000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID15_REMAPPING_FINISHED_MASK 0x00008000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID16_REMAPPING_FINISHED_MASK 0x00010000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID17_REMAPPING_FINISHED_MASK 0x00020000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID18_REMAPPING_FINISHED_MASK 0x00040000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID19_REMAPPING_FINISHED_MASK 0x00080000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID20_REMAPPING_FINISHED_MASK 0x00100000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID21_REMAPPING_FINISHED_MASK 0x00200000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID22_REMAPPING_FINISHED_MASK 0x00400000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID23_REMAPPING_FINISHED_MASK 0x00800000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID24_REMAPPING_FINISHED_MASK 0x01000000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID25_REMAPPING_FINISHED_MASK 0x02000000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID26_REMAPPING_FINISHED_MASK 0x04000000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID27_REMAPPING_FINISHED_MASK 0x08000000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID28_REMAPPING_FINISHED_MASK 0x10000000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID29_REMAPPING_FINISHED_MASK 0x20000000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID30_REMAPPING_FINISHED_MASK 0x40000000L
-#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS__VMID31_REMAPPING_FINISHED_MASK 0x80000000L
-//ATC_VMID0_PASID_MAPPING
-#define ATC_VMID0_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID0_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID0_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID0_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID0_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID0_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID1_PASID_MAPPING
-#define ATC_VMID1_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID1_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID1_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID1_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID1_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID1_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID2_PASID_MAPPING
-#define ATC_VMID2_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID2_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID2_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID2_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID2_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID2_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID3_PASID_MAPPING
-#define ATC_VMID3_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID3_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID3_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID3_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID3_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID3_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID4_PASID_MAPPING
-#define ATC_VMID4_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID4_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID4_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID4_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID4_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID4_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID5_PASID_MAPPING
-#define ATC_VMID5_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID5_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID5_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID5_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID5_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID5_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID6_PASID_MAPPING
-#define ATC_VMID6_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID6_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID6_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID6_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID6_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID6_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID7_PASID_MAPPING
-#define ATC_VMID7_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID7_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID7_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID7_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID7_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID7_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID8_PASID_MAPPING
-#define ATC_VMID8_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID8_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID8_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID8_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID8_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID8_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID9_PASID_MAPPING
-#define ATC_VMID9_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID9_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID9_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID9_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID9_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID9_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID10_PASID_MAPPING
-#define ATC_VMID10_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID10_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID10_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID10_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID10_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID10_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID11_PASID_MAPPING
-#define ATC_VMID11_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID11_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID11_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID11_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID11_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID11_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID12_PASID_MAPPING
-#define ATC_VMID12_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID12_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID12_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID12_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID12_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID12_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID13_PASID_MAPPING
-#define ATC_VMID13_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID13_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID13_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID13_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID13_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID13_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID14_PASID_MAPPING
-#define ATC_VMID14_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID14_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID14_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID14_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID14_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID14_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID15_PASID_MAPPING
-#define ATC_VMID15_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID15_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID15_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID15_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID15_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID15_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_ATS_VMID_STATUS
-#define ATC_ATS_VMID_STATUS__VMID0_OUTSTANDING__SHIFT 0x0
-#define ATC_ATS_VMID_STATUS__VMID1_OUTSTANDING__SHIFT 0x1
-#define ATC_ATS_VMID_STATUS__VMID2_OUTSTANDING__SHIFT 0x2
-#define ATC_ATS_VMID_STATUS__VMID3_OUTSTANDING__SHIFT 0x3
-#define ATC_ATS_VMID_STATUS__VMID4_OUTSTANDING__SHIFT 0x4
-#define ATC_ATS_VMID_STATUS__VMID5_OUTSTANDING__SHIFT 0x5
-#define ATC_ATS_VMID_STATUS__VMID6_OUTSTANDING__SHIFT 0x6
-#define ATC_ATS_VMID_STATUS__VMID7_OUTSTANDING__SHIFT 0x7
-#define ATC_ATS_VMID_STATUS__VMID8_OUTSTANDING__SHIFT 0x8
-#define ATC_ATS_VMID_STATUS__VMID9_OUTSTANDING__SHIFT 0x9
-#define ATC_ATS_VMID_STATUS__VMID10_OUTSTANDING__SHIFT 0xa
-#define ATC_ATS_VMID_STATUS__VMID11_OUTSTANDING__SHIFT 0xb
-#define ATC_ATS_VMID_STATUS__VMID12_OUTSTANDING__SHIFT 0xc
-#define ATC_ATS_VMID_STATUS__VMID13_OUTSTANDING__SHIFT 0xd
-#define ATC_ATS_VMID_STATUS__VMID14_OUTSTANDING__SHIFT 0xe
-#define ATC_ATS_VMID_STATUS__VMID15_OUTSTANDING__SHIFT 0xf
-#define ATC_ATS_VMID_STATUS__VMID16_OUTSTANDING__SHIFT 0x10
-#define ATC_ATS_VMID_STATUS__VMID17_OUTSTANDING__SHIFT 0x11
-#define ATC_ATS_VMID_STATUS__VMID18_OUTSTANDING__SHIFT 0x12
-#define ATC_ATS_VMID_STATUS__VMID19_OUTSTANDING__SHIFT 0x13
-#define ATC_ATS_VMID_STATUS__VMID20_OUTSTANDING__SHIFT 0x14
-#define ATC_ATS_VMID_STATUS__VMID21_OUTSTANDING__SHIFT 0x15
-#define ATC_ATS_VMID_STATUS__VMID22_OUTSTANDING__SHIFT 0x16
-#define ATC_ATS_VMID_STATUS__VMID23_OUTSTANDING__SHIFT 0x17
-#define ATC_ATS_VMID_STATUS__VMID24_OUTSTANDING__SHIFT 0x18
-#define ATC_ATS_VMID_STATUS__VMID25_OUTSTANDING__SHIFT 0x19
-#define ATC_ATS_VMID_STATUS__VMID26_OUTSTANDING__SHIFT 0x1a
-#define ATC_ATS_VMID_STATUS__VMID27_OUTSTANDING__SHIFT 0x1b
-#define ATC_ATS_VMID_STATUS__VMID28_OUTSTANDING__SHIFT 0x1c
-#define ATC_ATS_VMID_STATUS__VMID29_OUTSTANDING__SHIFT 0x1d
-#define ATC_ATS_VMID_STATUS__VMID30_OUTSTANDING__SHIFT 0x1e
-#define ATC_ATS_VMID_STATUS__VMID31_OUTSTANDING__SHIFT 0x1f
-#define ATC_ATS_VMID_STATUS__VMID0_OUTSTANDING_MASK 0x00000001L
-#define ATC_ATS_VMID_STATUS__VMID1_OUTSTANDING_MASK 0x00000002L
-#define ATC_ATS_VMID_STATUS__VMID2_OUTSTANDING_MASK 0x00000004L
-#define ATC_ATS_VMID_STATUS__VMID3_OUTSTANDING_MASK 0x00000008L
-#define ATC_ATS_VMID_STATUS__VMID4_OUTSTANDING_MASK 0x00000010L
-#define ATC_ATS_VMID_STATUS__VMID5_OUTSTANDING_MASK 0x00000020L
-#define ATC_ATS_VMID_STATUS__VMID6_OUTSTANDING_MASK 0x00000040L
-#define ATC_ATS_VMID_STATUS__VMID7_OUTSTANDING_MASK 0x00000080L
-#define ATC_ATS_VMID_STATUS__VMID8_OUTSTANDING_MASK 0x00000100L
-#define ATC_ATS_VMID_STATUS__VMID9_OUTSTANDING_MASK 0x00000200L
-#define ATC_ATS_VMID_STATUS__VMID10_OUTSTANDING_MASK 0x00000400L
-#define ATC_ATS_VMID_STATUS__VMID11_OUTSTANDING_MASK 0x00000800L
-#define ATC_ATS_VMID_STATUS__VMID12_OUTSTANDING_MASK 0x00001000L
-#define ATC_ATS_VMID_STATUS__VMID13_OUTSTANDING_MASK 0x00002000L
-#define ATC_ATS_VMID_STATUS__VMID14_OUTSTANDING_MASK 0x00004000L
-#define ATC_ATS_VMID_STATUS__VMID15_OUTSTANDING_MASK 0x00008000L
-#define ATC_ATS_VMID_STATUS__VMID16_OUTSTANDING_MASK 0x00010000L
-#define ATC_ATS_VMID_STATUS__VMID17_OUTSTANDING_MASK 0x00020000L
-#define ATC_ATS_VMID_STATUS__VMID18_OUTSTANDING_MASK 0x00040000L
-#define ATC_ATS_VMID_STATUS__VMID19_OUTSTANDING_MASK 0x00080000L
-#define ATC_ATS_VMID_STATUS__VMID20_OUTSTANDING_MASK 0x00100000L
-#define ATC_ATS_VMID_STATUS__VMID21_OUTSTANDING_MASK 0x00200000L
-#define ATC_ATS_VMID_STATUS__VMID22_OUTSTANDING_MASK 0x00400000L
-#define ATC_ATS_VMID_STATUS__VMID23_OUTSTANDING_MASK 0x00800000L
-#define ATC_ATS_VMID_STATUS__VMID24_OUTSTANDING_MASK 0x01000000L
-#define ATC_ATS_VMID_STATUS__VMID25_OUTSTANDING_MASK 0x02000000L
-#define ATC_ATS_VMID_STATUS__VMID26_OUTSTANDING_MASK 0x04000000L
-#define ATC_ATS_VMID_STATUS__VMID27_OUTSTANDING_MASK 0x08000000L
-#define ATC_ATS_VMID_STATUS__VMID28_OUTSTANDING_MASK 0x10000000L
-#define ATC_ATS_VMID_STATUS__VMID29_OUTSTANDING_MASK 0x20000000L
-#define ATC_ATS_VMID_STATUS__VMID30_OUTSTANDING_MASK 0x40000000L
-#define ATC_ATS_VMID_STATUS__VMID31_OUTSTANDING_MASK 0x80000000L
-//ATC_ATS_GFX_ATCL2_STATUS
-#define ATC_ATS_GFX_ATCL2_STATUS__POWERED_DOWN__SHIFT 0x0
-#define ATC_ATS_GFX_ATCL2_STATUS__POWERED_DOWN_MASK 0x00000001L
-//ATC_PERFCOUNTER0_CFG
-#define ATC_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
-#define ATC_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
-#define ATC_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
-#define ATC_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
-#define ATC_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
-#define ATC_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
-#define ATC_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
-#define ATC_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
-#define ATC_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
-#define ATC_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
-//ATC_PERFCOUNTER1_CFG
-#define ATC_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
-#define ATC_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
-#define ATC_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
-#define ATC_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
-#define ATC_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
-#define ATC_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
-#define ATC_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
-#define ATC_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
-#define ATC_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
-#define ATC_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
-//ATC_PERFCOUNTER2_CFG
-#define ATC_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
-#define ATC_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
-#define ATC_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
-#define ATC_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
-#define ATC_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
-#define ATC_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
-#define ATC_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
-#define ATC_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
-#define ATC_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
-#define ATC_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
-//ATC_PERFCOUNTER3_CFG
-#define ATC_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
-#define ATC_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
-#define ATC_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
-#define ATC_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
-#define ATC_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
-#define ATC_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
-#define ATC_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
-#define ATC_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
-#define ATC_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
-#define ATC_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
-//ATC_PERFCOUNTER_RSLT_CNTL
-#define ATC_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
-#define ATC_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
-#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
-#define ATC_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
-#define ATC_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
-#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
-#define ATC_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
-#define ATC_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
-#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
-#define ATC_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
-#define ATC_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
-#define ATC_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
-//ATC_PERFCOUNTER_LO
-#define ATC_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
-#define ATC_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
-//ATC_PERFCOUNTER_HI
-#define ATC_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
-#define ATC_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
-#define ATC_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
-#define ATC_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
-//ATHUB_PCIE_ATS_CNTL
-#define ATHUB_PCIE_ATS_CNTL__STU__SHIFT 0x10
-#define ATHUB_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL__STU_MASK 0x001F0000L
-#define ATHUB_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_PASID_CNTL
-#define ATHUB_PCIE_PASID_CNTL__PASID_EN__SHIFT 0x10
-#define ATHUB_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x11
-#define ATHUB_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x12
-#define ATHUB_PCIE_PASID_CNTL__PASID_EN_MASK 0x00010000L
-#define ATHUB_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x00020000L
-#define ATHUB_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x00040000L
-//ATHUB_PCIE_PAGE_REQ_CNTL
-#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0
-#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1
-#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x00000001L
-#define ATHUB_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x00000002L
-//ATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC
-#define ATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0
-#define ATHUB_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL
-//ATHUB_COMMAND
-#define ATHUB_COMMAND__BUS_MASTER_EN__SHIFT 0x2
-#define ATHUB_COMMAND__BUS_MASTER_EN_MASK 0x00000004L
-//ATHUB_PCIE_ATS_CNTL_VF_0
-#define ATHUB_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_1
-#define ATHUB_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_2
-#define ATHUB_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_3
-#define ATHUB_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_4
-#define ATHUB_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_5
-#define ATHUB_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_6
-#define ATHUB_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_7
-#define ATHUB_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_8
-#define ATHUB_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_9
-#define ATHUB_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_10
-#define ATHUB_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_11
-#define ATHUB_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_12
-#define ATHUB_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_13
-#define ATHUB_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_14
-#define ATHUB_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_PCIE_ATS_CNTL_VF_15
-#define ATHUB_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f
-#define ATHUB_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L
-//ATHUB_MEM_POWER_LS
-#define ATHUB_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
-#define ATHUB_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
-#define ATHUB_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
-#define ATHUB_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
-//ATS_IH_CREDIT
-#define ATS_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define ATS_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
-#define ATS_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define ATS_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
-//ATHUB_IH_CREDIT
-#define ATHUB_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define ATHUB_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
-#define ATHUB_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define ATHUB_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
-//ATC_VMID16_PASID_MAPPING
-#define ATC_VMID16_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID16_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID16_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID16_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID16_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID16_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID17_PASID_MAPPING
-#define ATC_VMID17_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID17_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID17_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID17_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID17_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID17_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID18_PASID_MAPPING
-#define ATC_VMID18_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID18_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID18_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID18_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID18_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID18_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID19_PASID_MAPPING
-#define ATC_VMID19_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID19_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID19_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID19_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID19_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID19_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID20_PASID_MAPPING
-#define ATC_VMID20_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID20_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID20_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID20_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID20_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID20_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID21_PASID_MAPPING
-#define ATC_VMID21_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID21_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID21_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID21_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID21_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID21_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID22_PASID_MAPPING
-#define ATC_VMID22_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID22_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID22_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID22_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID22_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID22_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID23_PASID_MAPPING
-#define ATC_VMID23_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID23_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID23_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID23_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID23_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID23_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID24_PASID_MAPPING
-#define ATC_VMID24_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID24_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID24_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID24_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID24_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID24_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID25_PASID_MAPPING
-#define ATC_VMID25_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID25_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID25_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID25_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID25_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID25_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID26_PASID_MAPPING
-#define ATC_VMID26_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID26_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID26_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID26_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID26_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID26_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID27_PASID_MAPPING
-#define ATC_VMID27_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID27_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID27_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID27_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID27_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID27_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID28_PASID_MAPPING
-#define ATC_VMID28_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID28_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID28_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID28_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID28_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID28_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID29_PASID_MAPPING
-#define ATC_VMID29_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID29_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID29_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID29_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID29_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID29_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID30_PASID_MAPPING
-#define ATC_VMID30_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID30_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID30_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID30_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID30_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID30_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_VMID31_PASID_MAPPING
-#define ATC_VMID31_PASID_MAPPING__PASID__SHIFT 0x0
-#define ATC_VMID31_PASID_MAPPING__NO_INVALIDATION__SHIFT 0x1e
-#define ATC_VMID31_PASID_MAPPING__VALID__SHIFT 0x1f
-#define ATC_VMID31_PASID_MAPPING__PASID_MASK 0x0000FFFFL
-#define ATC_VMID31_PASID_MAPPING__NO_INVALIDATION_MASK 0x40000000L
-#define ATC_VMID31_PASID_MAPPING__VALID_MASK 0x80000000L
-//ATC_ATS_MMHUB_ATCL2_STATUS
-#define ATC_ATS_MMHUB_ATCL2_STATUS__POWERED_DOWN__SHIFT 0x0
-#define ATC_ATS_MMHUB_ATCL2_STATUS__POWERED_DOWN_MASK 0x00000001L
-//ATHUB_SHARED_VIRT_RESET_REQ
-#define ATHUB_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
-#define ATHUB_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
-#define ATHUB_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
-#define ATHUB_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
-//ATHUB_SHARED_ACTIVE_FCN_ID
-#define ATHUB_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
-#define ATHUB_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f
-#define ATHUB_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
-#define ATHUB_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L
-//ATC_ATS_SDPPORT_CNTL
-#define ATC_ATS_SDPPORT_CNTL__ATS_INV_SELF_ACTIVATE__SHIFT 0x0
-#define ATC_ATS_SDPPORT_CNTL__ATS_INV_CFG_MODE__SHIFT 0x1
-#define ATC_ATS_SDPPORT_CNTL__ATS_INV_HALT_THRESHOLD__SHIFT 0x3
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_SELF_ACTIVATE__SHIFT 0x7
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_QUICK_COMACK__SHIFT 0x8
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_HALT_THRESHOLD__SHIFT 0x9
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_PASSIVE_MODE__SHIFT 0xd
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_RDY_MODE__SHIFT 0xe
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_MMHUB_RDY_MODE__SHIFT 0xf
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKEN__SHIFT 0x10
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKENRCV__SHIFT 0x11
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKEN__SHIFT 0x12
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x13
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKEN__SHIFT 0x14
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKENRCV__SHIFT 0x15
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKEN__SHIFT 0x16
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKENRCV__SHIFT 0x17
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKEN__SHIFT 0x18
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKENRCV__SHIFT 0x19
-#define ATC_ATS_SDPPORT_CNTL__ATS_INV_SELF_ACTIVATE_MASK 0x00000001L
-#define ATC_ATS_SDPPORT_CNTL__ATS_INV_CFG_MODE_MASK 0x00000006L
-#define ATC_ATS_SDPPORT_CNTL__ATS_INV_HALT_THRESHOLD_MASK 0x00000078L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_SELF_ACTIVATE_MASK 0x00000080L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_QUICK_COMACK_MASK 0x00000100L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_HALT_THRESHOLD_MASK 0x00001E00L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_TRANS_PASSIVE_MODE_MASK 0x00002000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_RDY_MODE_MASK 0x00004000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_MMHUB_RDY_MODE_MASK 0x00008000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKEN_MASK 0x00010000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPCKENRCV_MASK 0x00020000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKEN_MASK 0x00040000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_RDRSPDATACKENRCV_MASK 0x00080000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKEN_MASK 0x00100000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_WRRSPCKENRCV_MASK 0x00200000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKEN_MASK 0x00400000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_REQCKENRCV_MASK 0x00800000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKEN_MASK 0x01000000L
-#define ATC_ATS_SDPPORT_CNTL__UTCL2_GFX_SDPVDCI_ORIGDATACKENRCV_MASK 0x02000000L
-//ATC_ATS_VMID_SNAPSHOT_GFX_STAT
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID0__SHIFT 0x0
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID1__SHIFT 0x1
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID2__SHIFT 0x2
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID3__SHIFT 0x3
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID4__SHIFT 0x4
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID5__SHIFT 0x5
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID6__SHIFT 0x6
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID7__SHIFT 0x7
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID8__SHIFT 0x8
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID9__SHIFT 0x9
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID10__SHIFT 0xa
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID11__SHIFT 0xb
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID12__SHIFT 0xc
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID13__SHIFT 0xd
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID14__SHIFT 0xe
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID15__SHIFT 0xf
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID0_MASK 0x00000001L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID1_MASK 0x00000002L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID2_MASK 0x00000004L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID3_MASK 0x00000008L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID4_MASK 0x00000010L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID5_MASK 0x00000020L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID6_MASK 0x00000040L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID7_MASK 0x00000080L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID8_MASK 0x00000100L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID9_MASK 0x00000200L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID10_MASK 0x00000400L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID11_MASK 0x00000800L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID12_MASK 0x00001000L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID13_MASK 0x00002000L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID14_MASK 0x00004000L
-#define ATC_ATS_VMID_SNAPSHOT_GFX_STAT__VMID15_MASK 0x00008000L
-//ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID0__SHIFT 0x0
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID1__SHIFT 0x1
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID2__SHIFT 0x2
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID3__SHIFT 0x3
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID4__SHIFT 0x4
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID5__SHIFT 0x5
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID6__SHIFT 0x6
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID7__SHIFT 0x7
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID8__SHIFT 0x8
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID9__SHIFT 0x9
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID10__SHIFT 0xa
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID11__SHIFT 0xb
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID12__SHIFT 0xc
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID13__SHIFT 0xd
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID14__SHIFT 0xe
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID15__SHIFT 0xf
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID0_MASK 0x00000001L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID1_MASK 0x00000002L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID2_MASK 0x00000004L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID3_MASK 0x00000008L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID4_MASK 0x00000010L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID5_MASK 0x00000020L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID6_MASK 0x00000040L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID7_MASK 0x00000080L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID8_MASK 0x00000100L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID9_MASK 0x00000200L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID10_MASK 0x00000400L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID11_MASK 0x00000800L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID12_MASK 0x00001000L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID13_MASK 0x00002000L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID14_MASK 0x00004000L
-#define ATC_ATS_VMID_SNAPSHOT_MMHUB_STAT__VMID15_MASK 0x00008000L
-
-
-// addressBlock: athub_xpbdec
-//XPB_RTR_SRC_APRTR0
-#define XPB_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_SRC_APRTR1
-#define XPB_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_SRC_APRTR2
-#define XPB_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_SRC_APRTR3
-#define XPB_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_SRC_APRTR4
-#define XPB_RTR_SRC_APRTR4__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR4__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_SRC_APRTR5
-#define XPB_RTR_SRC_APRTR5__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR5__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_SRC_APRTR6
-#define XPB_RTR_SRC_APRTR6__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR6__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_SRC_APRTR7
-#define XPB_RTR_SRC_APRTR7__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR7__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_SRC_APRTR8
-#define XPB_RTR_SRC_APRTR8__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR8__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_SRC_APRTR9
-#define XPB_RTR_SRC_APRTR9__BASE_ADDR__SHIFT 0x0
-#define XPB_RTR_SRC_APRTR9__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_XDMA_RTR_SRC_APRTR0
-#define XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0
-#define XPB_XDMA_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_XDMA_RTR_SRC_APRTR1
-#define XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0
-#define XPB_XDMA_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_XDMA_RTR_SRC_APRTR2
-#define XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0
-#define XPB_XDMA_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_XDMA_RTR_SRC_APRTR3
-#define XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0
-#define XPB_XDMA_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL
-//XPB_RTR_DEST_MAP0
-#define XPB_RTR_DEST_MAP0__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP0__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L
-//XPB_RTR_DEST_MAP1
-#define XPB_RTR_DEST_MAP1__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP1__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L
-//XPB_RTR_DEST_MAP2
-#define XPB_RTR_DEST_MAP2__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP2__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L
-//XPB_RTR_DEST_MAP3
-#define XPB_RTR_DEST_MAP3__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP3__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L
-//XPB_RTR_DEST_MAP4
-#define XPB_RTR_DEST_MAP4__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP4__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP4__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP4__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP4__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP4__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP4__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP4__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP4__APRTR_SIZE_MASK 0x7C000000L
-//XPB_RTR_DEST_MAP5
-#define XPB_RTR_DEST_MAP5__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP5__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP5__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP5__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP5__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP5__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP5__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP5__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP5__APRTR_SIZE_MASK 0x7C000000L
-//XPB_RTR_DEST_MAP6
-#define XPB_RTR_DEST_MAP6__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP6__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP6__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP6__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP6__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP6__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP6__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP6__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP6__APRTR_SIZE_MASK 0x7C000000L
-//XPB_RTR_DEST_MAP7
-#define XPB_RTR_DEST_MAP7__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP7__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP7__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP7__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP7__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP7__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP7__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP7__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP7__APRTR_SIZE_MASK 0x7C000000L
-//XPB_RTR_DEST_MAP8
-#define XPB_RTR_DEST_MAP8__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP8__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP8__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP8__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP8__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP8__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP8__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP8__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP8__APRTR_SIZE_MASK 0x7C000000L
-//XPB_RTR_DEST_MAP9
-#define XPB_RTR_DEST_MAP9__NMR__SHIFT 0x0
-#define XPB_RTR_DEST_MAP9__DEST_OFFSET__SHIFT 0x1
-#define XPB_RTR_DEST_MAP9__DEST_SEL__SHIFT 0x14
-#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_RTR_DEST_MAP9__APRTR_SIZE__SHIFT 0x1a
-#define XPB_RTR_DEST_MAP9__NMR_MASK 0x00000001L
-#define XPB_RTR_DEST_MAP9__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_RTR_DEST_MAP9__DEST_SEL_MASK 0x00F00000L
-#define XPB_RTR_DEST_MAP9__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_RTR_DEST_MAP9__APRTR_SIZE_MASK 0x7C000000L
-//XPB_XDMA_RTR_DEST_MAP0
-#define XPB_XDMA_RTR_DEST_MAP0__NMR__SHIFT 0x0
-#define XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1
-#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14
-#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a
-#define XPB_XDMA_RTR_DEST_MAP0__NMR_MASK 0x00000001L
-#define XPB_XDMA_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L
-#define XPB_XDMA_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_XDMA_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L
-//XPB_XDMA_RTR_DEST_MAP1
-#define XPB_XDMA_RTR_DEST_MAP1__NMR__SHIFT 0x0
-#define XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1
-#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14
-#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a
-#define XPB_XDMA_RTR_DEST_MAP1__NMR_MASK 0x00000001L
-#define XPB_XDMA_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L
-#define XPB_XDMA_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_XDMA_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L
-//XPB_XDMA_RTR_DEST_MAP2
-#define XPB_XDMA_RTR_DEST_MAP2__NMR__SHIFT 0x0
-#define XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1
-#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14
-#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a
-#define XPB_XDMA_RTR_DEST_MAP2__NMR_MASK 0x00000001L
-#define XPB_XDMA_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L
-#define XPB_XDMA_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_XDMA_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L
-//XPB_XDMA_RTR_DEST_MAP3
-#define XPB_XDMA_RTR_DEST_MAP3__NMR__SHIFT 0x0
-#define XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1
-#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14
-#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18
-#define XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a
-#define XPB_XDMA_RTR_DEST_MAP3__NMR_MASK 0x00000001L
-#define XPB_XDMA_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL
-#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L
-#define XPB_XDMA_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L
-#define XPB_XDMA_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L
-//XPB_CLG_CFG0
-#define XPB_CLG_CFG0__WCB_NUM__SHIFT 0x0
-#define XPB_CLG_CFG0__P2P_BAR__SHIFT 0x7
-#define XPB_CLG_CFG0__HOST_FLUSH__SHIFT 0xa
-#define XPB_CLG_CFG0__WCB_NUM_MASK 0x0000000FL
-#define XPB_CLG_CFG0__P2P_BAR_MASK 0x00000380L
-#define XPB_CLG_CFG0__HOST_FLUSH_MASK 0x00003C00L
-//XPB_CLG_CFG1
-#define XPB_CLG_CFG1__WCB_NUM__SHIFT 0x0
-#define XPB_CLG_CFG1__P2P_BAR__SHIFT 0x7
-#define XPB_CLG_CFG1__HOST_FLUSH__SHIFT 0xa
-#define XPB_CLG_CFG1__WCB_NUM_MASK 0x0000000FL
-#define XPB_CLG_CFG1__P2P_BAR_MASK 0x00000380L
-#define XPB_CLG_CFG1__HOST_FLUSH_MASK 0x00003C00L
-//XPB_CLG_CFG2
-#define XPB_CLG_CFG2__WCB_NUM__SHIFT 0x0
-#define XPB_CLG_CFG2__P2P_BAR__SHIFT 0x7
-#define XPB_CLG_CFG2__HOST_FLUSH__SHIFT 0xa
-#define XPB_CLG_CFG2__WCB_NUM_MASK 0x0000000FL
-#define XPB_CLG_CFG2__P2P_BAR_MASK 0x00000380L
-#define XPB_CLG_CFG2__HOST_FLUSH_MASK 0x00003C00L
-//XPB_CLG_CFG3
-#define XPB_CLG_CFG3__WCB_NUM__SHIFT 0x0
-#define XPB_CLG_CFG3__P2P_BAR__SHIFT 0x7
-#define XPB_CLG_CFG3__HOST_FLUSH__SHIFT 0xa
-#define XPB_CLG_CFG3__WCB_NUM_MASK 0x0000000FL
-#define XPB_CLG_CFG3__P2P_BAR_MASK 0x00000380L
-#define XPB_CLG_CFG3__HOST_FLUSH_MASK 0x00003C00L
-//XPB_CLG_CFG4
-#define XPB_CLG_CFG4__WCB_NUM__SHIFT 0x0
-#define XPB_CLG_CFG4__P2P_BAR__SHIFT 0x7
-#define XPB_CLG_CFG4__HOST_FLUSH__SHIFT 0xa
-#define XPB_CLG_CFG4__WCB_NUM_MASK 0x0000000FL
-#define XPB_CLG_CFG4__P2P_BAR_MASK 0x00000380L
-#define XPB_CLG_CFG4__HOST_FLUSH_MASK 0x00003C00L
-//XPB_CLG_CFG5
-#define XPB_CLG_CFG5__WCB_NUM__SHIFT 0x0
-#define XPB_CLG_CFG5__P2P_BAR__SHIFT 0x7
-#define XPB_CLG_CFG5__HOST_FLUSH__SHIFT 0xa
-#define XPB_CLG_CFG5__WCB_NUM_MASK 0x0000000FL
-#define XPB_CLG_CFG5__P2P_BAR_MASK 0x00000380L
-#define XPB_CLG_CFG5__HOST_FLUSH_MASK 0x00003C00L
-//XPB_CLG_CFG6
-#define XPB_CLG_CFG6__WCB_NUM__SHIFT 0x0
-#define XPB_CLG_CFG6__P2P_BAR__SHIFT 0x7
-#define XPB_CLG_CFG6__HOST_FLUSH__SHIFT 0xa
-#define XPB_CLG_CFG6__WCB_NUM_MASK 0x0000000FL
-#define XPB_CLG_CFG6__P2P_BAR_MASK 0x00000380L
-#define XPB_CLG_CFG6__HOST_FLUSH_MASK 0x00003C00L
-//XPB_CLG_CFG7
-#define XPB_CLG_CFG7__WCB_NUM__SHIFT 0x0
-#define XPB_CLG_CFG7__P2P_BAR__SHIFT 0x7
-#define XPB_CLG_CFG7__HOST_FLUSH__SHIFT 0xa
-#define XPB_CLG_CFG7__WCB_NUM_MASK 0x0000000FL
-#define XPB_CLG_CFG7__P2P_BAR_MASK 0x00000380L
-#define XPB_CLG_CFG7__HOST_FLUSH_MASK 0x00003C00L
-//XPB_CLG_EXTRA
-#define XPB_CLG_EXTRA__CMP0_HIGH__SHIFT 0x0
-#define XPB_CLG_EXTRA__CMP0_LOW__SHIFT 0x6
-#define XPB_CLG_EXTRA__VLD0__SHIFT 0xb
-#define XPB_CLG_EXTRA__CLG0_NUM__SHIFT 0xc
-#define XPB_CLG_EXTRA__CMP1_HIGH__SHIFT 0xf
-#define XPB_CLG_EXTRA__CMP1_LOW__SHIFT 0x15
-#define XPB_CLG_EXTRA__VLD1__SHIFT 0x1a
-#define XPB_CLG_EXTRA__CLG1_NUM__SHIFT 0x1b
-#define XPB_CLG_EXTRA__CMP0_HIGH_MASK 0x0000003FL
-#define XPB_CLG_EXTRA__CMP0_LOW_MASK 0x000007C0L
-#define XPB_CLG_EXTRA__VLD0_MASK 0x00000800L
-#define XPB_CLG_EXTRA__CLG0_NUM_MASK 0x00007000L
-#define XPB_CLG_EXTRA__CMP1_HIGH_MASK 0x001F8000L
-#define XPB_CLG_EXTRA__CMP1_LOW_MASK 0x03E00000L
-#define XPB_CLG_EXTRA__VLD1_MASK 0x04000000L
-#define XPB_CLG_EXTRA__CLG1_NUM_MASK 0x38000000L
-//XPB_CLG_EXTRA_MSK
-#define XPB_CLG_EXTRA_MSK__MSK0_HIGH__SHIFT 0x0
-#define XPB_CLG_EXTRA_MSK__MSK0_LOW__SHIFT 0x6
-#define XPB_CLG_EXTRA_MSK__MSK1_HIGH__SHIFT 0xb
-#define XPB_CLG_EXTRA_MSK__MSK1_LOW__SHIFT 0x11
-#define XPB_CLG_EXTRA_MSK__MSK0_HIGH_MASK 0x0000003FL
-#define XPB_CLG_EXTRA_MSK__MSK0_LOW_MASK 0x000007C0L
-#define XPB_CLG_EXTRA_MSK__MSK1_HIGH_MASK 0x0001F800L
-#define XPB_CLG_EXTRA_MSK__MSK1_LOW_MASK 0x003E0000L
-//XPB_LB_ADDR
-#define XPB_LB_ADDR__CMP0__SHIFT 0x0
-#define XPB_LB_ADDR__MASK0__SHIFT 0xa
-#define XPB_LB_ADDR__CMP1__SHIFT 0x14
-#define XPB_LB_ADDR__MASK1__SHIFT 0x1a
-#define XPB_LB_ADDR__CMP0_MASK 0x000003FFL
-#define XPB_LB_ADDR__MASK0_MASK 0x000FFC00L
-#define XPB_LB_ADDR__CMP1_MASK 0x03F00000L
-#define XPB_LB_ADDR__MASK1_MASK 0xFC000000L
-//XPB_WCB_STS
-#define XPB_WCB_STS__PBUF_VLD__SHIFT 0x0
-#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x10
-#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x17
-#define XPB_WCB_STS__PBUF_VLD_MASK 0x0000FFFFL
-#define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT_MASK 0x007F0000L
-#define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT_MASK 0x3F800000L
-//XPB_HST_CFG
-#define XPB_HST_CFG__BAR_UP_WR_CMD__SHIFT 0x0
-#define XPB_HST_CFG__BAR_UP_WR_CMD_MASK 0x00000001L
-//XPB_P2P_BAR_CFG
-#define XPB_P2P_BAR_CFG__ADDR_SIZE__SHIFT 0x0
-#define XPB_P2P_BAR_CFG__SEND_BAR__SHIFT 0x4
-#define XPB_P2P_BAR_CFG__SNOOP__SHIFT 0x6
-#define XPB_P2P_BAR_CFG__SEND_DIS__SHIFT 0x7
-#define XPB_P2P_BAR_CFG__COMPRESS_DIS__SHIFT 0x8
-#define XPB_P2P_BAR_CFG__UPDATE_DIS__SHIFT 0x9
-#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR__SHIFT 0xa
-#define XPB_P2P_BAR_CFG__RD_EN__SHIFT 0xb
-#define XPB_P2P_BAR_CFG__ATC_TRANSLATED__SHIFT 0xc
-#define XPB_P2P_BAR_CFG__ADDR_SIZE_MASK 0x0000000FL
-#define XPB_P2P_BAR_CFG__SEND_BAR_MASK 0x00000030L
-#define XPB_P2P_BAR_CFG__SNOOP_MASK 0x00000040L
-#define XPB_P2P_BAR_CFG__SEND_DIS_MASK 0x00000080L
-#define XPB_P2P_BAR_CFG__COMPRESS_DIS_MASK 0x00000100L
-#define XPB_P2P_BAR_CFG__UPDATE_DIS_MASK 0x00000200L
-#define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR_MASK 0x00000400L
-#define XPB_P2P_BAR_CFG__RD_EN_MASK 0x00000800L
-#define XPB_P2P_BAR_CFG__ATC_TRANSLATED_MASK 0x00001000L
-//XPB_P2P_BAR0
-#define XPB_P2P_BAR0__HOST_FLUSH__SHIFT 0x0
-#define XPB_P2P_BAR0__REG_SYS_BAR__SHIFT 0x4
-#define XPB_P2P_BAR0__MEM_SYS_BAR__SHIFT 0x8
-#define XPB_P2P_BAR0__VALID__SHIFT 0xc
-#define XPB_P2P_BAR0__SEND_DIS__SHIFT 0xd
-#define XPB_P2P_BAR0__COMPRESS_DIS__SHIFT 0xe
-#define XPB_P2P_BAR0__RESERVED__SHIFT 0xf
-#define XPB_P2P_BAR0__ADDRESS__SHIFT 0x10
-#define XPB_P2P_BAR0__HOST_FLUSH_MASK 0x0000000FL
-#define XPB_P2P_BAR0__REG_SYS_BAR_MASK 0x000000F0L
-#define XPB_P2P_BAR0__MEM_SYS_BAR_MASK 0x00000F00L
-#define XPB_P2P_BAR0__VALID_MASK 0x00001000L
-#define XPB_P2P_BAR0__SEND_DIS_MASK 0x00002000L
-#define XPB_P2P_BAR0__COMPRESS_DIS_MASK 0x00004000L
-#define XPB_P2P_BAR0__RESERVED_MASK 0x00008000L
-#define XPB_P2P_BAR0__ADDRESS_MASK 0xFFFF0000L
-//XPB_P2P_BAR1
-#define XPB_P2P_BAR1__HOST_FLUSH__SHIFT 0x0
-#define XPB_P2P_BAR1__REG_SYS_BAR__SHIFT 0x4
-#define XPB_P2P_BAR1__MEM_SYS_BAR__SHIFT 0x8
-#define XPB_P2P_BAR1__VALID__SHIFT 0xc
-#define XPB_P2P_BAR1__SEND_DIS__SHIFT 0xd
-#define XPB_P2P_BAR1__COMPRESS_DIS__SHIFT 0xe
-#define XPB_P2P_BAR1__RESERVED__SHIFT 0xf
-#define XPB_P2P_BAR1__ADDRESS__SHIFT 0x10
-#define XPB_P2P_BAR1__HOST_FLUSH_MASK 0x0000000FL
-#define XPB_P2P_BAR1__REG_SYS_BAR_MASK 0x000000F0L
-#define XPB_P2P_BAR1__MEM_SYS_BAR_MASK 0x00000F00L
-#define XPB_P2P_BAR1__VALID_MASK 0x00001000L
-#define XPB_P2P_BAR1__SEND_DIS_MASK 0x00002000L
-#define XPB_P2P_BAR1__COMPRESS_DIS_MASK 0x00004000L
-#define XPB_P2P_BAR1__RESERVED_MASK 0x00008000L
-#define XPB_P2P_BAR1__ADDRESS_MASK 0xFFFF0000L
-//XPB_P2P_BAR2
-#define XPB_P2P_BAR2__HOST_FLUSH__SHIFT 0x0
-#define XPB_P2P_BAR2__REG_SYS_BAR__SHIFT 0x4
-#define XPB_P2P_BAR2__MEM_SYS_BAR__SHIFT 0x8
-#define XPB_P2P_BAR2__VALID__SHIFT 0xc
-#define XPB_P2P_BAR2__SEND_DIS__SHIFT 0xd
-#define XPB_P2P_BAR2__COMPRESS_DIS__SHIFT 0xe
-#define XPB_P2P_BAR2__RESERVED__SHIFT 0xf
-#define XPB_P2P_BAR2__ADDRESS__SHIFT 0x10
-#define XPB_P2P_BAR2__HOST_FLUSH_MASK 0x0000000FL
-#define XPB_P2P_BAR2__REG_SYS_BAR_MASK 0x000000F0L
-#define XPB_P2P_BAR2__MEM_SYS_BAR_MASK 0x00000F00L
-#define XPB_P2P_BAR2__VALID_MASK 0x00001000L
-#define XPB_P2P_BAR2__SEND_DIS_MASK 0x00002000L
-#define XPB_P2P_BAR2__COMPRESS_DIS_MASK 0x00004000L
-#define XPB_P2P_BAR2__RESERVED_MASK 0x00008000L
-#define XPB_P2P_BAR2__ADDRESS_MASK 0xFFFF0000L
-//XPB_P2P_BAR3
-#define XPB_P2P_BAR3__HOST_FLUSH__SHIFT 0x0
-#define XPB_P2P_BAR3__REG_SYS_BAR__SHIFT 0x4
-#define XPB_P2P_BAR3__MEM_SYS_BAR__SHIFT 0x8
-#define XPB_P2P_BAR3__VALID__SHIFT 0xc
-#define XPB_P2P_BAR3__SEND_DIS__SHIFT 0xd
-#define XPB_P2P_BAR3__COMPRESS_DIS__SHIFT 0xe
-#define XPB_P2P_BAR3__RESERVED__SHIFT 0xf
-#define XPB_P2P_BAR3__ADDRESS__SHIFT 0x10
-#define XPB_P2P_BAR3__HOST_FLUSH_MASK 0x0000000FL
-#define XPB_P2P_BAR3__REG_SYS_BAR_MASK 0x000000F0L
-#define XPB_P2P_BAR3__MEM_SYS_BAR_MASK 0x00000F00L
-#define XPB_P2P_BAR3__VALID_MASK 0x00001000L
-#define XPB_P2P_BAR3__SEND_DIS_MASK 0x00002000L
-#define XPB_P2P_BAR3__COMPRESS_DIS_MASK 0x00004000L
-#define XPB_P2P_BAR3__RESERVED_MASK 0x00008000L
-#define XPB_P2P_BAR3__ADDRESS_MASK 0xFFFF0000L
-//XPB_P2P_BAR4
-#define XPB_P2P_BAR4__HOST_FLUSH__SHIFT 0x0
-#define XPB_P2P_BAR4__REG_SYS_BAR__SHIFT 0x4
-#define XPB_P2P_BAR4__MEM_SYS_BAR__SHIFT 0x8
-#define XPB_P2P_BAR4__VALID__SHIFT 0xc
-#define XPB_P2P_BAR4__SEND_DIS__SHIFT 0xd
-#define XPB_P2P_BAR4__COMPRESS_DIS__SHIFT 0xe
-#define XPB_P2P_BAR4__RESERVED__SHIFT 0xf
-#define XPB_P2P_BAR4__ADDRESS__SHIFT 0x10
-#define XPB_P2P_BAR4__HOST_FLUSH_MASK 0x0000000FL
-#define XPB_P2P_BAR4__REG_SYS_BAR_MASK 0x000000F0L
-#define XPB_P2P_BAR4__MEM_SYS_BAR_MASK 0x00000F00L
-#define XPB_P2P_BAR4__VALID_MASK 0x00001000L
-#define XPB_P2P_BAR4__SEND_DIS_MASK 0x00002000L
-#define XPB_P2P_BAR4__COMPRESS_DIS_MASK 0x00004000L
-#define XPB_P2P_BAR4__RESERVED_MASK 0x00008000L
-#define XPB_P2P_BAR4__ADDRESS_MASK 0xFFFF0000L
-//XPB_P2P_BAR5
-#define XPB_P2P_BAR5__HOST_FLUSH__SHIFT 0x0
-#define XPB_P2P_BAR5__REG_SYS_BAR__SHIFT 0x4
-#define XPB_P2P_BAR5__MEM_SYS_BAR__SHIFT 0x8
-#define XPB_P2P_BAR5__VALID__SHIFT 0xc
-#define XPB_P2P_BAR5__SEND_DIS__SHIFT 0xd
-#define XPB_P2P_BAR5__COMPRESS_DIS__SHIFT 0xe
-#define XPB_P2P_BAR5__RESERVED__SHIFT 0xf
-#define XPB_P2P_BAR5__ADDRESS__SHIFT 0x10
-#define XPB_P2P_BAR5__HOST_FLUSH_MASK 0x0000000FL
-#define XPB_P2P_BAR5__REG_SYS_BAR_MASK 0x000000F0L
-#define XPB_P2P_BAR5__MEM_SYS_BAR_MASK 0x00000F00L
-#define XPB_P2P_BAR5__VALID_MASK 0x00001000L
-#define XPB_P2P_BAR5__SEND_DIS_MASK 0x00002000L
-#define XPB_P2P_BAR5__COMPRESS_DIS_MASK 0x00004000L
-#define XPB_P2P_BAR5__RESERVED_MASK 0x00008000L
-#define XPB_P2P_BAR5__ADDRESS_MASK 0xFFFF0000L
-//XPB_P2P_BAR6
-#define XPB_P2P_BAR6__HOST_FLUSH__SHIFT 0x0
-#define XPB_P2P_BAR6__REG_SYS_BAR__SHIFT 0x4
-#define XPB_P2P_BAR6__MEM_SYS_BAR__SHIFT 0x8
-#define XPB_P2P_BAR6__VALID__SHIFT 0xc
-#define XPB_P2P_BAR6__SEND_DIS__SHIFT 0xd
-#define XPB_P2P_BAR6__COMPRESS_DIS__SHIFT 0xe
-#define XPB_P2P_BAR6__RESERVED__SHIFT 0xf
-#define XPB_P2P_BAR6__ADDRESS__SHIFT 0x10
-#define XPB_P2P_BAR6__HOST_FLUSH_MASK 0x0000000FL
-#define XPB_P2P_BAR6__REG_SYS_BAR_MASK 0x000000F0L
-#define XPB_P2P_BAR6__MEM_SYS_BAR_MASK 0x00000F00L
-#define XPB_P2P_BAR6__VALID_MASK 0x00001000L
-#define XPB_P2P_BAR6__SEND_DIS_MASK 0x00002000L
-#define XPB_P2P_BAR6__COMPRESS_DIS_MASK 0x00004000L
-#define XPB_P2P_BAR6__RESERVED_MASK 0x00008000L
-#define XPB_P2P_BAR6__ADDRESS_MASK 0xFFFF0000L
-//XPB_P2P_BAR7
-#define XPB_P2P_BAR7__HOST_FLUSH__SHIFT 0x0
-#define XPB_P2P_BAR7__REG_SYS_BAR__SHIFT 0x4
-#define XPB_P2P_BAR7__MEM_SYS_BAR__SHIFT 0x8
-#define XPB_P2P_BAR7__VALID__SHIFT 0xc
-#define XPB_P2P_BAR7__SEND_DIS__SHIFT 0xd
-#define XPB_P2P_BAR7__COMPRESS_DIS__SHIFT 0xe
-#define XPB_P2P_BAR7__RESERVED__SHIFT 0xf
-#define XPB_P2P_BAR7__ADDRESS__SHIFT 0x10
-#define XPB_P2P_BAR7__HOST_FLUSH_MASK 0x0000000FL
-#define XPB_P2P_BAR7__REG_SYS_BAR_MASK 0x000000F0L
-#define XPB_P2P_BAR7__MEM_SYS_BAR_MASK 0x00000F00L
-#define XPB_P2P_BAR7__VALID_MASK 0x00001000L
-#define XPB_P2P_BAR7__SEND_DIS_MASK 0x00002000L
-#define XPB_P2P_BAR7__COMPRESS_DIS_MASK 0x00004000L
-#define XPB_P2P_BAR7__RESERVED_MASK 0x00008000L
-#define XPB_P2P_BAR7__ADDRESS_MASK 0xFFFF0000L
-//XPB_P2P_BAR_SETUP
-#define XPB_P2P_BAR_SETUP__SEL__SHIFT 0x0
-#define XPB_P2P_BAR_SETUP__REG_SYS_BAR__SHIFT 0x8
-#define XPB_P2P_BAR_SETUP__VALID__SHIFT 0xc
-#define XPB_P2P_BAR_SETUP__SEND_DIS__SHIFT 0xd
-#define XPB_P2P_BAR_SETUP__COMPRESS_DIS__SHIFT 0xe
-#define XPB_P2P_BAR_SETUP__RESERVED__SHIFT 0xf
-#define XPB_P2P_BAR_SETUP__ADDRESS__SHIFT 0x10
-#define XPB_P2P_BAR_SETUP__SEL_MASK 0x000000FFL
-#define XPB_P2P_BAR_SETUP__REG_SYS_BAR_MASK 0x00000F00L
-#define XPB_P2P_BAR_SETUP__VALID_MASK 0x00001000L
-#define XPB_P2P_BAR_SETUP__SEND_DIS_MASK 0x00002000L
-#define XPB_P2P_BAR_SETUP__COMPRESS_DIS_MASK 0x00004000L
-#define XPB_P2P_BAR_SETUP__RESERVED_MASK 0x00008000L
-#define XPB_P2P_BAR_SETUP__ADDRESS_MASK 0xFFFF0000L
-//XPB_P2P_BAR_DELTA_ABOVE
-#define XPB_P2P_BAR_DELTA_ABOVE__EN__SHIFT 0x0
-#define XPB_P2P_BAR_DELTA_ABOVE__DELTA__SHIFT 0x8
-#define XPB_P2P_BAR_DELTA_ABOVE__EN_MASK 0x000000FFL
-#define XPB_P2P_BAR_DELTA_ABOVE__DELTA_MASK 0x0FFFFF00L
-//XPB_P2P_BAR_DELTA_BELOW
-#define XPB_P2P_BAR_DELTA_BELOW__EN__SHIFT 0x0
-#define XPB_P2P_BAR_DELTA_BELOW__DELTA__SHIFT 0x8
-#define XPB_P2P_BAR_DELTA_BELOW__EN_MASK 0x000000FFL
-#define XPB_P2P_BAR_DELTA_BELOW__DELTA_MASK 0x0FFFFF00L
-//XPB_PEER_SYS_BAR0
-#define XPB_PEER_SYS_BAR0__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR0__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR0__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL
-//XPB_PEER_SYS_BAR1
-#define XPB_PEER_SYS_BAR1__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR1__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR1__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL
-//XPB_PEER_SYS_BAR2
-#define XPB_PEER_SYS_BAR2__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR2__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR2__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL
-//XPB_PEER_SYS_BAR3
-#define XPB_PEER_SYS_BAR3__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR3__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR3__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL
-//XPB_PEER_SYS_BAR4
-#define XPB_PEER_SYS_BAR4__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR4__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR4__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR4__ADDR_MASK 0xFFFFFFFEL
-//XPB_PEER_SYS_BAR5
-#define XPB_PEER_SYS_BAR5__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR5__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR5__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR5__ADDR_MASK 0xFFFFFFFEL
-//XPB_PEER_SYS_BAR6
-#define XPB_PEER_SYS_BAR6__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR6__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR6__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR6__ADDR_MASK 0xFFFFFFFEL
-//XPB_PEER_SYS_BAR7
-#define XPB_PEER_SYS_BAR7__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR7__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR7__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR7__ADDR_MASK 0xFFFFFFFEL
-//XPB_PEER_SYS_BAR8
-#define XPB_PEER_SYS_BAR8__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR8__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR8__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR8__ADDR_MASK 0xFFFFFFFEL
-//XPB_PEER_SYS_BAR9
-#define XPB_PEER_SYS_BAR9__VALID__SHIFT 0x0
-#define XPB_PEER_SYS_BAR9__ADDR__SHIFT 0x1
-#define XPB_PEER_SYS_BAR9__VALID_MASK 0x00000001L
-#define XPB_PEER_SYS_BAR9__ADDR_MASK 0xFFFFFFFEL
-//XPB_XDMA_PEER_SYS_BAR0
-#define XPB_XDMA_PEER_SYS_BAR0__VALID__SHIFT 0x0
-#define XPB_XDMA_PEER_SYS_BAR0__ADDR__SHIFT 0x1
-#define XPB_XDMA_PEER_SYS_BAR0__VALID_MASK 0x00000001L
-#define XPB_XDMA_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL
-//XPB_XDMA_PEER_SYS_BAR1
-#define XPB_XDMA_PEER_SYS_BAR1__VALID__SHIFT 0x0
-#define XPB_XDMA_PEER_SYS_BAR1__ADDR__SHIFT 0x1
-#define XPB_XDMA_PEER_SYS_BAR1__VALID_MASK 0x00000001L
-#define XPB_XDMA_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL
-//XPB_XDMA_PEER_SYS_BAR2
-#define XPB_XDMA_PEER_SYS_BAR2__VALID__SHIFT 0x0
-#define XPB_XDMA_PEER_SYS_BAR2__ADDR__SHIFT 0x1
-#define XPB_XDMA_PEER_SYS_BAR2__VALID_MASK 0x00000001L
-#define XPB_XDMA_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL
-//XPB_XDMA_PEER_SYS_BAR3
-#define XPB_XDMA_PEER_SYS_BAR3__VALID__SHIFT 0x0
-#define XPB_XDMA_PEER_SYS_BAR3__ADDR__SHIFT 0x1
-#define XPB_XDMA_PEER_SYS_BAR3__VALID_MASK 0x00000001L
-#define XPB_XDMA_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL
-//XPB_CLK_GAT
-#define XPB_CLK_GAT__ONDLY__SHIFT 0x0
-#define XPB_CLK_GAT__OFFDLY__SHIFT 0x6
-#define XPB_CLK_GAT__RDYDLY__SHIFT 0xc
-#define XPB_CLK_GAT__ENABLE__SHIFT 0x12
-#define XPB_CLK_GAT__MEM_LS_ENABLE__SHIFT 0x13
-#define XPB_CLK_GAT__ONDLY_MASK 0x0000003FL
-#define XPB_CLK_GAT__OFFDLY_MASK 0x00000FC0L
-#define XPB_CLK_GAT__RDYDLY_MASK 0x0003F000L
-#define XPB_CLK_GAT__ENABLE_MASK 0x00040000L
-#define XPB_CLK_GAT__MEM_LS_ENABLE_MASK 0x00080000L
-//XPB_INTF_CFG
-#define XPB_INTF_CFG__RPB_WRREQ_CRD__SHIFT 0x0
-#define XPB_INTF_CFG__MC_WRRET_ASK__SHIFT 0x8
-#define XPB_INTF_CFG__XSP_REQ_CRD__SHIFT 0x10
-#define XPB_INTF_CFG__BIF_REG_SNOOP_SEL__SHIFT 0x17
-#define XPB_INTF_CFG__BIF_REG_SNOOP_VAL__SHIFT 0x18
-#define XPB_INTF_CFG__BIF_MEM_SNOOP_SEL__SHIFT 0x19
-#define XPB_INTF_CFG__BIF_MEM_SNOOP_VAL__SHIFT 0x1a
-#define XPB_INTF_CFG__XSP_SNOOP_SEL__SHIFT 0x1b
-#define XPB_INTF_CFG__XSP_SNOOP_VAL__SHIFT 0x1d
-#define XPB_INTF_CFG__XSP_ORDERING_SEL__SHIFT 0x1e
-#define XPB_INTF_CFG__XSP_ORDERING_VAL__SHIFT 0x1f
-#define XPB_INTF_CFG__RPB_WRREQ_CRD_MASK 0x000000FFL
-#define XPB_INTF_CFG__MC_WRRET_ASK_MASK 0x0000FF00L
-#define XPB_INTF_CFG__XSP_REQ_CRD_MASK 0x007F0000L
-#define XPB_INTF_CFG__BIF_REG_SNOOP_SEL_MASK 0x00800000L
-#define XPB_INTF_CFG__BIF_REG_SNOOP_VAL_MASK 0x01000000L
-#define XPB_INTF_CFG__BIF_MEM_SNOOP_SEL_MASK 0x02000000L
-#define XPB_INTF_CFG__BIF_MEM_SNOOP_VAL_MASK 0x04000000L
-#define XPB_INTF_CFG__XSP_SNOOP_SEL_MASK 0x18000000L
-#define XPB_INTF_CFG__XSP_SNOOP_VAL_MASK 0x20000000L
-#define XPB_INTF_CFG__XSP_ORDERING_SEL_MASK 0x40000000L
-#define XPB_INTF_CFG__XSP_ORDERING_VAL_MASK 0x80000000L
-//XPB_INTF_STS
-#define XPB_INTF_STS__RPB_WRREQ_CRD__SHIFT 0x0
-#define XPB_INTF_STS__XSP_REQ_CRD__SHIFT 0x8
-#define XPB_INTF_STS__HOP_DATA_BUF_FULL__SHIFT 0xf
-#define XPB_INTF_STS__HOP_ATTR_BUF_FULL__SHIFT 0x10
-#define XPB_INTF_STS__CNS_BUF_FULL__SHIFT 0x11
-#define XPB_INTF_STS__CNS_BUF_BUSY__SHIFT 0x12
-#define XPB_INTF_STS__RPB_RDREQ_CRD__SHIFT 0x13
-#define XPB_INTF_STS__RPB_WRREQ_CRD_MASK 0x000000FFL
-#define XPB_INTF_STS__XSP_REQ_CRD_MASK 0x00007F00L
-#define XPB_INTF_STS__HOP_DATA_BUF_FULL_MASK 0x00008000L
-#define XPB_INTF_STS__HOP_ATTR_BUF_FULL_MASK 0x00010000L
-#define XPB_INTF_STS__CNS_BUF_FULL_MASK 0x00020000L
-#define XPB_INTF_STS__CNS_BUF_BUSY_MASK 0x00040000L
-#define XPB_INTF_STS__RPB_RDREQ_CRD_MASK 0x07F80000L
-//XPB_PIPE_STS
-#define XPB_PIPE_STS__WCB_ANY_PBUF__SHIFT 0x0
-#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x1
-#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x8
-#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL__SHIFT 0xf
-#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL__SHIFT 0x10
-#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL__SHIFT 0x11
-#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL__SHIFT 0x12
-#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL__SHIFT 0x13
-#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL__SHIFT 0x14
-#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL__SHIFT 0x15
-#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL__SHIFT 0x16
-#define XPB_PIPE_STS__RET_BUF_FULL__SHIFT 0x17
-#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS__SHIFT 0x18
-#define XPB_PIPE_STS__WCB_ANY_PBUF_MASK 0x00000001L
-#define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT_MASK 0x000000FEL
-#define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT_MASK 0x00007F00L
-#define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL_MASK 0x00008000L
-#define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL_MASK 0x00010000L
-#define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL_MASK 0x00020000L
-#define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL_MASK 0x00040000L
-#define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL_MASK 0x00080000L
-#define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL_MASK 0x00100000L
-#define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL_MASK 0x00200000L
-#define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL_MASK 0x00400000L
-#define XPB_PIPE_STS__RET_BUF_FULL_MASK 0x00800000L
-#define XPB_PIPE_STS__XPB_CLK_BUSY_BITS_MASK 0xFF000000L
-//XPB_SUB_CTRL
-#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB__SHIFT 0x0
-#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ__SHIFT 0x1
-#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ__SHIFT 0x2
-#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ__SHIFT 0x3
-#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ__SHIFT 0x4
-#define XPB_SUB_CTRL__STALL_WCB_SID_REQ__SHIFT 0x5
-#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND__SHIFT 0x6
-#define XPB_SUB_CTRL__STALL_WCB_HST_REQ__SHIFT 0x7
-#define XPB_SUB_CTRL__STALL_HST_HOP_REQ__SHIFT 0x8
-#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR__SHIFT 0x9
-#define XPB_SUB_CTRL__RESET_CNS__SHIFT 0xa
-#define XPB_SUB_CTRL__RESET_RTR__SHIFT 0xb
-#define XPB_SUB_CTRL__RESET_RET__SHIFT 0xc
-#define XPB_SUB_CTRL__RESET_MAP__SHIFT 0xd
-#define XPB_SUB_CTRL__RESET_WCB__SHIFT 0xe
-#define XPB_SUB_CTRL__RESET_HST__SHIFT 0xf
-#define XPB_SUB_CTRL__RESET_HOP__SHIFT 0x10
-#define XPB_SUB_CTRL__RESET_SID__SHIFT 0x11
-#define XPB_SUB_CTRL__RESET_SRB__SHIFT 0x12
-#define XPB_SUB_CTRL__RESET_CGR__SHIFT 0x13
-#define XPB_SUB_CTRL__WRREQ_BYPASS_XPB_MASK 0x00000001L
-#define XPB_SUB_CTRL__STALL_CNS_RTR_REQ_MASK 0x00000002L
-#define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ_MASK 0x00000004L
-#define XPB_SUB_CTRL__STALL_RTR_MAP_REQ_MASK 0x00000008L
-#define XPB_SUB_CTRL__STALL_MAP_WCB_REQ_MASK 0x00000010L
-#define XPB_SUB_CTRL__STALL_WCB_SID_REQ_MASK 0x00000020L
-#define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND_MASK 0x00000040L
-#define XPB_SUB_CTRL__STALL_WCB_HST_REQ_MASK 0x00000080L
-#define XPB_SUB_CTRL__STALL_HST_HOP_REQ_MASK 0x00000100L
-#define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR_MASK 0x00000200L
-#define XPB_SUB_CTRL__RESET_CNS_MASK 0x00000400L
-#define XPB_SUB_CTRL__RESET_RTR_MASK 0x00000800L
-#define XPB_SUB_CTRL__RESET_RET_MASK 0x00001000L
-#define XPB_SUB_CTRL__RESET_MAP_MASK 0x00002000L
-#define XPB_SUB_CTRL__RESET_WCB_MASK 0x00004000L
-#define XPB_SUB_CTRL__RESET_HST_MASK 0x00008000L
-#define XPB_SUB_CTRL__RESET_HOP_MASK 0x00010000L
-#define XPB_SUB_CTRL__RESET_SID_MASK 0x00020000L
-#define XPB_SUB_CTRL__RESET_SRB_MASK 0x00040000L
-#define XPB_SUB_CTRL__RESET_CGR_MASK 0x00080000L
-//XPB_MAP_INVERT_FLUSH_NUM_LSB
-#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM__SHIFT 0x0
-#define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM_MASK 0x0000FFFFL
-//XPB_PERF_KNOBS
-#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH__SHIFT 0x0
-#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH__SHIFT 0x6
-#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH__SHIFT 0xc
-#define XPB_PERF_KNOBS__CNS_FIFO_DEPTH_MASK 0x0000003FL
-#define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH_MASK 0x00000FC0L
-#define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH_MASK 0x0003F000L
-//XPB_STICKY
-#define XPB_STICKY__BITS__SHIFT 0x0
-#define XPB_STICKY__BITS_MASK 0xFFFFFFFFL
-//XPB_STICKY_W1C
-#define XPB_STICKY_W1C__BITS__SHIFT 0x0
-#define XPB_STICKY_W1C__BITS_MASK 0xFFFFFFFFL
-//XPB_MISC_CFG
-#define XPB_MISC_CFG__FIELDNAME0__SHIFT 0x0
-#define XPB_MISC_CFG__FIELDNAME1__SHIFT 0x8
-#define XPB_MISC_CFG__FIELDNAME2__SHIFT 0x10
-#define XPB_MISC_CFG__FIELDNAME3__SHIFT 0x18
-#define XPB_MISC_CFG__TRIGGERNAME__SHIFT 0x1f
-#define XPB_MISC_CFG__FIELDNAME0_MASK 0x000000FFL
-#define XPB_MISC_CFG__FIELDNAME1_MASK 0x0000FF00L
-#define XPB_MISC_CFG__FIELDNAME2_MASK 0x00FF0000L
-#define XPB_MISC_CFG__FIELDNAME3_MASK 0x7F000000L
-#define XPB_MISC_CFG__TRIGGERNAME_MASK 0x80000000L
-//XPB_INTF_CFG2
-#define XPB_INTF_CFG2__RPB_RDREQ_CRD__SHIFT 0x0
-#define XPB_INTF_CFG2__RPB_RDREQ_CRD_MASK 0x000000FFL
-//XPB_CLG_EXTRA_RD
-#define XPB_CLG_EXTRA_RD__CMP0_HIGH__SHIFT 0x0
-#define XPB_CLG_EXTRA_RD__CMP0_LOW__SHIFT 0x6
-#define XPB_CLG_EXTRA_RD__VLD0__SHIFT 0xb
-#define XPB_CLG_EXTRA_RD__CLG0_NUM__SHIFT 0xc
-#define XPB_CLG_EXTRA_RD__CMP1_HIGH__SHIFT 0xf
-#define XPB_CLG_EXTRA_RD__CMP1_LOW__SHIFT 0x15
-#define XPB_CLG_EXTRA_RD__VLD1__SHIFT 0x1a
-#define XPB_CLG_EXTRA_RD__CLG1_NUM__SHIFT 0x1b
-#define XPB_CLG_EXTRA_RD__CMP0_HIGH_MASK 0x0000003FL
-#define XPB_CLG_EXTRA_RD__CMP0_LOW_MASK 0x000007C0L
-#define XPB_CLG_EXTRA_RD__VLD0_MASK 0x00000800L
-#define XPB_CLG_EXTRA_RD__CLG0_NUM_MASK 0x00007000L
-#define XPB_CLG_EXTRA_RD__CMP1_HIGH_MASK 0x001F8000L
-#define XPB_CLG_EXTRA_RD__CMP1_LOW_MASK 0x03E00000L
-#define XPB_CLG_EXTRA_RD__VLD1_MASK 0x04000000L
-#define XPB_CLG_EXTRA_RD__CLG1_NUM_MASK 0x38000000L
-//XPB_CLG_EXTRA_MSK_RD
-#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH__SHIFT 0x0
-#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW__SHIFT 0x6
-#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH__SHIFT 0xb
-#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW__SHIFT 0x11
-#define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH_MASK 0x0000003FL
-#define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW_MASK 0x000007C0L
-#define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH_MASK 0x0001F800L
-#define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW_MASK 0x003E0000L
-//XPB_CLG_GFX_MATCH
-#define XPB_CLG_GFX_MATCH__FARBIRC0_ID__SHIFT 0x0
-#define XPB_CLG_GFX_MATCH__FARBIRC1_ID__SHIFT 0x6
-#define XPB_CLG_GFX_MATCH__FARBIRC2_ID__SHIFT 0xc
-#define XPB_CLG_GFX_MATCH__FARBIRC3_ID__SHIFT 0x12
-#define XPB_CLG_GFX_MATCH__FARBIRC0_VLD__SHIFT 0x18
-#define XPB_CLG_GFX_MATCH__FARBIRC1_VLD__SHIFT 0x19
-#define XPB_CLG_GFX_MATCH__FARBIRC2_VLD__SHIFT 0x1a
-#define XPB_CLG_GFX_MATCH__FARBIRC3_VLD__SHIFT 0x1b
-#define XPB_CLG_GFX_MATCH__FARBIRC0_ID_MASK 0x0000003FL
-#define XPB_CLG_GFX_MATCH__FARBIRC1_ID_MASK 0x00000FC0L
-#define XPB_CLG_GFX_MATCH__FARBIRC2_ID_MASK 0x0003F000L
-#define XPB_CLG_GFX_MATCH__FARBIRC3_ID_MASK 0x00FC0000L
-#define XPB_CLG_GFX_MATCH__FARBIRC0_VLD_MASK 0x01000000L
-#define XPB_CLG_GFX_MATCH__FARBIRC1_VLD_MASK 0x02000000L
-#define XPB_CLG_GFX_MATCH__FARBIRC2_VLD_MASK 0x04000000L
-#define XPB_CLG_GFX_MATCH__FARBIRC3_VLD_MASK 0x08000000L
-//XPB_CLG_GFX_MATCH_MSK
-#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
-#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x6
-#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0xc
-#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x12
-#define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x0000003FL
-#define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x00000FC0L
-#define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x0003F000L
-#define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0x00FC0000L
-//XPB_CLG_MM_MATCH
-#define XPB_CLG_MM_MATCH__FARBIRC0_ID__SHIFT 0x0
-#define XPB_CLG_MM_MATCH__FARBIRC1_ID__SHIFT 0x6
-#define XPB_CLG_MM_MATCH__FARBIRC2_ID__SHIFT 0xc
-#define XPB_CLG_MM_MATCH__FARBIRC3_ID__SHIFT 0x12
-#define XPB_CLG_MM_MATCH__FARBIRC0_VLD__SHIFT 0x18
-#define XPB_CLG_MM_MATCH__FARBIRC1_VLD__SHIFT 0x19
-#define XPB_CLG_MM_MATCH__FARBIRC2_VLD__SHIFT 0x1a
-#define XPB_CLG_MM_MATCH__FARBIRC3_VLD__SHIFT 0x1b
-#define XPB_CLG_MM_MATCH__FARBIRC0_ID_MASK 0x0000003FL
-#define XPB_CLG_MM_MATCH__FARBIRC1_ID_MASK 0x00000FC0L
-#define XPB_CLG_MM_MATCH__FARBIRC2_ID_MASK 0x0003F000L
-#define XPB_CLG_MM_MATCH__FARBIRC3_ID_MASK 0x00FC0000L
-#define XPB_CLG_MM_MATCH__FARBIRC0_VLD_MASK 0x01000000L
-#define XPB_CLG_MM_MATCH__FARBIRC1_VLD_MASK 0x02000000L
-#define XPB_CLG_MM_MATCH__FARBIRC2_VLD_MASK 0x04000000L
-#define XPB_CLG_MM_MATCH__FARBIRC3_VLD_MASK 0x08000000L
-//XPB_CLG_MM_MATCH_MSK
-#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0
-#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x6
-#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0xc
-#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x12
-#define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x0000003FL
-#define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x00000FC0L
-#define XPB_CLG_MM_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x0003F000L
-#define XPB_CLG_MM_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0x00FC0000L
-//XPB_CLG_GFX_UNITID_MAPPING0
-#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_GFX_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_GFX_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_GFX_UNITID_MAPPING1
-#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_GFX_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_GFX_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_GFX_UNITID_MAPPING2
-#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_GFX_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_GFX_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_GFX_UNITID_MAPPING3
-#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_GFX_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_GFX_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_GFX_UNITID_MAPPING4
-#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_GFX_UNITID_MAPPING4__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_GFX_UNITID_MAPPING4__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_GFX_UNITID_MAPPING5
-#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_GFX_UNITID_MAPPING5__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_GFX_UNITID_MAPPING5__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_GFX_UNITID_MAPPING6
-#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_GFX_UNITID_MAPPING6__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_GFX_UNITID_MAPPING6__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_GFX_UNITID_MAPPING7
-#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_GFX_UNITID_MAPPING7__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_GFX_UNITID_MAPPING7__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_MM_UNITID_MAPPING0
-#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_MM_UNITID_MAPPING0__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_MM_UNITID_MAPPING0__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_MM_UNITID_MAPPING1
-#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_MM_UNITID_MAPPING1__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_MM_UNITID_MAPPING1__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_MM_UNITID_MAPPING2
-#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_MM_UNITID_MAPPING2__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_MM_UNITID_MAPPING2__DEST_CLG_NUM_MASK 0x000001C0L
-//XPB_CLG_MM_UNITID_MAPPING3
-#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW__SHIFT 0x0
-#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD__SHIFT 0x5
-#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM__SHIFT 0x6
-#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_LOW_MASK 0x0000001FL
-#define XPB_CLG_MM_UNITID_MAPPING3__UNITID_VLD_MASK 0x00000020L
-#define XPB_CLG_MM_UNITID_MAPPING3__DEST_CLG_NUM_MASK 0x000001C0L
-
-
-// addressBlock: athub_rpbdec
-//RPB_PASSPW_CONF
-#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE__SHIFT 0x0
-#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE__SHIFT 0x1
-#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE__SHIFT 0x2
-#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE__SHIFT 0x3
-#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE__SHIFT 0x4
-#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE__SHIFT 0x5
-#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE__SHIFT 0x6
-#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE__SHIFT 0x7
-#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE__SHIFT 0x8
-#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE__SHIFT 0x9
-#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE__SHIFT 0xa
-#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE_EN__SHIFT 0xb
-#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN__SHIFT 0xc
-#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE_EN__SHIFT 0xd
-#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE__SHIFT 0xe
-#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN__SHIFT 0xf
-#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE__SHIFT 0x10
-#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN__SHIFT 0x11
-#define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE_MASK 0x00000001L
-#define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE_MASK 0x00000002L
-#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE_MASK 0x00000004L
-#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_MASK 0x00000008L
-#define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE_MASK 0x00000010L
-#define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE_MASK 0x00000020L
-#define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE_MASK 0x00000040L
-#define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE_MASK 0x00000080L
-#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE_MASK 0x00000100L
-#define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE_MASK 0x00000200L
-#define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE_MASK 0x00000400L
-#define RPB_PASSPW_CONF__ATC_TR_PASSPW_OVERRIDE_EN_MASK 0x00000800L
-#define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN_MASK 0x00001000L
-#define RPB_PASSPW_CONF__ATC_RSPPASSPW_OVERRIDE_EN_MASK 0x00002000L
-#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_MASK 0x00004000L
-#define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN_MASK 0x00008000L
-#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_MASK 0x00010000L
-#define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN_MASK 0x00020000L
-//RPB_BLOCKLEVEL_CONF
-#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE__SHIFT 0x0
-#define RPB_BLOCKLEVEL_CONF__ATC_TR_BLOCKLEVEL__SHIFT 0x2
-#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL__SHIFT 0x4
-#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL__SHIFT 0x6
-#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE__SHIFT 0x8
-#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE__SHIFT 0xa
-#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE__SHIFT 0xc
-#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0xe
-#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0xf
-#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x10
-#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x11
-#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_MASK 0x00000003L
-#define RPB_BLOCKLEVEL_CONF__ATC_TR_BLOCKLEVEL_MASK 0x0000000CL
-#define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL_MASK 0x00000030L
-#define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL_MASK 0x000000C0L
-#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_MASK 0x00000300L
-#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_MASK 0x00000C00L
-#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_MASK 0x00003000L
-#define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00004000L
-#define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00008000L
-#define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00010000L
-#define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00020000L
-//RPB_TAG_CONF
-#define RPB_TAG_CONF__RPB_ATS_TR__SHIFT 0x0
-#define RPB_TAG_CONF__RPB_IO_WR__SHIFT 0x8
-#define RPB_TAG_CONF__RPB_ATS_PR__SHIFT 0x10
-#define RPB_TAG_CONF__RPB_ATS_TR_MASK 0x000000FFL
-#define RPB_TAG_CONF__RPB_IO_WR_MASK 0x0000FF00L
-#define RPB_TAG_CONF__RPB_ATS_PR_MASK 0x00FF0000L
-//RPB_EFF_CNTL
-#define RPB_EFF_CNTL__WR_LAZY_TIMER__SHIFT 0x0
-#define RPB_EFF_CNTL__RD_LAZY_TIMER__SHIFT 0x8
-#define RPB_EFF_CNTL__WR_LAZY_TIMER_MASK 0x000000FFL
-#define RPB_EFF_CNTL__RD_LAZY_TIMER_MASK 0x0000FF00L
-//RPB_ARB_CNTL
-#define RPB_ARB_CNTL__RD_SWITCH_NUM__SHIFT 0x0
-#define RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x8
-#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM__SHIFT 0x10
-#define RPB_ARB_CNTL__ARB_MODE__SHIFT 0x18
-#define RPB_ARB_CNTL__SWITCH_NUM_MODE__SHIFT 0x19
-#define RPB_ARB_CNTL__RD_SWITCH_NUM_MASK 0x000000FFL
-#define RPB_ARB_CNTL__WR_SWITCH_NUM_MASK 0x0000FF00L
-#define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM_MASK 0x00FF0000L
-#define RPB_ARB_CNTL__ARB_MODE_MASK 0x01000000L
-#define RPB_ARB_CNTL__SWITCH_NUM_MODE_MASK 0x02000000L
-//RPB_ARB_CNTL2
-#define RPB_ARB_CNTL2__P2P_SWITCH_NUM__SHIFT 0x0
-#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM__SHIFT 0x8
-#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM__SHIFT 0x10
-#define RPB_ARB_CNTL2__P2P_SWITCH_NUM_MASK 0x000000FFL
-#define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM_MASK 0x0000FF00L
-#define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM_MASK 0x00FF0000L
-//RPB_BIF_CNTL
-#define RPB_BIF_CNTL__VC0_SWITCH_NUM__SHIFT 0x0
-#define RPB_BIF_CNTL__VC1_SWITCH_NUM__SHIFT 0x8
-#define RPB_BIF_CNTL__ARB_MODE__SHIFT 0x10
-#define RPB_BIF_CNTL__DRAIN_VC_NUM__SHIFT 0x11
-#define RPB_BIF_CNTL__SWITCH_ENABLE__SHIFT 0x12
-#define RPB_BIF_CNTL__SWITCH_THRESHOLD__SHIFT 0x13
-#define RPB_BIF_CNTL__PAGE_PRI_EN__SHIFT 0x1b
-#define RPB_BIF_CNTL__TR_PRI_EN__SHIFT 0x1c
-#define RPB_BIF_CNTL__VC0_CHAINED_OVERRIDE__SHIFT 0x1d
-#define RPB_BIF_CNTL__PARITY_CHECK_EN__SHIFT 0x1e
-#define RPB_BIF_CNTL__VC0_SWITCH_NUM_MASK 0x000000FFL
-#define RPB_BIF_CNTL__VC1_SWITCH_NUM_MASK 0x0000FF00L
-#define RPB_BIF_CNTL__ARB_MODE_MASK 0x00010000L
-#define RPB_BIF_CNTL__DRAIN_VC_NUM_MASK 0x00020000L
-#define RPB_BIF_CNTL__SWITCH_ENABLE_MASK 0x00040000L
-#define RPB_BIF_CNTL__SWITCH_THRESHOLD_MASK 0x07F80000L
-#define RPB_BIF_CNTL__PAGE_PRI_EN_MASK 0x08000000L
-#define RPB_BIF_CNTL__TR_PRI_EN_MASK 0x10000000L
-#define RPB_BIF_CNTL__VC0_CHAINED_OVERRIDE_MASK 0x20000000L
-#define RPB_BIF_CNTL__PARITY_CHECK_EN_MASK 0x40000000L
-//RPB_WR_SWITCH_CNTL
-#define RPB_WR_SWITCH_CNTL__QUEUE0_SWITCH_NUM__SHIFT 0x0
-#define RPB_WR_SWITCH_CNTL__QUEUE1_SWITCH_NUM__SHIFT 0x7
-#define RPB_WR_SWITCH_CNTL__QUEUE2_SWITCH_NUM__SHIFT 0xe
-#define RPB_WR_SWITCH_CNTL__QUEUE3_SWITCH_NUM__SHIFT 0x15
-#define RPB_WR_SWITCH_CNTL__SWITCH_NUM_MODE__SHIFT 0x1c
-#define RPB_WR_SWITCH_CNTL__QUEUE0_SWITCH_NUM_MASK 0x0000007FL
-#define RPB_WR_SWITCH_CNTL__QUEUE1_SWITCH_NUM_MASK 0x00003F80L
-#define RPB_WR_SWITCH_CNTL__QUEUE2_SWITCH_NUM_MASK 0x001FC000L
-#define RPB_WR_SWITCH_CNTL__QUEUE3_SWITCH_NUM_MASK 0x0FE00000L
-#define RPB_WR_SWITCH_CNTL__SWITCH_NUM_MODE_MASK 0x10000000L
-//RPB_RD_SWITCH_CNTL
-#define RPB_RD_SWITCH_CNTL__QUEUE0_SWITCH_NUM__SHIFT 0x0
-#define RPB_RD_SWITCH_CNTL__QUEUE1_SWITCH_NUM__SHIFT 0x7
-#define RPB_RD_SWITCH_CNTL__QUEUE2_SWITCH_NUM__SHIFT 0xe
-#define RPB_RD_SWITCH_CNTL__QUEUE3_SWITCH_NUM__SHIFT 0x15
-#define RPB_RD_SWITCH_CNTL__SWITCH_NUM_MODE__SHIFT 0x1c
-#define RPB_RD_SWITCH_CNTL__QUEUE0_SWITCH_NUM_MASK 0x0000007FL
-#define RPB_RD_SWITCH_CNTL__QUEUE1_SWITCH_NUM_MASK 0x00003F80L
-#define RPB_RD_SWITCH_CNTL__QUEUE2_SWITCH_NUM_MASK 0x001FC000L
-#define RPB_RD_SWITCH_CNTL__QUEUE3_SWITCH_NUM_MASK 0x0FE00000L
-#define RPB_RD_SWITCH_CNTL__SWITCH_NUM_MODE_MASK 0x10000000L
-//RPB_CID_QUEUE_WR
-#define RPB_CID_QUEUE_WR__CLIENT_ID_LOW__SHIFT 0x0
-#define RPB_CID_QUEUE_WR__CLIENT_ID_HIGH__SHIFT 0x5
-#define RPB_CID_QUEUE_WR__UPDATE_MODE__SHIFT 0xb
-#define RPB_CID_QUEUE_WR__WRITE_QUEUE__SHIFT 0xc
-#define RPB_CID_QUEUE_WR__READ_QUEUE__SHIFT 0xf
-#define RPB_CID_QUEUE_WR__UPDATE__SHIFT 0x12
-#define RPB_CID_QUEUE_WR__CLIENT_ID_LOW_MASK 0x0000001FL
-#define RPB_CID_QUEUE_WR__CLIENT_ID_HIGH_MASK 0x000007E0L
-#define RPB_CID_QUEUE_WR__UPDATE_MODE_MASK 0x00000800L
-#define RPB_CID_QUEUE_WR__WRITE_QUEUE_MASK 0x00007000L
-#define RPB_CID_QUEUE_WR__READ_QUEUE_MASK 0x00038000L
-#define RPB_CID_QUEUE_WR__UPDATE_MASK 0x00040000L
-//RPB_CID_QUEUE_RD
-#define RPB_CID_QUEUE_RD__CLIENT_ID_LOW__SHIFT 0x0
-#define RPB_CID_QUEUE_RD__CLIENT_ID_HIGH__SHIFT 0x5
-#define RPB_CID_QUEUE_RD__WRITE_QUEUE__SHIFT 0xb
-#define RPB_CID_QUEUE_RD__READ_QUEUE__SHIFT 0xe
-#define RPB_CID_QUEUE_RD__CLIENT_ID_LOW_MASK 0x0000001FL
-#define RPB_CID_QUEUE_RD__CLIENT_ID_HIGH_MASK 0x000007E0L
-#define RPB_CID_QUEUE_RD__WRITE_QUEUE_MASK 0x00003800L
-#define RPB_CID_QUEUE_RD__READ_QUEUE_MASK 0x0001C000L
-//RPB_CID_QUEUE_EX
-#define RPB_CID_QUEUE_EX__START__SHIFT 0x0
-#define RPB_CID_QUEUE_EX__OFFSET__SHIFT 0x1
-#define RPB_CID_QUEUE_EX__START_MASK 0x00000001L
-#define RPB_CID_QUEUE_EX__OFFSET_MASK 0x000001FEL
-//RPB_CID_QUEUE_EX_DATA
-#define RPB_CID_QUEUE_EX_DATA__WRITE_ENTRIES__SHIFT 0x0
-#define RPB_CID_QUEUE_EX_DATA__READ_ENTRIES__SHIFT 0x10
-#define RPB_CID_QUEUE_EX_DATA__WRITE_ENTRIES_MASK 0x0000FFFFL
-#define RPB_CID_QUEUE_EX_DATA__READ_ENTRIES_MASK 0xFFFF0000L
-//RPB_SWITCH_CNTL2
-#define RPB_SWITCH_CNTL2__RD_QUEUE4_SWITCH_NUM__SHIFT 0x0
-#define RPB_SWITCH_CNTL2__RD_QUEUE5_SWITCH_NUM__SHIFT 0x7
-#define RPB_SWITCH_CNTL2__WR_QUEUE4_SWITCH_NUM__SHIFT 0xe
-#define RPB_SWITCH_CNTL2__WR_QUEUE5_SWITCH_NUM__SHIFT 0x15
-#define RPB_SWITCH_CNTL2__RD_QUEUE4_SWITCH_NUM_MASK 0x0000007FL
-#define RPB_SWITCH_CNTL2__RD_QUEUE5_SWITCH_NUM_MASK 0x00003F80L
-#define RPB_SWITCH_CNTL2__WR_QUEUE4_SWITCH_NUM_MASK 0x001FC000L
-#define RPB_SWITCH_CNTL2__WR_QUEUE5_SWITCH_NUM_MASK 0x0FE00000L
-//RPB_DEINTRLV_COMBINE_CNTL
-#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER__SHIFT 0x0
-#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN__SHIFT 0x4
-#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE__SHIFT 0x5
-#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER_MASK 0x0000000FL
-#define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN_MASK 0x00000010L
-#define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE_MASK 0x00000020L
-//RPB_VC_SWITCH_RDWR
-#define RPB_VC_SWITCH_RDWR__MODE__SHIFT 0x0
-#define RPB_VC_SWITCH_RDWR__NUM_RD__SHIFT 0x2
-#define RPB_VC_SWITCH_RDWR__NUM_WR__SHIFT 0xa
-#define RPB_VC_SWITCH_RDWR__MODE_MASK 0x00000003L
-#define RPB_VC_SWITCH_RDWR__NUM_RD_MASK 0x000003FCL
-#define RPB_VC_SWITCH_RDWR__NUM_WR_MASK 0x0003FC00L
-//RPB_PERFCOUNTER_LO
-#define RPB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
-#define RPB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
-//RPB_PERFCOUNTER_HI
-#define RPB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
-#define RPB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
-#define RPB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
-#define RPB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
-//RPB_PERFCOUNTER0_CFG
-#define RPB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
-#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
-#define RPB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
-#define RPB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
-#define RPB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
-#define RPB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
-#define RPB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
-#define RPB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
-#define RPB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
-#define RPB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
-//RPB_PERFCOUNTER1_CFG
-#define RPB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
-#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
-#define RPB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
-#define RPB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
-#define RPB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
-#define RPB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
-#define RPB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
-#define RPB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
-#define RPB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
-#define RPB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
-//RPB_PERFCOUNTER2_CFG
-#define RPB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
-#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
-#define RPB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
-#define RPB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
-#define RPB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
-#define RPB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
-#define RPB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
-#define RPB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
-#define RPB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
-#define RPB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
-//RPB_PERFCOUNTER3_CFG
-#define RPB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
-#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
-#define RPB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
-#define RPB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
-#define RPB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
-#define RPB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
-#define RPB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
-#define RPB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
-#define RPB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
-#define RPB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
-//RPB_PERFCOUNTER_RSLT_CNTL
-#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
-#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
-#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
-#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
-#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
-#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
-#define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
-#define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
-#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
-#define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
-#define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
-#define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
-//RPB_RD_QUEUE_CNTL
-#define RPB_RD_QUEUE_CNTL__ARB_MODE__SHIFT 0x0
-#define RPB_RD_QUEUE_CNTL__Q4_SHARED__SHIFT 0x1
-#define RPB_RD_QUEUE_CNTL__Q5_SHARED__SHIFT 0x2
-#define RPB_RD_QUEUE_CNTL__Q4_UNITID_EA_MODE__SHIFT 0x3
-#define RPB_RD_QUEUE_CNTL__Q5_UNITID_EA_MODE__SHIFT 0x4
-#define RPB_RD_QUEUE_CNTL__Q4_PATTERN_LOW__SHIFT 0x5
-#define RPB_RD_QUEUE_CNTL__Q4_PATTERN_HIGH__SHIFT 0xa
-#define RPB_RD_QUEUE_CNTL__Q5_PATTERN_LOW__SHIFT 0x10
-#define RPB_RD_QUEUE_CNTL__Q5_PATTERN_HIGH__SHIFT 0x15
-#define RPB_RD_QUEUE_CNTL__ARB_MODE_MASK 0x00000001L
-#define RPB_RD_QUEUE_CNTL__Q4_SHARED_MASK 0x00000002L
-#define RPB_RD_QUEUE_CNTL__Q5_SHARED_MASK 0x00000004L
-#define RPB_RD_QUEUE_CNTL__Q4_UNITID_EA_MODE_MASK 0x00000008L
-#define RPB_RD_QUEUE_CNTL__Q5_UNITID_EA_MODE_MASK 0x00000010L
-#define RPB_RD_QUEUE_CNTL__Q4_PATTERN_LOW_MASK 0x000003E0L
-#define RPB_RD_QUEUE_CNTL__Q4_PATTERN_HIGH_MASK 0x0000FC00L
-#define RPB_RD_QUEUE_CNTL__Q5_PATTERN_LOW_MASK 0x001F0000L
-#define RPB_RD_QUEUE_CNTL__Q5_PATTERN_HIGH_MASK 0x07E00000L
-//RPB_RD_QUEUE_CNTL2
-#define RPB_RD_QUEUE_CNTL2__Q4_PATTERN_MASK_LOW__SHIFT 0x0
-#define RPB_RD_QUEUE_CNTL2__Q4_PATTERN_MASK_HIGH__SHIFT 0x5
-#define RPB_RD_QUEUE_CNTL2__Q5_PATTERN_MASK_LOW__SHIFT 0xb
-#define RPB_RD_QUEUE_CNTL2__Q5_PATTERN_MASK_HIGH__SHIFT 0x10
-#define RPB_RD_QUEUE_CNTL2__Q4_PATTERN_MASK_LOW_MASK 0x0000001FL
-#define RPB_RD_QUEUE_CNTL2__Q4_PATTERN_MASK_HIGH_MASK 0x000007E0L
-#define RPB_RD_QUEUE_CNTL2__Q5_PATTERN_MASK_LOW_MASK 0x0000F800L
-#define RPB_RD_QUEUE_CNTL2__Q5_PATTERN_MASK_HIGH_MASK 0x003F0000L
-//RPB_WR_QUEUE_CNTL
-#define RPB_WR_QUEUE_CNTL__ARB_MODE__SHIFT 0x0
-#define RPB_WR_QUEUE_CNTL__Q4_SHARED__SHIFT 0x1
-#define RPB_WR_QUEUE_CNTL__Q5_SHARED__SHIFT 0x2
-#define RPB_WR_QUEUE_CNTL__Q4_UNITID_EA_MODE__SHIFT 0x3
-#define RPB_WR_QUEUE_CNTL__Q5_UNITID_EA_MODE__SHIFT 0x4
-#define RPB_WR_QUEUE_CNTL__Q4_PATTERN_LOW__SHIFT 0x5
-#define RPB_WR_QUEUE_CNTL__Q4_PATTERN_HIGH__SHIFT 0xa
-#define RPB_WR_QUEUE_CNTL__Q5_PATTERN_LOW__SHIFT 0x10
-#define RPB_WR_QUEUE_CNTL__Q5_PATTERN_HIGH__SHIFT 0x15
-#define RPB_WR_QUEUE_CNTL__ARB_MODE_MASK 0x00000001L
-#define RPB_WR_QUEUE_CNTL__Q4_SHARED_MASK 0x00000002L
-#define RPB_WR_QUEUE_CNTL__Q5_SHARED_MASK 0x00000004L
-#define RPB_WR_QUEUE_CNTL__Q4_UNITID_EA_MODE_MASK 0x00000008L
-#define RPB_WR_QUEUE_CNTL__Q5_UNITID_EA_MODE_MASK 0x00000010L
-#define RPB_WR_QUEUE_CNTL__Q4_PATTERN_LOW_MASK 0x000003E0L
-#define RPB_WR_QUEUE_CNTL__Q4_PATTERN_HIGH_MASK 0x0000FC00L
-#define RPB_WR_QUEUE_CNTL__Q5_PATTERN_LOW_MASK 0x001F0000L
-#define RPB_WR_QUEUE_CNTL__Q5_PATTERN_HIGH_MASK 0x07E00000L
-//RPB_WR_QUEUE_CNTL2
-#define RPB_WR_QUEUE_CNTL2__Q4_PATTERN_MASK_LOW__SHIFT 0x0
-#define RPB_WR_QUEUE_CNTL2__Q4_PATTERN_MASK_HIGH__SHIFT 0x5
-#define RPB_WR_QUEUE_CNTL2__Q5_PATTERN_MASK_LOW__SHIFT 0xb
-#define RPB_WR_QUEUE_CNTL2__Q5_PATTERN_MASK_HIGH__SHIFT 0x10
-#define RPB_WR_QUEUE_CNTL2__Q4_PATTERN_MASK_LOW_MASK 0x0000001FL
-#define RPB_WR_QUEUE_CNTL2__Q4_PATTERN_MASK_HIGH_MASK 0x000007E0L
-#define RPB_WR_QUEUE_CNTL2__Q5_PATTERN_MASK_LOW_MASK 0x0000F800L
-#define RPB_WR_QUEUE_CNTL2__Q5_PATTERN_MASK_HIGH_MASK 0x003F0000L
-//RPB_EA_QUEUE_WR
-#define RPB_EA_QUEUE_WR__EA_NUMBER__SHIFT 0x0
-#define RPB_EA_QUEUE_WR__WRITE_QUEUE__SHIFT 0x5
-#define RPB_EA_QUEUE_WR__READ_QUEUE__SHIFT 0x8
-#define RPB_EA_QUEUE_WR__UPDATE__SHIFT 0xb
-#define RPB_EA_QUEUE_WR__EA_NUMBER_MASK 0x0000001FL
-#define RPB_EA_QUEUE_WR__WRITE_QUEUE_MASK 0x000000E0L
-#define RPB_EA_QUEUE_WR__READ_QUEUE_MASK 0x00000700L
-#define RPB_EA_QUEUE_WR__UPDATE_MASK 0x00000800L
-//RPB_ATS_CNTL
-#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE__SHIFT 0x0
-#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE__SHIFT 0x1
-#define RPB_ATS_CNTL__SWITCH_THRESHOLD__SHIFT 0x2
-#define RPB_ATS_CNTL__TIME_SLICE__SHIFT 0x7
-#define RPB_ATS_CNTL__ATCTR_SWITCH_NUM__SHIFT 0xf
-#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM__SHIFT 0x13
-#define RPB_ATS_CNTL__WR_AT__SHIFT 0x17
-#define RPB_ATS_CNTL__INVAL_COM_CMD__SHIFT 0x19
-#define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE_MASK 0x00000001L
-#define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE_MASK 0x00000002L
-#define RPB_ATS_CNTL__SWITCH_THRESHOLD_MASK 0x0000007CL
-#define RPB_ATS_CNTL__TIME_SLICE_MASK 0x00007F80L
-#define RPB_ATS_CNTL__ATCTR_SWITCH_NUM_MASK 0x00078000L
-#define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM_MASK 0x00780000L
-#define RPB_ATS_CNTL__WR_AT_MASK 0x01800000L
-#define RPB_ATS_CNTL__INVAL_COM_CMD_MASK 0x7E000000L
-//RPB_ATS_CNTL2
-#define RPB_ATS_CNTL2__TRANS_CMD__SHIFT 0x0
-#define RPB_ATS_CNTL2__PAGE_REQ_CMD__SHIFT 0x6
-#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE__SHIFT 0xc
-#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE__SHIFT 0xf
-#define RPB_ATS_CNTL2__VENDOR_ID__SHIFT 0x12
-#define RPB_ATS_CNTL2__TRANS_CMD_MASK 0x0000003FL
-#define RPB_ATS_CNTL2__PAGE_REQ_CMD_MASK 0x00000FC0L
-#define RPB_ATS_CNTL2__PAGE_ROUTING_CODE_MASK 0x00007000L
-#define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE_MASK 0x00038000L
-#define RPB_ATS_CNTL2__VENDOR_ID_MASK 0x000C0000L
-//RPB_SDPPORT_CNTL
-#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE__SHIFT 0x0
-#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE__SHIFT 0x1
-#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT__SHIFT 0x3
-#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER__SHIFT 0x4
-#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS__SHIFT 0x5
-#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD__SHIFT 0x6
-#define RPB_SDPPORT_CNTL__NBIF_HST_SELF_ACTIVATE__SHIFT 0xa
-#define RPB_SDPPORT_CNTL__NBIF_HST_CFG_MODE__SHIFT 0xb
-#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_REISSUE_CREDIT__SHIFT 0xd
-#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_SATURATE_COUNTER__SHIFT 0xe
-#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_DISRUPT_FULLDIS__SHIFT 0xf
-#define RPB_SDPPORT_CNTL__NBIF_HST_HALT_THRESHOLD__SHIFT 0x10
-#define RPB_SDPPORT_CNTL__NBIF_HST_PASSIVE_MODE__SHIFT 0x14
-#define RPB_SDPPORT_CNTL__NBIF_HST_QUICK_COMACK__SHIFT 0x15
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN__SHIFT 0x16
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV__SHIFT 0x17
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN__SHIFT 0x18
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x19
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN__SHIFT 0x1a
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV__SHIFT 0x1b
-#define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE_MASK 0x00000001L
-#define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE_MASK 0x00000006L
-#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT_MASK 0x00000008L
-#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER_MASK 0x00000010L
-#define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS_MASK 0x00000020L
-#define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD_MASK 0x000003C0L
-#define RPB_SDPPORT_CNTL__NBIF_HST_SELF_ACTIVATE_MASK 0x00000400L
-#define RPB_SDPPORT_CNTL__NBIF_HST_CFG_MODE_MASK 0x00001800L
-#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_REISSUE_CREDIT_MASK 0x00002000L
-#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_SATURATE_COUNTER_MASK 0x00004000L
-#define RPB_SDPPORT_CNTL__NBIF_HST_ENABLE_DISRUPT_FULLDIS_MASK 0x00008000L
-#define RPB_SDPPORT_CNTL__NBIF_HST_HALT_THRESHOLD_MASK 0x000F0000L
-#define RPB_SDPPORT_CNTL__NBIF_HST_PASSIVE_MODE_MASK 0x00100000L
-#define RPB_SDPPORT_CNTL__NBIF_HST_QUICK_COMACK_MASK 0x00200000L
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN_MASK 0x00400000L
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV_MASK 0x00800000L
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN_MASK 0x01000000L
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV_MASK 0x02000000L
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN_MASK 0x04000000L
-#define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV_MASK 0x08000000L
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_default.h
deleted file mode 100644
index 8a0007c..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_default.h
+++ /dev/null
@@ -1,9868 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _dce_12_0_DEFAULT_HEADER
-#define _dce_12_0_DEFAULT_HEADER
-
-
-// addressBlock: dce_dc_dispdec_VGA_MEM_WRITE_PAGE_ADDR
-#define mmdispdec_VGA_MEM_WRITE_PAGE_ADDR_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dispdec_VGA_MEM_READ_PAGE_ADDR
-#define mmdispdec_VGA_MEM_READ_PAGE_ADDR_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon0_dispdec
-#define mmDC_PERFMON0_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON0_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON0_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon13_dispdec
-#define mmDC_PERFMON13_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON13_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON13_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_displaypllregs_dispdec
-#define mmPPLL_VREG_CFG_DEFAULT 0x00000000
-#define mmPPLL_MODE_CNTL_DEFAULT 0x00020100
-#define mmPPLL_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmPPLL_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmPPLL_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmPPLL_FREQ_CTRL3_DEFAULT 0x00190040
-#define mmPPLL_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmPPLL_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmPPLL_CAL_CTRL_DEFAULT 0x64000002
-#define mmPPLL_LOOP_CTRL_DEFAULT 0x00000090
-#define mmPPLL_REFCLK_CNTL_DEFAULT 0x00018004
-#define mmPPLL_CLKOUT_CNTL_DEFAULT 0x00022500
-#define mmPPLL_DFT_CNTL_DEFAULT 0x00000004
-#define mmPPLL_ANALOG_CNTL_DEFAULT 0x00000000
-#define mmPPLL_POSTDIV_DEFAULT 0x00000400
-#define mmPPLL_OBSERVE0_DEFAULT 0x00000000
-#define mmPPLL_OBSERVE1_DEFAULT 0x04b00000
-#define mmPPLL_UPDATE_CNTL_DEFAULT 0x00000000
-#define mmPPLL_OBSERVE0_OUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dccg_pll0_dispdec
-#define mmPLL_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmPLL_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon1_dispdec
-#define mmDC_PERFMON1_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON1_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON1_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_mcif_wb0_dispdec
-#define mmMCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUFMGR_CUR_LINE_R_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUFMGR_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_PITCH_DEFAULT 0x04000400
-#define mmMCIF_WB0_MCIF_WB_BUF_1_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_1_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_ARBITRATION_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_SCLK_CHANGE_DEFAULT 0x00000008
-#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_1_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_2_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_3_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUF_4_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL_DEFAULT 0x000f0000
-#define mmMCIF_WB0_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL_DEFAULT 0x00000040
-#define mmMCIF_WB0_MCIF_WB_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB0_MCIF_WB_WARM_UP_CNTL_DEFAULT 0x00001000
-#define mmMCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL_DEFAULT 0x00000002
-#define mmMCIF_WB0_MULTI_LEVEL_QOS_CTRL_DEFAULT 0x00000080
-#define mmMCIF_WB0_MCIF_WB_BUF_LUMA_SIZE_DEFAULT 0x000fffff
-#define mmMCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE_DEFAULT 0x000fffff
-
-
-// addressBlock: dce_dc_mcif_wb1_dispdec
-#define mmMCIF_WB1_MCIF_WB_BUFMGR_SW_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUFMGR_CUR_LINE_R_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUFMGR_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_PITCH_DEFAULT 0x04000400
-#define mmMCIF_WB1_MCIF_WB_BUF_1_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_1_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_ARBITRATION_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_SCLK_CHANGE_DEFAULT 0x00000008
-#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_1_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_2_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_3_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUF_4_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_BUFMGR_VCE_CONTROL_DEFAULT 0x000f0000
-#define mmMCIF_WB1_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_NB_PSTATE_CONTROL_DEFAULT 0x00000040
-#define mmMCIF_WB1_MCIF_WB_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_CLOCK_GATER_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB1_MCIF_WB_WARM_UP_CNTL_DEFAULT 0x00001000
-#define mmMCIF_WB1_MCIF_WB_SELF_REFRESH_CONTROL_DEFAULT 0x00000002
-#define mmMCIF_WB1_MULTI_LEVEL_QOS_CTRL_DEFAULT 0x00000080
-#define mmMCIF_WB1_MCIF_WB_BUF_LUMA_SIZE_DEFAULT 0x000fffff
-#define mmMCIF_WB1_MCIF_WB_BUF_CHROMA_SIZE_DEFAULT 0x000fffff
-
-
-// addressBlock: dce_dc_mcif_wb2_dispdec
-#define mmMCIF_WB2_MCIF_WB_BUFMGR_SW_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUFMGR_CUR_LINE_R_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUFMGR_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_PITCH_DEFAULT 0x04000400
-#define mmMCIF_WB2_MCIF_WB_BUF_1_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_1_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_2_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_2_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_3_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_3_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_4_STATUS_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_4_STATUS2_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_ARBITRATION_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_SCLK_CHANGE_DEFAULT 0x00000008
-#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_1_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_2_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_3_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_Y_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_C_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUF_4_ADDR_C_OFFSET_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_BUFMGR_VCE_CONTROL_DEFAULT 0x000f0000
-#define mmMCIF_WB2_MCIF_WB_NB_PSTATE_LATENCY_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_NB_PSTATE_CONTROL_DEFAULT 0x00000040
-#define mmMCIF_WB2_MCIF_WB_WATERMARK_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_CLOCK_GATER_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WB2_MCIF_WB_WARM_UP_CNTL_DEFAULT 0x00001000
-#define mmMCIF_WB2_MCIF_WB_SELF_REFRESH_CONTROL_DEFAULT 0x00000002
-#define mmMCIF_WB2_MULTI_LEVEL_QOS_CTRL_DEFAULT 0x00000080
-#define mmMCIF_WB2_MCIF_WB_BUF_LUMA_SIZE_DEFAULT 0x000fffff
-#define mmMCIF_WB2_MCIF_WB_BUF_CHROMA_SIZE_DEFAULT 0x000fffff
-
-
-// addressBlock: dce_dc_cwb0_dispdec
-#define mmCWB0_CWB_CTRL_DEFAULT 0x00000110
-#define mmCWB0_CWB_FENCE_PAR0_DEFAULT 0x03ff03ff
-#define mmCWB0_CWB_FENCE_PAR1_DEFAULT 0x000102ff
-#define mmCWB0_CWB_CRC_CTRL_DEFAULT 0x00000000
-#define mmCWB0_CWB_CRC_RED_GREEN_MASK_DEFAULT 0xffffffff
-#define mmCWB0_CWB_CRC_BLUE_MASK_DEFAULT 0x0000ffff
-#define mmCWB0_CWB_CRC_RED_GREEN_RESULT_DEFAULT 0x00000000
-#define mmCWB0_CWB_CRC_BLUE_RESULT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_cwb1_dispdec
-#define mmCWB1_CWB_CTRL_DEFAULT 0x00000110
-#define mmCWB1_CWB_FENCE_PAR0_DEFAULT 0x03ff03ff
-#define mmCWB1_CWB_FENCE_PAR1_DEFAULT 0x000102ff
-#define mmCWB1_CWB_CRC_CTRL_DEFAULT 0x00000000
-#define mmCWB1_CWB_CRC_RED_GREEN_MASK_DEFAULT 0xffffffff
-#define mmCWB1_CWB_CRC_BLUE_MASK_DEFAULT 0x0000ffff
-#define mmCWB1_CWB_CRC_RED_GREEN_RESULT_DEFAULT 0x00000000
-#define mmCWB1_CWB_CRC_BLUE_RESULT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon9_dispdec
-#define mmDC_PERFMON9_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON9_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON9_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dispdec
-#define mmVGA_MEM_WRITE_PAGE_ADDR_DEFAULT 0x00000000
-#define mmVGA_MEM_READ_PAGE_ADDR_DEFAULT 0x00000000
-#define mmVGA_RENDER_CONTROL_DEFAULT 0x0000000f
-#define mmVGA_SEQUENCER_RESET_CONTROL_DEFAULT 0x00003f3f
-#define mmVGA_MODE_CONTROL_DEFAULT 0x00000000
-#define mmVGA_SURFACE_PITCH_SELECT_DEFAULT 0x00000002
-#define mmVGA_MEMORY_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmVGA_DISPBUF1_SURFACE_ADDR_DEFAULT 0x00000000
-#define mmVGA_DISPBUF2_SURFACE_ADDR_DEFAULT 0x00000000
-#define mmVGA_MEMORY_BASE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmVGA_HDP_CONTROL_DEFAULT 0x00000000
-#define mmVGA_CACHE_CONTROL_DEFAULT 0x00000000
-#define mmD1VGA_CONTROL_DEFAULT 0x00000000
-#define mmD2VGA_CONTROL_DEFAULT 0x00000000
-#define mmVGA_STATUS_DEFAULT 0x00000000
-#define mmVGA_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmVGA_STATUS_CLEAR_DEFAULT 0x00000000
-#define mmVGA_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmVGA_MAIN_CONTROL_DEFAULT 0x00005018
-#define mmVGA_TEST_CONTROL_DEFAULT 0x00000000
-#define mmVGA_QOS_CTRL_DEFAULT 0x00000000
-#define mmCRTC8_IDX_DEFAULT 0x00000000
-#define mmCRTC8_DATA_DEFAULT 0x00000000
-#define mmGENFC_WT_DEFAULT 0x00000000
-#define mmGENS1_DEFAULT 0x00000000
-#define mmATTRDW_DEFAULT 0x00000000
-#define mmATTRX_DEFAULT 0x00000000
-#define mmATTRDR_DEFAULT 0x00000000
-#define mmGENMO_WT_DEFAULT 0x00000000
-#define mmGENS0_DEFAULT 0x00000000
-#define mmGENENB_DEFAULT 0x00000000
-#define mmSEQ8_IDX_DEFAULT 0x00000000
-#define mmSEQ8_DATA_DEFAULT 0x00000000
-#define mmDAC_MASK_DEFAULT 0x00000000
-#define mmDAC_R_INDEX_DEFAULT 0x00000000
-#define mmDAC_W_INDEX_DEFAULT 0x00000000
-#define mmDAC_DATA_DEFAULT 0x00000000
-#define mmGENFC_RD_DEFAULT 0x00000000
-#define mmGENMO_RD_DEFAULT 0x00000000
-#define mmGRPH8_IDX_DEFAULT 0x00000000
-#define mmGRPH8_DATA_DEFAULT 0x00000000
-#define mmCRTC8_IDX_1_DEFAULT 0x00000000
-#define mmCRTC8_DATA_1_DEFAULT 0x00000000
-#define mmGENFC_WT_1_DEFAULT 0x00000000
-#define mmGENS1_1_DEFAULT 0x00000000
-#define mmD3VGA_CONTROL_DEFAULT 0x00000000
-#define mmD4VGA_CONTROL_DEFAULT 0x00000000
-#define mmD5VGA_CONTROL_DEFAULT 0x00000000
-#define mmD6VGA_CONTROL_DEFAULT 0x00000000
-#define mmVGA_SOURCE_SELECT_DEFAULT 0x00000100
-#define mmPHYPLLA_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPHYPLLB_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPHYPLLC_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPHYPLLD_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmDCFEV0_CRTC_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDCFEV1_CRTC_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmSYMCLKLPA_CLOCK_ENABLE_DEFAULT 0x00000000
-#define mmSYMCLKLPB_CLOCK_ENABLE_DEFAULT 0x00000100
-#define mmDPREFCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmREFCLK_CNTL_DEFAULT 0x00000000
-#define mmMIPI_CLK_CNTL_DEFAULT 0x00000000
-#define mmREFCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmPHYPLLE_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmDCCG_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDSICLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmDCCG_CBUS_WRCMD_DELAY_DEFAULT 0x00000003
-#define mmDCCG_DS_DTO_INCR_DEFAULT 0x00000000
-#define mmDCCG_DS_DTO_MODULO_DEFAULT 0x00000000
-#define mmDCCG_DS_CNTL_DEFAULT 0x00000000
-#define mmDCCG_DS_HW_CAL_INTERVAL_DEFAULT 0x00989680
-#define mmSYMCLKG_CLOCK_ENABLE_DEFAULT 0x00000600
-#define mmDPREFCLK_CNTL_DEFAULT 0x00000000
-#define mmAOMCLK0_CNTL_DEFAULT 0x00000000
-#define mmAOMCLK1_CNTL_DEFAULT 0x00000000
-#define mmAOMCLK2_CNTL_DEFAULT 0x00000000
-#define mmDCCG_AUDIO_DTO2_PHASE_DEFAULT 0x00000000
-#define mmDCCG_AUDIO_DTO2_MODULO_DEFAULT 0x00000001
-#define mmDCE_VERSION_DEFAULT 0x00000000
-#define mmPHYPLLG_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmDCCG_GTC_CNTL_DEFAULT 0x00000000
-#define mmDCCG_GTC_DTO_INCR_DEFAULT 0x00000000
-#define mmDCCG_GTC_DTO_MODULO_DEFAULT 0x00000000
-#define mmDCCG_GTC_CURRENT_DEFAULT 0x00000000
-#define mmDENTIST_DISPCLK_CNTL_DEFAULT 0x64010064
-#define mmMIPI_DTO_CNTL_DEFAULT 0x00000000
-#define mmMIPI_DTO_PHASE_DEFAULT 0x00000000
-#define mmMIPI_DTO_MODULO_DEFAULT 0x00000000
-#define mmDAC_CLK_ENABLE_DEFAULT 0x00000000
-#define mmDVO_CLK_ENABLE_DEFAULT 0x00000000
-#define mmAVSYNC_COUNTER_WRITE_DEFAULT 0x00000000
-#define mmAVSYNC_COUNTER_CONTROL_DEFAULT 0x00000000
-#define mmDMCU_SMU_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmSMU_CONTROL_DEFAULT 0x00000000
-#define mmSMU_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmAVSYNC_COUNTER_READ_DEFAULT 0x00000000
-#define mmMILLISECOND_TIME_BASE_DIV_DEFAULT 0x001186a0
-#define mmDISPCLK_FREQ_CHANGE_CNTL_DEFAULT 0x08010028
-#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL_DEFAULT 0x00000001
-#define mmDCCG_PERFMON_CNTL_DEFAULT 0xfffff800
-#define mmDCCG_GATE_DISABLE_CNTL_DEFAULT 0x74ee00fd
-#define mmDISPCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmSCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmDCCG_CAC_STATUS_DEFAULT 0x00000000
-#define mmPIXCLK1_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPIXCLK2_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmPIXCLK0_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmMICROSECOND_TIME_BASE_DIV_DEFAULT 0x00120464
-#define mmDCCG_GATE_DISABLE_CNTL2_DEFAULT 0x037f037f
-#define mmSYMCLK_CGTT_BLK_CTRL_REG_DEFAULT 0x00000200
-#define mmPHYPLLF_PIXCLK_RESYNC_CNTL_DEFAULT 0x00000000
-#define mmDCCG_DISP_CNTL_REG_DEFAULT 0x00000000
-#define mmCRTC0_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO0_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO0_MODULO_DEFAULT 0x00000000
-#define mmCRTC0_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO1_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO1_MODULO_DEFAULT 0x00000000
-#define mmCRTC1_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO2_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO2_MODULO_DEFAULT 0x00000000
-#define mmCRTC2_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO3_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO3_MODULO_DEFAULT 0x00000000
-#define mmCRTC3_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO4_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO4_MODULO_DEFAULT 0x00000000
-#define mmCRTC4_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP_DTO5_PHASE_DEFAULT 0x00000000
-#define mmDP_DTO5_MODULO_DEFAULT 0x00000000
-#define mmCRTC5_PHYPLL_PIXEL_RATE_CNTL_DEFAULT 0x00000000
-#define mmDCCG_SOFT_RESET_DEFAULT 0x00000000
-#define mmSYMCLKA_CLOCK_ENABLE_DEFAULT 0x00000000
-#define mmSYMCLKB_CLOCK_ENABLE_DEFAULT 0x00000100
-#define mmSYMCLKC_CLOCK_ENABLE_DEFAULT 0x00000200
-#define mmSYMCLKD_CLOCK_ENABLE_DEFAULT 0x00000300
-#define mmSYMCLKE_CLOCK_ENABLE_DEFAULT 0x00000400
-#define mmSYMCLKF_CLOCK_ENABLE_DEFAULT 0x00000500
-#define mmDVOACLKD_CNTL_DEFAULT 0x00070000
-#define mmDVOACLKC_MVP_CNTL_DEFAULT 0x00030000
-#define mmDVOACLKC_CNTL_DEFAULT 0x00030000
-#define mmDCCG_AUDIO_DTO_SOURCE_DEFAULT 0x00000030
-#define mmDCCG_AUDIO_DTO0_PHASE_DEFAULT 0x00000000
-#define mmDCCG_AUDIO_DTO0_MODULE_DEFAULT 0x00000001
-#define mmDCCG_AUDIO_DTO1_PHASE_DEFAULT 0x00000000
-#define mmDCCG_AUDIO_DTO1_MODULE_DEFAULT 0x00000001
-#define mmDCCG_TEST_CLK_SEL_DEFAULT 0x01ff01ff
-#define mmFBC_CNTL_DEFAULT 0x00000500
-#define mmFBC_IDLE_FORCE_CLEAR_MASK_DEFAULT 0x00000000
-#define mmFBC_START_STOP_DELAY_DEFAULT 0x00000000
-#define mmFBC_COMP_CNTL_DEFAULT 0x0000000f
-#define mmFBC_COMP_MODE_DEFAULT 0x00000000
-#define mmFBC_IND_LUT0_DEFAULT 0x00000000
-#define mmFBC_IND_LUT1_DEFAULT 0x00000000
-#define mmFBC_IND_LUT2_DEFAULT 0x00000000
-#define mmFBC_IND_LUT3_DEFAULT 0x00000000
-#define mmFBC_IND_LUT4_DEFAULT 0x00000000
-#define mmFBC_IND_LUT5_DEFAULT 0x00000000
-#define mmFBC_IND_LUT6_DEFAULT 0x00000000
-#define mmFBC_IND_LUT7_DEFAULT 0x00000000
-#define mmFBC_IND_LUT8_DEFAULT 0x00000000
-#define mmFBC_IND_LUT9_DEFAULT 0x00000000
-#define mmFBC_IND_LUT10_DEFAULT 0x00000000
-#define mmFBC_IND_LUT11_DEFAULT 0x00000000
-#define mmFBC_IND_LUT12_DEFAULT 0x00000000
-#define mmFBC_IND_LUT13_DEFAULT 0x00000000
-#define mmFBC_IND_LUT14_DEFAULT 0x00000000
-#define mmFBC_IND_LUT15_DEFAULT 0x00000000
-#define mmFBC_CSM_REGION_OFFSET_01_DEFAULT 0x00000000
-#define mmFBC_CSM_REGION_OFFSET_23_DEFAULT 0x00000000
-#define mmFBC_CLIENT_REGION_MASK_DEFAULT 0x00000000
-#define mmFBC_DEBUG_COMP_DEFAULT 0x00000000
-#define mmFBC_MISC_DEFAULT 0x0c306008
-#define mmFBC_STATUS_DEFAULT 0x00000000
-#define mmFBC_ALPHA_CNTL_DEFAULT 0x00000000
-#define mmFBC_ALPHA_RGB_OVERRIDE_DEFAULT 0x00000000
-#define mmPIPE0_PG_CONFIG_DEFAULT 0x00000001
-#define mmPIPE0_PG_ENABLE_DEFAULT 0x00000000
-#define mmPIPE0_PG_STATUS_DEFAULT 0x00000000
-#define mmPIPE1_PG_CONFIG_DEFAULT 0x00000001
-#define mmPIPE1_PG_ENABLE_DEFAULT 0x00000000
-#define mmPIPE1_PG_STATUS_DEFAULT 0x00000000
-#define mmPIPE2_PG_CONFIG_DEFAULT 0x00000001
-#define mmPIPE2_PG_ENABLE_DEFAULT 0x00000000
-#define mmPIPE2_PG_STATUS_DEFAULT 0x00000000
-#define mmPIPE3_PG_CONFIG_DEFAULT 0x00000001
-#define mmPIPE3_PG_ENABLE_DEFAULT 0x00000000
-#define mmPIPE3_PG_STATUS_DEFAULT 0x00000000
-#define mmPIPE4_PG_CONFIG_DEFAULT 0x00000001
-#define mmPIPE4_PG_ENABLE_DEFAULT 0x00000000
-#define mmPIPE4_PG_STATUS_DEFAULT 0x00000000
-#define mmPIPE5_PG_CONFIG_DEFAULT 0x00000001
-#define mmPIPE5_PG_ENABLE_DEFAULT 0x00000000
-#define mmPIPE5_PG_STATUS_DEFAULT 0x00000000
-#define mmDSI_PG_CONFIG_DEFAULT 0x00000001
-#define mmDSI_PG_ENABLE_DEFAULT 0x00000000
-#define mmDSI_PG_STATUS_DEFAULT 0x00000000
-#define mmDCFEV0_PG_CONFIG_DEFAULT 0x00000001
-#define mmDCFEV0_PG_ENABLE_DEFAULT 0x00000000
-#define mmDCFEV0_PG_STATUS_DEFAULT 0x00000000
-#define mmDCPG_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDCPG_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDCPG_INTERRUPT_CONTROL2_DEFAULT 0x00000000
-#define mmDCFEV1_PG_CONFIG_DEFAULT 0x00000001
-#define mmDCFEV1_PG_ENABLE_DEFAULT 0x00000000
-#define mmDCFEV1_PG_STATUS_DEFAULT 0x00000000
-#define mmDC_IP_REQUEST_CNTL_DEFAULT 0x00000000
-#define mmDC_PGCNTL_STATUS_REG_DEFAULT 0x00000000
-#define mmDMIFV_STATUS_DEFAULT 0x00000000
-#define mmDMIF_CONTROL_DEFAULT 0x00000c04
-#define mmDMIF_STATUS_DEFAULT 0x0ff00000
-#define mmDMIF_ARBITRATION_CONTROL_DEFAULT 0x00042710
-#define mmPIPE0_ARBITRATION_CONTROL3_DEFAULT 0x00000000
-#define mmPIPE1_ARBITRATION_CONTROL3_DEFAULT 0x00000000
-#define mmPIPE2_ARBITRATION_CONTROL3_DEFAULT 0x00000000
-#define mmPIPE3_ARBITRATION_CONTROL3_DEFAULT 0x00000000
-#define mmPIPE4_ARBITRATION_CONTROL3_DEFAULT 0x00000000
-#define mmPIPE5_ARBITRATION_CONTROL3_DEFAULT 0x00000000
-#define mmDMIF_P_VMID_DEFAULT 0x00000000
-#define mmDMIF_ADDR_CALC_DEFAULT 0x00000000
-#define mmDMIF_STATUS2_DEFAULT 0x00000000
-#define mmPIPE0_MAX_REQUESTS_DEFAULT 0x000003ff
-#define mmPIPE1_MAX_REQUESTS_DEFAULT 0x000003ff
-#define mmPIPE2_MAX_REQUESTS_DEFAULT 0x000003ff
-#define mmPIPE3_MAX_REQUESTS_DEFAULT 0x000003ff
-#define mmPIPE4_MAX_REQUESTS_DEFAULT 0x000003ff
-#define mmPIPE5_MAX_REQUESTS_DEFAULT 0x000003ff
-#define mmLOW_POWER_TILING_CONTROL_DEFAULT 0x00001000
-#define mmMCIF_CONTROL_DEFAULT 0x00000000
-#define mmMCIF_WRITE_COMBINE_CONTROL_DEFAULT 0x00000080
-#define mmMCIF_PHASE0_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmCC_DC_PIPE_DIS_DEFAULT 0x00000000
-#define mmSMU_WM_CONTROL_DEFAULT 0x00000000
-#define mmRBBMIF_TIMEOUT_DEFAULT 0x20000a00
-#define mmRBBMIF_STATUS_DEFAULT 0x80000000
-#define mmRBBMIF_TIMEOUT_DIS_DEFAULT 0x00000000
-#define mmDCI_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCI_MEM_PWR_STATUS2_DEFAULT 0x00000000
-#define mmDCI_CLK_CNTL_DEFAULT 0x00000000
-#define mmDCI_CLK_CNTL2_DEFAULT 0x00020020
-#define mmDCI_MEM_PWR_CNTL_DEFAULT 0x00000000
-#define mmDCI_MEM_PWR_CNTL2_DEFAULT 0x00000000
-#define mmDCI_MEM_PWR_CNTL3_DEFAULT 0x00000000
-#define mmPIPE0_DMIF_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmPIPE1_DMIF_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmPIPE2_DMIF_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmPIPE3_DMIF_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmPIPE4_DMIF_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmPIPE5_DMIF_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmRBBMIF_STATUS_FLAG_DEFAULT 0x00000000
-#define mmDCI_SOFT_RESET_DEFAULT 0x00000000
-#define mmDMIF_URG_OVERRIDE_DEFAULT 0x00000000
-#define mmPIPE6_ARBITRATION_CONTROL3_DEFAULT 0x00000000
-#define mmPIPE7_ARBITRATION_CONTROL3_DEFAULT 0x00000000
-#define mmPIPE6_MAX_REQUESTS_DEFAULT 0x000003ff
-#define mmPIPE7_MAX_REQUESTS_DEFAULT 0x000003ff
-#define mmDVMM_REG_RD_STATUS_DEFAULT 0x00000000
-#define mmDVMM_REG_RD_DATA_DEFAULT 0x00000000
-#define mmDVMM_PTE_REQ_DEFAULT 0x000120ff
-#define mmDVMM_CNTL_DEFAULT 0x00000000
-#define mmDVMM_FAULT_STATUS_DEFAULT 0x00000000
-#define mmDVMM_FAULT_ADDR_DEFAULT 0x00000000
-#define mmFMON_CTRL_DEFAULT 0x0000f040
-#define mmDVMM_PTE_PGMEM_CONTROL_DEFAULT 0x00000000
-#define mmDVMM_PTE_PGMEM_STATE_DEFAULT 0x00000000
-#define mmMCIF_PHASE1_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmMCIF_PHASE2_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmMCIF_WB_PHASE0_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmMCIF_WB_PHASE1_OUTSTANDING_COUNTER_DEFAULT 0x00000000
-#define mmDCI_MEM_PWR_CNTL4_DEFAULT 0x0000003f
-#define mmMCIF_WB_MISC_CTRL_DEFAULT 0x00010001
-#define mmDCI_MEM_PWR_STATUS3_DEFAULT 0x00000000
-#define mmDMIF_CURSOR_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_CURSOR_MEM_CONTROL_DEFAULT 0x00000000
-#define mmDCHUB_FB_LOCATION_DEFAULT 0x00000000
-#define mmDCHUB_FB_OFFSET_DEFAULT 0x00000000
-#define mmDCHUB_AGP_BASE_DEFAULT 0x00000000
-#define mmDCHUB_AGP_BOT_DEFAULT 0x00000000
-#define mmDCHUB_AGP_TOP_DEFAULT 0x00000000
-#define mmDCHUB_DRAM_APER_BASE_DEFAULT 0x00000000
-#define mmDCHUB_DRAM_APER_DEF_DEFAULT 0x00000000
-#define mmDCHUB_DRAM_APER_TOP_DEFAULT 0x00000000
-#define mmDCHUB_CONTROL_STATUS_DEFAULT 0x00c00000
-#define mmWB_ENABLE_DEFAULT 0x00000000
-#define mmWB_EC_CONFIG_DEFAULT 0x55000000
-#define mmCNV_MODE_DEFAULT 0x00000000
-#define mmCNV_WINDOW_START_DEFAULT 0x00000000
-#define mmCNV_WINDOW_SIZE_DEFAULT 0x00100010
-#define mmCNV_UPDATE_DEFAULT 0x00000000
-#define mmCNV_SOURCE_SIZE_DEFAULT 0x00100010
-#define mmCNV_CSC_CONTROL_DEFAULT 0x00000000
-#define mmCNV_CSC_C11_C12_DEFAULT 0x00000000
-#define mmCNV_CSC_C13_C14_DEFAULT 0x00000000
-#define mmCNV_CSC_C21_C22_DEFAULT 0x00000000
-#define mmCNV_CSC_C23_C24_DEFAULT 0x00000000
-#define mmCNV_CSC_C31_C32_DEFAULT 0x00000000
-#define mmCNV_CSC_C33_C34_DEFAULT 0x00000000
-#define mmCNV_CSC_ROUND_OFFSET_R_DEFAULT 0x00000000
-#define mmCNV_CSC_ROUND_OFFSET_G_DEFAULT 0x00000000
-#define mmCNV_CSC_ROUND_OFFSET_B_DEFAULT 0x00000000
-#define mmCNV_CSC_CLAMP_R_DEFAULT 0x00000fff
-#define mmCNV_CSC_CLAMP_G_DEFAULT 0x00000fff
-#define mmCNV_CSC_CLAMP_B_DEFAULT 0x00000fff
-#define mmCNV_TEST_CNTL_DEFAULT 0x00000000
-#define mmCNV_TEST_CRC_RED_DEFAULT 0x0000fff0
-#define mmCNV_TEST_CRC_GREEN_DEFAULT 0x0000fff0
-#define mmCNV_TEST_CRC_BLUE_DEFAULT 0x0000fff0
-#define mmCNV_INPUT_SELECT_DEFAULT 0x00000000
-#define mmWB_SOFT_RESET_DEFAULT 0x00000000
-#define mmWB_WARM_UP_MODE_CTL1_DEFAULT 0x88700100
-#define mmWB_WARM_UP_MODE_CTL2_DEFAULT 0x00000100
-#define mmWBSCL_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmWBSCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmWBSCL_MODE_DEFAULT 0x00000000
-#define mmWBSCL_TAP_CONTROL_DEFAULT 0x00001111
-#define mmWBSCL_DEST_SIZE_DEFAULT 0x00010001
-#define mmWBSCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00080000
-#define mmWBSCL_HORZ_FILTER_INIT_Y_RGB_DEFAULT 0x01000000
-#define mmWBSCL_HORZ_FILTER_INIT_CBCR_DEFAULT 0x01000000
-#define mmWBSCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00080000
-#define mmWBSCL_VERT_FILTER_INIT_Y_RGB_DEFAULT 0x01000000
-#define mmWBSCL_VERT_FILTER_INIT_CBCR_DEFAULT 0x01000000
-#define mmWBSCL_ROUND_OFFSET_DEFAULT 0x00800010
-#define mmWBSCL_CLAMP_DEFAULT 0x01fe01fe
-#define mmWBSCL_OVERFLOW_STATUS_DEFAULT 0x00000000
-#define mmWBSCL_COEF_RAM_CONFLICT_STATUS_DEFAULT 0x00000000
-#define mmWBSCL_OUTSIDE_PIX_STRATEGY_DEFAULT 0x80108000
-#define mmWBSCL_TEST_CNTL_DEFAULT 0x00000000
-#define mmWBSCL_TEST_CRC_RED_DEFAULT 0x0000ff00
-#define mmWBSCL_TEST_CRC_GREEN_DEFAULT 0x0000ffff
-#define mmWBSCL_TEST_CRC_BLUE_DEFAULT 0x0000ff00
-#define mmWBSCL_BACKPRESSURE_CNT_EN_DEFAULT 0x00000000
-#define mmWB_MCIF_BACKPRESSURE_CNT_DEFAULT 0x00000000
-#define mmWBSCL_RAM_SHUTDOWN_DEFAULT 0x00000000
-#define mmDMCU_CTRL_DEFAULT 0xffff0101
-#define mmDMCU_STATUS_DEFAULT 0x00000001
-#define mmDMCU_PC_START_ADDR_DEFAULT 0x00000000
-#define mmDMCU_FW_START_ADDR_DEFAULT 0x00000000
-#define mmDMCU_FW_END_ADDR_DEFAULT 0x00000000
-#define mmDMCU_FW_ISR_START_ADDR_DEFAULT 0x00000004
-#define mmDMCU_FW_CS_HI_DEFAULT 0x00000000
-#define mmDMCU_FW_CS_LO_DEFAULT 0x00000000
-#define mmDMCU_RAM_ACCESS_CTRL_DEFAULT 0x00000000
-#define mmDMCU_ERAM_WR_CTRL_DEFAULT 0x000f0000
-#define mmDMCU_ERAM_WR_DATA_DEFAULT 0x00000000
-#define mmDMCU_ERAM_RD_CTRL_DEFAULT 0x000f0000
-#define mmDMCU_ERAM_RD_DATA_DEFAULT 0x00000000
-#define mmDMCU_IRAM_WR_CTRL_DEFAULT 0x00000000
-#define mmDMCU_IRAM_WR_DATA_DEFAULT 0x00000000
-#define mmDMCU_IRAM_RD_CTRL_DEFAULT 0x00000000
-#define mmDMCU_IRAM_RD_DATA_DEFAULT 0x00000000
-#define mmDMCU_EVENT_TRIGGER_DEFAULT 0x00000000
-#define mmDMCU_UC_INTERNAL_INT_STATUS_DEFAULT 0x00000000
-#define mmDMCU_SS_INTERRUPT_CNTL_STATUS_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_HOST_EN_MASK_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_DEFAULT 0x00000000
-#define mmDC_DMCU_SCRATCH_DEFAULT 0x00000000
-#define mmDMCU_INT_CNT_DEFAULT 0x00000000
-#define mmDMCU_FW_CHECKSUM_SMPL_BYTE_POS_DEFAULT 0x00000000
-#define mmDMCU_UC_CLK_GATING_CNTL_DEFAULT 0x00010102
-#define mmMASTER_COMM_DATA_REG1_DEFAULT 0x00000000
-#define mmMASTER_COMM_DATA_REG2_DEFAULT 0x00000000
-#define mmMASTER_COMM_DATA_REG3_DEFAULT 0x00000000
-#define mmMASTER_COMM_CMD_REG_DEFAULT 0x00000000
-#define mmMASTER_COMM_CNTL_REG_DEFAULT 0x00000000
-#define mmSLAVE_COMM_DATA_REG1_DEFAULT 0x00000000
-#define mmSLAVE_COMM_DATA_REG2_DEFAULT 0x00000000
-#define mmSLAVE_COMM_DATA_REG3_DEFAULT 0x00000000
-#define mmSLAVE_COMM_CMD_REG_DEFAULT 0x00000000
-#define mmSLAVE_COMM_CNTL_REG_DEFAULT 0x00000000
-#define mmBL1_PWM_AMBIENT_LIGHT_LEVEL_DEFAULT 0x00000000
-#define mmBL1_PWM_USER_LEVEL_DEFAULT 0x00000000
-#define mmBL1_PWM_TARGET_ABM_LEVEL_DEFAULT 0x00000000
-#define mmBL1_PWM_CURRENT_ABM_LEVEL_DEFAULT 0x00000000
-#define mmBL1_PWM_FINAL_DUTY_CYCLE_DEFAULT 0x00000000
-#define mmBL1_PWM_MINIMUM_DUTY_CYCLE_DEFAULT 0x00000000
-#define mmBL1_PWM_ABM_CNTL_DEFAULT 0x00000000
-#define mmBL1_PWM_BL_UPDATE_SAMPLE_RATE_DEFAULT 0x00000000
-#define mmBL1_PWM_GRP2_REG_LOCK_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_1_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1_DEFAULT 0x00000000
-#define mmDMCU_INTERRUPT_STATUS_1_DEFAULT 0x00000000
-#define mmDMCU_DPRX_INTERRUPT_STATUS1_DEFAULT 0x00000000
-#define mmDMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1_DEFAULT 0x00000000
-#define mmDMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1_DEFAULT 0x00000000
-#define mmDC_ABM1_CNTL_DEFAULT 0x00000000
-#define mmDC_ABM1_IPCSC_COEFF_SEL_DEFAULT 0x00000000
-#define mmDC_ABM1_ACE_OFFSET_SLOPE_0_DEFAULT 0x00000400
-#define mmDC_ABM1_ACE_OFFSET_SLOPE_1_DEFAULT 0x00000400
-#define mmDC_ABM1_ACE_OFFSET_SLOPE_2_DEFAULT 0x00000400
-#define mmDC_ABM1_ACE_OFFSET_SLOPE_3_DEFAULT 0x00000400
-#define mmDC_ABM1_ACE_OFFSET_SLOPE_4_DEFAULT 0x00000400
-#define mmDC_ABM1_ACE_THRES_12_DEFAULT 0x00000000
-#define mmDC_ABM1_ACE_THRES_34_DEFAULT 0x00000000
-#define mmDC_ABM1_ACE_CNTL_MISC_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS5_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS1_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS2_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS3_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_STATUS4_DEFAULT 0x00000000
-#define mmDC_ABM1_HGLS_REG_READ_PROGRESS_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_MISC_CTRL_DEFAULT 0x00000000
-#define mmDC_ABM1_LS_SUM_OF_LUMA_DEFAULT 0x00000000
-#define mmDC_ABM1_LS_MIN_MAX_LUMA_DEFAULT 0x00000000
-#define mmDC_ABM1_LS_FILTERED_MIN_MAX_LUMA_DEFAULT 0x00000000
-#define mmDC_ABM1_LS_PIXEL_COUNT_DEFAULT 0x00000000
-#define mmDC_ABM1_LS_OVR_SCAN_BIN_DEFAULT 0x00000000
-#define mmDC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES_DEFAULT 0x00000000
-#define mmDC_ABM1_LS_MIN_PIXEL_VALUE_COUNT_DEFAULT 0x00000000
-#define mmDC_ABM1_LS_MAX_PIXEL_VALUE_COUNT_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_SAMPLE_RATE_DEFAULT 0x00000000
-#define mmDC_ABM1_LS_SAMPLE_RATE_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_BIN_1_32_SHIFT_FLAG_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_BIN_1_8_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_BIN_9_16_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_BIN_17_24_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_BIN_25_32_SHIFT_INDEX_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_1_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_2_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_3_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_4_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_5_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_6_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_7_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_8_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_9_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_10_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_11_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_12_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_13_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_14_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_15_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_16_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_17_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_18_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_19_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_20_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_21_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_22_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_23_DEFAULT 0x00000000
-#define mmDC_ABM1_HG_RESULT_24_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3_DEFAULT 0x00000000
-#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4_DEFAULT 0x00000000
-#define mmDC_ABM1_OVERSCAN_PIXEL_VALUE_DEFAULT 0x00000000
-#define mmDC_ABM1_BL_MASTER_LOCK_DEFAULT 0x00000000
-#define mmAZALIA_CONTROLLER_CLOCK_GATING_DEFAULT 0x00000000
-#define mmAZALIA_AUDIO_DTO_DEFAULT 0x001b0018
-#define mmAZALIA_AUDIO_DTO_CONTROL_DEFAULT 0x00000000
-#define mmAZALIA_SOCCLK_CONTROL_DEFAULT 0x00000001
-#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE_DEFAULT 0x00000000
-#define mmAZALIA_DATA_DMA_CONTROL_DEFAULT 0x0000000a
-#define mmAZALIA_BDL_DMA_CONTROL_DEFAULT 0x0000000a
-#define mmAZALIA_RIRB_AND_DP_CONTROL_DEFAULT 0x00000000
-#define mmAZALIA_CORB_DMA_CONTROL_DEFAULT 0x00000000
-#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER_DEFAULT 0x00000000
-#define mmAZALIA_CYCLIC_BUFFER_SYNC_DEFAULT 0x00000000
-#define mmAZALIA_GLOBAL_CAPABILITIES_DEFAULT 0x00000000
-#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY_DEFAULT 0x00000060
-#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL_DEFAULT 0x00080008
-#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY_DEFAULT 0x00000080
-#define mmAZALIA_INPUT_CRC0_CONTROL0_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC0_CONTROL1_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC0_CONTROL2_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC0_CONTROL3_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC0_RESULT_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_CONTROL0_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_CONTROL1_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_CONTROL2_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_CONTROL3_DEFAULT 0x00000000
-#define mmAZALIA_INPUT_CRC1_RESULT_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_CONTROL0_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_CONTROL1_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_CONTROL2_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_CONTROL3_DEFAULT 0x00000000
-#define mmAZALIA_CRC0_RESULT_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_CONTROL0_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_CONTROL1_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_CONTROL2_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_CONTROL3_DEFAULT 0x00000000
-#define mmAZALIA_CRC1_RESULT_DEFAULT 0x00000000
-#define mmAZALIA_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmAZALIA_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_DEFAULT 0x1002aa01
-#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID_DEFAULT 0x00100700
-#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL_DEFAULT 0x0000000d
-#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_DEFAULT 0x00000001
-#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES_DEFAULT 0xc0000009
-#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE_DEFAULT 0x00000200
-#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET_DEFAULT 0x00000000
-#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_DEFAULT 0x00aa0100
-#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_DEFAULT 0x00000000
-#define mmCC_RCU_DC_AUDIO_PORT_CONNECTIVITY_DEFAULT 0x00000000
-#define mmCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET0_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET1_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET2_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET3_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET4_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET5_DEFAULT 0x00000000
-#define mmAZALIA_F0_GTC_GROUP_OFFSET6_DEFAULT 0x00000000
-#define mmREG_DC_AUDIO_PORT_CONNECTIVITY_DEFAULT 0x00000000
-#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY_DEFAULT 0x00000000
-#define mmDAC_ENABLE_DEFAULT 0x00000004
-#define mmDAC_SOURCE_SELECT_DEFAULT 0x00000000
-#define mmDAC_CRC_EN_DEFAULT 0x00000000
-#define mmDAC_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDAC_CRC_SIG_RGB_MASK_DEFAULT 0x3fffffff
-#define mmDAC_CRC_SIG_CONTROL_MASK_DEFAULT 0x0000003f
-#define mmDAC_CRC_SIG_RGB_DEFAULT 0x3fffffff
-#define mmDAC_CRC_SIG_CONTROL_DEFAULT 0x0000003f
-#define mmDAC_SYNC_TRISTATE_CONTROL_DEFAULT 0x00000000
-#define mmDAC_STEREOSYNC_SELECT_DEFAULT 0x00000000
-#define mmDAC_AUTODETECT_CONTROL_DEFAULT 0x00070000
-#define mmDAC_AUTODETECT_CONTROL2_DEFAULT 0x0000000b
-#define mmDAC_AUTODETECT_CONTROL3_DEFAULT 0x00000519
-#define mmDAC_AUTODETECT_STATUS_DEFAULT 0x00000000
-#define mmDAC_AUTODETECT_INT_CONTROL_DEFAULT 0x00000000
-#define mmDAC_FORCE_OUTPUT_CNTL_DEFAULT 0x00000000
-#define mmDAC_FORCE_DATA_DEFAULT 0x000001e6
-#define mmDAC_POWERDOWN_DEFAULT 0x01010100
-#define mmDAC_CONTROL_DEFAULT 0x00000000
-#define mmDAC_COMPARATOR_ENABLE_DEFAULT 0x00000000
-#define mmDAC_COMPARATOR_OUTPUT_DEFAULT 0x00000000
-#define mmDAC_PWR_CNTL_DEFAULT 0x00000000
-#define mmDAC_DFT_CONFIG_DEFAULT 0x00000000
-#define mmDAC_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_CONTROL_DEFAULT 0x00000000
-#define mmDC_I2C_ARBITRATION_DEFAULT 0x00000001
-#define mmDC_I2C_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDC_I2C_SW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC1_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC2_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC3_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC4_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC5_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC6_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDC1_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC1_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC2_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC2_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC3_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC3_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC4_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC4_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC5_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC5_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_DDC6_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDC6_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_TRANSACTION0_DEFAULT 0x00000000
-#define mmDC_I2C_TRANSACTION1_DEFAULT 0x00000000
-#define mmDC_I2C_TRANSACTION2_DEFAULT 0x00000000
-#define mmDC_I2C_TRANSACTION3_DEFAULT 0x00000000
-#define mmDC_I2C_DATA_DEFAULT 0x00000000
-#define mmDC_I2C_DDCVGA_HW_STATUS_DEFAULT 0x00000000
-#define mmDC_I2C_DDCVGA_SPEED_DEFAULT 0x00000002
-#define mmDC_I2C_DDCVGA_SETUP_DEFAULT 0x00000000
-#define mmDC_I2C_EDID_DETECT_CTRL_DEFAULT 0x004001f4
-#define mmDC_I2C_READ_REQUEST_INTERRUPT_DEFAULT 0x40000000
-#define mmGENERIC_I2C_CONTROL_DEFAULT 0x00000000
-#define mmGENERIC_I2C_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmGENERIC_I2C_STATUS_DEFAULT 0x00000000
-#define mmGENERIC_I2C_SPEED_DEFAULT 0x00000002
-#define mmGENERIC_I2C_SETUP_DEFAULT 0x00000000
-#define mmGENERIC_I2C_TRANSACTION_DEFAULT 0x00000000
-#define mmGENERIC_I2C_DATA_DEFAULT 0x00000000
-#define mmGENERIC_I2C_PIN_SELECTION_DEFAULT 0x00000000
-#define mmDCO_SCRATCH0_DEFAULT 0x00000000
-#define mmDCO_SCRATCH1_DEFAULT 0x00000000
-#define mmDCO_SCRATCH2_DEFAULT 0x00000000
-#define mmDCO_SCRATCH3_DEFAULT 0x00000000
-#define mmDCO_SCRATCH4_DEFAULT 0x00000000
-#define mmDCO_SCRATCH5_DEFAULT 0x00000000
-#define mmDCO_SCRATCH6_DEFAULT 0x00000000
-#define mmDCO_SCRATCH7_DEFAULT 0x00000000
-#define mmDCE_VCE_CONTROL_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE2_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE3_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE4_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE5_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE6_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE7_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE8_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE9_DEFAULT 0x00000000
-#define mmDCO_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCO_MEM_PWR_CTRL_DEFAULT 0x6db6d800
-#define mmDCO_MEM_PWR_CTRL2_DEFAULT 0x001b0000
-#define mmDCO_CLK_CNTL_DEFAULT 0x00000000
-#define mmDCO_POWER_MANAGEMENT_CNTL_DEFAULT 0x00000000
-#define mmDIG_SOFT_RESET_2_DEFAULT 0x00000000
-#define mmDCO_STEREOSYNC_SEL_DEFAULT 0x00000000
-#define mmDCO_SOFT_RESET_DEFAULT 0x00000000
-#define mmDIG_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCO_MEM_PWR_STATUS1_DEFAULT 0x00000000
-#define mmDISP_INTERRUPT_STATUS_CONTINUE10_DEFAULT 0x00000000
-#define mmDCO_CLK_CNTL2_DEFAULT 0x00000000
-#define mmDCO_CLK_CNTL3_DEFAULT 0x00000000
-#define mmDCO_HDMI_RXSTATUS_TIMER_CONTROL_DEFAULT 0x00000000
-#define mmDCO_PSP_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDCO_PSP_INTERRUPT_CLEAR_DEFAULT 0x00000000
-#define mmDCO_GENERIC_INTERRUPT_MESSAGE_DEFAULT 0x00000000
-#define mmDCO_GENERIC_INTERRUPT_CLEAR_DEFAULT 0x00000000
-#define mmFMT_MEMORY0_CONTROL_DEFAULT 0x00000030
-#define mmFMT_MEMORY1_CONTROL_DEFAULT 0x00000031
-#define mmFMT_MEMORY2_CONTROL_DEFAULT 0x00000032
-#define mmFMT_MEMORY3_CONTROL_DEFAULT 0x00000033
-#define mmFMT_MEMORY4_CONTROL_DEFAULT 0x00000034
-#define mmFMT_MEMORY5_CONTROL_DEFAULT 0x00000035
-#define mmDISP_INTERRUPT_STATUS_CONTINUE11_DEFAULT 0x00000000
-#define mmDC_GENERICA_DEFAULT 0x00000000
-#define mmDC_GENERICB_DEFAULT 0x00000000
-#define mmDC_PAD_EXTERN_SIG_DEFAULT 0x00000000
-#define mmDC_REF_CLK_CNTL_DEFAULT 0x00000000
-#define mmDC_GPIO_DEBUG_DEFAULT 0x00000101
-#define mmUNIPHYA_LINK_CNTL_DEFAULT 0x01100100
-#define mmUNIPHYA_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYB_LINK_CNTL_DEFAULT 0x01100100
-#define mmUNIPHYB_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYC_LINK_CNTL_DEFAULT 0x01100100
-#define mmUNIPHYC_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYD_LINK_CNTL_DEFAULT 0x01100100
-#define mmUNIPHYD_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYE_LINK_CNTL_DEFAULT 0x01100100
-#define mmUNIPHYE_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYF_LINK_CNTL_DEFAULT 0x01100100
-#define mmUNIPHYF_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYG_LINK_CNTL_DEFAULT 0x01100100
-#define mmUNIPHYG_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmDCIO_WRCMD_DELAY_DEFAULT 0x00033333
-#define mmDC_DVODATA_CONFIG_DEFAULT 0x00000000
-#define mmLVTMA_PWRSEQ_CNTL_DEFAULT 0x00000000
-#define mmLVTMA_PWRSEQ_STATE_DEFAULT 0x00000000
-#define mmLVTMA_PWRSEQ_REF_DIV_DEFAULT 0x00010000
-#define mmLVTMA_PWRSEQ_DELAY1_DEFAULT 0x00000000
-#define mmLVTMA_PWRSEQ_DELAY2_DEFAULT 0x00000000
-#define mmBL_PWM_CNTL_DEFAULT 0x00000000
-#define mmBL_PWM_CNTL2_DEFAULT 0x00000000
-#define mmBL_PWM_PERIOD_CNTL_DEFAULT 0x00000001
-#define mmBL_PWM_GRP1_REG_LOCK_DEFAULT 0x00000000
-#define mmDCIO_GSL_GENLK_PAD_CNTL_DEFAULT 0x00000000
-#define mmDCIO_GSL_SWAPLOCK_PAD_CNTL_DEFAULT 0x00000000
-#define mmDCIO_GSL0_CNTL_DEFAULT 0x00000000
-#define mmDCIO_GSL1_CNTL_DEFAULT 0x00000000
-#define mmDCIO_GSL2_CNTL_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_START_POSITION_P_FLIP_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_READ_DEFAULT 0x00000000
-#define mmDC_GPU_TIMER_READ_CNTL_DEFAULT 0x00000000
-#define mmDCIO_CLOCK_CNTL_DEFAULT 0x00000000
-#define mmDCO_DCFE_EXT_VSYNC_CNTL_DEFAULT 0x00000000
-#define mmDCIO_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCIO_DPHY_SEL_DEFAULT 0x000000e4
-#define mmUNIPHY_IMPCAL_LINKA_DEFAULT 0x0f000000
-#define mmUNIPHY_IMPCAL_LINKB_DEFAULT 0x0f000000
-#define mmUNIPHY_IMPCAL_PERIOD_DEFAULT 0x00000000
-#define mmAUXP_IMPCAL_DEFAULT 0x0a000000
-#define mmAUXN_IMPCAL_DEFAULT 0x04000000
-#define mmDCIO_IMPCAL_CNTL_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_PSW_AB_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_LINKC_DEFAULT 0x0f000000
-#define mmUNIPHY_IMPCAL_LINKD_DEFAULT 0x0f000000
-#define mmDCIO_IMPCAL_CNTL_CD_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_PSW_CD_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_LINKE_DEFAULT 0x0f000000
-#define mmUNIPHY_IMPCAL_LINKF_DEFAULT 0x0f000000
-#define mmDCIO_IMPCAL_CNTL_EF_DEFAULT 0x00000000
-#define mmUNIPHY_IMPCAL_PSW_EF_DEFAULT 0x00000000
-#define mmUNIPHYLPA_LINK_CNTL_DEFAULT 0x01100100
-#define mmUNIPHYLPB_LINK_CNTL_DEFAULT 0x01100100
-#define mmUNIPHYLPA_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmUNIPHYLPB_CHANNEL_XBAR_CNTL_DEFAULT 0x03020100
-#define mmDCIO_DPCS_TX_INTERRUPT_DEFAULT 0x00000000
-#define mmDCIO_DPCS_RX_INTERRUPT_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE0_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE1_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE2_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE3_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE4_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE5_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE6_DEFAULT 0x00000000
-#define mmDCIO_SEMAPHORE7_DEFAULT 0x00000000
-#define mmDC_GPIO_GENERIC_MASK_DEFAULT 0x04444444
-#define mmDC_GPIO_GENERIC_A_DEFAULT 0x00000000
-#define mmDC_GPIO_GENERIC_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_GENERIC_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DVODATA_MASK_DEFAULT 0x00000000
-#define mmDC_GPIO_DVODATA_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DVODATA_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DVODATA_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC1_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC1_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC1_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC1_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC2_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC2_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC2_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC2_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC3_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC3_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC3_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC3_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC4_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC4_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC4_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC4_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC5_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC5_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC5_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC5_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC6_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDC6_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC6_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDC6_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_DDCVGA_MASK_DEFAULT 0xcf400000
-#define mmDC_GPIO_DDCVGA_A_DEFAULT 0x00000000
-#define mmDC_GPIO_DDCVGA_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_DDCVGA_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_SYNCA_MASK_DEFAULT 0x00004040
-#define mmDC_GPIO_SYNCA_A_DEFAULT 0x00000000
-#define mmDC_GPIO_SYNCA_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_SYNCA_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_GENLK_MASK_DEFAULT 0x10101a10
-#define mmDC_GPIO_GENLK_A_DEFAULT 0x00000000
-#define mmDC_GPIO_GENLK_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_GENLK_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_HPD_MASK_DEFAULT 0x44440440
-#define mmDC_GPIO_HPD_A_DEFAULT 0x00000000
-#define mmDC_GPIO_HPD_EN_DEFAULT 0x22220202
-#define mmDC_GPIO_HPD_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_PWRSEQ_MASK_DEFAULT 0x66404040
-#define mmDC_GPIO_PWRSEQ_A_DEFAULT 0x00000000
-#define mmDC_GPIO_PWRSEQ_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_PWRSEQ_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_PAD_STRENGTH_1_DEFAULT 0x47ac470f
-#define mmDC_GPIO_PAD_STRENGTH_2_DEFAULT 0x00472147
-#define mmPHY_AUX_CNTL_DEFAULT 0x00010001
-#define mmDC_GPIO_I2CPAD_MASK_DEFAULT 0x00000000
-#define mmDC_GPIO_I2CPAD_A_DEFAULT 0x00000000
-#define mmDC_GPIO_I2CPAD_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_I2CPAD_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_I2CPAD_STRENGTH_DEFAULT 0x0000004c
-#define mmDVO_STRENGTH_CONTROL_DEFAULT 0x31116060
-#define mmDVO_VREF_CONTROL_DEFAULT 0x00000000
-#define mmDVO_SKEW_ADJUST_DEFAULT 0x00000000
-#define mmDC_GPIO_I2S_SPDIF_MASK_DEFAULT 0x00000000
-#define mmDC_GPIO_I2S_SPDIF_A_DEFAULT 0x00000000
-#define mmDC_GPIO_I2S_SPDIF_EN_DEFAULT 0x00008000
-#define mmDC_GPIO_I2S_SPDIF_Y_DEFAULT 0x00000000
-#define mmDC_GPIO_I2S_SPDIF_STRENGTH_DEFAULT 0x01021202
-#define mmDC_GPIO_TX12_EN_DEFAULT 0x00000000
-#define mmDC_GPIO_AUX_CTRL_0_DEFAULT 0x00000000
-#define mmDC_GPIO_AUX_CTRL_1_DEFAULT 0x00500000
-#define mmDC_GPIO_AUX_CTRL_2_DEFAULT 0x00000000
-#define mmDC_GPIO_RXEN_DEFAULT 0x007fff7f
-#define mmBPHYC_DAC_MACRO_CNTL_DEFAULT 0x00202002
-#define mmDAC_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmBPHYC_DAC_AUTO_CALIB_CONTROL_DEFAULT 0x00700255
-#define mmDAC_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDAC_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDAC_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDISP_DSI_DUAL_CTRL_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDPRX_AUX_REFERENCE_PULSE_DIV_DEFAULT 0x0a640064
-#define mmDPRX_AUX_CONTROL_DEFAULT 0x01012c00
-#define mmDPRX_AUX_HPD_CONTROL1_DEFAULT 0x00001407
-#define mmDPRX_AUX_HPD_CONTROL2_DEFAULT 0x00000000
-#define mmDPRX_AUX_RX_STATUS_DEFAULT 0x00000000
-#define mmDPRX_AUX_RX_ERROR_MASK_DEFAULT 0x00000000
-#define mmDPRX_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDPRX_AUX_DPHY_TX_CONTROL_DEFAULT 0x00001002
-#define mmDPRX_AUX_DPHY_RX_CONTROL0_DEFAULT 0x203d1210
-#define mmDPRX_AUX_DPHY_RX_CONTROL1_DEFAULT 0x0a00fa00
-#define mmDPRX_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDPRX_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDPRX_AUX_DMCU_HW_INT_STATUS_DEFAULT 0x00003f00
-#define mmDPRX_AUX_DMCU_HW_INT_ACK_DEFAULT 0x00000000
-#define mmDPRX_AUX_CPU_TO_DMCU_INTERRUPT1_DEFAULT 0x00000000
-#define mmDPRX_AUX_CPU_TO_DMCU_INTERRUPT2_DEFAULT 0x00000001
-#define mmDPRX_AUX_DMCU_TO_CPU_INTERRUPT1_DEFAULT 0x00000000
-#define mmDPRX_AUX_DMCU_TO_CPU_INTERRUPT2_DEFAULT 0x00000000
-#define mmDPRX_AUX_AUX_BUF_INDEX_DEFAULT 0x00000000
-#define mmDPRX_AUX_AUX_BUF_DATA_DEFAULT 0x00000000
-#define mmDPRX_AUX_EDID_INDEX_DEFAULT 0x00000000
-#define mmDPRX_AUX_EDID_DATA_DEFAULT 0x00000000
-#define mmDPRX_AUX_DPCD_INDEX1_DEFAULT 0x00000000
-#define mmDPRX_AUX_DPCD_DATA1_DEFAULT 0x00000000
-#define mmDPRX_AUX_DPCD_INDEX2_DEFAULT 0x00000000
-#define mmDPRX_AUX_DPCD_DATA2_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG_INDEX1_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG_DATA1_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG_INDEX2_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG_DATA2_DEFAULT 0x00000000
-#define mmDPRX_AUX_KSV_INDEX1_DEFAULT 0x00000000
-#define mmDPRX_AUX_KSV_DATA1_DEFAULT 0x00000000
-#define mmDPRX_AUX_KSV_INDEX2_DEFAULT 0x00000000
-#define mmDPRX_AUX_KSV_DATA2_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG_TIMEOUT_CONTROL_DEFAULT 0x00000032
-#define mmDPRX_AUX_MSG_BUF_CONTROL1_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG_BUF_CONTROL2_DEFAULT 0x00000000
-#define mmDPRX_AUX_SCRATCH1_DEFAULT 0x00000000
-#define mmDPRX_AUX_SCRATCH2_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG1_PENDING_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG2_PENDING_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG3_PENDING_DEFAULT 0x00000000
-#define mmDPRX_AUX_MSG4_PENDING_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DPCD_LANE_COUNT_SET_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DPCD_TRAINING_PATTERN_SET_DEFAULT 0x00000003
-#define mmDPRX_DPHY_DPCD_MSTM_CTRL_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DPCD_LINK_QUAL_LANE0_SET_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DPCD_LINK_QUAL_LANE0_STATUS_DEFAULT 0x20000000
-#define mmDPRX_DPHY_DPCD_LINK_QUAL_LANE1_SET_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DPCD_LINK_QUAL_LANE1_STATUS_DEFAULT 0x20000000
-#define mmDPRX_DPHY_DPCD_LINK_QUAL_LANE2_SET_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DPCD_LINK_QUAL_LANE2_STATUS_DEFAULT 0x20000000
-#define mmDPRX_DPHY_DPCD_LINK_QUAL_LANE3_SET_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DPCD_LINK_QUAL_LANE3_STATUS_DEFAULT 0x20000000
-#define mmDPRX_DPHY_READY_DEFAULT 0x00000000
-#define mmDPRX_DPHY_COMMA_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_LANE_ALIGN_ERROR_STATUS_UPDATED_DEFAULT 0x00000000
-#define mmDPRX_DPHY_LANE_ALIGN_STATUS_UPDATED_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_THRESH_A_LANE0_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_A_LANE0_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_B_LANE0_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_C_LANE0_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_THRESH_A_LANE1_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_A_LANE1_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_B_LANE1_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_C_LANE1_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_THRESH_A_LANE2_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_A_LANE2_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_B_LANE2_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_C_LANE2_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_THRESH_A_LANE3_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_A_LANE3_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_B_LANE3_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ERROR_COUNT_C_LANE3_DEFAULT 0x00000000
-#define mmDPRX_DPHY_BS_ERROR_THRESH_GLOBAL_DEFAULT 0x00000000
-#define mmDPRX_DPHY_SR_ERROR_COUNT_A_DEFAULT 0x00000000
-#define mmDPRX_DPHY_BS_ERROR_COUNT_A_DEFAULT 0x00000000
-#define mmDPRX_DPHY_BS_ERROR_COUNT_B_DEFAULT 0x00000000
-#define mmDPRX_DPHY_LANESETUP0_DEFAULT 0x00000000
-#define mmDPRX_DPHY_LANESETUP1_DEFAULT 0x00000000
-#define mmDPRX_DPHY_LFSRADV_DEFAULT 0x00000039
-#define mmDPRX_DPHY_SEVENSYMBOLWINDOW_ERROR_DETECT_DEFAULT 0x00000000
-#define mmDPRX_DPHY_SET_ENABLE_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ECF_LSB_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ECF_MSB_DEFAULT 0x00000000
-#define mmDPRX_DPHY_ENHANCED_FRAME_EN_DEFAULT 0x00000001
-#define mmDPRX_DPHY_MTP_HEADER_COUNT_FORCE_DEFAULT 0x000a6800
-#define mmDPRX_DPHY_DYNAMIC_DESKEW_DATA_DEFAULT 0xbcbcbcbc
-#define mmDPRX_DPHY_DYNAMIC_DESKEW_CONTROL_DEFAULT 0x800071c5
-#define mmDPRX_DPHY_BYPASS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_INT_RESET_DEFAULT 0x00000000
-#define mmDPRX_DPHY_BS_INTERVAL_ERROR_THRESH_EXCEEDED_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_SYMBOL_ERROR_THRESH_EXCEEDED_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DISPARITY_ERROR_THRESH_EXCEEDED_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_TEST_PATTERN_ERROR_THRESH_EXCEEDED_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DETECT_SR_LOCK_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_LOSS_OF_ALIGN_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_LOSS_OF_DESKEW_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_EXCESSIVE_ERROR_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_DESKEW_FIFO_OVERFLOW_STATUS_DEFAULT 0x00000000
-#define mmDPRX_DPHY_SPARE_DEFAULT 0x00000000
-#define mmDCRX_GATE_DISABLE_CNTL_DEFAULT 0x00001f0f
-#define mmDCRX_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCRX_LIGHT_SLEEP_CNTL_DEFAULT 0x00000101
-#define mmDCRX_DISPCLK_GATE_CNTL_DEFAULT 0x00000200
-#define mmDCRX_CLK_CNTL_DEFAULT 0x00000000
-#define mmDCRX_TEST_CLK_CNTL_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED160_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED161_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED162_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED163_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED164_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED165_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED166_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED167_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED168_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED169_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED170_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED171_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED172_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED173_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED174_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED175_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED176_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED177_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED178_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED179_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED180_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED181_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED182_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED183_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED184_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED185_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED186_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED187_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED188_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED189_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED190_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED191_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED192_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED193_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED194_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED195_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED196_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED197_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED198_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED199_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED200_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED201_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED202_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED203_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED204_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED205_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED206_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED207_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED208_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED209_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED210_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED211_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED212_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED213_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED214_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED215_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED216_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED217_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED218_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED219_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED220_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED221_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED222_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED223_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED224_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED225_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED226_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED227_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED228_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED229_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED230_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED231_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED232_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED233_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED234_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED235_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED236_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED237_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED238_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED239_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED240_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED241_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED242_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED243_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED244_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED245_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED246_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED247_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED248_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED249_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED250_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED251_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED252_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED253_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED254_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED255_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED256_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED257_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED258_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED259_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED260_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED261_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED262_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED263_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED264_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED265_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED266_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED267_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED268_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED269_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED270_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED271_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED272_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED273_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED274_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED275_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED276_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED277_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED278_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED279_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED280_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED281_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED282_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED283_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED284_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED285_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED286_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED287_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED288_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED289_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED290_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED291_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED292_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED293_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED294_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED295_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED296_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED297_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED298_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED299_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED300_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED301_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED302_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED303_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED304_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED305_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED306_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED307_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED308_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED309_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED310_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED311_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED312_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED313_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED314_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED315_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED316_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED317_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED318_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED319_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED320_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED321_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED322_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED323_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED324_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED325_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED326_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED327_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED328_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED329_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED330_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED331_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED332_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED333_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED334_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED335_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED336_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED337_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED338_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED339_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED340_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED341_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED342_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED343_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED344_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED345_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED346_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED347_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED348_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED349_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED350_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED351_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED352_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED353_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED354_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED355_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED356_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED357_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED358_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED359_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED360_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED361_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED362_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED363_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED364_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED365_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED366_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED367_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED368_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED369_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED370_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED371_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED372_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED373_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED374_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED375_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED376_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED377_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED378_DEFAULT 0x00000000
-#define mmDCRX_PHY_MACRO_CNTL_RESERVED379_DEFAULT 0x00000000
-#define mmI2S0_CNTL_DEFAULT 0x00010000
-#define mmSPDIF0_CNTL_DEFAULT 0x00000000
-#define mmI2S1_CNTL_DEFAULT 0x00010000
-#define mmSPDIF1_CNTL_DEFAULT 0x00000000
-#define mmI2S0_STATUS_DEFAULT 0x00000000
-#define mmI2S1_STATUS_DEFAULT 0x00000000
-#define mmI2S0_CRC_TEST_CNTL_DEFAULT 0x00000100
-#define mmI2S0_CRC_TEST_DATA_01_DEFAULT 0x00000000
-#define mmI2S0_CRC_TEST_DATA_23_DEFAULT 0x00000000
-#define mmI2S1_CRC_TEST_CNTL_DEFAULT 0x00000100
-#define mmI2S1_CRC_TEST_DATA_0_DEFAULT 0x00000000
-#define mmSPDIF0_CRC_TEST_CNTL_DEFAULT 0x00000100
-#define mmSPDIF0_CRC_TEST_DATA_0_DEFAULT 0x00000000
-#define mmSPDIF1_CRC_TEST_CNTL_DEFAULT 0x00000100
-#define mmSPDIF1_CRC_TEST_DATA_DEFAULT 0x00000000
-#define mmCRC_I2S_CONT_REPEAT_NUM_DEFAULT 0x00000000
-#define mmCRC_SPDIF_CONT_REPEAT_NUM_DEFAULT 0x00000000
-#define mmZCAL_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmZCAL_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmZCAL_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmZCAL_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmZCAL_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream0_dispdec
-#define mmAZF0STREAM0_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM0_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream1_dispdec
-#define mmAZF0STREAM1_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM1_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream2_dispdec
-#define mmAZF0STREAM2_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM2_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream3_dispdec
-#define mmAZF0STREAM3_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM3_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream4_dispdec
-#define mmAZF0STREAM4_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM4_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream5_dispdec
-#define mmAZF0STREAM5_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM5_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream6_dispdec
-#define mmAZF0STREAM6_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM6_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream7_dispdec
-#define mmAZF0STREAM7_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM7_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0endpoint0_dispdec
-#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0endpoint1_dispdec
-#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0endpoint2_dispdec
-#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0endpoint3_dispdec
-#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0endpoint4_dispdec
-#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0endpoint5_dispdec
-#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0endpoint6_dispdec
-#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0endpoint7_dispdec
-#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream8_dispdec
-#define mmAZF0STREAM8_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM8_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream9_dispdec
-#define mmAZF0STREAM9_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM9_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream10_dispdec
-#define mmAZF0STREAM10_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM10_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream11_dispdec
-#define mmAZF0STREAM11_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM11_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream12_dispdec
-#define mmAZF0STREAM12_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM12_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream13_dispdec
-#define mmAZF0STREAM13_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM13_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream14_dispdec
-#define mmAZF0STREAM14_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM14_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0stream15_dispdec
-#define mmAZF0STREAM15_AZALIA_STREAM_INDEX_DEFAULT 0x00000000
-#define mmAZF0STREAM15_AZALIA_STREAM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0inputendpoint0_dispdec
-#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0inputendpoint1_dispdec
-#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0inputendpoint2_dispdec
-#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0inputendpoint3_dispdec
-#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0inputendpoint4_dispdec
-#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0inputendpoint5_dispdec
-#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0inputendpoint6_dispdec
-#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azf0inputendpoint7_dispdec
-#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_DEFAULT 0x00000000
-#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcp0_dispdec
-#define mmDCP0_GRPH_ENABLE_DEFAULT 0x00000001
-#define mmDCP0_GRPH_CONTROL_DEFAULT 0x20002040
-#define mmDCP0_GRPH_LUT_10BIT_BYPASS_DEFAULT 0x00000000
-#define mmDCP0_GRPH_SWAP_CNTL_DEFAULT 0x00000000
-#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP0_GRPH_PITCH_DEFAULT 0x00000000
-#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP0_GRPH_SURFACE_OFFSET_X_DEFAULT 0x00000000
-#define mmDCP0_GRPH_SURFACE_OFFSET_Y_DEFAULT 0x00000000
-#define mmDCP0_GRPH_X_START_DEFAULT 0x00000000
-#define mmDCP0_GRPH_Y_START_DEFAULT 0x00000000
-#define mmDCP0_GRPH_X_END_DEFAULT 0x00000000
-#define mmDCP0_GRPH_Y_END_DEFAULT 0x00000000
-#define mmDCP0_INPUT_GAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_GRPH_UPDATE_DEFAULT 0x00000000
-#define mmDCP0_GRPH_FLIP_CONTROL_DEFAULT 0x00000020
-#define mmDCP0_GRPH_SURFACE_ADDRESS_INUSE_DEFAULT 0x00000000
-#define mmDCP0_GRPH_DFQ_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_GRPH_DFQ_STATUS_DEFAULT 0x00000000
-#define mmDCP0_GRPH_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDCP0_GRPH_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_GRPH_SURFACE_ADDRESS_HIGH_INUSE_DEFAULT 0x00000000
-#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP0_GRPH_COMPRESS_PITCH_DEFAULT 0x00000000
-#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP0_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT_DEFAULT 0x000000ff
-#define mmDCP0_PRESCALE_GRPH_CONTROL_DEFAULT 0x00000010
-#define mmDCP0_PRESCALE_VALUES_GRPH_R_DEFAULT 0x20000000
-#define mmDCP0_PRESCALE_VALUES_GRPH_G_DEFAULT 0x20000000
-#define mmDCP0_PRESCALE_VALUES_GRPH_B_DEFAULT 0x20000000
-#define mmDCP0_INPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_INPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP0_INPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP0_INPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP0_INPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP0_INPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP0_INPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP0_OUTPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_OUTPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP0_OUTPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP0_OUTPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP0_OUTPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP0_OUTPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP0_OUTPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP0_COMM_MATRIXA_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP0_COMM_MATRIXA_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP0_COMM_MATRIXA_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP0_COMM_MATRIXA_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP0_COMM_MATRIXA_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP0_COMM_MATRIXA_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP0_COMM_MATRIXB_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP0_COMM_MATRIXB_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP0_COMM_MATRIXB_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP0_COMM_MATRIXB_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP0_COMM_MATRIXB_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP0_COMM_MATRIXB_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP0_DENORM_CONTROL_DEFAULT 0x00000003
-#define mmDCP0_OUT_ROUND_CONTROL_DEFAULT 0x0000000a
-#define mmDCP0_OUT_CLAMP_CONTROL_R_CR_DEFAULT 0x00003fff
-#define mmDCP0_OUT_CLAMP_CONTROL_G_Y_DEFAULT 0x00003fff
-#define mmDCP0_OUT_CLAMP_CONTROL_B_CB_DEFAULT 0x00003fff
-#define mmDCP0_KEY_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_KEY_RANGE_ALPHA_DEFAULT 0x00000000
-#define mmDCP0_KEY_RANGE_RED_DEFAULT 0x00000000
-#define mmDCP0_KEY_RANGE_GREEN_DEFAULT 0x00000000
-#define mmDCP0_KEY_RANGE_BLUE_DEFAULT 0x00000000
-#define mmDCP0_DEGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmDCP0_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmDCP0_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmDCP0_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmDCP0_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmDCP0_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmDCP0_DCP_SPATIAL_DITHER_CNTL_DEFAULT 0x00000000
-#define mmDCP0_DCP_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmDCP0_DCP_FP_CONVERTED_FIELD_DEFAULT 0x00000000
-#define mmDCP0_CUR_CONTROL_DEFAULT 0x00000810
-#define mmDCP0_CUR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP0_CUR_SIZE_DEFAULT 0x00000000
-#define mmDCP0_CUR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP0_CUR_POSITION_DEFAULT 0x00000000
-#define mmDCP0_CUR_HOT_SPOT_DEFAULT 0x00000000
-#define mmDCP0_CUR_COLOR1_DEFAULT 0x00000000
-#define mmDCP0_CUR_COLOR2_DEFAULT 0x00000000
-#define mmDCP0_CUR_UPDATE_DEFAULT 0x00000000
-#define mmDCP0_CUR_REQUEST_FILTER_CNTL_DEFAULT 0x00000000
-#define mmDCP0_CUR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_RW_MODE_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_VGA_ACCESS_ENABLE_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP0_DC_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_BLACK_OFFSET_BLUE_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_BLACK_OFFSET_GREEN_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_BLACK_OFFSET_RED_DEFAULT 0x00000000
-#define mmDCP0_DC_LUT_WHITE_OFFSET_BLUE_DEFAULT 0x0000ffff
-#define mmDCP0_DC_LUT_WHITE_OFFSET_GREEN_DEFAULT 0x0000ffff
-#define mmDCP0_DC_LUT_WHITE_OFFSET_RED_DEFAULT 0x0000ffff
-#define mmDCP0_DCP_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_DCP_CRC_MASK_DEFAULT 0x00000000
-#define mmDCP0_DCP_CRC_CURRENT_DEFAULT 0x00000000
-#define mmDCP0_DVMM_PTE_CONTROL_DEFAULT 0x00004000
-#define mmDCP0_DCP_CRC_LAST_DEFAULT 0x00000000
-#define mmDCP0_DVMM_PTE_ARB_CONTROL_DEFAULT 0x00002220
-#define mmDCP0_GRPH_FLIP_RATE_CNTL_DEFAULT 0x00000000
-#define mmDCP0_DCP_GSL_CONTROL_DEFAULT 0x60000020
-#define mmDCP0_DCP_LB_DATA_GAP_BETWEEN_CHUNK_DEFAULT 0x00000035
-#define mmDCP0_GRPH_STEREOSYNC_FLIP_DEFAULT 0x00000200
-#define mmDCP0_HW_ROTATION_DEFAULT 0x00000000
-#define mmDCP0_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL_DEFAULT 0x00000010
-#define mmDCP0_REGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_LUT_INDEX_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_LUT_DATA_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP0_REGAMMA_CNTLA_START_CNTL_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLA_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_START_CNTL_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP0_REGAMMA_CNTLB_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP0_ALPHA_CONTROL_DEFAULT 0x00000002
-#define mmDCP0_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP0_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP0_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS_DEFAULT 0x00000000
-#define mmDCP0_GRPH_XDMA_FLIP_TIMEOUT_DEFAULT 0x00000000
-#define mmDCP0_GRPH_XDMA_FLIP_AVG_DELAY_DEFAULT 0x00000000
-#define mmDCP0_GRPH_SURFACE_COUNTER_CONTROL_DEFAULT 0x00000012
-#define mmDCP0_GRPH_SURFACE_COUNTER_OUTPUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_lb0_dispdec
-#define mmLB0_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmLB0_LB_MEMORY_CTRL_DEFAULT 0x000006b0
-#define mmLB0_LB_MEMORY_SIZE_STATUS_DEFAULT 0x00000000
-#define mmLB0_LB_DESKTOP_HEIGHT_DEFAULT 0x00000000
-#define mmLB0_LB_VLINE_START_END_DEFAULT 0x00000000
-#define mmLB0_LB_VLINE2_START_END_DEFAULT 0x00000000
-#define mmLB0_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmLB0_LB_SNAPSHOT_V_COUNTER_DEFAULT 0x00000000
-#define mmLB0_LB_INTERRUPT_MASK_DEFAULT 0x00000000
-#define mmLB0_LB_VLINE_STATUS_DEFAULT 0x00000000
-#define mmLB0_LB_VLINE2_STATUS_DEFAULT 0x00000000
-#define mmLB0_LB_VBLANK_STATUS_DEFAULT 0x00000000
-#define mmLB0_LB_SYNC_RESET_SEL_DEFAULT 0x00000002
-#define mmLB0_LB_BLACK_KEYER_R_CR_DEFAULT 0x00000000
-#define mmLB0_LB_BLACK_KEYER_G_Y_DEFAULT 0x00000000
-#define mmLB0_LB_BLACK_KEYER_B_CB_DEFAULT 0x00000000
-#define mmLB0_LB_KEYER_COLOR_CTRL_DEFAULT 0x00000000
-#define mmLB0_LB_KEYER_COLOR_R_CR_DEFAULT 0x00000000
-#define mmLB0_LB_KEYER_COLOR_G_Y_DEFAULT 0x00000000
-#define mmLB0_LB_KEYER_COLOR_B_CB_DEFAULT 0x00000000
-#define mmLB0_LB_KEYER_COLOR_REP_R_CR_DEFAULT 0x00000000
-#define mmLB0_LB_KEYER_COLOR_REP_G_Y_DEFAULT 0x00000000
-#define mmLB0_LB_KEYER_COLOR_REP_B_CB_DEFAULT 0x00000000
-#define mmLB0_LB_BUFFER_LEVEL_STATUS_DEFAULT 0xa0008000
-#define mmLB0_LB_BUFFER_URGENCY_CTRL_DEFAULT 0x00200010
-#define mmLB0_LB_BUFFER_URGENCY_STATUS_DEFAULT 0x00000000
-#define mmLB0_LB_BUFFER_STATUS_DEFAULT 0x00000002
-#define mmLB0_LB_NO_OUTSTANDING_REQ_STATUS_DEFAULT 0x00000000
-#define mmLB0_MVP_AFR_FLIP_MODE_DEFAULT 0x00000000
-#define mmLB0_MVP_AFR_FLIP_FIFO_CNTL_DEFAULT 0x00000000
-#define mmLB0_MVP_FLIP_LINE_NUM_INSERT_DEFAULT 0x00000002
-#define mmLB0_DC_MVP_LB_CONTROL_DEFAULT 0x00000001
-
-
-// addressBlock: dce_dc_dcfe0_dispdec
-#define mmDCFE0_DCFE_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFE0_DCFE_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCFE0_DCFE_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFE0_DCFE_MEM_PWR_CTRL2_DEFAULT 0x00000000
-#define mmDCFE0_DCFE_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFE0_DCFE_MISC_DEFAULT 0x00000001
-#define mmDCFE0_DCFE_FLUSH_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon3_dispdec
-#define mmDC_PERFMON3_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON3_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON3_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmif_pg0_dispdec
-#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL_DEFAULT 0x000bf777
-#define mmDMIF_PG0_DPG_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG0_DPG_PIPE_URGENT_LEVEL_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG0_DPG_PIPE_LOW_POWER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG0_DPG_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIF_PG0_DPG_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-#define mmDMIF_PG0_DPG_DVMM_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_scl0_dispdec
-#define mmSCL0_SCL_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmSCL0_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmSCL0_SCL_MODE_DEFAULT 0x00000000
-#define mmSCL0_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmSCL0_SCL_CONTROL_DEFAULT 0x00000000
-#define mmSCL0_SCL_BYPASS_CONTROL_DEFAULT 0x00000000
-#define mmSCL0_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmSCL0_SCL_AUTOMATIC_MODE_CONTROL_DEFAULT 0x00000000
-#define mmSCL0_SCL_HORZ_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL0_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL0_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL0_SCL_VERT_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL0_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL0_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL0_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCL0_SCL_ROUND_OFFSET_DEFAULT 0x80000000
-#define mmSCL0_SCL_UPDATE_DEFAULT 0x00000000
-#define mmSCL0_SCL_F_SHARP_CONTROL_DEFAULT 0x00000000
-#define mmSCL0_SCL_ALU_CONTROL_DEFAULT 0x00000000
-#define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS_DEFAULT 0x00000000
-#define mmSCL0_VIEWPORT_START_SECONDARY_DEFAULT 0x00000000
-#define mmSCL0_VIEWPORT_START_DEFAULT 0x00000000
-#define mmSCL0_VIEWPORT_SIZE_DEFAULT 0x00000000
-#define mmSCL0_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmSCL0_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmSCL0_SCL_MODE_CHANGE_DET1_DEFAULT 0x00000000
-#define mmSCL0_SCL_MODE_CHANGE_DET2_DEFAULT 0x00000000
-#define mmSCL0_SCL_MODE_CHANGE_DET3_DEFAULT 0x00000000
-#define mmSCL0_SCL_MODE_CHANGE_MASK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_blnd0_dispdec
-#define mmBLND0_BLND_CONTROL_DEFAULT 0xff0220ff
-#define mmBLND0_BLND_SM_CONTROL2_DEFAULT 0x00000000
-#define mmBLND0_BLND_CONTROL2_DEFAULT 0x00000010
-#define mmBLND0_BLND_UPDATE_DEFAULT 0x00000000
-#define mmBLND0_BLND_UNDERFLOW_INTERRUPT_DEFAULT 0x00000000
-#define mmBLND0_BLND_V_UPDATE_LOCK_DEFAULT 0x80000000
-#define mmBLND0_BLND_REG_UPDATE_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_crtc0_dispdec
-#define mmCRTC0_CRTC_H_BLANK_EARLY_NUM_DEFAULT 0x00000040
-#define mmCRTC0_CRTC_H_TOTAL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_H_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_H_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_H_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VBI_END_DEFAULT 0x00000003
-#define mmCRTC0_CRTC_V_TOTAL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_V_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_V_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_V_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_DTMTEST_CNTL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_DTMTEST_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CONTROL_DEFAULT 0x80400110
-#define mmCRTC0_CRTC_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_STATUS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_COUNT_RESET_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_STEREO_STATUS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_START_LINE_CONTROL_DEFAULT 0x00003002
-#define mmCRTC0_CRTC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VGA_PARAMETER_CAPTURE_MODE_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_MASTER_UPDATE_LOCK_DEFAULT 0x00010000
-#define mmCRTC0_CRTC_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT_TIMER_DEFAULT 0x00000008
-#define mmCRTC0_CRTC_MVP_STATUS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_MASTER_EN_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_ALLOW_STOP_OFF_V_CNT_DEFAULT 0x00010000
-#define mmCRTC0_CRTC_V_UPDATE_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_OVERSCAN_COLOR_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_OVERSCAN_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_BLACK_COLOR_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC_CNTL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_EXT_TIMING_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_EXT_TIMING_SYNC_WINDOW_START_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_EXT_TIMING_SYNC_WINDOW_END_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmCRTC0_CRTC_3D_STRUCTURE_CONTROL_DEFAULT 0x00000010
-#define mmCRTC0_CRTC_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_GSL_WINDOW_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_GSL_CONTROL_DEFAULT 0x00020000
-#define mmCRTC0_CRTC_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC0_CRTC_DRR_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_fmt0_dispdec
-#define mmFMT0_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT0_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT0_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT0_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT0_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT0_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT0_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT0_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT0_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT0_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT0_FMT_CRC_CNTL_DEFAULT 0x01000040
-#define mmFMT0_FMT_CRC_SIG_RED_GREEN_MASK_DEFAULT 0x00ff00ff
-#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0x000700ff
-#define mmFMT0_FMT_CRC_SIG_RED_GREEN_DEFAULT 0x00000000
-#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL_DEFAULT 0x00000000
-#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT0_FMT_420_HBLANK_EARLY_START_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcp1_dispdec
-#define mmDCP1_GRPH_ENABLE_DEFAULT 0x00000001
-#define mmDCP1_GRPH_CONTROL_DEFAULT 0x20002040
-#define mmDCP1_GRPH_LUT_10BIT_BYPASS_DEFAULT 0x00000000
-#define mmDCP1_GRPH_SWAP_CNTL_DEFAULT 0x00000000
-#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP1_GRPH_PITCH_DEFAULT 0x00000000
-#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP1_GRPH_SURFACE_OFFSET_X_DEFAULT 0x00000000
-#define mmDCP1_GRPH_SURFACE_OFFSET_Y_DEFAULT 0x00000000
-#define mmDCP1_GRPH_X_START_DEFAULT 0x00000000
-#define mmDCP1_GRPH_Y_START_DEFAULT 0x00000000
-#define mmDCP1_GRPH_X_END_DEFAULT 0x00000000
-#define mmDCP1_GRPH_Y_END_DEFAULT 0x00000000
-#define mmDCP1_INPUT_GAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_GRPH_UPDATE_DEFAULT 0x00000000
-#define mmDCP1_GRPH_FLIP_CONTROL_DEFAULT 0x00000020
-#define mmDCP1_GRPH_SURFACE_ADDRESS_INUSE_DEFAULT 0x00000000
-#define mmDCP1_GRPH_DFQ_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_GRPH_DFQ_STATUS_DEFAULT 0x00000000
-#define mmDCP1_GRPH_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDCP1_GRPH_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_GRPH_SURFACE_ADDRESS_HIGH_INUSE_DEFAULT 0x00000000
-#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP1_GRPH_COMPRESS_PITCH_DEFAULT 0x00000000
-#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP1_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT_DEFAULT 0x000000ff
-#define mmDCP1_PRESCALE_GRPH_CONTROL_DEFAULT 0x00000010
-#define mmDCP1_PRESCALE_VALUES_GRPH_R_DEFAULT 0x20000000
-#define mmDCP1_PRESCALE_VALUES_GRPH_G_DEFAULT 0x20000000
-#define mmDCP1_PRESCALE_VALUES_GRPH_B_DEFAULT 0x20000000
-#define mmDCP1_INPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_INPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP1_INPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP1_INPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP1_INPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP1_INPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP1_INPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP1_OUTPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_OUTPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP1_OUTPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP1_OUTPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP1_OUTPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP1_OUTPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP1_OUTPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP1_COMM_MATRIXA_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP1_COMM_MATRIXA_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP1_COMM_MATRIXA_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP1_COMM_MATRIXA_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP1_COMM_MATRIXA_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP1_COMM_MATRIXA_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP1_COMM_MATRIXB_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP1_COMM_MATRIXB_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP1_COMM_MATRIXB_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP1_COMM_MATRIXB_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP1_COMM_MATRIXB_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP1_COMM_MATRIXB_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP1_DENORM_CONTROL_DEFAULT 0x00000003
-#define mmDCP1_OUT_ROUND_CONTROL_DEFAULT 0x0000000a
-#define mmDCP1_OUT_CLAMP_CONTROL_R_CR_DEFAULT 0x00003fff
-#define mmDCP1_OUT_CLAMP_CONTROL_G_Y_DEFAULT 0x00003fff
-#define mmDCP1_OUT_CLAMP_CONTROL_B_CB_DEFAULT 0x00003fff
-#define mmDCP1_KEY_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_KEY_RANGE_ALPHA_DEFAULT 0x00000000
-#define mmDCP1_KEY_RANGE_RED_DEFAULT 0x00000000
-#define mmDCP1_KEY_RANGE_GREEN_DEFAULT 0x00000000
-#define mmDCP1_KEY_RANGE_BLUE_DEFAULT 0x00000000
-#define mmDCP1_DEGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmDCP1_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmDCP1_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmDCP1_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmDCP1_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmDCP1_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmDCP1_DCP_SPATIAL_DITHER_CNTL_DEFAULT 0x00000000
-#define mmDCP1_DCP_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmDCP1_DCP_FP_CONVERTED_FIELD_DEFAULT 0x00000000
-#define mmDCP1_CUR_CONTROL_DEFAULT 0x00000810
-#define mmDCP1_CUR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP1_CUR_SIZE_DEFAULT 0x00000000
-#define mmDCP1_CUR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP1_CUR_POSITION_DEFAULT 0x00000000
-#define mmDCP1_CUR_HOT_SPOT_DEFAULT 0x00000000
-#define mmDCP1_CUR_COLOR1_DEFAULT 0x00000000
-#define mmDCP1_CUR_COLOR2_DEFAULT 0x00000000
-#define mmDCP1_CUR_UPDATE_DEFAULT 0x00000000
-#define mmDCP1_CUR_REQUEST_FILTER_CNTL_DEFAULT 0x00000000
-#define mmDCP1_CUR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_RW_MODE_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_VGA_ACCESS_ENABLE_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP1_DC_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_BLACK_OFFSET_BLUE_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_BLACK_OFFSET_GREEN_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_BLACK_OFFSET_RED_DEFAULT 0x00000000
-#define mmDCP1_DC_LUT_WHITE_OFFSET_BLUE_DEFAULT 0x0000ffff
-#define mmDCP1_DC_LUT_WHITE_OFFSET_GREEN_DEFAULT 0x0000ffff
-#define mmDCP1_DC_LUT_WHITE_OFFSET_RED_DEFAULT 0x0000ffff
-#define mmDCP1_DCP_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_DCP_CRC_MASK_DEFAULT 0x00000000
-#define mmDCP1_DCP_CRC_CURRENT_DEFAULT 0x00000000
-#define mmDCP1_DVMM_PTE_CONTROL_DEFAULT 0x00004000
-#define mmDCP1_DCP_CRC_LAST_DEFAULT 0x00000000
-#define mmDCP1_DVMM_PTE_ARB_CONTROL_DEFAULT 0x00002220
-#define mmDCP1_GRPH_FLIP_RATE_CNTL_DEFAULT 0x00000000
-#define mmDCP1_DCP_GSL_CONTROL_DEFAULT 0x60000020
-#define mmDCP1_DCP_LB_DATA_GAP_BETWEEN_CHUNK_DEFAULT 0x00000035
-#define mmDCP1_GRPH_STEREOSYNC_FLIP_DEFAULT 0x00000200
-#define mmDCP1_HW_ROTATION_DEFAULT 0x00000000
-#define mmDCP1_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL_DEFAULT 0x00000010
-#define mmDCP1_REGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_LUT_INDEX_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_LUT_DATA_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP1_REGAMMA_CNTLA_START_CNTL_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLA_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_START_CNTL_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP1_REGAMMA_CNTLB_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP1_ALPHA_CONTROL_DEFAULT 0x00000002
-#define mmDCP1_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP1_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP1_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS_DEFAULT 0x00000000
-#define mmDCP1_GRPH_XDMA_FLIP_TIMEOUT_DEFAULT 0x00000000
-#define mmDCP1_GRPH_XDMA_FLIP_AVG_DELAY_DEFAULT 0x00000000
-#define mmDCP1_GRPH_SURFACE_COUNTER_CONTROL_DEFAULT 0x00000012
-#define mmDCP1_GRPH_SURFACE_COUNTER_OUTPUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_lb1_dispdec
-#define mmLB1_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmLB1_LB_MEMORY_CTRL_DEFAULT 0x000006b0
-#define mmLB1_LB_MEMORY_SIZE_STATUS_DEFAULT 0x00000000
-#define mmLB1_LB_DESKTOP_HEIGHT_DEFAULT 0x00000000
-#define mmLB1_LB_VLINE_START_END_DEFAULT 0x00000000
-#define mmLB1_LB_VLINE2_START_END_DEFAULT 0x00000000
-#define mmLB1_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmLB1_LB_SNAPSHOT_V_COUNTER_DEFAULT 0x00000000
-#define mmLB1_LB_INTERRUPT_MASK_DEFAULT 0x00000000
-#define mmLB1_LB_VLINE_STATUS_DEFAULT 0x00000000
-#define mmLB1_LB_VLINE2_STATUS_DEFAULT 0x00000000
-#define mmLB1_LB_VBLANK_STATUS_DEFAULT 0x00000000
-#define mmLB1_LB_SYNC_RESET_SEL_DEFAULT 0x00000002
-#define mmLB1_LB_BLACK_KEYER_R_CR_DEFAULT 0x00000000
-#define mmLB1_LB_BLACK_KEYER_G_Y_DEFAULT 0x00000000
-#define mmLB1_LB_BLACK_KEYER_B_CB_DEFAULT 0x00000000
-#define mmLB1_LB_KEYER_COLOR_CTRL_DEFAULT 0x00000000
-#define mmLB1_LB_KEYER_COLOR_R_CR_DEFAULT 0x00000000
-#define mmLB1_LB_KEYER_COLOR_G_Y_DEFAULT 0x00000000
-#define mmLB1_LB_KEYER_COLOR_B_CB_DEFAULT 0x00000000
-#define mmLB1_LB_KEYER_COLOR_REP_R_CR_DEFAULT 0x00000000
-#define mmLB1_LB_KEYER_COLOR_REP_G_Y_DEFAULT 0x00000000
-#define mmLB1_LB_KEYER_COLOR_REP_B_CB_DEFAULT 0x00000000
-#define mmLB1_LB_BUFFER_LEVEL_STATUS_DEFAULT 0xa0008000
-#define mmLB1_LB_BUFFER_URGENCY_CTRL_DEFAULT 0x00200010
-#define mmLB1_LB_BUFFER_URGENCY_STATUS_DEFAULT 0x00000000
-#define mmLB1_LB_BUFFER_STATUS_DEFAULT 0x00000002
-#define mmLB1_LB_NO_OUTSTANDING_REQ_STATUS_DEFAULT 0x00000000
-#define mmLB1_MVP_AFR_FLIP_MODE_DEFAULT 0x00000000
-#define mmLB1_MVP_AFR_FLIP_FIFO_CNTL_DEFAULT 0x00000000
-#define mmLB1_MVP_FLIP_LINE_NUM_INSERT_DEFAULT 0x00000002
-#define mmLB1_DC_MVP_LB_CONTROL_DEFAULT 0x00000001
-
-
-// addressBlock: dce_dc_dcfe1_dispdec
-#define mmDCFE1_DCFE_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFE1_DCFE_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCFE1_DCFE_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFE1_DCFE_MEM_PWR_CTRL2_DEFAULT 0x00000000
-#define mmDCFE1_DCFE_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFE1_DCFE_MISC_DEFAULT 0x00000001
-#define mmDCFE1_DCFE_FLUSH_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon4_dispdec
-#define mmDC_PERFMON4_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON4_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON4_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmif_pg1_dispdec
-#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL_DEFAULT 0x000bf777
-#define mmDMIF_PG1_DPG_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG1_DPG_PIPE_URGENT_LEVEL_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG1_DPG_PIPE_LOW_POWER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG1_DPG_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIF_PG1_DPG_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-#define mmDMIF_PG1_DPG_DVMM_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_scl1_dispdec
-#define mmSCL1_SCL_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmSCL1_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmSCL1_SCL_MODE_DEFAULT 0x00000000
-#define mmSCL1_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmSCL1_SCL_CONTROL_DEFAULT 0x00000000
-#define mmSCL1_SCL_BYPASS_CONTROL_DEFAULT 0x00000000
-#define mmSCL1_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmSCL1_SCL_AUTOMATIC_MODE_CONTROL_DEFAULT 0x00000000
-#define mmSCL1_SCL_HORZ_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL1_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL1_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL1_SCL_VERT_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL1_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL1_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL1_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCL1_SCL_ROUND_OFFSET_DEFAULT 0x80000000
-#define mmSCL1_SCL_UPDATE_DEFAULT 0x00000000
-#define mmSCL1_SCL_F_SHARP_CONTROL_DEFAULT 0x00000000
-#define mmSCL1_SCL_ALU_CONTROL_DEFAULT 0x00000000
-#define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS_DEFAULT 0x00000000
-#define mmSCL1_VIEWPORT_START_SECONDARY_DEFAULT 0x00000000
-#define mmSCL1_VIEWPORT_START_DEFAULT 0x00000000
-#define mmSCL1_VIEWPORT_SIZE_DEFAULT 0x00000000
-#define mmSCL1_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmSCL1_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmSCL1_SCL_MODE_CHANGE_DET1_DEFAULT 0x00000000
-#define mmSCL1_SCL_MODE_CHANGE_DET2_DEFAULT 0x00000000
-#define mmSCL1_SCL_MODE_CHANGE_DET3_DEFAULT 0x00000000
-#define mmSCL1_SCL_MODE_CHANGE_MASK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_blnd1_dispdec
-#define mmBLND1_BLND_CONTROL_DEFAULT 0xff0220ff
-#define mmBLND1_BLND_SM_CONTROL2_DEFAULT 0x00000000
-#define mmBLND1_BLND_CONTROL2_DEFAULT 0x00000010
-#define mmBLND1_BLND_UPDATE_DEFAULT 0x00000000
-#define mmBLND1_BLND_UNDERFLOW_INTERRUPT_DEFAULT 0x00000000
-#define mmBLND1_BLND_V_UPDATE_LOCK_DEFAULT 0x80000000
-#define mmBLND1_BLND_REG_UPDATE_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_crtc1_dispdec
-#define mmCRTC1_CRTC_H_BLANK_EARLY_NUM_DEFAULT 0x00000040
-#define mmCRTC1_CRTC_H_TOTAL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_H_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_H_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_H_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VBI_END_DEFAULT 0x00000003
-#define mmCRTC1_CRTC_V_TOTAL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_V_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_V_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_V_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_DTMTEST_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_DTMTEST_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CONTROL_DEFAULT 0x80400110
-#define mmCRTC1_CRTC_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_STATUS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_COUNT_RESET_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_STEREO_STATUS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_START_LINE_CONTROL_DEFAULT 0x00003002
-#define mmCRTC1_CRTC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VGA_PARAMETER_CAPTURE_MODE_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_MASTER_UPDATE_LOCK_DEFAULT 0x00010000
-#define mmCRTC1_CRTC_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT_TIMER_DEFAULT 0x00000008
-#define mmCRTC1_CRTC_MVP_STATUS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_MASTER_EN_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_ALLOW_STOP_OFF_V_CNT_DEFAULT 0x00010000
-#define mmCRTC1_CRTC_V_UPDATE_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_OVERSCAN_COLOR_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_OVERSCAN_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_BLACK_COLOR_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC_CNTL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_EXT_TIMING_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_EXT_TIMING_SYNC_WINDOW_START_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_EXT_TIMING_SYNC_WINDOW_END_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmCRTC1_CRTC_3D_STRUCTURE_CONTROL_DEFAULT 0x00000010
-#define mmCRTC1_CRTC_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_GSL_WINDOW_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_GSL_CONTROL_DEFAULT 0x00020000
-#define mmCRTC1_CRTC_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC1_CRTC_DRR_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_fmt1_dispdec
-#define mmFMT1_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT1_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT1_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT1_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT1_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT1_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT1_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT1_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT1_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT1_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT1_FMT_CRC_CNTL_DEFAULT 0x01000040
-#define mmFMT1_FMT_CRC_SIG_RED_GREEN_MASK_DEFAULT 0x00ff00ff
-#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0x000700ff
-#define mmFMT1_FMT_CRC_SIG_RED_GREEN_DEFAULT 0x00000000
-#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL_DEFAULT 0x00000000
-#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT1_FMT_420_HBLANK_EARLY_START_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcp2_dispdec
-#define mmDCP2_GRPH_ENABLE_DEFAULT 0x00000001
-#define mmDCP2_GRPH_CONTROL_DEFAULT 0x20002040
-#define mmDCP2_GRPH_LUT_10BIT_BYPASS_DEFAULT 0x00000000
-#define mmDCP2_GRPH_SWAP_CNTL_DEFAULT 0x00000000
-#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP2_GRPH_PITCH_DEFAULT 0x00000000
-#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP2_GRPH_SURFACE_OFFSET_X_DEFAULT 0x00000000
-#define mmDCP2_GRPH_SURFACE_OFFSET_Y_DEFAULT 0x00000000
-#define mmDCP2_GRPH_X_START_DEFAULT 0x00000000
-#define mmDCP2_GRPH_Y_START_DEFAULT 0x00000000
-#define mmDCP2_GRPH_X_END_DEFAULT 0x00000000
-#define mmDCP2_GRPH_Y_END_DEFAULT 0x00000000
-#define mmDCP2_INPUT_GAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_GRPH_UPDATE_DEFAULT 0x00000000
-#define mmDCP2_GRPH_FLIP_CONTROL_DEFAULT 0x00000020
-#define mmDCP2_GRPH_SURFACE_ADDRESS_INUSE_DEFAULT 0x00000000
-#define mmDCP2_GRPH_DFQ_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_GRPH_DFQ_STATUS_DEFAULT 0x00000000
-#define mmDCP2_GRPH_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDCP2_GRPH_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_GRPH_SURFACE_ADDRESS_HIGH_INUSE_DEFAULT 0x00000000
-#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP2_GRPH_COMPRESS_PITCH_DEFAULT 0x00000000
-#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP2_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT_DEFAULT 0x000000ff
-#define mmDCP2_PRESCALE_GRPH_CONTROL_DEFAULT 0x00000010
-#define mmDCP2_PRESCALE_VALUES_GRPH_R_DEFAULT 0x20000000
-#define mmDCP2_PRESCALE_VALUES_GRPH_G_DEFAULT 0x20000000
-#define mmDCP2_PRESCALE_VALUES_GRPH_B_DEFAULT 0x20000000
-#define mmDCP2_INPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_INPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP2_INPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP2_INPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP2_INPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP2_INPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP2_INPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP2_OUTPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_OUTPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP2_OUTPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP2_OUTPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP2_OUTPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP2_OUTPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP2_OUTPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP2_COMM_MATRIXA_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP2_COMM_MATRIXA_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP2_COMM_MATRIXA_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP2_COMM_MATRIXA_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP2_COMM_MATRIXA_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP2_COMM_MATRIXA_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP2_COMM_MATRIXB_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP2_COMM_MATRIXB_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP2_COMM_MATRIXB_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP2_COMM_MATRIXB_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP2_COMM_MATRIXB_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP2_COMM_MATRIXB_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP2_DENORM_CONTROL_DEFAULT 0x00000003
-#define mmDCP2_OUT_ROUND_CONTROL_DEFAULT 0x0000000a
-#define mmDCP2_OUT_CLAMP_CONTROL_R_CR_DEFAULT 0x00003fff
-#define mmDCP2_OUT_CLAMP_CONTROL_G_Y_DEFAULT 0x00003fff
-#define mmDCP2_OUT_CLAMP_CONTROL_B_CB_DEFAULT 0x00003fff
-#define mmDCP2_KEY_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_KEY_RANGE_ALPHA_DEFAULT 0x00000000
-#define mmDCP2_KEY_RANGE_RED_DEFAULT 0x00000000
-#define mmDCP2_KEY_RANGE_GREEN_DEFAULT 0x00000000
-#define mmDCP2_KEY_RANGE_BLUE_DEFAULT 0x00000000
-#define mmDCP2_DEGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmDCP2_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmDCP2_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmDCP2_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmDCP2_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmDCP2_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmDCP2_DCP_SPATIAL_DITHER_CNTL_DEFAULT 0x00000000
-#define mmDCP2_DCP_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmDCP2_DCP_FP_CONVERTED_FIELD_DEFAULT 0x00000000
-#define mmDCP2_CUR_CONTROL_DEFAULT 0x00000810
-#define mmDCP2_CUR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP2_CUR_SIZE_DEFAULT 0x00000000
-#define mmDCP2_CUR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP2_CUR_POSITION_DEFAULT 0x00000000
-#define mmDCP2_CUR_HOT_SPOT_DEFAULT 0x00000000
-#define mmDCP2_CUR_COLOR1_DEFAULT 0x00000000
-#define mmDCP2_CUR_COLOR2_DEFAULT 0x00000000
-#define mmDCP2_CUR_UPDATE_DEFAULT 0x00000000
-#define mmDCP2_CUR_REQUEST_FILTER_CNTL_DEFAULT 0x00000000
-#define mmDCP2_CUR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_RW_MODE_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_VGA_ACCESS_ENABLE_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP2_DC_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_BLACK_OFFSET_BLUE_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_BLACK_OFFSET_GREEN_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_BLACK_OFFSET_RED_DEFAULT 0x00000000
-#define mmDCP2_DC_LUT_WHITE_OFFSET_BLUE_DEFAULT 0x0000ffff
-#define mmDCP2_DC_LUT_WHITE_OFFSET_GREEN_DEFAULT 0x0000ffff
-#define mmDCP2_DC_LUT_WHITE_OFFSET_RED_DEFAULT 0x0000ffff
-#define mmDCP2_DCP_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_DCP_CRC_MASK_DEFAULT 0x00000000
-#define mmDCP2_DCP_CRC_CURRENT_DEFAULT 0x00000000
-#define mmDCP2_DVMM_PTE_CONTROL_DEFAULT 0x00004000
-#define mmDCP2_DCP_CRC_LAST_DEFAULT 0x00000000
-#define mmDCP2_DVMM_PTE_ARB_CONTROL_DEFAULT 0x00002220
-#define mmDCP2_GRPH_FLIP_RATE_CNTL_DEFAULT 0x00000000
-#define mmDCP2_DCP_GSL_CONTROL_DEFAULT 0x60000020
-#define mmDCP2_DCP_LB_DATA_GAP_BETWEEN_CHUNK_DEFAULT 0x00000035
-#define mmDCP2_GRPH_STEREOSYNC_FLIP_DEFAULT 0x00000200
-#define mmDCP2_HW_ROTATION_DEFAULT 0x00000000
-#define mmDCP2_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL_DEFAULT 0x00000010
-#define mmDCP2_REGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_LUT_INDEX_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_LUT_DATA_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP2_REGAMMA_CNTLA_START_CNTL_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLA_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_START_CNTL_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP2_REGAMMA_CNTLB_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP2_ALPHA_CONTROL_DEFAULT 0x00000002
-#define mmDCP2_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP2_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP2_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS_DEFAULT 0x00000000
-#define mmDCP2_GRPH_XDMA_FLIP_TIMEOUT_DEFAULT 0x00000000
-#define mmDCP2_GRPH_XDMA_FLIP_AVG_DELAY_DEFAULT 0x00000000
-#define mmDCP2_GRPH_SURFACE_COUNTER_CONTROL_DEFAULT 0x00000012
-#define mmDCP2_GRPH_SURFACE_COUNTER_OUTPUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_lb2_dispdec
-#define mmLB2_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmLB2_LB_MEMORY_CTRL_DEFAULT 0x000006b0
-#define mmLB2_LB_MEMORY_SIZE_STATUS_DEFAULT 0x00000000
-#define mmLB2_LB_DESKTOP_HEIGHT_DEFAULT 0x00000000
-#define mmLB2_LB_VLINE_START_END_DEFAULT 0x00000000
-#define mmLB2_LB_VLINE2_START_END_DEFAULT 0x00000000
-#define mmLB2_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmLB2_LB_SNAPSHOT_V_COUNTER_DEFAULT 0x00000000
-#define mmLB2_LB_INTERRUPT_MASK_DEFAULT 0x00000000
-#define mmLB2_LB_VLINE_STATUS_DEFAULT 0x00000000
-#define mmLB2_LB_VLINE2_STATUS_DEFAULT 0x00000000
-#define mmLB2_LB_VBLANK_STATUS_DEFAULT 0x00000000
-#define mmLB2_LB_SYNC_RESET_SEL_DEFAULT 0x00000002
-#define mmLB2_LB_BLACK_KEYER_R_CR_DEFAULT 0x00000000
-#define mmLB2_LB_BLACK_KEYER_G_Y_DEFAULT 0x00000000
-#define mmLB2_LB_BLACK_KEYER_B_CB_DEFAULT 0x00000000
-#define mmLB2_LB_KEYER_COLOR_CTRL_DEFAULT 0x00000000
-#define mmLB2_LB_KEYER_COLOR_R_CR_DEFAULT 0x00000000
-#define mmLB2_LB_KEYER_COLOR_G_Y_DEFAULT 0x00000000
-#define mmLB2_LB_KEYER_COLOR_B_CB_DEFAULT 0x00000000
-#define mmLB2_LB_KEYER_COLOR_REP_R_CR_DEFAULT 0x00000000
-#define mmLB2_LB_KEYER_COLOR_REP_G_Y_DEFAULT 0x00000000
-#define mmLB2_LB_KEYER_COLOR_REP_B_CB_DEFAULT 0x00000000
-#define mmLB2_LB_BUFFER_LEVEL_STATUS_DEFAULT 0xa0008000
-#define mmLB2_LB_BUFFER_URGENCY_CTRL_DEFAULT 0x00200010
-#define mmLB2_LB_BUFFER_URGENCY_STATUS_DEFAULT 0x00000000
-#define mmLB2_LB_BUFFER_STATUS_DEFAULT 0x00000002
-#define mmLB2_LB_NO_OUTSTANDING_REQ_STATUS_DEFAULT 0x00000000
-#define mmLB2_MVP_AFR_FLIP_MODE_DEFAULT 0x00000000
-#define mmLB2_MVP_AFR_FLIP_FIFO_CNTL_DEFAULT 0x00000000
-#define mmLB2_MVP_FLIP_LINE_NUM_INSERT_DEFAULT 0x00000002
-#define mmLB2_DC_MVP_LB_CONTROL_DEFAULT 0x00000001
-
-
-// addressBlock: dce_dc_dcfe2_dispdec
-#define mmDCFE2_DCFE_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFE2_DCFE_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCFE2_DCFE_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFE2_DCFE_MEM_PWR_CTRL2_DEFAULT 0x00000000
-#define mmDCFE2_DCFE_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFE2_DCFE_MISC_DEFAULT 0x00000001
-#define mmDCFE2_DCFE_FLUSH_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon5_dispdec
-#define mmDC_PERFMON5_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON5_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON5_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmif_pg2_dispdec
-#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL_DEFAULT 0x000bf777
-#define mmDMIF_PG2_DPG_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG2_DPG_PIPE_URGENT_LEVEL_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG2_DPG_PIPE_LOW_POWER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG2_DPG_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIF_PG2_DPG_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-#define mmDMIF_PG2_DPG_DVMM_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_scl2_dispdec
-#define mmSCL2_SCL_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmSCL2_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmSCL2_SCL_MODE_DEFAULT 0x00000000
-#define mmSCL2_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmSCL2_SCL_CONTROL_DEFAULT 0x00000000
-#define mmSCL2_SCL_BYPASS_CONTROL_DEFAULT 0x00000000
-#define mmSCL2_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmSCL2_SCL_AUTOMATIC_MODE_CONTROL_DEFAULT 0x00000000
-#define mmSCL2_SCL_HORZ_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL2_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL2_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL2_SCL_VERT_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL2_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL2_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL2_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCL2_SCL_ROUND_OFFSET_DEFAULT 0x80000000
-#define mmSCL2_SCL_UPDATE_DEFAULT 0x00000000
-#define mmSCL2_SCL_F_SHARP_CONTROL_DEFAULT 0x00000000
-#define mmSCL2_SCL_ALU_CONTROL_DEFAULT 0x00000000
-#define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS_DEFAULT 0x00000000
-#define mmSCL2_VIEWPORT_START_SECONDARY_DEFAULT 0x00000000
-#define mmSCL2_VIEWPORT_START_DEFAULT 0x00000000
-#define mmSCL2_VIEWPORT_SIZE_DEFAULT 0x00000000
-#define mmSCL2_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmSCL2_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmSCL2_SCL_MODE_CHANGE_DET1_DEFAULT 0x00000000
-#define mmSCL2_SCL_MODE_CHANGE_DET2_DEFAULT 0x00000000
-#define mmSCL2_SCL_MODE_CHANGE_DET3_DEFAULT 0x00000000
-#define mmSCL2_SCL_MODE_CHANGE_MASK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_blnd2_dispdec
-#define mmBLND2_BLND_CONTROL_DEFAULT 0xff0220ff
-#define mmBLND2_BLND_SM_CONTROL2_DEFAULT 0x00000000
-#define mmBLND2_BLND_CONTROL2_DEFAULT 0x00000010
-#define mmBLND2_BLND_UPDATE_DEFAULT 0x00000000
-#define mmBLND2_BLND_UNDERFLOW_INTERRUPT_DEFAULT 0x00000000
-#define mmBLND2_BLND_V_UPDATE_LOCK_DEFAULT 0x80000000
-#define mmBLND2_BLND_REG_UPDATE_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_crtc2_dispdec
-#define mmCRTC2_CRTC_H_BLANK_EARLY_NUM_DEFAULT 0x00000040
-#define mmCRTC2_CRTC_H_TOTAL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_H_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_H_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_H_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VBI_END_DEFAULT 0x00000003
-#define mmCRTC2_CRTC_V_TOTAL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_V_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_V_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_V_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_DTMTEST_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_DTMTEST_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CONTROL_DEFAULT 0x80400110
-#define mmCRTC2_CRTC_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_STATUS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_COUNT_RESET_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_STEREO_STATUS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_START_LINE_CONTROL_DEFAULT 0x00003002
-#define mmCRTC2_CRTC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VGA_PARAMETER_CAPTURE_MODE_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_MASTER_UPDATE_LOCK_DEFAULT 0x00010000
-#define mmCRTC2_CRTC_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT_TIMER_DEFAULT 0x00000008
-#define mmCRTC2_CRTC_MVP_STATUS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_MASTER_EN_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_ALLOW_STOP_OFF_V_CNT_DEFAULT 0x00010000
-#define mmCRTC2_CRTC_V_UPDATE_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_OVERSCAN_COLOR_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_OVERSCAN_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_BLACK_COLOR_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC_CNTL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_EXT_TIMING_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_EXT_TIMING_SYNC_WINDOW_START_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_EXT_TIMING_SYNC_WINDOW_END_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmCRTC2_CRTC_3D_STRUCTURE_CONTROL_DEFAULT 0x00000010
-#define mmCRTC2_CRTC_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_GSL_WINDOW_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_GSL_CONTROL_DEFAULT 0x00020000
-#define mmCRTC2_CRTC_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC2_CRTC_DRR_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_fmt2_dispdec
-#define mmFMT2_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT2_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT2_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT2_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT2_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT2_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT2_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT2_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT2_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT2_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT2_FMT_CRC_CNTL_DEFAULT 0x01000040
-#define mmFMT2_FMT_CRC_SIG_RED_GREEN_MASK_DEFAULT 0x00ff00ff
-#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0x000700ff
-#define mmFMT2_FMT_CRC_SIG_RED_GREEN_DEFAULT 0x00000000
-#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL_DEFAULT 0x00000000
-#define mmFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT2_FMT_420_HBLANK_EARLY_START_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcp3_dispdec
-#define mmDCP3_GRPH_ENABLE_DEFAULT 0x00000001
-#define mmDCP3_GRPH_CONTROL_DEFAULT 0x20002040
-#define mmDCP3_GRPH_LUT_10BIT_BYPASS_DEFAULT 0x00000000
-#define mmDCP3_GRPH_SWAP_CNTL_DEFAULT 0x00000000
-#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP3_GRPH_PITCH_DEFAULT 0x00000000
-#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP3_GRPH_SURFACE_OFFSET_X_DEFAULT 0x00000000
-#define mmDCP3_GRPH_SURFACE_OFFSET_Y_DEFAULT 0x00000000
-#define mmDCP3_GRPH_X_START_DEFAULT 0x00000000
-#define mmDCP3_GRPH_Y_START_DEFAULT 0x00000000
-#define mmDCP3_GRPH_X_END_DEFAULT 0x00000000
-#define mmDCP3_GRPH_Y_END_DEFAULT 0x00000000
-#define mmDCP3_INPUT_GAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_GRPH_UPDATE_DEFAULT 0x00000000
-#define mmDCP3_GRPH_FLIP_CONTROL_DEFAULT 0x00000020
-#define mmDCP3_GRPH_SURFACE_ADDRESS_INUSE_DEFAULT 0x00000000
-#define mmDCP3_GRPH_DFQ_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_GRPH_DFQ_STATUS_DEFAULT 0x00000000
-#define mmDCP3_GRPH_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDCP3_GRPH_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_GRPH_SURFACE_ADDRESS_HIGH_INUSE_DEFAULT 0x00000000
-#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP3_GRPH_COMPRESS_PITCH_DEFAULT 0x00000000
-#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP3_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT_DEFAULT 0x000000ff
-#define mmDCP3_PRESCALE_GRPH_CONTROL_DEFAULT 0x00000010
-#define mmDCP3_PRESCALE_VALUES_GRPH_R_DEFAULT 0x20000000
-#define mmDCP3_PRESCALE_VALUES_GRPH_G_DEFAULT 0x20000000
-#define mmDCP3_PRESCALE_VALUES_GRPH_B_DEFAULT 0x20000000
-#define mmDCP3_INPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_INPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP3_INPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP3_INPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP3_INPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP3_INPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP3_INPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP3_OUTPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_OUTPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP3_OUTPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP3_OUTPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP3_OUTPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP3_OUTPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP3_OUTPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP3_COMM_MATRIXA_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP3_COMM_MATRIXA_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP3_COMM_MATRIXA_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP3_COMM_MATRIXA_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP3_COMM_MATRIXA_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP3_COMM_MATRIXA_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP3_COMM_MATRIXB_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP3_COMM_MATRIXB_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP3_COMM_MATRIXB_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP3_COMM_MATRIXB_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP3_COMM_MATRIXB_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP3_COMM_MATRIXB_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP3_DENORM_CONTROL_DEFAULT 0x00000003
-#define mmDCP3_OUT_ROUND_CONTROL_DEFAULT 0x0000000a
-#define mmDCP3_OUT_CLAMP_CONTROL_R_CR_DEFAULT 0x00003fff
-#define mmDCP3_OUT_CLAMP_CONTROL_G_Y_DEFAULT 0x00003fff
-#define mmDCP3_OUT_CLAMP_CONTROL_B_CB_DEFAULT 0x00003fff
-#define mmDCP3_KEY_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_KEY_RANGE_ALPHA_DEFAULT 0x00000000
-#define mmDCP3_KEY_RANGE_RED_DEFAULT 0x00000000
-#define mmDCP3_KEY_RANGE_GREEN_DEFAULT 0x00000000
-#define mmDCP3_KEY_RANGE_BLUE_DEFAULT 0x00000000
-#define mmDCP3_DEGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmDCP3_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmDCP3_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmDCP3_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmDCP3_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmDCP3_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmDCP3_DCP_SPATIAL_DITHER_CNTL_DEFAULT 0x00000000
-#define mmDCP3_DCP_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmDCP3_DCP_FP_CONVERTED_FIELD_DEFAULT 0x00000000
-#define mmDCP3_CUR_CONTROL_DEFAULT 0x00000810
-#define mmDCP3_CUR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP3_CUR_SIZE_DEFAULT 0x00000000
-#define mmDCP3_CUR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP3_CUR_POSITION_DEFAULT 0x00000000
-#define mmDCP3_CUR_HOT_SPOT_DEFAULT 0x00000000
-#define mmDCP3_CUR_COLOR1_DEFAULT 0x00000000
-#define mmDCP3_CUR_COLOR2_DEFAULT 0x00000000
-#define mmDCP3_CUR_UPDATE_DEFAULT 0x00000000
-#define mmDCP3_CUR_REQUEST_FILTER_CNTL_DEFAULT 0x00000000
-#define mmDCP3_CUR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_RW_MODE_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_VGA_ACCESS_ENABLE_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP3_DC_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_BLACK_OFFSET_BLUE_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_BLACK_OFFSET_GREEN_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_BLACK_OFFSET_RED_DEFAULT 0x00000000
-#define mmDCP3_DC_LUT_WHITE_OFFSET_BLUE_DEFAULT 0x0000ffff
-#define mmDCP3_DC_LUT_WHITE_OFFSET_GREEN_DEFAULT 0x0000ffff
-#define mmDCP3_DC_LUT_WHITE_OFFSET_RED_DEFAULT 0x0000ffff
-#define mmDCP3_DCP_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_DCP_CRC_MASK_DEFAULT 0x00000000
-#define mmDCP3_DCP_CRC_CURRENT_DEFAULT 0x00000000
-#define mmDCP3_DVMM_PTE_CONTROL_DEFAULT 0x00004000
-#define mmDCP3_DCP_CRC_LAST_DEFAULT 0x00000000
-#define mmDCP3_DVMM_PTE_ARB_CONTROL_DEFAULT 0x00002220
-#define mmDCP3_GRPH_FLIP_RATE_CNTL_DEFAULT 0x00000000
-#define mmDCP3_DCP_GSL_CONTROL_DEFAULT 0x60000020
-#define mmDCP3_DCP_LB_DATA_GAP_BETWEEN_CHUNK_DEFAULT 0x00000035
-#define mmDCP3_GRPH_STEREOSYNC_FLIP_DEFAULT 0x00000200
-#define mmDCP3_HW_ROTATION_DEFAULT 0x00000000
-#define mmDCP3_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL_DEFAULT 0x00000010
-#define mmDCP3_REGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_LUT_INDEX_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_LUT_DATA_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP3_REGAMMA_CNTLA_START_CNTL_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLA_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_START_CNTL_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP3_REGAMMA_CNTLB_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP3_ALPHA_CONTROL_DEFAULT 0x00000002
-#define mmDCP3_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP3_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP3_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS_DEFAULT 0x00000000
-#define mmDCP3_GRPH_XDMA_FLIP_TIMEOUT_DEFAULT 0x00000000
-#define mmDCP3_GRPH_XDMA_FLIP_AVG_DELAY_DEFAULT 0x00000000
-#define mmDCP3_GRPH_SURFACE_COUNTER_CONTROL_DEFAULT 0x00000012
-#define mmDCP3_GRPH_SURFACE_COUNTER_OUTPUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_lb3_dispdec
-#define mmLB3_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmLB3_LB_MEMORY_CTRL_DEFAULT 0x000006b0
-#define mmLB3_LB_MEMORY_SIZE_STATUS_DEFAULT 0x00000000
-#define mmLB3_LB_DESKTOP_HEIGHT_DEFAULT 0x00000000
-#define mmLB3_LB_VLINE_START_END_DEFAULT 0x00000000
-#define mmLB3_LB_VLINE2_START_END_DEFAULT 0x00000000
-#define mmLB3_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmLB3_LB_SNAPSHOT_V_COUNTER_DEFAULT 0x00000000
-#define mmLB3_LB_INTERRUPT_MASK_DEFAULT 0x00000000
-#define mmLB3_LB_VLINE_STATUS_DEFAULT 0x00000000
-#define mmLB3_LB_VLINE2_STATUS_DEFAULT 0x00000000
-#define mmLB3_LB_VBLANK_STATUS_DEFAULT 0x00000000
-#define mmLB3_LB_SYNC_RESET_SEL_DEFAULT 0x00000002
-#define mmLB3_LB_BLACK_KEYER_R_CR_DEFAULT 0x00000000
-#define mmLB3_LB_BLACK_KEYER_G_Y_DEFAULT 0x00000000
-#define mmLB3_LB_BLACK_KEYER_B_CB_DEFAULT 0x00000000
-#define mmLB3_LB_KEYER_COLOR_CTRL_DEFAULT 0x00000000
-#define mmLB3_LB_KEYER_COLOR_R_CR_DEFAULT 0x00000000
-#define mmLB3_LB_KEYER_COLOR_G_Y_DEFAULT 0x00000000
-#define mmLB3_LB_KEYER_COLOR_B_CB_DEFAULT 0x00000000
-#define mmLB3_LB_KEYER_COLOR_REP_R_CR_DEFAULT 0x00000000
-#define mmLB3_LB_KEYER_COLOR_REP_G_Y_DEFAULT 0x00000000
-#define mmLB3_LB_KEYER_COLOR_REP_B_CB_DEFAULT 0x00000000
-#define mmLB3_LB_BUFFER_LEVEL_STATUS_DEFAULT 0xa0008000
-#define mmLB3_LB_BUFFER_URGENCY_CTRL_DEFAULT 0x00200010
-#define mmLB3_LB_BUFFER_URGENCY_STATUS_DEFAULT 0x00000000
-#define mmLB3_LB_BUFFER_STATUS_DEFAULT 0x00000002
-#define mmLB3_LB_NO_OUTSTANDING_REQ_STATUS_DEFAULT 0x00000000
-#define mmLB3_MVP_AFR_FLIP_MODE_DEFAULT 0x00000000
-#define mmLB3_MVP_AFR_FLIP_FIFO_CNTL_DEFAULT 0x00000000
-#define mmLB3_MVP_FLIP_LINE_NUM_INSERT_DEFAULT 0x00000002
-#define mmLB3_DC_MVP_LB_CONTROL_DEFAULT 0x00000001
-
-
-// addressBlock: dce_dc_dcfe3_dispdec
-#define mmDCFE3_DCFE_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFE3_DCFE_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCFE3_DCFE_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFE3_DCFE_MEM_PWR_CTRL2_DEFAULT 0x00000000
-#define mmDCFE3_DCFE_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFE3_DCFE_MISC_DEFAULT 0x00000001
-#define mmDCFE3_DCFE_FLUSH_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon6_dispdec
-#define mmDC_PERFMON6_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON6_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON6_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmif_pg3_dispdec
-#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL_DEFAULT 0x000bf777
-#define mmDMIF_PG3_DPG_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG3_DPG_PIPE_URGENT_LEVEL_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG3_DPG_PIPE_LOW_POWER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG3_DPG_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIF_PG3_DPG_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-#define mmDMIF_PG3_DPG_DVMM_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_scl3_dispdec
-#define mmSCL3_SCL_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmSCL3_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmSCL3_SCL_MODE_DEFAULT 0x00000000
-#define mmSCL3_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmSCL3_SCL_CONTROL_DEFAULT 0x00000000
-#define mmSCL3_SCL_BYPASS_CONTROL_DEFAULT 0x00000000
-#define mmSCL3_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmSCL3_SCL_AUTOMATIC_MODE_CONTROL_DEFAULT 0x00000000
-#define mmSCL3_SCL_HORZ_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL3_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL3_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL3_SCL_VERT_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL3_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL3_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL3_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCL3_SCL_ROUND_OFFSET_DEFAULT 0x80000000
-#define mmSCL3_SCL_UPDATE_DEFAULT 0x00000000
-#define mmSCL3_SCL_F_SHARP_CONTROL_DEFAULT 0x00000000
-#define mmSCL3_SCL_ALU_CONTROL_DEFAULT 0x00000000
-#define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS_DEFAULT 0x00000000
-#define mmSCL3_VIEWPORT_START_SECONDARY_DEFAULT 0x00000000
-#define mmSCL3_VIEWPORT_START_DEFAULT 0x00000000
-#define mmSCL3_VIEWPORT_SIZE_DEFAULT 0x00000000
-#define mmSCL3_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmSCL3_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmSCL3_SCL_MODE_CHANGE_DET1_DEFAULT 0x00000000
-#define mmSCL3_SCL_MODE_CHANGE_DET2_DEFAULT 0x00000000
-#define mmSCL3_SCL_MODE_CHANGE_DET3_DEFAULT 0x00000000
-#define mmSCL3_SCL_MODE_CHANGE_MASK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_blnd3_dispdec
-#define mmBLND3_BLND_CONTROL_DEFAULT 0xff0220ff
-#define mmBLND3_BLND_SM_CONTROL2_DEFAULT 0x00000000
-#define mmBLND3_BLND_CONTROL2_DEFAULT 0x00000010
-#define mmBLND3_BLND_UPDATE_DEFAULT 0x00000000
-#define mmBLND3_BLND_UNDERFLOW_INTERRUPT_DEFAULT 0x00000000
-#define mmBLND3_BLND_V_UPDATE_LOCK_DEFAULT 0x80000000
-#define mmBLND3_BLND_REG_UPDATE_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_crtc3_dispdec
-#define mmCRTC3_CRTC_H_BLANK_EARLY_NUM_DEFAULT 0x00000040
-#define mmCRTC3_CRTC_H_TOTAL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_H_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_H_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_H_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VBI_END_DEFAULT 0x00000003
-#define mmCRTC3_CRTC_V_TOTAL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_V_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_V_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_V_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_DTMTEST_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_DTMTEST_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CONTROL_DEFAULT 0x80400110
-#define mmCRTC3_CRTC_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_STATUS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_COUNT_RESET_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_STEREO_STATUS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_START_LINE_CONTROL_DEFAULT 0x00003002
-#define mmCRTC3_CRTC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VGA_PARAMETER_CAPTURE_MODE_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_MASTER_UPDATE_LOCK_DEFAULT 0x00010000
-#define mmCRTC3_CRTC_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT_TIMER_DEFAULT 0x00000008
-#define mmCRTC3_CRTC_MVP_STATUS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_MASTER_EN_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_ALLOW_STOP_OFF_V_CNT_DEFAULT 0x00010000
-#define mmCRTC3_CRTC_V_UPDATE_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_OVERSCAN_COLOR_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_OVERSCAN_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_BLACK_COLOR_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC_CNTL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_EXT_TIMING_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_EXT_TIMING_SYNC_WINDOW_START_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_EXT_TIMING_SYNC_WINDOW_END_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmCRTC3_CRTC_3D_STRUCTURE_CONTROL_DEFAULT 0x00000010
-#define mmCRTC3_CRTC_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_GSL_WINDOW_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_GSL_CONTROL_DEFAULT 0x00020000
-#define mmCRTC3_CRTC_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC3_CRTC_DRR_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_fmt3_dispdec
-#define mmFMT3_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT3_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT3_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT3_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT3_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT3_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT3_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT3_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT3_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT3_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT3_FMT_CRC_CNTL_DEFAULT 0x01000040
-#define mmFMT3_FMT_CRC_SIG_RED_GREEN_MASK_DEFAULT 0x00ff00ff
-#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0x000700ff
-#define mmFMT3_FMT_CRC_SIG_RED_GREEN_DEFAULT 0x00000000
-#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL_DEFAULT 0x00000000
-#define mmFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT3_FMT_420_HBLANK_EARLY_START_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcp4_dispdec
-#define mmDCP4_GRPH_ENABLE_DEFAULT 0x00000001
-#define mmDCP4_GRPH_CONTROL_DEFAULT 0x20002040
-#define mmDCP4_GRPH_LUT_10BIT_BYPASS_DEFAULT 0x00000000
-#define mmDCP4_GRPH_SWAP_CNTL_DEFAULT 0x00000000
-#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP4_GRPH_PITCH_DEFAULT 0x00000000
-#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP4_GRPH_SURFACE_OFFSET_X_DEFAULT 0x00000000
-#define mmDCP4_GRPH_SURFACE_OFFSET_Y_DEFAULT 0x00000000
-#define mmDCP4_GRPH_X_START_DEFAULT 0x00000000
-#define mmDCP4_GRPH_Y_START_DEFAULT 0x00000000
-#define mmDCP4_GRPH_X_END_DEFAULT 0x00000000
-#define mmDCP4_GRPH_Y_END_DEFAULT 0x00000000
-#define mmDCP4_INPUT_GAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_GRPH_UPDATE_DEFAULT 0x00000000
-#define mmDCP4_GRPH_FLIP_CONTROL_DEFAULT 0x00000020
-#define mmDCP4_GRPH_SURFACE_ADDRESS_INUSE_DEFAULT 0x00000000
-#define mmDCP4_GRPH_DFQ_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_GRPH_DFQ_STATUS_DEFAULT 0x00000000
-#define mmDCP4_GRPH_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDCP4_GRPH_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_GRPH_SURFACE_ADDRESS_HIGH_INUSE_DEFAULT 0x00000000
-#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP4_GRPH_COMPRESS_PITCH_DEFAULT 0x00000000
-#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP4_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT_DEFAULT 0x000000ff
-#define mmDCP4_PRESCALE_GRPH_CONTROL_DEFAULT 0x00000010
-#define mmDCP4_PRESCALE_VALUES_GRPH_R_DEFAULT 0x20000000
-#define mmDCP4_PRESCALE_VALUES_GRPH_G_DEFAULT 0x20000000
-#define mmDCP4_PRESCALE_VALUES_GRPH_B_DEFAULT 0x20000000
-#define mmDCP4_INPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_INPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP4_INPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP4_INPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP4_INPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP4_INPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP4_INPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP4_OUTPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_OUTPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP4_OUTPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP4_OUTPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP4_OUTPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP4_OUTPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP4_OUTPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP4_COMM_MATRIXA_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP4_COMM_MATRIXA_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP4_COMM_MATRIXA_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP4_COMM_MATRIXA_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP4_COMM_MATRIXA_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP4_COMM_MATRIXA_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP4_COMM_MATRIXB_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP4_COMM_MATRIXB_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP4_COMM_MATRIXB_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP4_COMM_MATRIXB_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP4_COMM_MATRIXB_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP4_COMM_MATRIXB_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP4_DENORM_CONTROL_DEFAULT 0x00000003
-#define mmDCP4_OUT_ROUND_CONTROL_DEFAULT 0x0000000a
-#define mmDCP4_OUT_CLAMP_CONTROL_R_CR_DEFAULT 0x00003fff
-#define mmDCP4_OUT_CLAMP_CONTROL_G_Y_DEFAULT 0x00003fff
-#define mmDCP4_OUT_CLAMP_CONTROL_B_CB_DEFAULT 0x00003fff
-#define mmDCP4_KEY_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_KEY_RANGE_ALPHA_DEFAULT 0x00000000
-#define mmDCP4_KEY_RANGE_RED_DEFAULT 0x00000000
-#define mmDCP4_KEY_RANGE_GREEN_DEFAULT 0x00000000
-#define mmDCP4_KEY_RANGE_BLUE_DEFAULT 0x00000000
-#define mmDCP4_DEGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmDCP4_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmDCP4_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmDCP4_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmDCP4_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmDCP4_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmDCP4_DCP_SPATIAL_DITHER_CNTL_DEFAULT 0x00000000
-#define mmDCP4_DCP_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmDCP4_DCP_FP_CONVERTED_FIELD_DEFAULT 0x00000000
-#define mmDCP4_CUR_CONTROL_DEFAULT 0x00000810
-#define mmDCP4_CUR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP4_CUR_SIZE_DEFAULT 0x00000000
-#define mmDCP4_CUR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP4_CUR_POSITION_DEFAULT 0x00000000
-#define mmDCP4_CUR_HOT_SPOT_DEFAULT 0x00000000
-#define mmDCP4_CUR_COLOR1_DEFAULT 0x00000000
-#define mmDCP4_CUR_COLOR2_DEFAULT 0x00000000
-#define mmDCP4_CUR_UPDATE_DEFAULT 0x00000000
-#define mmDCP4_CUR_REQUEST_FILTER_CNTL_DEFAULT 0x00000000
-#define mmDCP4_CUR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_RW_MODE_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_VGA_ACCESS_ENABLE_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP4_DC_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_BLACK_OFFSET_BLUE_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_BLACK_OFFSET_GREEN_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_BLACK_OFFSET_RED_DEFAULT 0x00000000
-#define mmDCP4_DC_LUT_WHITE_OFFSET_BLUE_DEFAULT 0x0000ffff
-#define mmDCP4_DC_LUT_WHITE_OFFSET_GREEN_DEFAULT 0x0000ffff
-#define mmDCP4_DC_LUT_WHITE_OFFSET_RED_DEFAULT 0x0000ffff
-#define mmDCP4_DCP_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_DCP_CRC_MASK_DEFAULT 0x00000000
-#define mmDCP4_DCP_CRC_CURRENT_DEFAULT 0x00000000
-#define mmDCP4_DVMM_PTE_CONTROL_DEFAULT 0x00004000
-#define mmDCP4_DCP_CRC_LAST_DEFAULT 0x00000000
-#define mmDCP4_DVMM_PTE_ARB_CONTROL_DEFAULT 0x00002220
-#define mmDCP4_GRPH_FLIP_RATE_CNTL_DEFAULT 0x00000000
-#define mmDCP4_DCP_GSL_CONTROL_DEFAULT 0x60000020
-#define mmDCP4_DCP_LB_DATA_GAP_BETWEEN_CHUNK_DEFAULT 0x00000035
-#define mmDCP4_GRPH_STEREOSYNC_FLIP_DEFAULT 0x00000200
-#define mmDCP4_HW_ROTATION_DEFAULT 0x00000000
-#define mmDCP4_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL_DEFAULT 0x00000010
-#define mmDCP4_REGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_LUT_INDEX_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_LUT_DATA_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP4_REGAMMA_CNTLA_START_CNTL_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLA_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_START_CNTL_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP4_REGAMMA_CNTLB_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP4_ALPHA_CONTROL_DEFAULT 0x00000002
-#define mmDCP4_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP4_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP4_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS_DEFAULT 0x00000000
-#define mmDCP4_GRPH_XDMA_FLIP_TIMEOUT_DEFAULT 0x00000000
-#define mmDCP4_GRPH_XDMA_FLIP_AVG_DELAY_DEFAULT 0x00000000
-#define mmDCP4_GRPH_SURFACE_COUNTER_CONTROL_DEFAULT 0x00000012
-#define mmDCP4_GRPH_SURFACE_COUNTER_OUTPUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_lb4_dispdec
-#define mmLB4_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmLB4_LB_MEMORY_CTRL_DEFAULT 0x000006b0
-#define mmLB4_LB_MEMORY_SIZE_STATUS_DEFAULT 0x00000000
-#define mmLB4_LB_DESKTOP_HEIGHT_DEFAULT 0x00000000
-#define mmLB4_LB_VLINE_START_END_DEFAULT 0x00000000
-#define mmLB4_LB_VLINE2_START_END_DEFAULT 0x00000000
-#define mmLB4_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmLB4_LB_SNAPSHOT_V_COUNTER_DEFAULT 0x00000000
-#define mmLB4_LB_INTERRUPT_MASK_DEFAULT 0x00000000
-#define mmLB4_LB_VLINE_STATUS_DEFAULT 0x00000000
-#define mmLB4_LB_VLINE2_STATUS_DEFAULT 0x00000000
-#define mmLB4_LB_VBLANK_STATUS_DEFAULT 0x00000000
-#define mmLB4_LB_SYNC_RESET_SEL_DEFAULT 0x00000002
-#define mmLB4_LB_BLACK_KEYER_R_CR_DEFAULT 0x00000000
-#define mmLB4_LB_BLACK_KEYER_G_Y_DEFAULT 0x00000000
-#define mmLB4_LB_BLACK_KEYER_B_CB_DEFAULT 0x00000000
-#define mmLB4_LB_KEYER_COLOR_CTRL_DEFAULT 0x00000000
-#define mmLB4_LB_KEYER_COLOR_R_CR_DEFAULT 0x00000000
-#define mmLB4_LB_KEYER_COLOR_G_Y_DEFAULT 0x00000000
-#define mmLB4_LB_KEYER_COLOR_B_CB_DEFAULT 0x00000000
-#define mmLB4_LB_KEYER_COLOR_REP_R_CR_DEFAULT 0x00000000
-#define mmLB4_LB_KEYER_COLOR_REP_G_Y_DEFAULT 0x00000000
-#define mmLB4_LB_KEYER_COLOR_REP_B_CB_DEFAULT 0x00000000
-#define mmLB4_LB_BUFFER_LEVEL_STATUS_DEFAULT 0xa0008000
-#define mmLB4_LB_BUFFER_URGENCY_CTRL_DEFAULT 0x00200010
-#define mmLB4_LB_BUFFER_URGENCY_STATUS_DEFAULT 0x00000000
-#define mmLB4_LB_BUFFER_STATUS_DEFAULT 0x00000002
-#define mmLB4_LB_NO_OUTSTANDING_REQ_STATUS_DEFAULT 0x00000000
-#define mmLB4_MVP_AFR_FLIP_MODE_DEFAULT 0x00000000
-#define mmLB4_MVP_AFR_FLIP_FIFO_CNTL_DEFAULT 0x00000000
-#define mmLB4_MVP_FLIP_LINE_NUM_INSERT_DEFAULT 0x00000002
-#define mmLB4_DC_MVP_LB_CONTROL_DEFAULT 0x00000001
-
-
-// addressBlock: dce_dc_dcfe4_dispdec
-#define mmDCFE4_DCFE_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFE4_DCFE_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCFE4_DCFE_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFE4_DCFE_MEM_PWR_CTRL2_DEFAULT 0x00000000
-#define mmDCFE4_DCFE_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFE4_DCFE_MISC_DEFAULT 0x00000001
-#define mmDCFE4_DCFE_FLUSH_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon7_dispdec
-#define mmDC_PERFMON7_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON7_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON7_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmif_pg4_dispdec
-#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL_DEFAULT 0x000bf777
-#define mmDMIF_PG4_DPG_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG4_DPG_PIPE_URGENT_LEVEL_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG4_DPG_PIPE_LOW_POWER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG4_DPG_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIF_PG4_DPG_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-#define mmDMIF_PG4_DPG_DVMM_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_scl4_dispdec
-#define mmSCL4_SCL_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmSCL4_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmSCL4_SCL_MODE_DEFAULT 0x00000000
-#define mmSCL4_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmSCL4_SCL_CONTROL_DEFAULT 0x00000000
-#define mmSCL4_SCL_BYPASS_CONTROL_DEFAULT 0x00000000
-#define mmSCL4_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmSCL4_SCL_AUTOMATIC_MODE_CONTROL_DEFAULT 0x00000000
-#define mmSCL4_SCL_HORZ_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL4_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL4_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL4_SCL_VERT_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL4_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL4_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL4_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCL4_SCL_ROUND_OFFSET_DEFAULT 0x80000000
-#define mmSCL4_SCL_UPDATE_DEFAULT 0x00000000
-#define mmSCL4_SCL_F_SHARP_CONTROL_DEFAULT 0x00000000
-#define mmSCL4_SCL_ALU_CONTROL_DEFAULT 0x00000000
-#define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS_DEFAULT 0x00000000
-#define mmSCL4_VIEWPORT_START_SECONDARY_DEFAULT 0x00000000
-#define mmSCL4_VIEWPORT_START_DEFAULT 0x00000000
-#define mmSCL4_VIEWPORT_SIZE_DEFAULT 0x00000000
-#define mmSCL4_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmSCL4_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmSCL4_SCL_MODE_CHANGE_DET1_DEFAULT 0x00000000
-#define mmSCL4_SCL_MODE_CHANGE_DET2_DEFAULT 0x00000000
-#define mmSCL4_SCL_MODE_CHANGE_DET3_DEFAULT 0x00000000
-#define mmSCL4_SCL_MODE_CHANGE_MASK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_blnd4_dispdec
-#define mmBLND4_BLND_CONTROL_DEFAULT 0xff0220ff
-#define mmBLND4_BLND_SM_CONTROL2_DEFAULT 0x00000000
-#define mmBLND4_BLND_CONTROL2_DEFAULT 0x00000010
-#define mmBLND4_BLND_UPDATE_DEFAULT 0x00000000
-#define mmBLND4_BLND_UNDERFLOW_INTERRUPT_DEFAULT 0x00000000
-#define mmBLND4_BLND_V_UPDATE_LOCK_DEFAULT 0x80000000
-#define mmBLND4_BLND_REG_UPDATE_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_crtc4_dispdec
-#define mmCRTC4_CRTC_H_BLANK_EARLY_NUM_DEFAULT 0x00000040
-#define mmCRTC4_CRTC_H_TOTAL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_H_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_H_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_H_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VBI_END_DEFAULT 0x00000003
-#define mmCRTC4_CRTC_V_TOTAL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_V_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_V_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_V_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_DTMTEST_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_DTMTEST_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CONTROL_DEFAULT 0x80400110
-#define mmCRTC4_CRTC_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_STATUS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_COUNT_RESET_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_STEREO_STATUS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_START_LINE_CONTROL_DEFAULT 0x00003002
-#define mmCRTC4_CRTC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VGA_PARAMETER_CAPTURE_MODE_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_MASTER_UPDATE_LOCK_DEFAULT 0x00010000
-#define mmCRTC4_CRTC_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT_TIMER_DEFAULT 0x00000008
-#define mmCRTC4_CRTC_MVP_STATUS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_MASTER_EN_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_ALLOW_STOP_OFF_V_CNT_DEFAULT 0x00010000
-#define mmCRTC4_CRTC_V_UPDATE_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_OVERSCAN_COLOR_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_OVERSCAN_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_BLACK_COLOR_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC_CNTL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_EXT_TIMING_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_EXT_TIMING_SYNC_WINDOW_START_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_EXT_TIMING_SYNC_WINDOW_END_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmCRTC4_CRTC_3D_STRUCTURE_CONTROL_DEFAULT 0x00000010
-#define mmCRTC4_CRTC_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_GSL_WINDOW_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_GSL_CONTROL_DEFAULT 0x00020000
-#define mmCRTC4_CRTC_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC4_CRTC_DRR_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_fmt4_dispdec
-#define mmFMT4_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT4_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT4_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT4_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT4_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT4_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT4_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT4_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT4_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT4_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT4_FMT_CRC_CNTL_DEFAULT 0x01000040
-#define mmFMT4_FMT_CRC_SIG_RED_GREEN_MASK_DEFAULT 0x00ff00ff
-#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0x000700ff
-#define mmFMT4_FMT_CRC_SIG_RED_GREEN_DEFAULT 0x00000000
-#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL_DEFAULT 0x00000000
-#define mmFMT4_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT4_FMT_420_HBLANK_EARLY_START_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcp5_dispdec
-#define mmDCP5_GRPH_ENABLE_DEFAULT 0x00000001
-#define mmDCP5_GRPH_CONTROL_DEFAULT 0x20002040
-#define mmDCP5_GRPH_LUT_10BIT_BYPASS_DEFAULT 0x00000000
-#define mmDCP5_GRPH_SWAP_CNTL_DEFAULT 0x00000000
-#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP5_GRPH_PITCH_DEFAULT 0x00000000
-#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP5_GRPH_SURFACE_OFFSET_X_DEFAULT 0x00000000
-#define mmDCP5_GRPH_SURFACE_OFFSET_Y_DEFAULT 0x00000000
-#define mmDCP5_GRPH_X_START_DEFAULT 0x00000000
-#define mmDCP5_GRPH_Y_START_DEFAULT 0x00000000
-#define mmDCP5_GRPH_X_END_DEFAULT 0x00000000
-#define mmDCP5_GRPH_Y_END_DEFAULT 0x00000000
-#define mmDCP5_INPUT_GAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_GRPH_UPDATE_DEFAULT 0x00000000
-#define mmDCP5_GRPH_FLIP_CONTROL_DEFAULT 0x00000020
-#define mmDCP5_GRPH_SURFACE_ADDRESS_INUSE_DEFAULT 0x00000000
-#define mmDCP5_GRPH_DFQ_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_GRPH_DFQ_STATUS_DEFAULT 0x00000000
-#define mmDCP5_GRPH_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDCP5_GRPH_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_GRPH_SURFACE_ADDRESS_HIGH_INUSE_DEFAULT 0x00000000
-#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP5_GRPH_COMPRESS_PITCH_DEFAULT 0x00000000
-#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP5_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT_DEFAULT 0x000000ff
-#define mmDCP5_PRESCALE_GRPH_CONTROL_DEFAULT 0x00000010
-#define mmDCP5_PRESCALE_VALUES_GRPH_R_DEFAULT 0x20000000
-#define mmDCP5_PRESCALE_VALUES_GRPH_G_DEFAULT 0x20000000
-#define mmDCP5_PRESCALE_VALUES_GRPH_B_DEFAULT 0x20000000
-#define mmDCP5_INPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_INPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP5_INPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP5_INPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP5_INPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP5_INPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP5_INPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP5_OUTPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_OUTPUT_CSC_C11_C12_DEFAULT 0x00002000
-#define mmDCP5_OUTPUT_CSC_C13_C14_DEFAULT 0x00000000
-#define mmDCP5_OUTPUT_CSC_C21_C22_DEFAULT 0x20000000
-#define mmDCP5_OUTPUT_CSC_C23_C24_DEFAULT 0x00000000
-#define mmDCP5_OUTPUT_CSC_C31_C32_DEFAULT 0x00000000
-#define mmDCP5_OUTPUT_CSC_C33_C34_DEFAULT 0x00002000
-#define mmDCP5_COMM_MATRIXA_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP5_COMM_MATRIXA_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP5_COMM_MATRIXA_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP5_COMM_MATRIXA_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP5_COMM_MATRIXA_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP5_COMM_MATRIXA_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP5_COMM_MATRIXB_TRANS_C11_C12_DEFAULT 0x00002000
-#define mmDCP5_COMM_MATRIXB_TRANS_C13_C14_DEFAULT 0x00000000
-#define mmDCP5_COMM_MATRIXB_TRANS_C21_C22_DEFAULT 0x20000000
-#define mmDCP5_COMM_MATRIXB_TRANS_C23_C24_DEFAULT 0x00000000
-#define mmDCP5_COMM_MATRIXB_TRANS_C31_C32_DEFAULT 0x00000000
-#define mmDCP5_COMM_MATRIXB_TRANS_C33_C34_DEFAULT 0x00002000
-#define mmDCP5_DENORM_CONTROL_DEFAULT 0x00000003
-#define mmDCP5_OUT_ROUND_CONTROL_DEFAULT 0x0000000a
-#define mmDCP5_OUT_CLAMP_CONTROL_R_CR_DEFAULT 0x00003fff
-#define mmDCP5_OUT_CLAMP_CONTROL_G_Y_DEFAULT 0x00003fff
-#define mmDCP5_OUT_CLAMP_CONTROL_B_CB_DEFAULT 0x00003fff
-#define mmDCP5_KEY_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_KEY_RANGE_ALPHA_DEFAULT 0x00000000
-#define mmDCP5_KEY_RANGE_RED_DEFAULT 0x00000000
-#define mmDCP5_KEY_RANGE_GREEN_DEFAULT 0x00000000
-#define mmDCP5_KEY_RANGE_BLUE_DEFAULT 0x00000000
-#define mmDCP5_DEGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmDCP5_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmDCP5_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmDCP5_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmDCP5_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmDCP5_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-#define mmDCP5_DCP_SPATIAL_DITHER_CNTL_DEFAULT 0x00000000
-#define mmDCP5_DCP_RANDOM_SEEDS_DEFAULT 0x00000000
-#define mmDCP5_DCP_FP_CONVERTED_FIELD_DEFAULT 0x00000000
-#define mmDCP5_CUR_CONTROL_DEFAULT 0x00000810
-#define mmDCP5_CUR_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP5_CUR_SIZE_DEFAULT 0x00000000
-#define mmDCP5_CUR_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP5_CUR_POSITION_DEFAULT 0x00000000
-#define mmDCP5_CUR_HOT_SPOT_DEFAULT 0x00000000
-#define mmDCP5_CUR_COLOR1_DEFAULT 0x00000000
-#define mmDCP5_CUR_COLOR2_DEFAULT 0x00000000
-#define mmDCP5_CUR_UPDATE_DEFAULT 0x00000000
-#define mmDCP5_CUR_REQUEST_FILTER_CNTL_DEFAULT 0x00000000
-#define mmDCP5_CUR_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_RW_MODE_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_VGA_ACCESS_ENABLE_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP5_DC_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_BLACK_OFFSET_BLUE_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_BLACK_OFFSET_GREEN_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_BLACK_OFFSET_RED_DEFAULT 0x00000000
-#define mmDCP5_DC_LUT_WHITE_OFFSET_BLUE_DEFAULT 0x0000ffff
-#define mmDCP5_DC_LUT_WHITE_OFFSET_GREEN_DEFAULT 0x0000ffff
-#define mmDCP5_DC_LUT_WHITE_OFFSET_RED_DEFAULT 0x0000ffff
-#define mmDCP5_DCP_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_DCP_CRC_MASK_DEFAULT 0x00000000
-#define mmDCP5_DCP_CRC_CURRENT_DEFAULT 0x00000000
-#define mmDCP5_DVMM_PTE_CONTROL_DEFAULT 0x00004000
-#define mmDCP5_DCP_CRC_LAST_DEFAULT 0x00000000
-#define mmDCP5_DVMM_PTE_ARB_CONTROL_DEFAULT 0x00002220
-#define mmDCP5_GRPH_FLIP_RATE_CNTL_DEFAULT 0x00000000
-#define mmDCP5_DCP_GSL_CONTROL_DEFAULT 0x60000020
-#define mmDCP5_DCP_LB_DATA_GAP_BETWEEN_CHUNK_DEFAULT 0x00000035
-#define mmDCP5_GRPH_STEREOSYNC_FLIP_DEFAULT 0x00000200
-#define mmDCP5_HW_ROTATION_DEFAULT 0x00000000
-#define mmDCP5_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL_DEFAULT 0x00000010
-#define mmDCP5_REGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_LUT_INDEX_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_LUT_DATA_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmDCP5_REGAMMA_CNTLA_START_CNTL_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLA_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_START_CNTL_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_END_CNTL1_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_END_CNTL2_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_REGION_0_1_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_REGION_2_3_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_REGION_4_5_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_REGION_6_7_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_REGION_8_9_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_REGION_10_11_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_REGION_12_13_DEFAULT 0x00000000
-#define mmDCP5_REGAMMA_CNTLB_REGION_14_15_DEFAULT 0x00000000
-#define mmDCP5_ALPHA_CONTROL_DEFAULT 0x00000002
-#define mmDCP5_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_DEFAULT 0x00000000
-#define mmDCP5_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH_DEFAULT 0x00000000
-#define mmDCP5_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS_DEFAULT 0x00000000
-#define mmDCP5_GRPH_XDMA_FLIP_TIMEOUT_DEFAULT 0x00000000
-#define mmDCP5_GRPH_XDMA_FLIP_AVG_DELAY_DEFAULT 0x00000000
-#define mmDCP5_GRPH_SURFACE_COUNTER_CONTROL_DEFAULT 0x00000012
-#define mmDCP5_GRPH_SURFACE_COUNTER_OUTPUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_lb5_dispdec
-#define mmLB5_LB_DATA_FORMAT_DEFAULT 0x00000000
-#define mmLB5_LB_MEMORY_CTRL_DEFAULT 0x000006b0
-#define mmLB5_LB_MEMORY_SIZE_STATUS_DEFAULT 0x00000000
-#define mmLB5_LB_DESKTOP_HEIGHT_DEFAULT 0x00000000
-#define mmLB5_LB_VLINE_START_END_DEFAULT 0x00000000
-#define mmLB5_LB_VLINE2_START_END_DEFAULT 0x00000000
-#define mmLB5_LB_V_COUNTER_DEFAULT 0x00000000
-#define mmLB5_LB_SNAPSHOT_V_COUNTER_DEFAULT 0x00000000
-#define mmLB5_LB_INTERRUPT_MASK_DEFAULT 0x00000000
-#define mmLB5_LB_VLINE_STATUS_DEFAULT 0x00000000
-#define mmLB5_LB_VLINE2_STATUS_DEFAULT 0x00000000
-#define mmLB5_LB_VBLANK_STATUS_DEFAULT 0x00000000
-#define mmLB5_LB_SYNC_RESET_SEL_DEFAULT 0x00000002
-#define mmLB5_LB_BLACK_KEYER_R_CR_DEFAULT 0x00000000
-#define mmLB5_LB_BLACK_KEYER_G_Y_DEFAULT 0x00000000
-#define mmLB5_LB_BLACK_KEYER_B_CB_DEFAULT 0x00000000
-#define mmLB5_LB_KEYER_COLOR_CTRL_DEFAULT 0x00000000
-#define mmLB5_LB_KEYER_COLOR_R_CR_DEFAULT 0x00000000
-#define mmLB5_LB_KEYER_COLOR_G_Y_DEFAULT 0x00000000
-#define mmLB5_LB_KEYER_COLOR_B_CB_DEFAULT 0x00000000
-#define mmLB5_LB_KEYER_COLOR_REP_R_CR_DEFAULT 0x00000000
-#define mmLB5_LB_KEYER_COLOR_REP_G_Y_DEFAULT 0x00000000
-#define mmLB5_LB_KEYER_COLOR_REP_B_CB_DEFAULT 0x00000000
-#define mmLB5_LB_BUFFER_LEVEL_STATUS_DEFAULT 0xa0008000
-#define mmLB5_LB_BUFFER_URGENCY_CTRL_DEFAULT 0x00200010
-#define mmLB5_LB_BUFFER_URGENCY_STATUS_DEFAULT 0x00000000
-#define mmLB5_LB_BUFFER_STATUS_DEFAULT 0x00000002
-#define mmLB5_LB_NO_OUTSTANDING_REQ_STATUS_DEFAULT 0x00000000
-#define mmLB5_MVP_AFR_FLIP_MODE_DEFAULT 0x00000000
-#define mmLB5_MVP_AFR_FLIP_FIFO_CNTL_DEFAULT 0x00000000
-#define mmLB5_MVP_FLIP_LINE_NUM_INSERT_DEFAULT 0x00000002
-#define mmLB5_DC_MVP_LB_CONTROL_DEFAULT 0x00000001
-
-
-// addressBlock: dce_dc_dcfe5_dispdec
-#define mmDCFE5_DCFE_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFE5_DCFE_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCFE5_DCFE_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFE5_DCFE_MEM_PWR_CTRL2_DEFAULT 0x00000000
-#define mmDCFE5_DCFE_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFE5_DCFE_MISC_DEFAULT 0x00000001
-#define mmDCFE5_DCFE_FLUSH_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon8_dispdec
-#define mmDC_PERFMON8_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON8_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON8_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmif_pg5_dispdec
-#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL_DEFAULT 0x000bf777
-#define mmDMIF_PG5_DPG_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG5_DPG_PIPE_URGENT_LEVEL_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL2_DEFAULT 0x00000000
-#define mmDMIF_PG5_DPG_PIPE_LOW_POWER_CONTROL_DEFAULT 0x00000000
-#define mmDMIF_PG5_DPG_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIF_PG5_DPG_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-#define mmDMIF_PG5_DPG_DVMM_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_scl5_dispdec
-#define mmSCL5_SCL_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmSCL5_SCL_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmSCL5_SCL_MODE_DEFAULT 0x00000000
-#define mmSCL5_SCL_TAP_CONTROL_DEFAULT 0x00000000
-#define mmSCL5_SCL_CONTROL_DEFAULT 0x00000000
-#define mmSCL5_SCL_BYPASS_CONTROL_DEFAULT 0x00000000
-#define mmSCL5_SCL_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmSCL5_SCL_AUTOMATIC_MODE_CONTROL_DEFAULT 0x00000000
-#define mmSCL5_SCL_HORZ_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL5_SCL_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL5_SCL_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL5_SCL_VERT_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCL5_SCL_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCL5_SCL_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCL5_SCL_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCL5_SCL_ROUND_OFFSET_DEFAULT 0x80000000
-#define mmSCL5_SCL_UPDATE_DEFAULT 0x00000000
-#define mmSCL5_SCL_F_SHARP_CONTROL_DEFAULT 0x00000000
-#define mmSCL5_SCL_ALU_CONTROL_DEFAULT 0x00000000
-#define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS_DEFAULT 0x00000000
-#define mmSCL5_VIEWPORT_START_SECONDARY_DEFAULT 0x00000000
-#define mmSCL5_VIEWPORT_START_DEFAULT 0x00000000
-#define mmSCL5_VIEWPORT_SIZE_DEFAULT 0x00000000
-#define mmSCL5_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmSCL5_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmSCL5_SCL_MODE_CHANGE_DET1_DEFAULT 0x00000000
-#define mmSCL5_SCL_MODE_CHANGE_DET2_DEFAULT 0x00000000
-#define mmSCL5_SCL_MODE_CHANGE_DET3_DEFAULT 0x00000000
-#define mmSCL5_SCL_MODE_CHANGE_MASK_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_blnd5_dispdec
-#define mmBLND5_BLND_CONTROL_DEFAULT 0xff0220ff
-#define mmBLND5_BLND_SM_CONTROL2_DEFAULT 0x00000000
-#define mmBLND5_BLND_CONTROL2_DEFAULT 0x00000010
-#define mmBLND5_BLND_UPDATE_DEFAULT 0x00000000
-#define mmBLND5_BLND_UNDERFLOW_INTERRUPT_DEFAULT 0x00000000
-#define mmBLND5_BLND_V_UPDATE_LOCK_DEFAULT 0x80000000
-#define mmBLND5_BLND_REG_UPDATE_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_crtc5_dispdec
-#define mmCRTC5_CRTC_H_BLANK_EARLY_NUM_DEFAULT 0x00000040
-#define mmCRTC5_CRTC_H_TOTAL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_H_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_H_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_H_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VBI_END_DEFAULT 0x00000003
-#define mmCRTC5_CRTC_V_TOTAL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_V_SYNC_A_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_V_SYNC_B_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_V_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_DTMTEST_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_DTMTEST_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CONTROL_DEFAULT 0x80400110
-#define mmCRTC5_CRTC_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_STATUS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_COUNT_RESET_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_STEREO_STATUS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_START_LINE_CONTROL_DEFAULT 0x00003002
-#define mmCRTC5_CRTC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VGA_PARAMETER_CAPTURE_MODE_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_MASTER_UPDATE_LOCK_DEFAULT 0x00010000
-#define mmCRTC5_CRTC_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT_TIMER_DEFAULT 0x00000008
-#define mmCRTC5_CRTC_MVP_STATUS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_MASTER_EN_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_ALLOW_STOP_OFF_V_CNT_DEFAULT 0x00010000
-#define mmCRTC5_CRTC_V_UPDATE_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_OVERSCAN_COLOR_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_OVERSCAN_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_BLACK_COLOR_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC_CNTL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_EXT_TIMING_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_EXT_TIMING_SYNC_WINDOW_START_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_EXT_TIMING_SYNC_WINDOW_END_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmCRTC5_CRTC_3D_STRUCTURE_CONTROL_DEFAULT 0x00000010
-#define mmCRTC5_CRTC_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_GSL_WINDOW_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_GSL_CONTROL_DEFAULT 0x00020000
-#define mmCRTC5_CRTC_RANGE_TIMING_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTC5_CRTC_DRR_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_fmt5_dispdec
-#define mmFMT5_FMT_CLAMP_COMPONENT_R_DEFAULT 0x00000000
-#define mmFMT5_FMT_CLAMP_COMPONENT_G_DEFAULT 0x00000000
-#define mmFMT5_FMT_CLAMP_COMPONENT_B_DEFAULT 0x00000000
-#define mmFMT5_FMT_DYNAMIC_EXP_CNTL_DEFAULT 0x00000000
-#define mmFMT5_FMT_CONTROL_DEFAULT 0x00000000
-#define mmFMT5_FMT_BIT_DEPTH_CONTROL_DEFAULT 0x00600000
-#define mmFMT5_FMT_DITHER_RAND_R_SEED_DEFAULT 0x00000000
-#define mmFMT5_FMT_DITHER_RAND_G_SEED_DEFAULT 0x00000099
-#define mmFMT5_FMT_DITHER_RAND_B_SEED_DEFAULT 0x000000dd
-#define mmFMT5_FMT_CLAMP_CNTL_DEFAULT 0x00000000
-#define mmFMT5_FMT_CRC_CNTL_DEFAULT 0x01000040
-#define mmFMT5_FMT_CRC_SIG_RED_GREEN_MASK_DEFAULT 0x00ff00ff
-#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL_MASK_DEFAULT 0x000700ff
-#define mmFMT5_FMT_CRC_SIG_RED_GREEN_DEFAULT 0x00000000
-#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL_DEFAULT 0x00000000
-#define mmFMT5_FMT_SIDE_BY_SIDE_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmFMT5_FMT_420_HBLANK_EARLY_START_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_unp0_dispdec
-#define mmUNP0_UNP_GRPH_ENABLE_DEFAULT 0x00000001
-#define mmUNP0_UNP_GRPH_CONTROL_DEFAULT 0x0a008008
-#define mmUNP0_UNP_GRPH_CONTROL_C_DEFAULT 0x00008000
-#define mmUNP0_UNP_GRPH_CONTROL_EXP_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SWAP_CNTL_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PITCH_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_PITCH_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_X_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_X_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_Y_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_Y_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_X_START_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_X_START_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_Y_START_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_Y_START_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_X_END_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_X_END_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_Y_END_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_Y_END_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_UPDATE_DEFAULT 0x00000000
-#define mmUNP0_UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_DEFAULT 0x0000ffff
-#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_INUSE_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_INUSE_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C_DEFAULT 0x00000000
-#define mmUNP0_UNP_DVMM_PTE_CONTROL_DEFAULT 0x00004000
-#define mmUNP0_UNP_DVMM_PTE_CONTROL_C_DEFAULT 0x00004000
-#define mmUNP0_UNP_DVMM_PTE_ARB_CONTROL_DEFAULT 0x00002220
-#define mmUNP0_UNP_DVMM_PTE_ARB_CONTROL_C_DEFAULT 0x00002220
-#define mmUNP0_UNP_GRPH_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmUNP0_UNP_GRPH_STEREOSYNC_FLIP_DEFAULT 0x00002020
-#define mmUNP0_UNP_FLIP_CONTROL_DEFAULT 0x00000001
-#define mmUNP0_UNP_CRC_CONTROL_DEFAULT 0x00000000
-#define mmUNP0_UNP_CRC_MASK_DEFAULT 0x00000000
-#define mmUNP0_UNP_CRC_CURRENT_DEFAULT 0x00000000
-#define mmUNP0_UNP_CRC_LAST_DEFAULT 0x00000000
-#define mmUNP0_UNP_LB_DATA_GAP_BETWEEN_CHUNK_DEFAULT 0x00000100
-#define mmUNP0_UNP_HW_ROTATION_DEFAULT 0x00000010
-
-
-// addressBlock: dce_dc_lbv0_dispdec
-#define mmLBV0_LBV_DATA_FORMAT_DEFAULT 0x00000000
-#define mmLBV0_LBV_MEMORY_CTRL_DEFAULT 0x000006b0
-#define mmLBV0_LBV_MEMORY_SIZE_STATUS_DEFAULT 0x00000000
-#define mmLBV0_LBV_DESKTOP_HEIGHT_DEFAULT 0x00000000
-#define mmLBV0_LBV_VLINE_START_END_DEFAULT 0x00000000
-#define mmLBV0_LBV_VLINE2_START_END_DEFAULT 0x00000000
-#define mmLBV0_LBV_V_COUNTER_DEFAULT 0x00000000
-#define mmLBV0_LBV_SNAPSHOT_V_COUNTER_DEFAULT 0x00000000
-#define mmLBV0_LBV_V_COUNTER_CHROMA_DEFAULT 0x00000000
-#define mmLBV0_LBV_SNAPSHOT_V_COUNTER_CHROMA_DEFAULT 0x00000000
-#define mmLBV0_LBV_INTERRUPT_MASK_DEFAULT 0x00000000
-#define mmLBV0_LBV_VLINE_STATUS_DEFAULT 0x00000000
-#define mmLBV0_LBV_VLINE2_STATUS_DEFAULT 0x00000000
-#define mmLBV0_LBV_VBLANK_STATUS_DEFAULT 0x00000000
-#define mmLBV0_LBV_SYNC_RESET_SEL_DEFAULT 0x00000002
-#define mmLBV0_LBV_BLACK_KEYER_R_CR_DEFAULT 0x00000000
-#define mmLBV0_LBV_BLACK_KEYER_G_Y_DEFAULT 0x00000000
-#define mmLBV0_LBV_BLACK_KEYER_B_CB_DEFAULT 0x00000000
-#define mmLBV0_LBV_KEYER_COLOR_CTRL_DEFAULT 0x00000000
-#define mmLBV0_LBV_KEYER_COLOR_R_CR_DEFAULT 0x00000000
-#define mmLBV0_LBV_KEYER_COLOR_G_Y_DEFAULT 0x00000000
-#define mmLBV0_LBV_KEYER_COLOR_B_CB_DEFAULT 0x00000000
-#define mmLBV0_LBV_KEYER_COLOR_REP_R_CR_DEFAULT 0x00000000
-#define mmLBV0_LBV_KEYER_COLOR_REP_G_Y_DEFAULT 0x00000000
-#define mmLBV0_LBV_KEYER_COLOR_REP_B_CB_DEFAULT 0x00000000
-#define mmLBV0_LBV_BUFFER_LEVEL_STATUS_DEFAULT 0xa0008000
-#define mmLBV0_LBV_BUFFER_URGENCY_CTRL_DEFAULT 0x00200010
-#define mmLBV0_LBV_BUFFER_URGENCY_STATUS_DEFAULT 0x00000000
-#define mmLBV0_LBV_BUFFER_STATUS_DEFAULT 0x12000002
-#define mmLBV0_LBV_NO_OUTSTANDING_REQ_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_sclv0_dispdec
-#define mmSCLV0_SCLV_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_MODE_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_TAP_CONTROL_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_CONTROL_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_AUTOMATIC_MODE_CONTROL_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_HORZ_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCLV0_SCLV_HORZ_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_HORZ_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmSCLV0_SCLV_VERT_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCLV0_SCLV_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCLV0_SCLV_VERT_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_VERT_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmSCLV0_SCLV_VERT_FILTER_INIT_BOT_C_DEFAULT 0x01000000
-#define mmSCLV0_SCLV_ROUND_OFFSET_DEFAULT 0x80000000
-#define mmSCLV0_SCLV_UPDATE_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_ALU_CONTROL_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_VIEWPORT_START_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_VIEWPORT_START_SECONDARY_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_VIEWPORT_SIZE_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_VIEWPORT_START_SECONDARY_C_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_VIEWPORT_SIZE_C_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_MODE_CHANGE_DET1_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_MODE_CHANGE_DET2_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_MODE_CHANGE_DET3_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_MODE_CHANGE_MASK_DEFAULT 0x00000000
-#define mmSCLV0_SCLV_HORZ_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCLV0_SCLV_HORZ_FILTER_INIT_BOT_C_DEFAULT 0x01000000
-
-
-// addressBlock: dce_dc_col_man0_dispdec
-#define mmCOL_MAN0_COL_MAN_UPDATE_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_INPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_CSC_C11_C12_A_DEFAULT 0x00002000
-#define mmCOL_MAN0_INPUT_CSC_C13_C14_A_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_CSC_C21_C22_A_DEFAULT 0x20000000
-#define mmCOL_MAN0_INPUT_CSC_C23_C24_A_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_CSC_C31_C32_A_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_CSC_C33_C34_A_DEFAULT 0x00002000
-#define mmCOL_MAN0_INPUT_CSC_C11_C12_B_DEFAULT 0x00002000
-#define mmCOL_MAN0_INPUT_CSC_C13_C14_B_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_CSC_C21_C22_B_DEFAULT 0x20000000
-#define mmCOL_MAN0_INPUT_CSC_C23_C24_B_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_CSC_C31_C32_B_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_CSC_C33_C34_B_DEFAULT 0x00002000
-#define mmCOL_MAN0_PRESCALE_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN0_PRESCALE_VALUES_R_DEFAULT 0x20000000
-#define mmCOL_MAN0_PRESCALE_VALUES_G_DEFAULT 0x20000000
-#define mmCOL_MAN0_PRESCALE_VALUES_B_DEFAULT 0x20000000
-#define mmCOL_MAN0_COL_MAN_OUTPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN0_OUTPUT_CSC_C11_C12_A_DEFAULT 0x00002000
-#define mmCOL_MAN0_OUTPUT_CSC_C13_C14_A_DEFAULT 0x00000000
-#define mmCOL_MAN0_OUTPUT_CSC_C21_C22_A_DEFAULT 0x20000000
-#define mmCOL_MAN0_OUTPUT_CSC_C23_C24_A_DEFAULT 0x00000000
-#define mmCOL_MAN0_OUTPUT_CSC_C31_C32_A_DEFAULT 0x00000000
-#define mmCOL_MAN0_OUTPUT_CSC_C33_C34_A_DEFAULT 0x00002000
-#define mmCOL_MAN0_OUTPUT_CSC_C11_C12_B_DEFAULT 0x00002000
-#define mmCOL_MAN0_OUTPUT_CSC_C13_C14_B_DEFAULT 0x00000000
-#define mmCOL_MAN0_OUTPUT_CSC_C21_C22_B_DEFAULT 0x20000000
-#define mmCOL_MAN0_OUTPUT_CSC_C23_C24_B_DEFAULT 0x00000000
-#define mmCOL_MAN0_OUTPUT_CSC_C31_C32_B_DEFAULT 0x00000000
-#define mmCOL_MAN0_OUTPUT_CSC_C33_C34_B_DEFAULT 0x00002000
-#define mmCOL_MAN0_DENORM_CLAMP_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN0_DENORM_CLAMP_RANGE_R_CR_DEFAULT 0x00000fff
-#define mmCOL_MAN0_DENORM_CLAMP_RANGE_G_Y_DEFAULT 0x00000fff
-#define mmCOL_MAN0_DENORM_CLAMP_RANGE_B_CB_DEFAULT 0x00000fff
-#define mmCOL_MAN0_COL_MAN_FP_CONVERTED_FIELD_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_LUT_INDEX_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_LUT_DATA_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_START_CNTL_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_END_CNTL1_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_END_CNTL2_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_REGION_0_1_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_REGION_2_3_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_REGION_4_5_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_REGION_6_7_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_REGION_8_9_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_REGION_10_11_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_REGION_12_13_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLA_REGION_14_15_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_START_CNTL_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_END_CNTL1_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_END_CNTL2_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_REGION_0_1_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_REGION_2_3_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_REGION_4_5_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_REGION_6_7_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_REGION_8_9_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_REGION_10_11_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_REGION_12_13_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_REGAMMA_CNTLB_REGION_14_15_DEFAULT 0x00000000
-#define mmCOL_MAN0_PACK_FIFO_ERROR_DEFAULT 0x00000000
-#define mmCOL_MAN0_OUTPUT_FIFO_ERROR_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_GAMMA_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_GAMMA_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_GAMMA_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_GAMMA_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmCOL_MAN0_INPUT_GAMMA_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_INPUT_GAMMA_CONTROL1_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_INPUT_GAMMA_CONTROL2_DEFAULT 0x03800000
-#define mmCOL_MAN0_INPUT_GAMMA_BW_OFFSETS_B_DEFAULT 0xffff0000
-#define mmCOL_MAN0_INPUT_GAMMA_BW_OFFSETS_G_DEFAULT 0xffff0000
-#define mmCOL_MAN0_INPUT_GAMMA_BW_OFFSETS_R_DEFAULT 0xffff0000
-#define mmCOL_MAN0_COL_MAN_DEGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmCOL_MAN0_COL_MAN_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmCOL_MAN0_COL_MAN_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmCOL_MAN0_COL_MAN_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-
-
-// addressBlock: dce_dc_dcfev0_dispdec
-#define mmDCFEV0_DCFEV_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_DMIFV_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_DMIFV_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_DMIFV_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_MEM_PWR_CTRL2_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_L_FLUSH_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_C_FLUSH_DEFAULT 0x00000000
-#define mmDCFEV0_DCFEV_MISC_DEFAULT 0x00000001
-
-
-// addressBlock: dce_dc_dc_perfmon11_dispdec
-#define mmDC_PERFMON11_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON11_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON11_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmifv_pg0_dispdec
-#define mmDMIFV_PG0_DPGV0_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV0_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV0_WATERMARK_MASK_CONTROL_DEFAULT 0x00030303
-#define mmDMIFV_PG0_DPGV0_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV0_PIPE_DPM_CONTROL_DEFAULT 0x00003000
-#define mmDMIFV_PG0_DPGV0_PIPE_STUTTER_CONTROL_DEFAULT 0x00000200
-#define mmDMIFV_PG0_DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH_DEFAULT 0x00000200
-#define mmDMIFV_PG0_DPGV0_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV0_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV1_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV1_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV1_WATERMARK_MASK_CONTROL_DEFAULT 0x00030303
-#define mmDMIFV_PG0_DPGV1_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV1_PIPE_DPM_CONTROL_DEFAULT 0x00003000
-#define mmDMIFV_PG0_DPGV1_PIPE_STUTTER_CONTROL_DEFAULT 0x00000200
-#define mmDMIFV_PG0_DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH_DEFAULT 0x00000200
-#define mmDMIFV_PG0_DPGV1_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIFV_PG0_DPGV1_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_blndv0_dispdec
-#define mmBLNDV0_BLNDV_CONTROL_DEFAULT 0xff0220ff
-#define mmBLNDV0_BLNDV_SM_CONTROL2_DEFAULT 0x00000000
-#define mmBLNDV0_BLNDV_CONTROL2_DEFAULT 0x00000010
-#define mmBLNDV0_BLNDV_UPDATE_DEFAULT 0x00000000
-#define mmBLNDV0_BLNDV_UNDERFLOW_INTERRUPT_DEFAULT 0x00000000
-#define mmBLNDV0_BLNDV_V_UPDATE_LOCK_DEFAULT 0x80000000
-#define mmBLNDV0_BLNDV_REG_UPDATE_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_crtcv0_dispdec
-#define mmCRTCV0_CRTCV_H_BLANK_EARLY_NUM_DEFAULT 0x00000040
-#define mmCRTCV0_CRTCV_H_TOTAL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_H_SYNC_A_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_H_SYNC_B_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_H_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VBI_END_DEFAULT 0x00000003
-#define mmCRTCV0_CRTCV_V_TOTAL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_V_SYNC_A_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_V_SYNC_B_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_V_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_DTMTEST_CNTL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_DTMTEST_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CONTROL_DEFAULT 0x80400110
-#define mmCRTCV0_CRTCV_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_STATUS_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_COUNT_RESET_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_STEREO_STATUS_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_START_LINE_CONTROL_DEFAULT 0x00003002
-#define mmCRTCV0_CRTCV_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VGA_PARAMETER_CAPTURE_MODE_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_MASTER_UPDATE_LOCK_DEFAULT 0x00010000
-#define mmCRTCV0_CRTCV_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_MVP_INBAND_CNTL_INSERT_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_MVP_INBAND_CNTL_INSERT_TIMER_DEFAULT 0x00000008
-#define mmCRTCV0_CRTCV_MVP_STATUS_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_MASTER_EN_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_ALLOW_STOP_OFF_V_CNT_DEFAULT 0x00010000
-#define mmCRTCV0_CRTCV_V_UPDATE_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_OVERSCAN_COLOR_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_OVERSCAN_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_BLACK_COLOR_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC_CNTL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_EXT_TIMING_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_EXT_TIMING_SYNC_WINDOW_START_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_EXT_TIMING_SYNC_WINDOW_END_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_EXT_TIMING_SYNC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmCRTCV0_CRTCV_3D_STRUCTURE_CONTROL_DEFAULT 0x00000010
-#define mmCRTCV0_CRTCV_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_GSL_WINDOW_DEFAULT 0x00000000
-#define mmCRTCV0_CRTCV_GSL_CONTROL_DEFAULT 0x00020000
-
-
-// addressBlock: dce_dc_unp1_dispdec
-#define mmUNP1_UNP_GRPH_ENABLE_DEFAULT 0x00000001
-#define mmUNP1_UNP_GRPH_CONTROL_DEFAULT 0x0a008008
-#define mmUNP1_UNP_GRPH_CONTROL_C_DEFAULT 0x00008000
-#define mmUNP1_UNP_GRPH_CONTROL_EXP_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SWAP_CNTL_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PITCH_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_PITCH_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_X_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_X_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_Y_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_Y_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_X_START_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_X_START_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_Y_START_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_Y_START_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_X_END_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_X_END_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_Y_END_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_Y_END_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_UPDATE_DEFAULT 0x00000000
-#define mmUNP1_UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_DEFAULT 0x0000ffff
-#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_INUSE_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_INUSE_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C_DEFAULT 0x00000000
-#define mmUNP1_UNP_DVMM_PTE_CONTROL_DEFAULT 0x00004000
-#define mmUNP1_UNP_DVMM_PTE_CONTROL_C_DEFAULT 0x00004000
-#define mmUNP1_UNP_DVMM_PTE_ARB_CONTROL_DEFAULT 0x00002220
-#define mmUNP1_UNP_DVMM_PTE_ARB_CONTROL_C_DEFAULT 0x00002220
-#define mmUNP1_UNP_GRPH_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmUNP1_UNP_GRPH_STEREOSYNC_FLIP_DEFAULT 0x00002020
-#define mmUNP1_UNP_FLIP_CONTROL_DEFAULT 0x00000001
-#define mmUNP1_UNP_CRC_CONTROL_DEFAULT 0x00000000
-#define mmUNP1_UNP_CRC_MASK_DEFAULT 0x00000000
-#define mmUNP1_UNP_CRC_CURRENT_DEFAULT 0x00000000
-#define mmUNP1_UNP_CRC_LAST_DEFAULT 0x00000000
-#define mmUNP1_UNP_LB_DATA_GAP_BETWEEN_CHUNK_DEFAULT 0x00000100
-#define mmUNP1_UNP_HW_ROTATION_DEFAULT 0x00000010
-
-
-// addressBlock: dce_dc_lbv1_dispdec
-#define mmLBV1_LBV_DATA_FORMAT_DEFAULT 0x00000000
-#define mmLBV1_LBV_MEMORY_CTRL_DEFAULT 0x000006b0
-#define mmLBV1_LBV_MEMORY_SIZE_STATUS_DEFAULT 0x00000000
-#define mmLBV1_LBV_DESKTOP_HEIGHT_DEFAULT 0x00000000
-#define mmLBV1_LBV_VLINE_START_END_DEFAULT 0x00000000
-#define mmLBV1_LBV_VLINE2_START_END_DEFAULT 0x00000000
-#define mmLBV1_LBV_V_COUNTER_DEFAULT 0x00000000
-#define mmLBV1_LBV_SNAPSHOT_V_COUNTER_DEFAULT 0x00000000
-#define mmLBV1_LBV_V_COUNTER_CHROMA_DEFAULT 0x00000000
-#define mmLBV1_LBV_SNAPSHOT_V_COUNTER_CHROMA_DEFAULT 0x00000000
-#define mmLBV1_LBV_INTERRUPT_MASK_DEFAULT 0x00000000
-#define mmLBV1_LBV_VLINE_STATUS_DEFAULT 0x00000000
-#define mmLBV1_LBV_VLINE2_STATUS_DEFAULT 0x00000000
-#define mmLBV1_LBV_VBLANK_STATUS_DEFAULT 0x00000000
-#define mmLBV1_LBV_SYNC_RESET_SEL_DEFAULT 0x00000002
-#define mmLBV1_LBV_BLACK_KEYER_R_CR_DEFAULT 0x00000000
-#define mmLBV1_LBV_BLACK_KEYER_G_Y_DEFAULT 0x00000000
-#define mmLBV1_LBV_BLACK_KEYER_B_CB_DEFAULT 0x00000000
-#define mmLBV1_LBV_KEYER_COLOR_CTRL_DEFAULT 0x00000000
-#define mmLBV1_LBV_KEYER_COLOR_R_CR_DEFAULT 0x00000000
-#define mmLBV1_LBV_KEYER_COLOR_G_Y_DEFAULT 0x00000000
-#define mmLBV1_LBV_KEYER_COLOR_B_CB_DEFAULT 0x00000000
-#define mmLBV1_LBV_KEYER_COLOR_REP_R_CR_DEFAULT 0x00000000
-#define mmLBV1_LBV_KEYER_COLOR_REP_G_Y_DEFAULT 0x00000000
-#define mmLBV1_LBV_KEYER_COLOR_REP_B_CB_DEFAULT 0x00000000
-#define mmLBV1_LBV_BUFFER_LEVEL_STATUS_DEFAULT 0xa0008000
-#define mmLBV1_LBV_BUFFER_URGENCY_CTRL_DEFAULT 0x00200010
-#define mmLBV1_LBV_BUFFER_URGENCY_STATUS_DEFAULT 0x00000000
-#define mmLBV1_LBV_BUFFER_STATUS_DEFAULT 0x12000002
-#define mmLBV1_LBV_NO_OUTSTANDING_REQ_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_sclv1_dispdec
-#define mmSCLV1_SCLV_COEF_RAM_SELECT_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_COEF_RAM_TAP_DATA_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_MODE_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_TAP_CONTROL_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_CONTROL_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_MANUAL_REPLICATE_CONTROL_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_AUTOMATIC_MODE_CONTROL_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_HORZ_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_HORZ_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_HORZ_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCLV1_SCLV_HORZ_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_HORZ_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmSCLV1_SCLV_VERT_FILTER_CONTROL_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_VERT_FILTER_SCALE_RATIO_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_VERT_FILTER_INIT_DEFAULT 0x01000000
-#define mmSCLV1_SCLV_VERT_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCLV1_SCLV_VERT_FILTER_SCALE_RATIO_C_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_VERT_FILTER_INIT_C_DEFAULT 0x01000000
-#define mmSCLV1_SCLV_VERT_FILTER_INIT_BOT_C_DEFAULT 0x01000000
-#define mmSCLV1_SCLV_ROUND_OFFSET_DEFAULT 0x80000000
-#define mmSCLV1_SCLV_UPDATE_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_ALU_CONTROL_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_VIEWPORT_START_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_VIEWPORT_START_SECONDARY_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_VIEWPORT_SIZE_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_VIEWPORT_START_C_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_VIEWPORT_START_SECONDARY_C_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_VIEWPORT_SIZE_C_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_EXT_OVERSCAN_LEFT_RIGHT_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_EXT_OVERSCAN_TOP_BOTTOM_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_MODE_CHANGE_DET1_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_MODE_CHANGE_DET2_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_MODE_CHANGE_DET3_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_MODE_CHANGE_MASK_DEFAULT 0x00000000
-#define mmSCLV1_SCLV_HORZ_FILTER_INIT_BOT_DEFAULT 0x01000000
-#define mmSCLV1_SCLV_HORZ_FILTER_INIT_BOT_C_DEFAULT 0x01000000
-
-
-// addressBlock: dce_dc_col_man1_dispdec
-#define mmCOL_MAN1_COL_MAN_UPDATE_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_INPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_CSC_C11_C12_A_DEFAULT 0x00002000
-#define mmCOL_MAN1_INPUT_CSC_C13_C14_A_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_CSC_C21_C22_A_DEFAULT 0x20000000
-#define mmCOL_MAN1_INPUT_CSC_C23_C24_A_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_CSC_C31_C32_A_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_CSC_C33_C34_A_DEFAULT 0x00002000
-#define mmCOL_MAN1_INPUT_CSC_C11_C12_B_DEFAULT 0x00002000
-#define mmCOL_MAN1_INPUT_CSC_C13_C14_B_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_CSC_C21_C22_B_DEFAULT 0x20000000
-#define mmCOL_MAN1_INPUT_CSC_C23_C24_B_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_CSC_C31_C32_B_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_CSC_C33_C34_B_DEFAULT 0x00002000
-#define mmCOL_MAN1_PRESCALE_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN1_PRESCALE_VALUES_R_DEFAULT 0x20000000
-#define mmCOL_MAN1_PRESCALE_VALUES_G_DEFAULT 0x20000000
-#define mmCOL_MAN1_PRESCALE_VALUES_B_DEFAULT 0x20000000
-#define mmCOL_MAN1_COL_MAN_OUTPUT_CSC_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN1_OUTPUT_CSC_C11_C12_A_DEFAULT 0x00002000
-#define mmCOL_MAN1_OUTPUT_CSC_C13_C14_A_DEFAULT 0x00000000
-#define mmCOL_MAN1_OUTPUT_CSC_C21_C22_A_DEFAULT 0x20000000
-#define mmCOL_MAN1_OUTPUT_CSC_C23_C24_A_DEFAULT 0x00000000
-#define mmCOL_MAN1_OUTPUT_CSC_C31_C32_A_DEFAULT 0x00000000
-#define mmCOL_MAN1_OUTPUT_CSC_C33_C34_A_DEFAULT 0x00002000
-#define mmCOL_MAN1_OUTPUT_CSC_C11_C12_B_DEFAULT 0x00002000
-#define mmCOL_MAN1_OUTPUT_CSC_C13_C14_B_DEFAULT 0x00000000
-#define mmCOL_MAN1_OUTPUT_CSC_C21_C22_B_DEFAULT 0x20000000
-#define mmCOL_MAN1_OUTPUT_CSC_C23_C24_B_DEFAULT 0x00000000
-#define mmCOL_MAN1_OUTPUT_CSC_C31_C32_B_DEFAULT 0x00000000
-#define mmCOL_MAN1_OUTPUT_CSC_C33_C34_B_DEFAULT 0x00002000
-#define mmCOL_MAN1_DENORM_CLAMP_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN1_DENORM_CLAMP_RANGE_R_CR_DEFAULT 0x00000fff
-#define mmCOL_MAN1_DENORM_CLAMP_RANGE_G_Y_DEFAULT 0x00000fff
-#define mmCOL_MAN1_DENORM_CLAMP_RANGE_B_CB_DEFAULT 0x00000fff
-#define mmCOL_MAN1_COL_MAN_FP_CONVERTED_FIELD_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_LUT_INDEX_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_LUT_DATA_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_LUT_WRITE_EN_MASK_DEFAULT 0x00000007
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_START_CNTL_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_END_CNTL1_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_END_CNTL2_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_REGION_0_1_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_REGION_2_3_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_REGION_4_5_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_REGION_6_7_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_REGION_8_9_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_REGION_10_11_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_REGION_12_13_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLA_REGION_14_15_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_START_CNTL_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_SLOPE_CNTL_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_END_CNTL1_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_END_CNTL2_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_REGION_0_1_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_REGION_2_3_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_REGION_4_5_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_REGION_6_7_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_REGION_8_9_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_REGION_10_11_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_REGION_12_13_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_REGAMMA_CNTLB_REGION_14_15_DEFAULT 0x00000000
-#define mmCOL_MAN1_PACK_FIFO_ERROR_DEFAULT 0x00000000
-#define mmCOL_MAN1_OUTPUT_FIFO_ERROR_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_GAMMA_LUT_AUTOFILL_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_GAMMA_LUT_RW_INDEX_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_GAMMA_LUT_SEQ_COLOR_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_GAMMA_LUT_PWL_DATA_DEFAULT 0x00000000
-#define mmCOL_MAN1_INPUT_GAMMA_LUT_30_COLOR_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_INPUT_GAMMA_CONTROL1_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_INPUT_GAMMA_CONTROL2_DEFAULT 0x03800000
-#define mmCOL_MAN1_INPUT_GAMMA_BW_OFFSETS_B_DEFAULT 0xffff0000
-#define mmCOL_MAN1_INPUT_GAMMA_BW_OFFSETS_G_DEFAULT 0xffff0000
-#define mmCOL_MAN1_INPUT_GAMMA_BW_OFFSETS_R_DEFAULT 0xffff0000
-#define mmCOL_MAN1_COL_MAN_DEGAMMA_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_GAMUT_REMAP_CONTROL_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_GAMUT_REMAP_C11_C12_DEFAULT 0x00002000
-#define mmCOL_MAN1_COL_MAN_GAMUT_REMAP_C13_C14_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_GAMUT_REMAP_C21_C22_DEFAULT 0x20000000
-#define mmCOL_MAN1_COL_MAN_GAMUT_REMAP_C23_C24_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_GAMUT_REMAP_C31_C32_DEFAULT 0x00000000
-#define mmCOL_MAN1_COL_MAN_GAMUT_REMAP_C33_C34_DEFAULT 0x00002000
-
-
-// addressBlock: dce_dc_dcfev1_dispdec
-#define mmDCFEV1_DCFEV_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_SOFT_RESET_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_DMIFV_CLOCK_CONTROL_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_DMIFV_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_DMIFV_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_MEM_PWR_CTRL_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_MEM_PWR_CTRL2_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_MEM_PWR_STATUS_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_L_FLUSH_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_C_FLUSH_DEFAULT 0x00000000
-#define mmDCFEV1_DCFEV_MISC_DEFAULT 0x00000001
-
-
-// addressBlock: dce_dc_dc_perfmon12_dispdec
-#define mmDC_PERFMON12_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON12_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON12_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dmifv_pg1_dispdec
-#define mmDMIFV_PG1_DPGV0_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV0_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV0_WATERMARK_MASK_CONTROL_DEFAULT 0x00030303
-#define mmDMIFV_PG1_DPGV0_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV0_PIPE_DPM_CONTROL_DEFAULT 0x00003000
-#define mmDMIFV_PG1_DPGV0_PIPE_STUTTER_CONTROL_DEFAULT 0x00000200
-#define mmDMIFV_PG1_DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH_DEFAULT 0x00000200
-#define mmDMIFV_PG1_DPGV0_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV0_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV1_PIPE_ARBITRATION_CONTROL1_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV1_PIPE_ARBITRATION_CONTROL2_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV1_WATERMARK_MASK_CONTROL_DEFAULT 0x00030303
-#define mmDMIFV_PG1_DPGV1_PIPE_URGENCY_CONTROL_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV1_PIPE_DPM_CONTROL_DEFAULT 0x00003000
-#define mmDMIFV_PG1_DPGV1_PIPE_STUTTER_CONTROL_DEFAULT 0x00000200
-#define mmDMIFV_PG1_DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH_DEFAULT 0x00000200
-#define mmDMIFV_PG1_DPGV1_REPEATER_PROGRAM_DEFAULT 0x00000000
-#define mmDMIFV_PG1_DPGV1_CHK_PRE_PROC_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_blndv1_dispdec
-#define mmBLNDV1_BLNDV_CONTROL_DEFAULT 0xff0220ff
-#define mmBLNDV1_BLNDV_SM_CONTROL2_DEFAULT 0x00000000
-#define mmBLNDV1_BLNDV_CONTROL2_DEFAULT 0x00000010
-#define mmBLNDV1_BLNDV_UPDATE_DEFAULT 0x00000000
-#define mmBLNDV1_BLNDV_UNDERFLOW_INTERRUPT_DEFAULT 0x00000000
-#define mmBLNDV1_BLNDV_V_UPDATE_LOCK_DEFAULT 0x80000000
-#define mmBLNDV1_BLNDV_REG_UPDATE_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_crtcv1_dispdec
-#define mmCRTCV1_CRTCV_H_BLANK_EARLY_NUM_DEFAULT 0x00000040
-#define mmCRTCV1_CRTCV_H_TOTAL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_H_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_H_SYNC_A_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_H_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_H_SYNC_B_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_H_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VBI_END_DEFAULT 0x00000003
-#define mmCRTCV1_CRTCV_V_TOTAL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_V_TOTAL_MIN_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_V_TOTAL_MAX_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_V_TOTAL_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_V_TOTAL_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VSYNC_NOM_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_V_BLANK_START_END_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_V_SYNC_A_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_V_SYNC_A_CNTL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_V_SYNC_B_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_V_SYNC_B_CNTL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_DTMTEST_CNTL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_DTMTEST_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_TRIGA_CNTL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_TRIGA_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_TRIGB_CNTL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_TRIGB_MANUAL_TRIG_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_FORCE_COUNT_NOW_CNTL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_FLOW_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_STEREO_FORCE_NEXT_EYE_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_AVSYNC_COUNTER_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CONTROL_DEFAULT 0x80400110
-#define mmCRTCV1_CRTCV_BLANK_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_INTERLACE_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_INTERLACE_STATUS_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_FIELD_INDICATION_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_PIXEL_DATA_READBACK0_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_PIXEL_DATA_READBACK1_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_STATUS_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_STATUS_POSITION_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_NOM_VERT_POSITION_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_STATUS_FRAME_COUNT_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_STATUS_VF_COUNT_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_STATUS_HV_COUNT_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_COUNT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_COUNT_RESET_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_MANUAL_FORCE_VSYNC_NEXT_LINE_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VERT_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_STEREO_STATUS_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_STEREO_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_SNAPSHOT_STATUS_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_SNAPSHOT_POSITION_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_SNAPSHOT_FRAME_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_START_LINE_CONTROL_DEFAULT 0x00003002
-#define mmCRTCV1_CRTCV_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_UPDATE_LOCK_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_DOUBLE_BUFFER_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VGA_PARAMETER_CAPTURE_MODE_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_TEST_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_TEST_PATTERN_PARAMETERS_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_TEST_PATTERN_COLOR_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_MASTER_UPDATE_LOCK_DEFAULT 0x00010000
-#define mmCRTCV1_CRTCV_MASTER_UPDATE_MODE_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_MVP_INBAND_CNTL_INSERT_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_MVP_INBAND_CNTL_INSERT_TIMER_DEFAULT 0x00000008
-#define mmCRTCV1_CRTCV_MVP_STATUS_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_MASTER_EN_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_ALLOW_STOP_OFF_V_CNT_DEFAULT 0x00010000
-#define mmCRTCV1_CRTCV_V_UPDATE_INT_STATUS_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_OVERSCAN_COLOR_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_OVERSCAN_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_BLANK_DATA_COLOR_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_BLANK_DATA_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_BLACK_COLOR_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_BLACK_COLOR_EXT_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VERTICAL_INTERRUPT0_POSITION_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VERTICAL_INTERRUPT0_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VERTICAL_INTERRUPT1_POSITION_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VERTICAL_INTERRUPT1_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VERTICAL_INTERRUPT2_POSITION_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_VERTICAL_INTERRUPT2_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC_CNTL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC0_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC0_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC0_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC0_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC0_DATA_RG_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC0_DATA_B_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC1_WINDOWA_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC1_WINDOWA_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC1_WINDOWB_X_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC1_WINDOWB_Y_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC1_DATA_RG_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_CRC1_DATA_B_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_EXT_TIMING_SYNC_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_EXT_TIMING_SYNC_WINDOW_START_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_EXT_TIMING_SYNC_WINDOW_END_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_EXT_TIMING_SYNC_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_STATIC_SCREEN_CONTROL_DEFAULT 0x00010000
-#define mmCRTCV1_CRTCV_3D_STRUCTURE_CONTROL_DEFAULT 0x00000010
-#define mmCRTCV1_CRTCV_GSL_VSYNC_GAP_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_GSL_WINDOW_DEFAULT 0x00000000
-#define mmCRTCV1_CRTCV_GSL_CONTROL_DEFAULT 0x00020000
-
-
-// addressBlock: dce_dc_hpd0_dispdec
-#define mmHPD0_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD0_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD0_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hpd1_dispdec
-#define mmHPD1_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD1_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD1_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hpd2_dispdec
-#define mmHPD2_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD2_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD2_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD2_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD2_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hpd3_dispdec
-#define mmHPD3_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD3_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD3_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD3_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD3_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hpd4_dispdec
-#define mmHPD4_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD4_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD4_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD4_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD4_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_hpd5_dispdec
-#define mmHPD5_DC_HPD_INT_STATUS_DEFAULT 0x00000000
-#define mmHPD5_DC_HPD_INT_CONTROL_DEFAULT 0x00000000
-#define mmHPD5_DC_HPD_CONTROL_DEFAULT 0x10fa09c4
-#define mmHPD5_DC_HPD_FAST_TRAIN_CNTL_DEFAULT 0x00000000
-#define mmHPD5_DC_HPD_TOGGLE_FILT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon2_dispdec
-#define mmDC_PERFMON2_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON2_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON2_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp_aux0_dispdec
-#define mmDP_AUX0_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX0_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX0_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX0_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp_aux1_dispdec
-#define mmDP_AUX1_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX1_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX1_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX1_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp_aux2_dispdec
-#define mmDP_AUX2_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX2_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX2_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX2_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX2_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX2_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp_aux3_dispdec
-#define mmDP_AUX3_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX3_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX3_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX3_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX3_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX3_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp_aux4_dispdec
-#define mmDP_AUX4_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX4_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX4_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX4_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX4_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX4_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp_aux5_dispdec
-#define mmDP_AUX5_AUX_CONTROL_DEFAULT 0x01040000
-#define mmDP_AUX5_AUX_SW_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_ARB_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_INTERRUPT_CONTROL_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_SW_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_LS_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_SW_DATA_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_LS_DATA_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_DPHY_TX_REF_CONTROL_DEFAULT 0x00320000
-#define mmDP_AUX5_AUX_DPHY_TX_CONTROL_DEFAULT 0x00021002
-#define mmDP_AUX5_AUX_DPHY_RX_CONTROL0_DEFAULT 0x223d1210
-#define mmDP_AUX5_AUX_DPHY_RX_CONTROL1_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_DPHY_TX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_DPHY_RX_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_GTC_SYNC_ERROR_CONTROL_DEFAULT 0x00210000
-#define mmDP_AUX5_AUX_GTC_SYNC_CONTROLLER_STATUS_DEFAULT 0x00000000
-#define mmDP_AUX5_AUX_GTC_SYNC_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dig0_dispdec
-#define mmDIG0_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG0_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG0_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG0_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG0_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG0_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG0_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG0_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG0_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG0_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG0_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG0_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG0_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AVI_INFO0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AVI_INFO1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AVI_INFO2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AVI_INFO3_DEFAULT 0x02000000
-#define mmDIG0_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG0_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG0_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG0_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG0_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG0_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG0_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG0_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG0_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG0_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG0_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG0_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG0_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG0_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG0_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG0_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG0_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG0_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG0_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG0_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG0_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG0_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG0_AFMT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp0_dispdec
-#define mmDP0_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP0_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP0_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP0_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP0_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP0_DP_VID_N_DEFAULT 0x00002000
-#define mmDP0_DP_VID_M_DEFAULT 0x00000000
-#define mmDP0_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP0_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP0_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP0_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP0_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP0_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP0_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_V_TIMING_OVERRIDE1_DEFAULT 0x00000000
-#define mmDP0_DP_MSA_V_TIMING_OVERRIDE2_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP0_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP0_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP0_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP0_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP0_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP0_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dig1_dispdec
-#define mmDIG1_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG1_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG1_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG1_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG1_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG1_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG1_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG1_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG1_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG1_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG1_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG1_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG1_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AVI_INFO0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AVI_INFO1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AVI_INFO2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AVI_INFO3_DEFAULT 0x02000000
-#define mmDIG1_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG1_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG1_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG1_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG1_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG1_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG1_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG1_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG1_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG1_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG1_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG1_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG1_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG1_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG1_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG1_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG1_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG1_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG1_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG1_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG1_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG1_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG1_AFMT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp1_dispdec
-#define mmDP1_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP1_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP1_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP1_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP1_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP1_DP_VID_N_DEFAULT 0x00002000
-#define mmDP1_DP_VID_M_DEFAULT 0x00000000
-#define mmDP1_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP1_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP1_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP1_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP1_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP1_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP1_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_V_TIMING_OVERRIDE1_DEFAULT 0x00000000
-#define mmDP1_DP_MSA_V_TIMING_OVERRIDE2_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP1_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP1_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP1_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP1_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP1_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP1_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dig2_dispdec
-#define mmDIG2_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG2_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG2_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG2_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG2_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG2_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG2_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG2_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG2_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG2_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG2_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG2_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG2_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG2_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AVI_INFO0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AVI_INFO1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AVI_INFO2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AVI_INFO3_DEFAULT 0x02000000
-#define mmDIG2_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG2_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG2_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG2_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG2_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG2_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG2_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG2_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG2_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG2_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG2_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG2_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG2_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG2_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG2_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG2_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG2_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG2_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG2_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG2_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG2_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG2_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG2_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG2_AFMT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp2_dispdec
-#define mmDP2_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP2_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP2_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP2_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP2_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP2_DP_VID_N_DEFAULT 0x00002000
-#define mmDP2_DP_VID_M_DEFAULT 0x00000000
-#define mmDP2_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP2_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP2_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP2_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP2_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP2_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP2_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP2_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_V_TIMING_OVERRIDE1_DEFAULT 0x00000000
-#define mmDP2_DP_MSA_V_TIMING_OVERRIDE2_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP2_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP2_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP2_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP2_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP2_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP2_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP2_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dig3_dispdec
-#define mmDIG3_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG3_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG3_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG3_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG3_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG3_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG3_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG3_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG3_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG3_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG3_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG3_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG3_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG3_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AVI_INFO0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AVI_INFO1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AVI_INFO2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AVI_INFO3_DEFAULT 0x02000000
-#define mmDIG3_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG3_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG3_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG3_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG3_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG3_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG3_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG3_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG3_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG3_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG3_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG3_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG3_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG3_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG3_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG3_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG3_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG3_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG3_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG3_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG3_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG3_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG3_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG3_AFMT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp3_dispdec
-#define mmDP3_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP3_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP3_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP3_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP3_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP3_DP_VID_N_DEFAULT 0x00002000
-#define mmDP3_DP_VID_M_DEFAULT 0x00000000
-#define mmDP3_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP3_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP3_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP3_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP3_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP3_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP3_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP3_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_V_TIMING_OVERRIDE1_DEFAULT 0x00000000
-#define mmDP3_DP_MSA_V_TIMING_OVERRIDE2_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP3_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP3_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP3_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP3_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP3_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP3_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP3_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dig4_dispdec
-#define mmDIG4_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG4_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG4_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG4_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG4_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG4_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG4_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG4_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG4_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG4_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG4_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG4_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG4_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG4_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AVI_INFO0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AVI_INFO1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AVI_INFO2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AVI_INFO3_DEFAULT 0x02000000
-#define mmDIG4_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG4_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG4_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG4_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG4_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG4_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG4_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG4_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG4_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG4_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG4_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG4_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG4_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG4_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG4_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG4_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG4_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG4_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG4_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG4_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG4_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG4_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG4_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG4_AFMT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp4_dispdec
-#define mmDP4_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP4_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP4_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP4_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP4_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP4_DP_VID_N_DEFAULT 0x00002000
-#define mmDP4_DP_VID_M_DEFAULT 0x00000000
-#define mmDP4_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP4_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP4_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP4_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP4_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP4_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP4_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP4_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_V_TIMING_OVERRIDE1_DEFAULT 0x00000000
-#define mmDP4_DP_MSA_V_TIMING_OVERRIDE2_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP4_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP4_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP4_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP4_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP4_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP4_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP4_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dig5_dispdec
-#define mmDIG5_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG5_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG5_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG5_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG5_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG5_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG5_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG5_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG5_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG5_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG5_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG5_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG5_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG5_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AVI_INFO0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AVI_INFO1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AVI_INFO2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AVI_INFO3_DEFAULT 0x02000000
-#define mmDIG5_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG5_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG5_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG5_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG5_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG5_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG5_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG5_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG5_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG5_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG5_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG5_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG5_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG5_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG5_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG5_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG5_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG5_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG5_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG5_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG5_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG5_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG5_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG5_AFMT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp5_dispdec
-#define mmDP5_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP5_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP5_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP5_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP5_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP5_DP_VID_N_DEFAULT 0x00002000
-#define mmDP5_DP_VID_M_DEFAULT 0x00000000
-#define mmDP5_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP5_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP5_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP5_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP5_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP5_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP5_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP5_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_V_TIMING_OVERRIDE1_DEFAULT 0x00000000
-#define mmDP5_DP_MSA_V_TIMING_OVERRIDE2_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP5_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP5_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP5_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP5_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP5_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP5_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP5_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dig6_dispdec
-#define mmDIG6_DIG_FE_CNTL_DEFAULT 0x00000000
-#define mmDIG6_DIG_OUTPUT_CRC_CNTL_DEFAULT 0x00000100
-#define mmDIG6_DIG_OUTPUT_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG6_DIG_CLOCK_PATTERN_DEFAULT 0x00000063
-#define mmDIG6_DIG_TEST_PATTERN_DEFAULT 0x00000060
-#define mmDIG6_DIG_RANDOM_PATTERN_SEED_DEFAULT 0x00222222
-#define mmDIG6_DIG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDIG6_HDMI_CONTROL_DEFAULT 0x00010001
-#define mmDIG6_HDMI_STATUS_DEFAULT 0x00000000
-#define mmDIG6_HDMI_AUDIO_PACKET_CONTROL_DEFAULT 0x00000010
-#define mmDIG6_HDMI_ACR_PACKET_CONTROL_DEFAULT 0x00010000
-#define mmDIG6_HDMI_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG6_HDMI_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_INFOFRAME_CONTROL1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_INTERRUPT_STATUS_DEFAULT 0x00000000
-#define mmDIG6_HDMI_GC_DEFAULT 0x00000004
-#define mmDIG6_AFMT_AUDIO_PACKET_CONTROL2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_3_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC1_4_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC2_0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC2_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC2_2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_ISRC2_3_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AVI_INFO0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AVI_INFO1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AVI_INFO2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AVI_INFO3_DEFAULT 0x02000000
-#define mmDIG6_AFMT_MPEG_INFO0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_MPEG_INFO1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_HDR_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_3_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_4_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_5_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_6_DEFAULT 0x00000000
-#define mmDIG6_AFMT_GENERIC_7_DEFAULT 0x00000000
-#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_32_0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_32_1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_44_0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_44_1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_48_0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_48_1_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_STATUS_0_DEFAULT 0x00000000
-#define mmDIG6_HDMI_ACR_STATUS_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_INFO0_DEFAULT 0x00000170
-#define mmDIG6_AFMT_AUDIO_INFO1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_60958_0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_60958_1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_CRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG6_AFMT_RAMP_CONTROL0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_RAMP_CONTROL1_DEFAULT 0x00000000
-#define mmDIG6_AFMT_RAMP_CONTROL2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_RAMP_CONTROL3_DEFAULT 0x00000000
-#define mmDIG6_AFMT_60958_2_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_CRC_RESULT_DEFAULT 0x00000000
-#define mmDIG6_AFMT_STATUS_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_PACKET_CONTROL_DEFAULT 0x00000800
-#define mmDIG6_AFMT_VBI_PACKET_CONTROL_DEFAULT 0x00000000
-#define mmDIG6_AFMT_INFOFRAME_CONTROL0_DEFAULT 0x00000000
-#define mmDIG6_AFMT_AUDIO_SRC_CONTROL_DEFAULT 0x00000000
-#define mmDIG6_DIG_BE_CNTL_DEFAULT 0x00010000
-#define mmDIG6_DIG_BE_EN_CNTL_DEFAULT 0x00000000
-#define mmDIG6_TMDS_CNTL_DEFAULT 0x00000001
-#define mmDIG6_TMDS_CONTROL_CHAR_DEFAULT 0x00000000
-#define mmDIG6_TMDS_CONTROL0_FEEDBACK_DEFAULT 0x00000000
-#define mmDIG6_TMDS_STEREOSYNC_CTL_SEL_DEFAULT 0x00000000
-#define mmDIG6_TMDS_SYNC_CHAR_PATTERN_0_1_DEFAULT 0x00000000
-#define mmDIG6_TMDS_SYNC_CHAR_PATTERN_2_3_DEFAULT 0x00000000
-#define mmDIG6_TMDS_CTL_BITS_DEFAULT 0x00000000
-#define mmDIG6_TMDS_DCBALANCER_CONTROL_DEFAULT 0x00000001
-#define mmDIG6_TMDS_CTL0_1_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG6_TMDS_CTL2_3_GEN_CNTL_DEFAULT 0x00000000
-#define mmDIG6_DIG_VERSION_DEFAULT 0x00000000
-#define mmDIG6_DIG_LANE_ENABLE_DEFAULT 0x00000000
-#define mmDIG6_AFMT_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dp6_dispdec
-#define mmDP6_DP_LINK_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_COLORIMETRY_DEFAULT 0x00000000
-#define mmDP6_DP_CONFIG_DEFAULT 0x00000000
-#define mmDP6_DP_VID_STREAM_CNTL_DEFAULT 0x00000200
-#define mmDP6_DP_STEER_FIFO_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_MISC_DEFAULT 0x00000000
-#define mmDP6_DP_VID_TIMING_DEFAULT 0x00000000
-#define mmDP6_DP_VID_N_DEFAULT 0x00002000
-#define mmDP6_DP_VID_M_DEFAULT 0x00000000
-#define mmDP6_DP_LINK_FRAMING_CNTL_DEFAULT 0x10002000
-#define mmDP6_DP_HBR2_EYE_PATTERN_DEFAULT 0x00000000
-#define mmDP6_DP_VID_MSA_VBID_DEFAULT 0x01000000
-#define mmDP6_DP_VID_INTERRUPT_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_TRAINING_PATTERN_SEL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_SYM0_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_SYM1_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_SYM2_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_8B10B_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_PRBS_CNTL_DEFAULT 0x7fffff00
-#define mmDP6_DP_DPHY_SCRAM_CNTL_DEFAULT 0x0101ff10
-#define mmDP6_DP_DPHY_CRC_EN_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_CRC_CNTL_DEFAULT 0x00ff0000
-#define mmDP6_DP_DPHY_CRC_RESULT_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_CRC_MST_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_CRC_MST_STATUS_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_FAST_TRAINING_DEFAULT 0x20020000
-#define mmDP6_DP_DPHY_FAST_TRAINING_STATUS_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_V_TIMING_OVERRIDE1_DEFAULT 0x00000000
-#define mmDP6_DP_MSA_V_TIMING_OVERRIDE2_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_CNTL1_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_FRAMING1_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_FRAMING2_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_FRAMING3_DEFAULT 0x00000200
-#define mmDP6_DP_SEC_FRAMING4_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_AUD_N_DEFAULT 0x00008000
-#define mmDP6_DP_SEC_AUD_N_READBACK_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_AUD_M_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_AUD_M_READBACK_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_TIMESTAMP_DEFAULT 0x00000000
-#define mmDP6_DP_SEC_PACKET_CNTL_DEFAULT 0x00001100
-#define mmDP6_DP_MSE_RATE_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_RATE_UPDATE_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT0_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT1_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT2_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT_UPDATE_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_LINK_TIMING_DEFAULT 0x000203ff
-#define mmDP6_DP_MSE_MISC_CNTL_DEFAULT 0x00000000
-#define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL_DEFAULT 0x00000005
-#define mmDP6_DP_DPHY_HBR2_PATTERN_CONTROL_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT0_STATUS_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT1_STATUS_DEFAULT 0x00000000
-#define mmDP6_DP_MSE_SAT2_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_uniphy0_dispdec
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophycmregs0_dispdec
-#define mmDC_COMBOPHYCMREGS0_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_FUSE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS0_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS0_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS0_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS0_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophytxregs0_dispdec
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophypllregs0_dispdec
-#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS0_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS0_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS0_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS0_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS0_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS0_DFT_OUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_uniphy1_dispdec
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophycmregs1_dispdec
-#define mmDC_COMBOPHYCMREGS1_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_FUSE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS1_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS1_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS1_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS1_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophytxregs1_dispdec
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophypllregs1_dispdec
-#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS1_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS1_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS1_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS1_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS1_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS1_DFT_OUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_uniphy2_dispdec
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophycmregs2_dispdec
-#define mmDC_COMBOPHYCMREGS2_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_FUSE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS2_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS2_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS2_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS2_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophytxregs2_dispdec
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophypllregs2_dispdec
-#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS2_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS2_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS2_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS2_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS2_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS2_DFT_OUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_uniphy3_dispdec
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophycmregs3_dispdec
-#define mmDC_COMBOPHYCMREGS3_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_FUSE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS3_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS3_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS3_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS3_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophytxregs3_dispdec
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophypllregs3_dispdec
-#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS3_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS3_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS3_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS3_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS3_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS3_DFT_OUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_uniphy4_dispdec
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophycmregs4_dispdec
-#define mmDC_COMBOPHYCMREGS4_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_FUSE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS4_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS4_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS4_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS4_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophytxregs4_dispdec
-#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophypllregs4_dispdec
-#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS4_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS4_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS4_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS4_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS4_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS4_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS4_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS4_DFT_OUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_uniphy5_dispdec
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophycmregs5_dispdec
-#define mmDC_COMBOPHYCMREGS5_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_FUSE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS5_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS5_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS5_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS5_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophytxregs5_dispdec
-#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophypllregs5_dispdec
-#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS5_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS5_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS5_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS5_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS5_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS5_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS5_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS5_DFT_OUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_uniphy6_dispdec
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophycmregs6_dispdec
-#define mmDC_COMBOPHYCMREGS6_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_FUSE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS6_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS6_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS6_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS6_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophytxregs6_dispdec
-#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophypllregs6_dispdec
-#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS6_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS6_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS6_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS6_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS6_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS6_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS6_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS6_DFT_OUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dcio_uniphy8_dispdec
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED0_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED1_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED2_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED3_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED4_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED5_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED6_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED7_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED8_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED9_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED10_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED11_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED12_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED13_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED14_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED15_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED16_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED17_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED18_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED19_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED20_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED21_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED22_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED23_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED24_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED25_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED26_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED27_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED28_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED29_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED30_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED31_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED32_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED33_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED34_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED35_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED36_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED37_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED38_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED39_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED40_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED41_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED42_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED43_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED44_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED45_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED46_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED47_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED48_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED49_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED50_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED51_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED52_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED53_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED54_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED55_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED56_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED57_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED58_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED59_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED60_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED61_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED62_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED63_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED64_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED65_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED66_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED67_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED68_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED69_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED70_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED71_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED72_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED73_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED74_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED75_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED76_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED77_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED78_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED79_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED80_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED81_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED82_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED83_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED84_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED85_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED86_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED87_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED88_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED89_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED90_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED91_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED92_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED93_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED94_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED95_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED96_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED97_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED98_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED99_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED100_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED101_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED102_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED103_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED104_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED105_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED106_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED107_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED108_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED109_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED110_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED111_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED112_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED113_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED114_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED115_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED116_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED117_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED118_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED119_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED120_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED121_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED122_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED123_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED124_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED125_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED126_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED127_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED128_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED129_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED130_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED131_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED132_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED133_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED134_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED135_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED136_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED137_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED138_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED139_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED140_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED141_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED142_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED143_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED144_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED145_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED146_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED147_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED148_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED149_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED150_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED151_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED152_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED153_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED154_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED155_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED156_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED157_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED158_DEFAULT 0x00000000
-#define mmDCIO_UNIPHY8_UNIPHY_MACRO_CNTL_RESERVED159_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophycmregs8_dispdec
-#define mmDC_COMBOPHYCMREGS8_COMMON_FUSE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_FUSE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_FUSE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_MAR_DEEMPH_NOM_DEFAULT 0x402a2a00
-#define mmDC_COMBOPHYCMREGS8_COMMON_LANE_PWRMGMT_DEFAULT 0x00000004
-#define mmDC_COMBOPHYCMREGS8_COMMON_TXCNTRL_DEFAULT 0x00000007
-#define mmDC_COMBOPHYCMREGS8_COMMON_TMDP_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_LANE_RESETS_DEFAULT 0x000000ff
-#define mmDC_COMBOPHYCMREGS8_COMMON_ZCALCODE_CTRL_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_DISP_RFU1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_DISP_RFU2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_DISP_RFU3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_DISP_RFU4_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_DISP_RFU5_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_DISP_RFU6_DEFAULT 0x00000000
-#define mmDC_COMBOPHYCMREGS8_COMMON_DISP_RFU7_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophytxregs8_dispdec
-#define mmDC_COMBOPHYTXREGS8_CMD_BUS_TX_CONTROL_LANE0_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS8_MARGIN_DEEMPH_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_CMD_BUS_GLOBAL_FOR_TX_LANE0_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU0_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU1_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU2_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU3_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU4_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU5_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU6_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU7_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU8_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU9_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU10_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU11_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU12_LANE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_CMD_BUS_TX_CONTROL_LANE1_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS8_MARGIN_DEEMPH_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_CMD_BUS_GLOBAL_FOR_TX_LANE1_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU0_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU1_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU2_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU3_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU4_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU5_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU6_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU7_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU8_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU9_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU10_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU11_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU12_LANE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_CMD_BUS_TX_CONTROL_LANE2_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS8_MARGIN_DEEMPH_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_CMD_BUS_GLOBAL_FOR_TX_LANE2_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU0_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU1_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU2_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU3_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU4_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU5_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU6_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU7_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU8_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU9_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU10_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU11_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU12_LANE2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_CMD_BUS_TX_CONTROL_LANE3_DEFAULT 0x00000006
-#define mmDC_COMBOPHYTXREGS8_MARGIN_DEEMPH_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_CMD_BUS_GLOBAL_FOR_TX_LANE3_DEFAULT 0x00000040
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU0_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU1_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU2_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU3_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU4_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU5_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU6_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU7_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU8_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU9_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU10_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU11_LANE3_DEFAULT 0x00000000
-#define mmDC_COMBOPHYTXREGS8_TX_DISP_RFU12_LANE3_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_combophypllregs8_dispdec
-#define mmDC_COMBOPHYPLLREGS8_FREQ_CTRL0_DEFAULT 0x00280000
-#define mmDC_COMBOPHYPLLREGS8_FREQ_CTRL1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS8_FREQ_CTRL2_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS8_FREQ_CTRL3_DEFAULT 0x00e80000
-#define mmDC_COMBOPHYPLLREGS8_BW_CTRL_COARSE_DEFAULT 0x0020c4b1
-#define mmDC_COMBOPHYPLLREGS8_BW_CTRL_FINE_DEFAULT 0x00000001
-#define mmDC_COMBOPHYPLLREGS8_CAL_CTRL_DEFAULT 0x64000000
-#define mmDC_COMBOPHYPLLREGS8_LOOP_CTRL_DEFAULT 0x00000090
-#define mmDC_COMBOPHYPLLREGS8_VREG_CFG_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS8_OBSERVE0_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS8_OBSERVE1_DEFAULT 0x00000000
-#define mmDC_COMBOPHYPLLREGS8_DFT_OUT_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dsi0_dispdec
-#define mmDSI0_DISP_DSI_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_STATUS_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_VIDEO_MODE_CTRL_DEFAULT 0x00008000
-#define mmDSI0_DISP_DSI_VIDEO_MODE_SYNC_DATATYPE_DEFAULT 0x31211101
-#define mmDSI0_DISP_DSI_VIDEO_MODE_VSYNC_PAYLOAD_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_VIDEO_MODE_HSYNC_PAYLOAD_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_VIDEO_MODE_PIXEL_DATATYPE_DEFAULT 0x3e2e1e0e
-#define mmDSI0_DISP_DSI_VIDEO_MODE_BLANKING_DATATYPE_DEFAULT 0x00001900
-#define mmDSI0_DISP_DSI_VIDEO_MODE_DATA_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_COMMAND_MODE_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_COMMAND_MODE_DATA_CTRL_DEFAULT 0x00000066
-#define mmDSI0_DISP_DSI_COMMAND_MODE_DCS_CMD_CTRL_DEFAULT 0x00003c2c
-#define mmDSI0_DISP_DSI_DMA_CMD_OFFSET_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DMA_CMD_LENGTH_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DMA_DATA_OFFSET_0_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DMA_DATA_OFFSET_1_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DMA_DATA_PITCH_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DMA_DATA_WIDTH_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DMA_DATA_HEIGHT_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DMA_FIFO_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DMA_NULL_PACKET_DATA_DEFAULT 0x00000900
-#define mmDSI0_DISP_DSI_DENG_DATA_LENGTH_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_ACK_ERROR_REPORT_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_RDBK_DATA0_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_RDBK_DATA1_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_RDBK_DATA2_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_RDBK_DATA3_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_RDBK_DATATYPE0_DEFAULT 0x22211211
-#define mmDSI0_DISP_DSI_RDBK_DATATYPE1_DEFAULT 0x001c1a02
-#define mmDSI0_DISP_DSI_TRIG_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_EXT_MUX_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_EXT_TE_PULSE_DETECTION_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_CMD_MODE_DMA_SW_TRIGGER_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_CMD_MODE_DENG_SW_TRIGGER_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_CMD_MODE_BTA_SW_TRIGGER_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_RESET_SW_TRIGGER_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_EXT_RESET_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_LANE_CRC_HS_MODE_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_LANE_CRC_LP_MODE_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_LANE_CRC_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_PIXEL_CRC_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_LANE_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DLN0_PHY_ERROR_DEFAULT 0x00088888
-#define mmDSI0_DISP_DSI_LP_TIMER_CTRL_DEFAULT 0xffffffff
-#define mmDSI0_DISP_DSI_HS_TIMER_CTRL_DEFAULT 0x0000ffff
-#define mmDSI0_DISP_DSI_TIMEOUT_STATUS_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_PHY_CLK_TIMING_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_PHY_CLK_TIMING_CTRL2_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_EOT_PACKET_DEFAULT 0x010f0f08
-#define mmDSI0_DISP_DSI_EOT_PACKET_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_GENERIC_ESC_TX_TRIGGER_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_MIPI_BIST_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_MIPI_BIST_FRAME_SIZE_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_MIPI_BIST_BLOCK_SIZE_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_MIPI_BIST_FRAME_CONFIG_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_MIPI_BIST_LSFR_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_MIPI_BIST_LSFR_INIT_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_MIPI_BIST_START_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_MIPI_BIST_STATUS_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_ERROR_INTERRUPT_MASK_DEFAULT 0xfd37377f
-#define mmDSI0_DISP_DSI_INTERRUPT_CTRL_DEFAULT 0x02222222
-#define mmDSI0_DISP_DSI_CLK_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_CLK_STATUS_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DENG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_DENG_FIFO_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_CMD_FIFO_DATA_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_CMD_FIFO_CTRL_DEFAULT 0x00000001
-#define mmDSI0_DISP_DSI_TE_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_LANE_STATUS_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_PERF_CTRL_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_HSYNC_LENGTH_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_RDBK_NUM_DEFAULT 0x00000000
-#define mmDSI0_DISP_DSI_CMD_MEM_PWR_CTRL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dsi1_dispdec
-#define mmDSI1_DISP_DSI_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_STATUS_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_VIDEO_MODE_CTRL_DEFAULT 0x00008000
-#define mmDSI1_DISP_DSI_VIDEO_MODE_SYNC_DATATYPE_DEFAULT 0x31211101
-#define mmDSI1_DISP_DSI_VIDEO_MODE_VSYNC_PAYLOAD_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_VIDEO_MODE_HSYNC_PAYLOAD_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_VIDEO_MODE_PIXEL_DATATYPE_DEFAULT 0x3e2e1e0e
-#define mmDSI1_DISP_DSI_VIDEO_MODE_BLANKING_DATATYPE_DEFAULT 0x00001900
-#define mmDSI1_DISP_DSI_VIDEO_MODE_DATA_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_COMMAND_MODE_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_COMMAND_MODE_DATA_CTRL_DEFAULT 0x00000066
-#define mmDSI1_DISP_DSI_COMMAND_MODE_DCS_CMD_CTRL_DEFAULT 0x00003c2c
-#define mmDSI1_DISP_DSI_DMA_CMD_OFFSET_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DMA_CMD_LENGTH_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DMA_DATA_OFFSET_0_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DMA_DATA_OFFSET_1_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DMA_DATA_PITCH_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DMA_DATA_WIDTH_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DMA_DATA_HEIGHT_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DMA_FIFO_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DMA_NULL_PACKET_DATA_DEFAULT 0x00000900
-#define mmDSI1_DISP_DSI_DENG_DATA_LENGTH_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_ACK_ERROR_REPORT_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_RDBK_DATA0_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_RDBK_DATA1_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_RDBK_DATA2_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_RDBK_DATA3_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_RDBK_DATATYPE0_DEFAULT 0x22211211
-#define mmDSI1_DISP_DSI_RDBK_DATATYPE1_DEFAULT 0x001c1a02
-#define mmDSI1_DISP_DSI_TRIG_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_EXT_MUX_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_EXT_TE_PULSE_DETECTION_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_CMD_MODE_DMA_SW_TRIGGER_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_CMD_MODE_DENG_SW_TRIGGER_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_CMD_MODE_BTA_SW_TRIGGER_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_RESET_SW_TRIGGER_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_EXT_RESET_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_LANE_CRC_HS_MODE_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_LANE_CRC_LP_MODE_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_LANE_CRC_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_PIXEL_CRC_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_LANE_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DLN0_PHY_ERROR_DEFAULT 0x00088888
-#define mmDSI1_DISP_DSI_LP_TIMER_CTRL_DEFAULT 0xffffffff
-#define mmDSI1_DISP_DSI_HS_TIMER_CTRL_DEFAULT 0x0000ffff
-#define mmDSI1_DISP_DSI_TIMEOUT_STATUS_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_PHY_CLK_TIMING_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_PHY_CLK_TIMING_CTRL2_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_EOT_PACKET_DEFAULT 0x010f0f08
-#define mmDSI1_DISP_DSI_EOT_PACKET_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_GENERIC_ESC_TX_TRIGGER_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_MIPI_BIST_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_MIPI_BIST_FRAME_SIZE_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_MIPI_BIST_BLOCK_SIZE_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_MIPI_BIST_FRAME_CONFIG_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_MIPI_BIST_LSFR_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_MIPI_BIST_LSFR_INIT_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_MIPI_BIST_START_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_MIPI_BIST_STATUS_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_ERROR_INTERRUPT_MASK_DEFAULT 0xfd37377f
-#define mmDSI1_DISP_DSI_INTERRUPT_CTRL_DEFAULT 0x02222222
-#define mmDSI1_DISP_DSI_CLK_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_CLK_STATUS_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DENG_FIFO_STATUS_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_DENG_FIFO_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_CMD_FIFO_DATA_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_CMD_FIFO_CTRL_DEFAULT 0x00000001
-#define mmDSI1_DISP_DSI_TE_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_LANE_STATUS_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_PERF_CTRL_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_HSYNC_LENGTH_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_RDBK_NUM_DEFAULT 0x00000000
-#define mmDSI1_DISP_DSI_CMD_MEM_PWR_CTRL_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dprx_sd0_dispdec
-#define mmDPRX_SD0_DPRX_SD_CONTROL_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_STREAM_ENABLE_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA0_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA1_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA2_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA3_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA4_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA5_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA6_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA7_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA8_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_VBID_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_CURRENT_LINE_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_DISPLAY_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_DISPLAY_TIMER_MODE_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSE_SAT_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSE_FORCE_UPDATE_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSE_SAT_ACTIVE_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_V_PARAMETER_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSA_RECEIVED_STATUS_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_VIDEO_STREAM_STATUS_TOGGLED_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_LINE_NUMBER0_STATUS_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_LINE_NUMBER0_CONTROL_DEFAULT 0x0000ffff
-#define mmDPRX_SD0_DPRX_SD_LINE_NUMBER1_STATUS_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_LINE_NUMBER1_CONTROL_DEFAULT 0x0000ffff
-#define mmDPRX_SD0_DPRX_SD_MAIN_DEFRAMING_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_VBID_MAJORITY_VOTE_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_SECONDARY_DEFRAMING_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_VCPF_PHASE_LOCKED_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_VCPF_PHASE_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MAJORITY_VOTE_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_PIXEL_FIFO_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MAXIMUM_SDP_PAYLOAD_LENGTH_DEFAULT 0x000003ff
-#define mmDPRX_SD0_DPRX_SD_SDP_STEER_DEFAULT 0x00000001
-#define mmDPRX_SD0_DPRX_SD_SDP_RECEIVED_STATUS_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_SDP_LEVEL_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_SDP_DATA_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_SDP_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_AUDIO_HEADER_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_AUDIO_FIFO_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_SDP_CONTROL_DEFAULT 0x00000001
-#define mmDPRX_SD0_DPRX_SD_V_TOTAL_MEASURED_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_H_TOTAL_MEASURED_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_BS_COUNTER_DEFAULT 0x00000000
-#define mmDPRX_SD0_DPRX_SD_MSE_ACT_HANDLED_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dprx_sd1_dispdec
-#define mmDPRX_SD1_DPRX_SD_CONTROL_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_STREAM_ENABLE_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA0_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA1_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA2_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA3_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA4_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA5_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA6_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA7_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA8_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_VBID_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_CURRENT_LINE_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_DISPLAY_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_DISPLAY_TIMER_MODE_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSE_SAT_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSE_FORCE_UPDATE_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSE_SAT_ACTIVE_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_V_PARAMETER_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_PIXEL_FORMAT_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSA_RECEIVED_STATUS_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_VIDEO_STREAM_STATUS_TOGGLED_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_LINE_NUMBER0_STATUS_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_LINE_NUMBER0_CONTROL_DEFAULT 0x0000ffff
-#define mmDPRX_SD1_DPRX_SD_LINE_NUMBER1_STATUS_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_LINE_NUMBER1_CONTROL_DEFAULT 0x0000ffff
-#define mmDPRX_SD1_DPRX_SD_MAIN_DEFRAMING_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_VBID_MAJORITY_VOTE_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_SECONDARY_DEFRAMING_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_VCPF_PHASE_LOCKED_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_VCPF_PHASE_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MAJORITY_VOTE_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_PIXEL_FIFO_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MAXIMUM_SDP_PAYLOAD_LENGTH_DEFAULT 0x000003ff
-#define mmDPRX_SD1_DPRX_SD_SDP_STEER_DEFAULT 0x00000001
-#define mmDPRX_SD1_DPRX_SD_SDP_RECEIVED_STATUS_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_SDP_LEVEL_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_SDP_DATA_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_SDP_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_AUDIO_HEADER_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_AUDIO_FIFO_ERROR_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_SDP_CONTROL_DEFAULT 0x00000001
-#define mmDPRX_SD1_DPRX_SD_V_TOTAL_MEASURED_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_H_TOTAL_MEASURED_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_BS_COUNTER_DEFAULT 0x00000000
-#define mmDPRX_SD1_DPRX_SD_MSE_ACT_HANDLED_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_perfmon10_dispdec
-#define mmDC_PERFMON10_PERFCOUNTER_CNTL_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFCOUNTER_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFCOUNTER_STATE_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_CNTL_DEFAULT 0x00000100
-#define mmDC_PERFMON10_PERFMON_CNTL2_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_CVALUE_INT_MISC_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_CVALUE_LOW_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_HI_DEFAULT 0x00000000
-#define mmDC_PERFMON10_PERFMON_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dc_zcalregs_dispdec
-#define mmCOMP_EN_CTL_DEFAULT 0x00080000
-#define mmCOMP_EN_DFX_DEFAULT 0x00000000
-#define mmZCAL_FUSES_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_dispdec_VGA_MEM_WRITE_PAGE_ADDR
-
-
-// addressBlock: dce_dc_dispdec_VGA_MEM_READ_PAGE_ADDR
-
-
-// addressBlock: dce_dc_dispdec[948..986]
-
-
-// addressBlock: dce_dc_azdec
-#define mmCORB_WRITE_POINTER_DEFAULT 0x00000000
-#define mmCORB_READ_POINTER_DEFAULT 0x00000000
-#define mmCORB_CONTROL_DEFAULT 0x00000000
-#define mmCORB_STATUS_DEFAULT 0x00000000
-#define mmCORB_SIZE_DEFAULT 0x00000002
-#define mmRIRB_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmRIRB_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmRIRB_WRITE_POINTER_DEFAULT 0x00000000
-#define mmRESPONSE_INTERRUPT_COUNT_DEFAULT 0x00000000
-#define mmRIRB_CONTROL_DEFAULT 0x00000000
-#define mmRIRB_STATUS_DEFAULT 0x00000000
-#define mmRIRB_SIZE_DEFAULT 0x00000002
-#define mmAZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define mmAZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-#define mmAZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define mmAZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-#define mmAZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define mmAZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_DEFAULT 0x00000000
-#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA_DEFAULT 0x00000000
-#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX_DEFAULT 0x00000000
-#define mmIMMEDIATE_RESPONSE_INPUT_INTERFACE_DEFAULT 0x00000000
-#define mmIMMEDIATE_COMMAND_STATUS_DEFAULT 0x00000000
-#define mmDMA_POSITION_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmDMA_POSITION_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmWALL_CLOCK_COUNTER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azstream0_azdec
-#define mmAZSTREAM0_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM0_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM0_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM0_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM0_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM0_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM0_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azstream1_azdec
-#define mmAZSTREAM1_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM1_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM1_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM1_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM1_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM1_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM1_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azstream2_azdec
-#define mmAZSTREAM2_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM2_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM2_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM2_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM2_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM2_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM2_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM2_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM2_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azstream3_azdec
-#define mmAZSTREAM3_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM3_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM3_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM3_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM3_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM3_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM3_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM3_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM3_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azstream4_azdec
-#define mmAZSTREAM4_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM4_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM4_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM4_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM4_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM4_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM4_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM4_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM4_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azstream5_azdec
-#define mmAZSTREAM5_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM5_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM5_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM5_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM5_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM5_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM5_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM5_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM5_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azstream6_azdec
-#define mmAZSTREAM6_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM6_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM6_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM6_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM6_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM6_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM6_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM6_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM6_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: dce_dc_azstream7_azdec
-#define mmAZSTREAM7_OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DEFAULT 0x00000000
-#define mmAZSTREAM7_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_DEFAULT 0x00000000
-#define mmAZSTREAM7_OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH_DEFAULT 0x00000000
-#define mmAZSTREAM7_OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX_DEFAULT 0x00000000
-#define mmAZSTREAM7_OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE_DEFAULT 0x00000000
-#define mmAZSTREAM7_OUTPUT_STREAM_DESCRIPTOR_FORMAT_DEFAULT 0x00000000
-#define mmAZSTREAM7_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM7_OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS_DEFAULT 0x00000000
-#define mmAZSTREAM7_OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream0_streamind
-#define ixAZF0STREAM0_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM0_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM0_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM0_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM0_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream1_streamind
-#define ixAZF0STREAM1_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM1_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM1_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM1_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM1_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream2_streamind
-#define ixAZF0STREAM2_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM2_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM2_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM2_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM2_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream3_streamind
-#define ixAZF0STREAM3_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM3_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM3_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM3_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM3_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream4_streamind
-#define ixAZF0STREAM4_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM4_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM4_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM4_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM4_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream5_streamind
-#define ixAZF0STREAM5_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM5_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM5_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM5_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM5_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream6_streamind
-#define ixAZF0STREAM6_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM6_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM6_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM6_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM6_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream7_streamind
-#define ixAZF0STREAM7_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM7_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM7_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM7_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM7_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream8_streamind
-#define ixAZF0STREAM8_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM8_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM8_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM8_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM8_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream9_streamind
-#define ixAZF0STREAM9_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM9_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM9_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM9_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM9_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream10_streamind
-#define ixAZF0STREAM10_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM10_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM10_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM10_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM10_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream11_streamind
-#define ixAZF0STREAM11_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM11_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM11_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM11_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM11_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream12_streamind
-#define ixAZF0STREAM12_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM12_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM12_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM12_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM12_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream13_streamind
-#define ixAZF0STREAM13_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM13_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM13_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM13_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM13_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream14_streamind
-#define ixAZF0STREAM14_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM14_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM14_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM14_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM14_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0stream15_streamind
-#define ixAZF0STREAM15_AZALIA_FIFO_SIZE_CONTROL_DEFAULT 0x00203004
-#define ixAZF0STREAM15_AZALIA_LATENCY_COUNTER_CONTROL_DEFAULT 0x00000000
-#define ixAZF0STREAM15_AZALIA_WORSTCASE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM15_AZALIA_CUMULATIVE_LATENCY_COUNT_DEFAULT 0x00000000
-#define ixAZF0STREAM15_AZALIA_CUMULATIVE_REQUEST_COUNT_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint0_endpointind
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint1_endpointind
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint2_endpointind
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT2_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint3_endpointind
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT3_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint4_endpointind
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT4_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint5_endpointind
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT5_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint6_endpointind
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT6_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0endpoint7_endpointind
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000221
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00300000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400380
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000094
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0_DEFAULT 0x07010701
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18560010
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0xffffffff
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLE_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_ENABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_DISABLED_INT_STATUS_DEFAULT 0x00000000
-#define ixAZF0ENDPOINT7_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint0_inputendpointind
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint1_inputendpointind
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint2_inputendpointind
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint3_inputendpointind
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint4_inputendpointind
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint5_inputendpointind
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint6_inputendpointind
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: azf0inputendpoint7_inputendpointind
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00100301
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00020070
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00400280
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x000000a4
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE_DEFAULT 0x7fffffff
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_DEFAULT 0x00000001
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL_DEFAULT 0x00000010
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x18d600f0
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000000
-#define ixAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-
-
-// addressBlock: f2codecind
-#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE_DEFAULT 0x00000003
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2_DEFAULT 0x00000001
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3_DEFAULT 0x000000aa
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE_DEFAULT 0x000000b4
-#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000020
-#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000040
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x00000010
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3_DEFAULT 0x00000056
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4_DEFAULT 0x00000018
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7_DEFAULT 0x00000000
-#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000020
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_DEFAULT 0x00000020
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_DEFAULT 0x000000f0
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3_DEFAULT 0x000000d6
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4_DEFAULT 0x00000018
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL_DEFAULT 0x00000010
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DEFAULT 0x00000000
-
-
-// addressBlock: descriptorind
-#define ixAUDIO_DESCRIPTOR0_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR1_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR2_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR3_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR4_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR5_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR6_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR7_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR8_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR9_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR10_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR11_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR12_DEFAULT 0x00000000
-#define ixAUDIO_DESCRIPTOR13_DEFAULT 0x00000000
-
-
-// addressBlock: sinkinfoind
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0_DEFAULT 0x00000000
-#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION0_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION1_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION2_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION3_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION4_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION5_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION6_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION7_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION8_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION9_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION10_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION11_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION12_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION13_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION14_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION15_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION16_DEFAULT 0x00000000
-#define ixSINK_DESCRIPTION17_DEFAULT 0x00000000
-
-
-// addressBlock: azinputcrc0resultind
-#define ixAZALIA_INPUT_CRC0_CHANNEL0_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL1_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL2_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL3_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL4_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL5_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL6_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC0_CHANNEL7_DEFAULT 0x00000000
-
-
-// addressBlock: azinputcrc1resultind
-#define ixAZALIA_INPUT_CRC1_CHANNEL0_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL1_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL2_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL3_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL4_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL5_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL6_DEFAULT 0x00000000
-#define ixAZALIA_INPUT_CRC1_CHANNEL7_DEFAULT 0x00000000
-
-
-// addressBlock: azcrc0resultind
-#define ixAZALIA_CRC0_CHANNEL0_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL1_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL2_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL3_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL4_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL5_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL6_DEFAULT 0x00000000
-#define ixAZALIA_CRC0_CHANNEL7_DEFAULT 0x00000000
-
-
-// addressBlock: azcrc1resultind
-#define ixAZALIA_CRC1_CHANNEL0_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL1_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL2_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL3_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL4_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL5_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL6_DEFAULT 0x00000000
-#define ixAZALIA_CRC1_CHANNEL7_DEFAULT 0x00000000
-
-
-// addressBlock: vgaseqind
-#define ixSEQ00_DEFAULT 0x00000003
-#define ixSEQ01_DEFAULT 0x00000021
-#define ixSEQ02_DEFAULT 0x00000000
-#define ixSEQ03_DEFAULT 0x00000000
-#define ixSEQ04_DEFAULT 0x00000000
-
-
-// addressBlock: vgacrtind
-#define ixCRT00_DEFAULT 0x00000000
-#define ixCRT01_DEFAULT 0x00000000
-#define ixCRT02_DEFAULT 0x00000000
-#define ixCRT03_DEFAULT 0x00000000
-#define ixCRT04_DEFAULT 0x00000000
-#define ixCRT05_DEFAULT 0x00000000
-#define ixCRT06_DEFAULT 0x00000000
-#define ixCRT07_DEFAULT 0x00000000
-#define ixCRT08_DEFAULT 0x00000000
-#define ixCRT09_DEFAULT 0x00000000
-#define ixCRT0A_DEFAULT 0x00000000
-#define ixCRT0B_DEFAULT 0x00000000
-#define ixCRT0C_DEFAULT 0x00000000
-#define ixCRT0D_DEFAULT 0x00000000
-#define ixCRT0E_DEFAULT 0x00000000
-#define ixCRT0F_DEFAULT 0x00000000
-#define ixCRT10_DEFAULT 0x00000000
-#define ixCRT11_DEFAULT 0x00000000
-#define ixCRT12_DEFAULT 0x00000000
-#define ixCRT13_DEFAULT 0x00000000
-#define ixCRT14_DEFAULT 0x00000000
-#define ixCRT15_DEFAULT 0x00000000
-#define ixCRT16_DEFAULT 0x00000000
-#define ixCRT17_DEFAULT 0x00000000
-#define ixCRT18_DEFAULT 0x00000000
-#define ixCRT1E_DEFAULT 0x00000000
-#define ixCRT1F_DEFAULT 0x00000000
-#define ixCRT22_DEFAULT 0x00000000
-
-
-// addressBlock: vgagrphind
-#define ixGRA00_DEFAULT 0x00000000
-#define ixGRA01_DEFAULT 0x00000000
-#define ixGRA02_DEFAULT 0x00000000
-#define ixGRA03_DEFAULT 0x00000000
-#define ixGRA04_DEFAULT 0x00000000
-#define ixGRA05_DEFAULT 0x00000000
-#define ixGRA06_DEFAULT 0x00000000
-#define ixGRA07_DEFAULT 0x00000000
-#define ixGRA08_DEFAULT 0x00000000
-
-
-// addressBlock: vgaattrind
-#define ixATTR00_DEFAULT 0x00000000
-#define ixATTR01_DEFAULT 0x00000000
-#define ixATTR02_DEFAULT 0x00000000
-#define ixATTR03_DEFAULT 0x00000000
-#define ixATTR04_DEFAULT 0x00000000
-#define ixATTR05_DEFAULT 0x00000000
-#define ixATTR06_DEFAULT 0x00000000
-#define ixATTR07_DEFAULT 0x00000000
-#define ixATTR08_DEFAULT 0x00000000
-#define ixATTR09_DEFAULT 0x00000000
-#define ixATTR0A_DEFAULT 0x00000000
-#define ixATTR0B_DEFAULT 0x00000000
-#define ixATTR0C_DEFAULT 0x00000000
-#define ixATTR0D_DEFAULT 0x00000000
-#define ixATTR0E_DEFAULT 0x00000000
-#define ixATTR0F_DEFAULT 0x00000000
-#define ixATTR10_DEFAULT 0x00000000
-#define ixATTR11_DEFAULT 0x00000000
-#define ixATTR12_DEFAULT 0x00000000
-#define ixATTR13_DEFAULT 0x00000000
-#define ixATTR14_DEFAULT 0x00000000
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_default.h
deleted file mode 100644
index 864690c..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_default.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _hdp_4_0_DEFAULT_HEADER
-#define _hdp_4_0_DEFAULT_HEADER
-
-
-// addressBlock: hdp_hdpdec
-#define mmHDP_MMHUB_TLVL_DEFAULT 0x00006666
-#define mmHDP_MMHUB_UNITID_DEFAULT 0x00000000
-#define mmHDP_NONSURFACE_BASE_DEFAULT 0x00000000
-#define mmHDP_NONSURFACE_INFO_DEFAULT 0x00000000
-#define mmHDP_NONSURFACE_BASE_HI_DEFAULT 0x00000000
-#define mmHDP_NONSURF_FLAGS_DEFAULT 0x00000000
-#define mmHDP_NONSURF_FLAGS_CLR_DEFAULT 0x00000000
-#define mmHDP_HOST_PATH_CNTL_DEFAULT 0x00680000
-#define mmHDP_SW_SEMAPHORE_DEFAULT 0x00000000
-#define mmHDP_DEBUG0_DEFAULT 0x00000000
-#define mmHDP_LAST_SURFACE_HIT_DEFAULT 0x00000003
-#define mmHDP_READ_CACHE_INVALIDATE_DEFAULT 0x00000000
-#define mmHDP_OUTSTANDING_REQ_DEFAULT 0x00000000
-#define mmHDP_MISC_CNTL_DEFAULT 0x2d200861
-#define mmHDP_MEM_POWER_LS_DEFAULT 0x00000901
-#define mmHDP_MMHUB_CNTL_DEFAULT 0x00000000
-#define mmHDP_EDC_CNT_DEFAULT 0x00000000
-#define mmHDP_VERSION_DEFAULT 0x00000400
-#define mmHDP_CLK_CNTL_DEFAULT 0x0000000f
-#define mmHDP_MEMIO_CNTL_DEFAULT 0x00000000
-#define mmHDP_MEMIO_ADDR_DEFAULT 0x00000000
-#define mmHDP_MEMIO_STATUS_DEFAULT 0x00000000
-#define mmHDP_MEMIO_WR_DATA_DEFAULT 0x00000000
-#define mmHDP_MEMIO_RD_DATA_DEFAULT 0xdeadbeef
-#define mmHDP_XDP_DIRECT2HDP_FIRST_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_FLUSH_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_BAR_UPDATE_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_3_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_4_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_5_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_6_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_7_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_8_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_9_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_10_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_11_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_12_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_13_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_14_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_15_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_16_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_17_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_18_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_19_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_20_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_21_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_22_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_23_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_24_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_25_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_26_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_27_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_28_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_29_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_30_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_31_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_32_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_33_DEFAULT 0x00000000
-#define mmHDP_XDP_D2H_RSVD_34_DEFAULT 0x00000000
-#define mmHDP_XDP_DIRECT2HDP_LAST_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_BAR_CFG_DEFAULT 0x0000000f
-#define mmHDP_XDP_P2P_MBX_OFFSET_DEFAULT 0x000011bc
-#define mmHDP_XDP_P2P_MBX_ADDR0_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_MBX_ADDR1_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_MBX_ADDR2_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_MBX_ADDR3_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_MBX_ADDR4_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_MBX_ADDR5_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_MBX_ADDR6_DEFAULT 0x00000000
-#define mmHDP_XDP_HDP_MBX_MC_CFG_DEFAULT 0x00000000
-#define mmHDP_XDP_HDP_MC_CFG_DEFAULT 0x00020000
-#define mmHDP_XDP_HST_CFG_DEFAULT 0x0000001b
-#define mmHDP_XDP_HDP_IPH_CFG_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_BAR0_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_BAR1_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_BAR2_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_BAR3_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_BAR4_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_BAR5_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_BAR6_DEFAULT 0x00000000
-#define mmHDP_XDP_P2P_BAR7_DEFAULT 0x00000000
-#define mmHDP_XDP_FLUSH_ARMED_STS_DEFAULT 0x00000000
-#define mmHDP_XDP_FLUSH_CNTR0_STS_DEFAULT 0x00000000
-#define mmHDP_XDP_BUSY_STS_DEFAULT 0x00000000
-#define mmHDP_XDP_STICKY_DEFAULT 0x00000000
-#define mmHDP_XDP_CHKN_DEFAULT 0x48584450
-#define mmHDP_XDP_BARS_ADDR_39_36_DEFAULT 0x00000000
-#define mmHDP_XDP_MC_VM_FB_LOCATION_BASE_DEFAULT 0x00000000
-#define mmHDP_XDP_GPU_IOV_VIOLATION_LOG_DEFAULT 0x00000000
-#define mmHDP_XDP_MMHUB_ERROR_DEFAULT 0x00000000
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_offset.h
deleted file mode 100644
index fbad771..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_offset.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _hdp_4_0_OFFSET_HEADER
-#define _hdp_4_0_OFFSET_HEADER
-
-
-
-// addressBlock: hdp_hdpdec
-// base address: 0x3c80
-#define mmHDP_MMHUB_TLVL 0x0000
-#define mmHDP_MMHUB_TLVL_BASE_IDX 0
-#define mmHDP_MMHUB_UNITID 0x0001
-#define mmHDP_MMHUB_UNITID_BASE_IDX 0
-#define mmHDP_NONSURFACE_BASE 0x0040
-#define mmHDP_NONSURFACE_BASE_BASE_IDX 0
-#define mmHDP_NONSURFACE_INFO 0x0041
-#define mmHDP_NONSURFACE_INFO_BASE_IDX 0
-#define mmHDP_NONSURFACE_BASE_HI 0x0042
-#define mmHDP_NONSURFACE_BASE_HI_BASE_IDX 0
-#define mmHDP_NONSURF_FLAGS 0x00c8
-#define mmHDP_NONSURF_FLAGS_BASE_IDX 0
-#define mmHDP_NONSURF_FLAGS_CLR 0x00c9
-#define mmHDP_NONSURF_FLAGS_CLR_BASE_IDX 0
-#define mmHDP_HOST_PATH_CNTL 0x00cc
-#define mmHDP_HOST_PATH_CNTL_BASE_IDX 0
-#define mmHDP_SW_SEMAPHORE 0x00cd
-#define mmHDP_SW_SEMAPHORE_BASE_IDX 0
-#define mmHDP_DEBUG0 0x00ce
-#define mmHDP_DEBUG0_BASE_IDX 0
-#define mmHDP_LAST_SURFACE_HIT 0x00d0
-#define mmHDP_LAST_SURFACE_HIT_BASE_IDX 0
-#define mmHDP_READ_CACHE_INVALIDATE 0x00d1
-#define mmHDP_READ_CACHE_INVALIDATE_BASE_IDX 0
-#define mmHDP_OUTSTANDING_REQ 0x00d2
-#define mmHDP_OUTSTANDING_REQ_BASE_IDX 0
-#define mmHDP_MISC_CNTL 0x00d3
-#define mmHDP_MISC_CNTL_BASE_IDX 0
-#define mmHDP_MEM_POWER_LS 0x00d4
-#define mmHDP_MEM_POWER_LS_BASE_IDX 0
-#define mmHDP_MMHUB_CNTL 0x00d5
-#define mmHDP_MMHUB_CNTL_BASE_IDX 0
-#define mmHDP_EDC_CNT 0x00d6
-#define mmHDP_EDC_CNT_BASE_IDX 0
-#define mmHDP_VERSION 0x00d7
-#define mmHDP_VERSION_BASE_IDX 0
-#define mmHDP_CLK_CNTL 0x00d8
-#define mmHDP_CLK_CNTL_BASE_IDX 0
-#define mmHDP_MEMIO_CNTL 0x00f6
-#define mmHDP_MEMIO_CNTL_BASE_IDX 0
-#define mmHDP_MEMIO_ADDR 0x00f7
-#define mmHDP_MEMIO_ADDR_BASE_IDX 0
-#define mmHDP_MEMIO_STATUS 0x00f8
-#define mmHDP_MEMIO_STATUS_BASE_IDX 0
-#define mmHDP_MEMIO_WR_DATA 0x00f9
-#define mmHDP_MEMIO_WR_DATA_BASE_IDX 0
-#define mmHDP_MEMIO_RD_DATA 0x00fa
-#define mmHDP_MEMIO_RD_DATA_BASE_IDX 0
-#define mmHDP_XDP_DIRECT2HDP_FIRST 0x0100
-#define mmHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0
-#define mmHDP_XDP_D2H_FLUSH 0x0101
-#define mmHDP_XDP_D2H_FLUSH_BASE_IDX 0
-#define mmHDP_XDP_D2H_BAR_UPDATE 0x0102
-#define mmHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_3 0x0103
-#define mmHDP_XDP_D2H_RSVD_3_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_4 0x0104
-#define mmHDP_XDP_D2H_RSVD_4_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_5 0x0105
-#define mmHDP_XDP_D2H_RSVD_5_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_6 0x0106
-#define mmHDP_XDP_D2H_RSVD_6_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_7 0x0107
-#define mmHDP_XDP_D2H_RSVD_7_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_8 0x0108
-#define mmHDP_XDP_D2H_RSVD_8_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_9 0x0109
-#define mmHDP_XDP_D2H_RSVD_9_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_10 0x010a
-#define mmHDP_XDP_D2H_RSVD_10_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_11 0x010b
-#define mmHDP_XDP_D2H_RSVD_11_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_12 0x010c
-#define mmHDP_XDP_D2H_RSVD_12_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_13 0x010d
-#define mmHDP_XDP_D2H_RSVD_13_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_14 0x010e
-#define mmHDP_XDP_D2H_RSVD_14_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_15 0x010f
-#define mmHDP_XDP_D2H_RSVD_15_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_16 0x0110
-#define mmHDP_XDP_D2H_RSVD_16_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_17 0x0111
-#define mmHDP_XDP_D2H_RSVD_17_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_18 0x0112
-#define mmHDP_XDP_D2H_RSVD_18_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_19 0x0113
-#define mmHDP_XDP_D2H_RSVD_19_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_20 0x0114
-#define mmHDP_XDP_D2H_RSVD_20_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_21 0x0115
-#define mmHDP_XDP_D2H_RSVD_21_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_22 0x0116
-#define mmHDP_XDP_D2H_RSVD_22_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_23 0x0117
-#define mmHDP_XDP_D2H_RSVD_23_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_24 0x0118
-#define mmHDP_XDP_D2H_RSVD_24_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_25 0x0119
-#define mmHDP_XDP_D2H_RSVD_25_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_26 0x011a
-#define mmHDP_XDP_D2H_RSVD_26_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_27 0x011b
-#define mmHDP_XDP_D2H_RSVD_27_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_28 0x011c
-#define mmHDP_XDP_D2H_RSVD_28_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_29 0x011d
-#define mmHDP_XDP_D2H_RSVD_29_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_30 0x011e
-#define mmHDP_XDP_D2H_RSVD_30_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_31 0x011f
-#define mmHDP_XDP_D2H_RSVD_31_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_32 0x0120
-#define mmHDP_XDP_D2H_RSVD_32_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_33 0x0121
-#define mmHDP_XDP_D2H_RSVD_33_BASE_IDX 0
-#define mmHDP_XDP_D2H_RSVD_34 0x0122
-#define mmHDP_XDP_D2H_RSVD_34_BASE_IDX 0
-#define mmHDP_XDP_DIRECT2HDP_LAST 0x0123
-#define mmHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0
-#define mmHDP_XDP_P2P_BAR_CFG 0x0124
-#define mmHDP_XDP_P2P_BAR_CFG_BASE_IDX 0
-#define mmHDP_XDP_P2P_MBX_OFFSET 0x0125
-#define mmHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0
-#define mmHDP_XDP_P2P_MBX_ADDR0 0x0126
-#define mmHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0
-#define mmHDP_XDP_P2P_MBX_ADDR1 0x0127
-#define mmHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0
-#define mmHDP_XDP_P2P_MBX_ADDR2 0x0128
-#define mmHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0
-#define mmHDP_XDP_P2P_MBX_ADDR3 0x0129
-#define mmHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0
-#define mmHDP_XDP_P2P_MBX_ADDR4 0x012a
-#define mmHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0
-#define mmHDP_XDP_P2P_MBX_ADDR5 0x012b
-#define mmHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0
-#define mmHDP_XDP_P2P_MBX_ADDR6 0x012c
-#define mmHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0
-#define mmHDP_XDP_HDP_MBX_MC_CFG 0x012d
-#define mmHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0
-#define mmHDP_XDP_HDP_MC_CFG 0x012e
-#define mmHDP_XDP_HDP_MC_CFG_BASE_IDX 0
-#define mmHDP_XDP_HST_CFG 0x012f
-#define mmHDP_XDP_HST_CFG_BASE_IDX 0
-#define mmHDP_XDP_HDP_IPH_CFG 0x0131
-#define mmHDP_XDP_HDP_IPH_CFG_BASE_IDX 0
-#define mmHDP_XDP_P2P_BAR0 0x0134
-#define mmHDP_XDP_P2P_BAR0_BASE_IDX 0
-#define mmHDP_XDP_P2P_BAR1 0x0135
-#define mmHDP_XDP_P2P_BAR1_BASE_IDX 0
-#define mmHDP_XDP_P2P_BAR2 0x0136
-#define mmHDP_XDP_P2P_BAR2_BASE_IDX 0
-#define mmHDP_XDP_P2P_BAR3 0x0137
-#define mmHDP_XDP_P2P_BAR3_BASE_IDX 0
-#define mmHDP_XDP_P2P_BAR4 0x0138
-#define mmHDP_XDP_P2P_BAR4_BASE_IDX 0
-#define mmHDP_XDP_P2P_BAR5 0x0139
-#define mmHDP_XDP_P2P_BAR5_BASE_IDX 0
-#define mmHDP_XDP_P2P_BAR6 0x013a
-#define mmHDP_XDP_P2P_BAR6_BASE_IDX 0
-#define mmHDP_XDP_P2P_BAR7 0x013b
-#define mmHDP_XDP_P2P_BAR7_BASE_IDX 0
-#define mmHDP_XDP_FLUSH_ARMED_STS 0x013c
-#define mmHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0
-#define mmHDP_XDP_FLUSH_CNTR0_STS 0x013d
-#define mmHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0
-#define mmHDP_XDP_BUSY_STS 0x013e
-#define mmHDP_XDP_BUSY_STS_BASE_IDX 0
-#define mmHDP_XDP_STICKY 0x013f
-#define mmHDP_XDP_STICKY_BASE_IDX 0
-#define mmHDP_XDP_CHKN 0x0140
-#define mmHDP_XDP_CHKN_BASE_IDX 0
-#define mmHDP_XDP_BARS_ADDR_39_36 0x0144
-#define mmHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0
-#define mmHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145
-#define mmHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0
-#define mmHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148
-#define mmHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
-#define mmHDP_XDP_MMHUB_ERROR 0x0149
-#define mmHDP_XDP_MMHUB_ERROR_BASE_IDX 0
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_sh_mask.h
deleted file mode 100644
index 5861875..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_sh_mask.h
+++ /dev/null
@@ -1,601 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _hdp_4_0_SH_MASK_HEADER
-#define _hdp_4_0_SH_MASK_HEADER
-
-
-// addressBlock: hdp_hdpdec
-//HDP_MMHUB_TLVL
-#define HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT 0x0
-#define HDP_MMHUB_TLVL__HDP_RD_TLVL__SHIFT 0x4
-#define HDP_MMHUB_TLVL__XDP_WR_TLVL__SHIFT 0x8
-#define HDP_MMHUB_TLVL__XDP_RD_TLVL__SHIFT 0xc
-#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL__SHIFT 0x10
-#define HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK 0x00000007L
-#define HDP_MMHUB_TLVL__HDP_RD_TLVL_MASK 0x00000070L
-#define HDP_MMHUB_TLVL__XDP_WR_TLVL_MASK 0x00000700L
-#define HDP_MMHUB_TLVL__XDP_RD_TLVL_MASK 0x00007000L
-#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL_MASK 0x00070000L
-//HDP_MMHUB_UNITID
-#define HDP_MMHUB_UNITID__HDP_UNITID__SHIFT 0x0
-#define HDP_MMHUB_UNITID__XDP_UNITID__SHIFT 0x8
-#define HDP_MMHUB_UNITID__XDP_MBX_UNITID__SHIFT 0x10
-#define HDP_MMHUB_UNITID__HDP_UNITID_MASK 0x0000003FL
-#define HDP_MMHUB_UNITID__XDP_UNITID_MASK 0x00003F00L
-#define HDP_MMHUB_UNITID__XDP_MBX_UNITID_MASK 0x003F0000L
-//HDP_NONSURFACE_BASE
-#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8__SHIFT 0x0
-#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8_MASK 0xFFFFFFFFL
-//HDP_NONSURFACE_INFO
-#define HDP_NONSURFACE_INFO__NONSURF_SWAP__SHIFT 0x4
-#define HDP_NONSURFACE_INFO__NONSURF_VMID__SHIFT 0x8
-#define HDP_NONSURFACE_INFO__NONSURF_SWAP_MASK 0x00000030L
-#define HDP_NONSURFACE_INFO__NONSURF_VMID_MASK 0x00000F00L
-//HDP_NONSURFACE_BASE_HI
-#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40__SHIFT 0x0
-#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40_MASK 0x000000FFL
-//HDP_NONSURF_FLAGS
-#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x0
-#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x1
-#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
-#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
-//HDP_NONSURF_FLAGS_CLR
-#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x0
-#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x1
-#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
-#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
-//HDP_HOST_PATH_CNTL
-#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x9
-#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0xb
-#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x12
-#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x13
-#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x15
-#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN__SHIFT 0x16
-#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x1d
-#define HDP_HOST_PATH_CNTL__WRITE_THROUGH_CACHE_DIS__SHIFT 0x1e
-#define HDP_HOST_PATH_CNTL__LIN_RD_CACHE_DIS__SHIFT 0x1f
-#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
-#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
-#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00040000L
-#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
-#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
-#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN_MASK 0x00400000L
-#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
-#define HDP_HOST_PATH_CNTL__WRITE_THROUGH_CACHE_DIS_MASK 0x40000000L
-#define HDP_HOST_PATH_CNTL__LIN_RD_CACHE_DIS_MASK 0x80000000L
-//HDP_SW_SEMAPHORE
-#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x0
-#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xFFFFFFFFL
-//HDP_DEBUG0
-#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x0
-#define HDP_DEBUG0__HDP_DEBUG_MASK 0xFFFFFFFFL
-//HDP_LAST_SURFACE_HIT
-#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x0
-#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x00000003L
-//HDP_READ_CACHE_INVALIDATE
-#define HDP_READ_CACHE_INVALIDATE__READ_CACHE_INVALIDATE__SHIFT 0x0
-#define HDP_READ_CACHE_INVALIDATE__READ_CACHE_INVALIDATE_MASK 0x00000001L
-//HDP_OUTSTANDING_REQ
-#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x0
-#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x8
-#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000FFL
-#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000FF00L
-//HDP_MISC_CNTL
-#define HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE__SHIFT 0x0
-#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL__SHIFT 0x2
-#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x5
-#define HDP_MISC_CNTL__MULTIPLE_READS__SHIFT 0x6
-#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0xb
-#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x15
-#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY__SHIFT 0x17
-#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE__SHIFT 0x18
-#define HDP_MISC_CNTL__ALL_FUNCTION_CACHELINE_INVALID__SHIFT 0x19
-#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1a
-#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1b
-#define HDP_MISC_CNTL__VARIABLE_CACHELINE_SIZE__SHIFT 0x1c
-#define HDP_MISC_CNTL__ADAPTIVE_CACHELINE_SIZE__SHIFT 0x1d
-#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE__SHIFT 0x1e
-#define HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK 0x00000001L
-#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL_MASK 0x0000000CL
-#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
-#define HDP_MISC_CNTL__MULTIPLE_READS_MASK 0x00000040L
-#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
-#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
-#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY_MASK 0x00800000L
-#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE_MASK 0x01000000L
-#define HDP_MISC_CNTL__ALL_FUNCTION_CACHELINE_INVALID_MASK 0x02000000L
-#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x04000000L
-#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x08000000L
-#define HDP_MISC_CNTL__VARIABLE_CACHELINE_SIZE_MASK 0x10000000L
-#define HDP_MISC_CNTL__ADAPTIVE_CACHELINE_SIZE_MASK 0x20000000L
-#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE_MASK 0x40000000L
-//HDP_MEM_POWER_LS
-#define HDP_MEM_POWER_LS__LS_ENABLE__SHIFT 0x0
-#define HDP_MEM_POWER_LS__LS_HOLD__SHIFT 0x7
-#define HDP_MEM_POWER_LS__LS_ENABLE_MASK 0x00000001L
-#define HDP_MEM_POWER_LS__LS_HOLD_MASK 0x00001F80L
-//HDP_MMHUB_CNTL
-#define HDP_MMHUB_CNTL__HDP_MMHUB_RO__SHIFT 0x0
-#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC__SHIFT 0x1
-#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP__SHIFT 0x2
-#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_MASK 0x00000001L
-#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_MASK 0x00000002L
-#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_MASK 0x00000004L
-//HDP_EDC_CNT
-#define HDP_EDC_CNT__MEM0_SED_COUNT__SHIFT 0x0
-#define HDP_EDC_CNT__MEM1_SED_COUNT__SHIFT 0x2
-#define HDP_EDC_CNT__MEM0_SED_COUNT_MASK 0x00000003L
-#define HDP_EDC_CNT__MEM1_SED_COUNT_MASK 0x0000000CL
-//HDP_VERSION
-#define HDP_VERSION__MINVER__SHIFT 0x0
-#define HDP_VERSION__MAJVER__SHIFT 0x8
-#define HDP_VERSION__REV__SHIFT 0x10
-#define HDP_VERSION__MINVER_MASK 0x000000FFL
-#define HDP_VERSION__MAJVER_MASK 0x0000FF00L
-#define HDP_VERSION__REV_MASK 0x00FF0000L
-//HDP_CLK_CNTL
-#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x0
-#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK__SHIFT 0x4
-#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE__SHIFT 0x1c
-#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
-#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1e
-#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
-#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0000000FL
-#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK_MASK 0x00000010L
-#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK 0x10000000L
-#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
-#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK 0x40000000L
-#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
-//HDP_MEMIO_CNTL
-#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x0
-#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x1
-#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x2
-#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x6
-#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x7
-#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x8
-#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0xe
-#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0xf
-#define HDP_MEMIO_CNTL__MEMIO_VF__SHIFT 0x10
-#define HDP_MEMIO_CNTL__MEMIO_VFID__SHIFT 0x11
-#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
-#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
-#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003CL
-#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
-#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
-#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003F00L
-#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
-#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
-#define HDP_MEMIO_CNTL__MEMIO_VF_MASK 0x00010000L
-#define HDP_MEMIO_CNTL__MEMIO_VFID_MASK 0x003E0000L
-//HDP_MEMIO_ADDR
-#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x0
-#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xFFFFFFFFL
-//HDP_MEMIO_STATUS
-#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x0
-#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x1
-#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x2
-#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x3
-#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
-#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
-#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
-#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
-//HDP_MEMIO_WR_DATA
-#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x0
-#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xFFFFFFFFL
-//HDP_MEMIO_RD_DATA
-#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x0
-#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xFFFFFFFFL
-//HDP_XDP_DIRECT2HDP_FIRST
-#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x0
-#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_FLUSH
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x0
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x4
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x8
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0xb
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x10
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x12
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x13
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x14
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000FL
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000F0L
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000F800L
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
-#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
-//HDP_XDP_D2H_BAR_UPDATE
-#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x0
-#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x10
-#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x14
-#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000FFFFL
-#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000F0000L
-#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
-//HDP_XDP_D2H_RSVD_3
-#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_4
-#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_5
-#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_6
-#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_7
-#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_8
-#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_9
-#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_10
-#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_11
-#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_12
-#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_13
-#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_14
-#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_15
-#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_16
-#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_17
-#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_18
-#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_19
-#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_20
-#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_21
-#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_22
-#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_23
-#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_24
-#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_25
-#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_26
-#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_27
-#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_28
-#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_29
-#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_30
-#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_31
-#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_32
-#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_33
-#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_D2H_RSVD_34
-#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x0
-#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_DIRECT2HDP_LAST
-#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x0
-#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xFFFFFFFFL
-//HDP_XDP_P2P_BAR_CFG
-#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x0
-#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x4
-#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000FL
-#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
-//HDP_XDP_P2P_MBX_OFFSET
-#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x0
-#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x0001FFFFL
-//HDP_XDP_P2P_MBX_ADDR0
-#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x0
-#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19__SHIFT 0x3
-#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x14
-#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40__SHIFT 0x18
-#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
-#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19_MASK 0x000FFFF8L
-#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x00F00000L
-#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40_MASK 0xFF000000L
-//HDP_XDP_P2P_MBX_ADDR1
-#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x0
-#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19__SHIFT 0x3
-#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x14
-#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40__SHIFT 0x18
-#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
-#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19_MASK 0x000FFFF8L
-#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x00F00000L
-#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40_MASK 0xFF000000L
-//HDP_XDP_P2P_MBX_ADDR2
-#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x0
-#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19__SHIFT 0x3
-#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x14
-#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40__SHIFT 0x18
-#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
-#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19_MASK 0x000FFFF8L
-#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x00F00000L
-#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40_MASK 0xFF000000L
-//HDP_XDP_P2P_MBX_ADDR3
-#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x0
-#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19__SHIFT 0x3
-#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x14
-#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40__SHIFT 0x18
-#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
-#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19_MASK 0x000FFFF8L
-#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x00F00000L
-#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40_MASK 0xFF000000L
-//HDP_XDP_P2P_MBX_ADDR4
-#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x0
-#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19__SHIFT 0x3
-#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x14
-#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40__SHIFT 0x18
-#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
-#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19_MASK 0x000FFFF8L
-#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x00F00000L
-#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40_MASK 0xFF000000L
-//HDP_XDP_P2P_MBX_ADDR5
-#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x0
-#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19__SHIFT 0x3
-#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x14
-#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40__SHIFT 0x18
-#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
-#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19_MASK 0x000FFFF8L
-#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x00F00000L
-#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40_MASK 0xFF000000L
-//HDP_XDP_P2P_MBX_ADDR6
-#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x0
-#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19__SHIFT 0x3
-#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x14
-#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40__SHIFT 0x18
-#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
-#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19_MASK 0x000FFFF8L
-#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x00F00000L
-#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40_MASK 0xFF000000L
-//HDP_XDP_HDP_MBX_MC_CFG
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS__SHIFT 0x0
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x4
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x8
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO__SHIFT 0xc
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC__SHIFT 0xd
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP__SHIFT 0xe
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS_MASK 0x0000000FL
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000030L
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x00000F00L
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO_MASK 0x00001000L
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC_MASK 0x00002000L
-#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP_MASK 0x00004000L
-//HDP_XDP_HDP_MC_CFG
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP__SHIFT 0x3
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP__SHIFT 0x4
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID__SHIFT 0x8
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO__SHIFT 0xc
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC__SHIFT 0xd
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0xe
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_MASK 0x00000008L
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP_MASK 0x00000030L
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID_MASK 0x00000F00L
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_MASK 0x00001000L
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_MASK 0x00002000L
-#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000FC000L
-//HDP_XDP_HST_CFG
-#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x0
-#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x1
-#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN__SHIFT 0x3
-#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN__SHIFT 0x4
-#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x5
-#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
-#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
-#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN_MASK 0x00000008L
-#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN_MASK 0x00000010L
-#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00000020L
-//HDP_XDP_HDP_IPH_CFG
-#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE__SHIFT 0x0
-#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE__SHIFT 0x6
-#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0xc
-#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0xd
-#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE_MASK 0x0000003FL
-#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE_MASK 0x00000FC0L
-#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
-#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
-//HDP_XDP_P2P_BAR0
-#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x0
-#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x10
-#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x14
-#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000FFFFL
-#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000F0000L
-#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
-//HDP_XDP_P2P_BAR1
-#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x0
-#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x10
-#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x14
-#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000FFFFL
-#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000F0000L
-#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
-//HDP_XDP_P2P_BAR2
-#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x0
-#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x10
-#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x14
-#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000FFFFL
-#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000F0000L
-#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
-//HDP_XDP_P2P_BAR3
-#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x0
-#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x10
-#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x14
-#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000FFFFL
-#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000F0000L
-#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
-//HDP_XDP_P2P_BAR4
-#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x0
-#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x10
-#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x14
-#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000FFFFL
-#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000F0000L
-#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
-//HDP_XDP_P2P_BAR5
-#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x0
-#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x10
-#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x14
-#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000FFFFL
-#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000F0000L
-#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
-//HDP_XDP_P2P_BAR6
-#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x0
-#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x10
-#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x14
-#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000FFFFL
-#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000F0000L
-#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
-//HDP_XDP_P2P_BAR7
-#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x0
-#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x10
-#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x14
-#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000FFFFL
-#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000F0000L
-#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
-//HDP_XDP_FLUSH_ARMED_STS
-#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x0
-#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xFFFFFFFFL
-//HDP_XDP_FLUSH_CNTR0_STS
-#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x0
-#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03FFFFFFL
-//HDP_XDP_BUSY_STS
-#define HDP_XDP_BUSY_STS__BUSY_BITS__SHIFT 0x0
-#define HDP_XDP_BUSY_STS__BUSY_BITS_MASK 0x0003FFFFL
-//HDP_XDP_STICKY
-#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x0
-#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x10
-#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000FFFFL
-#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xFFFF0000L
-//HDP_XDP_CHKN
-#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x0
-#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x8
-#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x10
-#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x18
-#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000FFL
-#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000FF00L
-#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00FF0000L
-#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xFF000000L
-//HDP_XDP_BARS_ADDR_39_36
-#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x0
-#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x4
-#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x8
-#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0xc
-#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x10
-#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x14
-#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x18
-#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x1c
-#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000FL
-#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000F0L
-#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000F00L
-#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000F000L
-#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000F0000L
-#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00F00000L
-#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0F000000L
-#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xF0000000L
-//HDP_XDP_MC_VM_FB_LOCATION_BASE
-#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
-#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x03FFFFFFL
-//HDP_XDP_GPU_IOV_VIOLATION_LOG
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
-#define HDP_XDP_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
-//HDP_XDP_MMHUB_ERROR
-#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01__SHIFT 0x1
-#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10__SHIFT 0x2
-#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11__SHIFT 0x3
-#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01__SHIFT 0x5
-#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10__SHIFT 0x6
-#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11__SHIFT 0x7
-#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01__SHIFT 0x9
-#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10__SHIFT 0xa
-#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11__SHIFT 0xb
-#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01__SHIFT 0xd
-#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10__SHIFT 0xe
-#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11__SHIFT 0xf
-#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01__SHIFT 0x11
-#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10__SHIFT 0x12
-#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11__SHIFT 0x13
-#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01__SHIFT 0x15
-#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10__SHIFT 0x16
-#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11__SHIFT 0x17
-#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01_MASK 0x00000002L
-#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10_MASK 0x00000004L
-#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11_MASK 0x00000008L
-#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01_MASK 0x00000020L
-#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10_MASK 0x00000040L
-#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11_MASK 0x00000080L
-#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01_MASK 0x00000200L
-#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10_MASK 0x00000400L
-#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11_MASK 0x00000800L
-#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01_MASK 0x00002000L
-#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10_MASK 0x00004000L
-#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11_MASK 0x00008000L
-#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01_MASK 0x00020000L
-#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10_MASK 0x00040000L
-#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11_MASK 0x00080000L
-#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01_MASK 0x00200000L
-#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10_MASK 0x00400000L
-#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11_MASK 0x00800000L
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_default.h
deleted file mode 100644
index 98ba7d8..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_default.h
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _mp_9_0_DEFAULT_HEADER
-#define _mp_9_0_DEFAULT_HEADER
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-#define mmMP0_SMN_C2PMSG_32_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_33_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_34_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_35_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_36_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_37_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_38_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_39_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_40_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_41_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_42_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_43_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_44_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_45_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_46_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_47_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_48_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_49_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_50_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_51_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_52_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_53_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_54_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_55_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_56_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_57_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_58_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_59_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_60_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_61_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_62_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_63_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_64_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_65_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_66_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_67_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_68_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_69_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_70_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_71_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_72_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_73_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_74_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_75_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_76_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_77_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_78_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_79_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_80_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_81_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_82_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_83_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_84_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_85_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_86_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_87_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_88_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_89_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_90_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_91_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_92_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_93_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_94_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_95_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_96_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_97_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_98_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_99_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_100_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_101_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_102_DEFAULT 0x00000000
-#define mmMP0_SMN_C2PMSG_103_DEFAULT 0x00000000
-#define mmMP0_SMN_ACTIVE_FCN_ID_DEFAULT 0x00000000
-#define mmMP0_SMN_IH_CREDIT_DEFAULT 0x00000000
-#define mmMP0_SMN_IH_SW_INT_DEFAULT 0x00000000
-#define mmMP0_SMN_IH_SW_INT_CTRL_DEFAULT 0x00000000
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-#define mmMP1_SMN_ACP2MP_RESP_DEFAULT 0x00000000
-#define mmMP1_SMN_DC2MP_RESP_DEFAULT 0x00000000
-#define mmMP1_SMN_UVD2MP_RESP_DEFAULT 0x00000000
-#define mmMP1_SMN_VCE2MP_RESP_DEFAULT 0x00000000
-#define mmMP1_SMN_RLC2MP_RESP_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_32_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_33_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_34_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_35_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_36_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_37_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_38_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_39_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_40_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_41_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_42_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_43_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_44_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_45_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_46_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_47_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_48_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_49_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_50_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_51_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_52_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_53_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_54_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_55_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_56_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_57_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_58_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_59_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_60_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_61_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_62_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_63_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_64_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_65_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_66_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_67_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_68_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_69_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_70_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_71_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_72_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_73_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_74_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_75_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_76_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_77_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_78_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_79_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_80_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_81_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_82_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_83_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_84_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_85_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_86_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_87_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_88_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_89_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_90_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_91_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_92_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_93_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_94_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_95_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_96_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_97_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_98_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_99_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_100_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_101_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_102_DEFAULT 0x00000000
-#define mmMP1_SMN_C2PMSG_103_DEFAULT 0x00000000
-#define mmMP1_SMN_ACTIVE_FCN_ID_DEFAULT 0x00000000
-#define mmMP1_SMN_IH_CREDIT_DEFAULT 0x00000000
-#define mmMP1_SMN_IH_SW_INT_DEFAULT 0x00000000
-#define mmMP1_SMN_IH_SW_INT_CTRL_DEFAULT 0x00000000
-#define mmMP1_SMN_FPS_CNT_DEFAULT 0x00000000
-#define mmMP1_SMN_EXT_SCRATCH0_DEFAULT 0x00000000
-#define mmMP1_SMN_EXT_SCRATCH1_DEFAULT 0x00000000
-#define mmMP1_SMN_EXT_SCRATCH2_DEFAULT 0x00000000
-#define mmMP1_SMN_EXT_SCRATCH3_DEFAULT 0x00000000
-#define mmMP1_SMN_EXT_SCRATCH4_DEFAULT 0x00000000
-#define mmMP1_SMN_EXT_SCRATCH5_DEFAULT 0x00000000
-#define mmMP1_SMN_EXT_SCRATCH6_DEFAULT 0x00000000
-#define mmMP1_SMN_EXT_SCRATCH7_DEFAULT 0x00000000
-#define mmMP1_SMN_EXT_SCRATCH8_DEFAULT 0x00000000
-
-
-// addressBlock: mp_SmuMp1Pub_CruDec
-#define mmMP1_SMN_PUB_CTRL_DEFAULT 0x00000001
-#define smnMP1_FIRMWARE_FLAGS_DEFAULT 0x00000000
-#define smnMP1_PUB_SCRATCH0_DEFAULT 0x00000000
-#define smnMP1_PUB_SCRATCH1_DEFAULT 0x00000000
-#define smnMP1_PUB_SCRATCH2_DEFAULT 0x00000000
-#define smnMP1_PUB_SCRATCH3_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_0_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_1_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_2_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_3_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_4_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_5_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_6_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_7_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_8_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_9_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_10_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_11_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_12_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_13_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_14_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_15_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_16_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_17_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_18_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_19_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_20_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_21_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_22_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_23_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_24_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_25_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_26_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_27_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_28_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_29_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_30_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_31_DEFAULT 0x00000000
-#define smnMP1_P2CMSG_0_DEFAULT 0x00000000
-#define smnMP1_P2CMSG_1_DEFAULT 0x00000000
-#define smnMP1_P2CMSG_2_DEFAULT 0x00000000
-#define smnMP1_P2CMSG_3_DEFAULT 0x00000000
-#define smnMP1_P2CMSG_INTEN_DEFAULT 0x00000000
-#define smnMP1_P2CMSG_INTSTS_DEFAULT 0x00000000
-#define smnMP1_P2SMSG_0_DEFAULT 0x00000000
-#define smnMP1_P2SMSG_1_DEFAULT 0x00000000
-#define smnMP1_P2SMSG_2_DEFAULT 0x00000000
-#define smnMP1_P2SMSG_3_DEFAULT 0x00000000
-#define smnMP1_P2SMSG_INTSTS_DEFAULT 0x00000000
-#define smnMP1_S2PMSG_0_DEFAULT 0x00000000
-#define smnMP1_ACP2MP_RESP_DEFAULT 0x00000000
-#define smnMP1_DC2MP_RESP_DEFAULT 0x00000000
-#define smnMP1_UVD2MP_RESP_DEFAULT 0x00000000
-#define smnMP1_VCE2MP_RESP_DEFAULT 0x00000000
-#define smnMP1_RLC2MP_RESP_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_32_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_33_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_34_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_35_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_36_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_37_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_38_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_39_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_40_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_41_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_42_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_43_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_44_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_45_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_46_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_47_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_48_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_49_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_50_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_51_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_52_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_53_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_54_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_55_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_56_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_57_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_58_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_59_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_60_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_61_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_62_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_63_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_64_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_65_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_66_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_67_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_68_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_69_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_70_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_71_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_72_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_73_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_74_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_75_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_76_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_77_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_78_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_79_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_80_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_81_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_82_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_83_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_84_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_85_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_86_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_87_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_88_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_89_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_90_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_91_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_92_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_93_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_94_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_95_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_96_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_97_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_98_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_99_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_100_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_101_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_102_DEFAULT 0x00000000
-#define smnMP1_C2PMSG_103_DEFAULT 0x00000000
-#define smnMP1_ACTIVE_FCN_ID_DEFAULT 0x00000000
-#define smnMP1_IH_CREDIT_DEFAULT 0x00000000
-#define smnMP1_IH_SW_INT_DEFAULT 0x00000000
-#define smnMP1_IH_SW_INT_CTRL_DEFAULT 0x00000000
-#define smnMP1_FPS_CNT_DEFAULT 0x00000000
-#define smnMP1_PUB_CTRL_DEFAULT 0x00000001
-#define smnMP1_EXT_SCRATCH0_DEFAULT 0x00000000
-#define smnMP1_EXT_SCRATCH1_DEFAULT 0x00000000
-#define smnMP1_EXT_SCRATCH2_DEFAULT 0x00000000
-#define smnMP1_EXT_SCRATCH3_DEFAULT 0x00000000
-#define smnMP1_EXT_SCRATCH4_DEFAULT 0x00000000
-#define smnMP1_EXT_SCRATCH5_DEFAULT 0x00000000
-#define smnMP1_EXT_SCRATCH6_DEFAULT 0x00000000
-#define smnMP1_EXT_SCRATCH7_DEFAULT 0x00000000
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_offset.h
deleted file mode 100644
index 621e880..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_offset.h
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _mp_9_0_OFFSET_HEADER
-#define _mp_9_0_OFFSET_HEADER
-
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-// base address: 0x0
-#define mmMP0_SMN_C2PMSG_32 0x0060
-#define mmMP0_SMN_C2PMSG_32_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_33 0x0061
-#define mmMP0_SMN_C2PMSG_33_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_34 0x0062
-#define mmMP0_SMN_C2PMSG_34_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_35 0x0063
-#define mmMP0_SMN_C2PMSG_35_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_36 0x0064
-#define mmMP0_SMN_C2PMSG_36_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_37 0x0065
-#define mmMP0_SMN_C2PMSG_37_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_38 0x0066
-#define mmMP0_SMN_C2PMSG_38_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_39 0x0067
-#define mmMP0_SMN_C2PMSG_39_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_40 0x0068
-#define mmMP0_SMN_C2PMSG_40_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_41 0x0069
-#define mmMP0_SMN_C2PMSG_41_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_42 0x006a
-#define mmMP0_SMN_C2PMSG_42_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_43 0x006b
-#define mmMP0_SMN_C2PMSG_43_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_44 0x006c
-#define mmMP0_SMN_C2PMSG_44_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_45 0x006d
-#define mmMP0_SMN_C2PMSG_45_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_46 0x006e
-#define mmMP0_SMN_C2PMSG_46_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_47 0x006f
-#define mmMP0_SMN_C2PMSG_47_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_48 0x0070
-#define mmMP0_SMN_C2PMSG_48_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_49 0x0071
-#define mmMP0_SMN_C2PMSG_49_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_50 0x0072
-#define mmMP0_SMN_C2PMSG_50_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_51 0x0073
-#define mmMP0_SMN_C2PMSG_51_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_52 0x0074
-#define mmMP0_SMN_C2PMSG_52_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_53 0x0075
-#define mmMP0_SMN_C2PMSG_53_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_54 0x0076
-#define mmMP0_SMN_C2PMSG_54_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_55 0x0077
-#define mmMP0_SMN_C2PMSG_55_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_56 0x0078
-#define mmMP0_SMN_C2PMSG_56_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_57 0x0079
-#define mmMP0_SMN_C2PMSG_57_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_58 0x007a
-#define mmMP0_SMN_C2PMSG_58_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_59 0x007b
-#define mmMP0_SMN_C2PMSG_59_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_60 0x007c
-#define mmMP0_SMN_C2PMSG_60_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_61 0x007d
-#define mmMP0_SMN_C2PMSG_61_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_62 0x007e
-#define mmMP0_SMN_C2PMSG_62_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_63 0x007f
-#define mmMP0_SMN_C2PMSG_63_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_64 0x0080
-#define mmMP0_SMN_C2PMSG_64_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_65 0x0081
-#define mmMP0_SMN_C2PMSG_65_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_66 0x0082
-#define mmMP0_SMN_C2PMSG_66_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_67 0x0083
-#define mmMP0_SMN_C2PMSG_67_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_68 0x0084
-#define mmMP0_SMN_C2PMSG_68_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_69 0x0085
-#define mmMP0_SMN_C2PMSG_69_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_70 0x0086
-#define mmMP0_SMN_C2PMSG_70_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_71 0x0087
-#define mmMP0_SMN_C2PMSG_71_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_72 0x0088
-#define mmMP0_SMN_C2PMSG_72_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_73 0x0089
-#define mmMP0_SMN_C2PMSG_73_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_74 0x008a
-#define mmMP0_SMN_C2PMSG_74_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_75 0x008b
-#define mmMP0_SMN_C2PMSG_75_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_76 0x008c
-#define mmMP0_SMN_C2PMSG_76_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_77 0x008d
-#define mmMP0_SMN_C2PMSG_77_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_78 0x008e
-#define mmMP0_SMN_C2PMSG_78_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_79 0x008f
-#define mmMP0_SMN_C2PMSG_79_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_80 0x0090
-#define mmMP0_SMN_C2PMSG_80_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_81 0x0091
-#define mmMP0_SMN_C2PMSG_81_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_82 0x0092
-#define mmMP0_SMN_C2PMSG_82_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_83 0x0093
-#define mmMP0_SMN_C2PMSG_83_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_84 0x0094
-#define mmMP0_SMN_C2PMSG_84_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_85 0x0095
-#define mmMP0_SMN_C2PMSG_85_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_86 0x0096
-#define mmMP0_SMN_C2PMSG_86_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_87 0x0097
-#define mmMP0_SMN_C2PMSG_87_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_88 0x0098
-#define mmMP0_SMN_C2PMSG_88_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_89 0x0099
-#define mmMP0_SMN_C2PMSG_89_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_90 0x009a
-#define mmMP0_SMN_C2PMSG_90_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_91 0x009b
-#define mmMP0_SMN_C2PMSG_91_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_92 0x009c
-#define mmMP0_SMN_C2PMSG_92_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_93 0x009d
-#define mmMP0_SMN_C2PMSG_93_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_94 0x009e
-#define mmMP0_SMN_C2PMSG_94_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_95 0x009f
-#define mmMP0_SMN_C2PMSG_95_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_96 0x00a0
-#define mmMP0_SMN_C2PMSG_96_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_97 0x00a1
-#define mmMP0_SMN_C2PMSG_97_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_98 0x00a2
-#define mmMP0_SMN_C2PMSG_98_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_99 0x00a3
-#define mmMP0_SMN_C2PMSG_99_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_100 0x00a4
-#define mmMP0_SMN_C2PMSG_100_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_101 0x00a5
-#define mmMP0_SMN_C2PMSG_101_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_102 0x00a6
-#define mmMP0_SMN_C2PMSG_102_BASE_IDX 0
-#define mmMP0_SMN_C2PMSG_103 0x00a7
-#define mmMP0_SMN_C2PMSG_103_BASE_IDX 0
-#define mmMP0_SMN_ACTIVE_FCN_ID 0x00c0
-#define mmMP0_SMN_ACTIVE_FCN_ID_BASE_IDX 0
-#define mmMP0_SMN_IH_CREDIT 0x00c1
-#define mmMP0_SMN_IH_CREDIT_BASE_IDX 0
-#define mmMP0_SMN_IH_SW_INT 0x00c2
-#define mmMP0_SMN_IH_SW_INT_BASE_IDX 0
-#define mmMP0_SMN_IH_SW_INT_CTRL 0x00c3
-#define mmMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-// base address: 0x0
-#define mmMP1_SMN_ACP2MP_RESP 0x0240
-#define mmMP1_SMN_ACP2MP_RESP_BASE_IDX 0
-#define mmMP1_SMN_DC2MP_RESP 0x0241
-#define mmMP1_SMN_DC2MP_RESP_BASE_IDX 0
-#define mmMP1_SMN_UVD2MP_RESP 0x0242
-#define mmMP1_SMN_UVD2MP_RESP_BASE_IDX 0
-#define mmMP1_SMN_VCE2MP_RESP 0x0243
-#define mmMP1_SMN_VCE2MP_RESP_BASE_IDX 0
-#define mmMP1_SMN_RLC2MP_RESP 0x0244
-#define mmMP1_SMN_RLC2MP_RESP_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_32 0x0260
-#define mmMP1_SMN_C2PMSG_32_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_33 0x0261
-#define mmMP1_SMN_C2PMSG_33_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_34 0x0262
-#define mmMP1_SMN_C2PMSG_34_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_35 0x0263
-#define mmMP1_SMN_C2PMSG_35_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_36 0x0264
-#define mmMP1_SMN_C2PMSG_36_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_37 0x0265
-#define mmMP1_SMN_C2PMSG_37_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_38 0x0266
-#define mmMP1_SMN_C2PMSG_38_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_39 0x0267
-#define mmMP1_SMN_C2PMSG_39_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_40 0x0268
-#define mmMP1_SMN_C2PMSG_40_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_41 0x0269
-#define mmMP1_SMN_C2PMSG_41_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_42 0x026a
-#define mmMP1_SMN_C2PMSG_42_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_43 0x026b
-#define mmMP1_SMN_C2PMSG_43_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_44 0x026c
-#define mmMP1_SMN_C2PMSG_44_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_45 0x026d
-#define mmMP1_SMN_C2PMSG_45_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_46 0x026e
-#define mmMP1_SMN_C2PMSG_46_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_47 0x026f
-#define mmMP1_SMN_C2PMSG_47_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_48 0x0270
-#define mmMP1_SMN_C2PMSG_48_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_49 0x0271
-#define mmMP1_SMN_C2PMSG_49_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_50 0x0272
-#define mmMP1_SMN_C2PMSG_50_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_51 0x0273
-#define mmMP1_SMN_C2PMSG_51_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_52 0x0274
-#define mmMP1_SMN_C2PMSG_52_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_53 0x0275
-#define mmMP1_SMN_C2PMSG_53_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_54 0x0276
-#define mmMP1_SMN_C2PMSG_54_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_55 0x0277
-#define mmMP1_SMN_C2PMSG_55_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_56 0x0278
-#define mmMP1_SMN_C2PMSG_56_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_57 0x0279
-#define mmMP1_SMN_C2PMSG_57_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_58 0x027a
-#define mmMP1_SMN_C2PMSG_58_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_59 0x027b
-#define mmMP1_SMN_C2PMSG_59_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_60 0x027c
-#define mmMP1_SMN_C2PMSG_60_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_61 0x027d
-#define mmMP1_SMN_C2PMSG_61_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_62 0x027e
-#define mmMP1_SMN_C2PMSG_62_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_63 0x027f
-#define mmMP1_SMN_C2PMSG_63_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_64 0x0280
-#define mmMP1_SMN_C2PMSG_64_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_65 0x0281
-#define mmMP1_SMN_C2PMSG_65_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_66 0x0282
-#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_67 0x0283
-#define mmMP1_SMN_C2PMSG_67_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_68 0x0284
-#define mmMP1_SMN_C2PMSG_68_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_69 0x0285
-#define mmMP1_SMN_C2PMSG_69_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_70 0x0286
-#define mmMP1_SMN_C2PMSG_70_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_71 0x0287
-#define mmMP1_SMN_C2PMSG_71_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_72 0x0288
-#define mmMP1_SMN_C2PMSG_72_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_73 0x0289
-#define mmMP1_SMN_C2PMSG_73_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_74 0x028a
-#define mmMP1_SMN_C2PMSG_74_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_75 0x028b
-#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_76 0x028c
-#define mmMP1_SMN_C2PMSG_76_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_77 0x028d
-#define mmMP1_SMN_C2PMSG_77_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_78 0x028e
-#define mmMP1_SMN_C2PMSG_78_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_79 0x028f
-#define mmMP1_SMN_C2PMSG_79_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_80 0x0290
-#define mmMP1_SMN_C2PMSG_80_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_81 0x0291
-#define mmMP1_SMN_C2PMSG_81_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_82 0x0292
-#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_83 0x0293
-#define mmMP1_SMN_C2PMSG_83_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_84 0x0294
-#define mmMP1_SMN_C2PMSG_84_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_85 0x0295
-#define mmMP1_SMN_C2PMSG_85_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_86 0x0296
-#define mmMP1_SMN_C2PMSG_86_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_87 0x0297
-#define mmMP1_SMN_C2PMSG_87_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_88 0x0298
-#define mmMP1_SMN_C2PMSG_88_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_89 0x0299
-#define mmMP1_SMN_C2PMSG_89_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_90 0x029a
-#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_91 0x029b
-#define mmMP1_SMN_C2PMSG_91_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_92 0x029c
-#define mmMP1_SMN_C2PMSG_92_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_93 0x029d
-#define mmMP1_SMN_C2PMSG_93_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_94 0x029e
-#define mmMP1_SMN_C2PMSG_94_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_95 0x029f
-#define mmMP1_SMN_C2PMSG_95_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_96 0x02a0
-#define mmMP1_SMN_C2PMSG_96_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_97 0x02a1
-#define mmMP1_SMN_C2PMSG_97_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_98 0x02a2
-#define mmMP1_SMN_C2PMSG_98_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_99 0x02a3
-#define mmMP1_SMN_C2PMSG_99_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_100 0x02a4
-#define mmMP1_SMN_C2PMSG_100_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_101 0x02a5
-#define mmMP1_SMN_C2PMSG_101_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_102 0x02a6
-#define mmMP1_SMN_C2PMSG_102_BASE_IDX 0
-#define mmMP1_SMN_C2PMSG_103 0x02a7
-#define mmMP1_SMN_C2PMSG_103_BASE_IDX 0
-#define mmMP1_SMN_ACTIVE_FCN_ID 0x02c0
-#define mmMP1_SMN_ACTIVE_FCN_ID_BASE_IDX 0
-#define mmMP1_SMN_IH_CREDIT 0x02c1
-#define mmMP1_SMN_IH_CREDIT_BASE_IDX 0
-#define mmMP1_SMN_IH_SW_INT 0x02c2
-#define mmMP1_SMN_IH_SW_INT_BASE_IDX 0
-#define mmMP1_SMN_IH_SW_INT_CTRL 0x02c3
-#define mmMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
-#define mmMP1_SMN_FPS_CNT 0x02c4
-#define mmMP1_SMN_FPS_CNT_BASE_IDX 0
-#define mmMP1_SMN_EXT_SCRATCH0 0x03c0
-#define mmMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
-#define mmMP1_SMN_EXT_SCRATCH1 0x03c1
-#define mmMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
-#define mmMP1_SMN_EXT_SCRATCH2 0x03c2
-#define mmMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
-#define mmMP1_SMN_EXT_SCRATCH3 0x03c3
-#define mmMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
-#define mmMP1_SMN_EXT_SCRATCH4 0x03c4
-#define mmMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
-#define mmMP1_SMN_EXT_SCRATCH5 0x03c5
-#define mmMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
-#define mmMP1_SMN_EXT_SCRATCH6 0x03c6
-#define mmMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
-#define mmMP1_SMN_EXT_SCRATCH7 0x03c7
-#define mmMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
-#define mmMP1_SMN_EXT_SCRATCH8 0x03c8
-#define mmMP1_SMN_EXT_SCRATCH8_BASE_IDX 0
-
-
-// addressBlock: mp_SmuMp1Pub_CruDec
-// base address: 0x0
-#define mmMP1_SMN_PUB_CTRL 0x02c5
-#define mmMP1_SMN_PUB_CTRL_BASE_IDX 0
-
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_sh_mask.h
deleted file mode 100644
index ae7b518..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_sh_mask.h
+++ /dev/null
@@ -1,1463 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _mp_9_0_SH_MASK_HEADER
-#define _mp_9_0_SH_MASK_HEADER
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-//MP0_SMN_C2PMSG_32
-#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_33
-#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_34
-#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_35
-#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_36
-#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_37
-#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_38
-#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_39
-#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_40
-#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_41
-#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_42
-#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_43
-#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_44
-#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_45
-#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_46
-#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_47
-#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_48
-#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_49
-#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_50
-#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_51
-#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_52
-#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_53
-#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_54
-#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_55
-#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_56
-#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_57
-#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_58
-#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_59
-#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_60
-#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_61
-#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_62
-#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_63
-#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_64
-#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_65
-#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_66
-#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_67
-#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_68
-#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_69
-#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_70
-#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_71
-#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_72
-#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_73
-#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_74
-#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_75
-#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_76
-#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_77
-#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_78
-#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_79
-#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_80
-#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_81
-#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_82
-#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_83
-#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_84
-#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_85
-#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_86
-#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_87
-#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_88
-#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_89
-#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_90
-#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_91
-#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_92
-#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_93
-#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_94
-#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_95
-#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_96
-#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_97
-#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_98
-#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_99
-#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_100
-#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_101
-#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_102
-#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_C2PMSG_103
-#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP0_SMN_ACTIVE_FCN_ID
-#define MP0_SMN_ACTIVE_FCN_ID__VFID__SHIFT 0x0
-#define MP0_SMN_ACTIVE_FCN_ID__VF__SHIFT 0x1f
-#define MP0_SMN_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
-#define MP0_SMN_ACTIVE_FCN_ID__VF_MASK 0x80000000L
-//MP0_SMN_IH_CREDIT
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP0_SMN_IH_SW_INT
-#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x0
-#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x1
-#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000001L
-#define MP0_SMN_IH_SW_INT__ID_MASK 0x000001FEL
-//MP0_SMN_IH_SW_INT_CTRL
-#define MP0_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK__SHIFT 0x0
-#define MP0_SMN_IH_SW_INT_CTRL__SW_INT_ACK__SHIFT 0x8
-#define MP0_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK_MASK 0x00000001L
-#define MP0_SMN_IH_SW_INT_CTRL__SW_INT_ACK_MASK 0x00000100L
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-//MP1_SMN_ACP2MP_RESP
-#define MP1_SMN_ACP2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_SMN_ACP2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_DC2MP_RESP
-#define MP1_SMN_DC2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_SMN_DC2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_UVD2MP_RESP
-#define MP1_SMN_UVD2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_SMN_UVD2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_VCE2MP_RESP
-#define MP1_SMN_VCE2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_SMN_VCE2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_RLC2MP_RESP
-#define MP1_SMN_RLC2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_SMN_RLC2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_32
-#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_33
-#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_34
-#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_35
-#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_36
-#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_37
-#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_38
-#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_39
-#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_40
-#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_41
-#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_42
-#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_43
-#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_44
-#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_45
-#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_46
-#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_47
-#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_48
-#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_49
-#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_50
-#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_51
-#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_52
-#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_53
-#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_54
-#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_55
-#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_56
-#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_57
-#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_58
-#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_59
-#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_60
-#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_61
-#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_62
-#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_63
-#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_64
-#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_65
-#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_66
-#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_67
-#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_68
-#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_69
-#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_70
-#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_71
-#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_72
-#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_73
-#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_74
-#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_75
-#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_76
-#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_77
-#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_78
-#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_79
-#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_80
-#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_81
-#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_82
-#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_83
-#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_84
-#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_85
-#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_86
-#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_87
-#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_88
-#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_89
-#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_90
-#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_91
-#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_92
-#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_93
-#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_94
-#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_95
-#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_96
-#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_97
-#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_98
-#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_99
-#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_100
-#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_101
-#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_102
-#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_C2PMSG_103
-#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP1_SMN_ACTIVE_FCN_ID
-#define MP1_SMN_ACTIVE_FCN_ID__VFID__SHIFT 0x0
-#define MP1_SMN_ACTIVE_FCN_ID__VF__SHIFT 0x1f
-#define MP1_SMN_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
-#define MP1_SMN_ACTIVE_FCN_ID__VF_MASK 0x80000000L
-//MP1_SMN_IH_CREDIT
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP1_SMN_IH_SW_INT
-#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x0
-#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x1
-#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000001L
-#define MP1_SMN_IH_SW_INT__ID_MASK 0x000001FEL
-//MP1_SMN_IH_SW_INT_CTRL
-#define MP1_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK__SHIFT 0x0
-#define MP1_SMN_IH_SW_INT_CTRL__SW_INT_ACK__SHIFT 0x8
-#define MP1_SMN_IH_SW_INT_CTRL__SW_TRIG_MASK_MASK 0x00000001L
-#define MP1_SMN_IH_SW_INT_CTRL__SW_INT_ACK_MASK 0x00000100L
-//MP1_SMN_FPS_CNT
-#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
-#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH0
-#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH1
-#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH2
-#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH3
-#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH4
-#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH5
-#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH6
-#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH7
-#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH8
-#define MP1_SMN_EXT_SCRATCH8__DATA__SHIFT 0x0
-#define MP1_SMN_EXT_SCRATCH8__DATA_MASK 0xFFFFFFFFL
-
-
-
-
-// addressBlock: mp_SmuMp0Pub_CruDec
-//MP0_SOC_INFO
-#define MP0_SOC_INFO__SOC_DIE_ID__SHIFT 0x0
-#define MP0_SOC_INFO__SOC_PKG_TYPE__SHIFT 0x2
-#define MP0_SOC_INFO__SOC_DIE_ID_MASK 0x00000003L
-#define MP0_SOC_INFO__SOC_PKG_TYPE_MASK 0x0000001CL
-//MP0_PUB_SCRATCH0
-#define MP0_PUB_SCRATCH0__DATA__SHIFT 0x0
-#define MP0_PUB_SCRATCH0__DATA_MASK 0xFFFFFFFFL
-//MP0_PUB_SCRATCH1
-#define MP0_PUB_SCRATCH1__DATA__SHIFT 0x0
-#define MP0_PUB_SCRATCH1__DATA_MASK 0xFFFFFFFFL
-//MP0_PUB_SCRATCH2
-#define MP0_PUB_SCRATCH2__DATA__SHIFT 0x0
-#define MP0_PUB_SCRATCH2__DATA_MASK 0xFFFFFFFFL
-//MP0_PUB_SCRATCH3
-#define MP0_PUB_SCRATCH3__DATA__SHIFT 0x0
-#define MP0_PUB_SCRATCH3__DATA_MASK 0xFFFFFFFFL
-//MP0_FW_INTF
-#define MP0_FW_INTF__SS_SECURE__SHIFT 0x13
-#define MP0_FW_INTF__SS_SECURE_MASK 0x00080000L
-//MP0_C2PMSG_0
-#define MP0_C2PMSG_0__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_1
-#define MP0_C2PMSG_1__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_2
-#define MP0_C2PMSG_2__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_3
-#define MP0_C2PMSG_3__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_4
-#define MP0_C2PMSG_4__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_5
-#define MP0_C2PMSG_5__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_6
-#define MP0_C2PMSG_6__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_7
-#define MP0_C2PMSG_7__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_8
-#define MP0_C2PMSG_8__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_9
-#define MP0_C2PMSG_9__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_10
-#define MP0_C2PMSG_10__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_11
-#define MP0_C2PMSG_11__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_12
-#define MP0_C2PMSG_12__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_13
-#define MP0_C2PMSG_13__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_14
-#define MP0_C2PMSG_14__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_15
-#define MP0_C2PMSG_15__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_16
-#define MP0_C2PMSG_16__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_17
-#define MP0_C2PMSG_17__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_18
-#define MP0_C2PMSG_18__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_19
-#define MP0_C2PMSG_19__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_20
-#define MP0_C2PMSG_20__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_21
-#define MP0_C2PMSG_21__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_22
-#define MP0_C2PMSG_22__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_23
-#define MP0_C2PMSG_23__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_24
-#define MP0_C2PMSG_24__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_25
-#define MP0_C2PMSG_25__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_26
-#define MP0_C2PMSG_26__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_27
-#define MP0_C2PMSG_27__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_28
-#define MP0_C2PMSG_28__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_29
-#define MP0_C2PMSG_29__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_30
-#define MP0_C2PMSG_30__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_31
-#define MP0_C2PMSG_31__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL
-//MP0_P2CMSG_0
-#define MP0_P2CMSG_0__CONTENT__SHIFT 0x0
-#define MP0_P2CMSG_0__CONTENT_MASK 0xFFFFFFFFL
-//MP0_P2CMSG_1
-#define MP0_P2CMSG_1__CONTENT__SHIFT 0x0
-#define MP0_P2CMSG_1__CONTENT_MASK 0xFFFFFFFFL
-//MP0_P2CMSG_2
-#define MP0_P2CMSG_2__CONTENT__SHIFT 0x0
-#define MP0_P2CMSG_2__CONTENT_MASK 0xFFFFFFFFL
-//MP0_P2CMSG_3
-#define MP0_P2CMSG_3__CONTENT__SHIFT 0x0
-#define MP0_P2CMSG_3__CONTENT_MASK 0xFFFFFFFFL
-//MP0_P2CMSG_INTEN
-#define MP0_P2CMSG_INTEN__INTEN__SHIFT 0x0
-#define MP0_P2CMSG_INTEN__INTEN_MASK 0x0000000FL
-//MP0_P2CMSG_INTSTS
-#define MP0_P2CMSG_INTSTS__INTSTS0__SHIFT 0x0
-#define MP0_P2CMSG_INTSTS__INTSTS1__SHIFT 0x1
-#define MP0_P2CMSG_INTSTS__INTSTS2__SHIFT 0x2
-#define MP0_P2CMSG_INTSTS__INTSTS3__SHIFT 0x3
-#define MP0_P2CMSG_INTSTS__INTSTS0_MASK 0x00000001L
-#define MP0_P2CMSG_INTSTS__INTSTS1_MASK 0x00000002L
-#define MP0_P2CMSG_INTSTS__INTSTS2_MASK 0x00000004L
-#define MP0_P2CMSG_INTSTS__INTSTS3_MASK 0x00000008L
-//MP0_C2PMSG_ATTR_0
-#define MP0_C2PMSG_ATTR_0__MSG_ATTR__SHIFT 0x0
-#define MP0_C2PMSG_ATTR_0__MSG_ATTR_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_ATTR_1
-#define MP0_C2PMSG_ATTR_1__MSG_ATTR__SHIFT 0x0
-#define MP0_C2PMSG_ATTR_1__MSG_ATTR_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_ATTR_2
-#define MP0_C2PMSG_ATTR_2__MSG_ATTR__SHIFT 0x0
-#define MP0_C2PMSG_ATTR_2__MSG_ATTR_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_ATTR_3
-#define MP0_C2PMSG_ATTR_3__MSG_ATTR__SHIFT 0x0
-#define MP0_C2PMSG_ATTR_3__MSG_ATTR_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_ATTR_4
-#define MP0_C2PMSG_ATTR_4__MSG_ATTR__SHIFT 0x0
-#define MP0_C2PMSG_ATTR_4__MSG_ATTR_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_ATTR_5
-#define MP0_C2PMSG_ATTR_5__MSG_ATTR__SHIFT 0x0
-#define MP0_C2PMSG_ATTR_5__MSG_ATTR_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_ATTR_6
-#define MP0_C2PMSG_ATTR_6__MSG_ATTR__SHIFT 0x0
-#define MP0_C2PMSG_ATTR_6__MSG_ATTR_MASK 0x0000FFFFL
-//MP0_P2CMSG_ATTR
-#define MP0_P2CMSG_ATTR__MSG_ATTR__SHIFT 0x0
-#define MP0_P2CMSG_ATTR__MSG_ATTR_MASK 0x000000FFL
-//MP0_P2SMSG_0
-#define MP0_P2SMSG_0__CONTENT__SHIFT 0x0
-#define MP0_P2SMSG_0__CONTENT_MASK 0xFFFFFFFFL
-//MP0_P2SMSG_1
-#define MP0_P2SMSG_1__CONTENT__SHIFT 0x0
-#define MP0_P2SMSG_1__CONTENT_MASK 0xFFFFFFFFL
-//MP0_P2SMSG_2
-#define MP0_P2SMSG_2__CONTENT__SHIFT 0x0
-#define MP0_P2SMSG_2__CONTENT_MASK 0xFFFFFFFFL
-//MP0_P2SMSG_3
-#define MP0_P2SMSG_3__CONTENT__SHIFT 0x0
-#define MP0_P2SMSG_3__CONTENT_MASK 0xFFFFFFFFL
-//MP0_P2SMSG_ATTR
-#define MP0_P2SMSG_ATTR__MSG_ATTR__SHIFT 0x0
-#define MP0_P2SMSG_ATTR__MSG_ATTR_MASK 0x000000FFL
-//MP0_S2PMSG_ATTR
-#define MP0_S2PMSG_ATTR__MSG_ATTR__SHIFT 0x0
-#define MP0_S2PMSG_ATTR__MSG_ATTR_MASK 0x00000003L
-//MP0_P2SMSG_INTSTS
-#define MP0_P2SMSG_INTSTS__INTSTS0__SHIFT 0x0
-#define MP0_P2SMSG_INTSTS__INTSTS1__SHIFT 0x1
-#define MP0_P2SMSG_INTSTS__INTSTS2__SHIFT 0x2
-#define MP0_P2SMSG_INTSTS__INTSTS3__SHIFT 0x3
-#define MP0_P2SMSG_INTSTS__INTSTS0_MASK 0x00000001L
-#define MP0_P2SMSG_INTSTS__INTSTS1_MASK 0x00000002L
-#define MP0_P2SMSG_INTSTS__INTSTS2_MASK 0x00000004L
-#define MP0_P2SMSG_INTSTS__INTSTS3_MASK 0x00000008L
-//MP0_S2PMSG_0
-#define MP0_S2PMSG_0__CONTENT__SHIFT 0x0
-#define MP0_S2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_32
-#define MP0_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_33
-#define MP0_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_34
-#define MP0_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_35
-#define MP0_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_36
-#define MP0_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_37
-#define MP0_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_38
-#define MP0_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_39
-#define MP0_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_40
-#define MP0_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_41
-#define MP0_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_42
-#define MP0_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_43
-#define MP0_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_44
-#define MP0_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_45
-#define MP0_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_46
-#define MP0_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_47
-#define MP0_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_48
-#define MP0_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_49
-#define MP0_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_50
-#define MP0_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_51
-#define MP0_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_52
-#define MP0_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_53
-#define MP0_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_54
-#define MP0_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_55
-#define MP0_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_56
-#define MP0_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_57
-#define MP0_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_58
-#define MP0_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_59
-#define MP0_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_60
-#define MP0_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_61
-#define MP0_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_62
-#define MP0_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_63
-#define MP0_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_64
-#define MP0_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_65
-#define MP0_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_66
-#define MP0_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_67
-#define MP0_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_68
-#define MP0_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_69
-#define MP0_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_70
-#define MP0_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_71
-#define MP0_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_72
-#define MP0_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_73
-#define MP0_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_74
-#define MP0_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_75
-#define MP0_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_76
-#define MP0_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_77
-#define MP0_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_78
-#define MP0_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_79
-#define MP0_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_80
-#define MP0_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_81
-#define MP0_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_82
-#define MP0_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_83
-#define MP0_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_84
-#define MP0_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_85
-#define MP0_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_86
-#define MP0_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_87
-#define MP0_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_88
-#define MP0_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_89
-#define MP0_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_90
-#define MP0_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_91
-#define MP0_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_92
-#define MP0_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_93
-#define MP0_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_94
-#define MP0_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_95
-#define MP0_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_96
-#define MP0_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_97
-#define MP0_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_98
-#define MP0_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_99
-#define MP0_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_100
-#define MP0_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_101
-#define MP0_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_102
-#define MP0_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP0_C2PMSG_103
-#define MP0_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP0_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP0_ACTIVE_FCN_ID
-#define MP0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
-#define MP0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
-#define MP0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
-#define MP0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
-//MP0_IH_CREDIT
-#define MP0_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP0_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP0_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP0_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP0_IH_SW_INT
-#define MP0_IH_SW_INT__ID__SHIFT 0x0
-#define MP0_IH_SW_INT__VALID__SHIFT 0x8
-#define MP0_IH_SW_INT__ID_MASK 0x000000FFL
-#define MP0_IH_SW_INT__VALID_MASK 0x00000100L
-//MP0_IH_SW_INT_CTRL
-#define MP0_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
-#define MP0_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
-#define MP0_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
-#define MP0_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
-
-
-//CGTT_DRM_CLK_CTRL0
-#define CGTT_DRM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
-#define CGTT_DRM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
-#define CGTT_DRM_CLK_CTRL0__DIV_ID__SHIFT 0xc
-#define CGTT_DRM_CLK_CTRL0__RAMP_DIS_CLK_0__SHIFT 0x15
-#define CGTT_DRM_CLK_CTRL0__RAMP_DIS_CLK_REG__SHIFT 0x16
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
-#define CGTT_DRM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
-#define CGTT_DRM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
-#define CGTT_DRM_CLK_CTRL0__DIV_ID_MASK 0x00007000L
-#define CGTT_DRM_CLK_CTRL0__RAMP_DIS_CLK_0_MASK 0x00200000L
-#define CGTT_DRM_CLK_CTRL0__RAMP_DIS_CLK_REG_MASK 0x00400000L
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
-#define CGTT_DRM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
-//DRM_LIGHT_SLEEP_CTRL
-#define DRM_LIGHT_SLEEP_CTRL__MEM_LIGHT_SLEEP_EN__SHIFT 0x0
-#define DRM_LIGHT_SLEEP_CTRL__MEM_LIGHT_SLEEP_EN_MASK 0x00000001L
-
-
-// addressBlock: mp_SmuMp1Pub_CruDec
-//MP1_SMN_PUB_CTRL
-#define MP1_SMN_PUB_CTRL__RESET__SHIFT 0x0
-#define MP1_SMN_PUB_CTRL__RESET_MASK 0x00000001L
-//MP1_FIRMWARE_FLAGS
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
-#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
-#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
-//MP1_PUB_SCRATCH0
-#define MP1_PUB_SCRATCH0__DATA__SHIFT 0x0
-#define MP1_PUB_SCRATCH0__DATA_MASK 0xFFFFFFFFL
-//MP1_PUB_SCRATCH1
-#define MP1_PUB_SCRATCH1__DATA__SHIFT 0x0
-#define MP1_PUB_SCRATCH1__DATA_MASK 0xFFFFFFFFL
-//MP1_PUB_SCRATCH2
-#define MP1_PUB_SCRATCH2__DATA__SHIFT 0x0
-#define MP1_PUB_SCRATCH2__DATA_MASK 0xFFFFFFFFL
-//MP1_PUB_SCRATCH3
-#define MP1_PUB_SCRATCH3__DATA__SHIFT 0x0
-#define MP1_PUB_SCRATCH3__DATA_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_0
-#define MP1_C2PMSG_0__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_1
-#define MP1_C2PMSG_1__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_1__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_2
-#define MP1_C2PMSG_2__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_2__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_3
-#define MP1_C2PMSG_3__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_3__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_4
-#define MP1_C2PMSG_4__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_4__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_5
-#define MP1_C2PMSG_5__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_5__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_6
-#define MP1_C2PMSG_6__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_6__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_7
-#define MP1_C2PMSG_7__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_7__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_8
-#define MP1_C2PMSG_8__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_8__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_9
-#define MP1_C2PMSG_9__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_9__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_10
-#define MP1_C2PMSG_10__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_10__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_11
-#define MP1_C2PMSG_11__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_11__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_12
-#define MP1_C2PMSG_12__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_12__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_13
-#define MP1_C2PMSG_13__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_13__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_14
-#define MP1_C2PMSG_14__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_14__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_15
-#define MP1_C2PMSG_15__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_15__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_16
-#define MP1_C2PMSG_16__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_16__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_17
-#define MP1_C2PMSG_17__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_17__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_18
-#define MP1_C2PMSG_18__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_18__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_19
-#define MP1_C2PMSG_19__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_19__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_20
-#define MP1_C2PMSG_20__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_20__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_21
-#define MP1_C2PMSG_21__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_21__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_22
-#define MP1_C2PMSG_22__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_22__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_23
-#define MP1_C2PMSG_23__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_23__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_24
-#define MP1_C2PMSG_24__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_24__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_25
-#define MP1_C2PMSG_25__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_25__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_26
-#define MP1_C2PMSG_26__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_26__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_27
-#define MP1_C2PMSG_27__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_27__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_28
-#define MP1_C2PMSG_28__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_28__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_29
-#define MP1_C2PMSG_29__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_29__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_30
-#define MP1_C2PMSG_30__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_30__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_31
-#define MP1_C2PMSG_31__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_31__CONTENT_MASK 0xFFFFFFFFL
-//MP1_P2CMSG_0
-#define MP1_P2CMSG_0__CONTENT__SHIFT 0x0
-#define MP1_P2CMSG_0__CONTENT_MASK 0xFFFFFFFFL
-//MP1_P2CMSG_1
-#define MP1_P2CMSG_1__CONTENT__SHIFT 0x0
-#define MP1_P2CMSG_1__CONTENT_MASK 0xFFFFFFFFL
-//MP1_P2CMSG_2
-#define MP1_P2CMSG_2__CONTENT__SHIFT 0x0
-#define MP1_P2CMSG_2__CONTENT_MASK 0xFFFFFFFFL
-//MP1_P2CMSG_3
-#define MP1_P2CMSG_3__CONTENT__SHIFT 0x0
-#define MP1_P2CMSG_3__CONTENT_MASK 0xFFFFFFFFL
-//MP1_P2CMSG_INTEN
-#define MP1_P2CMSG_INTEN__INTEN__SHIFT 0x0
-#define MP1_P2CMSG_INTEN__INTEN_MASK 0x0000000FL
-//MP1_P2CMSG_INTSTS
-#define MP1_P2CMSG_INTSTS__INTSTS0__SHIFT 0x0
-#define MP1_P2CMSG_INTSTS__INTSTS1__SHIFT 0x1
-#define MP1_P2CMSG_INTSTS__INTSTS2__SHIFT 0x2
-#define MP1_P2CMSG_INTSTS__INTSTS3__SHIFT 0x3
-#define MP1_P2CMSG_INTSTS__INTSTS0_MASK 0x00000001L
-#define MP1_P2CMSG_INTSTS__INTSTS1_MASK 0x00000002L
-#define MP1_P2CMSG_INTSTS__INTSTS2_MASK 0x00000004L
-#define MP1_P2CMSG_INTSTS__INTSTS3_MASK 0x00000008L
-//MP1_P2SMSG_0
-#define MP1_P2SMSG_0__CONTENT__SHIFT 0x0
-#define MP1_P2SMSG_0__CONTENT_MASK 0xFFFFFFFFL
-//MP1_P2SMSG_1
-#define MP1_P2SMSG_1__CONTENT__SHIFT 0x0
-#define MP1_P2SMSG_1__CONTENT_MASK 0xFFFFFFFFL
-//MP1_P2SMSG_2
-#define MP1_P2SMSG_2__CONTENT__SHIFT 0x0
-#define MP1_P2SMSG_2__CONTENT_MASK 0xFFFFFFFFL
-//MP1_P2SMSG_3
-#define MP1_P2SMSG_3__CONTENT__SHIFT 0x0
-#define MP1_P2SMSG_3__CONTENT_MASK 0xFFFFFFFFL
-//MP1_P2SMSG_INTSTS
-#define MP1_P2SMSG_INTSTS__INTSTS0__SHIFT 0x0
-#define MP1_P2SMSG_INTSTS__INTSTS1__SHIFT 0x1
-#define MP1_P2SMSG_INTSTS__INTSTS2__SHIFT 0x2
-#define MP1_P2SMSG_INTSTS__INTSTS3__SHIFT 0x3
-#define MP1_P2SMSG_INTSTS__INTSTS0_MASK 0x00000001L
-#define MP1_P2SMSG_INTSTS__INTSTS1_MASK 0x00000002L
-#define MP1_P2SMSG_INTSTS__INTSTS2_MASK 0x00000004L
-#define MP1_P2SMSG_INTSTS__INTSTS3_MASK 0x00000008L
-//MP1_S2PMSG_0
-#define MP1_S2PMSG_0__CONTENT__SHIFT 0x0
-#define MP1_S2PMSG_0__CONTENT_MASK 0xFFFFFFFFL
-//MP1_ACP2MP_RESP
-#define MP1_ACP2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_ACP2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_DC2MP_RESP
-#define MP1_DC2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_DC2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_UVD2MP_RESP
-#define MP1_UVD2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_UVD2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_VCE2MP_RESP
-#define MP1_VCE2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_VCE2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_RLC2MP_RESP
-#define MP1_RLC2MP_RESP__CONTENT__SHIFT 0x0
-#define MP1_RLC2MP_RESP__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_32
-#define MP1_C2PMSG_32__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_33
-#define MP1_C2PMSG_33__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_34
-#define MP1_C2PMSG_34__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_35
-#define MP1_C2PMSG_35__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_36
-#define MP1_C2PMSG_36__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_37
-#define MP1_C2PMSG_37__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_38
-#define MP1_C2PMSG_38__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_39
-#define MP1_C2PMSG_39__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_40
-#define MP1_C2PMSG_40__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_41
-#define MP1_C2PMSG_41__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_42
-#define MP1_C2PMSG_42__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_43
-#define MP1_C2PMSG_43__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_44
-#define MP1_C2PMSG_44__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_45
-#define MP1_C2PMSG_45__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_46
-#define MP1_C2PMSG_46__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_47
-#define MP1_C2PMSG_47__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_48
-#define MP1_C2PMSG_48__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_49
-#define MP1_C2PMSG_49__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_50
-#define MP1_C2PMSG_50__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_51
-#define MP1_C2PMSG_51__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_52
-#define MP1_C2PMSG_52__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_53
-#define MP1_C2PMSG_53__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_54
-#define MP1_C2PMSG_54__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_55
-#define MP1_C2PMSG_55__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_56
-#define MP1_C2PMSG_56__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_57
-#define MP1_C2PMSG_57__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_58
-#define MP1_C2PMSG_58__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_59
-#define MP1_C2PMSG_59__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_60
-#define MP1_C2PMSG_60__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_61
-#define MP1_C2PMSG_61__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_62
-#define MP1_C2PMSG_62__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_63
-#define MP1_C2PMSG_63__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_64
-#define MP1_C2PMSG_64__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_65
-#define MP1_C2PMSG_65__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_66
-#define MP1_C2PMSG_66__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_67
-#define MP1_C2PMSG_67__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_68
-#define MP1_C2PMSG_68__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_69
-#define MP1_C2PMSG_69__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_70
-#define MP1_C2PMSG_70__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_71
-#define MP1_C2PMSG_71__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_72
-#define MP1_C2PMSG_72__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_73
-#define MP1_C2PMSG_73__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_74
-#define MP1_C2PMSG_74__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_75
-#define MP1_C2PMSG_75__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_76
-#define MP1_C2PMSG_76__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_77
-#define MP1_C2PMSG_77__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_78
-#define MP1_C2PMSG_78__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_79
-#define MP1_C2PMSG_79__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_80
-#define MP1_C2PMSG_80__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_81
-#define MP1_C2PMSG_81__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_82
-#define MP1_C2PMSG_82__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_83
-#define MP1_C2PMSG_83__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_84
-#define MP1_C2PMSG_84__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_85
-#define MP1_C2PMSG_85__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_86
-#define MP1_C2PMSG_86__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_87
-#define MP1_C2PMSG_87__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_88
-#define MP1_C2PMSG_88__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_89
-#define MP1_C2PMSG_89__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_90
-#define MP1_C2PMSG_90__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_91
-#define MP1_C2PMSG_91__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_92
-#define MP1_C2PMSG_92__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_93
-#define MP1_C2PMSG_93__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_94
-#define MP1_C2PMSG_94__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_95
-#define MP1_C2PMSG_95__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_96
-#define MP1_C2PMSG_96__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_97
-#define MP1_C2PMSG_97__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_98
-#define MP1_C2PMSG_98__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_99
-#define MP1_C2PMSG_99__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_100
-#define MP1_C2PMSG_100__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_101
-#define MP1_C2PMSG_101__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_102
-#define MP1_C2PMSG_102__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
-//MP1_C2PMSG_103
-#define MP1_C2PMSG_103__CONTENT__SHIFT 0x0
-#define MP1_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
-//MP1_ACTIVE_FCN_ID
-#define MP1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
-#define MP1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
-#define MP1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
-#define MP1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
-//MP1_IH_CREDIT
-#define MP1_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
-#define MP1_IH_CREDIT__CLIENT_ID__SHIFT 0x10
-#define MP1_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
-#define MP1_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
-//MP1_IH_SW_INT
-#define MP1_IH_SW_INT__ID__SHIFT 0x0
-#define MP1_IH_SW_INT__VALID__SHIFT 0x8
-#define MP1_IH_SW_INT__ID_MASK 0x000000FFL
-#define MP1_IH_SW_INT__VALID_MASK 0x00000100L
-//MP1_IH_SW_INT_CTRL
-#define MP1_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
-#define MP1_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
-#define MP1_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
-#define MP1_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
-//MP1_FPS_CNT
-#define MP1_FPS_CNT__COUNT__SHIFT 0x0
-#define MP1_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
-//MP1_PUB_CTRL
-#define MP1_PUB_CTRL__RESET__SHIFT 0x0
-#define MP1_PUB_CTRL__RESET_MASK 0x00000001L
-//MP1_EXT_SCRATCH0
-#define MP1_EXT_SCRATCH0__DATA__SHIFT 0x0
-#define MP1_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
-//MP1_EXT_SCRATCH1
-#define MP1_EXT_SCRATCH1__DATA__SHIFT 0x0
-#define MP1_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
-//MP1_EXT_SCRATCH2
-#define MP1_EXT_SCRATCH2__DATA__SHIFT 0x0
-#define MP1_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
-//MP1_EXT_SCRATCH3
-#define MP1_EXT_SCRATCH3__DATA__SHIFT 0x0
-#define MP1_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
-//MP1_EXT_SCRATCH4
-#define MP1_EXT_SCRATCH4__DATA__SHIFT 0x0
-#define MP1_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
-//MP1_EXT_SCRATCH5
-#define MP1_EXT_SCRATCH5__DATA__SHIFT 0x0
-#define MP1_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
-//MP1_EXT_SCRATCH6
-#define MP1_EXT_SCRATCH6__DATA__SHIFT 0x0
-#define MP1_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
-//MP1_EXT_SCRATCH7
-#define MP1_EXT_SCRATCH7__DATA__SHIFT 0x0
-#define MP1_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_default.h
deleted file mode 100644
index daa7eae..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_default.h
+++ /dev/null
@@ -1,1271 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _nbif_6_1_DEFAULT_HEADER
-#define _nbif_6_1_DEFAULT_HEADER
-
-
-// addressBlock: bif_cfg_dev0_epf0_bifcfgdecp
-// base address: 0x0
-#define cfgVENDOR_ID_DEFAULT 0x00000000
-#define cfgDEVICE_ID_DEFAULT 0x00000000
-#define cfgCOMMAND_DEFAULT 0x00000000
-#define cfgSTATUS_DEFAULT 0x00000000
-#define cfgREVISION_ID_DEFAULT 0x00000000
-#define cfgPROG_INTERFACE_DEFAULT 0x00000000
-#define cfgSUB_CLASS_DEFAULT 0x00000000
-#define cfgBASE_CLASS_DEFAULT 0x00000000
-#define cfgCACHE_LINE_DEFAULT 0x00000000
-#define cfgLATENCY_DEFAULT 0x00000000
-#define cfgHEADER_DEFAULT 0x00000000
-#define cfgBIST_DEFAULT 0x00000000
-#define cfgBASE_ADDR_1_DEFAULT 0x00000000
-#define cfgBASE_ADDR_2_DEFAULT 0x00000000
-#define cfgBASE_ADDR_3_DEFAULT 0x00000000
-#define cfgBASE_ADDR_4_DEFAULT 0x00000000
-#define cfgBASE_ADDR_5_DEFAULT 0x00000000
-#define cfgBASE_ADDR_6_DEFAULT 0x00000000
-#define cfgADAPTER_ID_DEFAULT 0x00000000
-#define cfgROM_BASE_ADDR_DEFAULT 0x00000000
-#define cfgCAP_PTR_DEFAULT 0x00000000
-#define cfgINTERRUPT_LINE_DEFAULT 0x000000ff
-#define cfgINTERRUPT_PIN_DEFAULT 0x00000000
-#define cfgMIN_GRANT_DEFAULT 0x00000000
-#define cfgMAX_LATENCY_DEFAULT 0x00000000
-#define cfgVENDOR_CAP_LIST_DEFAULT 0x00000000
-#define cfgADAPTER_ID_W_DEFAULT 0x00000000
-#define cfgPMI_CAP_LIST_DEFAULT 0x00000000
-#define cfgPMI_CAP_DEFAULT 0x00000000
-#define cfgPMI_STATUS_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_CAP_LIST_DEFAULT 0x0000a000
-#define cfgPCIE_CAP_DEFAULT 0x00000002
-#define cfgDEVICE_CAP_DEFAULT 0x10000000
-#define cfgDEVICE_CNTL_DEFAULT 0x00002810
-#define cfgDEVICE_STATUS_DEFAULT 0x00000000
-#define cfgLINK_CAP_DEFAULT 0x00011c03
-#define cfgLINK_CNTL_DEFAULT 0x00000000
-#define cfgLINK_STATUS_DEFAULT 0x00000001
-#define cfgDEVICE_CAP2_DEFAULT 0x00000000
-#define cfgDEVICE_CNTL2_DEFAULT 0x00000000
-#define cfgDEVICE_STATUS2_DEFAULT 0x00000000
-#define cfgLINK_CAP2_DEFAULT 0x0000000e
-#define cfgLINK_CNTL2_DEFAULT 0x00000003
-#define cfgLINK_STATUS2_DEFAULT 0x00000000
-#define cfgSLOT_CAP2_DEFAULT 0x00000000
-#define cfgSLOT_CNTL2_DEFAULT 0x00000000
-#define cfgSLOT_STATUS2_DEFAULT 0x00000000
-#define cfgMSI_CAP_LIST_DEFAULT 0x0000c000
-#define cfgMSI_MSG_CNTL_DEFAULT 0x00000080
-#define cfgMSI_MSG_ADDR_LO_DEFAULT 0x00000000
-#define cfgMSI_MSG_ADDR_HI_DEFAULT 0x00000000
-#define cfgMSI_MSG_DATA_DEFAULT 0x00000000
-#define cfgMSI_MSG_DATA_64_DEFAULT 0x00000000
-#define cfgMSI_MASK_DEFAULT 0x00000000
-#define cfgMSI_PENDING_DEFAULT 0x00000000
-#define cfgMSI_MASK_64_DEFAULT 0x00000000
-#define cfgMSI_PENDING_64_DEFAULT 0x00000000
-#define cfgMSIX_CAP_LIST_DEFAULT 0x00000000
-#define cfgMSIX_MSG_CNTL_DEFAULT 0x00000000
-#define cfgMSIX_TABLE_DEFAULT 0x00000000
-#define cfgMSIX_PBA_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_DEFAULT 0x11000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC1_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC2_DEFAULT 0x00000000
-#define cfgPCIE_VC_ENH_CAP_LIST_DEFAULT 0x14000000
-#define cfgPCIE_PORT_VC_CAP_REG1_DEFAULT 0x00000000
-#define cfgPCIE_PORT_VC_CAP_REG2_DEFAULT 0x00000000
-#define cfgPCIE_PORT_VC_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_PORT_VC_STATUS_DEFAULT 0x00000000
-#define cfgPCIE_VC0_RESOURCE_CAP_DEFAULT 0x00000000
-#define cfgPCIE_VC0_RESOURCE_CNTL_DEFAULT 0x000000fe
-#define cfgPCIE_VC0_RESOURCE_STATUS_DEFAULT 0x00000002
-#define cfgPCIE_VC1_RESOURCE_CAP_DEFAULT 0x00000000
-#define cfgPCIE_VC1_RESOURCE_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_VC1_RESOURCE_STATUS_DEFAULT 0x00000002
-#define cfgPCIE_DEV_SERIAL_NUM_ENH_CAP_LIST_DEFAULT 0x15000000
-#define cfgPCIE_DEV_SERIAL_NUM_DW1_DEFAULT 0x00000000
-#define cfgPCIE_DEV_SERIAL_NUM_DW2_DEFAULT 0x00000000
-#define cfgPCIE_ADV_ERR_RPT_ENH_CAP_LIST_DEFAULT 0x20020000
-#define cfgPCIE_UNCORR_ERR_STATUS_DEFAULT 0x00000000
-#define cfgPCIE_UNCORR_ERR_MASK_DEFAULT 0x00000000
-#define cfgPCIE_UNCORR_ERR_SEVERITY_DEFAULT 0x00440010
-#define cfgPCIE_CORR_ERR_STATUS_DEFAULT 0x00000000
-#define cfgPCIE_CORR_ERR_MASK_DEFAULT 0x00002000
-#define cfgPCIE_ADV_ERR_CAP_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_HDR_LOG0_DEFAULT 0x00000000
-#define cfgPCIE_HDR_LOG1_DEFAULT 0x00000000
-#define cfgPCIE_HDR_LOG2_DEFAULT 0x00000000
-#define cfgPCIE_HDR_LOG3_DEFAULT 0x00000000
-#define cfgPCIE_ROOT_ERR_CMD_DEFAULT 0x00000000
-#define cfgPCIE_ROOT_ERR_STATUS_DEFAULT 0x00000000
-#define cfgPCIE_ERR_SRC_ID_DEFAULT 0x00000000
-#define cfgPCIE_TLP_PREFIX_LOG0_DEFAULT 0x00000000
-#define cfgPCIE_TLP_PREFIX_LOG1_DEFAULT 0x00000000
-#define cfgPCIE_TLP_PREFIX_LOG2_DEFAULT 0x00000000
-#define cfgPCIE_TLP_PREFIX_LOG3_DEFAULT 0x00000000
-#define cfgPCIE_BAR_ENH_CAP_LIST_DEFAULT 0x24000000
-#define cfgPCIE_BAR1_CAP_DEFAULT 0x00000000
-#define cfgPCIE_BAR1_CNTL_DEFAULT 0x00000020
-#define cfgPCIE_BAR2_CAP_DEFAULT 0x00000000
-#define cfgPCIE_BAR2_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_BAR3_CAP_DEFAULT 0x00000000
-#define cfgPCIE_BAR3_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_BAR4_CAP_DEFAULT 0x00000000
-#define cfgPCIE_BAR4_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_BAR5_CAP_DEFAULT 0x00000000
-#define cfgPCIE_BAR5_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_BAR6_CAP_DEFAULT 0x00000000
-#define cfgPCIE_BAR6_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_PWR_BUDGET_ENH_CAP_LIST_DEFAULT 0x25000000
-#define cfgPCIE_PWR_BUDGET_DATA_SELECT_DEFAULT 0x00000000
-#define cfgPCIE_PWR_BUDGET_DATA_DEFAULT 0x00000000
-#define cfgPCIE_PWR_BUDGET_CAP_DEFAULT 0x00000000
-#define cfgPCIE_DPA_ENH_CAP_LIST_DEFAULT 0x27000000
-#define cfgPCIE_DPA_CAP_DEFAULT 0x00000000
-#define cfgPCIE_DPA_LATENCY_INDICATOR_DEFAULT 0x00000000
-#define cfgPCIE_DPA_STATUS_DEFAULT 0x00000100
-#define cfgPCIE_DPA_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_DPA_SUBSTATE_PWR_ALLOC_0_DEFAULT 0x00000000
-#define cfgPCIE_DPA_SUBSTATE_PWR_ALLOC_1_DEFAULT 0x00000000
-#define cfgPCIE_DPA_SUBSTATE_PWR_ALLOC_2_DEFAULT 0x00000000
-#define cfgPCIE_DPA_SUBSTATE_PWR_ALLOC_3_DEFAULT 0x00000000
-#define cfgPCIE_DPA_SUBSTATE_PWR_ALLOC_4_DEFAULT 0x00000000
-#define cfgPCIE_DPA_SUBSTATE_PWR_ALLOC_5_DEFAULT 0x00000000
-#define cfgPCIE_DPA_SUBSTATE_PWR_ALLOC_6_DEFAULT 0x00000000
-#define cfgPCIE_DPA_SUBSTATE_PWR_ALLOC_7_DEFAULT 0x00000000
-#define cfgPCIE_SECONDARY_ENH_CAP_LIST_DEFAULT 0x2a010019
-#define cfgPCIE_LINK_CNTL3_DEFAULT 0x00000000
-#define cfgPCIE_LANE_ERROR_STATUS_DEFAULT 0x00000000
-#define cfgPCIE_LANE_0_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_1_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_2_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_3_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_4_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_5_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_6_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_7_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_8_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_9_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_10_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_11_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_12_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_13_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_14_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_LANE_15_EQUALIZATION_CNTL_DEFAULT 0x00007f00
-#define cfgPCIE_ACS_ENH_CAP_LIST_DEFAULT 0x2b000000
-#define cfgPCIE_ACS_CAP_DEFAULT 0x00000000
-#define cfgPCIE_ACS_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_ATS_ENH_CAP_LIST_DEFAULT 0x2c000000
-#define cfgPCIE_ATS_CAP_DEFAULT 0x00000000
-#define cfgPCIE_ATS_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_PAGE_REQ_ENH_CAP_LIST_DEFAULT 0x2d000000
-#define cfgPCIE_PAGE_REQ_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_PAGE_REQ_STATUS_DEFAULT 0x00000000
-#define cfgPCIE_OUTSTAND_PAGE_REQ_CAPACITY_DEFAULT 0x00000000
-#define cfgPCIE_OUTSTAND_PAGE_REQ_ALLOC_DEFAULT 0x00000000
-#define cfgPCIE_PASID_ENH_CAP_LIST_DEFAULT 0x2e000000
-#define cfgPCIE_PASID_CAP_DEFAULT 0x00000000
-#define cfgPCIE_PASID_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_TPH_REQR_ENH_CAP_LIST_DEFAULT 0x2f000000
-#define cfgPCIE_TPH_REQR_CAP_DEFAULT 0x00000000
-#define cfgPCIE_TPH_REQR_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_MC_ENH_CAP_LIST_DEFAULT 0x32000000
-#define cfgPCIE_MC_CAP_DEFAULT 0x00000000
-#define cfgPCIE_MC_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_MC_ADDR0_DEFAULT 0x00000000
-#define cfgPCIE_MC_ADDR1_DEFAULT 0x00000000
-#define cfgPCIE_MC_RCV0_DEFAULT 0x00000000
-#define cfgPCIE_MC_RCV1_DEFAULT 0x00000000
-#define cfgPCIE_MC_BLOCK_ALL0_DEFAULT 0x00000000
-#define cfgPCIE_MC_BLOCK_ALL1_DEFAULT 0x00000000
-#define cfgPCIE_MC_BLOCK_UNTRANSLATED_0_DEFAULT 0x00000000
-#define cfgPCIE_MC_BLOCK_UNTRANSLATED_1_DEFAULT 0x00000000
-#define cfgPCIE_LTR_ENH_CAP_LIST_DEFAULT 0x32800000
-#define cfgPCIE_LTR_CAP_DEFAULT 0x00000000
-#define cfgPCIE_ARI_ENH_CAP_LIST_DEFAULT 0x33000000
-#define cfgPCIE_ARI_CAP_DEFAULT 0x00000000
-#define cfgPCIE_ARI_CNTL_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_ENH_CAP_LIST_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_CAP_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_CONTROL_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_STATUS_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_INITIAL_VFS_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_TOTAL_VFS_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_NUM_VFS_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_FUNC_DEP_LINK_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_FIRST_VF_OFFSET_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_VF_STRIDE_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_VF_DEVICE_ID_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_SUPPORTED_PAGE_SIZE_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_SYSTEM_PAGE_SIZE_DEFAULT 0x00000001
-#define cfgPCIE_SRIOV_VF_BASE_ADDR_0_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_VF_BASE_ADDR_1_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_VF_BASE_ADDR_2_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_VF_BASE_ADDR_3_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_VF_BASE_ADDR_4_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_VF_BASE_ADDR_5_DEFAULT 0x00000000
-#define cfgPCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6_DEFAULT 0x00000000
-#define cfgPCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7_DEFAULT 0x00000000
-
-
-// addressBlock: bif_cfg_dev0_swds_bifcfgdecp
-// base address: 0x0
-#define mmSUB_BUS_NUMBER_LATENCY_DEFAULT 0x00000000
-#define mmIO_BASE_LIMIT_DEFAULT 0x00000000
-#define mmSECONDARY_STATUS_DEFAULT 0x00000000
-#define mmMEM_BASE_LIMIT_DEFAULT 0x00000000
-#define mmPREF_BASE_LIMIT_DEFAULT 0x00000000
-#define mmPREF_BASE_UPPER_DEFAULT 0x00000000
-#define mmPREF_LIMIT_UPPER_DEFAULT 0x00000000
-#define mmIO_BASE_LIMIT_HI_DEFAULT 0x00000000
-#define mmIRQ_BRIDGE_CNTL_DEFAULT 0x00000000
-#define mmSLOT_CAP_DEFAULT 0x00000000
-#define mmSLOT_CNTL_DEFAULT 0x00000000
-#define mmSLOT_STATUS_DEFAULT 0x00000000
-#define mmSSID_CAP_LIST_DEFAULT 0x00000000
-#define mmSSID_CAP_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_shadow_reg_shadowdec
-// base address: 0x0
-#define ixSHADOW_COMMAND_DEFAULT 0x00000000
-#define ixSHADOW_BASE_ADDR_1_DEFAULT 0x00000000
-#define ixSHADOW_BASE_ADDR_2_DEFAULT 0x00000000
-#define ixSHADOW_SUB_BUS_NUMBER_LATENCY_DEFAULT 0x00000000
-#define ixSHADOW_IO_BASE_LIMIT_DEFAULT 0x00000000
-#define ixSHADOW_MEM_BASE_LIMIT_DEFAULT 0x00000000
-#define ixSHADOW_PREF_BASE_LIMIT_DEFAULT 0x00000000
-#define ixSHADOW_PREF_BASE_UPPER_DEFAULT 0x00000000
-#define ixSHADOW_PREF_LIMIT_UPPER_DEFAULT 0x00000000
-#define ixSHADOW_IO_BASE_LIMIT_HI_DEFAULT 0x00000000
-#define ixSHADOW_IRQ_BRIDGE_CNTL_DEFAULT 0x00000000
-#define ixSUC_INDEX_DEFAULT 0x00000000
-#define ixSUC_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: bif_bx_pf_SUMDEC
-// base address: 0x0
-#define ixSUM_INDEX_DEFAULT 0x00000000
-#define ixSUM_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: gdc_GDCDEC
-// base address: 0x1400000
-#define mmA2S_CNTL_CL0_DEFAULT 0x00280540
-#define mmA2S_CNTL_CL1_DEFAULT 0x00282540
-#define mmA2S_CNTL_CL2_DEFAULT 0x002825a0
-#define mmA2S_CNTL_CL3_DEFAULT 0x00282550
-#define mmA2S_CNTL_CL4_DEFAULT 0x00282550
-#define mmA2S_CNTL_SW0_DEFAULT 0x08080005
-#define mmA2S_CNTL_SW1_DEFAULT 0x08080205
-#define mmA2S_CNTL_SW2_DEFAULT 0x08080200
-#define mmNGDC_MGCG_CTRL_DEFAULT 0x00000080
-#define mmA2S_MISC_CNTL_DEFAULT 0x00000003
-#define mmNGDC_SDP_PORT_CTRL_DEFAULT 0x0000000f
-#define mmNGDC_RESERVED_0_DEFAULT 0x00000000
-#define mmNGDC_RESERVED_1_DEFAULT 0x00000000
-#define mmBIF_SDMA0_DOORBELL_RANGE_DEFAULT 0x00000000
-#define mmBIF_SDMA1_DOORBELL_RANGE_DEFAULT 0x00000000
-#define mmBIF_IH_DOORBELL_RANGE_DEFAULT 0x00000000
-#define mmBIF_MMSCH0_DOORBELL_RANGE_DEFAULT 0x00000000
-#define mmBIF_DOORBELL_FENCE_CNTL_DEFAULT 0x00000000
-#define mmS2A_MISC_CNTL_DEFAULT 0x00000000
-#define mmA2S_CNTL2_SEC_CL0_DEFAULT 0x00000006
-#define mmA2S_CNTL2_SEC_CL1_DEFAULT 0x00000006
-#define mmA2S_CNTL2_SEC_CL2_DEFAULT 0x00000006
-#define mmA2S_CNTL2_SEC_CL3_DEFAULT 0x00000006
-#define mmA2S_CNTL2_SEC_CL4_DEFAULT 0x00000006
-
-
-// addressBlock: nbif_sion_SIONDEC
-// base address: 0x1400000
-#define ixSION_CL0_RdRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_RdRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL0_RdRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_RdRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL0_WrRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_WrRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL0_WrRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_WrRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL0_Req_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_Req_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL0_Req_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_Req_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL0_ReqPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_ReqPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL0_DataPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_DataPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL0_RdRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_RdRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL0_WrRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL0_WrRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_RdRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_RdRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_RdRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_RdRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_WrRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_WrRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_WrRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_WrRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_Req_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_Req_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_Req_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_Req_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_ReqPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_ReqPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_DataPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_DataPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_RdRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_RdRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL1_WrRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL1_WrRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_RdRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_RdRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_RdRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_RdRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_WrRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_WrRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_WrRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_WrRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_Req_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_Req_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_Req_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_Req_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_ReqPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_ReqPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_DataPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_DataPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_RdRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_RdRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL2_WrRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL2_WrRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_RdRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_RdRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_RdRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_RdRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_WrRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_WrRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_WrRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_WrRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_Req_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_Req_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_Req_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_Req_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_ReqPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_ReqPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_DataPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_DataPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_RdRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_RdRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL3_WrRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL3_WrRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_RdRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_RdRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_RdRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_RdRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_WrRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_WrRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_WrRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_WrRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_Req_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_Req_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_Req_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_Req_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_ReqPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_ReqPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_DataPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_DataPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_RdRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_RdRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL4_WrRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL4_WrRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_RdRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_RdRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_RdRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_RdRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_WrRsp_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_WrRsp_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_WrRsp_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_WrRsp_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_Req_BurstTarget_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_Req_BurstTarget_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_Req_TimeSlot_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_Req_TimeSlot_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_ReqPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_ReqPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_DataPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_DataPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_RdRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_RdRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CL5_WrRspPoolCredit_Alloc_REG0_DEFAULT 0x00000000
-#define ixSION_CL5_WrRspPoolCredit_Alloc_REG1_DEFAULT 0x00000000
-#define ixSION_CNTL_REG0_DEFAULT 0x00000000
-#define ixSION_CNTL_REG1_DEFAULT 0x00000000
-
-
-// addressBlock: syshub_mmreg_direct_syshubdirect
-// base address: 0x1400000
-#define ixSYSHUB_DS_CTRL_SOCCLK_DEFAULT 0x00000000
-#define ixSYSHUB_DS_CTRL2_SOCCLK_DEFAULT 0x00000100
-#define ixSYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK_DEFAULT 0x00000000
-#define ixSYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK_DEFAULT 0x00000000
-#define ixDMA_CLK0_SW0_SYSHUB_QOS_CNTL_DEFAULT 0x0000001e
-#define ixDMA_CLK0_SW1_SYSHUB_QOS_CNTL_DEFAULT 0x0000001e
-#define ixDMA_CLK0_SW0_CL0_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK0_SW0_CL1_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK0_SW0_CL2_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK0_SW0_CL3_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK0_SW0_CL4_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK0_SW0_CL5_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK0_SW1_CL0_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK0_SW2_CL0_CNTL_DEFAULT 0x20200000
-#define ixSYSHUB_CG_CNTL_DEFAULT 0x00082000
-#define ixSYSHUB_TRANS_IDLE_DEFAULT 0x00000000
-#define ixSYSHUB_HP_TIMER_DEFAULT 0x00000100
-#define ixSYSHUB_SCRATCH_DEFAULT 0x00000040
-#define ixSYSHUB_DS_CTRL_SHUBCLK_DEFAULT 0x00000000
-#define ixSYSHUB_DS_CTRL2_SHUBCLK_DEFAULT 0x00000100
-#define ixSYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SHUBCLK_DEFAULT 0x00000000
-#define ixSYSHUB_BGEN_ENHANCEMENT_IMM_EN_SHUBCLK_DEFAULT 0x00000000
-#define ixDMA_CLK1_SW0_SYSHUB_QOS_CNTL_DEFAULT 0x0000001e
-#define ixDMA_CLK1_SW1_SYSHUB_QOS_CNTL_DEFAULT 0x0000001e
-#define ixDMA_CLK1_SW0_CL0_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK1_SW0_CL1_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK1_SW0_CL2_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK1_SW0_CL3_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK1_SW0_CL4_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK1_SW1_CL0_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK1_SW1_CL1_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK1_SW1_CL2_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK1_SW1_CL3_CNTL_DEFAULT 0x20200000
-#define ixDMA_CLK1_SW1_CL4_CNTL_DEFAULT 0x20200000
-
-
-// addressBlock: gdc_ras_gdc_ras_regblk
-// base address: 0x1400000
-#define ixGDC_RAS_LEAF0_CTRL_DEFAULT 0x00000000
-#define ixGDC_RAS_LEAF1_CTRL_DEFAULT 0x00000000
-#define ixGDC_RAS_LEAF2_CTRL_DEFAULT 0x00000000
-#define ixGDC_RAS_LEAF3_CTRL_DEFAULT 0x00000000
-#define ixGDC_RAS_LEAF4_CTRL_DEFAULT 0x00000000
-#define ixGDC_RAS_LEAF5_CTRL_DEFAULT 0x00000000
-
-
-// addressBlock: gdc_rst_GDCRST_DEC
-// base address: 0x1400000
-#define ixSHUB_PF_FLR_RST_DEFAULT 0x00000000
-#define ixSHUB_GFX_DRV_MODE1_RST_DEFAULT 0x00000000
-#define ixSHUB_LINK_RESET_DEFAULT 0x00000000
-#define ixSHUB_PF0_VF_FLR_RST_DEFAULT 0x00000000
-#define ixSHUB_HARD_RST_CTRL_DEFAULT 0x0000001b
-#define ixSHUB_SOFT_RST_CTRL_DEFAULT 0x00000009
-#define ixSHUB_SDP_PORT_RST_DEFAULT 0x00000000
-
-
-// addressBlock: bif_bx_pf_SYSDEC
-// base address: 0x0
-#define mmSBIOS_SCRATCH_0_DEFAULT 0x00000000
-#define mmSBIOS_SCRATCH_1_DEFAULT 0x00000000
-#define mmSBIOS_SCRATCH_2_DEFAULT 0x00000000
-#define mmSBIOS_SCRATCH_3_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_0_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_1_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_2_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_3_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_4_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_5_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_6_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_7_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_8_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_9_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_10_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_11_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_12_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_13_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_14_DEFAULT 0x00000000
-#define mmBIOS_SCRATCH_15_DEFAULT 0x00000000
-#define mmBIF_RLC_INTR_CNTL_DEFAULT 0x00000000
-#define mmBIF_VCE_INTR_CNTL_DEFAULT 0x00000000
-#define mmBIF_UVD_INTR_CNTL_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ADDR0_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_REMAP_ADDR0_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ADDR1_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_REMAP_ADDR1_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ADDR2_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_REMAP_ADDR2_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ADDR3_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_REMAP_ADDR3_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ADDR4_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_REMAP_ADDR4_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ADDR5_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_REMAP_ADDR5_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ADDR6_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_REMAP_ADDR6_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ADDR7_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_REMAP_ADDR7_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_CNTL_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ZERO_CPL_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_ONE_CPL_DEFAULT 0x00000000
-#define mmGFX_MMIOREG_CAM_PROGRAMMABLE_CPL_DEFAULT 0x00000000
-
-
-// addressBlock: bif_bx_pf_SYSPFVFDEC
-// base address: 0x0
-#define mmMM_INDEX_DEFAULT 0x00000000
-#define mmMM_DATA_DEFAULT 0x00000000
-#define mmMM_INDEX_HI_DEFAULT 0x00000000
-#define mmSYSHUB_INDEX_OVLP_DEFAULT 0x00000000
-#define mmSYSHUB_DATA_OVLP_DEFAULT 0x00000000
-#define mmPCIE_INDEX_DEFAULT 0x00000000
-#define mmPCIE_DATA_DEFAULT 0x00000000
-#define mmPCIE_INDEX2_DEFAULT 0x00000000
-#define mmPCIE_DATA2_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_dwn_BIFDEC1
-// base address: 0x0
-#define mmDN_PCIE_RESERVED_DEFAULT 0x00000000
-#define mmDN_PCIE_SCRATCH_DEFAULT 0x00000000
-#define mmDN_PCIE_CNTL_DEFAULT 0x00000000
-#define mmDN_PCIE_CONFIG_CNTL_DEFAULT 0x00000000
-#define mmDN_PCIE_RX_CNTL2_DEFAULT 0x00000000
-#define mmDN_PCIE_BUS_CNTL_DEFAULT 0x00000080
-#define mmDN_PCIE_CFG_CNTL_DEFAULT 0x00000000
-#define mmDN_PCIE_STRAP_F0_DEFAULT 0x00000001
-#define mmDN_PCIE_STRAP_MISC_DEFAULT 0x00000000
-#define mmDN_PCIE_STRAP_MISC2_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_dwnp_BIFDEC1
-// base address: 0x0
-#define mmPCIEP_RESERVED_DEFAULT 0x00000000
-#define mmPCIEP_SCRATCH_DEFAULT 0x00000000
-#define mmPCIE_ERR_CNTL_DEFAULT 0x00000500
-#define mmPCIE_RX_CNTL_DEFAULT 0x00000000
-#define mmPCIE_LC_SPEED_CNTL_DEFAULT 0x00000000
-#define mmPCIE_LC_CNTL2_DEFAULT 0x00000000
-#define mmPCIEP_STRAP_MISC_DEFAULT 0x00000000
-#define mmLTR_MSG_INFO_FROM_EP_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_ep_BIFDEC1
-// base address: 0x0
-#define mmEP_PCIE_SCRATCH_DEFAULT 0x00000000
-#define mmEP_PCIE_CNTL_DEFAULT 0x00000100
-#define mmEP_PCIE_INT_CNTL_DEFAULT 0x00000000
-#define mmEP_PCIE_INT_STATUS_DEFAULT 0x00000000
-#define mmEP_PCIE_RX_CNTL2_DEFAULT 0x00000000
-#define mmEP_PCIE_BUS_CNTL_DEFAULT 0x00000080
-#define mmEP_PCIE_CFG_CNTL_DEFAULT 0x00000000
-#define mmEP_PCIE_OBFF_CNTL_DEFAULT 0x00012774
-#define mmEP_PCIE_TX_LTR_CNTL_DEFAULT 0x00003468
-#define mmEP_PCIE_STRAP_MISC_DEFAULT 0x00000000
-#define mmEP_PCIE_STRAP_MISC2_DEFAULT 0x00000000
-#define mmEP_PCIE_STRAP_PI_DEFAULT 0x00000000
-#define mmEP_PCIE_F0_DPA_CAP_DEFAULT 0x190a1000
-#define mmEP_PCIE_F0_DPA_LATENCY_INDICATOR_DEFAULT 0x000000f0
-#define mmEP_PCIE_F0_DPA_CNTL_DEFAULT 0x00000100
-#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0_DEFAULT 0x000000fa
-#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1_DEFAULT 0x000000c8
-#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2_DEFAULT 0x00000096
-#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3_DEFAULT 0x00000064
-#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4_DEFAULT 0x0000004b
-#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5_DEFAULT 0x00000032
-#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6_DEFAULT 0x00000019
-#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7_DEFAULT 0x0000000a
-#define mmEP_PCIE_PME_CONTROL_DEFAULT 0x00000000
-#define mmEP_PCIEP_RESERVED_DEFAULT 0x00000000
-#define mmEP_PCIE_TX_CNTL_DEFAULT 0x00000000
-#define mmEP_PCIE_TX_REQUESTER_ID_DEFAULT 0x00000000
-#define mmEP_PCIE_ERR_CNTL_DEFAULT 0x00000500
-#define mmEP_PCIE_RX_CNTL_DEFAULT 0x01000000
-#define mmEP_PCIE_LC_SPEED_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: bif_bx_pf_BIFDEC1
-// base address: 0x0
-#define mmBIF_MM_INDACCESS_CNTL_DEFAULT 0x00000000
-#define mmBUS_CNTL_DEFAULT 0x00000000
-#define mmBIF_SCRATCH0_DEFAULT 0x00000000
-#define mmBIF_SCRATCH1_DEFAULT 0x00000000
-#define mmBX_RESET_EN_DEFAULT 0x00010003
-#define mmMM_CFGREGS_CNTL_DEFAULT 0x00000000
-#define mmBX_RESET_CNTL_DEFAULT 0x00000000
-#define mmINTERRUPT_CNTL_DEFAULT 0x00000010
-#define mmINTERRUPT_CNTL2_DEFAULT 0x00000000
-#define mmCLKREQB_PAD_CNTL_DEFAULT 0x000008e0
-#define mmCLKREQB_PERF_COUNTER_DEFAULT 0x00000000
-#define mmBIF_CLK_CTRL_DEFAULT 0x00000000
-#define mmBIF_FEATURES_CONTROL_MISC_DEFAULT 0x00000000
-#define mmBIF_DOORBELL_CNTL_DEFAULT 0x00000000
-#define mmBIF_DOORBELL_INT_CNTL_DEFAULT 0x00000000
-#define mmBIF_SLVARB_MODE_DEFAULT 0x00000000
-#define mmBIF_FB_EN_DEFAULT 0x00000000
-#define mmBIF_BUSY_DELAY_CNTR_DEFAULT 0x0000003f
-#define mmBIF_PERFMON_CNTL_DEFAULT 0x00000000
-#define mmBIF_PERFCOUNTER0_RESULT_DEFAULT 0x00000000
-#define mmBIF_PERFCOUNTER1_RESULT_DEFAULT 0x00000000
-#define mmBIF_MST_TRANS_PENDING_VF_DEFAULT 0x00000000
-#define mmBIF_SLV_TRANS_PENDING_VF_DEFAULT 0x00000000
-#define mmBACO_CNTL_DEFAULT 0x00000000
-#define mmBIF_BACO_EXIT_TIME0_DEFAULT 0x00000100
-#define mmBIF_BACO_EXIT_TIMER1_DEFAULT 0x00000100
-#define mmBIF_BACO_EXIT_TIMER2_DEFAULT 0x00000300
-#define mmBIF_BACO_EXIT_TIMER3_DEFAULT 0x00000400
-#define mmBIF_BACO_EXIT_TIMER4_DEFAULT 0x00000100
-#define mmMEM_TYPE_CNTL_DEFAULT 0x00000000
-#define mmSMU_BIF_VDDGFX_PWR_STATUS_DEFAULT 0x00000000
-#define mmBIF_VDDGFX_GFX0_LOWER_DEFAULT 0xc0008000
-#define mmBIF_VDDGFX_GFX0_UPPER_DEFAULT 0x0000cffc
-#define mmBIF_VDDGFX_GFX1_LOWER_DEFAULT 0xc0028000
-#define mmBIF_VDDGFX_GFX1_UPPER_DEFAULT 0x00031ffc
-#define mmBIF_VDDGFX_GFX2_LOWER_DEFAULT 0xc0034000
-#define mmBIF_VDDGFX_GFX2_UPPER_DEFAULT 0x00037ffc
-#define mmBIF_VDDGFX_GFX3_LOWER_DEFAULT 0xc003c000
-#define mmBIF_VDDGFX_GFX3_UPPER_DEFAULT 0x0003e1fc
-#define mmBIF_VDDGFX_GFX4_LOWER_DEFAULT 0xc003ec00
-#define mmBIF_VDDGFX_GFX4_UPPER_DEFAULT 0x0003f1fc
-#define mmBIF_VDDGFX_GFX5_LOWER_DEFAULT 0xc003fc00
-#define mmBIF_VDDGFX_GFX5_UPPER_DEFAULT 0x0003fffc
-#define mmBIF_VDDGFX_RSV1_LOWER_DEFAULT 0x00000000
-#define mmBIF_VDDGFX_RSV1_UPPER_DEFAULT 0x00000000
-#define mmBIF_VDDGFX_RSV2_LOWER_DEFAULT 0x00000000
-#define mmBIF_VDDGFX_RSV2_UPPER_DEFAULT 0x00000000
-#define mmBIF_VDDGFX_RSV3_LOWER_DEFAULT 0x00000000
-#define mmBIF_VDDGFX_RSV3_UPPER_DEFAULT 0x00000000
-#define mmBIF_VDDGFX_RSV4_LOWER_DEFAULT 0x00000000
-#define mmBIF_VDDGFX_RSV4_UPPER_DEFAULT 0x00000000
-#define mmBIF_VDDGFX_FB_CMP_DEFAULT 0x00000000
-#define mmBIF_DOORBELL_GBLAPER1_LOWER_DEFAULT 0x80000780
-#define mmBIF_DOORBELL_GBLAPER1_UPPER_DEFAULT 0x000007fc
-#define mmBIF_DOORBELL_GBLAPER2_LOWER_DEFAULT 0x80000800
-#define mmBIF_DOORBELL_GBLAPER2_UPPER_DEFAULT 0x0000087c
-#define mmREMAP_HDP_MEM_FLUSH_CNTL_DEFAULT 0x0000385c
-#define mmREMAP_HDP_REG_FLUSH_CNTL_DEFAULT 0x00003858
-#define mmBIF_RB_CNTL_DEFAULT 0x00000000
-#define mmBIF_RB_BASE_DEFAULT 0x00000000
-#define mmBIF_RB_RPTR_DEFAULT 0x00000000
-#define mmBIF_RB_WPTR_DEFAULT 0x00000000
-#define mmBIF_RB_WPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmBIF_RB_WPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmMAILBOX_INDEX_DEFAULT 0x00000000
-#define mmBIF_GPUIOV_RESET_NOTIFICATION_DEFAULT 0x00000000
-#define mmBIF_UVD_GPUIOV_CFG_SIZE_DEFAULT 0x00000008
-#define mmBIF_VCE_GPUIOV_CFG_SIZE_DEFAULT 0x00000008
-#define mmBIF_GFX_SDMA_GPUIOV_CFG_SIZE_DEFAULT 0x00000008
-#define mmBIF_GMI_WRR_WEIGHT_DEFAULT 0x00202020
-#define mmNBIF_STRAP_WRITE_CTRL_DEFAULT 0x00000000
-#define mmBIF_PERSTB_PAD_CNTL_DEFAULT 0x000000c0
-#define mmBIF_PX_EN_PAD_CNTL_DEFAULT 0x00000031
-#define mmBIF_REFPADKIN_PAD_CNTL_DEFAULT 0x00000007
-#define mmBIF_CLKREQB_PAD_CNTL_DEFAULT 0x00600100
-
-
-// addressBlock: rcc_pf_0_BIFDEC1
-// base address: 0x0
-#define mmRCC_BACO_CNTL_MISC_DEFAULT 0x00000000
-#define mmRCC_RESET_EN_DEFAULT 0x00008000
-#define mmRCC_VDM_SUPPORT_DEFAULT 0x00000000
-#define mmRCC_PEER_REG_RANGE0_DEFAULT 0xffff0000
-#define mmRCC_PEER_REG_RANGE1_DEFAULT 0xffff0000
-#define mmRCC_BUS_CNTL_DEFAULT 0x00000000
-#define mmRCC_CONFIG_CNTL_DEFAULT 0x00000000
-#define mmRCC_CONFIG_F0_BASE_DEFAULT 0x00000000
-#define mmRCC_CONFIG_APER_SIZE_DEFAULT 0x00000000
-#define mmRCC_CONFIG_REG_APER_SIZE_DEFAULT 0x00000000
-#define mmRCC_XDMA_LO_DEFAULT 0x00000000
-#define mmRCC_XDMA_HI_DEFAULT 0x00000000
-#define mmRCC_FEATURES_CONTROL_MISC_DEFAULT 0x00000000
-#define mmRCC_BUSNUM_CNTL1_DEFAULT 0x00000000
-#define mmRCC_BUSNUM_LIST0_DEFAULT 0x00000000
-#define mmRCC_BUSNUM_LIST1_DEFAULT 0x00000000
-#define mmRCC_BUSNUM_CNTL2_DEFAULT 0x00000000
-#define mmRCC_CAPTURE_HOST_BUSNUM_DEFAULT 0x00000000
-#define mmRCC_HOST_BUSNUM_DEFAULT 0x00000000
-#define mmRCC_PEER0_FB_OFFSET_HI_DEFAULT 0x00000000
-#define mmRCC_PEER0_FB_OFFSET_LO_DEFAULT 0x00000000
-#define mmRCC_PEER1_FB_OFFSET_HI_DEFAULT 0x00000000
-#define mmRCC_PEER1_FB_OFFSET_LO_DEFAULT 0x00000000
-#define mmRCC_PEER2_FB_OFFSET_HI_DEFAULT 0x00000000
-#define mmRCC_PEER2_FB_OFFSET_LO_DEFAULT 0x00000000
-#define mmRCC_PEER3_FB_OFFSET_HI_DEFAULT 0x00000000
-#define mmRCC_PEER3_FB_OFFSET_LO_DEFAULT 0x00000000
-#define mmRCC_DEVFUNCNUM_LIST0_DEFAULT 0x00000000
-#define mmRCC_DEVFUNCNUM_LIST1_DEFAULT 0x00000000
-#define mmRCC_DEV0_LINK_CNTL_DEFAULT 0x00000000
-#define mmRCC_CMN_LINK_CNTL_DEFAULT 0x00000000
-#define mmRCC_EP_REQUESTERID_RESTORE_DEFAULT 0x00000000
-#define mmRCC_LTR_LSWITCH_CNTL_DEFAULT 0x00000000
-#define mmRCC_MH_ARB_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_pf_0_BIFDEC2
-// base address: 0x0
-#define mmGFXMSIX_VECT0_ADDR_LO_DEFAULT 0x00000000
-#define mmGFXMSIX_VECT0_ADDR_HI_DEFAULT 0x00000000
-#define mmGFXMSIX_VECT0_MSG_DATA_DEFAULT 0x00000000
-#define mmGFXMSIX_VECT0_CONTROL_DEFAULT 0x00000001
-#define mmGFXMSIX_VECT1_ADDR_LO_DEFAULT 0x00000000
-#define mmGFXMSIX_VECT1_ADDR_HI_DEFAULT 0x00000000
-#define mmGFXMSIX_VECT1_MSG_DATA_DEFAULT 0x00000000
-#define mmGFXMSIX_VECT1_CONTROL_DEFAULT 0x00000001
-#define mmGFXMSIX_VECT2_ADDR_LO_DEFAULT 0x00000000
-#define mmGFXMSIX_VECT2_ADDR_HI_DEFAULT 0x00000000
-#define mmGFXMSIX_VECT2_MSG_DATA_DEFAULT 0x00000000
-#define mmGFXMSIX_VECT2_CONTROL_DEFAULT 0x00000001
-#define mmGFXMSIX_PBA_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_strap_BIFDEC1
-// base address: 0x0
-#define mmRCC_DEV0_PORT_STRAP0_DEFAULT 0x54228bc0
-#define mmRCC_DEV0_PORT_STRAP1_DEFAULT 0x1022145e
-#define mmRCC_DEV0_PORT_STRAP2_DEFAULT 0x1c65e009
-#define mmRCC_DEV0_PORT_STRAP3_DEFAULT 0x5ffff849
-#define mmRCC_DEV0_PORT_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV0_PORT_STRAP5_DEFAULT 0xaf800000
-#define mmRCC_DEV0_PORT_STRAP6_DEFAULT 0x00000002
-#define mmRCC_DEV0_PORT_STRAP7_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF0_STRAP0_DEFAULT 0x30000000
-#define mmRCC_DEV0_EPF0_STRAP1_DEFAULT 0x05530000
-#define mmRCC_DEV0_EPF0_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF0_STRAP2_DEFAULT 0x02000000
-#define mmRCC_DEV0_EPF0_STRAP3_DEFAULT 0x08b40001
-#define mmRCC_DEV0_EPF0_STRAP4_DEFAULT 0x1f000042
-#define mmRCC_DEV0_EPF0_STRAP5_DEFAULT 0x00001022
-#define mmRCC_DEV0_EPF0_STRAP8_DEFAULT 0xc8c73002
-#define mmRCC_DEV0_EPF0_STRAP9_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF1_STRAP0_DEFAULT 0x30000000
-#define mmRCC_DEV0_EPF1_STRAP10_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF1_STRAP11_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF1_STRAP12_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF1_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF1_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF1_STRAP3_DEFAULT 0x08040001
-#define mmRCC_DEV0_EPF1_STRAP4_DEFAULT 0x2f000000
-#define mmRCC_DEV0_EPF1_STRAP5_DEFAULT 0x00001022
-#define mmRCC_DEV0_EPF1_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF1_STRAP7_DEFAULT 0x00000000
-
-
-// addressBlock: bif_bx_pf_BIFPFVFDEC1
-// base address: 0x0
-#define mmBIF_BME_STATUS_DEFAULT 0x00000000
-#define mmBIF_ATOMIC_ERR_LOG_DEFAULT 0x00000000
-#define mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH_DEFAULT 0x00000000
-#define mmDOORBELL_SELFRING_GPA_APER_BASE_LOW_DEFAULT 0x00000000
-#define mmDOORBELL_SELFRING_GPA_APER_CNTL_DEFAULT 0x00000000
-#define mmHDP_REG_COHERENCY_FLUSH_CNTL_DEFAULT 0x00000000
-#define mmHDP_MEM_COHERENCY_FLUSH_CNTL_DEFAULT 0x00000000
-#define mmGPU_HDP_FLUSH_REQ_DEFAULT 0x00000000
-#define mmGPU_HDP_FLUSH_DONE_DEFAULT 0x00000000
-#define mmBIF_TRANS_PENDING_DEFAULT 0x00000000
-#define mmMAILBOX_MSGBUF_TRN_DW0_DEFAULT 0x00000000
-#define mmMAILBOX_MSGBUF_TRN_DW1_DEFAULT 0x00000000
-#define mmMAILBOX_MSGBUF_TRN_DW2_DEFAULT 0x00000000
-#define mmMAILBOX_MSGBUF_TRN_DW3_DEFAULT 0x00000000
-#define mmMAILBOX_MSGBUF_RCV_DW0_DEFAULT 0x00000000
-#define mmMAILBOX_MSGBUF_RCV_DW1_DEFAULT 0x00000000
-#define mmMAILBOX_MSGBUF_RCV_DW2_DEFAULT 0x00000000
-#define mmMAILBOX_MSGBUF_RCV_DW3_DEFAULT 0x00000000
-#define mmMAILBOX_CONTROL_DEFAULT 0x00000000
-#define mmMAILBOX_INT_CNTL_DEFAULT 0x00000000
-#define mmBIF_VMHV_MAILBOX_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_pf_0_BIFPFVFDEC1
-// base address: 0x0
-#define mmRCC_DOORBELL_APER_EN_DEFAULT 0x00000000
-#define mmRCC_CONFIG_MEMSIZE_DEFAULT 0x00000000
-#define mmRCC_CONFIG_RESERVED_DEFAULT 0x00000000
-#define mmRCC_IOV_FUNC_IDENTIFIER_DEFAULT 0x00000000
-
-
-// addressBlock: syshub_mmreg_ind_syshubdec
-// base address: 0x0
-#define mmSYSHUB_INDEX_DEFAULT 0x00000000
-#define mmSYSHUB_DATA_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_strap_rcc_strap_internal
-// base address: 0x10100000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_PORT_STRAP0_DEFAULT 0x54228bc0
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_PORT_STRAP1_DEFAULT 0x1022145e
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_PORT_STRAP2_DEFAULT 0x1c65e009
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_PORT_STRAP3_DEFAULT 0x5ffff849
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_PORT_STRAP4_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_PORT_STRAP5_DEFAULT 0xaf800000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_PORT_STRAP6_DEFAULT 0x00000002
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_PORT_STRAP7_DEFAULT 0x00000000
-#define mmRCC_DEV1_PORT_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV1_PORT_STRAP1_DEFAULT 0x00000000
-#define mmRCC_DEV1_PORT_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV1_PORT_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV1_PORT_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV1_PORT_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV1_PORT_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV1_PORT_STRAP7_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF0_STRAP0_DEFAULT 0x30000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF0_STRAP1_DEFAULT 0x05530000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF0_STRAP2_DEFAULT 0x02000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF0_STRAP3_DEFAULT 0x08b40001
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF0_STRAP4_DEFAULT 0x1f000042
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF0_STRAP5_DEFAULT 0x00001022
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF0_STRAP8_DEFAULT 0xc8c73002
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF0_STRAP9_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF0_STRAP13_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP0_DEFAULT 0x30000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP2_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP3_DEFAULT 0x08040001
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP4_DEFAULT 0x2f000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP5_DEFAULT 0x00001022
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP6_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP7_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP10_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP11_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP12_DEFAULT 0x00000000
-#define mmRCCSTRAPRCCSTRAP_RCC_DEV0_EPF1_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF2_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF2_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF2_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF2_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF2_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF2_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF2_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF3_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF3_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF3_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF3_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF3_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF3_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF3_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF4_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF4_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF4_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF4_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF4_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF4_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF4_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF5_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF5_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF5_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF5_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF5_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF5_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF5_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF6_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF6_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF6_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF6_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF6_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF6_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF6_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF7_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF7_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF7_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF7_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF7_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF7_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV0_EPF7_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF0_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF0_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF0_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF0_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF0_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF0_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF0_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF1_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF1_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF1_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF1_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF1_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF1_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF1_STRAP13_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF2_STRAP0_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF2_STRAP2_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF2_STRAP3_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF2_STRAP4_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF2_STRAP5_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF2_STRAP6_DEFAULT 0x00000000
-#define mmRCC_DEV1_EPF2_STRAP13_DEFAULT 0x00000000
-
-
-// addressBlock: bif_rst_bif_rst_regblk
-// base address: 0x10100000
-#define ixHARD_RST_CTRL_DEFAULT 0xb0000055
-#define ixRSMU_SOFT_RST_CTRL_DEFAULT 0x90000000
-#define ixSELF_SOFT_RST_DEFAULT 0x00000000
-#define ixGFX_DRV_MODE1_RST_CTRL_DEFAULT 0x000000a9
-#define ixBIF_RST_MISC_CTRL_DEFAULT 0x00000644
-#define ixBIF_RST_MISC_CTRL2_DEFAULT 0x00000000
-#define ixBIF_RST_MISC_CTRL3_DEFAULT 0x00004900
-#define ixBIF_RST_GFXVF_FLR_IDLE_DEFAULT 0x00000000
-#define ixDEV0_PF0_FLR_RST_CTRL_DEFAULT 0x0206a9a9
-#define ixDEV0_PF1_FLR_RST_CTRL_DEFAULT 0x02060009
-#define ixDEV0_PF2_FLR_RST_CTRL_DEFAULT 0x02060009
-#define ixDEV0_PF3_FLR_RST_CTRL_DEFAULT 0x02060009
-#define ixDEV0_PF4_FLR_RST_CTRL_DEFAULT 0x02060009
-#define ixDEV0_PF5_FLR_RST_CTRL_DEFAULT 0x02060009
-#define ixDEV0_PF6_FLR_RST_CTRL_DEFAULT 0x02060009
-#define ixDEV0_PF7_FLR_RST_CTRL_DEFAULT 0x02060009
-#define ixBIF_INST_RESET_INTR_STS_DEFAULT 0x00000000
-#define ixBIF_PF_FLR_INTR_STS_DEFAULT 0x00000000
-#define ixBIF_D3HOTD0_INTR_STS_DEFAULT 0x00000000
-#define ixBIF_POWER_INTR_STS_DEFAULT 0x00000000
-#define ixBIF_PF_DSTATE_INTR_STS_DEFAULT 0x00000000
-#define ixBIF_PF0_VF_FLR_INTR_STS_DEFAULT 0x00000000
-#define ixBIF_INST_RESET_INTR_MASK_DEFAULT 0x00000000
-#define ixBIF_PF_FLR_INTR_MASK_DEFAULT 0x00000000
-#define ixBIF_D3HOTD0_INTR_MASK_DEFAULT 0x000000ff
-#define ixBIF_POWER_INTR_MASK_DEFAULT 0x00000000
-#define ixBIF_PF_DSTATE_INTR_MASK_DEFAULT 0x00000000
-#define ixBIF_PF0_VF_FLR_INTR_MASK_DEFAULT 0x00000000
-#define ixBIF_PF_FLR_RST_DEFAULT 0x00000000
-#define ixBIF_PF0_VF_FLR_RST_DEFAULT 0x00000000
-#define ixBIF_DEV0_PF0_DSTATE_VALUE_DEFAULT 0x00000000
-#define ixBIF_DEV0_PF1_DSTATE_VALUE_DEFAULT 0x00000000
-#define ixBIF_DEV0_PF2_DSTATE_VALUE_DEFAULT 0x00000000
-#define ixBIF_DEV0_PF3_DSTATE_VALUE_DEFAULT 0x00000000
-#define ixBIF_DEV0_PF4_DSTATE_VALUE_DEFAULT 0x00000000
-#define ixBIF_DEV0_PF5_DSTATE_VALUE_DEFAULT 0x00000000
-#define ixBIF_DEV0_PF6_DSTATE_VALUE_DEFAULT 0x00000000
-#define ixBIF_DEV0_PF7_DSTATE_VALUE_DEFAULT 0x00000000
-#define ixDEV0_PF0_D3HOTD0_RST_CTRL_DEFAULT 0x0000001b
-#define ixDEV0_PF1_D3HOTD0_RST_CTRL_DEFAULT 0x0000001b
-#define ixDEV0_PF2_D3HOTD0_RST_CTRL_DEFAULT 0x0000001b
-#define ixDEV0_PF3_D3HOTD0_RST_CTRL_DEFAULT 0x0000001b
-#define ixDEV0_PF4_D3HOTD0_RST_CTRL_DEFAULT 0x0000001b
-#define ixDEV0_PF5_D3HOTD0_RST_CTRL_DEFAULT 0x0000001b
-#define ixDEV0_PF6_D3HOTD0_RST_CTRL_DEFAULT 0x0000001b
-#define ixDEV0_PF7_D3HOTD0_RST_CTRL_DEFAULT 0x0000001b
-#define ixBIF_PORT0_DSTATE_VALUE_DEFAULT 0x00000000
-
-
-// addressBlock: bif_misc_bif_misc_regblk
-// base address: 0x10100000
-#define ixMISC_SCRATCH_DEFAULT 0x00000000
-#define ixINTR_LINE_POLARITY_DEFAULT 0x00000000
-#define ixINTR_LINE_ENABLE_DEFAULT 0x00000000
-#define ixOUTSTANDING_VC_ALLOC_DEFAULT 0x6f06c0cf
-#define ixBIFC_MISC_CTRL0_DEFAULT 0x08000004
-#define ixBIFC_MISC_CTRL1_DEFAULT 0x00008004
-#define ixBIFC_BME_ERR_LOG_DEFAULT 0x00000000
-#define ixBIFC_RCCBIH_BME_ERR_LOG_DEFAULT 0x00000000
-#define ixBIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1_DEFAULT 0x00000000
-#define ixBIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3_DEFAULT 0x00000000
-#define ixBIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5_DEFAULT 0x00000000
-#define ixBIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7_DEFAULT 0x00000000
-#define ixNBIF_VWIRE_CTRL_DEFAULT 0x00000000
-#define ixNBIF_SMN_VWR_VCHG_DIS_CTRL_DEFAULT 0x00000000
-#define ixNBIF_SMN_VWR_VCHG_RST_CTRL0_DEFAULT 0x00000000
-#define ixNBIF_SMN_VWR_VCHG_TRIG_DEFAULT 0x00000000
-#define ixNBIF_SMN_VWR_WTRIG_CNTL_DEFAULT 0x00000000
-#define ixNBIF_SMN_VWR_VCHG_DIS_CTRL_1_DEFAULT 0x00000000
-#define ixNBIF_MGCG_CTRL_DEFAULT 0x00000080
-#define ixNBIF_DS_CTRL_LCLK_DEFAULT 0x01000000
-#define ixSMN_MST_CNTL0_DEFAULT 0x00000001
-#define ixSMN_MST_EP_CNTL1_DEFAULT 0x00000000
-#define ixSMN_MST_EP_CNTL2_DEFAULT 0x00000000
-#define ixNBIF_SDP_VWR_VCHG_DIS_CTRL_DEFAULT 0x00000000
-#define ixNBIF_SDP_VWR_VCHG_RST_CTRL0_DEFAULT 0x00000000
-#define ixNBIF_SDP_VWR_VCHG_RST_CTRL1_DEFAULT 0x00000000
-#define ixNBIF_SDP_VWR_VCHG_TRIG_DEFAULT 0x00000000
-#define ixBME_DUMMY_CNTL_0_DEFAULT 0x0000aaaa
-#define ixBIFC_THT_CNTL_DEFAULT 0x00000222
-#define ixBIFC_HSTARB_CNTL_DEFAULT 0x00000000
-#define ixBIFC_GSI_CNTL_DEFAULT 0x000017c0
-#define ixBIFC_PCIEFUNC_CNTL_DEFAULT 0x00000000
-#define ixBIFC_SDP_CNTL_0_DEFAULT 0x003cf3cf
-#define ixBIFC_PERF_CNTL_0_DEFAULT 0x00000000
-#define ixBIFC_PERF_CNTL_1_DEFAULT 0x00000000
-#define ixBIFC_PERF_CNT_MMIO_RD_DEFAULT 0x00000000
-#define ixBIFC_PERF_CNT_MMIO_WR_DEFAULT 0x00000000
-#define ixBIFC_PERF_CNT_DMA_RD_DEFAULT 0x00000000
-#define ixBIFC_PERF_CNT_DMA_WR_DEFAULT 0x00000000
-#define ixNBIF_REGIF_ERRSET_CTRL_DEFAULT 0x00000000
-#define ixSMN_MST_EP_CNTL3_DEFAULT 0x00000000
-#define ixSMN_MST_EP_CNTL4_DEFAULT 0x00000000
-#define ixBIF_SELFRING_BUFFER_VID_DEFAULT 0x0000605f
-#define ixBIF_SELFRING_VECTOR_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: bif_ras_bif_ras_regblk
-// base address: 0x10100000
-#define ixBIF_RAS_LEAF0_CTRL_DEFAULT 0x00000000
-#define ixBIF_RAS_LEAF1_CTRL_DEFAULT 0x00000000
-#define ixBIF_RAS_LEAF2_CTRL_DEFAULT 0x00000000
-#define ixBIF_RAS_MISC_CTRL_DEFAULT 0x00000000
-#define ixBIF_IOHUB_RAS_IH_CNTL_DEFAULT 0x00000000
-#define ixBIF_RAS_VWR_FROM_IOHUB_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_pfc_amdgfx_RCCPFCDEC
-// base address: 0x10134000
-#define ixRCC_PFC_LTR_CNTL_DEFAULT 0x00000000
-#define ixRCC_PFC_PME_RESTORE_DEFAULT 0x00000000
-#define ixRCC_PFC_STICKY_RESTORE_0_DEFAULT 0x00000000
-#define ixRCC_PFC_STICKY_RESTORE_1_DEFAULT 0x00000000
-#define ixRCC_PFC_STICKY_RESTORE_2_DEFAULT 0x00000000
-#define ixRCC_PFC_STICKY_RESTORE_3_DEFAULT 0x00000000
-#define ixRCC_PFC_STICKY_RESTORE_4_DEFAULT 0x00000000
-#define ixRCC_PFC_STICKY_RESTORE_5_DEFAULT 0x00000000
-#define ixRCC_PFC_AUXPWR_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: rcc_pfc_amdgfxaz_RCCPFCDEC
-// base address: 0x10134200
-#define ixRCCPFCAMDGFXAZ_RCC_PFC_LTR_CNTL_DEFAULT 0x00000000
-#define ixRCCPFCAMDGFXAZ_RCC_PFC_PME_RESTORE_DEFAULT 0x00000000
-#define ixRCCPFCAMDGFXAZ_RCC_PFC_STICKY_RESTORE_0_DEFAULT 0x00000000
-#define ixRCCPFCAMDGFXAZ_RCC_PFC_STICKY_RESTORE_1_DEFAULT 0x00000000
-#define ixRCCPFCAMDGFXAZ_RCC_PFC_STICKY_RESTORE_2_DEFAULT 0x00000000
-#define ixRCCPFCAMDGFXAZ_RCC_PFC_STICKY_RESTORE_3_DEFAULT 0x00000000
-#define ixRCCPFCAMDGFXAZ_RCC_PFC_STICKY_RESTORE_4_DEFAULT 0x00000000
-#define ixRCCPFCAMDGFXAZ_RCC_PFC_STICKY_RESTORE_5_DEFAULT 0x00000000
-#define ixRCCPFCAMDGFXAZ_RCC_PFC_AUXPWR_CNTL_DEFAULT 0x00000000
-
-
-// addressBlock: pciemsix_amdgfx_MSIXTDEC
-// base address: 0x10170000
-#define ixPCIEMSIX_VECT0_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT0_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT0_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT0_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT1_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT1_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT1_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT1_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT2_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT2_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT2_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT2_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT3_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT3_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT3_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT3_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT4_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT4_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT4_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT4_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT5_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT5_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT5_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT5_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT6_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT6_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT6_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT6_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT7_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT7_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT7_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT7_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT8_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT8_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT8_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT8_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT9_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT9_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT9_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT9_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT10_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT10_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT10_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT10_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT11_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT11_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT11_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT11_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT12_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT12_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT12_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT12_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT13_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT13_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT13_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT13_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT14_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT14_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT14_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT14_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT15_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT15_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT15_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT15_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT16_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT16_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT16_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT16_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT17_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT17_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT17_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT17_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT18_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT18_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT18_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT18_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT19_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT19_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT19_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT19_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT20_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT20_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT20_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT20_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT21_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT21_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT21_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT21_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT22_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT22_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT22_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT22_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT23_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT23_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT23_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT23_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT24_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT24_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT24_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT24_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT25_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT25_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT25_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT25_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT26_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT26_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT26_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT26_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT27_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT27_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT27_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT27_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT28_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT28_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT28_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT28_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT29_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT29_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT29_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT29_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT30_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT30_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT30_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT30_CONTROL_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT31_ADDR_LO_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT31_ADDR_HI_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT31_MSG_DATA_DEFAULT 0x00000000
-#define ixPCIEMSIX_VECT31_CONTROL_DEFAULT 0x00000000
-
-
-// addressBlock: pciemsix_amdgfx_MSIXPDEC
-// base address: 0x10171000
-#define ixPCIEMSIX_PBA_DEFAULT 0x00000000
-
-
-// addressBlock: syshub_mmreg_ind_syshubind
-// base address: 0x0
-#define ixSYSHUBMMREGIND_SYSHUB_DS_CTRL_SOCCLK_DEFAULT 0x00000000
-#define ixSYSHUBMMREGIND_SYSHUB_DS_CTRL2_SOCCLK_DEFAULT 0x00000100
-#define ixSYSHUBMMREGIND_SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK_DEFAULT 0x00000000
-#define ixSYSHUBMMREGIND_SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK_DEFAULT 0x00000000
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW0_SYSHUB_QOS_CNTL_DEFAULT 0x0000001e
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW1_SYSHUB_QOS_CNTL_DEFAULT 0x0000001e
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW0_CL0_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW0_CL1_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW0_CL2_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW0_CL3_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW0_CL4_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW0_CL5_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW1_CL0_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK0_SW2_CL0_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_SYSHUB_CG_CNTL_DEFAULT 0x00082000
-#define ixSYSHUBMMREGIND_SYSHUB_TRANS_IDLE_DEFAULT 0x00000000
-#define ixSYSHUBMMREGIND_SYSHUB_HP_TIMER_DEFAULT 0x00000100
-#define ixSYSHUBMMREGIND_SYSHUB_SCRATCH_DEFAULT 0x00000040
-#define ixSYSHUBMMREGIND_SYSHUB_DS_CTRL_SHUBCLK_DEFAULT 0x00000000
-#define ixSYSHUBMMREGIND_SYSHUB_DS_CTRL2_SHUBCLK_DEFAULT 0x00000100
-#define ixSYSHUBMMREGIND_SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SHUBCLK_DEFAULT 0x00000000
-#define ixSYSHUBMMREGIND_SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SHUBCLK_DEFAULT 0x00000000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW0_SYSHUB_QOS_CNTL_DEFAULT 0x0000001e
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW1_SYSHUB_QOS_CNTL_DEFAULT 0x0000001e
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW0_CL0_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW0_CL1_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW0_CL2_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW0_CL3_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW0_CL4_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW1_CL0_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW1_CL1_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW1_CL2_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW1_CL3_CNTL_DEFAULT 0x20200000
-#define ixSYSHUBMMREGIND_DMA_CLK1_SW1_CL4_CNTL_DEFAULT 0x20200000
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_default.h
deleted file mode 100644
index 1fddd0f..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_default.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _osssys_4_0_DEFAULT_HEADER
-#define _osssys_4_0_DEFAULT_HEADER
-
-
-// addressBlock: osssys_osssysdec
-#define mmIH_VMID_0_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_1_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_2_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_3_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_4_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_5_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_6_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_7_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_8_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_9_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_10_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_11_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_12_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_13_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_14_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_15_LUT_DEFAULT 0x00000000
-#define mmIH_VMID_0_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_1_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_2_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_3_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_4_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_5_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_6_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_7_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_8_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_9_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_10_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_11_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_12_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_13_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_14_LUT_MM_DEFAULT 0x00000000
-#define mmIH_VMID_15_LUT_MM_DEFAULT 0x00000000
-#define mmIH_COOKIE_0_DEFAULT 0x00000000
-#define mmIH_COOKIE_1_DEFAULT 0x00000000
-#define mmIH_COOKIE_2_DEFAULT 0x00000000
-#define mmIH_COOKIE_3_DEFAULT 0x00000000
-#define mmIH_COOKIE_4_DEFAULT 0x00000000
-#define mmIH_COOKIE_5_DEFAULT 0x00000000
-#define mmIH_COOKIE_6_DEFAULT 0x00000000
-#define mmIH_COOKIE_7_DEFAULT 0x00000000
-#define mmIH_REGISTER_LAST_PART0_DEFAULT 0x00000000
-#define mmSEM_REQ_INPUT_0_DEFAULT 0x00000000
-#define mmSEM_REQ_INPUT_1_DEFAULT 0x00000000
-#define mmSEM_REQ_INPUT_2_DEFAULT 0x00000000
-#define mmSEM_REQ_INPUT_3_DEFAULT 0x00000000
-#define mmSEM_REGISTER_LAST_PART0_DEFAULT 0x00000000
-#define mmIH_RB_CNTL_DEFAULT 0x10610000
-#define mmIH_RB_BASE_DEFAULT 0x00000000
-#define mmIH_RB_BASE_HI_DEFAULT 0x00000000
-#define mmIH_RB_RPTR_DEFAULT 0x00000000
-#define mmIH_RB_WPTR_DEFAULT 0x00000000
-#define mmIH_RB_WPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmIH_RB_WPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmIH_DOORBELL_RPTR_DEFAULT 0x00000000
-#define mmIH_RB_CNTL_RING1_DEFAULT 0x10410000
-#define mmIH_RB_BASE_RING1_DEFAULT 0x00000000
-#define mmIH_RB_BASE_HI_RING1_DEFAULT 0x00000000
-#define mmIH_RB_RPTR_RING1_DEFAULT 0x00000000
-#define mmIH_RB_WPTR_RING1_DEFAULT 0x00000000
-#define mmIH_DOORBELL_RPTR_RING1_DEFAULT 0x00000000
-#define mmIH_RB_CNTL_RING2_DEFAULT 0x10410000
-#define mmIH_RB_BASE_RING2_DEFAULT 0x00000000
-#define mmIH_RB_BASE_HI_RING2_DEFAULT 0x00000000
-#define mmIH_RB_RPTR_RING2_DEFAULT 0x00000000
-#define mmIH_RB_WPTR_RING2_DEFAULT 0x00000000
-#define mmIH_DOORBELL_RPTR_RING2_DEFAULT 0x00000000
-#define mmIH_VERSION_DEFAULT 0x00000400
-#define mmIH_CNTL_DEFAULT 0x01000000
-#define mmIH_CNTL2_DEFAULT 0x000000ff
-#define mmIH_STATUS_DEFAULT 0x00040847
-#define mmIH_PERFMON_CNTL_DEFAULT 0x00000000
-#define mmIH_PERFCOUNTER0_RESULT_DEFAULT 0x00000000
-#define mmIH_PERFCOUNTER1_RESULT_DEFAULT 0x00000000
-#define mmIH_DSM_MATCH_VALUE_BIT_31_0_DEFAULT 0x00000000
-#define mmIH_DSM_MATCH_VALUE_BIT_63_32_DEFAULT 0x00000000
-#define mmIH_DSM_MATCH_VALUE_BIT_95_64_DEFAULT 0x00000000
-#define mmIH_DSM_MATCH_FIELD_CONTROL_DEFAULT 0x0000007f
-#define mmIH_DSM_MATCH_DATA_CONTROL_DEFAULT 0x0fffffff
-#define mmIH_DSM_MATCH_FCN_ID_DEFAULT 0x00000000
-#define mmIH_LIMIT_INT_RATE_CNTL_DEFAULT 0x00000000
-#define mmIH_VF_RB_STATUS_DEFAULT 0x00000000
-#define mmIH_VF_RB_STATUS2_DEFAULT 0x00000000
-#define mmIH_VF_RB1_STATUS_DEFAULT 0x00000000
-#define mmIH_VF_RB1_STATUS2_DEFAULT 0x00000000
-#define mmIH_VF_RB2_STATUS_DEFAULT 0x00000000
-#define mmIH_VF_RB2_STATUS2_DEFAULT 0x00000000
-#define mmIH_INT_FLOOD_CNTL_DEFAULT 0x00000000
-#define mmIH_RB0_INT_FLOOD_STATUS_DEFAULT 0x00000000
-#define mmIH_RB1_INT_FLOOD_STATUS_DEFAULT 0x00000000
-#define mmIH_RB2_INT_FLOOD_STATUS_DEFAULT 0x00000000
-#define mmIH_INT_FLOOD_STATUS_DEFAULT 0x00000000
-#define mmIH_STORM_CLIENT_LIST_CNTL_DEFAULT 0x00000000
-#define mmIH_CLK_CTRL_DEFAULT 0x00000000
-#define mmIH_INT_FLAGS_DEFAULT 0x00000000
-#define mmIH_LAST_INT_INFO0_DEFAULT 0x00000000
-#define mmIH_LAST_INT_INFO1_DEFAULT 0x00000000
-#define mmIH_LAST_INT_INFO2_DEFAULT 0x00000000
-#define mmIH_SCRATCH_DEFAULT 0x00000000
-#define mmIH_CLIENT_CREDIT_ERROR_DEFAULT 0x00000000
-#define mmIH_GPU_IOV_VIOLATION_LOG_DEFAULT 0x00000000
-#define mmIH_COOKIE_REC_VIOLATION_LOG_DEFAULT 0x00000000
-#define mmIH_CREDIT_STATUS_DEFAULT 0xfffffffe
-#define mmIH_MMHUB_ERROR_DEFAULT 0x00000000
-#define mmIH_REGISTER_LAST_PART2_DEFAULT 0x00000000
-#define mmSEM_CLK_CTRL_DEFAULT 0x00000100
-#define mmSEM_UTC_CREDIT_DEFAULT 0x00000510
-#define mmSEM_UTC_CONFIG_DEFAULT 0x00000020
-#define mmSEM_UTCL2_TRAN_EN_LUT_DEFAULT 0x800000ff
-#define mmSEM_MCIF_CONFIG_DEFAULT 0x00001040
-#define mmSEM_PERFMON_CNTL_DEFAULT 0x00000000
-#define mmSEM_PERFCOUNTER0_RESULT_DEFAULT 0x00000000
-#define mmSEM_PERFCOUNTER1_RESULT_DEFAULT 0x00000000
-#define mmSEM_STATUS_DEFAULT 0x80f90003
-#define mmSEM_MAILBOX_CLIENTCONFIG_DEFAULT 0x00fac688
-#define mmSEM_MAILBOX_DEFAULT 0x00000000
-#define mmSEM_MAILBOX_CONTROL_DEFAULT 0x00000000
-#define mmSEM_CHICKEN_BITS_DEFAULT 0x00084ad6
-#define mmSEM_MAILBOX_CLIENTCONFIG_EXTRA_DEFAULT 0x00000008
-#define mmSEM_GPU_IOV_VIOLATION_LOG_DEFAULT 0x00000000
-#define mmSEM_OUTSTANDING_THRESHOLD_DEFAULT 0x00000010
-#define mmSEM_REGISTER_LAST_PART2_DEFAULT 0x00000000
-#define mmIH_ACTIVE_FCN_ID_DEFAULT 0x00000000
-#define mmIH_VIRT_RESET_REQ_DEFAULT 0x00000000
-#define mmIH_CLIENT_CFG_DEFAULT 0x0000001f
-#define mmIH_CLIENT_CFG_INDEX_DEFAULT 0x00000000
-#define mmIH_CLIENT_CFG_DATA_DEFAULT 0x00000000
-#define mmIH_CID_REMAP_INDEX_DEFAULT 0x00000000
-#define mmIH_CID_REMAP_DATA_DEFAULT 0x00000000
-#define mmIH_CHICKEN_DEFAULT 0x00000000
-#define mmIH_MMHUB_CNTL_DEFAULT 0x00000001
-#define mmIH_REGISTER_LAST_PART1_DEFAULT 0x00000000
-#define mmSEM_ACTIVE_FCN_ID_DEFAULT 0x00000000
-#define mmSEM_VIRT_RESET_REQ_DEFAULT 0x00000000
-#define mmSEM_RESP_SDMA0_DEFAULT 0x0004950c
-#define mmSEM_RESP_SDMA1_DEFAULT 0x0004958c
-#define mmSEM_RESP_UVD_DEFAULT 0x0004860c
-#define mmSEM_RESP_VCE_0_DEFAULT 0x0004900c
-#define mmSEM_RESP_ACP_DEFAULT 0x0004870c
-#define mmSEM_RESP_ISP_DEFAULT 0x00000000
-#define mmSEM_RESP_VCE_1_DEFAULT 0x0004908c
-#define mmSEM_RESP_VP8_DEFAULT 0x00000000
-#define mmSEM_RESP_GC_DEFAULT 0x0004858c
-#define mmSEM_CID_REMAP_INDEX_DEFAULT 0x00000000
-#define mmSEM_CID_REMAP_DATA_DEFAULT 0x00000000
-#define mmSEM_ATOMIC_OP_LUT_DEFAULT 0x040a102f
-#define mmSEM_EDC_CONFIG_DEFAULT 0x00000002
-#define mmSEM_CHICKEN_BITS2_DEFAULT 0x00000000
-#define mmSEM_MMHUB_CNTL_DEFAULT 0x00000000
-#define mmSEM_REGISTER_LAST_PART1_DEFAULT 0x00000000
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_default.h
deleted file mode 100644
index afd15bd..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_default.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _sdma0_4_0_DEFAULT_HEADER
-#define _sdma0_4_0_DEFAULT_HEADER
-
-
-// addressBlock: sdma0_sdma0dec
-#define mmSDMA0_UCODE_ADDR_DEFAULT 0x00000000
-#define mmSDMA0_UCODE_DATA_DEFAULT 0x00000000
-#define mmSDMA0_VM_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_VM_CTX_LO_DEFAULT 0x00000000
-#define mmSDMA0_VM_CTX_HI_DEFAULT 0x00000000
-#define mmSDMA0_ACTIVE_FCN_ID_DEFAULT 0x00000000
-#define mmSDMA0_VM_CTX_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_VIRT_RESET_REQ_DEFAULT 0x00000000
-#define mmSDMA0_VF_ENABLE_DEFAULT 0x00000000
-#define mmSDMA0_CONTEXT_REG_TYPE0_DEFAULT 0xfffdf79f
-#define mmSDMA0_CONTEXT_REG_TYPE1_DEFAULT 0x003fbcff
-#define mmSDMA0_CONTEXT_REG_TYPE2_DEFAULT 0x000003ff
-#define mmSDMA0_CONTEXT_REG_TYPE3_DEFAULT 0x00000000
-#define mmSDMA0_PUB_REG_TYPE0_DEFAULT 0x3c000000
-#define mmSDMA0_PUB_REG_TYPE1_DEFAULT 0x30003882
-#define mmSDMA0_PUB_REG_TYPE2_DEFAULT 0x0fc6e880
-#define mmSDMA0_PUB_REG_TYPE3_DEFAULT 0x00000000
-#define mmSDMA0_MMHUB_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_CONTEXT_GROUP_BOUNDARY_DEFAULT 0x00000000
-#define mmSDMA0_POWER_CNTL_DEFAULT 0x0003c000
-#define mmSDMA0_CLK_CTRL_DEFAULT 0xff000100
-#define mmSDMA0_CNTL_DEFAULT 0x00000002
-#define mmSDMA0_CHICKEN_BITS_DEFAULT 0x00831f07
-#define mmSDMA0_GB_ADDR_CONFIG_DEFAULT 0x00100012
-#define mmSDMA0_GB_ADDR_CONFIG_READ_DEFAULT 0x00100012
-#define mmSDMA0_RB_RPTR_FETCH_HI_DEFAULT 0x00000000
-#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_RB_RPTR_FETCH_DEFAULT 0x00000000
-#define mmSDMA0_IB_OFFSET_FETCH_DEFAULT 0x00000000
-#define mmSDMA0_PROGRAM_DEFAULT 0x00000000
-#define mmSDMA0_STATUS_REG_DEFAULT 0x46dee557
-#define mmSDMA0_STATUS1_REG_DEFAULT 0x000003ff
-#define mmSDMA0_RD_BURST_CNTL_DEFAULT 0x00000003
-#define mmSDMA0_HBM_PAGE_CONFIG_DEFAULT 0x00000000
-#define mmSDMA0_UCODE_CHECKSUM_DEFAULT 0x00000000
-#define mmSDMA0_F32_CNTL_DEFAULT 0x00000001
-#define mmSDMA0_FREEZE_DEFAULT 0x00000000
-#define mmSDMA0_PHASE0_QUANTUM_DEFAULT 0x00010002
-#define mmSDMA0_PHASE1_QUANTUM_DEFAULT 0x00010002
-#define mmSDMA_POWER_GATING_DEFAULT 0x00000000
-#define mmSDMA_PGFSM_CONFIG_DEFAULT 0x00000000
-#define mmSDMA_PGFSM_WRITE_DEFAULT 0x00000000
-#define mmSDMA_PGFSM_READ_DEFAULT 0x00000000
-#define mmSDMA0_EDC_CONFIG_DEFAULT 0x00000002
-#define mmSDMA0_BA_THRESHOLD_DEFAULT 0x03ff03ff
-#define mmSDMA0_ID_DEFAULT 0x00000001
-#define mmSDMA0_VERSION_DEFAULT 0x00000400
-#define mmSDMA0_EDC_COUNTER_DEFAULT 0x00000000
-#define mmSDMA0_EDC_COUNTER_CLEAR_DEFAULT 0x00000000
-#define mmSDMA0_STATUS2_REG_DEFAULT 0x00000000
-#define mmSDMA0_ATOMIC_CNTL_DEFAULT 0x00000200
-#define mmSDMA0_ATOMIC_PREOP_LO_DEFAULT 0x00000000
-#define mmSDMA0_ATOMIC_PREOP_HI_DEFAULT 0x00000000
-#define mmSDMA0_UTCL1_CNTL_DEFAULT 0xd0003019
-#define mmSDMA0_UTCL1_WATERMK_DEFAULT 0xfffbe1fe
-#define mmSDMA0_UTCL1_RD_STATUS_DEFAULT 0x201001ff
-#define mmSDMA0_UTCL1_WR_STATUS_DEFAULT 0x503001ff
-#define mmSDMA0_UTCL1_INV0_DEFAULT 0x00000600
-#define mmSDMA0_UTCL1_INV1_DEFAULT 0x00000000
-#define mmSDMA0_UTCL1_INV2_DEFAULT 0x00000000
-#define mmSDMA0_UTCL1_RD_XNACK0_DEFAULT 0x00000000
-#define mmSDMA0_UTCL1_RD_XNACK1_DEFAULT 0x00000000
-#define mmSDMA0_UTCL1_WR_XNACK0_DEFAULT 0x00000000
-#define mmSDMA0_UTCL1_WR_XNACK1_DEFAULT 0x00000000
-#define mmSDMA0_UTCL1_TIMEOUT_DEFAULT 0x00010001
-#define mmSDMA0_UTCL1_PAGE_DEFAULT 0x000003e0
-#define mmSDMA0_POWER_CNTL_IDLE_DEFAULT 0x06060200
-#define mmSDMA0_RELAX_ORDERING_LUT_DEFAULT 0xc0000006
-#define mmSDMA0_CHICKEN_BITS_2_DEFAULT 0x00000005
-#define mmSDMA0_STATUS3_REG_DEFAULT 0x00100000
-#define mmSDMA0_PHYSICAL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_PHYSICAL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_PHASE2_QUANTUM_DEFAULT 0x00010002
-#define mmSDMA0_ERROR_LOG_DEFAULT 0x0000000f
-#define mmSDMA0_PUB_DUMMY_REG0_DEFAULT 0x00000000
-#define mmSDMA0_PUB_DUMMY_REG1_DEFAULT 0x00000000
-#define mmSDMA0_PUB_DUMMY_REG2_DEFAULT 0x00000000
-#define mmSDMA0_PUB_DUMMY_REG3_DEFAULT 0x00000000
-#define mmSDMA0_F32_COUNTER_DEFAULT 0x00000000
-#define mmSDMA0_UNBREAKABLE_DEFAULT 0x00000000
-#define mmSDMA0_PERFMON_CNTL_DEFAULT 0x000ff7fd
-#define mmSDMA0_PERFCOUNTER0_RESULT_DEFAULT 0x00000000
-#define mmSDMA0_PERFCOUNTER1_RESULT_DEFAULT 0x00000000
-#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE_DEFAULT 0x00640000
-#define mmSDMA0_CRD_CNTL_DEFAULT 0x000085c0
-#define mmSDMA0_MMHUB_TRUSTLVL_DEFAULT 0x00000000
-#define mmSDMA0_GPU_IOV_VIOLATION_LOG_DEFAULT 0x00000000
-#define mmSDMA0_ULV_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_EA_DBIT_ADDR_DATA_DEFAULT 0x00000000
-#define mmSDMA0_EA_DBIT_ADDR_INDEX_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_CNTL_DEFAULT 0x00040000
-#define mmSDMA0_GFX_RB_BASE_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_RPTR_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_RPTR_HI_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_WPTR_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_WPTR_HI_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
-#define mmSDMA0_GFX_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_GFX_IB_CNTL_DEFAULT 0x00000100
-#define mmSDMA0_GFX_IB_RPTR_DEFAULT 0x00000000
-#define mmSDMA0_GFX_IB_OFFSET_DEFAULT 0x00000000
-#define mmSDMA0_GFX_IB_BASE_LO_DEFAULT 0x00000000
-#define mmSDMA0_GFX_IB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA0_GFX_IB_SIZE_DEFAULT 0x00000000
-#define mmSDMA0_GFX_SKIP_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_GFX_CONTEXT_STATUS_DEFAULT 0x00000005
-#define mmSDMA0_GFX_DOORBELL_DEFAULT 0x00000000
-#define mmSDMA0_GFX_CONTEXT_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_GFX_STATUS_DEFAULT 0x00000000
-#define mmSDMA0_GFX_DOORBELL_LOG_DEFAULT 0x00000000
-#define mmSDMA0_GFX_WATERMARK_DEFAULT 0x00000000
-#define mmSDMA0_GFX_DOORBELL_OFFSET_DEFAULT 0x00000000
-#define mmSDMA0_GFX_CSA_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_GFX_CSA_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_GFX_IB_SUB_REMAIN_DEFAULT 0x00000000
-#define mmSDMA0_GFX_PREEMPT_DEFAULT 0x00000000
-#define mmSDMA0_GFX_DUMMY_REG_DEFAULT 0x0000000f
-#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_GFX_RB_AQL_CNTL_DEFAULT 0x00004000
-#define mmSDMA0_GFX_MINOR_PTR_UPDATE_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_DATA0_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_DATA1_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_DATA2_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_DATA3_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_DATA4_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_DATA5_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_DATA6_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_DATA7_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_DATA8_DEFAULT 0x00000000
-#define mmSDMA0_GFX_MIDCMD_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_CNTL_DEFAULT 0x00040000
-#define mmSDMA0_PAGE_RB_BASE_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_RPTR_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_RPTR_HI_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_WPTR_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_WPTR_HI_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
-#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_IB_CNTL_DEFAULT 0x00000100
-#define mmSDMA0_PAGE_IB_RPTR_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_IB_OFFSET_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_IB_BASE_LO_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_IB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_IB_SIZE_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_SKIP_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_CONTEXT_STATUS_DEFAULT 0x00000004
-#define mmSDMA0_PAGE_DOORBELL_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_STATUS_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_DOORBELL_LOG_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_WATERMARK_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_DOORBELL_OFFSET_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_CSA_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_CSA_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_IB_SUB_REMAIN_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_PREEMPT_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_DUMMY_REG_DEFAULT 0x0000000f
-#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_RB_AQL_CNTL_DEFAULT 0x00004000
-#define mmSDMA0_PAGE_MINOR_PTR_UPDATE_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_DATA0_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_DATA1_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_DATA2_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_DATA3_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_DATA4_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_DATA5_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_DATA6_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_DATA7_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_DATA8_DEFAULT 0x00000000
-#define mmSDMA0_PAGE_MIDCMD_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_CNTL_DEFAULT 0x00040000
-#define mmSDMA0_RLC0_RB_BASE_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_RPTR_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_RPTR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_WPTR_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_WPTR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
-#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_IB_CNTL_DEFAULT 0x00000100
-#define mmSDMA0_RLC0_IB_RPTR_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_IB_OFFSET_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_IB_BASE_LO_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_IB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_IB_SIZE_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_SKIP_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_CONTEXT_STATUS_DEFAULT 0x00000004
-#define mmSDMA0_RLC0_DOORBELL_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_STATUS_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_DOORBELL_LOG_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_WATERMARK_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_DOORBELL_OFFSET_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_CSA_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_CSA_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_IB_SUB_REMAIN_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_PREEMPT_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_DUMMY_REG_DEFAULT 0x0000000f
-#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_RB_AQL_CNTL_DEFAULT 0x00004000
-#define mmSDMA0_RLC0_MINOR_PTR_UPDATE_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_DATA0_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_DATA1_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_DATA2_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_DATA3_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_DATA4_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_DATA5_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_DATA6_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_DATA7_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_DATA8_DEFAULT 0x00000000
-#define mmSDMA0_RLC0_MIDCMD_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_CNTL_DEFAULT 0x00040000
-#define mmSDMA0_RLC1_RB_BASE_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_RPTR_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_RPTR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_WPTR_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_WPTR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
-#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_IB_CNTL_DEFAULT 0x00000100
-#define mmSDMA0_RLC1_IB_RPTR_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_IB_OFFSET_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_IB_BASE_LO_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_IB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_IB_SIZE_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_SKIP_CNTL_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_CONTEXT_STATUS_DEFAULT 0x00000004
-#define mmSDMA0_RLC1_DOORBELL_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_STATUS_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_DOORBELL_LOG_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_WATERMARK_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_DOORBELL_OFFSET_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_CSA_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_CSA_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_IB_SUB_REMAIN_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_PREEMPT_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_DUMMY_REG_DEFAULT 0x0000000f
-#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_RB_AQL_CNTL_DEFAULT 0x00004000
-#define mmSDMA0_RLC1_MINOR_PTR_UPDATE_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_DATA0_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_DATA1_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_DATA2_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_DATA3_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_DATA4_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_DATA5_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_DATA6_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_DATA7_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_DATA8_DEFAULT 0x00000000
-#define mmSDMA0_RLC1_MIDCMD_CNTL_DEFAULT 0x00000000
-
-#endif
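[Editor's note, not part of the patch: the *_DEFAULT constants above record the documented reset value of every SDMA0 register in the deleted header. A minimal, hypothetical sketch of how such values can be checked against hardware follows; rreg32() is a stand-in MMIO read stub, not an amdgpu API.]

#include <stdint.h>
#include <stdio.h>

#define mmSDMA0_GFX_RB_CNTL          0x0080        /* DWORD offset, from the offset header below */
#define mmSDMA0_GFX_RB_CNTL_DEFAULT  0x00040000    /* documented reset value, from the header above */

/* Hypothetical MMIO read stub; a real driver would use its own register accessors. */
static uint32_t rreg32(uint32_t offset)
{
	(void)offset;
	return 0x00040000;    /* pretend the engine still holds its reset value */
}

/* Warn when a register no longer holds its documented default. */
static void check_default(const char *name, uint32_t offset, uint32_t def)
{
	uint32_t val = rreg32(offset);

	if (val != def)
		printf("%s: 0x%08x, expected default 0x%08x\n", name, val, def);
}

int main(void)
{
	check_default("SDMA0_GFX_RB_CNTL", mmSDMA0_GFX_RB_CNTL,
		      mmSDMA0_GFX_RB_CNTL_DEFAULT);
	return 0;
}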
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_offset.h
deleted file mode 100644
index b100c4e..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_offset.h
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _sdma0_4_0_OFFSET_HEADER
-#define _sdma0_4_0_OFFSET_HEADER
-
-
-
-// addressBlock: sdma0_sdma0dec
-// base address: 0x4980
-#define mmSDMA0_UCODE_ADDR 0x0000
-#define mmSDMA0_UCODE_ADDR_BASE_IDX 0
-#define mmSDMA0_UCODE_DATA 0x0001
-#define mmSDMA0_UCODE_DATA_BASE_IDX 0
-#define mmSDMA0_VM_CNTL 0x0004
-#define mmSDMA0_VM_CNTL_BASE_IDX 0
-#define mmSDMA0_VM_CTX_LO 0x0005
-#define mmSDMA0_VM_CTX_LO_BASE_IDX 0
-#define mmSDMA0_VM_CTX_HI 0x0006
-#define mmSDMA0_VM_CTX_HI_BASE_IDX 0
-#define mmSDMA0_ACTIVE_FCN_ID 0x0007
-#define mmSDMA0_ACTIVE_FCN_ID_BASE_IDX 0
-#define mmSDMA0_VM_CTX_CNTL 0x0008
-#define mmSDMA0_VM_CTX_CNTL_BASE_IDX 0
-#define mmSDMA0_VIRT_RESET_REQ 0x0009
-#define mmSDMA0_VIRT_RESET_REQ_BASE_IDX 0
-#define mmSDMA0_VF_ENABLE 0x000a
-#define mmSDMA0_VF_ENABLE_BASE_IDX 0
-#define mmSDMA0_CONTEXT_REG_TYPE0 0x000b
-#define mmSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 0
-#define mmSDMA0_CONTEXT_REG_TYPE1 0x000c
-#define mmSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 0
-#define mmSDMA0_CONTEXT_REG_TYPE2 0x000d
-#define mmSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 0
-#define mmSDMA0_CONTEXT_REG_TYPE3 0x000e
-#define mmSDMA0_CONTEXT_REG_TYPE3_BASE_IDX 0
-#define mmSDMA0_PUB_REG_TYPE0 0x000f
-#define mmSDMA0_PUB_REG_TYPE0_BASE_IDX 0
-#define mmSDMA0_PUB_REG_TYPE1 0x0010
-#define mmSDMA0_PUB_REG_TYPE1_BASE_IDX 0
-#define mmSDMA0_PUB_REG_TYPE2 0x0011
-#define mmSDMA0_PUB_REG_TYPE2_BASE_IDX 0
-#define mmSDMA0_PUB_REG_TYPE3 0x0012
-#define mmSDMA0_PUB_REG_TYPE3_BASE_IDX 0
-#define mmSDMA0_MMHUB_CNTL 0x0013
-#define mmSDMA0_MMHUB_CNTL_BASE_IDX 0
-#define mmSDMA0_CONTEXT_GROUP_BOUNDARY 0x0019
-#define mmSDMA0_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
-#define mmSDMA0_POWER_CNTL 0x001a
-#define mmSDMA0_POWER_CNTL_BASE_IDX 0
-#define mmSDMA0_CLK_CTRL 0x001b
-#define mmSDMA0_CLK_CTRL_BASE_IDX 0
-#define mmSDMA0_CNTL 0x001c
-#define mmSDMA0_CNTL_BASE_IDX 0
-#define mmSDMA0_CHICKEN_BITS 0x001d
-#define mmSDMA0_CHICKEN_BITS_BASE_IDX 0
-#define mmSDMA0_GB_ADDR_CONFIG 0x001e
-#define mmSDMA0_GB_ADDR_CONFIG_BASE_IDX 0
-#define mmSDMA0_GB_ADDR_CONFIG_READ 0x001f
-#define mmSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0
-#define mmSDMA0_RB_RPTR_FETCH_HI 0x0020
-#define mmSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0
-#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
-#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
-#define mmSDMA0_RB_RPTR_FETCH 0x0022
-#define mmSDMA0_RB_RPTR_FETCH_BASE_IDX 0
-#define mmSDMA0_IB_OFFSET_FETCH 0x0023
-#define mmSDMA0_IB_OFFSET_FETCH_BASE_IDX 0
-#define mmSDMA0_PROGRAM 0x0024
-#define mmSDMA0_PROGRAM_BASE_IDX 0
-#define mmSDMA0_STATUS_REG 0x0025
-#define mmSDMA0_STATUS_REG_BASE_IDX 0
-#define mmSDMA0_STATUS1_REG 0x0026
-#define mmSDMA0_STATUS1_REG_BASE_IDX 0
-#define mmSDMA0_RD_BURST_CNTL 0x0027
-#define mmSDMA0_RD_BURST_CNTL_BASE_IDX 0
-#define mmSDMA0_HBM_PAGE_CONFIG 0x0028
-#define mmSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0
-#define mmSDMA0_UCODE_CHECKSUM 0x0029
-#define mmSDMA0_UCODE_CHECKSUM_BASE_IDX 0
-#define mmSDMA0_F32_CNTL 0x002a
-#define mmSDMA0_F32_CNTL_BASE_IDX 0
-#define mmSDMA0_FREEZE 0x002b
-#define mmSDMA0_FREEZE_BASE_IDX 0
-#define mmSDMA0_PHASE0_QUANTUM 0x002c
-#define mmSDMA0_PHASE0_QUANTUM_BASE_IDX 0
-#define mmSDMA0_PHASE1_QUANTUM 0x002d
-#define mmSDMA0_PHASE1_QUANTUM_BASE_IDX 0
-#define mmSDMA_POWER_GATING 0x002e
-#define mmSDMA_POWER_GATING_BASE_IDX 0
-#define mmSDMA_PGFSM_CONFIG 0x002f
-#define mmSDMA_PGFSM_CONFIG_BASE_IDX 0
-#define mmSDMA_PGFSM_WRITE 0x0030
-#define mmSDMA_PGFSM_WRITE_BASE_IDX 0
-#define mmSDMA_PGFSM_READ 0x0031
-#define mmSDMA_PGFSM_READ_BASE_IDX 0
-#define mmSDMA0_EDC_CONFIG 0x0032
-#define mmSDMA0_EDC_CONFIG_BASE_IDX 0
-#define mmSDMA0_BA_THRESHOLD 0x0033
-#define mmSDMA0_BA_THRESHOLD_BASE_IDX 0
-#define mmSDMA0_ID 0x0034
-#define mmSDMA0_ID_BASE_IDX 0
-#define mmSDMA0_VERSION 0x0035
-#define mmSDMA0_VERSION_BASE_IDX 0
-#define mmSDMA0_EDC_COUNTER 0x0036
-#define mmSDMA0_EDC_COUNTER_BASE_IDX 0
-#define mmSDMA0_EDC_COUNTER_CLEAR 0x0037
-#define mmSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0
-#define mmSDMA0_STATUS2_REG 0x0038
-#define mmSDMA0_STATUS2_REG_BASE_IDX 0
-#define mmSDMA0_ATOMIC_CNTL 0x0039
-#define mmSDMA0_ATOMIC_CNTL_BASE_IDX 0
-#define mmSDMA0_ATOMIC_PREOP_LO 0x003a
-#define mmSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0
-#define mmSDMA0_ATOMIC_PREOP_HI 0x003b
-#define mmSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0
-#define mmSDMA0_UTCL1_CNTL 0x003c
-#define mmSDMA0_UTCL1_CNTL_BASE_IDX 0
-#define mmSDMA0_UTCL1_WATERMK 0x003d
-#define mmSDMA0_UTCL1_WATERMK_BASE_IDX 0
-#define mmSDMA0_UTCL1_RD_STATUS 0x003e
-#define mmSDMA0_UTCL1_RD_STATUS_BASE_IDX 0
-#define mmSDMA0_UTCL1_WR_STATUS 0x003f
-#define mmSDMA0_UTCL1_WR_STATUS_BASE_IDX 0
-#define mmSDMA0_UTCL1_INV0 0x0040
-#define mmSDMA0_UTCL1_INV0_BASE_IDX 0
-#define mmSDMA0_UTCL1_INV1 0x0041
-#define mmSDMA0_UTCL1_INV1_BASE_IDX 0
-#define mmSDMA0_UTCL1_INV2 0x0042
-#define mmSDMA0_UTCL1_INV2_BASE_IDX 0
-#define mmSDMA0_UTCL1_RD_XNACK0 0x0043
-#define mmSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0
-#define mmSDMA0_UTCL1_RD_XNACK1 0x0044
-#define mmSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0
-#define mmSDMA0_UTCL1_WR_XNACK0 0x0045
-#define mmSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0
-#define mmSDMA0_UTCL1_WR_XNACK1 0x0046
-#define mmSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0
-#define mmSDMA0_UTCL1_TIMEOUT 0x0047
-#define mmSDMA0_UTCL1_TIMEOUT_BASE_IDX 0
-#define mmSDMA0_UTCL1_PAGE 0x0048
-#define mmSDMA0_UTCL1_PAGE_BASE_IDX 0
-#define mmSDMA0_POWER_CNTL_IDLE 0x0049
-#define mmSDMA0_POWER_CNTL_IDLE_BASE_IDX 0
-#define mmSDMA0_RELAX_ORDERING_LUT 0x004a
-#define mmSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0
-#define mmSDMA0_CHICKEN_BITS_2 0x004b
-#define mmSDMA0_CHICKEN_BITS_2_BASE_IDX 0
-#define mmSDMA0_STATUS3_REG 0x004c
-#define mmSDMA0_STATUS3_REG_BASE_IDX 0
-#define mmSDMA0_PHYSICAL_ADDR_LO 0x004d
-#define mmSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_PHYSICAL_ADDR_HI 0x004e
-#define mmSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_PHASE2_QUANTUM 0x004f
-#define mmSDMA0_PHASE2_QUANTUM_BASE_IDX 0
-#define mmSDMA0_ERROR_LOG 0x0050
-#define mmSDMA0_ERROR_LOG_BASE_IDX 0
-#define mmSDMA0_PUB_DUMMY_REG0 0x0051
-#define mmSDMA0_PUB_DUMMY_REG0_BASE_IDX 0
-#define mmSDMA0_PUB_DUMMY_REG1 0x0052
-#define mmSDMA0_PUB_DUMMY_REG1_BASE_IDX 0
-#define mmSDMA0_PUB_DUMMY_REG2 0x0053
-#define mmSDMA0_PUB_DUMMY_REG2_BASE_IDX 0
-#define mmSDMA0_PUB_DUMMY_REG3 0x0054
-#define mmSDMA0_PUB_DUMMY_REG3_BASE_IDX 0
-#define mmSDMA0_F32_COUNTER 0x0055
-#define mmSDMA0_F32_COUNTER_BASE_IDX 0
-#define mmSDMA0_UNBREAKABLE 0x0056
-#define mmSDMA0_UNBREAKABLE_BASE_IDX 0
-#define mmSDMA0_PERFMON_CNTL 0x0057
-#define mmSDMA0_PERFMON_CNTL_BASE_IDX 0
-#define mmSDMA0_PERFCOUNTER0_RESULT 0x0058
-#define mmSDMA0_PERFCOUNTER0_RESULT_BASE_IDX 0
-#define mmSDMA0_PERFCOUNTER1_RESULT 0x0059
-#define mmSDMA0_PERFCOUNTER1_RESULT_BASE_IDX 0
-#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
-#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0
-#define mmSDMA0_CRD_CNTL 0x005b
-#define mmSDMA0_CRD_CNTL_BASE_IDX 0
-#define mmSDMA0_MMHUB_TRUSTLVL 0x005c
-#define mmSDMA0_MMHUB_TRUSTLVL_BASE_IDX 0
-#define mmSDMA0_GPU_IOV_VIOLATION_LOG 0x005d
-#define mmSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
-#define mmSDMA0_ULV_CNTL 0x005e
-#define mmSDMA0_ULV_CNTL_BASE_IDX 0
-#define mmSDMA0_EA_DBIT_ADDR_DATA 0x0060
-#define mmSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0
-#define mmSDMA0_EA_DBIT_ADDR_INDEX 0x0061
-#define mmSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0
-#define mmSDMA0_GFX_RB_CNTL 0x0080
-#define mmSDMA0_GFX_RB_CNTL_BASE_IDX 0
-#define mmSDMA0_GFX_RB_BASE 0x0081
-#define mmSDMA0_GFX_RB_BASE_BASE_IDX 0
-#define mmSDMA0_GFX_RB_BASE_HI 0x0082
-#define mmSDMA0_GFX_RB_BASE_HI_BASE_IDX 0
-#define mmSDMA0_GFX_RB_RPTR 0x0083
-#define mmSDMA0_GFX_RB_RPTR_BASE_IDX 0
-#define mmSDMA0_GFX_RB_RPTR_HI 0x0084
-#define mmSDMA0_GFX_RB_RPTR_HI_BASE_IDX 0
-#define mmSDMA0_GFX_RB_WPTR 0x0085
-#define mmSDMA0_GFX_RB_WPTR_BASE_IDX 0
-#define mmSDMA0_GFX_RB_WPTR_HI 0x0086
-#define mmSDMA0_GFX_RB_WPTR_HI_BASE_IDX 0
-#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL 0x0087
-#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
-#define mmSDMA0_GFX_RB_RPTR_ADDR_HI 0x0088
-#define mmSDMA0_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_GFX_RB_RPTR_ADDR_LO 0x0089
-#define mmSDMA0_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_GFX_IB_CNTL 0x008a
-#define mmSDMA0_GFX_IB_CNTL_BASE_IDX 0
-#define mmSDMA0_GFX_IB_RPTR 0x008b
-#define mmSDMA0_GFX_IB_RPTR_BASE_IDX 0
-#define mmSDMA0_GFX_IB_OFFSET 0x008c
-#define mmSDMA0_GFX_IB_OFFSET_BASE_IDX 0
-#define mmSDMA0_GFX_IB_BASE_LO 0x008d
-#define mmSDMA0_GFX_IB_BASE_LO_BASE_IDX 0
-#define mmSDMA0_GFX_IB_BASE_HI 0x008e
-#define mmSDMA0_GFX_IB_BASE_HI_BASE_IDX 0
-#define mmSDMA0_GFX_IB_SIZE 0x008f
-#define mmSDMA0_GFX_IB_SIZE_BASE_IDX 0
-#define mmSDMA0_GFX_SKIP_CNTL 0x0090
-#define mmSDMA0_GFX_SKIP_CNTL_BASE_IDX 0
-#define mmSDMA0_GFX_CONTEXT_STATUS 0x0091
-#define mmSDMA0_GFX_CONTEXT_STATUS_BASE_IDX 0
-#define mmSDMA0_GFX_DOORBELL 0x0092
-#define mmSDMA0_GFX_DOORBELL_BASE_IDX 0
-#define mmSDMA0_GFX_CONTEXT_CNTL 0x0093
-#define mmSDMA0_GFX_CONTEXT_CNTL_BASE_IDX 0
-#define mmSDMA0_GFX_STATUS 0x00a8
-#define mmSDMA0_GFX_STATUS_BASE_IDX 0
-#define mmSDMA0_GFX_DOORBELL_LOG 0x00a9
-#define mmSDMA0_GFX_DOORBELL_LOG_BASE_IDX 0
-#define mmSDMA0_GFX_WATERMARK 0x00aa
-#define mmSDMA0_GFX_WATERMARK_BASE_IDX 0
-#define mmSDMA0_GFX_DOORBELL_OFFSET 0x00ab
-#define mmSDMA0_GFX_DOORBELL_OFFSET_BASE_IDX 0
-#define mmSDMA0_GFX_CSA_ADDR_LO 0x00ac
-#define mmSDMA0_GFX_CSA_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_GFX_CSA_ADDR_HI 0x00ad
-#define mmSDMA0_GFX_CSA_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_GFX_IB_SUB_REMAIN 0x00af
-#define mmSDMA0_GFX_IB_SUB_REMAIN_BASE_IDX 0
-#define mmSDMA0_GFX_PREEMPT 0x00b0
-#define mmSDMA0_GFX_PREEMPT_BASE_IDX 0
-#define mmSDMA0_GFX_DUMMY_REG 0x00b1
-#define mmSDMA0_GFX_DUMMY_REG_BASE_IDX 0
-#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
-#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
-#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_GFX_RB_AQL_CNTL 0x00b4
-#define mmSDMA0_GFX_RB_AQL_CNTL_BASE_IDX 0
-#define mmSDMA0_GFX_MINOR_PTR_UPDATE 0x00b5
-#define mmSDMA0_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_DATA0 0x00c0
-#define mmSDMA0_GFX_MIDCMD_DATA0_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_DATA1 0x00c1
-#define mmSDMA0_GFX_MIDCMD_DATA1_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_DATA2 0x00c2
-#define mmSDMA0_GFX_MIDCMD_DATA2_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_DATA3 0x00c3
-#define mmSDMA0_GFX_MIDCMD_DATA3_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_DATA4 0x00c4
-#define mmSDMA0_GFX_MIDCMD_DATA4_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_DATA5 0x00c5
-#define mmSDMA0_GFX_MIDCMD_DATA5_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_DATA6 0x00c6
-#define mmSDMA0_GFX_MIDCMD_DATA6_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_DATA7 0x00c7
-#define mmSDMA0_GFX_MIDCMD_DATA7_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_DATA8 0x00c8
-#define mmSDMA0_GFX_MIDCMD_DATA8_BASE_IDX 0
-#define mmSDMA0_GFX_MIDCMD_CNTL 0x00c9
-#define mmSDMA0_GFX_MIDCMD_CNTL_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_CNTL 0x00e0
-#define mmSDMA0_PAGE_RB_CNTL_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_BASE 0x00e1
-#define mmSDMA0_PAGE_RB_BASE_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_BASE_HI 0x00e2
-#define mmSDMA0_PAGE_RB_BASE_HI_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_RPTR 0x00e3
-#define mmSDMA0_PAGE_RB_RPTR_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_RPTR_HI 0x00e4
-#define mmSDMA0_PAGE_RB_RPTR_HI_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_WPTR 0x00e5
-#define mmSDMA0_PAGE_RB_WPTR_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_WPTR_HI 0x00e6
-#define mmSDMA0_PAGE_RB_WPTR_HI_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL 0x00e7
-#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI 0x00e8
-#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO 0x00e9
-#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_PAGE_IB_CNTL 0x00ea
-#define mmSDMA0_PAGE_IB_CNTL_BASE_IDX 0
-#define mmSDMA0_PAGE_IB_RPTR 0x00eb
-#define mmSDMA0_PAGE_IB_RPTR_BASE_IDX 0
-#define mmSDMA0_PAGE_IB_OFFSET 0x00ec
-#define mmSDMA0_PAGE_IB_OFFSET_BASE_IDX 0
-#define mmSDMA0_PAGE_IB_BASE_LO 0x00ed
-#define mmSDMA0_PAGE_IB_BASE_LO_BASE_IDX 0
-#define mmSDMA0_PAGE_IB_BASE_HI 0x00ee
-#define mmSDMA0_PAGE_IB_BASE_HI_BASE_IDX 0
-#define mmSDMA0_PAGE_IB_SIZE 0x00ef
-#define mmSDMA0_PAGE_IB_SIZE_BASE_IDX 0
-#define mmSDMA0_PAGE_SKIP_CNTL 0x00f0
-#define mmSDMA0_PAGE_SKIP_CNTL_BASE_IDX 0
-#define mmSDMA0_PAGE_CONTEXT_STATUS 0x00f1
-#define mmSDMA0_PAGE_CONTEXT_STATUS_BASE_IDX 0
-#define mmSDMA0_PAGE_DOORBELL 0x00f2
-#define mmSDMA0_PAGE_DOORBELL_BASE_IDX 0
-#define mmSDMA0_PAGE_STATUS 0x0108
-#define mmSDMA0_PAGE_STATUS_BASE_IDX 0
-#define mmSDMA0_PAGE_DOORBELL_LOG 0x0109
-#define mmSDMA0_PAGE_DOORBELL_LOG_BASE_IDX 0
-#define mmSDMA0_PAGE_WATERMARK 0x010a
-#define mmSDMA0_PAGE_WATERMARK_BASE_IDX 0
-#define mmSDMA0_PAGE_DOORBELL_OFFSET 0x010b
-#define mmSDMA0_PAGE_DOORBELL_OFFSET_BASE_IDX 0
-#define mmSDMA0_PAGE_CSA_ADDR_LO 0x010c
-#define mmSDMA0_PAGE_CSA_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_PAGE_CSA_ADDR_HI 0x010d
-#define mmSDMA0_PAGE_CSA_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_PAGE_IB_SUB_REMAIN 0x010f
-#define mmSDMA0_PAGE_IB_SUB_REMAIN_BASE_IDX 0
-#define mmSDMA0_PAGE_PREEMPT 0x0110
-#define mmSDMA0_PAGE_PREEMPT_BASE_IDX 0
-#define mmSDMA0_PAGE_DUMMY_REG 0x0111
-#define mmSDMA0_PAGE_DUMMY_REG_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI 0x0112
-#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO 0x0113
-#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_PAGE_RB_AQL_CNTL 0x0114
-#define mmSDMA0_PAGE_RB_AQL_CNTL_BASE_IDX 0
-#define mmSDMA0_PAGE_MINOR_PTR_UPDATE 0x0115
-#define mmSDMA0_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_DATA0 0x0120
-#define mmSDMA0_PAGE_MIDCMD_DATA0_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_DATA1 0x0121
-#define mmSDMA0_PAGE_MIDCMD_DATA1_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_DATA2 0x0122
-#define mmSDMA0_PAGE_MIDCMD_DATA2_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_DATA3 0x0123
-#define mmSDMA0_PAGE_MIDCMD_DATA3_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_DATA4 0x0124
-#define mmSDMA0_PAGE_MIDCMD_DATA4_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_DATA5 0x0125
-#define mmSDMA0_PAGE_MIDCMD_DATA5_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_DATA6 0x0126
-#define mmSDMA0_PAGE_MIDCMD_DATA6_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_DATA7 0x0127
-#define mmSDMA0_PAGE_MIDCMD_DATA7_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_DATA8 0x0128
-#define mmSDMA0_PAGE_MIDCMD_DATA8_BASE_IDX 0
-#define mmSDMA0_PAGE_MIDCMD_CNTL 0x0129
-#define mmSDMA0_PAGE_MIDCMD_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_CNTL 0x0140
-#define mmSDMA0_RLC0_RB_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_BASE 0x0141
-#define mmSDMA0_RLC0_RB_BASE_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_BASE_HI 0x0142
-#define mmSDMA0_RLC0_RB_BASE_HI_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_RPTR 0x0143
-#define mmSDMA0_RLC0_RB_RPTR_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_RPTR_HI 0x0144
-#define mmSDMA0_RLC0_RB_RPTR_HI_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_WPTR 0x0145
-#define mmSDMA0_RLC0_RB_WPTR_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_WPTR_HI 0x0146
-#define mmSDMA0_RLC0_RB_WPTR_HI_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL 0x0147
-#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI 0x0148
-#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO 0x0149
-#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_RLC0_IB_CNTL 0x014a
-#define mmSDMA0_RLC0_IB_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC0_IB_RPTR 0x014b
-#define mmSDMA0_RLC0_IB_RPTR_BASE_IDX 0
-#define mmSDMA0_RLC0_IB_OFFSET 0x014c
-#define mmSDMA0_RLC0_IB_OFFSET_BASE_IDX 0
-#define mmSDMA0_RLC0_IB_BASE_LO 0x014d
-#define mmSDMA0_RLC0_IB_BASE_LO_BASE_IDX 0
-#define mmSDMA0_RLC0_IB_BASE_HI 0x014e
-#define mmSDMA0_RLC0_IB_BASE_HI_BASE_IDX 0
-#define mmSDMA0_RLC0_IB_SIZE 0x014f
-#define mmSDMA0_RLC0_IB_SIZE_BASE_IDX 0
-#define mmSDMA0_RLC0_SKIP_CNTL 0x0150
-#define mmSDMA0_RLC0_SKIP_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC0_CONTEXT_STATUS 0x0151
-#define mmSDMA0_RLC0_CONTEXT_STATUS_BASE_IDX 0
-#define mmSDMA0_RLC0_DOORBELL 0x0152
-#define mmSDMA0_RLC0_DOORBELL_BASE_IDX 0
-#define mmSDMA0_RLC0_STATUS 0x0168
-#define mmSDMA0_RLC0_STATUS_BASE_IDX 0
-#define mmSDMA0_RLC0_DOORBELL_LOG 0x0169
-#define mmSDMA0_RLC0_DOORBELL_LOG_BASE_IDX 0
-#define mmSDMA0_RLC0_WATERMARK 0x016a
-#define mmSDMA0_RLC0_WATERMARK_BASE_IDX 0
-#define mmSDMA0_RLC0_DOORBELL_OFFSET 0x016b
-#define mmSDMA0_RLC0_DOORBELL_OFFSET_BASE_IDX 0
-#define mmSDMA0_RLC0_CSA_ADDR_LO 0x016c
-#define mmSDMA0_RLC0_CSA_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_RLC0_CSA_ADDR_HI 0x016d
-#define mmSDMA0_RLC0_CSA_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_RLC0_IB_SUB_REMAIN 0x016f
-#define mmSDMA0_RLC0_IB_SUB_REMAIN_BASE_IDX 0
-#define mmSDMA0_RLC0_PREEMPT 0x0170
-#define mmSDMA0_RLC0_PREEMPT_BASE_IDX 0
-#define mmSDMA0_RLC0_DUMMY_REG 0x0171
-#define mmSDMA0_RLC0_DUMMY_REG_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0x0172
-#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0x0173
-#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_RLC0_RB_AQL_CNTL 0x0174
-#define mmSDMA0_RLC0_RB_AQL_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC0_MINOR_PTR_UPDATE 0x0175
-#define mmSDMA0_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_DATA0 0x0180
-#define mmSDMA0_RLC0_MIDCMD_DATA0_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_DATA1 0x0181
-#define mmSDMA0_RLC0_MIDCMD_DATA1_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_DATA2 0x0182
-#define mmSDMA0_RLC0_MIDCMD_DATA2_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_DATA3 0x0183
-#define mmSDMA0_RLC0_MIDCMD_DATA3_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_DATA4 0x0184
-#define mmSDMA0_RLC0_MIDCMD_DATA4_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_DATA5 0x0185
-#define mmSDMA0_RLC0_MIDCMD_DATA5_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_DATA6 0x0186
-#define mmSDMA0_RLC0_MIDCMD_DATA6_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_DATA7 0x0187
-#define mmSDMA0_RLC0_MIDCMD_DATA7_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_DATA8 0x0188
-#define mmSDMA0_RLC0_MIDCMD_DATA8_BASE_IDX 0
-#define mmSDMA0_RLC0_MIDCMD_CNTL 0x0189
-#define mmSDMA0_RLC0_MIDCMD_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_CNTL 0x01a0
-#define mmSDMA0_RLC1_RB_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_BASE 0x01a1
-#define mmSDMA0_RLC1_RB_BASE_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_BASE_HI 0x01a2
-#define mmSDMA0_RLC1_RB_BASE_HI_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_RPTR 0x01a3
-#define mmSDMA0_RLC1_RB_RPTR_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_RPTR_HI 0x01a4
-#define mmSDMA0_RLC1_RB_RPTR_HI_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_WPTR 0x01a5
-#define mmSDMA0_RLC1_RB_WPTR_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_WPTR_HI 0x01a6
-#define mmSDMA0_RLC1_RB_WPTR_HI_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL 0x01a7
-#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI 0x01a8
-#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO 0x01a9
-#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_RLC1_IB_CNTL 0x01aa
-#define mmSDMA0_RLC1_IB_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC1_IB_RPTR 0x01ab
-#define mmSDMA0_RLC1_IB_RPTR_BASE_IDX 0
-#define mmSDMA0_RLC1_IB_OFFSET 0x01ac
-#define mmSDMA0_RLC1_IB_OFFSET_BASE_IDX 0
-#define mmSDMA0_RLC1_IB_BASE_LO 0x01ad
-#define mmSDMA0_RLC1_IB_BASE_LO_BASE_IDX 0
-#define mmSDMA0_RLC1_IB_BASE_HI 0x01ae
-#define mmSDMA0_RLC1_IB_BASE_HI_BASE_IDX 0
-#define mmSDMA0_RLC1_IB_SIZE 0x01af
-#define mmSDMA0_RLC1_IB_SIZE_BASE_IDX 0
-#define mmSDMA0_RLC1_SKIP_CNTL 0x01b0
-#define mmSDMA0_RLC1_SKIP_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC1_CONTEXT_STATUS 0x01b1
-#define mmSDMA0_RLC1_CONTEXT_STATUS_BASE_IDX 0
-#define mmSDMA0_RLC1_DOORBELL 0x01b2
-#define mmSDMA0_RLC1_DOORBELL_BASE_IDX 0
-#define mmSDMA0_RLC1_STATUS 0x01c8
-#define mmSDMA0_RLC1_STATUS_BASE_IDX 0
-#define mmSDMA0_RLC1_DOORBELL_LOG 0x01c9
-#define mmSDMA0_RLC1_DOORBELL_LOG_BASE_IDX 0
-#define mmSDMA0_RLC1_WATERMARK 0x01ca
-#define mmSDMA0_RLC1_WATERMARK_BASE_IDX 0
-#define mmSDMA0_RLC1_DOORBELL_OFFSET 0x01cb
-#define mmSDMA0_RLC1_DOORBELL_OFFSET_BASE_IDX 0
-#define mmSDMA0_RLC1_CSA_ADDR_LO 0x01cc
-#define mmSDMA0_RLC1_CSA_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_RLC1_CSA_ADDR_HI 0x01cd
-#define mmSDMA0_RLC1_CSA_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_RLC1_IB_SUB_REMAIN 0x01cf
-#define mmSDMA0_RLC1_IB_SUB_REMAIN_BASE_IDX 0
-#define mmSDMA0_RLC1_PREEMPT 0x01d0
-#define mmSDMA0_RLC1_PREEMPT_BASE_IDX 0
-#define mmSDMA0_RLC1_DUMMY_REG 0x01d1
-#define mmSDMA0_RLC1_DUMMY_REG_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI 0x01d2
-#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO 0x01d3
-#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
-#define mmSDMA0_RLC1_RB_AQL_CNTL 0x01d4
-#define mmSDMA0_RLC1_RB_AQL_CNTL_BASE_IDX 0
-#define mmSDMA0_RLC1_MINOR_PTR_UPDATE 0x01d5
-#define mmSDMA0_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_DATA0 0x01e0
-#define mmSDMA0_RLC1_MIDCMD_DATA0_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_DATA1 0x01e1
-#define mmSDMA0_RLC1_MIDCMD_DATA1_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_DATA2 0x01e2
-#define mmSDMA0_RLC1_MIDCMD_DATA2_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_DATA3 0x01e3
-#define mmSDMA0_RLC1_MIDCMD_DATA3_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_DATA4 0x01e4
-#define mmSDMA0_RLC1_MIDCMD_DATA4_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_DATA5 0x01e5
-#define mmSDMA0_RLC1_MIDCMD_DATA5_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_DATA6 0x01e6
-#define mmSDMA0_RLC1_MIDCMD_DATA6_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_DATA7 0x01e7
-#define mmSDMA0_RLC1_MIDCMD_DATA7_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_DATA8 0x01e8
-#define mmSDMA0_RLC1_MIDCMD_DATA8_BASE_IDX 0
-#define mmSDMA0_RLC1_MIDCMD_CNTL 0x01e9
-#define mmSDMA0_RLC1_MIDCMD_CNTL_BASE_IDX 0
-
-#endif
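[Editor's note, not part of the patch: the offset header above pairs each register with a DWORD offset and a _BASE_IDX, while the sh_mask header that follows defines a __SHIFT/_MASK pair for every field. A minimal sketch of the conventional field read/update pattern these macros support, using the SDMA0_CNTL.TRAP_ENABLE field defined below; the helpers are illustrative, not driver code.]

#include <stdint.h>

#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0            /* from the sh_mask header below */
#define SDMA0_CNTL__TRAP_ENABLE_MASK   0x00000001L

/* Extract a field: mask off its bits, then shift them down to bit 0. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Update a field: clear its bits, then OR in the shifted new value. */
static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t cntl = 0;

	cntl = set_field(cntl, SDMA0_CNTL__TRAP_ENABLE_MASK,
			 SDMA0_CNTL__TRAP_ENABLE__SHIFT, 1);
	return get_field(cntl, SDMA0_CNTL__TRAP_ENABLE_MASK,
			 SDMA0_CNTL__TRAP_ENABLE__SHIFT) == 1 ? 0 : 1;
}

[This is the same shift/mask pattern that the driver's REG_GET_FIELD/REG_SET_FIELD style helpers wrap.]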
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_sh_mask.h
deleted file mode 100644
index 412ae45..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_sh_mask.h
+++ /dev/null
@@ -1,1852 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _sdma0_4_0_SH_MASK_HEADER
-#define _sdma0_4_0_SH_MASK_HEADER
-
-
-// addressBlock: sdma0_sdma0dec
-//SDMA0_UCODE_ADDR
-#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0
-#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL
-//SDMA0_UCODE_DATA
-#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0
-#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
-//SDMA0_VM_CNTL
-#define SDMA0_VM_CNTL__CMD__SHIFT 0x0
-#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL
-//SDMA0_VM_CTX_LO
-#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2
-#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_VM_CTX_HI
-#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0
-#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_ACTIVE_FCN_ID
-#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
-#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
-#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
-#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
-#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
-#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
-//SDMA0_VM_CTX_CNTL
-#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0
-#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4
-#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L
-#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L
-//SDMA0_VIRT_RESET_REQ
-#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0
-#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f
-#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
-#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L
-//SDMA0_VF_ENABLE
-#define SDMA0_VF_ENABLE__VF_ENABLE__SHIFT 0x0
-#define SDMA0_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
-//SDMA0_CONTEXT_REG_TYPE0
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL__SHIFT 0x0
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE__SHIFT 0x1
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI__SHIFT 0x2
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR__SHIFT 0x3
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI__SHIFT 0x4
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR__SHIFT 0x5
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI__SHIFT 0x6
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL__SHIFT 0xa
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR__SHIFT 0xb
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET__SHIFT 0xc
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO__SHIFT 0xd
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI__SHIFT 0xe
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE__SHIFT 0xf
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL__SHIFT 0x10
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS__SHIFT 0x11
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL__SHIFT 0x12
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL__SHIFT 0x13
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL_MASK 0x00000001L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_MASK 0x00000002L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI_MASK 0x00000004L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_MASK 0x00000008L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI_MASK 0x00000010L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_MASK 0x00000020L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI_MASK 0x00000040L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL_MASK 0x00000400L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR_MASK 0x00000800L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET_MASK 0x00001000L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO_MASK 0x00002000L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI_MASK 0x00004000L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE_MASK 0x00008000L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL_MASK 0x00010000L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS_MASK 0x00020000L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL_MASK 0x00040000L
-#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL_MASK 0x00080000L
-//SDMA0_CONTEXT_REG_TYPE1
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS__SHIFT 0x8
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG__SHIFT 0x9
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK__SHIFT 0xa
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET__SHIFT 0xb
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO__SHIFT 0xc
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI__SHIFT 0xd
-#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN__SHIFT 0xf
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT__SHIFT 0x10
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG__SHIFT 0x11
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL__SHIFT 0x14
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
-#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS_MASK 0x00000100L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG_MASK 0x00000200L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK_MASK 0x00000400L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET_MASK 0x00000800L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO_MASK 0x00001000L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI_MASK 0x00002000L
-#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN_MASK 0x00008000L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT_MASK 0x00010000L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG_MASK 0x00020000L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL_MASK 0x00100000L
-#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
-#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
-//SDMA0_CONTEXT_REG_TYPE2
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0__SHIFT 0x0
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1__SHIFT 0x1
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2__SHIFT 0x2
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3__SHIFT 0x3
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4__SHIFT 0x4
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5__SHIFT 0x5
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6__SHIFT 0x6
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7__SHIFT 0x7
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8__SHIFT 0x8
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL__SHIFT 0x9
-#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0_MASK 0x00000001L
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1_MASK 0x00000002L
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2_MASK 0x00000004L
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3_MASK 0x00000008L
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4_MASK 0x00000010L
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5_MASK 0x00000020L
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6_MASK 0x00000040L
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7_MASK 0x00000080L
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8_MASK 0x00000100L
-#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL_MASK 0x00000200L
-#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
-//SDMA0_CONTEXT_REG_TYPE3
-#define SDMA0_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
-#define SDMA0_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
-//SDMA0_PUB_REG_TYPE0
-#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR__SHIFT 0x0
-#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA__SHIFT 0x1
-#define SDMA0_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL__SHIFT 0x4
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO__SHIFT 0x5
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI__SHIFT 0x6
-#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID__SHIFT 0x7
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL__SHIFT 0x8
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ__SHIFT 0x9
-#define SDMA0_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0__SHIFT 0xb
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1__SHIFT 0xc
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2__SHIFT 0xd
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3__SHIFT 0xe
-#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0__SHIFT 0xf
-#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1__SHIFT 0x10
-#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2__SHIFT 0x11
-#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3__SHIFT 0x12
-#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL__SHIFT 0x13
-#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
-#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL__SHIFT 0x1b
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d
-#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e
-#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f
-#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR_MASK 0x00000001L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA_MASK 0x00000002L
-#define SDMA0_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL_MASK 0x00000010L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO_MASK 0x00000020L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI_MASK 0x00000040L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID_MASK 0x00000080L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL_MASK 0x00000100L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ_MASK 0x00000200L
-#define SDMA0_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0_MASK 0x00000800L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1_MASK 0x00001000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2_MASK 0x00002000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3_MASK 0x00004000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0_MASK 0x00008000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1_MASK 0x00010000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2_MASK 0x00020000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3_MASK 0x00040000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL_MASK 0x00080000L
-#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL_MASK 0x08000000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L
-#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L
-//SDMA0_PUB_REG_TYPE1
-#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x0
-#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
-#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x2
-#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3
-#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4
-#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5
-#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6
-#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL__SHIFT 0x7
-#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9
-#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL__SHIFT 0xa
-#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb
-#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM__SHIFT 0xc
-#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM__SHIFT 0xd
-#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
-#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
-#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
-#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
-#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12
-#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13
-#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14
-#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15
-#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16
-#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17
-#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18
-#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19
-#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a
-#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS__SHIFT 0x1e
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1f
-#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000001L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000004L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL_MASK 0x00000080L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL_MASK 0x00000400L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM_MASK 0x00001000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM_MASK 0x00002000L
-#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
-#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
-#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
-#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS_MASK 0x40000000L
-#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS_MASK 0x80000000L
-//SDMA0_PUB_REG_TYPE2
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x0
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x1
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x2
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x3
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x4
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x5
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x6
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT__SHIFT 0x7
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE__SHIFT 0x8
-#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE__SHIFT 0x9
-#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa
-#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb
-#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM__SHIFT 0xf
-#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14
-#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE__SHIFT 0x16
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL__SHIFT 0x17
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT__SHIFT 0x18
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT__SHIFT 0x19
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
-#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b
-#define SDMA0_PUB_REG_TYPE2__SDMA0_MMHUB_TRUSTLVL__SHIFT 0x1c
-#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
-#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL__SHIFT 0x1e
-#define SDMA0_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000001L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000002L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000004L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000008L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000010L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000020L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000040L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT_MASK 0x00000080L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE_MASK 0x00000100L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE_MASK 0x00000200L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM_MASK 0x00008000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE_MASK 0x00400000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL_MASK 0x00800000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT_MASK 0x01000000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT_MASK 0x02000000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_MMHUB_TRUSTLVL_MASK 0x10000000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
-#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL_MASK 0x40000000L
-#define SDMA0_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
-//SDMA0_PUB_REG_TYPE3
-#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0
-#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1
-#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x2
-#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L
-#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
-#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL
-//SDMA0_MMHUB_CNTL
-#define SDMA0_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
-#define SDMA0_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
-//SDMA0_CONTEXT_GROUP_BOUNDARY
-#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
-#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
-//SDMA0_POWER_CNTL
-#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
-#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
-#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2
-#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
-#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
-#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
-#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
-#define SDMA0_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
-#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L
-#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L
-#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L
-#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
-#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
-#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
-#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
-#define SDMA0_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
-//SDMA0_CLK_CTRL
-#define SDMA0_CLK_CTRL__ON_DELAY__SHIFT 0x0
-#define SDMA0_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
-#define SDMA0_CLK_CTRL__RESERVED__SHIFT 0xc
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
-#define SDMA0_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
-#define SDMA0_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
-#define SDMA0_CLK_CTRL__RESERVED_MASK 0x00FFF000L
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
-#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
-//SDMA0_CNTL
-#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
-#define SDMA0_CNTL__UTC_L1_ENABLE__SHIFT 0x1
-#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
-#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
-#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
-#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
-#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
-#define SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
-#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
-#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
-#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
-#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L
-#define SDMA0_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
-#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
-#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
-#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
-#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
-#define SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
-#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
-#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
-#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
-//SDMA0_CHICKEN_BITS
-#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
-#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
-#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
-#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
-#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
-#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
-#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
-#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
-#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
-#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
-#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
-#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
-#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
-#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
-#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
-#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
-#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
-#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
-#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
-#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
-#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
-#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
-#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
-#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
-#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
-#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
-//SDMA0_GB_ADDR_CONFIG
-#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
-#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
-#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
-#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
-#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
-#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
-#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
-#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
-#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
-#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
-//SDMA0_GB_ADDR_CONFIG_READ
-#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
-#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
-#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
-#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
-#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
-#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
-#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
-#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
-#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
-#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
-//SDMA0_RB_RPTR_FETCH_HI
-#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
-#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL
-#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
-#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
-//SDMA0_RB_RPTR_FETCH
-#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
-#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
-//SDMA0_IB_OFFSET_FETCH
-#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
-#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
-//SDMA0_PROGRAM
-#define SDMA0_PROGRAM__STREAM__SHIFT 0x0
-#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL
-//SDMA0_STATUS_REG
-#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0
-#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1
-#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2
-#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3
-#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
-#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
-#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
-#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
-#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
-#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9
-#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa
-#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
-#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc
-#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
-#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe
-#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
-#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
-#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
-#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
-#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
-#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
-#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
-#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
-#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
-#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a
-#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
-#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
-#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e
-#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
-#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L
-#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L
-#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L
-#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L
-#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
-#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
-#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
-#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
-#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
-#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L
-#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L
-#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
-#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L
-#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
-#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
-#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
-#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
-#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
-#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
-#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
-#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
-#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
-#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
-#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
-#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L
-#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
-#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
-#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L
-#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
-//SDMA0_STATUS1_REG
-#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
-#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
-#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
-#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
-#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
-#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
-#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
-#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
-#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
-#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
-#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
-#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xf
-#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
-#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
-#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
-#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
-#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
-#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
-#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
-#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
-#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
-#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
-#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
-#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
-#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
-#define SDMA0_STATUS1_REG__EX_START_MASK 0x00008000L
-#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
-#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
-//SDMA0_RD_BURST_CNTL
-#define SDMA0_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
-#define SDMA0_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
-//SDMA0_HBM_PAGE_CONFIG
-#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
-#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
-//SDMA0_UCODE_CHECKSUM
-#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0
-#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
-//SDMA0_F32_CNTL
-#define SDMA0_F32_CNTL__HALT__SHIFT 0x0
-#define SDMA0_F32_CNTL__STEP__SHIFT 0x1
-#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L
-#define SDMA0_F32_CNTL__STEP_MASK 0x00000002L
-//SDMA0_FREEZE
-#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0
-#define SDMA0_FREEZE__FREEZE__SHIFT 0x4
-#define SDMA0_FREEZE__FROZEN__SHIFT 0x5
-#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6
-#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L
-#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L
-#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L
-#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L
-//SDMA0_PHASE0_QUANTUM
-#define SDMA0_PHASE0_QUANTUM__UNIT__SHIFT 0x0
-#define SDMA0_PHASE0_QUANTUM__VALUE__SHIFT 0x8
-#define SDMA0_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
-#define SDMA0_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
-#define SDMA0_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
-#define SDMA0_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
-//SDMA0_PHASE1_QUANTUM
-#define SDMA0_PHASE1_QUANTUM__UNIT__SHIFT 0x0
-#define SDMA0_PHASE1_QUANTUM__VALUE__SHIFT 0x8
-#define SDMA0_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
-#define SDMA0_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
-#define SDMA0_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
-#define SDMA0_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
-//SDMA_POWER_GATING
-#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION__SHIFT 0x0
-#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION__SHIFT 0x1
-#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ__SHIFT 0x2
-#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ__SHIFT 0x3
-#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
-#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION_MASK 0x00000001L
-#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION_MASK 0x00000002L
-#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ_MASK 0x00000004L
-#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ_MASK 0x00000008L
-#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
-//SDMA_PGFSM_CONFIG
-#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
-#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
-#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
-#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
-#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
-#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
-#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd
-#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
-#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
-#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
-#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
-#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
-#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
-#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
-#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
-#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
-#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
-#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
-//SDMA_PGFSM_WRITE
-#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
-#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
-//SDMA_PGFSM_READ
-#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0
-#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
-//SDMA0_EDC_CONFIG
-#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1
-#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
-#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
-#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
-//SDMA0_BA_THRESHOLD
-#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0
-#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
-#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
-#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
-//SDMA0_ID
-#define SDMA0_ID__DEVICE_ID__SHIFT 0x0
-#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL
-//SDMA0_VERSION
-#define SDMA0_VERSION__MINVER__SHIFT 0x0
-#define SDMA0_VERSION__MAJVER__SHIFT 0x8
-#define SDMA0_VERSION__REV__SHIFT 0x10
-#define SDMA0_VERSION__MINVER_MASK 0x0000007FL
-#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L
-#define SDMA0_VERSION__REV_MASK 0x003F0000L
-//SDMA0_EDC_COUNTER
-#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
-#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
-#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
-#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
-#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
-#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
-#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
-#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
-#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
-#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
-#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
-#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
-#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
-#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
-#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
-#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
-#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
-#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
-#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
-//SDMA0_EDC_COUNTER_CLEAR
-#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
-#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
-//SDMA0_STATUS2_REG
-#define SDMA0_STATUS2_REG__ID__SHIFT 0x0
-#define SDMA0_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2
-#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10
-#define SDMA0_STATUS2_REG__ID_MASK 0x00000003L
-#define SDMA0_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL
-#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
-//SDMA0_ATOMIC_CNTL
-#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
-#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
-#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
-#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
-//SDMA0_ATOMIC_PREOP_LO
-#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
-#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
-//SDMA0_ATOMIC_PREOP_HI
-#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
-#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
-//SDMA0_UTCL1_CNTL
-#define SDMA0_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
-#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
-#define SDMA0_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
-#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
-#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
-#define SDMA0_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
-#define SDMA0_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
-#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
-#define SDMA0_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
-#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
-#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
-#define SDMA0_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
-//SDMA0_UTCL1_WATERMK
-#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
-#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0xa
-#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x12
-#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x1a
-#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000003FFL
-#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0003FC00L
-#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x03FC0000L
-#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFC000000L
-//SDMA0_UTCL1_RD_STATUS
-#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
-#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
-#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
-#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
-#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
-#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
-#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
-#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
-#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
-#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
-#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
-#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
-#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
-#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
-#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
-#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
-#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
-#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
-#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
-#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
-#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
-#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
-#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
-#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
-#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
-#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
-#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
-#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
-#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
-#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
-#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
-#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
-#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
-#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
-#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
-#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
-#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
-#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
-#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
-#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
-#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
-#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
-#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
-#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
-#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
-//SDMA0_UTCL1_WR_STATUS
-#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
-#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
-#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
-#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
-#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
-#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
-#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
-#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
-#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
-#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
-#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
-#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
-#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
-#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
-#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
-#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
-#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
-#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
-#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
-#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
-#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
-#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
-#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
-#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
-#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
-#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
-#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
-#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
-#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
-#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
-#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
-#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
-#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
-#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
-#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
-#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
-#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
-#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
-#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
-#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
-#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
-#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
-#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
-#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
-#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
-#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
-#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
-//SDMA0_UTCL1_INV0
-#define SDMA0_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
-#define SDMA0_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
-#define SDMA0_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
-#define SDMA0_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
-#define SDMA0_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
-#define SDMA0_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
-#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
-#define SDMA0_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
-#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
-#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
-#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
-#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
-#define SDMA0_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
-#define SDMA0_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
-#define SDMA0_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
-#define SDMA0_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
-#define SDMA0_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
-#define SDMA0_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
-#define SDMA0_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
-#define SDMA0_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
-#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
-#define SDMA0_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
-#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
-#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
-#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
-#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
-#define SDMA0_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
-#define SDMA0_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
-//SDMA0_UTCL1_INV1
-#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
-#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
-//SDMA0_UTCL1_INV2
-#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
-#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
-//SDMA0_UTCL1_RD_XNACK0
-#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
-#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
-//SDMA0_UTCL1_RD_XNACK1
-#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
-#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
-#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
-#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
-#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
-#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
-#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
-#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
-//SDMA0_UTCL1_WR_XNACK0
-#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
-#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
-//SDMA0_UTCL1_WR_XNACK1
-#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
-#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
-#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
-#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
-#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
-#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
-#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
-#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
-//SDMA0_UTCL1_TIMEOUT
-#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
-#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
-#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
-#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
-//SDMA0_UTCL1_PAGE
-#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
-#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
-#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
-#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
-#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
-#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
-#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
-#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
-//SDMA0_POWER_CNTL_IDLE
-#define SDMA0_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
-#define SDMA0_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
-#define SDMA0_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
-#define SDMA0_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
-#define SDMA0_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
-#define SDMA0_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
-//SDMA0_RELAX_ORDERING_LUT
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
-#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
-#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
-#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
-#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
-#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
-#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
-#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
-#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
-#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
-#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
-#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
-#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
-#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
-#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
-#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
-#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
-#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
-#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
-#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
-#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
-#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
-#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
-#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
-#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
-#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
-#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
-#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
-#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
-#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
-//SDMA0_CHICKEN_BITS_2
-#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
-#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
-//SDMA0_STATUS3_REG
-#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
-#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
-#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
-#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
-#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
-#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
-//SDMA0_PHYSICAL_ADDR_LO
-#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
-#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
-#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
-#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
-#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
-#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
-#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
-#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
-//SDMA0_PHYSICAL_ADDR_HI
-#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
-//SDMA0_PHASE2_QUANTUM
-#define SDMA0_PHASE2_QUANTUM__UNIT__SHIFT 0x0
-#define SDMA0_PHASE2_QUANTUM__VALUE__SHIFT 0x8
-#define SDMA0_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
-#define SDMA0_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
-#define SDMA0_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
-#define SDMA0_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
-//SDMA0_ERROR_LOG
-#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0
-#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10
-#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
-#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L
-//SDMA0_PUB_DUMMY_REG0
-#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
-#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
-//SDMA0_PUB_DUMMY_REG1
-#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
-#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
-//SDMA0_PUB_DUMMY_REG2
-#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
-#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
-//SDMA0_PUB_DUMMY_REG3
-#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
-#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
-//SDMA0_F32_COUNTER
-#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0
-#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
-//SDMA0_UNBREAKABLE
-#define SDMA0_UNBREAKABLE__VALUE__SHIFT 0x0
-#define SDMA0_UNBREAKABLE__VALUE_MASK 0x00000001L
-//SDMA0_PERFMON_CNTL
-#define SDMA0_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
-#define SDMA0_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
-#define SDMA0_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
-#define SDMA0_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
-#define SDMA0_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
-#define SDMA0_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
-#define SDMA0_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
-#define SDMA0_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
-#define SDMA0_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
-#define SDMA0_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
-#define SDMA0_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
-#define SDMA0_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
-//SDMA0_PERFCOUNTER0_RESULT
-#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
-#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
-//SDMA0_PERFCOUNTER1_RESULT
-#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
-#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
-//SDMA0_PERFCOUNTER_TAG_DELAY_RANGE
-#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
-#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
-#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
-#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
-#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
-#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
-//SDMA0_CRD_CNTL
-#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
-#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
-#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
-#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
-//SDMA0_MMHUB_TRUSTLVL
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x3
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x6
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0x9
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0xc
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0xf
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x12
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x15
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x00000007L
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x00000038L
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x000001C0L
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x00000E00L
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x00007000L
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00038000L
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x001C0000L
-#define SDMA0_MMHUB_TRUSTLVL__SECFLAG7_MASK 0x00E00000L
-//SDMA0_GPU_IOV_VIOLATION_LOG
-#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
-#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
-#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
-#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12
-#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
-#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
-#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
-#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
-#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
-#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
-#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L
-#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
-#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
-#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
-//SDMA0_ULV_CNTL
-#define SDMA0_ULV_CNTL__HYSTERESIS__SHIFT 0x0
-#define SDMA0_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
-#define SDMA0_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
-#define SDMA0_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
-#define SDMA0_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
-#define SDMA0_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
-#define SDMA0_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
-#define SDMA0_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
-//SDMA0_EA_DBIT_ADDR_DATA
-#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
-#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
-//SDMA0_EA_DBIT_ADDR_INDEX
-#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
-#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
-//SDMA0_GFX_RB_CNTL
-#define SDMA0_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
-#define SDMA0_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
-#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
-#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
-#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
-#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
-#define SDMA0_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
-#define SDMA0_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
-#define SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
-#define SDMA0_GFX_RB_CNTL__RB_SIZE_MASK 0x0000007EL
-#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
-#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
-#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
-#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
-#define SDMA0_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
-#define SDMA0_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
-//SDMA0_GFX_RB_BASE
-#define SDMA0_GFX_RB_BASE__ADDR__SHIFT 0x0
-#define SDMA0_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_GFX_RB_BASE_HI
-#define SDMA0_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA0_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
-//SDMA0_GFX_RB_RPTR
-#define SDMA0_GFX_RB_RPTR__OFFSET__SHIFT 0x0
-#define SDMA0_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_GFX_RB_RPTR_HI
-#define SDMA0_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA0_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_GFX_RB_WPTR
-#define SDMA0_GFX_RB_WPTR__OFFSET__SHIFT 0x0
-#define SDMA0_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_GFX_RB_WPTR_HI
-#define SDMA0_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA0_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_GFX_RB_WPTR_POLL_CNTL
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
-#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
-//SDMA0_GFX_RB_RPTR_ADDR_HI
-#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_GFX_RB_RPTR_ADDR_LO
-#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_GFX_IB_CNTL
-#define SDMA0_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
-#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
-#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
-#define SDMA0_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
-#define SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
-#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
-#define SDMA0_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
-//SDMA0_GFX_IB_RPTR
-#define SDMA0_GFX_IB_RPTR__OFFSET__SHIFT 0x2
-#define SDMA0_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
-//SDMA0_GFX_IB_OFFSET
-#define SDMA0_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA0_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
-//SDMA0_GFX_IB_BASE_LO
-#define SDMA0_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
-#define SDMA0_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
-//SDMA0_GFX_IB_BASE_HI
-#define SDMA0_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA0_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_GFX_IB_SIZE
-#define SDMA0_GFX_IB_SIZE__SIZE__SHIFT 0x0
-#define SDMA0_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
-//SDMA0_GFX_SKIP_CNTL
-#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
-#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
-//SDMA0_GFX_CONTEXT_STATUS
-#define SDMA0_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
-#define SDMA0_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
-#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
-#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
-#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
-#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
-#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
-#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
-#define SDMA0_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
-#define SDMA0_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
-#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
-#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
-#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
-#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
-#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
-#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
-//SDMA0_GFX_DOORBELL
-#define SDMA0_GFX_DOORBELL__ENABLE__SHIFT 0x1c
-#define SDMA0_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
-#define SDMA0_GFX_DOORBELL__ENABLE_MASK 0x10000000L
-#define SDMA0_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
-//SDMA0_GFX_CONTEXT_CNTL
-#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
-#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
-//SDMA0_GFX_STATUS
-#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
-#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
-#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
-#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
-//SDMA0_GFX_DOORBELL_LOG
-#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
-#define SDMA0_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
-#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
-#define SDMA0_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
-//SDMA0_GFX_WATERMARK
-#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
-#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
-#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
-#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
-//SDMA0_GFX_DOORBELL_OFFSET
-#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
-//SDMA0_GFX_CSA_ADDR_LO
-#define SDMA0_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_GFX_CSA_ADDR_HI
-#define SDMA0_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_GFX_IB_SUB_REMAIN
-#define SDMA0_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
-#define SDMA0_GFX_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
-//SDMA0_GFX_PREEMPT
-#define SDMA0_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
-#define SDMA0_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
-//SDMA0_GFX_DUMMY_REG
-#define SDMA0_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
-#define SDMA0_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
-//SDMA0_GFX_RB_WPTR_POLL_ADDR_HI
-#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_GFX_RB_WPTR_POLL_ADDR_LO
-#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_GFX_RB_AQL_CNTL
-#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
-#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
-#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
-#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
-#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
-#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
-//SDMA0_GFX_MINOR_PTR_UPDATE
-#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
-#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
-//SDMA0_GFX_MIDCMD_DATA0
-#define SDMA0_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
-//SDMA0_GFX_MIDCMD_DATA1
-#define SDMA0_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
-//SDMA0_GFX_MIDCMD_DATA2
-#define SDMA0_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
-//SDMA0_GFX_MIDCMD_DATA3
-#define SDMA0_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
-//SDMA0_GFX_MIDCMD_DATA4
-#define SDMA0_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
-//SDMA0_GFX_MIDCMD_DATA5
-#define SDMA0_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
-//SDMA0_GFX_MIDCMD_DATA6
-#define SDMA0_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
-//SDMA0_GFX_MIDCMD_DATA7
-#define SDMA0_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
-//SDMA0_GFX_MIDCMD_DATA8
-#define SDMA0_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
-//SDMA0_GFX_MIDCMD_CNTL
-#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
-#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
-#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
-#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
-#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
-#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
-#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
-#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
-//SDMA0_PAGE_RB_CNTL
-#define SDMA0_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
-#define SDMA0_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
-#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
-#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
-#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
-#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
-#define SDMA0_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
-#define SDMA0_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
-#define SDMA0_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
-#define SDMA0_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000007EL
-#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
-#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
-#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
-#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
-#define SDMA0_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
-#define SDMA0_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
-//SDMA0_PAGE_RB_BASE
-#define SDMA0_PAGE_RB_BASE__ADDR__SHIFT 0x0
-#define SDMA0_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_RB_BASE_HI
-#define SDMA0_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA0_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
-//SDMA0_PAGE_RB_RPTR
-#define SDMA0_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
-#define SDMA0_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_RB_RPTR_HI
-#define SDMA0_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA0_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_RB_WPTR
-#define SDMA0_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
-#define SDMA0_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_RB_WPTR_HI
-#define SDMA0_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA0_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_RB_WPTR_POLL_CNTL
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
-#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
-//SDMA0_PAGE_RB_RPTR_ADDR_HI
-#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_RB_RPTR_ADDR_LO
-#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_PAGE_IB_CNTL
-#define SDMA0_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
-#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
-#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
-#define SDMA0_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
-#define SDMA0_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
-#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
-#define SDMA0_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
-//SDMA0_PAGE_IB_RPTR
-#define SDMA0_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
-#define SDMA0_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
-//SDMA0_PAGE_IB_OFFSET
-#define SDMA0_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA0_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
-//SDMA0_PAGE_IB_BASE_LO
-#define SDMA0_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
-#define SDMA0_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
-//SDMA0_PAGE_IB_BASE_HI
-#define SDMA0_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA0_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_IB_SIZE
-#define SDMA0_PAGE_IB_SIZE__SIZE__SHIFT 0x0
-#define SDMA0_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
-//SDMA0_PAGE_SKIP_CNTL
-#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
-#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
-//SDMA0_PAGE_CONTEXT_STATUS
-#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
-#define SDMA0_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
-#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
-#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
-#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
-#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
-#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
-#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
-#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
-#define SDMA0_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
-#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
-#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
-#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
-#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
-#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
-#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
-//SDMA0_PAGE_DOORBELL
-#define SDMA0_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
-#define SDMA0_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
-#define SDMA0_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
-#define SDMA0_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
-//SDMA0_PAGE_STATUS
-#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
-#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
-#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
-#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
-//SDMA0_PAGE_DOORBELL_LOG
-#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
-#define SDMA0_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
-#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
-#define SDMA0_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
-//SDMA0_PAGE_WATERMARK
-#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
-#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
-#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
-#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
-//SDMA0_PAGE_DOORBELL_OFFSET
-#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
-//SDMA0_PAGE_CSA_ADDR_LO
-#define SDMA0_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_PAGE_CSA_ADDR_HI
-#define SDMA0_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_IB_SUB_REMAIN
-#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
-#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
-//SDMA0_PAGE_PREEMPT
-#define SDMA0_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
-#define SDMA0_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
-//SDMA0_PAGE_DUMMY_REG
-#define SDMA0_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
-#define SDMA0_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI
-#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO
-#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_PAGE_RB_AQL_CNTL
-#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
-#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
-#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
-#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
-#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
-#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
-//SDMA0_PAGE_MINOR_PTR_UPDATE
-#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
-#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
-//SDMA0_PAGE_MIDCMD_DATA0
-#define SDMA0_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_MIDCMD_DATA1
-#define SDMA0_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_MIDCMD_DATA2
-#define SDMA0_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_MIDCMD_DATA3
-#define SDMA0_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_MIDCMD_DATA4
-#define SDMA0_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_MIDCMD_DATA5
-#define SDMA0_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_MIDCMD_DATA6
-#define SDMA0_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_MIDCMD_DATA7
-#define SDMA0_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_MIDCMD_DATA8
-#define SDMA0_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
-//SDMA0_PAGE_MIDCMD_CNTL
-#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
-#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
-#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
-#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
-#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
-#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
-#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
-#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
-//SDMA0_RLC0_RB_CNTL
-#define SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
-#define SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
-#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
-#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
-#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
-#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
-#define SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
-#define SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
-#define SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
-#define SDMA0_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000007EL
-#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
-#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
-#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
-#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
-#define SDMA0_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
-#define SDMA0_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
-//SDMA0_RLC0_RB_BASE
-#define SDMA0_RLC0_RB_BASE__ADDR__SHIFT 0x0
-#define SDMA0_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_RB_BASE_HI
-#define SDMA0_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
-//SDMA0_RLC0_RB_RPTR
-#define SDMA0_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
-#define SDMA0_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_RB_RPTR_HI
-#define SDMA0_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA0_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_RB_WPTR
-#define SDMA0_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
-#define SDMA0_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_RB_WPTR_HI
-#define SDMA0_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA0_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_RB_WPTR_POLL_CNTL
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
-#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
-//SDMA0_RLC0_RB_RPTR_ADDR_HI
-#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_RB_RPTR_ADDR_LO
-#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_RLC0_IB_CNTL
-#define SDMA0_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
-#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
-#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
-#define SDMA0_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
-#define SDMA0_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
-#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
-#define SDMA0_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
-//SDMA0_RLC0_IB_RPTR
-#define SDMA0_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
-#define SDMA0_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
-//SDMA0_RLC0_IB_OFFSET
-#define SDMA0_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA0_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
-//SDMA0_RLC0_IB_BASE_LO
-#define SDMA0_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
-#define SDMA0_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
-//SDMA0_RLC0_IB_BASE_HI
-#define SDMA0_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_IB_SIZE
-#define SDMA0_RLC0_IB_SIZE__SIZE__SHIFT 0x0
-#define SDMA0_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
-//SDMA0_RLC0_SKIP_CNTL
-#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
-#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
-//SDMA0_RLC0_CONTEXT_STATUS
-#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
-#define SDMA0_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
-#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
-#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
-#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
-#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
-#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
-#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
-#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
-#define SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
-#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
-#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
-#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
-#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
-#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
-#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
-//SDMA0_RLC0_DOORBELL
-#define SDMA0_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
-#define SDMA0_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
-#define SDMA0_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
-#define SDMA0_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
-//SDMA0_RLC0_STATUS
-#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
-#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
-#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
-#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
-//SDMA0_RLC0_DOORBELL_LOG
-#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
-#define SDMA0_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
-#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
-#define SDMA0_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
-//SDMA0_RLC0_WATERMARK
-#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
-#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
-#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
-#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
-//SDMA0_RLC0_DOORBELL_OFFSET
-#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
-//SDMA0_RLC0_CSA_ADDR_LO
-#define SDMA0_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_RLC0_CSA_ADDR_HI
-#define SDMA0_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_IB_SUB_REMAIN
-#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
-#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
-//SDMA0_RLC0_PREEMPT
-#define SDMA0_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
-#define SDMA0_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
-//SDMA0_RLC0_DUMMY_REG
-#define SDMA0_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
-#define SDMA0_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI
-#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO
-#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_RLC0_RB_AQL_CNTL
-#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
-#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
-#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
-#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
-#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
-#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
-//SDMA0_RLC0_MINOR_PTR_UPDATE
-#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
-#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
-//SDMA0_RLC0_MIDCMD_DATA0
-#define SDMA0_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_MIDCMD_DATA1
-#define SDMA0_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_MIDCMD_DATA2
-#define SDMA0_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_MIDCMD_DATA3
-#define SDMA0_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_MIDCMD_DATA4
-#define SDMA0_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_MIDCMD_DATA5
-#define SDMA0_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_MIDCMD_DATA6
-#define SDMA0_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_MIDCMD_DATA7
-#define SDMA0_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_MIDCMD_DATA8
-#define SDMA0_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
-//SDMA0_RLC0_MIDCMD_CNTL
-#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
-#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
-#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
-#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
-#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
-#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
-#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
-#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
-//SDMA0_RLC1_RB_CNTL
-#define SDMA0_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
-#define SDMA0_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
-#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
-#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
-#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
-#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
-#define SDMA0_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
-#define SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
-#define SDMA0_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
-#define SDMA0_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000007EL
-#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
-#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
-#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
-#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
-#define SDMA0_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
-#define SDMA0_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
-//SDMA0_RLC1_RB_BASE
-#define SDMA0_RLC1_RB_BASE__ADDR__SHIFT 0x0
-#define SDMA0_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_RB_BASE_HI
-#define SDMA0_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
-//SDMA0_RLC1_RB_RPTR
-#define SDMA0_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
-#define SDMA0_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_RB_RPTR_HI
-#define SDMA0_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA0_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_RB_WPTR
-#define SDMA0_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
-#define SDMA0_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_RB_WPTR_HI
-#define SDMA0_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA0_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_RB_WPTR_POLL_CNTL
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
-#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
-//SDMA0_RLC1_RB_RPTR_ADDR_HI
-#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_RB_RPTR_ADDR_LO
-#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_RLC1_IB_CNTL
-#define SDMA0_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
-#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
-#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
-#define SDMA0_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
-#define SDMA0_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
-#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
-#define SDMA0_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
-//SDMA0_RLC1_IB_RPTR
-#define SDMA0_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
-#define SDMA0_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
-//SDMA0_RLC1_IB_OFFSET
-#define SDMA0_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA0_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
-//SDMA0_RLC1_IB_BASE_LO
-#define SDMA0_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
-#define SDMA0_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
-//SDMA0_RLC1_IB_BASE_HI
-#define SDMA0_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_IB_SIZE
-#define SDMA0_RLC1_IB_SIZE__SIZE__SHIFT 0x0
-#define SDMA0_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
-//SDMA0_RLC1_SKIP_CNTL
-#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
-#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
-//SDMA0_RLC1_CONTEXT_STATUS
-#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
-#define SDMA0_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
-#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
-#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
-#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
-#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
-#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
-#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
-#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
-#define SDMA0_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
-#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
-#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
-#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
-#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
-#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
-#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
-//SDMA0_RLC1_DOORBELL
-#define SDMA0_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
-#define SDMA0_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
-#define SDMA0_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
-#define SDMA0_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
-//SDMA0_RLC1_STATUS
-#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
-#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
-#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
-#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
-//SDMA0_RLC1_DOORBELL_LOG
-#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
-#define SDMA0_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
-#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
-#define SDMA0_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
-//SDMA0_RLC1_WATERMARK
-#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
-#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
-#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
-#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
-//SDMA0_RLC1_DOORBELL_OFFSET
-#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
-//SDMA0_RLC1_CSA_ADDR_LO
-#define SDMA0_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_RLC1_CSA_ADDR_HI
-#define SDMA0_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_IB_SUB_REMAIN
-#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
-#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
-//SDMA0_RLC1_PREEMPT
-#define SDMA0_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
-#define SDMA0_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
-//SDMA0_RLC1_DUMMY_REG
-#define SDMA0_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
-#define SDMA0_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI
-#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO
-#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA0_RLC1_RB_AQL_CNTL
-#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
-#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
-#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
-#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
-#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
-#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
-//SDMA0_RLC1_MINOR_PTR_UPDATE
-#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
-#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
-//SDMA0_RLC1_MIDCMD_DATA0
-#define SDMA0_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_MIDCMD_DATA1
-#define SDMA0_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_MIDCMD_DATA2
-#define SDMA0_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_MIDCMD_DATA3
-#define SDMA0_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_MIDCMD_DATA4
-#define SDMA0_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_MIDCMD_DATA5
-#define SDMA0_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_MIDCMD_DATA6
-#define SDMA0_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_MIDCMD_DATA7
-#define SDMA0_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_MIDCMD_DATA8
-#define SDMA0_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
-//SDMA0_RLC1_MIDCMD_CNTL
-#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
-#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
-#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
-#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
-#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
-#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
-#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
-#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_default.h
deleted file mode 100644
index 85c5c5e..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_default.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _sdma1_4_0_DEFAULT_HEADER
-#define _sdma1_4_0_DEFAULT_HEADER
-
-
-// addressBlock: sdma1_sdma1dec
-#define mmSDMA1_UCODE_ADDR_DEFAULT 0x00000000
-#define mmSDMA1_UCODE_DATA_DEFAULT 0x00000000
-#define mmSDMA1_VM_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_VM_CTX_LO_DEFAULT 0x00000000
-#define mmSDMA1_VM_CTX_HI_DEFAULT 0x00000000
-#define mmSDMA1_ACTIVE_FCN_ID_DEFAULT 0x00000000
-#define mmSDMA1_VM_CTX_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_VIRT_RESET_REQ_DEFAULT 0x00000000
-#define mmSDMA1_VF_ENABLE_DEFAULT 0x00000000
-#define mmSDMA1_CONTEXT_REG_TYPE0_DEFAULT 0xfffdf79f
-#define mmSDMA1_CONTEXT_REG_TYPE1_DEFAULT 0x003fbcff
-#define mmSDMA1_CONTEXT_REG_TYPE2_DEFAULT 0x000003ff
-#define mmSDMA1_CONTEXT_REG_TYPE3_DEFAULT 0x00000000
-#define mmSDMA1_PUB_REG_TYPE0_DEFAULT 0x3c000000
-#define mmSDMA1_PUB_REG_TYPE1_DEFAULT 0x30003882
-#define mmSDMA1_PUB_REG_TYPE2_DEFAULT 0x0fc6e880
-#define mmSDMA1_PUB_REG_TYPE3_DEFAULT 0x00000000
-#define mmSDMA1_MMHUB_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_CONTEXT_GROUP_BOUNDARY_DEFAULT 0x00000000
-#define mmSDMA1_POWER_CNTL_DEFAULT 0x0003c000
-#define mmSDMA1_CLK_CTRL_DEFAULT 0xff000100
-#define mmSDMA1_CNTL_DEFAULT 0x00000002
-#define mmSDMA1_CHICKEN_BITS_DEFAULT 0x00831f07
-#define mmSDMA1_GB_ADDR_CONFIG_DEFAULT 0x00100012
-#define mmSDMA1_GB_ADDR_CONFIG_READ_DEFAULT 0x00100012
-#define mmSDMA1_RB_RPTR_FETCH_HI_DEFAULT 0x00000000
-#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_RB_RPTR_FETCH_DEFAULT 0x00000000
-#define mmSDMA1_IB_OFFSET_FETCH_DEFAULT 0x00000000
-#define mmSDMA1_PROGRAM_DEFAULT 0x00000000
-#define mmSDMA1_STATUS_REG_DEFAULT 0x46dee557
-#define mmSDMA1_STATUS1_REG_DEFAULT 0x000003ff
-#define mmSDMA1_RD_BURST_CNTL_DEFAULT 0x00000003
-#define mmSDMA1_HBM_PAGE_CONFIG_DEFAULT 0x00000000
-#define mmSDMA1_UCODE_CHECKSUM_DEFAULT 0x00000000
-#define mmSDMA1_F32_CNTL_DEFAULT 0x00000001
-#define mmSDMA1_FREEZE_DEFAULT 0x00000000
-#define mmSDMA1_PHASE0_QUANTUM_DEFAULT 0x00010002
-#define mmSDMA1_PHASE1_QUANTUM_DEFAULT 0x00010002
-#define mmSDMA1_EDC_CONFIG_DEFAULT 0x00000002
-#define mmSDMA1_BA_THRESHOLD_DEFAULT 0x03ff03ff
-#define mmSDMA1_ID_DEFAULT 0x00000001
-#define mmSDMA1_VERSION_DEFAULT 0x00000400
-#define mmSDMA1_EDC_COUNTER_DEFAULT 0x00000000
-#define mmSDMA1_EDC_COUNTER_CLEAR_DEFAULT 0x00000000
-#define mmSDMA1_STATUS2_REG_DEFAULT 0x00000001
-#define mmSDMA1_ATOMIC_CNTL_DEFAULT 0x00000200
-#define mmSDMA1_ATOMIC_PREOP_LO_DEFAULT 0x00000000
-#define mmSDMA1_ATOMIC_PREOP_HI_DEFAULT 0x00000000
-#define mmSDMA1_UTCL1_CNTL_DEFAULT 0xd0003019
-#define mmSDMA1_UTCL1_WATERMK_DEFAULT 0xfffbe1fe
-#define mmSDMA1_UTCL1_RD_STATUS_DEFAULT 0x201001ff
-#define mmSDMA1_UTCL1_WR_STATUS_DEFAULT 0x503001ff
-#define mmSDMA1_UTCL1_INV0_DEFAULT 0x00000600
-#define mmSDMA1_UTCL1_INV1_DEFAULT 0x00000000
-#define mmSDMA1_UTCL1_INV2_DEFAULT 0x00000000
-#define mmSDMA1_UTCL1_RD_XNACK0_DEFAULT 0x00000000
-#define mmSDMA1_UTCL1_RD_XNACK1_DEFAULT 0x00000000
-#define mmSDMA1_UTCL1_WR_XNACK0_DEFAULT 0x00000000
-#define mmSDMA1_UTCL1_WR_XNACK1_DEFAULT 0x00000000
-#define mmSDMA1_UTCL1_TIMEOUT_DEFAULT 0x00010001
-#define mmSDMA1_UTCL1_PAGE_DEFAULT 0x000003e0
-#define mmSDMA1_POWER_CNTL_IDLE_DEFAULT 0x06060200
-#define mmSDMA1_RELAX_ORDERING_LUT_DEFAULT 0xc0000006
-#define mmSDMA1_CHICKEN_BITS_2_DEFAULT 0x00000005
-#define mmSDMA1_STATUS3_REG_DEFAULT 0x00100000
-#define mmSDMA1_PHYSICAL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_PHYSICAL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_PHASE2_QUANTUM_DEFAULT 0x00010002
-#define mmSDMA1_ERROR_LOG_DEFAULT 0x0000000f
-#define mmSDMA1_PUB_DUMMY_REG0_DEFAULT 0x00000000
-#define mmSDMA1_PUB_DUMMY_REG1_DEFAULT 0x00000000
-#define mmSDMA1_PUB_DUMMY_REG2_DEFAULT 0x00000000
-#define mmSDMA1_PUB_DUMMY_REG3_DEFAULT 0x00000000
-#define mmSDMA1_F32_COUNTER_DEFAULT 0x00000000
-#define mmSDMA1_UNBREAKABLE_DEFAULT 0x00000000
-#define mmSDMA1_PERFMON_CNTL_DEFAULT 0x000ff7fd
-#define mmSDMA1_PERFCOUNTER0_RESULT_DEFAULT 0x00000000
-#define mmSDMA1_PERFCOUNTER1_RESULT_DEFAULT 0x00000000
-#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE_DEFAULT 0x00640000
-#define mmSDMA1_CRD_CNTL_DEFAULT 0x000085c0
-#define mmSDMA1_MMHUB_TRUSTLVL_DEFAULT 0x00000000
-#define mmSDMA1_GPU_IOV_VIOLATION_LOG_DEFAULT 0x00000000
-#define mmSDMA1_ULV_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_EA_DBIT_ADDR_DATA_DEFAULT 0x00000000
-#define mmSDMA1_EA_DBIT_ADDR_INDEX_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_CNTL_DEFAULT 0x00040000
-#define mmSDMA1_GFX_RB_BASE_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_RPTR_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_RPTR_HI_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_WPTR_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_WPTR_HI_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
-#define mmSDMA1_GFX_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_GFX_IB_CNTL_DEFAULT 0x00000100
-#define mmSDMA1_GFX_IB_RPTR_DEFAULT 0x00000000
-#define mmSDMA1_GFX_IB_OFFSET_DEFAULT 0x00000000
-#define mmSDMA1_GFX_IB_BASE_LO_DEFAULT 0x00000000
-#define mmSDMA1_GFX_IB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA1_GFX_IB_SIZE_DEFAULT 0x00000000
-#define mmSDMA1_GFX_SKIP_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_GFX_CONTEXT_STATUS_DEFAULT 0x00000005
-#define mmSDMA1_GFX_DOORBELL_DEFAULT 0x00000000
-#define mmSDMA1_GFX_CONTEXT_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_GFX_STATUS_DEFAULT 0x00000000
-#define mmSDMA1_GFX_DOORBELL_LOG_DEFAULT 0x00000000
-#define mmSDMA1_GFX_WATERMARK_DEFAULT 0x00000000
-#define mmSDMA1_GFX_DOORBELL_OFFSET_DEFAULT 0x00000000
-#define mmSDMA1_GFX_CSA_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_GFX_CSA_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_GFX_IB_SUB_REMAIN_DEFAULT 0x00000000
-#define mmSDMA1_GFX_PREEMPT_DEFAULT 0x00000000
-#define mmSDMA1_GFX_DUMMY_REG_DEFAULT 0x0000000f
-#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_GFX_RB_AQL_CNTL_DEFAULT 0x00004000
-#define mmSDMA1_GFX_MINOR_PTR_UPDATE_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_DATA0_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_DATA1_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_DATA2_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_DATA3_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_DATA4_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_DATA5_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_DATA6_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_DATA7_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_DATA8_DEFAULT 0x00000000
-#define mmSDMA1_GFX_MIDCMD_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_CNTL_DEFAULT 0x00040000
-#define mmSDMA1_PAGE_RB_BASE_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_RPTR_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_RPTR_HI_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_WPTR_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_WPTR_HI_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
-#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_IB_CNTL_DEFAULT 0x00000100
-#define mmSDMA1_PAGE_IB_RPTR_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_IB_OFFSET_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_IB_BASE_LO_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_IB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_IB_SIZE_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_SKIP_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_CONTEXT_STATUS_DEFAULT 0x00000004
-#define mmSDMA1_PAGE_DOORBELL_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_STATUS_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_DOORBELL_LOG_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_WATERMARK_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_DOORBELL_OFFSET_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_CSA_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_CSA_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_IB_SUB_REMAIN_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_PREEMPT_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_DUMMY_REG_DEFAULT 0x0000000f
-#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_RB_AQL_CNTL_DEFAULT 0x00004000
-#define mmSDMA1_PAGE_MINOR_PTR_UPDATE_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_DATA0_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_DATA1_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_DATA2_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_DATA3_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_DATA4_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_DATA5_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_DATA6_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_DATA7_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_DATA8_DEFAULT 0x00000000
-#define mmSDMA1_PAGE_MIDCMD_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_CNTL_DEFAULT 0x00040000
-#define mmSDMA1_RLC0_RB_BASE_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_RPTR_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_RPTR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_WPTR_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_WPTR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
-#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_IB_CNTL_DEFAULT 0x00000100
-#define mmSDMA1_RLC0_IB_RPTR_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_IB_OFFSET_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_IB_BASE_LO_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_IB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_IB_SIZE_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_SKIP_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_CONTEXT_STATUS_DEFAULT 0x00000004
-#define mmSDMA1_RLC0_DOORBELL_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_STATUS_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_DOORBELL_LOG_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_WATERMARK_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_DOORBELL_OFFSET_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_CSA_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_CSA_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_IB_SUB_REMAIN_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_PREEMPT_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_DUMMY_REG_DEFAULT 0x0000000f
-#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_RB_AQL_CNTL_DEFAULT 0x00004000
-#define mmSDMA1_RLC0_MINOR_PTR_UPDATE_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_DATA0_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_DATA1_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_DATA2_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_DATA3_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_DATA4_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_DATA5_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_DATA6_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_DATA7_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_DATA8_DEFAULT 0x00000000
-#define mmSDMA1_RLC0_MIDCMD_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_CNTL_DEFAULT 0x00040000
-#define mmSDMA1_RLC1_RB_BASE_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_RPTR_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_RPTR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_WPTR_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_WPTR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL_DEFAULT 0x00401000
-#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_IB_CNTL_DEFAULT 0x00000100
-#define mmSDMA1_RLC1_IB_RPTR_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_IB_OFFSET_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_IB_BASE_LO_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_IB_BASE_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_IB_SIZE_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_SKIP_CNTL_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_CONTEXT_STATUS_DEFAULT 0x00000004
-#define mmSDMA1_RLC1_DOORBELL_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_STATUS_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_DOORBELL_LOG_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_WATERMARK_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_DOORBELL_OFFSET_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_CSA_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_CSA_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_IB_SUB_REMAIN_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_PREEMPT_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_DUMMY_REG_DEFAULT 0x0000000f
-#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_RB_AQL_CNTL_DEFAULT 0x00004000
-#define mmSDMA1_RLC1_MINOR_PTR_UPDATE_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_DATA0_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_DATA1_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_DATA2_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_DATA3_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_DATA4_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_DATA5_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_DATA6_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_DATA7_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_DATA8_DEFAULT 0x00000000
-#define mmSDMA1_RLC1_MIDCMD_CNTL_DEFAULT 0x00000000
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_offset.h
deleted file mode 100644
index 92150d6..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_offset.h
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _sdma1_4_0_OFFSET_HEADER
-#define _sdma1_4_0_OFFSET_HEADER
-
-
-
-// addressBlock: sdma1_sdma1dec
-// base address: 0x5180
-#define mmSDMA1_UCODE_ADDR 0x0000
-#define mmSDMA1_UCODE_ADDR_BASE_IDX 0
-#define mmSDMA1_UCODE_DATA 0x0001
-#define mmSDMA1_UCODE_DATA_BASE_IDX 0
-#define mmSDMA1_VM_CNTL 0x0004
-#define mmSDMA1_VM_CNTL_BASE_IDX 0
-#define mmSDMA1_VM_CTX_LO 0x0005
-#define mmSDMA1_VM_CTX_LO_BASE_IDX 0
-#define mmSDMA1_VM_CTX_HI 0x0006
-#define mmSDMA1_VM_CTX_HI_BASE_IDX 0
-#define mmSDMA1_ACTIVE_FCN_ID 0x0007
-#define mmSDMA1_ACTIVE_FCN_ID_BASE_IDX 0
-#define mmSDMA1_VM_CTX_CNTL 0x0008
-#define mmSDMA1_VM_CTX_CNTL_BASE_IDX 0
-#define mmSDMA1_VIRT_RESET_REQ 0x0009
-#define mmSDMA1_VIRT_RESET_REQ_BASE_IDX 0
-#define mmSDMA1_VF_ENABLE 0x000a
-#define mmSDMA1_VF_ENABLE_BASE_IDX 0
-#define mmSDMA1_CONTEXT_REG_TYPE0 0x000b
-#define mmSDMA1_CONTEXT_REG_TYPE0_BASE_IDX 0
-#define mmSDMA1_CONTEXT_REG_TYPE1 0x000c
-#define mmSDMA1_CONTEXT_REG_TYPE1_BASE_IDX 0
-#define mmSDMA1_CONTEXT_REG_TYPE2 0x000d
-#define mmSDMA1_CONTEXT_REG_TYPE2_BASE_IDX 0
-#define mmSDMA1_CONTEXT_REG_TYPE3 0x000e
-#define mmSDMA1_CONTEXT_REG_TYPE3_BASE_IDX 0
-#define mmSDMA1_PUB_REG_TYPE0 0x000f
-#define mmSDMA1_PUB_REG_TYPE0_BASE_IDX 0
-#define mmSDMA1_PUB_REG_TYPE1 0x0010
-#define mmSDMA1_PUB_REG_TYPE1_BASE_IDX 0
-#define mmSDMA1_PUB_REG_TYPE2 0x0011
-#define mmSDMA1_PUB_REG_TYPE2_BASE_IDX 0
-#define mmSDMA1_PUB_REG_TYPE3 0x0012
-#define mmSDMA1_PUB_REG_TYPE3_BASE_IDX 0
-#define mmSDMA1_MMHUB_CNTL 0x0013
-#define mmSDMA1_MMHUB_CNTL_BASE_IDX 0
-#define mmSDMA1_CONTEXT_GROUP_BOUNDARY 0x0019
-#define mmSDMA1_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
-#define mmSDMA1_POWER_CNTL 0x001a
-#define mmSDMA1_POWER_CNTL_BASE_IDX 0
-#define mmSDMA1_CLK_CTRL 0x001b
-#define mmSDMA1_CLK_CTRL_BASE_IDX 0
-#define mmSDMA1_CNTL 0x001c
-#define mmSDMA1_CNTL_BASE_IDX 0
-#define mmSDMA1_CHICKEN_BITS 0x001d
-#define mmSDMA1_CHICKEN_BITS_BASE_IDX 0
-#define mmSDMA1_GB_ADDR_CONFIG 0x001e
-#define mmSDMA1_GB_ADDR_CONFIG_BASE_IDX 0
-#define mmSDMA1_GB_ADDR_CONFIG_READ 0x001f
-#define mmSDMA1_GB_ADDR_CONFIG_READ_BASE_IDX 0
-#define mmSDMA1_RB_RPTR_FETCH_HI 0x0020
-#define mmSDMA1_RB_RPTR_FETCH_HI_BASE_IDX 0
-#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
-#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
-#define mmSDMA1_RB_RPTR_FETCH 0x0022
-#define mmSDMA1_RB_RPTR_FETCH_BASE_IDX 0
-#define mmSDMA1_IB_OFFSET_FETCH 0x0023
-#define mmSDMA1_IB_OFFSET_FETCH_BASE_IDX 0
-#define mmSDMA1_PROGRAM 0x0024
-#define mmSDMA1_PROGRAM_BASE_IDX 0
-#define mmSDMA1_STATUS_REG 0x0025
-#define mmSDMA1_STATUS_REG_BASE_IDX 0
-#define mmSDMA1_STATUS1_REG 0x0026
-#define mmSDMA1_STATUS1_REG_BASE_IDX 0
-#define mmSDMA1_RD_BURST_CNTL 0x0027
-#define mmSDMA1_RD_BURST_CNTL_BASE_IDX 0
-#define mmSDMA1_HBM_PAGE_CONFIG 0x0028
-#define mmSDMA1_HBM_PAGE_CONFIG_BASE_IDX 0
-#define mmSDMA1_UCODE_CHECKSUM 0x0029
-#define mmSDMA1_UCODE_CHECKSUM_BASE_IDX 0
-#define mmSDMA1_F32_CNTL 0x002a
-#define mmSDMA1_F32_CNTL_BASE_IDX 0
-#define mmSDMA1_FREEZE 0x002b
-#define mmSDMA1_FREEZE_BASE_IDX 0
-#define mmSDMA1_PHASE0_QUANTUM 0x002c
-#define mmSDMA1_PHASE0_QUANTUM_BASE_IDX 0
-#define mmSDMA1_PHASE1_QUANTUM 0x002d
-#define mmSDMA1_PHASE1_QUANTUM_BASE_IDX 0
-#define mmSDMA1_EDC_CONFIG 0x0032
-#define mmSDMA1_EDC_CONFIG_BASE_IDX 0
-#define mmSDMA1_BA_THRESHOLD 0x0033
-#define mmSDMA1_BA_THRESHOLD_BASE_IDX 0
-#define mmSDMA1_ID 0x0034
-#define mmSDMA1_ID_BASE_IDX 0
-#define mmSDMA1_VERSION 0x0035
-#define mmSDMA1_VERSION_BASE_IDX 0
-#define mmSDMA1_EDC_COUNTER 0x0036
-#define mmSDMA1_EDC_COUNTER_BASE_IDX 0
-#define mmSDMA1_EDC_COUNTER_CLEAR 0x0037
-#define mmSDMA1_EDC_COUNTER_CLEAR_BASE_IDX 0
-#define mmSDMA1_STATUS2_REG 0x0038
-#define mmSDMA1_STATUS2_REG_BASE_IDX 0
-#define mmSDMA1_ATOMIC_CNTL 0x0039
-#define mmSDMA1_ATOMIC_CNTL_BASE_IDX 0
-#define mmSDMA1_ATOMIC_PREOP_LO 0x003a
-#define mmSDMA1_ATOMIC_PREOP_LO_BASE_IDX 0
-#define mmSDMA1_ATOMIC_PREOP_HI 0x003b
-#define mmSDMA1_ATOMIC_PREOP_HI_BASE_IDX 0
-#define mmSDMA1_UTCL1_CNTL 0x003c
-#define mmSDMA1_UTCL1_CNTL_BASE_IDX 0
-#define mmSDMA1_UTCL1_WATERMK 0x003d
-#define mmSDMA1_UTCL1_WATERMK_BASE_IDX 0
-#define mmSDMA1_UTCL1_RD_STATUS 0x003e
-#define mmSDMA1_UTCL1_RD_STATUS_BASE_IDX 0
-#define mmSDMA1_UTCL1_WR_STATUS 0x003f
-#define mmSDMA1_UTCL1_WR_STATUS_BASE_IDX 0
-#define mmSDMA1_UTCL1_INV0 0x0040
-#define mmSDMA1_UTCL1_INV0_BASE_IDX 0
-#define mmSDMA1_UTCL1_INV1 0x0041
-#define mmSDMA1_UTCL1_INV1_BASE_IDX 0
-#define mmSDMA1_UTCL1_INV2 0x0042
-#define mmSDMA1_UTCL1_INV2_BASE_IDX 0
-#define mmSDMA1_UTCL1_RD_XNACK0 0x0043
-#define mmSDMA1_UTCL1_RD_XNACK0_BASE_IDX 0
-#define mmSDMA1_UTCL1_RD_XNACK1 0x0044
-#define mmSDMA1_UTCL1_RD_XNACK1_BASE_IDX 0
-#define mmSDMA1_UTCL1_WR_XNACK0 0x0045
-#define mmSDMA1_UTCL1_WR_XNACK0_BASE_IDX 0
-#define mmSDMA1_UTCL1_WR_XNACK1 0x0046
-#define mmSDMA1_UTCL1_WR_XNACK1_BASE_IDX 0
-#define mmSDMA1_UTCL1_TIMEOUT 0x0047
-#define mmSDMA1_UTCL1_TIMEOUT_BASE_IDX 0
-#define mmSDMA1_UTCL1_PAGE 0x0048
-#define mmSDMA1_UTCL1_PAGE_BASE_IDX 0
-#define mmSDMA1_POWER_CNTL_IDLE 0x0049
-#define mmSDMA1_POWER_CNTL_IDLE_BASE_IDX 0
-#define mmSDMA1_RELAX_ORDERING_LUT 0x004a
-#define mmSDMA1_RELAX_ORDERING_LUT_BASE_IDX 0
-#define mmSDMA1_CHICKEN_BITS_2 0x004b
-#define mmSDMA1_CHICKEN_BITS_2_BASE_IDX 0
-#define mmSDMA1_STATUS3_REG 0x004c
-#define mmSDMA1_STATUS3_REG_BASE_IDX 0
-#define mmSDMA1_PHYSICAL_ADDR_LO 0x004d
-#define mmSDMA1_PHYSICAL_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_PHYSICAL_ADDR_HI 0x004e
-#define mmSDMA1_PHYSICAL_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_PHASE2_QUANTUM 0x004f
-#define mmSDMA1_PHASE2_QUANTUM_BASE_IDX 0
-#define mmSDMA1_ERROR_LOG 0x0050
-#define mmSDMA1_ERROR_LOG_BASE_IDX 0
-#define mmSDMA1_PUB_DUMMY_REG0 0x0051
-#define mmSDMA1_PUB_DUMMY_REG0_BASE_IDX 0
-#define mmSDMA1_PUB_DUMMY_REG1 0x0052
-#define mmSDMA1_PUB_DUMMY_REG1_BASE_IDX 0
-#define mmSDMA1_PUB_DUMMY_REG2 0x0053
-#define mmSDMA1_PUB_DUMMY_REG2_BASE_IDX 0
-#define mmSDMA1_PUB_DUMMY_REG3 0x0054
-#define mmSDMA1_PUB_DUMMY_REG3_BASE_IDX 0
-#define mmSDMA1_F32_COUNTER 0x0055
-#define mmSDMA1_F32_COUNTER_BASE_IDX 0
-#define mmSDMA1_UNBREAKABLE 0x0056
-#define mmSDMA1_UNBREAKABLE_BASE_IDX 0
-#define mmSDMA1_PERFMON_CNTL 0x0057
-#define mmSDMA1_PERFMON_CNTL_BASE_IDX 0
-#define mmSDMA1_PERFCOUNTER0_RESULT 0x0058
-#define mmSDMA1_PERFCOUNTER0_RESULT_BASE_IDX 0
-#define mmSDMA1_PERFCOUNTER1_RESULT 0x0059
-#define mmSDMA1_PERFCOUNTER1_RESULT_BASE_IDX 0
-#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE 0x005a
-#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0
-#define mmSDMA1_CRD_CNTL 0x005b
-#define mmSDMA1_CRD_CNTL_BASE_IDX 0
-#define mmSDMA1_MMHUB_TRUSTLVL 0x005c
-#define mmSDMA1_MMHUB_TRUSTLVL_BASE_IDX 0
-#define mmSDMA1_GPU_IOV_VIOLATION_LOG 0x005d
-#define mmSDMA1_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
-#define mmSDMA1_ULV_CNTL 0x005e
-#define mmSDMA1_ULV_CNTL_BASE_IDX 0
-#define mmSDMA1_EA_DBIT_ADDR_DATA 0x0060
-#define mmSDMA1_EA_DBIT_ADDR_DATA_BASE_IDX 0
-#define mmSDMA1_EA_DBIT_ADDR_INDEX 0x0061
-#define mmSDMA1_EA_DBIT_ADDR_INDEX_BASE_IDX 0
-#define mmSDMA1_GFX_RB_CNTL 0x0080
-#define mmSDMA1_GFX_RB_CNTL_BASE_IDX 0
-#define mmSDMA1_GFX_RB_BASE 0x0081
-#define mmSDMA1_GFX_RB_BASE_BASE_IDX 0
-#define mmSDMA1_GFX_RB_BASE_HI 0x0082
-#define mmSDMA1_GFX_RB_BASE_HI_BASE_IDX 0
-#define mmSDMA1_GFX_RB_RPTR 0x0083
-#define mmSDMA1_GFX_RB_RPTR_BASE_IDX 0
-#define mmSDMA1_GFX_RB_RPTR_HI 0x0084
-#define mmSDMA1_GFX_RB_RPTR_HI_BASE_IDX 0
-#define mmSDMA1_GFX_RB_WPTR 0x0085
-#define mmSDMA1_GFX_RB_WPTR_BASE_IDX 0
-#define mmSDMA1_GFX_RB_WPTR_HI 0x0086
-#define mmSDMA1_GFX_RB_WPTR_HI_BASE_IDX 0
-#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL 0x0087
-#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
-#define mmSDMA1_GFX_RB_RPTR_ADDR_HI 0x0088
-#define mmSDMA1_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_GFX_RB_RPTR_ADDR_LO 0x0089
-#define mmSDMA1_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_GFX_IB_CNTL 0x008a
-#define mmSDMA1_GFX_IB_CNTL_BASE_IDX 0
-#define mmSDMA1_GFX_IB_RPTR 0x008b
-#define mmSDMA1_GFX_IB_RPTR_BASE_IDX 0
-#define mmSDMA1_GFX_IB_OFFSET 0x008c
-#define mmSDMA1_GFX_IB_OFFSET_BASE_IDX 0
-#define mmSDMA1_GFX_IB_BASE_LO 0x008d
-#define mmSDMA1_GFX_IB_BASE_LO_BASE_IDX 0
-#define mmSDMA1_GFX_IB_BASE_HI 0x008e
-#define mmSDMA1_GFX_IB_BASE_HI_BASE_IDX 0
-#define mmSDMA1_GFX_IB_SIZE 0x008f
-#define mmSDMA1_GFX_IB_SIZE_BASE_IDX 0
-#define mmSDMA1_GFX_SKIP_CNTL 0x0090
-#define mmSDMA1_GFX_SKIP_CNTL_BASE_IDX 0
-#define mmSDMA1_GFX_CONTEXT_STATUS 0x0091
-#define mmSDMA1_GFX_CONTEXT_STATUS_BASE_IDX 0
-#define mmSDMA1_GFX_DOORBELL 0x0092
-#define mmSDMA1_GFX_DOORBELL_BASE_IDX 0
-#define mmSDMA1_GFX_CONTEXT_CNTL 0x0093
-#define mmSDMA1_GFX_CONTEXT_CNTL_BASE_IDX 0
-#define mmSDMA1_GFX_STATUS 0x00a8
-#define mmSDMA1_GFX_STATUS_BASE_IDX 0
-#define mmSDMA1_GFX_DOORBELL_LOG 0x00a9
-#define mmSDMA1_GFX_DOORBELL_LOG_BASE_IDX 0
-#define mmSDMA1_GFX_WATERMARK 0x00aa
-#define mmSDMA1_GFX_WATERMARK_BASE_IDX 0
-#define mmSDMA1_GFX_DOORBELL_OFFSET 0x00ab
-#define mmSDMA1_GFX_DOORBELL_OFFSET_BASE_IDX 0
-#define mmSDMA1_GFX_CSA_ADDR_LO 0x00ac
-#define mmSDMA1_GFX_CSA_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_GFX_CSA_ADDR_HI 0x00ad
-#define mmSDMA1_GFX_CSA_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_GFX_IB_SUB_REMAIN 0x00af
-#define mmSDMA1_GFX_IB_SUB_REMAIN_BASE_IDX 0
-#define mmSDMA1_GFX_PREEMPT 0x00b0
-#define mmSDMA1_GFX_PREEMPT_BASE_IDX 0
-#define mmSDMA1_GFX_DUMMY_REG 0x00b1
-#define mmSDMA1_GFX_DUMMY_REG_BASE_IDX 0
-#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
-#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
-#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_GFX_RB_AQL_CNTL 0x00b4
-#define mmSDMA1_GFX_RB_AQL_CNTL_BASE_IDX 0
-#define mmSDMA1_GFX_MINOR_PTR_UPDATE 0x00b5
-#define mmSDMA1_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_DATA0 0x00c0
-#define mmSDMA1_GFX_MIDCMD_DATA0_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_DATA1 0x00c1
-#define mmSDMA1_GFX_MIDCMD_DATA1_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_DATA2 0x00c2
-#define mmSDMA1_GFX_MIDCMD_DATA2_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_DATA3 0x00c3
-#define mmSDMA1_GFX_MIDCMD_DATA3_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_DATA4 0x00c4
-#define mmSDMA1_GFX_MIDCMD_DATA4_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_DATA5 0x00c5
-#define mmSDMA1_GFX_MIDCMD_DATA5_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_DATA6 0x00c6
-#define mmSDMA1_GFX_MIDCMD_DATA6_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_DATA7 0x00c7
-#define mmSDMA1_GFX_MIDCMD_DATA7_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_DATA8 0x00c8
-#define mmSDMA1_GFX_MIDCMD_DATA8_BASE_IDX 0
-#define mmSDMA1_GFX_MIDCMD_CNTL 0x00c9
-#define mmSDMA1_GFX_MIDCMD_CNTL_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_CNTL 0x00e0
-#define mmSDMA1_PAGE_RB_CNTL_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_BASE 0x00e1
-#define mmSDMA1_PAGE_RB_BASE_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_BASE_HI 0x00e2
-#define mmSDMA1_PAGE_RB_BASE_HI_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_RPTR 0x00e3
-#define mmSDMA1_PAGE_RB_RPTR_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_RPTR_HI 0x00e4
-#define mmSDMA1_PAGE_RB_RPTR_HI_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_WPTR 0x00e5
-#define mmSDMA1_PAGE_RB_WPTR_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_WPTR_HI 0x00e6
-#define mmSDMA1_PAGE_RB_WPTR_HI_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL 0x00e7
-#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI 0x00e8
-#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO 0x00e9
-#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_PAGE_IB_CNTL 0x00ea
-#define mmSDMA1_PAGE_IB_CNTL_BASE_IDX 0
-#define mmSDMA1_PAGE_IB_RPTR 0x00eb
-#define mmSDMA1_PAGE_IB_RPTR_BASE_IDX 0
-#define mmSDMA1_PAGE_IB_OFFSET 0x00ec
-#define mmSDMA1_PAGE_IB_OFFSET_BASE_IDX 0
-#define mmSDMA1_PAGE_IB_BASE_LO 0x00ed
-#define mmSDMA1_PAGE_IB_BASE_LO_BASE_IDX 0
-#define mmSDMA1_PAGE_IB_BASE_HI 0x00ee
-#define mmSDMA1_PAGE_IB_BASE_HI_BASE_IDX 0
-#define mmSDMA1_PAGE_IB_SIZE 0x00ef
-#define mmSDMA1_PAGE_IB_SIZE_BASE_IDX 0
-#define mmSDMA1_PAGE_SKIP_CNTL 0x00f0
-#define mmSDMA1_PAGE_SKIP_CNTL_BASE_IDX 0
-#define mmSDMA1_PAGE_CONTEXT_STATUS 0x00f1
-#define mmSDMA1_PAGE_CONTEXT_STATUS_BASE_IDX 0
-#define mmSDMA1_PAGE_DOORBELL 0x00f2
-#define mmSDMA1_PAGE_DOORBELL_BASE_IDX 0
-#define mmSDMA1_PAGE_STATUS 0x0108
-#define mmSDMA1_PAGE_STATUS_BASE_IDX 0
-#define mmSDMA1_PAGE_DOORBELL_LOG 0x0109
-#define mmSDMA1_PAGE_DOORBELL_LOG_BASE_IDX 0
-#define mmSDMA1_PAGE_WATERMARK 0x010a
-#define mmSDMA1_PAGE_WATERMARK_BASE_IDX 0
-#define mmSDMA1_PAGE_DOORBELL_OFFSET 0x010b
-#define mmSDMA1_PAGE_DOORBELL_OFFSET_BASE_IDX 0
-#define mmSDMA1_PAGE_CSA_ADDR_LO 0x010c
-#define mmSDMA1_PAGE_CSA_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_PAGE_CSA_ADDR_HI 0x010d
-#define mmSDMA1_PAGE_CSA_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_PAGE_IB_SUB_REMAIN 0x010f
-#define mmSDMA1_PAGE_IB_SUB_REMAIN_BASE_IDX 0
-#define mmSDMA1_PAGE_PREEMPT 0x0110
-#define mmSDMA1_PAGE_PREEMPT_BASE_IDX 0
-#define mmSDMA1_PAGE_DUMMY_REG 0x0111
-#define mmSDMA1_PAGE_DUMMY_REG_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI 0x0112
-#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO 0x0113
-#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_PAGE_RB_AQL_CNTL 0x0114
-#define mmSDMA1_PAGE_RB_AQL_CNTL_BASE_IDX 0
-#define mmSDMA1_PAGE_MINOR_PTR_UPDATE 0x0115
-#define mmSDMA1_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_DATA0 0x0120
-#define mmSDMA1_PAGE_MIDCMD_DATA0_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_DATA1 0x0121
-#define mmSDMA1_PAGE_MIDCMD_DATA1_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_DATA2 0x0122
-#define mmSDMA1_PAGE_MIDCMD_DATA2_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_DATA3 0x0123
-#define mmSDMA1_PAGE_MIDCMD_DATA3_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_DATA4 0x0124
-#define mmSDMA1_PAGE_MIDCMD_DATA4_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_DATA5 0x0125
-#define mmSDMA1_PAGE_MIDCMD_DATA5_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_DATA6 0x0126
-#define mmSDMA1_PAGE_MIDCMD_DATA6_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_DATA7 0x0127
-#define mmSDMA1_PAGE_MIDCMD_DATA7_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_DATA8 0x0128
-#define mmSDMA1_PAGE_MIDCMD_DATA8_BASE_IDX 0
-#define mmSDMA1_PAGE_MIDCMD_CNTL 0x0129
-#define mmSDMA1_PAGE_MIDCMD_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_CNTL 0x0140
-#define mmSDMA1_RLC0_RB_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_BASE 0x0141
-#define mmSDMA1_RLC0_RB_BASE_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_BASE_HI 0x0142
-#define mmSDMA1_RLC0_RB_BASE_HI_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_RPTR 0x0143
-#define mmSDMA1_RLC0_RB_RPTR_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_RPTR_HI 0x0144
-#define mmSDMA1_RLC0_RB_RPTR_HI_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_WPTR 0x0145
-#define mmSDMA1_RLC0_RB_WPTR_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_WPTR_HI 0x0146
-#define mmSDMA1_RLC0_RB_WPTR_HI_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL 0x0147
-#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI 0x0148
-#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO 0x0149
-#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_RLC0_IB_CNTL 0x014a
-#define mmSDMA1_RLC0_IB_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC0_IB_RPTR 0x014b
-#define mmSDMA1_RLC0_IB_RPTR_BASE_IDX 0
-#define mmSDMA1_RLC0_IB_OFFSET 0x014c
-#define mmSDMA1_RLC0_IB_OFFSET_BASE_IDX 0
-#define mmSDMA1_RLC0_IB_BASE_LO 0x014d
-#define mmSDMA1_RLC0_IB_BASE_LO_BASE_IDX 0
-#define mmSDMA1_RLC0_IB_BASE_HI 0x014e
-#define mmSDMA1_RLC0_IB_BASE_HI_BASE_IDX 0
-#define mmSDMA1_RLC0_IB_SIZE 0x014f
-#define mmSDMA1_RLC0_IB_SIZE_BASE_IDX 0
-#define mmSDMA1_RLC0_SKIP_CNTL 0x0150
-#define mmSDMA1_RLC0_SKIP_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC0_CONTEXT_STATUS 0x0151
-#define mmSDMA1_RLC0_CONTEXT_STATUS_BASE_IDX 0
-#define mmSDMA1_RLC0_DOORBELL 0x0152
-#define mmSDMA1_RLC0_DOORBELL_BASE_IDX 0
-#define mmSDMA1_RLC0_STATUS 0x0168
-#define mmSDMA1_RLC0_STATUS_BASE_IDX 0
-#define mmSDMA1_RLC0_DOORBELL_LOG 0x0169
-#define mmSDMA1_RLC0_DOORBELL_LOG_BASE_IDX 0
-#define mmSDMA1_RLC0_WATERMARK 0x016a
-#define mmSDMA1_RLC0_WATERMARK_BASE_IDX 0
-#define mmSDMA1_RLC0_DOORBELL_OFFSET 0x016b
-#define mmSDMA1_RLC0_DOORBELL_OFFSET_BASE_IDX 0
-#define mmSDMA1_RLC0_CSA_ADDR_LO 0x016c
-#define mmSDMA1_RLC0_CSA_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_RLC0_CSA_ADDR_HI 0x016d
-#define mmSDMA1_RLC0_CSA_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_RLC0_IB_SUB_REMAIN 0x016f
-#define mmSDMA1_RLC0_IB_SUB_REMAIN_BASE_IDX 0
-#define mmSDMA1_RLC0_PREEMPT 0x0170
-#define mmSDMA1_RLC0_PREEMPT_BASE_IDX 0
-#define mmSDMA1_RLC0_DUMMY_REG 0x0171
-#define mmSDMA1_RLC0_DUMMY_REG_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI 0x0172
-#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO 0x0173
-#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_RLC0_RB_AQL_CNTL 0x0174
-#define mmSDMA1_RLC0_RB_AQL_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC0_MINOR_PTR_UPDATE 0x0175
-#define mmSDMA1_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_DATA0 0x0180
-#define mmSDMA1_RLC0_MIDCMD_DATA0_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_DATA1 0x0181
-#define mmSDMA1_RLC0_MIDCMD_DATA1_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_DATA2 0x0182
-#define mmSDMA1_RLC0_MIDCMD_DATA2_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_DATA3 0x0183
-#define mmSDMA1_RLC0_MIDCMD_DATA3_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_DATA4 0x0184
-#define mmSDMA1_RLC0_MIDCMD_DATA4_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_DATA5 0x0185
-#define mmSDMA1_RLC0_MIDCMD_DATA5_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_DATA6 0x0186
-#define mmSDMA1_RLC0_MIDCMD_DATA6_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_DATA7 0x0187
-#define mmSDMA1_RLC0_MIDCMD_DATA7_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_DATA8 0x0188
-#define mmSDMA1_RLC0_MIDCMD_DATA8_BASE_IDX 0
-#define mmSDMA1_RLC0_MIDCMD_CNTL 0x0189
-#define mmSDMA1_RLC0_MIDCMD_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_CNTL 0x01a0
-#define mmSDMA1_RLC1_RB_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_BASE 0x01a1
-#define mmSDMA1_RLC1_RB_BASE_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_BASE_HI 0x01a2
-#define mmSDMA1_RLC1_RB_BASE_HI_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_RPTR 0x01a3
-#define mmSDMA1_RLC1_RB_RPTR_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_RPTR_HI 0x01a4
-#define mmSDMA1_RLC1_RB_RPTR_HI_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_WPTR 0x01a5
-#define mmSDMA1_RLC1_RB_WPTR_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_WPTR_HI 0x01a6
-#define mmSDMA1_RLC1_RB_WPTR_HI_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL 0x01a7
-#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI 0x01a8
-#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO 0x01a9
-#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_RLC1_IB_CNTL 0x01aa
-#define mmSDMA1_RLC1_IB_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC1_IB_RPTR 0x01ab
-#define mmSDMA1_RLC1_IB_RPTR_BASE_IDX 0
-#define mmSDMA1_RLC1_IB_OFFSET 0x01ac
-#define mmSDMA1_RLC1_IB_OFFSET_BASE_IDX 0
-#define mmSDMA1_RLC1_IB_BASE_LO 0x01ad
-#define mmSDMA1_RLC1_IB_BASE_LO_BASE_IDX 0
-#define mmSDMA1_RLC1_IB_BASE_HI 0x01ae
-#define mmSDMA1_RLC1_IB_BASE_HI_BASE_IDX 0
-#define mmSDMA1_RLC1_IB_SIZE 0x01af
-#define mmSDMA1_RLC1_IB_SIZE_BASE_IDX 0
-#define mmSDMA1_RLC1_SKIP_CNTL 0x01b0
-#define mmSDMA1_RLC1_SKIP_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC1_CONTEXT_STATUS 0x01b1
-#define mmSDMA1_RLC1_CONTEXT_STATUS_BASE_IDX 0
-#define mmSDMA1_RLC1_DOORBELL 0x01b2
-#define mmSDMA1_RLC1_DOORBELL_BASE_IDX 0
-#define mmSDMA1_RLC1_STATUS 0x01c8
-#define mmSDMA1_RLC1_STATUS_BASE_IDX 0
-#define mmSDMA1_RLC1_DOORBELL_LOG 0x01c9
-#define mmSDMA1_RLC1_DOORBELL_LOG_BASE_IDX 0
-#define mmSDMA1_RLC1_WATERMARK 0x01ca
-#define mmSDMA1_RLC1_WATERMARK_BASE_IDX 0
-#define mmSDMA1_RLC1_DOORBELL_OFFSET 0x01cb
-#define mmSDMA1_RLC1_DOORBELL_OFFSET_BASE_IDX 0
-#define mmSDMA1_RLC1_CSA_ADDR_LO 0x01cc
-#define mmSDMA1_RLC1_CSA_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_RLC1_CSA_ADDR_HI 0x01cd
-#define mmSDMA1_RLC1_CSA_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_RLC1_IB_SUB_REMAIN 0x01cf
-#define mmSDMA1_RLC1_IB_SUB_REMAIN_BASE_IDX 0
-#define mmSDMA1_RLC1_PREEMPT 0x01d0
-#define mmSDMA1_RLC1_PREEMPT_BASE_IDX 0
-#define mmSDMA1_RLC1_DUMMY_REG 0x01d1
-#define mmSDMA1_RLC1_DUMMY_REG_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI 0x01d2
-#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO 0x01d3
-#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
-#define mmSDMA1_RLC1_RB_AQL_CNTL 0x01d4
-#define mmSDMA1_RLC1_RB_AQL_CNTL_BASE_IDX 0
-#define mmSDMA1_RLC1_MINOR_PTR_UPDATE 0x01d5
-#define mmSDMA1_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_DATA0 0x01e0
-#define mmSDMA1_RLC1_MIDCMD_DATA0_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_DATA1 0x01e1
-#define mmSDMA1_RLC1_MIDCMD_DATA1_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_DATA2 0x01e2
-#define mmSDMA1_RLC1_MIDCMD_DATA2_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_DATA3 0x01e3
-#define mmSDMA1_RLC1_MIDCMD_DATA3_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_DATA4 0x01e4
-#define mmSDMA1_RLC1_MIDCMD_DATA4_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_DATA5 0x01e5
-#define mmSDMA1_RLC1_MIDCMD_DATA5_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_DATA6 0x01e6
-#define mmSDMA1_RLC1_MIDCMD_DATA6_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_DATA7 0x01e7
-#define mmSDMA1_RLC1_MIDCMD_DATA7_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_DATA8 0x01e8
-#define mmSDMA1_RLC1_MIDCMD_DATA8_BASE_IDX 0
-#define mmSDMA1_RLC1_MIDCMD_CNTL 0x01e9
-#define mmSDMA1_RLC1_MIDCMD_CNTL_BASE_IDX 0
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_sh_mask.h
deleted file mode 100644
index 25decdf..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_sh_mask.h
+++ /dev/null
@@ -1,1810 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _sdma1_4_0_SH_MASK_HEADER
-#define _sdma1_4_0_SH_MASK_HEADER
-
-
-// addressBlock: sdma1_sdma1dec
-//SDMA1_UCODE_ADDR
-#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0
-#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL
-//SDMA1_UCODE_DATA
-#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0
-#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
-//SDMA1_VM_CNTL
-#define SDMA1_VM_CNTL__CMD__SHIFT 0x0
-#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL
-//SDMA1_VM_CTX_LO
-#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2
-#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_VM_CTX_HI
-#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0
-#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_ACTIVE_FCN_ID
-#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
-#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
-#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
-#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
-#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
-#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
-//SDMA1_VM_CTX_CNTL
-#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0
-#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4
-#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L
-#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L
-//SDMA1_VIRT_RESET_REQ
-#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0
-#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f
-#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
-#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L
-//SDMA1_VF_ENABLE
-#define SDMA1_VF_ENABLE__VF_ENABLE__SHIFT 0x0
-#define SDMA1_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
-//SDMA1_CONTEXT_REG_TYPE0
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL__SHIFT 0x0
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE__SHIFT 0x1
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI__SHIFT 0x2
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR__SHIFT 0x3
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI__SHIFT 0x4
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR__SHIFT 0x5
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI__SHIFT 0x6
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL__SHIFT 0xa
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR__SHIFT 0xb
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET__SHIFT 0xc
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO__SHIFT 0xd
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI__SHIFT 0xe
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE__SHIFT 0xf
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL__SHIFT 0x10
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS__SHIFT 0x11
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL__SHIFT 0x12
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL__SHIFT 0x13
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL_MASK 0x00000001L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_MASK 0x00000002L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI_MASK 0x00000004L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_MASK 0x00000008L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI_MASK 0x00000010L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_MASK 0x00000020L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI_MASK 0x00000040L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL_MASK 0x00000400L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR_MASK 0x00000800L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET_MASK 0x00001000L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO_MASK 0x00002000L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI_MASK 0x00004000L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE_MASK 0x00008000L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL_MASK 0x00010000L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS_MASK 0x00020000L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL_MASK 0x00040000L
-#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL_MASK 0x00080000L
-//SDMA1_CONTEXT_REG_TYPE1
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS__SHIFT 0x8
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG__SHIFT 0x9
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK__SHIFT 0xa
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET__SHIFT 0xb
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO__SHIFT 0xc
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI__SHIFT 0xd
-#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN__SHIFT 0xf
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT__SHIFT 0x10
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG__SHIFT 0x11
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL__SHIFT 0x14
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
-#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS_MASK 0x00000100L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG_MASK 0x00000200L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK_MASK 0x00000400L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET_MASK 0x00000800L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO_MASK 0x00001000L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI_MASK 0x00002000L
-#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN_MASK 0x00008000L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT_MASK 0x00010000L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG_MASK 0x00020000L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL_MASK 0x00100000L
-#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
-#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
-//SDMA1_CONTEXT_REG_TYPE2
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0__SHIFT 0x0
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1__SHIFT 0x1
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2__SHIFT 0x2
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3__SHIFT 0x3
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4__SHIFT 0x4
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5__SHIFT 0x5
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6__SHIFT 0x6
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7__SHIFT 0x7
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8__SHIFT 0x8
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL__SHIFT 0x9
-#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0_MASK 0x00000001L
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1_MASK 0x00000002L
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2_MASK 0x00000004L
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3_MASK 0x00000008L
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4_MASK 0x00000010L
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5_MASK 0x00000020L
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6_MASK 0x00000040L
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7_MASK 0x00000080L
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8_MASK 0x00000100L
-#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL_MASK 0x00000200L
-#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L
-//SDMA1_CONTEXT_REG_TYPE3
-#define SDMA1_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
-#define SDMA1_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
-//SDMA1_PUB_REG_TYPE0
-#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR__SHIFT 0x0
-#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA__SHIFT 0x1
-#define SDMA1_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL__SHIFT 0x4
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO__SHIFT 0x5
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI__SHIFT 0x6
-#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID__SHIFT 0x7
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL__SHIFT 0x8
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ__SHIFT 0x9
-#define SDMA1_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0__SHIFT 0xb
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1__SHIFT 0xc
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2__SHIFT 0xd
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3__SHIFT 0xe
-#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0__SHIFT 0xf
-#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1__SHIFT 0x10
-#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2__SHIFT 0x11
-#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3__SHIFT 0x12
-#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL__SHIFT 0x13
-#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19
-#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL__SHIFT 0x1b
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d
-#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e
-#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f
-#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR_MASK 0x00000001L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA_MASK 0x00000002L
-#define SDMA1_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL_MASK 0x00000010L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO_MASK 0x00000020L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI_MASK 0x00000040L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID_MASK 0x00000080L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL_MASK 0x00000100L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ_MASK 0x00000200L
-#define SDMA1_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0_MASK 0x00000800L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1_MASK 0x00001000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2_MASK 0x00002000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3_MASK 0x00004000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0_MASK 0x00008000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1_MASK 0x00010000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2_MASK 0x00020000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3_MASK 0x00040000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL_MASK 0x00080000L
-#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL_MASK 0x08000000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L
-#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L
-//SDMA1_PUB_REG_TYPE1
-#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x0
-#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
-#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x2
-#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3
-#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4
-#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5
-#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6
-#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL__SHIFT 0x7
-#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9
-#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL__SHIFT 0xa
-#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb
-#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM__SHIFT 0xc
-#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM__SHIFT 0xd
-#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
-#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
-#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
-#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
-#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12
-#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13
-#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14
-#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15
-#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16
-#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17
-#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18
-#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19
-#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a
-#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS__SHIFT 0x1e
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1f
-#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000001L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000004L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL_MASK 0x00000080L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL_MASK 0x00000400L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM_MASK 0x00001000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM_MASK 0x00002000L
-#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
-#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
-#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
-#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS_MASK 0x40000000L
-#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS_MASK 0x80000000L
-//SDMA1_PUB_REG_TYPE2
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x0
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x1
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x2
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x3
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x4
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x5
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x6
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT__SHIFT 0x7
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE__SHIFT 0x8
-#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE__SHIFT 0x9
-#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa
-#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb
-#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM__SHIFT 0xf
-#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14
-#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE__SHIFT 0x16
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL__SHIFT 0x17
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT__SHIFT 0x18
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT__SHIFT 0x19
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a
-#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b
-#define SDMA1_PUB_REG_TYPE2__SDMA1_MMHUB_TRUSTLVL__SHIFT 0x1c
-#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
-#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL__SHIFT 0x1e
-#define SDMA1_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000001L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000002L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000004L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000008L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000010L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000020L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000040L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT_MASK 0x00000080L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE_MASK 0x00000100L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE_MASK 0x00000200L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM_MASK 0x00008000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE_MASK 0x00400000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL_MASK 0x00800000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT_MASK 0x01000000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT_MASK 0x02000000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_MMHUB_TRUSTLVL_MASK 0x10000000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
-#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL_MASK 0x40000000L
-#define SDMA1_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L
-//SDMA1_PUB_REG_TYPE3
-#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0
-#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1
-#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x2
-#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L
-#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
-#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL
-//SDMA1_MMHUB_CNTL
-#define SDMA1_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
-#define SDMA1_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
-//SDMA1_CONTEXT_GROUP_BOUNDARY
-#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
-#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
-//SDMA1_POWER_CNTL
-#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
-#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
-#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
-#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
-#define SDMA1_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
-#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
-#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
-#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
-#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
-#define SDMA1_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
-//SDMA1_CLK_CTRL
-#define SDMA1_CLK_CTRL__ON_DELAY__SHIFT 0x0
-#define SDMA1_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
-#define SDMA1_CLK_CTRL__RESERVED__SHIFT 0xc
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
-#define SDMA1_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
-#define SDMA1_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
-#define SDMA1_CLK_CTRL__RESERVED_MASK 0x00FFF000L
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
-#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
-//SDMA1_CNTL
-#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0
-#define SDMA1_CNTL__UTC_L1_ENABLE__SHIFT 0x1
-#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
-#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
-#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
-#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
-#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
-#define SDMA1_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
-#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
-#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
-#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
-#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L
-#define SDMA1_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
-#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
-#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
-#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
-#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
-#define SDMA1_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
-#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
-#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
-#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
-//SDMA1_CHICKEN_BITS
-#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
-#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
-#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
-#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
-#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
-#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
-#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
-#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
-#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
-#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19
-#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a
-#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c
-#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e
-#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
-#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
-#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
-#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
-#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
-#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
-#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
-#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
-#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
-#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L
-#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L
-#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L
-#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L
-//SDMA1_GB_ADDR_CONFIG
-#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
-#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
-#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
-#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
-#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
-#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
-#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
-#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
-#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
-#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
-//SDMA1_GB_ADDR_CONFIG_READ
-#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
-#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
-#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
-#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
-#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
-#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
-#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
-#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
-#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
-#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
-//SDMA1_RB_RPTR_FETCH_HI
-#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
-#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL
-#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
-#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
-//SDMA1_RB_RPTR_FETCH
-#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
-#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
-//SDMA1_IB_OFFSET_FETCH
-#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
-#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
-//SDMA1_PROGRAM
-#define SDMA1_PROGRAM__STREAM__SHIFT 0x0
-#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL
-//SDMA1_STATUS_REG
-#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0
-#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1
-#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2
-#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3
-#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
-#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
-#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
-#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
-#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
-#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9
-#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa
-#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
-#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc
-#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
-#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe
-#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
-#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
-#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
-#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
-#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
-#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
-#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
-#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
-#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
-#define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a
-#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
-#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
-#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e
-#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
-#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L
-#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L
-#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L
-#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L
-#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
-#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
-#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
-#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
-#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
-#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L
-#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L
-#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
-#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L
-#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
-#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
-#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
-#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
-#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
-#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
-#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
-#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
-#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
-#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
-#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
-#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L
-#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
-#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
-#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L
-#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
-//SDMA1_STATUS1_REG
-#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
-#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
-#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
-#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
-#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
-#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
-#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
-#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
-#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
-#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
-#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
-#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xf
-#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
-#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
-#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
-#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
-#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
-#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
-#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
-#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
-#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
-#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
-#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
-#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
-#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
-#define SDMA1_STATUS1_REG__EX_START_MASK 0x00008000L
-#define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
-#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
-//SDMA1_RD_BURST_CNTL
-#define SDMA1_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
-#define SDMA1_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
-//SDMA1_HBM_PAGE_CONFIG
-#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
-#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L
-//SDMA1_UCODE_CHECKSUM
-#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0
-#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
-//SDMA1_F32_CNTL
-#define SDMA1_F32_CNTL__HALT__SHIFT 0x0
-#define SDMA1_F32_CNTL__STEP__SHIFT 0x1
-#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L
-#define SDMA1_F32_CNTL__STEP_MASK 0x00000002L
-//SDMA1_FREEZE
-#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0
-#define SDMA1_FREEZE__FREEZE__SHIFT 0x4
-#define SDMA1_FREEZE__FROZEN__SHIFT 0x5
-#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6
-#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L
-#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L
-#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L
-#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L
-//SDMA1_PHASE0_QUANTUM
-#define SDMA1_PHASE0_QUANTUM__UNIT__SHIFT 0x0
-#define SDMA1_PHASE0_QUANTUM__VALUE__SHIFT 0x8
-#define SDMA1_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
-#define SDMA1_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
-#define SDMA1_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
-#define SDMA1_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
-//SDMA1_PHASE1_QUANTUM
-#define SDMA1_PHASE1_QUANTUM__UNIT__SHIFT 0x0
-#define SDMA1_PHASE1_QUANTUM__VALUE__SHIFT 0x8
-#define SDMA1_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
-#define SDMA1_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
-#define SDMA1_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
-#define SDMA1_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
-//SDMA1_EDC_CONFIG
-#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1
-#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
-#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
-#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
-//SDMA1_BA_THRESHOLD
-#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0
-#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
-#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
-#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
-//SDMA1_ID
-#define SDMA1_ID__DEVICE_ID__SHIFT 0x0
-#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL
-//SDMA1_VERSION
-#define SDMA1_VERSION__MINVER__SHIFT 0x0
-#define SDMA1_VERSION__MAJVER__SHIFT 0x8
-#define SDMA1_VERSION__REV__SHIFT 0x10
-#define SDMA1_VERSION__MINVER_MASK 0x0000007FL
-#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L
-#define SDMA1_VERSION__REV_MASK 0x003F0000L
-//SDMA1_EDC_COUNTER
-#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
-#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
-#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
-#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
-#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
-#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
-#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
-#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
-#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
-#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
-#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
-#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
-#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
-#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
-#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
-#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
-#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
-#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
-#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
-//SDMA1_EDC_COUNTER_CLEAR
-#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
-#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
-//SDMA1_STATUS2_REG
-#define SDMA1_STATUS2_REG__ID__SHIFT 0x0
-#define SDMA1_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2
-#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10
-#define SDMA1_STATUS2_REG__ID_MASK 0x00000003L
-#define SDMA1_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL
-#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
-//SDMA1_ATOMIC_CNTL
-#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
-#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
-#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
-#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
-//SDMA1_ATOMIC_PREOP_LO
-#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
-#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
-//SDMA1_ATOMIC_PREOP_HI
-#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
-#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
-//SDMA1_UTCL1_CNTL
-#define SDMA1_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
-#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
-#define SDMA1_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
-#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
-#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
-#define SDMA1_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
-#define SDMA1_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
-#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
-#define SDMA1_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
-#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
-#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
-#define SDMA1_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
-//SDMA1_UTCL1_WATERMK
-#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0
-#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0xa
-#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x12
-#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x1a
-#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000003FFL
-#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0003FC00L
-#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x03FC0000L
-#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFC000000L
-//SDMA1_UTCL1_RD_STATUS
-#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
-#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
-#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
-#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
-#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
-#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
-#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
-#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
-#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
-#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
-#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
-#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
-#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
-#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
-#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
-#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
-#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
-#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
-#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
-#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
-#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
-#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
-#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
-#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
-#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
-#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
-#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
-#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
-#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
-#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
-#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
-#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
-#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
-#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
-#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
-#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
-#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
-#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
-#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
-#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
-#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
-#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
-#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
-#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
-#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
-//SDMA1_UTCL1_WR_STATUS
-#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
-#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
-#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
-#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
-#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
-#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
-#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
-#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
-#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
-#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
-#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
-#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
-#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
-#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
-#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
-#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
-#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
-#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
-#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
-#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
-#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
-#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
-#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
-#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
-#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
-#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
-#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
-#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
-#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
-#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
-#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
-#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
-#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
-#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
-#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
-#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
-#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
-#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
-#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
-#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
-#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
-#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
-#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
-#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
-#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
-#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
-#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
-//SDMA1_UTCL1_INV0
-#define SDMA1_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
-#define SDMA1_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
-#define SDMA1_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
-#define SDMA1_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
-#define SDMA1_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
-#define SDMA1_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
-#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
-#define SDMA1_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
-#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
-#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
-#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
-#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
-#define SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
-#define SDMA1_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
-#define SDMA1_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
-#define SDMA1_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
-#define SDMA1_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
-#define SDMA1_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
-#define SDMA1_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
-#define SDMA1_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
-#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
-#define SDMA1_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
-#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
-#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
-#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
-#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
-#define SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
-#define SDMA1_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
-//SDMA1_UTCL1_INV1
-#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
-#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
-//SDMA1_UTCL1_INV2
-#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
-#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
-//SDMA1_UTCL1_RD_XNACK0
-#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
-#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
-//SDMA1_UTCL1_RD_XNACK1
-#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
-#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
-#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
-#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
-#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
-#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
-#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
-#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
-//SDMA1_UTCL1_WR_XNACK0
-#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
-#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
-//SDMA1_UTCL1_WR_XNACK1
-#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
-#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
-#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
-#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
-#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
-#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
-#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
-#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
-//SDMA1_UTCL1_TIMEOUT
-#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
-#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
-#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
-#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
-//SDMA1_UTCL1_PAGE
-#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
-#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
-#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
-#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
-#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
-#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
-#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
-#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
-//SDMA1_POWER_CNTL_IDLE
-#define SDMA1_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
-#define SDMA1_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
-#define SDMA1_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
-#define SDMA1_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
-#define SDMA1_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
-#define SDMA1_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
-//SDMA1_RELAX_ORDERING_LUT
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
-#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
-#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
-#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
-#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
-#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
-#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
-#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
-#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
-#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
-#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
-#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
-#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
-#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
-#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
-#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
-#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
-#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
-#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
-#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
-#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
-#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
-#define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
-#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
-#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
-#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
-#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
-#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
-#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
-#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
-//SDMA1_CHICKEN_BITS_2
-#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
-#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
-//SDMA1_STATUS3_REG
-#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
-#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
-#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
-#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
-#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
-#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
-//SDMA1_PHYSICAL_ADDR_LO
-#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
-#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
-#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
-#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
-#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
-#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
-#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
-#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
-//SDMA1_PHYSICAL_ADDR_HI
-#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
-//SDMA1_PHASE2_QUANTUM
-#define SDMA1_PHASE2_QUANTUM__UNIT__SHIFT 0x0
-#define SDMA1_PHASE2_QUANTUM__VALUE__SHIFT 0x8
-#define SDMA1_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
-#define SDMA1_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
-#define SDMA1_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
-#define SDMA1_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
-//SDMA1_ERROR_LOG
-#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0
-#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10
-#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
-#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L
-//SDMA1_PUB_DUMMY_REG0
-#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
-#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
-//SDMA1_PUB_DUMMY_REG1
-#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
-#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
-//SDMA1_PUB_DUMMY_REG2
-#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
-#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
-//SDMA1_PUB_DUMMY_REG3
-#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
-#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
-//SDMA1_F32_COUNTER
-#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0
-#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
-//SDMA1_UNBREAKABLE
-#define SDMA1_UNBREAKABLE__VALUE__SHIFT 0x0
-#define SDMA1_UNBREAKABLE__VALUE_MASK 0x00000001L
-//SDMA1_PERFMON_CNTL
-#define SDMA1_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0
-#define SDMA1_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1
-#define SDMA1_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
-#define SDMA1_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa
-#define SDMA1_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb
-#define SDMA1_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc
-#define SDMA1_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L
-#define SDMA1_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L
-#define SDMA1_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL
-#define SDMA1_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L
-#define SDMA1_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L
-#define SDMA1_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L
-//SDMA1_PERFCOUNTER0_RESULT
-#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
-#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
-//SDMA1_PERFCOUNTER1_RESULT
-#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
-#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
-//SDMA1_PERFCOUNTER_TAG_DELAY_RANGE
-#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0
-#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe
-#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c
-#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL
-#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L
-#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L
-//SDMA1_CRD_CNTL
-#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
-#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
-#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
-#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
-//SDMA1_MMHUB_TRUSTLVL
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x3
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x6
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0x9
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0xc
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0xf
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x12
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x15
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x00000007L
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x00000038L
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x000001C0L
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x00000E00L
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x00007000L
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00038000L
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x001C0000L
-#define SDMA1_MMHUB_TRUSTLVL__SECFLAG7_MASK 0x00E00000L
-//SDMA1_GPU_IOV_VIOLATION_LOG
-#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
-#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
-#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
-#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12
-#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
-#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
-#define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18
-#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
-#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
-#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
-#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L
-#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
-#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
-#define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L
-//SDMA1_ULV_CNTL
-#define SDMA1_ULV_CNTL__HYSTERESIS__SHIFT 0x0
-#define SDMA1_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
-#define SDMA1_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
-#define SDMA1_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
-#define SDMA1_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
-#define SDMA1_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
-#define SDMA1_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
-#define SDMA1_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
-//SDMA1_EA_DBIT_ADDR_DATA
-#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
-#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
-//SDMA1_EA_DBIT_ADDR_INDEX
-#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
-#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
-//SDMA1_GFX_RB_CNTL
-#define SDMA1_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
-#define SDMA1_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
-#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
-#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
-#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
-#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
-#define SDMA1_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
-#define SDMA1_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
-#define SDMA1_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
-#define SDMA1_GFX_RB_CNTL__RB_SIZE_MASK 0x0000007EL
-#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
-#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
-#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
-#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
-#define SDMA1_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
-#define SDMA1_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
-//SDMA1_GFX_RB_BASE
-#define SDMA1_GFX_RB_BASE__ADDR__SHIFT 0x0
-#define SDMA1_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_GFX_RB_BASE_HI
-#define SDMA1_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA1_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
-//SDMA1_GFX_RB_RPTR
-#define SDMA1_GFX_RB_RPTR__OFFSET__SHIFT 0x0
-#define SDMA1_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_GFX_RB_RPTR_HI
-#define SDMA1_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA1_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_GFX_RB_WPTR
-#define SDMA1_GFX_RB_WPTR__OFFSET__SHIFT 0x0
-#define SDMA1_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_GFX_RB_WPTR_HI
-#define SDMA1_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA1_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_GFX_RB_WPTR_POLL_CNTL
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
-#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
-//SDMA1_GFX_RB_RPTR_ADDR_HI
-#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_GFX_RB_RPTR_ADDR_LO
-#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_GFX_IB_CNTL
-#define SDMA1_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
-#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
-#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
-#define SDMA1_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
-#define SDMA1_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
-#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
-#define SDMA1_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
-//SDMA1_GFX_IB_RPTR
-#define SDMA1_GFX_IB_RPTR__OFFSET__SHIFT 0x2
-#define SDMA1_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
-//SDMA1_GFX_IB_OFFSET
-#define SDMA1_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA1_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
-//SDMA1_GFX_IB_BASE_LO
-#define SDMA1_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
-#define SDMA1_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
-//SDMA1_GFX_IB_BASE_HI
-#define SDMA1_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA1_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_GFX_IB_SIZE
-#define SDMA1_GFX_IB_SIZE__SIZE__SHIFT 0x0
-#define SDMA1_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
-//SDMA1_GFX_SKIP_CNTL
-#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
-#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
-//SDMA1_GFX_CONTEXT_STATUS
-#define SDMA1_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
-#define SDMA1_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
-#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
-#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
-#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
-#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
-#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
-#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
-#define SDMA1_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
-#define SDMA1_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
-#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
-#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
-#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
-#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
-#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
-#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
-//SDMA1_GFX_DOORBELL
-#define SDMA1_GFX_DOORBELL__ENABLE__SHIFT 0x1c
-#define SDMA1_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
-#define SDMA1_GFX_DOORBELL__ENABLE_MASK 0x10000000L
-#define SDMA1_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
-//SDMA1_GFX_CONTEXT_CNTL
-#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
-#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
-//SDMA1_GFX_STATUS
-#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
-#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
-#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
-#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
-//SDMA1_GFX_DOORBELL_LOG
-#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
-#define SDMA1_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
-#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
-#define SDMA1_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
-//SDMA1_GFX_WATERMARK
-#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
-#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
-#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
-#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
-//SDMA1_GFX_DOORBELL_OFFSET
-#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
-//SDMA1_GFX_CSA_ADDR_LO
-#define SDMA1_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_GFX_CSA_ADDR_HI
-#define SDMA1_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_GFX_IB_SUB_REMAIN
-#define SDMA1_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
-#define SDMA1_GFX_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
-//SDMA1_GFX_PREEMPT
-#define SDMA1_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
-#define SDMA1_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
-//SDMA1_GFX_DUMMY_REG
-#define SDMA1_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
-#define SDMA1_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
-//SDMA1_GFX_RB_WPTR_POLL_ADDR_HI
-#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_GFX_RB_WPTR_POLL_ADDR_LO
-#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_GFX_RB_AQL_CNTL
-#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
-#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
-#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
-#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
-#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
-#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
-//SDMA1_GFX_MINOR_PTR_UPDATE
-#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
-#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
-//SDMA1_GFX_MIDCMD_DATA0
-#define SDMA1_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
-//SDMA1_GFX_MIDCMD_DATA1
-#define SDMA1_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
-//SDMA1_GFX_MIDCMD_DATA2
-#define SDMA1_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
-//SDMA1_GFX_MIDCMD_DATA3
-#define SDMA1_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
-//SDMA1_GFX_MIDCMD_DATA4
-#define SDMA1_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
-//SDMA1_GFX_MIDCMD_DATA5
-#define SDMA1_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
-//SDMA1_GFX_MIDCMD_DATA6
-#define SDMA1_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
-//SDMA1_GFX_MIDCMD_DATA7
-#define SDMA1_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
-//SDMA1_GFX_MIDCMD_DATA8
-#define SDMA1_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
-//SDMA1_GFX_MIDCMD_CNTL
-#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
-#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
-#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
-#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
-#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
-#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
-#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
-#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
-//SDMA1_PAGE_RB_CNTL
-#define SDMA1_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
-#define SDMA1_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
-#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
-#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
-#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
-#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
-#define SDMA1_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
-#define SDMA1_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
-#define SDMA1_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
-#define SDMA1_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000007EL
-#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
-#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
-#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
-#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
-#define SDMA1_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
-#define SDMA1_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
-//SDMA1_PAGE_RB_BASE
-#define SDMA1_PAGE_RB_BASE__ADDR__SHIFT 0x0
-#define SDMA1_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_RB_BASE_HI
-#define SDMA1_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA1_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
-//SDMA1_PAGE_RB_RPTR
-#define SDMA1_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
-#define SDMA1_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_RB_RPTR_HI
-#define SDMA1_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA1_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_RB_WPTR
-#define SDMA1_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
-#define SDMA1_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_RB_WPTR_HI
-#define SDMA1_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA1_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_RB_WPTR_POLL_CNTL
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
-#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
-//SDMA1_PAGE_RB_RPTR_ADDR_HI
-#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_RB_RPTR_ADDR_LO
-#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_PAGE_IB_CNTL
-#define SDMA1_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
-#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
-#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
-#define SDMA1_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
-#define SDMA1_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
-#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
-#define SDMA1_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
-//SDMA1_PAGE_IB_RPTR
-#define SDMA1_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
-#define SDMA1_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
-//SDMA1_PAGE_IB_OFFSET
-#define SDMA1_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA1_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
-//SDMA1_PAGE_IB_BASE_LO
-#define SDMA1_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
-#define SDMA1_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
-//SDMA1_PAGE_IB_BASE_HI
-#define SDMA1_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA1_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_IB_SIZE
-#define SDMA1_PAGE_IB_SIZE__SIZE__SHIFT 0x0
-#define SDMA1_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
-//SDMA1_PAGE_SKIP_CNTL
-#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
-#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
-//SDMA1_PAGE_CONTEXT_STATUS
-#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
-#define SDMA1_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
-#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
-#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
-#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
-#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
-#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
-#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
-#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
-#define SDMA1_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
-#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
-#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
-#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
-#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
-#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
-#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
-//SDMA1_PAGE_DOORBELL
-#define SDMA1_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
-#define SDMA1_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
-#define SDMA1_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
-#define SDMA1_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
-//SDMA1_PAGE_STATUS
-#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
-#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
-#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
-#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
-//SDMA1_PAGE_DOORBELL_LOG
-#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
-#define SDMA1_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
-#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
-#define SDMA1_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
-//SDMA1_PAGE_WATERMARK
-#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
-#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
-#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
-#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
-//SDMA1_PAGE_DOORBELL_OFFSET
-#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
-//SDMA1_PAGE_CSA_ADDR_LO
-#define SDMA1_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_PAGE_CSA_ADDR_HI
-#define SDMA1_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_IB_SUB_REMAIN
-#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
-#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
-//SDMA1_PAGE_PREEMPT
-#define SDMA1_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
-#define SDMA1_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
-//SDMA1_PAGE_DUMMY_REG
-#define SDMA1_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
-#define SDMA1_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI
-#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO
-#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_PAGE_RB_AQL_CNTL
-#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
-#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
-#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
-#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
-#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
-#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
-//SDMA1_PAGE_MINOR_PTR_UPDATE
-#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
-#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
-//SDMA1_PAGE_MIDCMD_DATA0
-#define SDMA1_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_MIDCMD_DATA1
-#define SDMA1_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_MIDCMD_DATA2
-#define SDMA1_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_MIDCMD_DATA3
-#define SDMA1_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_MIDCMD_DATA4
-#define SDMA1_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_MIDCMD_DATA5
-#define SDMA1_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_MIDCMD_DATA6
-#define SDMA1_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_MIDCMD_DATA7
-#define SDMA1_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_MIDCMD_DATA8
-#define SDMA1_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
-//SDMA1_PAGE_MIDCMD_CNTL
-#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
-#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
-#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
-#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
-#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
-#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
-#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
-#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
-//SDMA1_RLC0_RB_CNTL
-#define SDMA1_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
-#define SDMA1_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
-#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
-#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
-#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
-#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
-#define SDMA1_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
-#define SDMA1_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
-#define SDMA1_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
-#define SDMA1_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000007EL
-#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
-#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
-#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
-#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
-#define SDMA1_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
-#define SDMA1_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
-//SDMA1_RLC0_RB_BASE
-#define SDMA1_RLC0_RB_BASE__ADDR__SHIFT 0x0
-#define SDMA1_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_RB_BASE_HI
-#define SDMA1_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
-//SDMA1_RLC0_RB_RPTR
-#define SDMA1_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
-#define SDMA1_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_RB_RPTR_HI
-#define SDMA1_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA1_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_RB_WPTR
-#define SDMA1_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
-#define SDMA1_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_RB_WPTR_HI
-#define SDMA1_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA1_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_RB_WPTR_POLL_CNTL
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
-#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
-//SDMA1_RLC0_RB_RPTR_ADDR_HI
-#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_RB_RPTR_ADDR_LO
-#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_RLC0_IB_CNTL
-#define SDMA1_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
-#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
-#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
-#define SDMA1_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
-#define SDMA1_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
-#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
-#define SDMA1_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
-//SDMA1_RLC0_IB_RPTR
-#define SDMA1_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
-#define SDMA1_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
-//SDMA1_RLC0_IB_OFFSET
-#define SDMA1_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA1_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
-//SDMA1_RLC0_IB_BASE_LO
-#define SDMA1_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
-#define SDMA1_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
-//SDMA1_RLC0_IB_BASE_HI
-#define SDMA1_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_IB_SIZE
-#define SDMA1_RLC0_IB_SIZE__SIZE__SHIFT 0x0
-#define SDMA1_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
-//SDMA1_RLC0_SKIP_CNTL
-#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
-#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
-//SDMA1_RLC0_CONTEXT_STATUS
-#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
-#define SDMA1_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
-#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
-#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
-#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
-#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
-#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
-#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
-#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
-#define SDMA1_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
-#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
-#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
-#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
-#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
-#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
-#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
-//SDMA1_RLC0_DOORBELL
-#define SDMA1_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
-#define SDMA1_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
-#define SDMA1_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
-#define SDMA1_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
-//SDMA1_RLC0_STATUS
-#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
-#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
-#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
-#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
-//SDMA1_RLC0_DOORBELL_LOG
-#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
-#define SDMA1_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
-#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
-#define SDMA1_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
-//SDMA1_RLC0_WATERMARK
-#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
-#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
-#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
-#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
-//SDMA1_RLC0_DOORBELL_OFFSET
-#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
-//SDMA1_RLC0_CSA_ADDR_LO
-#define SDMA1_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_RLC0_CSA_ADDR_HI
-#define SDMA1_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_IB_SUB_REMAIN
-#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
-#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
-//SDMA1_RLC0_PREEMPT
-#define SDMA1_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
-#define SDMA1_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
-//SDMA1_RLC0_DUMMY_REG
-#define SDMA1_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
-#define SDMA1_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI
-#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO
-#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_RLC0_RB_AQL_CNTL
-#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
-#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
-#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
-#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
-#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
-#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
-//SDMA1_RLC0_MINOR_PTR_UPDATE
-#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
-#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
-//SDMA1_RLC0_MIDCMD_DATA0
-#define SDMA1_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_MIDCMD_DATA1
-#define SDMA1_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_MIDCMD_DATA2
-#define SDMA1_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_MIDCMD_DATA3
-#define SDMA1_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_MIDCMD_DATA4
-#define SDMA1_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_MIDCMD_DATA5
-#define SDMA1_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_MIDCMD_DATA6
-#define SDMA1_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_MIDCMD_DATA7
-#define SDMA1_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_MIDCMD_DATA8
-#define SDMA1_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
-//SDMA1_RLC0_MIDCMD_CNTL
-#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
-#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
-#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
-#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
-#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
-#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
-#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
-#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
-//SDMA1_RLC1_RB_CNTL
-#define SDMA1_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
-#define SDMA1_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
-#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
-#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
-#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
-#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
-#define SDMA1_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
-#define SDMA1_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
-#define SDMA1_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
-#define SDMA1_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000007EL
-#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
-#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
-#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
-#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
-#define SDMA1_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
-#define SDMA1_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
-//SDMA1_RLC1_RB_BASE
-#define SDMA1_RLC1_RB_BASE__ADDR__SHIFT 0x0
-#define SDMA1_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_RB_BASE_HI
-#define SDMA1_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
-//SDMA1_RLC1_RB_RPTR
-#define SDMA1_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
-#define SDMA1_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_RB_RPTR_HI
-#define SDMA1_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA1_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_RB_WPTR
-#define SDMA1_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
-#define SDMA1_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_RB_WPTR_HI
-#define SDMA1_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
-#define SDMA1_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_RB_WPTR_POLL_CNTL
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
-#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
-//SDMA1_RLC1_RB_RPTR_ADDR_HI
-#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_RB_RPTR_ADDR_LO
-#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_RLC1_IB_CNTL
-#define SDMA1_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
-#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
-#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
-#define SDMA1_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
-#define SDMA1_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
-#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
-#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
-#define SDMA1_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
-//SDMA1_RLC1_IB_RPTR
-#define SDMA1_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
-#define SDMA1_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
-//SDMA1_RLC1_IB_OFFSET
-#define SDMA1_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA1_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
-//SDMA1_RLC1_IB_BASE_LO
-#define SDMA1_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
-#define SDMA1_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
-//SDMA1_RLC1_IB_BASE_HI
-#define SDMA1_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_IB_SIZE
-#define SDMA1_RLC1_IB_SIZE__SIZE__SHIFT 0x0
-#define SDMA1_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
-//SDMA1_RLC1_SKIP_CNTL
-#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
-#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x00003FFFL
-//SDMA1_RLC1_CONTEXT_STATUS
-#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
-#define SDMA1_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
-#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
-#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
-#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
-#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
-#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
-#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
-#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
-#define SDMA1_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
-#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
-#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
-#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
-#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
-#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
-#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
-//SDMA1_RLC1_DOORBELL
-#define SDMA1_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
-#define SDMA1_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
-#define SDMA1_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
-#define SDMA1_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
-//SDMA1_RLC1_STATUS
-#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
-#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
-#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
-#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
-//SDMA1_RLC1_DOORBELL_LOG
-#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
-#define SDMA1_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
-#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
-#define SDMA1_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
-//SDMA1_RLC1_WATERMARK
-#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
-#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
-#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
-#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
-//SDMA1_RLC1_DOORBELL_OFFSET
-#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
-#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
-//SDMA1_RLC1_CSA_ADDR_LO
-#define SDMA1_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_RLC1_CSA_ADDR_HI
-#define SDMA1_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_IB_SUB_REMAIN
-#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
-#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
-//SDMA1_RLC1_PREEMPT
-#define SDMA1_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
-#define SDMA1_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
-//SDMA1_RLC1_DUMMY_REG
-#define SDMA1_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
-#define SDMA1_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI
-#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
-#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO
-#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
-#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
-//SDMA1_RLC1_RB_AQL_CNTL
-#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
-#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
-#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
-#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
-#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
-#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
-//SDMA1_RLC1_MINOR_PTR_UPDATE
-#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
-#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
-//SDMA1_RLC1_MIDCMD_DATA0
-#define SDMA1_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_MIDCMD_DATA1
-#define SDMA1_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_MIDCMD_DATA2
-#define SDMA1_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_MIDCMD_DATA3
-#define SDMA1_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_MIDCMD_DATA4
-#define SDMA1_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_MIDCMD_DATA5
-#define SDMA1_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_MIDCMD_DATA6
-#define SDMA1_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_MIDCMD_DATA7
-#define SDMA1_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_MIDCMD_DATA8
-#define SDMA1_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
-//SDMA1_RLC1_MIDCMD_CNTL
-#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
-#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
-#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
-#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
-#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
-#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
-#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
-#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_default.h
deleted file mode 100644
index 5c186c2..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_default.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _smuio_9_0_DEFAULT_HEADER
-#define _smuio_9_0_DEFAULT_HEADER
-
-
-// addressBlock: smuio_smuio_SmuSmuioDec
-#define mmROM_CNTL_DEFAULT 0x00000000
-#define mmROM_STATUS_DEFAULT 0x00000000
-#define mmCGTT_ROM_CLK_CTRL0_DEFAULT 0xc0000100
-#define mmROM_INDEX_DEFAULT 0x00000000
-#define mmROM_DATA_DEFAULT 0x00000000
-#define mmROM_START_DEFAULT 0x00000000
-#define mmROM_SW_CNTL_DEFAULT 0x00000000
-#define mmROM_SW_STATUS_DEFAULT 0x00000000
-#define mmROM_SW_COMMAND_DEFAULT 0x00000000
-#define mmROM_SW_DATA_1_DEFAULT 0x00000000
-#define mmROM_SW_DATA_2_DEFAULT 0x00000000
-#define mmROM_SW_DATA_3_DEFAULT 0x00000000
-#define mmROM_SW_DATA_4_DEFAULT 0x00000000
-#define mmROM_SW_DATA_5_DEFAULT 0x00000000
-#define mmROM_SW_DATA_6_DEFAULT 0x00000000
-#define mmROM_SW_DATA_7_DEFAULT 0x00000000
-#define mmROM_SW_DATA_8_DEFAULT 0x00000000
-#define mmROM_SW_DATA_9_DEFAULT 0x00000000
-#define mmROM_SW_DATA_10_DEFAULT 0x00000000
-#define mmROM_SW_DATA_11_DEFAULT 0x00000000
-#define mmROM_SW_DATA_12_DEFAULT 0x00000000
-#define mmROM_SW_DATA_13_DEFAULT 0x00000000
-#define mmROM_SW_DATA_14_DEFAULT 0x00000000
-#define mmROM_SW_DATA_15_DEFAULT 0x00000000
-#define mmROM_SW_DATA_16_DEFAULT 0x00000000
-#define mmROM_SW_DATA_17_DEFAULT 0x00000000
-#define mmROM_SW_DATA_18_DEFAULT 0x00000000
-#define mmROM_SW_DATA_19_DEFAULT 0x00000000
-#define mmROM_SW_DATA_20_DEFAULT 0x00000000
-#define mmROM_SW_DATA_21_DEFAULT 0x00000000
-#define mmROM_SW_DATA_22_DEFAULT 0x00000000
-#define mmROM_SW_DATA_23_DEFAULT 0x00000000
-#define mmROM_SW_DATA_24_DEFAULT 0x00000000
-#define mmROM_SW_DATA_25_DEFAULT 0x00000000
-#define mmROM_SW_DATA_26_DEFAULT 0x00000000
-#define mmROM_SW_DATA_27_DEFAULT 0x00000000
-#define mmROM_SW_DATA_28_DEFAULT 0x00000000
-#define mmROM_SW_DATA_29_DEFAULT 0x00000000
-#define mmROM_SW_DATA_30_DEFAULT 0x00000000
-#define mmROM_SW_DATA_31_DEFAULT 0x00000000
-#define mmROM_SW_DATA_32_DEFAULT 0x00000000
-#define mmROM_SW_DATA_33_DEFAULT 0x00000000
-#define mmROM_SW_DATA_34_DEFAULT 0x00000000
-#define mmROM_SW_DATA_35_DEFAULT 0x00000000
-#define mmROM_SW_DATA_36_DEFAULT 0x00000000
-#define mmROM_SW_DATA_37_DEFAULT 0x00000000
-#define mmROM_SW_DATA_38_DEFAULT 0x00000000
-#define mmROM_SW_DATA_39_DEFAULT 0x00000000
-#define mmROM_SW_DATA_40_DEFAULT 0x00000000
-#define mmROM_SW_DATA_41_DEFAULT 0x00000000
-#define mmROM_SW_DATA_42_DEFAULT 0x00000000
-#define mmROM_SW_DATA_43_DEFAULT 0x00000000
-#define mmROM_SW_DATA_44_DEFAULT 0x00000000
-#define mmROM_SW_DATA_45_DEFAULT 0x00000000
-#define mmROM_SW_DATA_46_DEFAULT 0x00000000
-#define mmROM_SW_DATA_47_DEFAULT 0x00000000
-#define mmROM_SW_DATA_48_DEFAULT 0x00000000
-#define mmROM_SW_DATA_49_DEFAULT 0x00000000
-#define mmROM_SW_DATA_50_DEFAULT 0x00000000
-#define mmROM_SW_DATA_51_DEFAULT 0x00000000
-#define mmROM_SW_DATA_52_DEFAULT 0x00000000
-#define mmROM_SW_DATA_53_DEFAULT 0x00000000
-#define mmROM_SW_DATA_54_DEFAULT 0x00000000
-#define mmROM_SW_DATA_55_DEFAULT 0x00000000
-#define mmROM_SW_DATA_56_DEFAULT 0x00000000
-#define mmROM_SW_DATA_57_DEFAULT 0x00000000
-#define mmROM_SW_DATA_58_DEFAULT 0x00000000
-#define mmROM_SW_DATA_59_DEFAULT 0x00000000
-#define mmROM_SW_DATA_60_DEFAULT 0x00000000
-#define mmROM_SW_DATA_61_DEFAULT 0x00000000
-#define mmROM_SW_DATA_62_DEFAULT 0x00000000
-#define mmROM_SW_DATA_63_DEFAULT 0x00000000
-#define mmROM_SW_DATA_64_DEFAULT 0x00000000
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_default.h
deleted file mode 100644
index 48963ca..0000000
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_default.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
- * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _uvd_7_0_DEFAULT_HEADER
-#define _uvd_7_0_DEFAULT_HEADER
-
-
-// addressBlock: uvd0_uvd_pg_dec
-#define mmUVD_POWER_STATUS_DEFAULT 0x00000000
-#define mmUVD_DPG_RBC_RB_CNTL_DEFAULT 0x01000101
-#define mmUVD_DPG_RBC_RB_BASE_LOW_DEFAULT 0x00000000
-#define mmUVD_DPG_RBC_RB_BASE_HIGH_DEFAULT 0x00000000
-#define mmUVD_DPG_RBC_RB_WPTR_CNTL_DEFAULT 0x00000000
-#define mmUVD_DPG_RBC_RB_RPTR_DEFAULT 0x00000000
-#define mmUVD_DPG_RBC_RB_WPTR_DEFAULT 0x00000000
-#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_DPG_VCPU_CACHE_OFFSET0_DEFAULT 0x00000000
-
-
-// addressBlock: uvd0_uvdnpdec
-#define mmUVD_JPEG_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_GPCOM_VCPU_CMD_DEFAULT 0x00000000
-#define mmUVD_GPCOM_VCPU_DATA0_DEFAULT 0x00000000
-#define mmUVD_GPCOM_VCPU_DATA1_DEFAULT 0x00000000
-#define mmUVD_UDEC_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_UDEC_DB_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_UDEC_DBW_ADDR_CONFIG_DEFAULT 0x22010010
-#define mmUVD_SUVD_CGC_GATE_DEFAULT 0x00000000
-#define mmUVD_SUVD_CGC_CTRL_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_POWER_STATUS_U_DEFAULT 0x00000000
-#define mmUVD_NO_OP_DEFAULT 0x00000000
-#define mmUVD_GP_SCRATCH8_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_LO2_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_HI2_DEFAULT 0x00000000
-#define mmUVD_RB_SIZE2_DEFAULT 0x00000000
-#define mmUVD_RB_RPTR2_DEFAULT 0x00000000
-#define mmUVD_RB_WPTR2_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_LO_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_HI_DEFAULT 0x00000000
-#define mmUVD_RB_SIZE_DEFAULT 0x00000000
-#define mmUVD_RB_RPTR_DEFAULT 0x00000000
-#define mmUVD_RB_WPTR_DEFAULT 0x00000000
-#define mmUVD_JRBC_RB_RPTR_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH_DEFAULT 0x00000000
-#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW_DEFAULT 0x00000000
-
-
-// addressBlock: uvd0_uvddec
-#define mmUVD_SEMA_CNTL_DEFAULT 0x00000003
-#define mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW_DEFAULT 0x00000000
-#define mmUVD_JRBC_RB_WPTR_DEFAULT 0x00000000
-#define mmUVD_RB_RPTR3_DEFAULT 0x00000000
-#define mmUVD_RB_WPTR3_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_LO3_DEFAULT 0x00000000
-#define mmUVD_RB_BASE_HI3_DEFAULT 0x00000000
-#define mmUVD_RB_SIZE3_DEFAULT 0x00000000
-#define mmJPEG_CGC_GATE_DEFAULT 0x00300000
-#define mmUVD_CTX_INDEX_DEFAULT 0x00000000
-#define mmUVD_CTX_DATA_DEFAULT 0x00000000
-#define mmUVD_CGC_GATE_DEFAULT 0x000fffff
-#define mmUVD_CGC_CTRL_DEFAULT 0x1fff018d
-#define mmUVD_GP_SCRATCH4_DEFAULT 0x00000000
-#define mmUVD_LMI_CTRL2_DEFAULT 0x003e0000
-#define mmUVD_MASTINT_EN_DEFAULT 0x00000000
-#define mmJPEG_CGC_CTRL_DEFAULT 0x0000018d
-#define mmUVD_LMI_CTRL_DEFAULT 0x00104340
-#define mmUVD_LMI_VM_CTRL_DEFAULT 0x00000000
-#define mmUVD_LMI_SWAP_CNTL_DEFAULT 0x00000000
-#define mmUVD_MP_SWAP_CNTL_DEFAULT 0x00000000
-#define mmUVD_MPC_SET_MUXA0_DEFAULT 0x00002040
-#define mmUVD_MPC_SET_MUXA1_DEFAULT 0x00000000
-#define mmUVD_MPC_SET_MUXB0_DEFAULT 0x00002040
-#define mmUVD_MPC_SET_MUXB1_DEFAULT 0x00000000
-#define mmUVD_MPC_SET_MUX_DEFAULT 0x00000088
-#define mmUVD_MPC_SET_ALU_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_OFFSET0_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_SIZE0_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_OFFSET1_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_SIZE1_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_OFFSET2_DEFAULT 0x00000000
-#define mmUVD_VCPU_CACHE_SIZE2_DEFAULT 0x00000000
-#define mmUVD_VCPU_CNTL_DEFAULT 0x0ff20000
-#define mmUVD_SOFT_RESET_DEFAULT 0x00000008
-#define mmUVD_LMI_RBC_IB_VMID_DEFAULT 0x00000000
-#define mmUVD_RBC_IB_SIZE_DEFAULT 0x00000000
-#define mmUVD_LMI_RBC_RB_VMID_DEFAULT 0x00000000
-#define mmUVD_RBC_RB_RPTR_DEFAULT 0x00000000
-#define mmUVD_RBC_RB_WPTR_DEFAULT 0x00000000
-#define mmUVD_RBC_RB_WPTR_CNTL_DEFAULT 0x00000000
-#define mmUVD_RBC_RB_CNTL_DEFAULT 0x01000101
-#define mmUVD_RBC_RB_RPTR_ADDR_DEFAULT 0x00000000
-#define mmUVD_STATUS_DEFAULT 0x00000000
-#define mmUVD_SEMA_TIMEOUT_STATUS_DEFAULT 0x00000000
-#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL_DEFAULT 0x02000000
-#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL_DEFAULT 0x02000000
-#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL_DEFAULT 0x02000000
-#define mmUVD_CONTEXT_ID_DEFAULT 0x00000000
-#define mmUVD_CONTEXT_ID2_DEFAULT 0x00000000
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 7c92f47..3ae3da4 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -381,7 +381,7 @@ struct atom_rom_hw_function_header
struct atom_master_list_of_data_tables_v2_1{
uint16_t utilitypipeline; /* Offest for the utility to get parser info,Don't change this position!*/
uint16_t multimedia_info;
- uint16_t sw_datatable2;
+ uint16_t smc_dpm_info;
uint16_t sw_datatable3;
uint16_t firmwareinfo; /* Shared by various SW components */
uint16_t sw_datatable5;
@@ -1198,6 +1198,86 @@ struct atom_smu_info_v3_1
uint8_t fw_ctf_polarity; // GPIO polarity for CTF
};
+/*
+ ***************************************************************************
+ Data Table smc_dpm_info structure
+ ***************************************************************************
+ */
+struct atom_smc_dpm_info_v4_1
+{
+ struct atom_common_table_header table_header;
+ uint8_t liquid1_i2c_address;
+ uint8_t liquid2_i2c_address;
+ uint8_t vr_i2c_address;
+ uint8_t plx_i2c_address;
+
+ uint8_t liquid_i2c_linescl;
+ uint8_t liquid_i2c_linesda;
+ uint8_t vr_i2c_linescl;
+ uint8_t vr_i2c_linesda;
+
+ uint8_t plx_i2c_linescl;
+ uint8_t plx_i2c_linesda;
+ uint8_t vrsensorpresent;
+ uint8_t liquidsensorpresent;
+
+ uint16_t maxvoltagestepgfx;
+ uint16_t maxvoltagestepsoc;
+
+ uint8_t vddgfxvrmapping;
+ uint8_t vddsocvrmapping;
+ uint8_t vddmem0vrmapping;
+ uint8_t vddmem1vrmapping;
+
+ uint8_t gfxulvphasesheddingmask;
+ uint8_t soculvphasesheddingmask;
+ uint8_t padding8_v[2];
+
+ uint16_t gfxmaxcurrent;
+ uint8_t gfxoffset;
+ uint8_t padding_telemetrygfx;
+
+ uint16_t socmaxcurrent;
+ uint8_t socoffset;
+ uint8_t padding_telemetrysoc;
+
+ uint16_t mem0maxcurrent;
+ uint8_t mem0offset;
+ uint8_t padding_telemetrymem0;
+
+ uint16_t mem1maxcurrent;
+ uint8_t mem1offset;
+ uint8_t padding_telemetrymem1;
+
+ uint8_t acdcgpio;
+ uint8_t acdcpolarity;
+ uint8_t vr0hotgpio;
+ uint8_t vr0hotpolarity;
+
+ uint8_t vr1hotgpio;
+ uint8_t vr1hotpolarity;
+ uint8_t padding1;
+ uint8_t padding2;
+
+ uint8_t ledpin0;
+ uint8_t ledpin1;
+ uint8_t ledpin2;
+ uint8_t padding8_4;
+
+ uint8_t gfxclkspreadenabled;
+ uint8_t gfxclkspreadpercent;
+ uint16_t gfxclkspreadfreq;
+
+ uint8_t uclkspreadenabled;
+ uint8_t uclkspreadpercent;
+ uint16_t uclkspreadfreq;
+
+ uint8_t socclkspreadenabled;
+ uint8_t socclkspreadpercent;
+ uint16_t socclkspreadfreq;
+
+ uint32_t boardreserved[3];
+};
/*
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 675988d..f2814ae 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -29,18 +29,6 @@
struct cgs_device;
/**
- * enum cgs_gpu_mem_type - GPU memory types
- */
-enum cgs_gpu_mem_type {
- CGS_GPU_MEM_TYPE__VISIBLE_FB,
- CGS_GPU_MEM_TYPE__INVISIBLE_FB,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB,
- CGS_GPU_MEM_TYPE__GART_CACHEABLE,
- CGS_GPU_MEM_TYPE__GART_WRITECOMBINE
-};
-
-/**
* enum cgs_ind_reg - Indirect register spaces
*/
enum cgs_ind_reg {
@@ -88,32 +76,6 @@ enum cgs_ucode_id {
CGS_UCODE_ID_MAXIMUM,
};
-enum cgs_system_info_id {
- CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
- CGS_SYSTEM_INFO_PCIE_GEN_INFO,
- CGS_SYSTEM_INFO_PCIE_MLW,
- CGS_SYSTEM_INFO_PCIE_DEV,
- CGS_SYSTEM_INFO_PCIE_REV,
- CGS_SYSTEM_INFO_CG_FLAGS,
- CGS_SYSTEM_INFO_PG_FLAGS,
- CGS_SYSTEM_INFO_GFX_CU_INFO,
- CGS_SYSTEM_INFO_GFX_SE_INFO,
- CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
- CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
- CGS_SYSTEM_INFO_PCIE_BUS_DEVFN,
- CGS_SYSTEM_INFO_ID_MAXIMUM,
-};
-
-struct cgs_system_info {
- uint64_t size;
- enum cgs_system_info_id info_id;
- union {
- void *ptr;
- uint64_t value;
- };
- uint64_t padding[13];
-};
-
/*
* enum cgs_resource_type - GPU resource type
*/
@@ -144,7 +106,6 @@ struct cgs_firmware_info {
struct cgs_mode_info {
uint32_t refresh_rate;
- uint32_t ref_clock;
uint32_t vblank_time_us;
};
@@ -156,121 +117,6 @@ struct cgs_display_info {
typedef unsigned long cgs_handle_t;
-#define CGS_ACPI_METHOD_ATCS 0x53435441
-#define CGS_ACPI_METHOD_ATIF 0x46495441
-#define CGS_ACPI_METHOD_ATPX 0x58505441
-#define CGS_ACPI_FIELD_METHOD_NAME 0x00000001
-#define CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT 0x00000002
-#define CGS_ACPI_MAX_BUFFER_SIZE 256
-#define CGS_ACPI_TYPE_ANY 0x00
-#define CGS_ACPI_TYPE_INTEGER 0x01
-#define CGS_ACPI_TYPE_STRING 0x02
-#define CGS_ACPI_TYPE_BUFFER 0x03
-#define CGS_ACPI_TYPE_PACKAGE 0x04
-
-struct cgs_acpi_method_argument {
- uint32_t type;
- uint32_t data_length;
- union{
- uint32_t value;
- void *pointer;
- };
-};
-
-struct cgs_acpi_method_info {
- uint32_t size;
- uint32_t field;
- uint32_t input_count;
- uint32_t name;
- struct cgs_acpi_method_argument *pinput_argument;
- uint32_t output_count;
- struct cgs_acpi_method_argument *poutput_argument;
- uint32_t padding[9];
-};
-
-/**
- * cgs_alloc_gpu_mem() - Allocate GPU memory
- * @cgs_device: opaque device handle
- * @type: memory type
- * @size: size in bytes
- * @align: alignment in bytes
- * @handle: memory handle (output)
- *
- * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
- * memory allocation. This guarantees that the MC address returned by
- * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
- * FB memory types may be GART mapped depending on memory
- * fragmentation and memory allocator policies.
- *
- * If min/max_offset are non-0, the allocation will be forced to
- * reside between these offsets in its respective memory heap. The
- * base address that the offset relates to, depends on the memory
- * type.
- *
- * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
- * - CGS_GPU_MEM_TYPE__GART_*: GART aperture base address
- * - others: undefined, don't use with max_offset
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
- uint64_t size, uint64_t align,
- cgs_handle_t *handle);
-
-/**
- * cgs_free_gpu_mem() - Free GPU memory
- * @cgs_device: opaque device handle
- * @handle: memory handle returned by alloc or import
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_free_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
-
-/**
- * cgs_gmap_gpu_mem() - GPU-map GPU memory
- * @cgs_device: opaque device handle
- * @handle: memory handle returned by alloc or import
- * @mcaddr: MC address (output)
- *
- * Ensures that a buffer is GPU accessible and returns its MC address.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_gmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
- uint64_t *mcaddr);
-
-/**
- * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
- * @cgs_device: opaque device handle
- * @handle: memory handle returned by alloc or import
- *
- * Allows the buffer to be migrated while it's not used by the GPU.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_gunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
-
-/**
- * cgs_kmap_gpu_mem() - Kernel-map GPU memory
- *
- * @cgs_device: opaque device handle
- * @handle: memory handle returned by alloc or import
- * @map: Kernel virtual address the memory was mapped to (output)
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_kmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
- void **map);
-
-/**
- * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
- * @cgs_device: opaque device handle
- * @handle: memory handle returned by alloc or import
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_kunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
-
/**
* cgs_read_register() - Read an MMIO register
* @cgs_device: opaque device handle
@@ -406,35 +252,13 @@ typedef int(*cgs_get_active_displays_info)(
typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled);
-typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
- uint32_t acpi_method,
- uint32_t acpi_function,
- void *pinput, void *poutput,
- uint32_t output_count,
- uint32_t input_size,
- uint32_t output_size);
-
-typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
- struct cgs_system_info *sys_info);
-
typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
-struct amd_pp_init;
-typedef void* (*cgs_register_pp_handle)(struct cgs_device *cgs_device,
- int (*call_back_func)(struct amd_pp_init *, void **));
-
struct cgs_ops {
- /* memory management calls (similar to KFD interface) */
- cgs_alloc_gpu_mem_t alloc_gpu_mem;
- cgs_free_gpu_mem_t free_gpu_mem;
- cgs_gmap_gpu_mem_t gmap_gpu_mem;
- cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
- cgs_kmap_gpu_mem_t kmap_gpu_mem;
- cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
/* MMIO access */
cgs_read_register_t read_register;
cgs_write_register_t write_register;
@@ -456,14 +280,9 @@ struct cgs_ops {
cgs_get_active_displays_info get_active_displays_info;
/* notify dpm enabled */
cgs_notify_dpm_enabled notify_dpm_enabled;
- /* ACPI */
- cgs_call_acpi_method call_acpi_method;
- /* get system info */
- cgs_query_system_info query_system_info;
cgs_is_virtualization_enabled_t is_virtualization_enabled;
cgs_enter_safe_mode enter_safe_mode;
cgs_lock_grbm_idx lock_grbm_idx;
- cgs_register_pp_handle register_pp_handle;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -471,7 +290,6 @@ struct cgs_os_ops; /* To be define in OS-specific CGS header */
struct cgs_device
{
const struct cgs_ops *ops;
- const struct cgs_os_ops *os_ops;
/* to be embedded at the start of driver private structure */
};
@@ -482,19 +300,6 @@ struct cgs_device
#define CGS_OS_CALL(func,dev,...) \
(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
-#define cgs_alloc_gpu_mem(dev,type,size,align,handle) \
- CGS_CALL(alloc_gpu_mem,dev,type,size,align,handle)
-#define cgs_free_gpu_mem(dev,handle) \
- CGS_CALL(free_gpu_mem,dev,handle)
-#define cgs_gmap_gpu_mem(dev,handle,mcaddr) \
- CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
-#define cgs_gunmap_gpu_mem(dev,handle) \
- CGS_CALL(gunmap_gpu_mem,dev,handle)
-#define cgs_kmap_gpu_mem(dev,handle,map) \
- CGS_CALL(kmap_gpu_mem,dev,handle,map)
-#define cgs_kunmap_gpu_mem(dev,handle) \
- CGS_CALL(kunmap_gpu_mem,dev,handle)
-
#define cgs_read_register(dev,offset) \
CGS_CALL(read_register,dev,offset)
#define cgs_write_register(dev,offset,value) \
@@ -525,10 +330,6 @@ struct cgs_device
#define cgs_get_active_displays_info(dev, info) \
CGS_CALL(get_active_displays_info, dev, info)
-#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
- CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
-#define cgs_query_system_info(dev, sys_info) \
- CGS_CALL(query_system_info, dev, sys_info)
#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \
resource_base) \
CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
@@ -542,7 +343,6 @@ struct cgs_device
#define cgs_lock_grbm_idx(cgs_device, lock) \
CGS_CALL(lock_grbm_idx, cgs_device, lock)
-#define cgs_register_pp_handle(cgs_device, call_back_func) \
- CGS_CALL(register_pp_handle, cgs_device, call_back_func)
+
#endif /* _CGS_COMMON_H */
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
deleted file mode 100644
index bc7446c..0000000
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#ifndef _CGS_LINUX_H
-#define _CGS_LINUX_H
-
-#include "cgs_common.h"
-
-/**
- * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
- * @private_data: private data provided to cgs_add_irq_source
- * @src_id: interrupt source ID
- * @type: interrupt type
- * @enabled: 0 = disable source, non-0 = enable source
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_irq_source_set_func_t)(void *private_data,
- unsigned src_id, unsigned type,
- int enabled);
-
-/**
- * cgs_irq_handler_func() - Interrupt handler callback
- * @private_data: private data provided to cgs_add_irq_source
- * @src_id: interrupt source ID
- * @iv_entry: pointer to raw ih ring entry
- *
- * This callback runs in interrupt context.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_irq_handler_func_t)(void *private_data,
- unsigned src_id, const uint32_t *iv_entry);
-
-/**
- * cgs_add_irq_source() - Add an IRQ source
- * @cgs_device: opaque device handle
- * @src_id: interrupt source ID
- * @num_types: number of interrupt types that can be independently enabled
- * @set: callback function to enable/disable an interrupt type
- * @handler: interrupt handler callback
- * @private_data: private data to pass to callback functions
- *
- * The same IRQ source can be added only once. Adding an IRQ source
- * indicates ownership of that IRQ source and all its IRQ types.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned client_id,
- unsigned src_id,
- unsigned num_types,
- cgs_irq_source_set_func_t set,
- cgs_irq_handler_func_t handler,
- void *private_data);
-
-/**
- * cgs_irq_get() - Request enabling an IRQ source and type
- * @cgs_device: opaque device handle
- * @src_id: interrupt source ID
- * @type: interrupt type
- *
- * cgs_irq_get and cgs_irq_put calls must be balanced. They count
- * "references" to IRQ sources.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned client_id, unsigned src_id, unsigned type);
-
-/**
- * cgs_irq_put() - Indicate IRQ source is no longer needed
- * @cgs_device: opaque device handle
- * @src_id: interrupt source ID
- * @type: interrupt type
- *
- * cgs_irq_get and cgs_irq_put calls must be balanced. They count
- * "references" to IRQ sources. Even after cgs_irq_put is called, the
- * IRQ handler may still be called if there are more references to
- * the IRQ source.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned client_id, unsigned src_id, unsigned type);
-
-struct cgs_os_ops {
- /* IRQ handling */
- cgs_add_irq_source_t add_irq_source;
- cgs_irq_get_t irq_get;
- cgs_irq_put_t irq_put;
-};
-
-#define cgs_add_irq_source(dev,client_id,src_id,num_types,set,handler,private_data) \
- CGS_OS_CALL(add_irq_source,dev,client_id,src_id,num_types,set,handler, \
- private_data)
-#define cgs_irq_get(dev,client_id,src_id,type) \
- CGS_OS_CALL(irq_get,dev,client_id,src_id,type)
-#define cgs_irq_put(dev,client_id,src_id,type) \
- CGS_OS_CALL(irq_put,dev,client_id,src_id,type)
-
-#endif /* _CGS_LINUX_H */
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index 7343aed..7852952 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -23,7 +23,146 @@
#ifndef _DM_PP_INTERFACE_
#define _DM_PP_INTERFACE_
-#define PP_MAX_CLOCK_LEVELS 8
+#define PP_MAX_CLOCK_LEVELS 16
+
+enum amd_pp_display_config_type {
+ AMD_PP_DisplayConfigType_None = 0,
+ AMD_PP_DisplayConfigType_DP54,
+ AMD_PP_DisplayConfigType_DP432,
+ AMD_PP_DisplayConfigType_DP324,
+ AMD_PP_DisplayConfigType_DP27,
+ AMD_PP_DisplayConfigType_DP243,
+ AMD_PP_DisplayConfigType_DP216,
+ AMD_PP_DisplayConfigType_DP162,
+ AMD_PP_DisplayConfigType_HDMI6G,
+ AMD_PP_DisplayConfigType_HDMI297,
+ AMD_PP_DisplayConfigType_HDMI162,
+ AMD_PP_DisplayConfigType_LVDS,
+ AMD_PP_DisplayConfigType_DVI,
+ AMD_PP_DisplayConfigType_WIRELESS,
+ AMD_PP_DisplayConfigType_VGA
+};
+
+struct single_display_configuration
+{
+ uint32_t controller_index;
+ uint32_t controller_id;
+ uint32_t signal_type;
+ uint32_t display_state;
+ /* phy id for the primary internal transmitter */
+ uint8_t primary_transmitter_phyi_d;
+ /* bitmap with the active lanes */
+ uint8_t primary_transmitter_active_lanemap;
+ /* phy id for the secondary internal transmitter (for dual-link dvi) */
+ uint8_t secondary_transmitter_phy_id;
+ /* bitmap with the active lanes */
+ uint8_t secondary_transmitter_active_lanemap;
+ /* misc phy settings for SMU. */
+ uint32_t config_flags;
+ uint32_t display_type;
+ uint32_t view_resolution_cx;
+ uint32_t view_resolution_cy;
+ enum amd_pp_display_config_type displayconfigtype;
+ uint32_t vertical_refresh; /* for active display */
+};
+
+#define MAX_NUM_DISPLAY 32
+
+struct amd_pp_display_configuration {
+ bool nb_pstate_switch_disable; /* controls NB PState switch */
+ bool cpu_cc6_disable; /* controls CPU CState switch (on or off) */
+ bool cpu_pstate_disable;
+ uint32_t cpu_pstate_separation_time;
+
+ uint32_t num_display; /* total number of displays */
+ uint32_t num_path_including_non_display;
+ uint32_t crossfire_display_index;
+ uint32_t min_mem_set_clock;
+ uint32_t min_core_set_clock;
+ /* unit 10KHz x bit*/
+ uint32_t min_bus_bandwidth;
+ /* minimum required stutter sclk, in 10khz uint32_t ulMinCoreSetClk;*/
+ uint32_t min_core_set_clock_in_sr;
+
+ struct single_display_configuration displays[MAX_NUM_DISPLAY];
+
+ uint32_t vrefresh; /* for active display*/
+
+ uint32_t min_vblank_time; /* for active display*/
+ bool multi_monitor_in_sync;
+ /* Controller Index of primary display - used in MCLK SMC switching hang
+ * SW Workaround*/
+ uint32_t crtc_index;
+ /* htotal*1000/pixelclk - used in MCLK SMC switching hang SW Workaround*/
+ uint32_t line_time_in_us;
+ bool invalid_vblank_time;
+
+ uint32_t display_clk;
+ /*
+ * For a given display configuration, if multi_monitor_in_sync == false,
+ * then memory clock DPMS with this latency or below is allowed; DPMS
+ * with higher latency is not allowed.
+ */
+ uint32_t dce_tolerable_mclk_in_active_latency;
+ uint32_t min_dcef_set_clk;
+ uint32_t min_dcef_deep_sleep_set_clk;
+};
+
+struct amd_pp_simple_clock_info {
+ uint32_t engine_max_clock;
+ uint32_t memory_max_clock;
+ uint32_t level;
+};
+
+enum PP_DAL_POWERLEVEL {
+ PP_DAL_POWERLEVEL_INVALID = 0,
+ PP_DAL_POWERLEVEL_ULTRALOW,
+ PP_DAL_POWERLEVEL_LOW,
+ PP_DAL_POWERLEVEL_NOMINAL,
+ PP_DAL_POWERLEVEL_PERFORMANCE,
+
+ PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW,
+ PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW,
+ PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL,
+ PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE,
+ PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1,
+ PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1,
+ PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1,
+ PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1,
+};
+
+struct amd_pp_clock_info {
+ uint32_t min_engine_clock;
+ uint32_t max_engine_clock;
+ uint32_t min_memory_clock;
+ uint32_t max_memory_clock;
+ uint32_t min_bus_bandwidth;
+ uint32_t max_bus_bandwidth;
+ uint32_t max_engine_clock_in_sr;
+ uint32_t min_engine_clock_in_sr;
+ enum PP_DAL_POWERLEVEL max_clocks_state;
+};
+
+enum amd_pp_clock_type {
+ amd_pp_disp_clock = 1,
+ amd_pp_sys_clock,
+ amd_pp_mem_clock,
+ amd_pp_dcef_clock,
+ amd_pp_soc_clock,
+ amd_pp_pixel_clock,
+ amd_pp_phy_clock,
+ amd_pp_dcf_clock,
+ amd_pp_dpp_clock,
+ amd_pp_f_clock = amd_pp_dcef_clock,
+};
+
+#define MAX_NUM_CLOCKS 16
+
+struct amd_pp_clocks {
+ uint32_t count;
+ uint32_t clock[MAX_NUM_CLOCKS];
+ uint32_t latency[MAX_NUM_CLOCKS];
+};
struct pp_clock_with_latency {
uint32_t clocks_in_khz;
@@ -45,6 +184,11 @@ struct pp_clock_levels_with_voltage {
struct pp_clock_with_voltage data[PP_MAX_CLOCK_LEVELS];
};
+struct pp_display_clock_request {
+ enum amd_pp_clock_type clock_type;
+ uint32_t clock_freq_in_khz;
+};
+
#define PP_MAX_WM_SETS 4
enum pp_wm_set_id {
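A minimal sketch (not part of the patch) of how a caller might walk the new
amd_pp_clocks table after a get_clock_by_type-style query has filled it in; the
backend that populates 'clocks' is assumed here for illustration.

	struct amd_pp_clocks clocks = {};
	uint32_t i;

	/* assumed: the power-play backend has filled count/clock[]/latency[] */
	for (i = 0; i < clocks.count && i < MAX_NUM_CLOCKS; i++)
		pr_debug("clock level %u: %u (latency %u)\n",
			 i, clocks.clock[i], clocks.latency[i]);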
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index f516fd1..237289a 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/bitmap.h>
+#include <linux/dma-fence.h>
struct pci_dev;
@@ -46,6 +47,28 @@ enum kfd_preempt_type {
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};
+struct kfd_cu_info {
+ uint32_t num_shader_engines;
+ uint32_t num_shader_arrays_per_engine;
+ uint32_t num_cu_per_sh;
+ uint32_t cu_active_number;
+ uint32_t cu_ao_mask;
+ uint32_t simd_per_cu;
+ uint32_t max_waves_per_simd;
+ uint32_t wave_front_size;
+ uint32_t max_scratch_slots_per_cu;
+ uint32_t lds_size;
+ uint32_t cu_bitmap[4][4];
+};
+
+/* For getting GPU local memory information from KGD */
+struct kfd_local_mem_info {
+ uint64_t local_mem_size_private;
+ uint64_t local_mem_size_public;
+ uint32_t vram_width;
+ uint32_t mem_clk_max;
+};
+
enum kgd_memory_pool {
KGD_POOL_SYSTEM_CACHEABLE = 1,
KGD_POOL_SYSTEM_WRITECOMBINE = 2,
@@ -85,6 +108,12 @@ struct kgd2kfd_shared_resources {
/* Number of bytes at start of aperture reserved for KGD. */
size_t doorbell_start_offset;
+
+ /* GPUVM address space size in bytes */
+ uint64_t gpuvm_size;
+
+ /* Minor device number of the render node */
+ int drm_render_minor;
};
struct tile_config {
@@ -98,6 +127,27 @@ struct tile_config {
uint32_t num_ranks;
};
+
+/*
+ * Allocation flag domains
+ * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
+ */
+#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
+#define ALLOC_MEM_FLAGS_GTT (1 << 1)
+#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) /* TODO */
+#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) /* TODO */
+
+/*
+ * Allocation flags attributes/access options.
+ * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
+ */
+#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
+#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
+#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
+#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */
+#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
+#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */
+
/**
* struct kfd2kgd_calls
*
@@ -106,7 +156,7 @@ struct tile_config {
*
* @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
*
- * @get_vmem_size: Retrieves (physical) size of VRAM
+ * @get_local_mem_info: Retrieves information about GPU local memory
*
* @get_gpu_clock_counter: Retrieves GPU clock counter
*
@@ -131,6 +181,12 @@ struct tile_config {
* @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
* used only for no HWS mode.
*
+ * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
+ * The array is allocated with kmalloc and must be freed with kfree by the caller.
+ *
+ * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value pairs.
+ * The array is allocated with kmalloc and must be freed with kfree by the caller.
+ *
* @hqd_is_occupied: Checks if an hqd slot is occupied.
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
@@ -147,6 +203,49 @@ struct tile_config {
*
* @get_tile_config: Returns GPU-specific tiling mode information
*
+ * @get_cu_info: Retrieves activated cu info
+ *
+ * @get_vram_usage: Returns current VRAM usage
+ *
+ * @create_process_vm: Create a VM address space for a given process and GPU
+ *
+ * @destroy_process_vm: Destroy a VM
+ *
+ * @get_process_page_dir: Get physical address of a VM page directory
+ *
+ * @set_vm_context_page_table_base: Program page table base for a VMID
+ *
+ * @alloc_memory_of_gpu: Allocate GPUVM memory
+ *
+ * @free_memory_of_gpu: Free GPUVM memory
+ *
+ * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
+ * space. Allocates and updates page tables and page directories as
+ * needed. This function may return before all page table updates have
+ * completed. This allows multiple map operations (on multiple GPUs)
+ * to happen concurrently. Use sync_memory to synchronize with all
+ * pending updates.
+ *
+ * @unmap_memory_to_gpu: Unmap GPUVM memory from a specific VM address space
+ *
+ * @sync_memory: Wait for pending page table updates to complete
+ *
+ * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access
+ * Pins the BO, maps it to kernel address space. Such BOs are never evicted.
+ * The kernel virtual address remains valid until the BO is freed.
+ *
+ * @restore_process_bos: Restore all BOs that belong to the
+ * process. This is intended for restoring memory mappings after a TTM
+ * eviction.
+ *
+ * @invalidate_tlbs: Invalidate TLBs for a specific PASID
+ *
+ * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
+ *
+ * @submit_ib: Submits an IB to the specified engine by inserting the
+ * IB into the corresponding ring (ring type). The IB is executed with
+ * the specified VMID in a user-mode context.
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -158,7 +257,8 @@ struct kfd2kgd_calls {
void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
- uint64_t (*get_vmem_size)(struct kgd_dev *kgd);
+ void (*get_local_mem_info)(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info);
uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
@@ -184,7 +284,16 @@ struct kfd2kgd_calls {
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
- int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd);
+ int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+
+ int (*hqd_dump)(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+
+ int (*hqd_sdma_dump)(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
@@ -216,14 +325,45 @@ struct kfd2kgd_calls {
uint16_t (*get_atc_vmid_pasid_mapping_pasid)(
struct kgd_dev *kgd,
uint8_t vmid);
- void (*write_vmid_invalidate_request)(struct kgd_dev *kgd,
- uint8_t vmid);
uint16_t (*get_fw_version)(struct kgd_dev *kgd,
enum kgd_engine_type type);
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
+
+ void (*get_cu_info)(struct kgd_dev *kgd,
+ struct kfd_cu_info *cu_info);
+ uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
+
+ int (*create_process_vm)(struct kgd_dev *kgd, void **vm,
+ void **process_info, struct dma_fence **ef);
+ int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
+ void **vm, void **process_info, struct dma_fence **ef);
+ void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
+ uint32_t (*get_process_page_dir)(void *vm);
+ void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
+ uint32_t vmid, uint32_t page_table_base);
+ int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
+ uint64_t size, void *vm,
+ struct kgd_mem **mem, uint64_t *offset,
+ uint32_t flags);
+ int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
+ int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
+ void *vm);
+ int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
+ void *vm);
+ int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
+ int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
+ void **kptr, uint64_t *size);
+ int (*restore_process_bos)(void *process_info, struct dma_fence **ef);
+
+ int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
+ int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
+
+ int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
+ uint32_t vmid, uint64_t gpu_addr,
+ uint32_t *ib_cmd, uint32_t ib_len);
};
/**
@@ -242,6 +382,9 @@ struct kfd2kgd_calls {
*
* @resume: Notifies amdkfd about a resume action done to a kgd device
*
+ * @schedule_evict_and_restore_process: Schedules work queue that will prepare
+ * for safe eviction of KFD BOs that belong to the specified process.
+ *
* This structure contains function callback pointers so the kgd driver
* will notify to the amdkfd about certain status changes.
*
@@ -256,6 +399,8 @@ struct kgd2kfd_calls {
void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
void (*suspend)(struct kfd_dev *kfd);
int (*resume)(struct kfd_dev *kfd);
+ int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
+ struct dma_fence *fence);
};
int kgd2kfd_init(unsigned interface_version,
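A minimal sketch (illustrative, not taken from the patch) of the ownership
contract documented for hqd_dump above: the KGD side kmallocs an array of
{register, value} pairs and the amdkfd caller prints and frees it. The
kfd2kgd function table 'f', device 'kgd' and pipe/queue IDs are assumed.

	uint32_t (*dump)[2];	/* {register offset, value} pairs */
	uint32_t n_regs, i;
	int r;

	r = f->hqd_dump(kgd, pipe_id, queue_id, &dump, &n_regs);
	if (!r) {
		for (i = 0; i < n_regs; i++)
			pr_debug("reg 0x%x = 0x%x\n", dump[i][0], dump[i][1]);
		kfree(dump);	/* the caller owns the kmalloc'd array */
	}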
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
new file mode 100644
index 0000000..5c840c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __KGD_PP_INTERFACE_H__
+#define __KGD_PP_INTERFACE_H__
+
+extern const struct amdgpu_ip_block_version pp_smu_ip_block;
+
+struct amd_vce_state {
+ /* vce clocks */
+ u32 evclk;
+ u32 ecclk;
+ /* gpu clocks */
+ u32 sclk;
+ u32 mclk;
+ u8 clk_idx;
+ u8 pstate;
+};
+
+
+enum amd_dpm_forced_level {
+ AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
+ AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
+ AMD_DPM_FORCED_LEVEL_LOW = 0x4,
+ AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
+ AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
+ AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
+};
+
+enum amd_pm_state_type {
+ /* not used for dpm */
+ POWER_STATE_TYPE_DEFAULT,
+ POWER_STATE_TYPE_POWERSAVE,
+ /* user selectable states */
+ POWER_STATE_TYPE_BATTERY,
+ POWER_STATE_TYPE_BALANCED,
+ POWER_STATE_TYPE_PERFORMANCE,
+ /* internal states */
+ POWER_STATE_TYPE_INTERNAL_UVD,
+ POWER_STATE_TYPE_INTERNAL_UVD_SD,
+ POWER_STATE_TYPE_INTERNAL_UVD_HD,
+ POWER_STATE_TYPE_INTERNAL_UVD_HD2,
+ POWER_STATE_TYPE_INTERNAL_UVD_MVC,
+ POWER_STATE_TYPE_INTERNAL_BOOT,
+ POWER_STATE_TYPE_INTERNAL_THERMAL,
+ POWER_STATE_TYPE_INTERNAL_ACPI,
+ POWER_STATE_TYPE_INTERNAL_ULV,
+ POWER_STATE_TYPE_INTERNAL_3DPERF,
+};
+
+#define AMD_MAX_VCE_LEVELS 6
+
+enum amd_vce_level {
+ AMD_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
+ AMD_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
+ AMD_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+ AMD_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
+ AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
+enum amd_fan_ctrl_mode {
+ AMD_FAN_CTRL_NONE = 0,
+ AMD_FAN_CTRL_MANUAL = 1,
+ AMD_FAN_CTRL_AUTO = 2,
+};
+
+enum pp_clock_type {
+ PP_SCLK,
+ PP_MCLK,
+ PP_PCIE,
+ OD_SCLK,
+ OD_MCLK,
+};
+
+enum amd_pp_sensors {
+ AMDGPU_PP_SENSOR_GFX_SCLK = 0,
+ AMDGPU_PP_SENSOR_VDDNB,
+ AMDGPU_PP_SENSOR_VDDGFX,
+ AMDGPU_PP_SENSOR_UVD_VCLK,
+ AMDGPU_PP_SENSOR_UVD_DCLK,
+ AMDGPU_PP_SENSOR_VCE_ECCLK,
+ AMDGPU_PP_SENSOR_GPU_LOAD,
+ AMDGPU_PP_SENSOR_GFX_MCLK,
+ AMDGPU_PP_SENSOR_GPU_TEMP,
+ AMDGPU_PP_SENSOR_VCE_POWER,
+ AMDGPU_PP_SENSOR_UVD_POWER,
+ AMDGPU_PP_SENSOR_GPU_POWER,
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
+};
+
+enum amd_pp_task {
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ AMD_PP_TASK_ENABLE_USER_STATE,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ AMD_PP_TASK_COMPLETE_INIT,
+ AMD_PP_TASK_MAX
+};
+
+enum PP_SMC_POWER_PROFILE {
+ PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0,
+ PP_SMC_POWER_PROFILE_POWERSAVING = 0x1,
+ PP_SMC_POWER_PROFILE_VIDEO = 0x2,
+ PP_SMC_POWER_PROFILE_VR = 0x3,
+ PP_SMC_POWER_PROFILE_COMPUTE = 0x4,
+ PP_SMC_POWER_PROFILE_CUSTOM = 0x5,
+};
+
+enum {
+ PP_GROUP_UNKNOWN = 0,
+ PP_GROUP_GFX = 1,
+ PP_GROUP_SYS,
+ PP_GROUP_MAX
+};
+
+enum PP_OD_DPM_TABLE_COMMAND {
+ PP_OD_EDIT_SCLK_VDDC_TABLE,
+ PP_OD_EDIT_MCLK_VDDC_TABLE,
+ PP_OD_RESTORE_DEFAULT_TABLE,
+ PP_OD_COMMIT_DPM_TABLE
+};
+
+struct pp_states_info {
+ uint32_t nums;
+ uint32_t states[16];
+};
+
+struct pp_gpu_power {
+ uint32_t vddc_power;
+ uint32_t vddci_power;
+ uint32_t max_gpu_power;
+ uint32_t average_gpu_power;
+};
+
+#define PP_GROUP_MASK 0xF0000000
+#define PP_GROUP_SHIFT 28
+
+#define PP_BLOCK_MASK 0x0FFFFF00
+#define PP_BLOCK_SHIFT 8
+
+#define PP_BLOCK_GFX_CG 0x01
+#define PP_BLOCK_GFX_MG 0x02
+#define PP_BLOCK_GFX_3D 0x04
+#define PP_BLOCK_GFX_RLC 0x08
+#define PP_BLOCK_GFX_CP 0x10
+#define PP_BLOCK_SYS_BIF 0x01
+#define PP_BLOCK_SYS_MC 0x02
+#define PP_BLOCK_SYS_ROM 0x04
+#define PP_BLOCK_SYS_DRM 0x08
+#define PP_BLOCK_SYS_HDP 0x10
+#define PP_BLOCK_SYS_SDMA 0x20
+
+#define PP_STATE_MASK 0x0000000F
+#define PP_STATE_SHIFT 0
+#define PP_STATE_SUPPORT_MASK 0x000000F0
+#define PP_STATE_SUPPORT_SHIFT 0
+
+#define PP_STATE_CG 0x01
+#define PP_STATE_LS 0x02
+#define PP_STATE_DS 0x04
+#define PP_STATE_SD 0x08
+#define PP_STATE_SUPPORT_CG 0x10
+#define PP_STATE_SUPPORT_LS 0x20
+#define PP_STATE_SUPPORT_DS 0x40
+#define PP_STATE_SUPPORT_SD 0x80
+
+#define PP_CG_MSG_ID(group, block, support, state) \
+ ((group) << PP_GROUP_SHIFT | (block) << PP_BLOCK_SHIFT | \
+ (support) << PP_STATE_SUPPORT_SHIFT | (state) << PP_STATE_SHIFT)
+
+struct seq_file;
+enum amd_pp_clock_type;
+struct amd_pp_simple_clock_info;
+struct amd_pp_display_configuration;
+struct amd_pp_clock_info;
+struct pp_display_clock_request;
+struct pp_wm_sets_with_clock_ranges_soc15;
+struct pp_clock_levels_with_voltage;
+struct pp_clock_levels_with_latency;
+struct amd_pp_clocks;
+
+struct amd_pm_funcs {
+/* export for dpm on ci and si */
+ int (*pre_set_power_state)(void *handle);
+ int (*set_power_state)(void *handle);
+ void (*post_set_power_state)(void *handle);
+ void (*display_configuration_changed)(void *handle);
+ void (*print_power_state)(void *handle, void *ps);
+ bool (*vblank_too_short)(void *handle);
+ void (*enable_bapm)(void *handle, bool enable);
+ int (*check_state_equal)(void *handle,
+ void *cps,
+ void *rps,
+ bool *equal);
+/* export for sysfs */
+ void (*set_fan_control_mode)(void *handle, u32 mode);
+ u32 (*get_fan_control_mode)(void *handle);
+ int (*set_fan_speed_percent)(void *handle, u32 speed);
+ int (*get_fan_speed_percent)(void *handle, u32 *speed);
+ int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+ int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
+ int (*get_sclk_od)(void *handle);
+ int (*set_sclk_od)(void *handle, uint32_t value);
+ int (*get_mclk_od)(void *handle);
+ int (*set_mclk_od)(void *handle, uint32_t value);
+ int (*read_sensor)(void *handle, int idx, void *value, int *size);
+ enum amd_dpm_forced_level (*get_performance_level)(void *handle);
+ enum amd_pm_state_type (*get_current_power_state)(void *handle);
+ int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
+ int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
+ int (*get_pp_table)(void *handle, char **table);
+ int (*set_pp_table)(void *handle, const char *buf, size_t size);
+ void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
+ int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
+/* export to amdgpu */
+ void (*powergate_uvd)(void *handle, bool gate);
+ void (*powergate_vce)(void *handle, bool gate);
+ struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
+ int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state);
+ int (*load_firmware)(void *handle);
+ int (*wait_for_fw_loading_complete)(void *handle);
+ int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
+ int (*notify_smu_memory_info)(void *handle, uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size);
+ int (*set_power_limit)(void *handle, uint32_t n);
+ int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
+/* export to DC */
+ u32 (*get_sclk)(void *handle, bool low);
+ u32 (*get_mclk)(void *handle, bool low);
+ int (*display_configuration_change)(void *handle,
+ const struct amd_pp_display_configuration *input);
+ int (*get_display_power_level)(void *handle,
+ struct amd_pp_simple_clock_info *output);
+ int (*get_current_clocks)(void *handle,
+ struct amd_pp_clock_info *clocks);
+ int (*get_clock_by_type)(void *handle,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+ int (*get_clock_by_type_with_latency)(void *handle,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks);
+ int (*get_clock_by_type_with_voltage)(void *handle,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+ int (*set_watermarks_for_clocks_ranges)(void *handle,
+ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ int (*display_clock_voltage_request)(void *handle,
+ struct pp_display_clock_request *clock);
+ int (*get_display_mode_validation_clocks)(void *handle,
+ struct amd_pp_simple_clock_info *clocks);
+ int (*get_power_profile_mode)(void *handle, char *buf);
+ int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+ int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
+ int (*set_mmhub_powergating_by_smu)(void *handle);
+};
+
+#endif
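A minimal sketch (not part of the patch) of how the group/block/state masks
above compose into a clock-gating message ID; the resulting value is the kind
of msg_id that set_clockgating_by_smu() in amd_pm_funcs accepts.

	/* request clock-gating support + enable for the GFX CG block */
	uint32_t msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, PP_BLOCK_GFX_CG,
				       PP_STATE_SUPPORT_CG, PP_STATE_CG);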
diff --git a/drivers/gpu/drm/amd/include/soc15_hw_ip.h b/drivers/gpu/drm/amd/include/soc15_hw_ip.h
new file mode 100644
index 0000000..f17e30c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/soc15_hw_ip.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _soc15_hw_ip_HEADER
+#define _soc15_hw_ip_HEADER
+
+// HW ID
+#define MP1_HWID 1
+#define MP2_HWID 2
+#define THM_HWID 3
+#define SMUIO_HWID 4
+#define FUSE_HWID 5
+#define CLKA_HWID 6
+#define PWR_HWID 10
+#define GC_HWID 11
+#define UVD_HWID 12
+#define VCN_HWID UVD_HWID
+#define AUDIO_AZ_HWID 13
+#define ACP_HWID 14
+#define DCI_HWID 15
+#define DMU_HWID 271
+#define DCO_HWID 16
+#define DIO_HWID 272
+#define XDMA_HWID 17
+#define DCEAZ_HWID 18
+#define DAZ_HWID 274
+#define SDPMUX_HWID 19
+#define NTB_HWID 20
+#define IOHC_HWID 24
+#define L2IMU_HWID 28
+#define VCE_HWID 32
+#define MMHUB_HWID 34
+#define ATHUB_HWID 35
+#define DBGU_NBIO_HWID 36
+#define DFX_HWID 37
+#define DBGU0_HWID 38
+#define DBGU1_HWID 39
+#define OSSSYS_HWID 40
+#define HDP_HWID 41
+#define SDMA0_HWID 42
+#define SDMA1_HWID 43
+#define ISP_HWID 44
+#define DBGU_IO_HWID 45
+#define DF_HWID 46
+#define CLKB_HWID 47
+#define FCH_HWID 48
+#define DFX_DAP_HWID 49
+#define L1IMU_PCIE_HWID 50
+#define L1IMU_NBIF_HWID 51
+#define L1IMU_IOAGR_HWID 52
+#define L1IMU3_HWID 53
+#define L1IMU4_HWID 54
+#define L1IMU5_HWID 55
+#define L1IMU6_HWID 56
+#define L1IMU7_HWID 57
+#define L1IMU8_HWID 58
+#define L1IMU9_HWID 59
+#define L1IMU10_HWID 60
+#define L1IMU11_HWID 61
+#define L1IMU12_HWID 62
+#define L1IMU13_HWID 63
+#define L1IMU14_HWID 64
+#define L1IMU15_HWID 65
+#define WAFLC_HWID 66
+#define FCH_USB_PD_HWID 67
+#define PCIE_HWID 70
+#define PCS_HWID 80
+#define DDCL_HWID 89
+#define SST_HWID 90
+#define IOAGR_HWID 100
+#define NBIF_HWID 108
+#define IOAPIC_HWID 124
+#define SYSTEMHUB_HWID 128
+#define NTBCCP_HWID 144
+#define UMC_HWID 150
+#define SATA_HWID 168
+#define USB_HWID 170
+#define CCXSEC_HWID 176
+#define XGBE_HWID 216
+#define MP0_HWID 254
+#endif
diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
new file mode 100644
index 0000000..a12d4f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SOC15_IH_CLIENTID_H__
+#define __SOC15_IH_CLIENTID_H__
+
+ /*
+ * vega10+ IH clients
+ */
+enum soc15_ih_clientid {
+ SOC15_IH_CLIENTID_IH = 0x00,
+ SOC15_IH_CLIENTID_ACP = 0x01,
+ SOC15_IH_CLIENTID_ATHUB = 0x02,
+ SOC15_IH_CLIENTID_BIF = 0x03,
+ SOC15_IH_CLIENTID_DCE = 0x04,
+ SOC15_IH_CLIENTID_ISP = 0x05,
+ SOC15_IH_CLIENTID_PCIE0 = 0x06,
+ SOC15_IH_CLIENTID_RLC = 0x07,
+ SOC15_IH_CLIENTID_SDMA0 = 0x08,
+ SOC15_IH_CLIENTID_SDMA1 = 0x09,
+ SOC15_IH_CLIENTID_SE0SH = 0x0a,
+ SOC15_IH_CLIENTID_SE1SH = 0x0b,
+ SOC15_IH_CLIENTID_SE2SH = 0x0c,
+ SOC15_IH_CLIENTID_SE3SH = 0x0d,
+ SOC15_IH_CLIENTID_SYSHUB = 0x0e,
+ SOC15_IH_CLIENTID_THM = 0x0f,
+ SOC15_IH_CLIENTID_UVD = 0x10,
+ SOC15_IH_CLIENTID_VCE0 = 0x11,
+ SOC15_IH_CLIENTID_VMC = 0x12,
+ SOC15_IH_CLIENTID_XDMA = 0x13,
+ SOC15_IH_CLIENTID_GRBM_CP = 0x14,
+ SOC15_IH_CLIENTID_ATS = 0x15,
+ SOC15_IH_CLIENTID_ROM_SMUIO = 0x16,
+ SOC15_IH_CLIENTID_DF = 0x17,
+ SOC15_IH_CLIENTID_VCE1 = 0x18,
+ SOC15_IH_CLIENTID_PWR = 0x19,
+ SOC15_IH_CLIENTID_UTCL2 = 0x1b,
+ SOC15_IH_CLIENTID_EA = 0x1c,
+ SOC15_IH_CLIENTID_UTCL2LOG = 0x1d,
+ SOC15_IH_CLIENTID_MP0 = 0x1e,
+ SOC15_IH_CLIENTID_MP1 = 0x1f,
+
+ SOC15_IH_CLIENTID_MAX,
+
+ SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD
+};
+
+#endif
+
+
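A minimal sketch (an assumption, not from the patch) of how an interrupt
handler might route IH ring entries by these client IDs; the decoded entry
and its client_id field are assumed here for illustration.

	/* illustrative only: route a decoded IH entry by SOC15 client ID */
	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
	case SOC15_IH_CLIENTID_SDMA1:
		/* hand off to the SDMA interrupt sources */
		break;
	case SOC15_IH_CLIENTID_GRBM_CP:
		/* graphics/compute pipe interrupts */
		break;
	default:
		break;
	}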
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/vega10_enum.h b/drivers/gpu/drm/amd/include/vega10_enum.h
index c14ba65..c14ba65 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/vega10_enum.h
+++ b/drivers/gpu/drm/amd/include/vega10_enum.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/soc15ip.h b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
index 1767db6..976dd2d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/soc15ip.h
+++ b/drivers/gpu/drm/amd/include/vega10_ip_offset.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,273 +18,197 @@
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _soc15ip_new_HEADER
-#define _soc15ip_new_HEADER
-
-// HW ID
-#define MP1_HWID 1
-#define MP2_HWID 2
-#define THM_HWID 3
-#define SMUIO_HWID 4
-#define FUSE_HWID 5
-#define CLKA_HWID 6
-#define PWR_HWID 10
-#define GC_HWID 11
-#define UVD_HWID 12
-#define VCN_HWID UVD_HWID
-#define AUDIO_AZ_HWID 13
-#define ACP_HWID 14
-#define DCI_HWID 15
-#define DMU_HWID 271
-#define DCO_HWID 16
-#define DIO_HWID 272
-#define XDMA_HWID 17
-#define DCEAZ_HWID 18
-#define DAZ_HWID 274
-#define SDPMUX_HWID 19
-#define NTB_HWID 20
-#define IOHC_HWID 24
-#define L2IMU_HWID 28
-#define VCE_HWID 32
-#define MMHUB_HWID 34
-#define ATHUB_HWID 35
-#define DBGU_NBIO_HWID 36
-#define DFX_HWID 37
-#define DBGU0_HWID 38
-#define DBGU1_HWID 39
-#define OSSSYS_HWID 40
-#define HDP_HWID 41
-#define SDMA0_HWID 42
-#define SDMA1_HWID 43
-#define ISP_HWID 44
-#define DBGU_IO_HWID 45
-#define DF_HWID 46
-#define CLKB_HWID 47
-#define FCH_HWID 48
-#define DFX_DAP_HWID 49
-#define L1IMU_PCIE_HWID 50
-#define L1IMU_NBIF_HWID 51
-#define L1IMU_IOAGR_HWID 52
-#define L1IMU3_HWID 53
-#define L1IMU4_HWID 54
-#define L1IMU5_HWID 55
-#define L1IMU6_HWID 56
-#define L1IMU7_HWID 57
-#define L1IMU8_HWID 58
-#define L1IMU9_HWID 59
-#define L1IMU10_HWID 60
-#define L1IMU11_HWID 61
-#define L1IMU12_HWID 62
-#define L1IMU13_HWID 63
-#define L1IMU14_HWID 64
-#define L1IMU15_HWID 65
-#define WAFLC_HWID 66
-#define FCH_USB_PD_HWID 67
-#define PCIE_HWID 70
-#define PCS_HWID 80
-#define DDCL_HWID 89
-#define SST_HWID 90
-#define IOAGR_HWID 100
-#define NBIF_HWID 108
-#define IOAPIC_HWID 124
-#define SYSTEMHUB_HWID 128
-#define NTBCCP_HWID 144
-#define UMC_HWID 150
-#define SATA_HWID 168
-#define USB_HWID 170
-#define CCXSEC_HWID 176
-#define XGBE_HWID 216
-#define MP0_HWID 254
+#ifndef _vega10_ip_offset_HEADER
+#define _vega10_ip_offset_HEADER
#define MAX_INSTANCE 5
#define MAX_SEGMENT 5
-
-struct IP_BASE_INSTANCE
+struct IP_BASE_INSTANCE
{
unsigned int segment[MAX_SEGMENT];
};
-
-struct IP_BASE
+
+struct IP_BASE
{
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
-static const struct IP_BASE NBIF_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE NBIF_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP2_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MP2_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
-static const struct IP_BASE VCN_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE VCN_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; //note: GLN does not use the first segment
-static const struct IP_BASE DBGU_BASE = { { { { 0x00000180, 0x000001A0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DBGU_BASE = { { { { 0x00000180, 0x000001A0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE DBGU_NBIO_BASE = { { { { 0x000001C0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DBGU_NBIO_BASE = { { { { 0x000001C0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE DBGU_IO_BASE = { { { { 0x000001E0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DBGU_IO_BASE = { { { { 0x000001E0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE DFX_DAP_BASE = { { { { 0x000005A0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DFX_DAP_BASE = { { { { 0x000005A0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE DFX_BASE = { { { { 0x00000580, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE DFX_BASE = { { { { 0x00000580, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // this file does not contain registers
-static const struct IP_BASE ISP_BASE = { { { { 0x00018000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE ISP_BASE = { { { { 0x00018000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE SYSTEMHUB_BASE = { { { { 0x00000EA0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SYSTEMHUB_BASE = { { { { 0x00000EA0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } }; // not exist
-static const struct IP_BASE L2IMU_BASE = { { { { 0x00007DC0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE L2IMU_BASE = { { { { 0x00007DC0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE IOHC_BASE = { { { { 0x00010000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE IOHC_BASE = { { { { 0x00010000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE VCE_BASE = { { { { 0x00007E00, 0x00048800, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE VCE_BASE = { { { { 0x00007E00, 0x00048800, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA1_BASE = { { { { 0x00001460, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SDMA1_BASE = { { { { 0x00001460, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE PWR_BASE = { { { { 0x00016A00, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+static const struct IP_BASE PWR_BASE = { { { { 0x00016A00, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0, 0, 0, 0 } },
- { { 0x00016E00, 0, 0, 0, 0 } },
- { { 0x00017000, 0, 0, 0, 0 } },
- { { 0x00017200, 0, 0, 0, 0 } },
- { { 0x00017E00, 0, 0, 0, 0 } } } };
-static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0 } },
+ { { 0x00016E00, 0, 0, 0, 0 } },
+ { { 0x00017000, 0, 0, 0, 0 } },
+ { { 0x00017200, 0, 0, 0, 0 } },
+ { { 0x00017E00, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
@@ -1337,7 +1261,5 @@ static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0 } },
#define FUSE_BASE__INST4_SEG2 0
#define FUSE_BASE__INST4_SEG3 0
#define FUSE_BASE__INST4_SEG4 0
-
-
#endif
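A minimal sketch (illustrative; the macro name and register symbol are
assumptions, not from this header) of how these per-IP base tables are
typically consumed: instance and segment indices select a base, and the
per-register offset from the matching register-offset header is added on top.

#define EXAMPLE_REG_ADDR(ip, inst, seg, reg_offset) \
	((ip##_BASE).instance[inst].segment[seg] + (reg_offset))

	/* e.g. a GC register in instance 0, segment 0 (mmSOME_GC_REG assumed): */
	/* uint32_t addr = EXAMPLE_REG_ADDR(GC, 0, 0, mmSOME_GC_REG); */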
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
index 2023482..717fbae 100644
--- a/drivers/gpu/drm/amd/include/vi_structs.h
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -153,6 +153,8 @@ struct vi_sdma_mqd {
uint32_t reserved_125;
uint32_t reserved_126;
uint32_t reserved_127;
+ uint32_t sdma_engine_id;
+ uint32_t sdma_queue_id;
};
struct vi_mqd {
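A minimal sketch (an assumption, not from the patch) of how the two new
vi_sdma_mqd fields might be populated when an SDMA user queue's MQD is
initialized; the MQD pointer and queue properties are illustrative names.

	struct vi_sdma_mqd *m = mqd_buf;	/* hypothetical MQD backing memory */

	m->sdma_engine_id = q_props->sdma_engine_id;	/* owning SDMA engine */
	m->sdma_queue_id  = q_props->sdma_queue_id;	/* ring slot on that engine */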
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index c7e3412..7e8ad30 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -27,79 +27,76 @@
#include <linux/slab.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
-#include "pp_instance.h"
#include "power_state.h"
+#include "amdgpu.h"
+#include "hwmgr.h"
#define PP_DPM_DISABLED 0xCCCC
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
- void *input, void *output);
+ enum amd_pm_state_type *user_state);
-static inline int pp_check(struct pp_instance *handle)
-{
- if (handle == NULL)
- return -EINVAL;
+static const struct amd_pm_funcs pp_dpm_funcs;
- if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL)
+static inline int pp_check(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
return -EINVAL;
- if (handle->pm_en == 0)
- return PP_DPM_DISABLED;
-
- if (handle->hwmgr->hwmgr_func == NULL)
+ if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
return PP_DPM_DISABLED;
return 0;
}
-static int amd_powerplay_create(struct amd_pp_init *pp_init,
- void **handle)
+static int amd_powerplay_create(struct amdgpu_device *adev)
{
- struct pp_instance *instance;
+ struct pp_hwmgr *hwmgr;
- if (pp_init == NULL || handle == NULL)
+ if (adev == NULL)
return -EINVAL;
- instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
- if (instance == NULL)
+ hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
+ if (hwmgr == NULL)
return -ENOMEM;
- instance->chip_family = pp_init->chip_family;
- instance->chip_id = pp_init->chip_id;
- instance->pm_en = pp_init->pm_en;
- instance->feature_mask = pp_init->feature_mask;
- instance->device = pp_init->device;
- mutex_init(&instance->pp_lock);
- *handle = instance;
+ hwmgr->adev = adev;
+ hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
+ hwmgr->device = amdgpu_cgs_create_device(adev);
+ mutex_init(&hwmgr->smu_lock);
+ hwmgr->chip_family = adev->family;
+ hwmgr->chip_id = adev->asic_type;
+ hwmgr->feature_mask = amdgpu_pp_feature_mask;
+ adev->powerplay.pp_handle = hwmgr;
+ adev->powerplay.pp_funcs = &pp_dpm_funcs;
return 0;
}
-static int amd_powerplay_destroy(void *handle)
+
+static int amd_powerplay_destroy(struct amdgpu_device *adev)
{
- struct pp_instance *instance = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- kfree(instance->hwmgr->hardcode_pp_table);
- instance->hwmgr->hardcode_pp_table = NULL;
+ kfree(hwmgr->hardcode_pp_table);
+ hwmgr->hardcode_pp_table = NULL;
- kfree(instance->hwmgr);
- instance->hwmgr = NULL;
+ kfree(hwmgr);
+ hwmgr = NULL;
- kfree(instance);
- instance = NULL;
return 0;
}
static int pp_early_init(void *handle)
{
int ret;
- struct pp_instance *pp_handle = NULL;
+ struct amdgpu_device *adev = handle;
- pp_handle = cgs_register_pp_handle(handle, amd_powerplay_create);
+ ret = amd_powerplay_create(adev);
- if (!pp_handle)
- return -EINVAL;
+ if (ret != 0)
+ return ret;
- ret = hwmgr_early_init(pp_handle);
+ ret = hwmgr_early_init(adev->powerplay.pp_handle);
if (ret)
return -EINVAL;
@@ -108,71 +105,73 @@ static int pp_early_init(void *handle)
static int pp_sw_init(void *handle)
{
- struct pp_hwmgr *hwmgr;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret >= 0) {
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->smumgr_funcs->smu_init == NULL)
return -EINVAL;
ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
- pr_info("amdgpu: powerplay sw initialized\n");
+ phm_register_irq_handlers(hwmgr);
+
+ pr_debug("amdgpu: powerplay sw initialized\n");
}
+
return ret;
}
static int pp_sw_fini(void *handle)
{
- struct pp_hwmgr *hwmgr;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret >= 0) {
- hwmgr = pp_handle->hwmgr;
+ if (hwmgr->smumgr_funcs->smu_fini != NULL)
+ hwmgr->smumgr_funcs->smu_fini(hwmgr);
+ }
- if (hwmgr->smumgr_funcs->smu_fini == NULL)
- return -EINVAL;
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ amdgpu_ucode_fini_bo(adev);
- ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
- }
- return ret;
+ return 0;
}
static int pp_hw_init(void *handle)
{
int ret = 0;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- struct pp_hwmgr *hwmgr;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- ret = pp_check(pp_handle);
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ amdgpu_ucode_init_bo(adev);
- if (ret >= 0) {
- hwmgr = pp_handle->hwmgr;
+ ret = pp_check(hwmgr);
+ if (ret >= 0) {
if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
+ if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
pr_err("smc start failed\n");
- hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
- return -EINVAL;;
+ hwmgr->smumgr_funcs->smu_fini(hwmgr);
+ return -EINVAL;
}
if (ret == PP_DPM_DISABLED)
goto exit;
- ret = hwmgr_hw_init(pp_handle);
+ ret = hwmgr_hw_init(hwmgr);
if (ret)
goto exit;
}
return ret;
exit:
- pp_handle->pm_en = 0;
+ hwmgr->pm_en = 0;
cgs_notify_dpm_enabled(hwmgr->device, false);
return 0;
@@ -180,32 +179,37 @@ exit:
static int pp_hw_fini(void *handle)
{
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret == 0)
- hwmgr_hw_fini(pp_handle);
+ hwmgr_hw_fini(hwmgr);
return 0;
}
static int pp_late_init(void *handle)
{
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
+
if (ret == 0)
- pp_dpm_dispatch_tasks(pp_handle,
- AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
+ pp_dpm_dispatch_tasks(hwmgr,
+ AMD_PP_TASK_COMPLETE_INIT, NULL);
return 0;
}
static void pp_late_fini(void *handle)
{
- amd_powerplay_destroy(handle);
+ struct amdgpu_device *adev = handle;
+
+ amd_powerplay_destroy(adev);
}
@@ -227,17 +231,15 @@ static int pp_sw_reset(void *handle)
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
@@ -250,44 +252,49 @@ static int pp_set_powergating_state(void *handle,
static int pp_suspend(void *handle)
{
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret == 0)
- hwmgr_hw_suspend(pp_handle);
+ hwmgr_hw_suspend(hwmgr);
return 0;
}
static int pp_resume(void *handle)
{
- struct pp_hwmgr *hwmgr;
+ struct amdgpu_device *adev = handle;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret < 0)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
+ if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
pr_err("smc start failed\n");
- hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
+ hwmgr->smumgr_funcs->smu_fini(hwmgr);
return -EINVAL;
}
if (ret == PP_DPM_DISABLED)
return 0;
- return hwmgr_hw_resume(pp_handle);
+ return hwmgr_hw_resume(hwmgr);
+}
+
+static int pp_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
}
-const struct amd_ip_funcs pp_ip_funcs = {
+static const struct amd_ip_funcs pp_ip_funcs = {
.name = "powerplay",
.early_init = pp_early_init,
.late_init = pp_late_init,
@@ -301,10 +308,19 @@ const struct amd_ip_funcs pp_ip_funcs = {
.is_idle = pp_is_idle,
.wait_for_idle = pp_wait_for_idle,
.soft_reset = pp_sw_reset,
- .set_clockgating_state = NULL,
+ .set_clockgating_state = pp_set_clockgating_state,
.set_powergating_state = pp_set_powergating_state,
};
+const struct amdgpu_ip_block_version pp_smu_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &pp_ip_funcs,
+};
+
static int pp_dpm_load_fw(void *handle)
{
return 0;
@@ -317,17 +333,14 @@ static int pp_dpm_fw_loading_complete(void *handle)
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
@@ -375,186 +388,158 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
static int pp_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (level == hwmgr->dpm_level)
return 0;
- if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
- pr_info("%s was not implemented.\n", __func__);
- return 0;
- }
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
pp_dpm_en_umd_pstate(hwmgr, &level);
hwmgr->request_dpm_level = level;
- hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
- ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
- if (!ret)
- hwmgr->dpm_level = hwmgr->request_dpm_level;
+ hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ mutex_unlock(&hwmgr->smu_lock);
- mutex_unlock(&pp_handle->pp_lock);
return 0;
}
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
void *handle)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
enum amd_dpm_forced_level level;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
level = hwmgr->dpm_level;
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return level;
}
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
uint32_t clk = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_sclk == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return clk;
}
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
uint32_t clk = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_mclk == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return clk;
}
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
pr_info("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
}
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
pr_info("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
}
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
- void *input, void *output)
+ enum amd_pm_state_type *user_state)
{
int ret = 0;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr_handle_task(pp_handle, task_id, input, output);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr_handle_task(hwmgr, task_id, user_state);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
- struct pp_hwmgr *hwmgr;
+ struct pp_hwmgr *hwmgr = handle;
struct pp_power_state *state;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
enum amd_pm_state_type pm_type;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->current_ps == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
state = hwmgr->current_ps;
@@ -575,168 +560,129 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return pm_type;
}
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
}
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
uint32_t mode = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return mode;
}
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
- mutex_unlock(&pp_handle->pp_lock);
- return ret;
-}
-
-static int pp_dpm_get_temperature(void *handle)
-{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- int ret = 0;
-
- ret = pp_check(pp_handle);
-
- if (ret)
- return ret;
-
- hwmgr = pp_handle->hwmgr;
-
- if (hwmgr->hwmgr_func->get_temperature == NULL) {
- pr_info("%s was not implemented.\n", __func__);
- return 0;
- }
- mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_temperature(hwmgr);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_pp_num_states(void *handle,
struct pp_states_info *data)
{
- struct pp_hwmgr *hwmgr;
+ struct pp_hwmgr *hwmgr = handle;
int i;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ memset(data, 0, sizeof(*data));
+
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->ps == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
data->nums = hwmgr->num_ps;
@@ -760,53 +706,68 @@ static int pp_dpm_get_pp_num_states(void *handle,
data->states[i] = POWER_STATE_TYPE_DEFAULT;
}
}
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_dpm_get_pp_table(void *handle, char **table)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
int size = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (!hwmgr->soft_pp_table)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
*table = (char *)hwmgr->soft_pp_table;
size = hwmgr->soft_pp_table_size;
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return size;
}
+static int amd_powerplay_reset(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret;
+
+ ret = pp_check(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = hwmgr_hw_fini(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = hwmgr_hw_init(hwmgr);
+ if (ret)
+ return ret;
+
+ return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
+}
+
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
if (!hwmgr->hardcode_pp_table) {
hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size,
GFP_KERNEL);
if (!hwmgr->hardcode_pp_table) {
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return -ENOMEM;
}
}
@@ -814,7 +775,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
memcpy(hwmgr->hardcode_pp_table, buf, size);
hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
ret = amd_powerplay_reset(handle);
if (ret)
@@ -832,436 +793,396 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
static int pp_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->force_clock_level == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
- hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_sclk_od(void *handle)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_get_mclk_od(void *handle)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
static int pp_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
-
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
+ if (value == NULL)
+ return -EINVAL;
- if (hwmgr->hwmgr_func->read_sensor == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
+ *((uint32_t *)value) = hwmgr->pstate_sclk;
return 0;
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
+ *((uint32_t *)value) = hwmgr->pstate_mclk;
+ return 0;
+ default:
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
+ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
}
-
- mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
- mutex_unlock(&pp_handle->pp_lock);
-
- return ret;
}
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return NULL;
- hwmgr = pp_handle->hwmgr;
-
if (hwmgr && idx < hwmgr->num_vce_state_tables)
return &hwmgr->vce_states[idx];
return NULL;
}
-static int pp_dpm_reset_power_profile_state(void *handle,
- struct amd_pp_profile *request)
+static int pp_get_power_profile_mode(void *handle, char *buf)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
- if (!request || pp_check(pp_handle))
+ if (!buf || pp_check(hwmgr))
return -EINVAL;
- hwmgr = pp_handle->hwmgr;
-
- if (hwmgr->hwmgr_func->set_power_profile_state == NULL) {
+ if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return snprintf(buf, PAGE_SIZE, "\n");
}
- if (request->type == AMD_PP_GFX_PROFILE) {
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- return hwmgr->hwmgr_func->set_power_profile_state(hwmgr,
- &hwmgr->gfx_power_profile);
- } else if (request->type == AMD_PP_COMPUTE_PROFILE) {
- hwmgr->compute_power_profile =
- hwmgr->default_compute_power_profile;
- return hwmgr->hwmgr_func->set_power_profile_state(hwmgr,
- &hwmgr->compute_power_profile);
- } else
- return -EINVAL;
+ return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}
-static int pp_dpm_get_power_profile_state(void *handle,
- struct amd_pp_profile *query)
+static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = -EINVAL;
- if (!query || pp_check(pp_handle))
+ if (pp_check(hwmgr))
return -EINVAL;
- hwmgr = pp_handle->hwmgr;
+ if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+ mutex_lock(&hwmgr->smu_lock);
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
+ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+}
- if (query->type == AMD_PP_GFX_PROFILE)
- memcpy(query, &hwmgr->gfx_power_profile,
- sizeof(struct amd_pp_profile));
- else if (query->type == AMD_PP_COMPUTE_PROFILE)
- memcpy(query, &hwmgr->compute_power_profile,
- sizeof(struct amd_pp_profile));
- else
+static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (pp_check(hwmgr))
return -EINVAL;
- return 0;
+ if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
-static int pp_dpm_set_power_profile_state(void *handle,
- struct amd_pp_profile *request)
+static int pp_dpm_switch_power_profile(void *handle,
+ enum PP_SMC_POWER_PROFILE type, bool en)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- int ret = -1;
+ struct pp_hwmgr *hwmgr = handle;
+ long workload;
+ uint32_t index;
- if (!request || pp_check(pp_handle))
+ if (pp_check(hwmgr))
return -EINVAL;
- hwmgr = pp_handle->hwmgr;
-
- if (hwmgr->hwmgr_func->set_power_profile_state == NULL) {
+ if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return -EINVAL;
}
- if (request->min_sclk ||
- request->min_mclk ||
- request->activity_threshold ||
- request->up_hyst ||
- request->down_hyst) {
- if (request->type == AMD_PP_GFX_PROFILE)
- memcpy(&hwmgr->gfx_power_profile, request,
- sizeof(struct amd_pp_profile));
- else if (request->type == AMD_PP_COMPUTE_PROFILE)
- memcpy(&hwmgr->compute_power_profile, request,
- sizeof(struct amd_pp_profile));
- else
- return -EINVAL;
+ if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
+ return -EINVAL;
+
+ mutex_lock(&hwmgr->smu_lock);
- if (request->type == hwmgr->current_power_profile)
- ret = hwmgr->hwmgr_func->set_power_profile_state(
- hwmgr,
- request);
+ if (!en) {
+ hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
+ index = fls(hwmgr->workload_mask);
+ index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
+ workload = hwmgr->workload_setting[index];
} else {
- /* set power profile if it exists */
- switch (request->type) {
- case AMD_PP_GFX_PROFILE:
- ret = hwmgr->hwmgr_func->set_power_profile_state(
- hwmgr,
- &hwmgr->gfx_power_profile);
- break;
- case AMD_PP_COMPUTE_PROFILE:
- ret = hwmgr->hwmgr_func->set_power_profile_state(
- hwmgr,
- &hwmgr->compute_power_profile);
- break;
- default:
- return -EINVAL;
- }
+ hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
+ index = fls(hwmgr->workload_mask);
+ index = index <= Workload_Policy_Max ? index - 1 : 0;
+ workload = hwmgr->workload_setting[index];
}
- if (!ret)
- hwmgr->current_power_profile = request->type;
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
}
-static int pp_dpm_switch_power_profile(void *handle,
- enum amd_pp_profile_type type)
+static int pp_dpm_notify_smu_memory_info(void *handle,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size)
{
- struct pp_hwmgr *hwmgr;
- struct amd_pp_profile request = {0};
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
- if (pp_check(pp_handle))
- return -EINVAL;
+ ret = pp_check(hwmgr);
- hwmgr = pp_handle->hwmgr;
+ if (ret)
+ return ret;
- if (hwmgr->current_power_profile != type) {
- request.type = type;
- pp_dpm_set_power_profile_state(handle, &request);
+ if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
}
- return 0;
-}
+ mutex_lock(&hwmgr->smu_lock);
-const struct amd_pm_funcs pp_dpm_funcs = {
- .get_temperature = pp_dpm_get_temperature,
- .load_firmware = pp_dpm_load_fw,
- .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
- .force_performance_level = pp_dpm_force_performance_level,
- .get_performance_level = pp_dpm_get_performance_level,
- .get_current_power_state = pp_dpm_get_current_power_state,
- .get_sclk = pp_dpm_get_sclk,
- .get_mclk = pp_dpm_get_mclk,
- .powergate_vce = pp_dpm_powergate_vce,
- .powergate_uvd = pp_dpm_powergate_uvd,
- .dispatch_tasks = pp_dpm_dispatch_tasks,
- .set_fan_control_mode = pp_dpm_set_fan_control_mode,
- .get_fan_control_mode = pp_dpm_get_fan_control_mode,
- .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
- .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
- .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
- .get_pp_num_states = pp_dpm_get_pp_num_states,
- .get_pp_table = pp_dpm_get_pp_table,
- .set_pp_table = pp_dpm_set_pp_table,
- .force_clock_level = pp_dpm_force_clock_level,
- .print_clock_levels = pp_dpm_print_clock_levels,
- .get_sclk_od = pp_dpm_get_sclk_od,
- .set_sclk_od = pp_dpm_set_sclk_od,
- .get_mclk_od = pp_dpm_get_mclk_od,
- .set_mclk_od = pp_dpm_set_mclk_od,
- .read_sensor = pp_dpm_read_sensor,
- .get_vce_clock_state = pp_dpm_get_vce_clock_state,
- .reset_power_profile_state = pp_dpm_reset_power_profile_state,
- .get_power_profile_state = pp_dpm_get_power_profile_state,
- .set_power_profile_state = pp_dpm_set_power_profile_state,
- .switch_power_profile = pp_dpm_switch_power_profile,
- .set_clockgating_by_smu = pp_set_clockgating_by_smu,
-};
+ ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
+ virtual_addr_hi, mc_addr_low, mc_addr_hi,
+ size);
+
+ mutex_unlock(&hwmgr->smu_lock);
-int amd_powerplay_reset(void *handle)
+ return ret;
+}
+
+static int pp_set_power_limit(void *handle, uint32_t limit)
{
- struct pp_instance *instance = (struct pp_instance *)handle;
- int ret;
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
- ret = pp_check(instance);
- if (ret)
- return ret;
+ ret = pp_check(hwmgr);
- ret = pp_hw_fini(instance);
if (ret)
return ret;
- ret = hwmgr_hw_init(instance);
+ if (hwmgr->hwmgr_func->set_power_limit == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (limit == 0)
+ limit = hwmgr->default_power_limit;
+
+ if (limit > hwmgr->default_power_limit)
+ return -EINVAL;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
+ hwmgr->power_limit = limit;
+ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
+}
+
+static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ ret = pp_check(hwmgr);
+
if (ret)
return ret;
- return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
-}
+ if (limit == NULL)
+ return -EINVAL;
-/* export this function to DAL */
+ mutex_lock(&hwmgr->smu_lock);
+
+ if (default_limit)
+ *limit = hwmgr->default_power_limit;
+ else
+ *limit = hwmgr->power_limit;
-int amd_powerplay_display_configuration_change(void *handle,
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+}
+
+static int pp_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *display_config)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
phm_store_dal_configuration_data(hwmgr, display_config);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
}
-int amd_powerplay_get_display_power_level(void *handle,
+static int pp_get_display_power_level(void *handle,
struct amd_pp_simple_clock_info *output)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (output == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_get_dal_power_level(hwmgr, output);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
-int amd_powerplay_get_current_clocks(void *handle,
+static int pp_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
{
struct amd_pp_simple_clock_info simple_clocks;
struct pp_clock_info hw_clocks;
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
phm_get_dal_power_level(hwmgr, &simple_clocks);
@@ -1275,7 +1196,7 @@ int amd_powerplay_get_current_clocks(void *handle,
if (ret) {
pr_info("Error in phm_get_clock_info \n");
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return -EINVAL;
}
@@ -1295,149 +1216,197 @@ int amd_powerplay_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
}
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
}
-int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
+static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (clocks == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_get_clock_by_type(hwmgr, type, clocks);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
-int amd_powerplay_get_clock_by_type_with_latency(void *handle,
+static int pp_get_clock_by_type_with_latency(void *handle,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_latency *clocks)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
if (!clocks)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
-int amd_powerplay_get_clock_by_type_with_voltage(void *handle,
+static int pp_get_clock_by_type_with_voltage(void *handle,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
if (!clocks)
return -EINVAL;
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
-int amd_powerplay_set_watermarks_for_clocks_ranges(void *handle,
+static int pp_set_watermarks_for_clocks_ranges(void *handle,
struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
if (!wm_with_clock_ranges)
return -EINVAL;
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
wm_with_clock_ranges);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
-int amd_powerplay_display_clock_voltage_request(void *handle,
+static int pp_display_clock_voltage_request(void *handle,
struct pp_display_clock_request *clock)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
if (!clock)
return -EINVAL;
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
ret = phm_display_clock_voltage_request(hwmgr, clock);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
-int amd_powerplay_get_display_mode_validation_clocks(void *handle,
+static int pp_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *clocks)
{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(pp_handle);
+ ret = pp_check(hwmgr);
if (ret)
return ret;
- hwmgr = pp_handle->hwmgr;
-
if (clocks == NULL)
return -EINVAL;
- mutex_lock(&pp_handle->pp_lock);
+ mutex_lock(&hwmgr->smu_lock);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
ret = phm_get_max_high_clocks(hwmgr, clocks);
- mutex_unlock(&pp_handle->pp_lock);
+ mutex_unlock(&hwmgr->smu_lock);
return ret;
}
+static int pp_set_mmhub_powergating_by_smu(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ ret = pp_check(hwmgr);
+
+ if (ret)
+ return ret;
+
+ if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
+}
+
+static const struct amd_pm_funcs pp_dpm_funcs = {
+ .load_firmware = pp_dpm_load_fw,
+ .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
+ .force_performance_level = pp_dpm_force_performance_level,
+ .get_performance_level = pp_dpm_get_performance_level,
+ .get_current_power_state = pp_dpm_get_current_power_state,
+ .powergate_vce = pp_dpm_powergate_vce,
+ .powergate_uvd = pp_dpm_powergate_uvd,
+ .dispatch_tasks = pp_dpm_dispatch_tasks,
+ .set_fan_control_mode = pp_dpm_set_fan_control_mode,
+ .get_fan_control_mode = pp_dpm_get_fan_control_mode,
+ .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
+ .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
+ .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
+ .get_pp_num_states = pp_dpm_get_pp_num_states,
+ .get_pp_table = pp_dpm_get_pp_table,
+ .set_pp_table = pp_dpm_set_pp_table,
+ .force_clock_level = pp_dpm_force_clock_level,
+ .print_clock_levels = pp_dpm_print_clock_levels,
+ .get_sclk_od = pp_dpm_get_sclk_od,
+ .set_sclk_od = pp_dpm_set_sclk_od,
+ .get_mclk_od = pp_dpm_get_mclk_od,
+ .set_mclk_od = pp_dpm_set_mclk_od,
+ .read_sensor = pp_dpm_read_sensor,
+ .get_vce_clock_state = pp_dpm_get_vce_clock_state,
+ .switch_power_profile = pp_dpm_switch_power_profile,
+ .set_clockgating_by_smu = pp_set_clockgating_by_smu,
+ .notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
+ .get_power_profile_mode = pp_get_power_profile_mode,
+ .set_power_profile_mode = pp_set_power_profile_mode,
+ .odn_edit_dpm_table = pp_odn_edit_dpm_table,
+ .set_power_limit = pp_set_power_limit,
+ .get_power_limit = pp_get_power_limit,
+/* export to DC */
+ .get_sclk = pp_dpm_get_sclk,
+ .get_mclk = pp_dpm_get_mclk,
+ .display_configuration_change = pp_display_configuration_change,
+ .get_display_power_level = pp_get_display_power_level,
+ .get_current_clocks = pp_get_current_clocks,
+ .get_clock_by_type = pp_get_clock_by_type,
+ .get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
+ .get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
+ .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
+ .display_clock_voltage_request = pp_display_clock_voltage_request,
+ .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
+ .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index a212c27..faf9c88 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -24,14 +24,16 @@
# It provides the hardware management services for the driver.
HARDWARE_MGR = hwmgr.o processpptables.o \
- hardwaremanager.o pp_acpi.o cz_hwmgr.o \
- cz_clockpowergating.o pppcielanes.o\
+ hardwaremanager.o smu8_hwmgr.o \
+ pppcielanes.o\
process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \
smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
smu7_clockpowergating.o \
vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
- vega10_thermal.o rv_hwmgr.o pp_psm.o\
- pp_overdriver.o
+ vega10_thermal.o smu10_hwmgr.o pp_psm.o\
+ vega12_processpptables.o vega12_hwmgr.o \
+ vega12_powertune.o vega12_thermal.o \
+ pp_overdriver.o smu_helper.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
deleted file mode 100644
index 44de087..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "cz_clockpowergating.h"
-#include "cz_ppsmc.h"
-
-/* PhyID -> Status Mapping in DDI_PHY_GEN_STATUS
- 0 GFX0L (3:0), (27:24),
- 1 GFX0H (7:4), (31:28),
- 2 GFX1L (3:0), (19:16),
- 3 GFX1H (7:4), (23:20),
- 4 DDIL (3:0), (11: 8),
- 5 DDIH (7:4), (15:12),
- 6 DDI2L (3:0), ( 3: 0),
- 7 DDI2H (7:4), ( 7: 4),
-*/
-#define DDI_PHY_GEN_STATUS_VAL(phyID) (1 << ((3 - ((phyID & 0x07)/2))*8 + (phyID & 0x01)*4))
-#define IS_PHY_ID_USED_BY_PLL(PhyID) (((0xF3 & (1 << PhyID)) & 0xFF) ? true : false)
-
-
-int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
-{
- int ret = 0;
-
- switch (block) {
- case PHM_AsicBlock_UVD_MVC:
- case PHM_AsicBlock_UVD:
- case PHM_AsicBlock_UVD_HD:
- case PHM_AsicBlock_UVD_SD:
- if (gating == PHM_ClockGateSetting_StaticOff)
- ret = cz_dpm_powerdown_uvd(hwmgr);
- else
- ret = cz_dpm_powerup_uvd(hwmgr);
- break;
- case PHM_AsicBlock_GFX:
- default:
- break;
- }
-
- return ret;
-}
-
-
-bool cz_phm_is_safe_for_asic_block(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, enum PHM_AsicBlock block)
-{
- return true;
-}
-
-
-int cz_phm_enable_disable_gfx_power_gating(struct pp_hwmgr *hwmgr, bool enable)
-{
- return 0;
-}
-
-int cz_phm_smu_power_up_down_pcie(struct pp_hwmgr *hwmgr, uint32_t target, bool up, uint32_t args)
-{
- /* TODO */
- return 0;
-}
-
-int cz_phm_initialize_display_phy_access(struct pp_hwmgr *hwmgr, bool initialize, bool accesshw)
-{
- /* TODO */
- return 0;
-}
-
-int cz_phm_get_display_phy_access_info(struct pp_hwmgr *hwmgr)
-{
- /* TODO */
- return 0;
-}
-
-int cz_phm_gate_unused_display_phys(struct pp_hwmgr *hwmgr)
-{
- /* TODO */
- return 0;
-}
-
-int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
-{
- /* TODO */
- return 0;
-}
-
-int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- uint32_t dpm_features = 0;
-
- if (enable &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM)) {
- cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
- dpm_features |= UVD_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
- } else {
- dpm_features |= UVD_DPM_MASK;
- cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
- }
- return 0;
-}
-
-int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- uint32_t dpm_features = 0;
-
- if (enable && phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEDPM)) {
- cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
- dpm_features |= VCE_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
- } else {
- dpm_features |= VCE_DPM_MASK;
- cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
- }
-
- return 0;
-}
-
-
-void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
- cz_hwmgr->uvd_power_gated = bgate;
-
- if (bgate) {
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
- cz_dpm_update_uvd_dpm(hwmgr, true);
- cz_dpm_powerdown_uvd(hwmgr);
- } else {
- cz_dpm_powerup_uvd(hwmgr);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- cz_dpm_update_uvd_dpm(hwmgr, false);
- }
-
-}
-
-void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
- if (bgate) {
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
- cz_enable_disable_vce_dpm(hwmgr, false);
- cz_dpm_powerdown_vce(hwmgr);
- cz_hwmgr->vce_power_gated = true;
- } else {
- cz_dpm_powerup_vce(hwmgr);
- cz_hwmgr->vce_power_gated = false;
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- cz_dpm_update_vce_dpm(hwmgr);
- cz_enable_disable_vce_dpm(hwmgr, true);
- }
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
deleted file mode 100644
index 92f707b..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _CZ_CLOCK_POWER_GATING_H_
-#define _CZ_CLOCK_POWER_GATING_H_
-
-#include "cz_hwmgr.h"
-#include "pp_asicblocks.h"
-
-extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
-#endif /* _CZ_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 623cff9..ae2e933 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -79,6 +79,11 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
bool enabled;
PHM_FUNC_CHECK(hwmgr);
+ if (smum_is_dpm_running(hwmgr)) {
+ pr_info("dpm has been enabled\n");
+ return 0;
+ }
+
if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
@@ -96,6 +101,11 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
+ if (!smum_is_dpm_running(hwmgr)) {
+ pr_info("dpm has been disabled\n");
+ return 0;
+ }
+
if (hwmgr->hwmgr_func->dynamic_state_management_disable)
ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
@@ -112,22 +122,8 @@ int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level
PHM_FUNC_CHECK(hwmgr);
- if (hwmgr->hwmgr_func->force_dpm_level != NULL) {
+ if (hwmgr->hwmgr_func->force_dpm_level != NULL)
ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
- if (ret)
- return ret;
-
- if (hwmgr->hwmgr_func->set_power_profile_state) {
- if (hwmgr->current_power_profile == AMD_PP_GFX_PROFILE)
- ret = hwmgr->hwmgr_func->set_power_profile_state(
- hwmgr,
- &hwmgr->gfx_power_profile);
- else if (hwmgr->current_power_profile == AMD_PP_COMPUTE_PROFILE)
- ret = hwmgr->hwmgr_func->set_power_profile_state(
- hwmgr,
- &hwmgr->compute_power_profile);
- }
- }
return ret;
}
@@ -206,12 +202,12 @@ int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr);
}
-int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
+int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (hwmgr->hwmgr_func->register_internal_thermal_interrupt != NULL)
- return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info);
+ if (hwmgr->hwmgr_func->register_irq_handlers != NULL)
+ return hwmgr->hwmgr_func->register_irq_handlers(hwmgr);
return 0;
}
@@ -220,26 +216,27 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
* Initializes the thermal controller subsystem.
*
* @param pHwMgr the address of the powerplay hardware manager.
-* @param pTemperatureRange the address of the structure holding the temperature range.
* @exception PP_Result_Failed if any of the parameters is NULL, otherwise the return value from the dispatcher.
*/
-int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range)
+int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
{
- struct PP_TemperatureRange range;
-
- if (temperature_range == NULL) {
- range.max = TEMP_RANGE_MAX;
- range.min = TEMP_RANGE_MIN;
- } else {
- range.max = temperature_range->max;
- range.min = temperature_range->min;
- }
+ int ret = 0;
+ struct PP_TemperatureRange range = {TEMP_RANGE_MIN, TEMP_RANGE_MAX};
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ if (hwmgr->hwmgr_func->get_thermal_temperature_range)
+ hwmgr->hwmgr_func->get_thermal_temperature_range(
+ hwmgr, &range);
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalController)
&& hwmgr->hwmgr_func->start_thermal_controller != NULL)
- return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);
+ ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);
- return 0;
+ adev->pm.dpm.thermal.min_temp = range.min;
+ adev->pm.dpm.thermal.max_temp = range.max;
+
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index ce59e0e..4298205 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -30,23 +30,26 @@
#include <drm/amdgpu_drm.h>
#include "power_state.h"
#include "hwmgr.h"
-#include "pppcielanes.h"
-#include "ppatomctrl.h"
#include "ppsmc.h"
-#include "pp_acpi.h"
#include "amd_acpi.h"
#include "pp_psm.h"
extern const struct pp_smumgr_func ci_smu_funcs;
-extern const struct pp_smumgr_func cz_smu_funcs;
+extern const struct pp_smumgr_func smu8_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
-extern const struct pp_smumgr_func rv_smu_funcs;
+extern const struct pp_smumgr_func vega12_smu_funcs;
+extern const struct pp_smumgr_func smu10_smu_funcs;
+
+extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
-extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
@@ -55,104 +58,37 @@ static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
-uint8_t convert_to_vid(uint16_t vddc)
-{
- return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
-static int phm_get_pci_bus_devfn(struct pp_hwmgr *hwmgr,
- struct cgs_system_info *sys_info)
-{
- sys_info->size = sizeof(struct cgs_system_info);
- sys_info->info_id = CGS_SYSTEM_INFO_PCIE_BUS_DEVFN;
-
- return cgs_query_system_info(hwmgr->device, sys_info);
-}
-
-static int phm_thermal_l2h_irq(void *private_data,
- unsigned src_id, const uint32_t *iv_entry)
-{
- struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
- struct cgs_system_info sys_info = {0};
- int result;
-
- result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
- if (result)
- return -EINVAL;
-
- pr_warn("GPU over temperature range detected on PCIe %lld:%lld.%lld!\n",
- PCI_BUS_NUM(sys_info.value),
- PCI_SLOT(sys_info.value),
- PCI_FUNC(sys_info.value));
- return 0;
-}
-
-static int phm_thermal_h2l_irq(void *private_data,
- unsigned src_id, const uint32_t *iv_entry)
-{
- struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
- struct cgs_system_info sys_info = {0};
- int result;
-
- result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
- if (result)
- return -EINVAL;
-
- pr_warn("GPU under temperature range detected on PCIe %lld:%lld.%lld!\n",
- PCI_BUS_NUM(sys_info.value),
- PCI_SLOT(sys_info.value),
- PCI_FUNC(sys_info.value));
- return 0;
-}
-static int phm_ctf_irq(void *private_data,
- unsigned src_id, const uint32_t *iv_entry)
+static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
- struct cgs_system_info sys_info = {0};
- int result;
-
- result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
- if (result)
- return -EINVAL;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4;
- pr_warn("GPU Critical Temperature Fault detected on PCIe %lld:%lld.%lld!\n",
- PCI_BUS_NUM(sys_info.value),
- PCI_SLOT(sys_info.value),
- PCI_FUNC(sys_info.value));
- return 0;
+ hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING;
+ hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR;
+ hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
}
-static const struct cgs_irq_src_funcs thermal_irq_src[3] = {
- { .handler = phm_thermal_l2h_irq },
- { .handler = phm_thermal_h2l_irq },
- { .handler = phm_ctf_irq }
-};
-
-int hwmgr_early_init(struct pp_instance *handle)
+int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
-
- if (handle == NULL)
- return -EINVAL;
-
- hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
if (hwmgr == NULL)
- return -ENOMEM;
+ return -EINVAL;
- handle->hwmgr = hwmgr;
- hwmgr->device = handle->device;
- hwmgr->chip_family = handle->chip_family;
- hwmgr->chip_id = handle->chip_id;
- hwmgr->feature_mask = handle->feature_mask;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;
hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);
hwmgr->fan_ctrl_is_in_default_mode = true;
hwmgr->reload_fw = 1;
+ hwmgr_init_workload_prority(hwmgr);
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CI:
@@ -161,11 +97,13 @@ int hwmgr_early_init(struct pp_instance *handle)
hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
hwmgr->pp_table_version = PP_TABLE_V0;
+ hwmgr->od_enabled = false;
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_CZ:
- hwmgr->smumgr_funcs = &cz_smu_funcs;
- cz_init_function_pointers(hwmgr);
+ hwmgr->od_enabled = false;
+ hwmgr->smumgr_funcs = &smu8_smu_funcs;
+ smu8_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
@@ -175,6 +113,7 @@ int hwmgr_early_init(struct pp_instance *handle)
hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
hwmgr->pp_table_version = PP_TABLE_V0;
+ hwmgr->od_enabled = false;
break;
case CHIP_TONGA:
hwmgr->smumgr_funcs = &tonga_smu_funcs;
@@ -205,6 +144,10 @@ int hwmgr_early_init(struct pp_instance *handle)
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
break;
+ case CHIP_VEGA12:
+ hwmgr->smumgr_funcs = &vega12_smu_funcs;
+ vega12_hwmgr_init(hwmgr);
+ break;
default:
return -EINVAL;
}
@@ -212,8 +155,9 @@ int hwmgr_early_init(struct pp_instance *handle)
case AMDGPU_FAMILY_RV:
switch (hwmgr->chip_id) {
case CHIP_RAVEN:
- hwmgr->smumgr_funcs = &rv_smu_funcs;
- rv_init_function_pointers(hwmgr);
+ hwmgr->od_enabled = false;
+ hwmgr->smumgr_funcs = &smu10_smu_funcs;
+ smu10_init_function_pointers(hwmgr);
break;
default:
return -EINVAL;
@@ -226,16 +170,13 @@ int hwmgr_early_init(struct pp_instance *handle)
return 0;
}
-int hwmgr_hw_init(struct pp_instance *handle)
+int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
int ret = 0;
- if (handle == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
-
if (hwmgr->pptable_func == NULL ||
hwmgr->pptable_func->pptable_init == NULL ||
hwmgr->hwmgr_func->backend_init == NULL)
@@ -260,15 +201,11 @@ int hwmgr_hw_init(struct pp_instance *handle)
ret = phm_enable_dynamic_state_management(hwmgr);
if (ret)
goto err2;
- ret = phm_start_thermal_controller(hwmgr, NULL);
+ ret = phm_start_thermal_controller(hwmgr);
ret |= psm_set_performance_states(hwmgr);
if (ret)
goto err2;
- ret = phm_register_thermal_interrupt(hwmgr, &thermal_irq_src);
- if (ret)
- goto err2;
-
return 0;
err2:
if (hwmgr->hwmgr_func->backend_fini)
@@ -281,15 +218,11 @@ err:
return ret;
}
-int hwmgr_hw_fini(struct pp_instance *handle)
+int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
-
- if (handle == NULL || handle->hwmgr == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
-
phm_stop_thermal_controller(hwmgr);
psm_set_boot_states(hwmgr);
psm_adjust_power_state_dynamic(hwmgr, false, NULL);
@@ -303,15 +236,13 @@ int hwmgr_hw_fini(struct pp_instance *handle)
return psm_fini_power_state_table(hwmgr);
}
-int hwmgr_hw_suspend(struct pp_instance *handle)
+int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
int ret = 0;
- if (handle == NULL || handle->hwmgr == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
phm_disable_smc_firmware_ctf(hwmgr);
ret = psm_set_boot_states(hwmgr);
if (ret)
@@ -324,15 +255,13 @@ int hwmgr_hw_suspend(struct pp_instance *handle)
return ret;
}
-int hwmgr_hw_resume(struct pp_instance *handle)
+int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
{
- struct pp_hwmgr *hwmgr;
int ret = 0;
- if (handle == NULL || handle->hwmgr == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
ret = phm_setup_asic(hwmgr);
if (ret)
return ret;
@@ -340,7 +269,7 @@ int hwmgr_hw_resume(struct pp_instance *handle)
ret = phm_enable_dynamic_state_management(hwmgr);
if (ret)
return ret;
- ret = phm_start_thermal_controller(hwmgr, NULL);
+ ret = phm_start_thermal_controller(hwmgr);
if (ret)
return ret;
@@ -367,17 +296,14 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
}
}
-int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
- void *input, void *output)
+int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state)
{
int ret = 0;
- struct pp_hwmgr *hwmgr;
- if (handle == NULL || handle->hwmgr == NULL)
+ if (hwmgr == NULL)
return -EINVAL;
- hwmgr = handle->hwmgr;
-
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = phm_set_cpu_power_state(hwmgr);
@@ -390,17 +316,15 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
break;
case AMD_PP_TASK_ENABLE_USER_STATE:
{
- enum amd_pm_state_type ps;
enum PP_StateUILabel requested_ui_label;
struct pp_power_state *requested_ps = NULL;
- if (input == NULL) {
+ if (user_state == NULL) {
ret = -EINVAL;
break;
}
- ps = *(unsigned long *)input;
- requested_ui_label = power_state_convert(ps);
+ requested_ui_label = power_state_convert(*user_state);
ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
if (ret)
return ret;
@@ -416,468 +340,6 @@ int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
}
return ret;
}
-/**
- * Returns once the part of the register indicated by the mask has
- * reached the given value.
- */
-int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (hwmgr == NULL || hwmgr->device == NULL) {
- pr_err("Invalid Hardware Manager!");
- return -EINVAL;
- }
-
- for (i = 0; i < hwmgr->usec_timeout; i++) {
- cur_value = cgs_read_register(hwmgr->device, index);
- if ((cur_value & mask) == (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic*/
- if (i == hwmgr->usec_timeout)
- return -1;
- return 0;
-}
-
-
-/**
- * Returns once the part of the register indicated by the mask has
- * reached the given value.The indirect space is described by giving
- * the memory-mapped index of the indirect index register.
- */
-int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (hwmgr == NULL || hwmgr->device == NULL) {
- pr_err("Invalid Hardware Manager!");
- return -EINVAL;
- }
-
- cgs_write_register(hwmgr->device, indirect_port, index);
- return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
-}
-
-int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- for (i = 0; i < hwmgr->usec_timeout; i++) {
- cur_value = cgs_read_register(hwmgr->device,
- index);
- if ((cur_value & mask) != (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic */
- if (i == hwmgr->usec_timeout)
- return -ETIME;
- return 0;
-}
-
-int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, indirect_port, index);
- return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
- value, mask);
-}
-
-bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
-{
- return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
-}
-
-bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
-{
- return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
-}
-
-
-int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
-{
- uint32_t i, j;
- uint16_t vvalue;
- bool found = false;
- struct pp_atomctrl_voltage_table *table;
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "Voltage Table empty.", return -EINVAL);
-
- table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
- GFP_KERNEL);
-
- if (NULL == table)
- return -EINVAL;
-
- table->mask_low = vol_table->mask_low;
- table->phase_delay = vol_table->phase_delay;
-
- for (i = 0; i < vol_table->count; i++) {
- vvalue = vol_table->entries[i].value;
- found = false;
-
- for (j = 0; j < table->count; j++) {
- if (vvalue == table->entries[j].value) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- table->entries[table->count].value = vvalue;
- table->entries[table->count].smio_low =
- vol_table->entries[i].smio_low;
- table->count++;
- }
- }
-
- memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
- kfree(table);
- table = NULL;
- return 0;
-}
-
-int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "vol_table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].mvdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = phm_trim_voltage_table(vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim MVDD table.", return result);
-
- return 0;
-}
-
-int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "vol_table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].vddci;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = phm_trim_voltage_table(vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim VDDCI table.", return result);
-
- return 0;
-}
-
-int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
- phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- int i = 0;
-
- PP_ASSERT_WITH_CODE((0 != lookup_table->count),
- "Voltage Lookup Table empty.", return -EINVAL);
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "vol_table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
-
- vol_table->count = lookup_table->count;
-
- for (i = 0; i < vol_table->count; i++) {
- vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
- struct pp_atomctrl_voltage_table *vol_table)
-{
- unsigned int i, diff;
-
- if (vol_table->count <= max_vol_steps)
- return;
-
- diff = vol_table->count - max_vol_steps;
-
- for (i = 0; i < max_vol_steps; i++)
- vol_table->entries[i] = vol_table->entries[i + diff];
-
- vol_table->count = max_vol_steps;
-
- return;
-}
-
-int phm_reset_single_dpm_table(void *table,
- uint32_t count, int max)
-{
- int i;
-
- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
-
- dpm_table->count = count > max ? max : count;
-
- for (i = 0; i < dpm_table->count; i++)
- dpm_table->dpm_level[i].enabled = false;
-
- return 0;
-}
-
-void phm_setup_pcie_table_entry(
- void *table,
- uint32_t index, uint32_t pcie_gen,
- uint32_t pcie_lanes)
-{
- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
- dpm_table->dpm_level[index].value = pcie_gen;
- dpm_table->dpm_level[index].param1 = pcie_lanes;
- dpm_table->dpm_level[index].enabled = 1;
-}
-
-int32_t phm_get_dpm_level_enable_mask_value(void *table)
-{
- int32_t i;
- int32_t mask = 0;
- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask = mask << 1;
- if (dpm_table->dpm_level[i - 1].enabled)
- mask |= 0x1;
- else
- mask &= 0xFFFFFFFE;
- }
-
- return mask;
-}
-
-uint8_t phm_get_voltage_index(
- struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
-{
- uint8_t count = (uint8_t) (lookup_table->count);
- uint8_t i;
-
- PP_ASSERT_WITH_CODE((NULL != lookup_table),
- "Lookup Table empty.", return 0);
- PP_ASSERT_WITH_CODE((0 != count),
- "Lookup Table empty.", return 0);
-
- for (i = 0; i < lookup_table->count; i++) {
- /* find first voltage equal or bigger than requested */
- if (lookup_table->entries[i].us_vdd >= voltage)
- return i;
- }
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
-
-uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
- uint32_t voltage)
-{
- uint8_t count = (uint8_t) (voltage_table->count);
- uint8_t i = 0;
-
- PP_ASSERT_WITH_CODE((NULL != voltage_table),
- "Voltage Table empty.", return 0;);
- PP_ASSERT_WITH_CODE((0 != count),
- "Voltage Table empty.", return 0;);
-
- for (i = 0; i < count; i++) {
- /* find first voltage bigger than requested */
- if (voltage_table->entries[i].value >= voltage)
- return i;
- }
-
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
-
-uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
-{
- uint32_t i;
-
- for (i = 0; i < vddci_table->count; i++) {
- if (vddci_table->entries[i].value >= vddci)
- return vddci_table->entries[i].value;
- }
-
- pr_debug("vddci is larger than max value in vddci_table\n");
- return vddci_table->entries[i-1].value;
-}
-
-int phm_find_boot_level(void *table,
- uint32_t value, uint32_t *boot_level)
-{
- int result = -EINVAL;
- uint32_t i;
- struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
-
- for (i = 0; i < dpm_table->count; i++) {
- if (value == dpm_table->dpm_level[i].value) {
- *boot_level = i;
- result = 0;
- }
- }
-
- return result;
-}
-
-int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- uint16_t virtual_voltage_id, int32_t *sclk)
-{
- uint8_t entry_id;
- uint8_t voltage_id;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
- for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
- voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
- if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
- break;
- }
-
- if (entry_id >= table_info->vdd_dep_on_sclk->count) {
- pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
- return -EINVAL;
- }
-
- *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
-
- return 0;
-}
-
-/**
- * Initialize Dynamic State Adjustment Rule Settings
- *
- * @param hwmgr the address of the powerplay hardware manager.
- */
-int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
-{
- uint32_t table_size;
- struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* initialize vddc_dep_on_dal_pwrl table */
- table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
- table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
-
- if (NULL == table_clk_vlt) {
- pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
- return -ENOMEM;
- } else {
- table_clk_vlt->count = 4;
- table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
- table_clk_vlt->entries[0].v = 0;
- table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
- table_clk_vlt->entries[1].v = 720;
- table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
- table_clk_vlt->entries[2].v = 810;
- table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
- table_clk_vlt->entries[3].v = 900;
- if (pptable_info != NULL)
- pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
- }
-
- return 0;
-}
-
-uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
-{
- uint32_t level = 0;
-
- while (0 == (mask & (1 << level)))
- level++;
-
- return level;
-}
-
-void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_clock_voltage_dependency_table *table =
- table_info->vddc_dep_on_dal_pwrl;
- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
- uint32_t req_vddc = 0, req_volt, i;
-
- if (!table || table->count <= 0
- || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
- || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
- return;
-
- for (i = 0; i < table->count; i++) {
- if (dal_power_level == table->entries[i].clk) {
- req_vddc = table->entries[i].v;
- break;
- }
- }
-
- vddc_table = table_info->vdd_dep_on_sclk;
- for (i = 0; i < vddc_table->count; i++) {
- if (req_vddc <= vddc_table->entries[i].vddc) {
- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_VddC_Request, req_volt);
- return;
- }
- }
- pr_err("DAL requested level can not"
- " found a available voltage in VDDC DPM Table \n");
-}
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
@@ -886,9 +348,10 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
- if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
- acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
+#if defined(CONFIG_ACPI)
+ if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+#endif
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicPatchPowerState);
@@ -931,26 +394,10 @@ int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_CAC);
}
- return 0;
-}
-
-int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t id, uint16_t *voltage)
-{
- uint32_t vol;
- int ret = 0;
+ if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
+ hwmgr->od_enabled = true;
- if (hwmgr->chip_id < CHIP_TONGA) {
- ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
- } else if (hwmgr->chip_id < CHIP_POLARIS10) {
- ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
- if (*voltage >= 2000 || *voltage == 0)
- *voltage = 1150;
- } else {
- ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
- *voltage = (uint16_t)(vol/100);
- }
- return ret;
+ return 0;
}
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
deleted file mode 100644
index f6b4dd9..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include "hwmgr.h"
-#include "amd_acpi.h"
-#include "pp_acpi.h"
-
-bool acpi_atcs_functions_supported(void *device, uint32_t index)
-{
- int32_t result;
- struct atcs_verify_interface output_buf = {0};
-
- int32_t temp_buffer = 1;
-
- result = cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
- ATCS_FUNCTION_VERIFY_INTERFACE,
- &temp_buffer,
- &output_buf,
- 1,
- sizeof(temp_buffer),
- sizeof(output_buf));
-
- return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
-}
-
-bool acpi_atcs_notify_pcie_device_ready(void *device)
-{
- int32_t temp_buffer = 1;
-
- return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
- ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
- &temp_buffer,
- NULL,
- 0,
- sizeof(temp_buffer),
- 0);
-}
-
-
-int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
-{
- struct atcs_pref_req_input atcs_input;
- struct atcs_pref_req_output atcs_output;
- u32 retry = 3;
- int result;
- struct cgs_system_info info = {0};
-
- if (acpi_atcs_notify_pcie_device_ready(device))
- return -EINVAL;
-
- info.size = sizeof(struct cgs_system_info);
- info.info_id = CGS_SYSTEM_INFO_ADAPTER_BDF_ID;
- result = cgs_query_system_info(device, &info);
- if (result != 0)
- return -EINVAL;
- atcs_input.client_id = (uint16_t)info.value;
- atcs_input.size = sizeof(struct atcs_pref_req_input);
- atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
- atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
- if (advertise)
- atcs_input.flags |= ATCS_ADVERTISE_CAPS;
- atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
- atcs_input.perf_req = perf_req;
-
- atcs_output.size = sizeof(struct atcs_pref_req_input);
-
- while (retry--) {
- result = cgs_call_acpi_method(device,
- CGS_ACPI_METHOD_ATCS,
- ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
- &atcs_input,
- &atcs_output,
- 1,
- sizeof(atcs_input),
- sizeof(atcs_output));
- if (result != 0)
- return -EIO;
-
- switch (atcs_output.ret_val) {
- case ATCS_REQUEST_REFUSED:
- default:
- return -EINVAL;
- case ATCS_REQUEST_COMPLETE:
- return 0;
- case ATCS_REQUEST_IN_PROGRESS:
- udelay(10);
- break;
- }
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
index c6ba0d6..4112a93 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
@@ -43,4 +43,4 @@ struct phm_fuses_default {
extern int pp_override_get_default_fuse_value(uint64_t key,
struct phm_fuses_default *result);
-#endif \ No newline at end of file
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ffa44bb..0f2851b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -35,16 +35,21 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
int size;
if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
- return -EINVAL;
+ return 0;
if (hwmgr->hwmgr_func->get_power_state_size == NULL)
- return -EINVAL;
+ return 0;
hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
sizeof(struct pp_power_state);
+ if (table_entries == 0 || size == 0) {
+ pr_warn("Please check whether power state management is suppported on this asic\n");
+ return 0;
+ }
+
hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
if (hwmgr->ps == NULL)
return -ENOMEM;
@@ -91,6 +96,9 @@ int psm_fini_power_state_table(struct pp_hwmgr *hwmgr)
if (hwmgr == NULL)
return -EINVAL;
+ if (!hwmgr->ps)
+ return 0;
+
kfree(hwmgr->current_ps);
kfree(hwmgr->request_ps);
kfree(hwmgr->ps);
@@ -167,6 +175,9 @@ int psm_set_boot_states(struct pp_hwmgr *hwmgr)
unsigned long state_id;
int ret = -EINVAL;
+ if (!hwmgr->ps)
+ return 0;
+
if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot,
&state_id))
ret = psm_set_states(hwmgr, state_id);
@@ -179,6 +190,9 @@ int psm_set_performance_states(struct pp_hwmgr *hwmgr)
unsigned long state_id;
int ret = -EINVAL;
+ if (!hwmgr->ps)
+ return 0;
+
if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance,
&state_id))
ret = psm_set_states(hwmgr, state_id);
@@ -193,6 +207,9 @@ int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
int table_entries;
int i;
+ if (!hwmgr->ps)
+ return 0;
+
table_entries = hwmgr->num_ps;
*state = hwmgr->ps;
@@ -214,18 +231,13 @@ restart_search:
return -EINVAL;
}
-int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
+static void power_state_management(struct pp_hwmgr *hwmgr,
struct pp_power_state *new_ps)
{
struct pp_power_state *pcurrent;
struct pp_power_state *requested;
bool equal;
- if (skip)
- return 0;
-
- phm_display_configuration_changed(hwmgr);
-
if (new_ps != NULL)
requested = new_ps;
else
@@ -242,9 +254,36 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
}
+}
+
+int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
+ struct pp_power_state *new_ps)
+{
+ uint32_t index;
+ long workload;
+
+ if (skip)
+ return 0;
+
+ phm_display_configuration_changed(hwmgr);
+
+ if (hwmgr->ps)
+ power_state_management(hwmgr, new_ps);
phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+ if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
+ hwmgr->dpm_level = hwmgr->request_dpm_level;
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ index = fls(hwmgr->workload_mask);
+ index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
+ workload = hwmgr->workload_setting[index];
+
+ if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
+ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index c062844..55f9b30 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -532,6 +532,7 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
boot_values->usVddci = info->bootup_vddci_mv;
boot_values->usMvddc = info->bootup_mvddc_mv;
boot_values->usVddGfx = info->bootup_vddgfx_mv;
+ boot_values->ucCoolingID = info->coolingsolution_id;
boot_values->ulSocClk = 0;
boot_values->ulDCEFClk = 0;
@@ -542,4 +543,90 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
boot_values->ulDCEFClk = frequency;
return 0;
-} \ No newline at end of file
+}
+
+int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_smc_dpm_parameters *param)
+{
+ struct atom_smc_dpm_info_v4_1 *info;
+ uint16_t ix;
+
+ ix = GetIndexIntoMasterDataTable(smc_dpm_info);
+ info = (struct atom_smc_dpm_info_v4_1 *)
+ cgs_atom_get_data_table(hwmgr->device,
+ ix, NULL, NULL, NULL);
+ if (!info) {
+ pr_info("Error retrieving BIOS Table Address!");
+ return -EINVAL;
+ }
+
+ param->liquid1_i2c_address = info->liquid1_i2c_address;
+ param->liquid2_i2c_address = info->liquid2_i2c_address;
+ param->vr_i2c_address = info->vr_i2c_address;
+ param->plx_i2c_address = info->plx_i2c_address;
+
+ param->liquid_i2c_linescl = info->liquid_i2c_linescl;
+ param->liquid_i2c_linesda = info->liquid_i2c_linesda;
+ param->vr_i2c_linescl = info->vr_i2c_linescl;
+ param->vr_i2c_linesda = info->vr_i2c_linesda;
+
+ param->plx_i2c_linescl = info->plx_i2c_linescl;
+ param->plx_i2c_linesda = info->plx_i2c_linesda;
+ param->vrsensorpresent = info->vrsensorpresent;
+ param->liquidsensorpresent = info->liquidsensorpresent;
+
+ param->maxvoltagestepgfx = info->maxvoltagestepgfx;
+ param->maxvoltagestepsoc = info->maxvoltagestepsoc;
+
+ param->vddgfxvrmapping = info->vddgfxvrmapping;
+ param->vddsocvrmapping = info->vddsocvrmapping;
+ param->vddmem0vrmapping = info->vddmem0vrmapping;
+ param->vddmem1vrmapping = info->vddmem1vrmapping;
+
+ param->gfxulvphasesheddingmask = info->gfxulvphasesheddingmask;
+ param->soculvphasesheddingmask = info->soculvphasesheddingmask;
+
+ param->gfxmaxcurrent = info->gfxmaxcurrent;
+ param->gfxoffset = info->gfxoffset;
+ param->padding_telemetrygfx = info->padding_telemetrygfx;
+
+ param->socmaxcurrent = info->socmaxcurrent;
+ param->socoffset = info->socoffset;
+ param->padding_telemetrysoc = info->padding_telemetrysoc;
+
+ param->mem0maxcurrent = info->mem0maxcurrent;
+ param->mem0offset = info->mem0offset;
+ param->padding_telemetrymem0 = info->padding_telemetrymem0;
+
+ param->mem1maxcurrent = info->mem1maxcurrent;
+ param->mem1offset = info->mem1offset;
+ param->padding_telemetrymem1 = info->padding_telemetrymem1;
+
+ param->acdcgpio = info->acdcgpio;
+ param->acdcpolarity = info->acdcpolarity;
+ param->vr0hotgpio = info->vr0hotgpio;
+ param->vr0hotpolarity = info->vr0hotpolarity;
+
+ param->vr1hotgpio = info->vr1hotgpio;
+ param->vr1hotpolarity = info->vr1hotpolarity;
+ param->padding1 = info->padding1;
+ param->padding2 = info->padding2;
+
+ param->ledpin0 = info->ledpin0;
+ param->ledpin1 = info->ledpin1;
+ param->ledpin2 = info->ledpin2;
+
+ param->gfxclkspreadenabled = info->gfxclkspreadenabled;
+ param->gfxclkspreadpercent = info->gfxclkspreadpercent;
+ param->gfxclkspreadfreq = info->gfxclkspreadfreq;
+
+ param->uclkspreadenabled = info->uclkspreadenabled;
+ param->uclkspreadpercent = info->uclkspreadpercent;
+ param->uclkspreadfreq = info->uclkspreadfreq;
+
+ param->socclkspreadenabled = info->socclkspreadenabled;
+ param->socclkspreadpercent = info->socclkspreadpercent;
+ param->socclkspreadfreq = info->socclkspreadfreq;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index 8e6b1f0..a957d8f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -140,6 +140,69 @@ struct pp_atomfwctrl_bios_boot_up_values {
uint16_t usVddci;
uint16_t usMvddc;
uint16_t usVddGfx;
+ uint8_t ucCoolingID;
+};
+
+struct pp_atomfwctrl_smc_dpm_parameters
+{
+ uint8_t liquid1_i2c_address;
+ uint8_t liquid2_i2c_address;
+ uint8_t vr_i2c_address;
+ uint8_t plx_i2c_address;
+ uint8_t liquid_i2c_linescl;
+ uint8_t liquid_i2c_linesda;
+ uint8_t vr_i2c_linescl;
+ uint8_t vr_i2c_linesda;
+ uint8_t plx_i2c_linescl;
+ uint8_t plx_i2c_linesda;
+ uint8_t vrsensorpresent;
+ uint8_t liquidsensorpresent;
+ uint16_t maxvoltagestepgfx;
+ uint16_t maxvoltagestepsoc;
+ uint8_t vddgfxvrmapping;
+ uint8_t vddsocvrmapping;
+ uint8_t vddmem0vrmapping;
+ uint8_t vddmem1vrmapping;
+ uint8_t gfxulvphasesheddingmask;
+ uint8_t soculvphasesheddingmask;
+
+ uint16_t gfxmaxcurrent;
+ uint8_t gfxoffset;
+ uint8_t padding_telemetrygfx;
+ uint16_t socmaxcurrent;
+ uint8_t socoffset;
+ uint8_t padding_telemetrysoc;
+ uint16_t mem0maxcurrent;
+ uint8_t mem0offset;
+ uint8_t padding_telemetrymem0;
+ uint16_t mem1maxcurrent;
+ uint8_t mem1offset;
+ uint8_t padding_telemetrymem1;
+
+ uint8_t acdcgpio;
+ uint8_t acdcpolarity;
+ uint8_t vr0hotgpio;
+ uint8_t vr0hotpolarity;
+ uint8_t vr1hotgpio;
+ uint8_t vr1hotpolarity;
+ uint8_t padding1;
+ uint8_t padding2;
+
+ uint8_t ledpin0;
+ uint8_t ledpin1;
+ uint8_t ledpin2;
+
+ uint8_t gfxclkspreadenabled;
+ uint8_t gfxclkspreadpercent;
+ uint16_t gfxclkspreadfreq;
+
+ uint8_t uclkspreadenabled;
+ uint8_t uclkspreadpercent;
+ uint16_t uclkspreadfreq;
+
+ uint8_t socclkspreadenabled;
+ uint8_t socclkspreadpercent;
+ uint16_t socclkspreadfreq;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
@@ -161,6 +224,8 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values);
+int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_smc_dpm_parameters *param);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index a651ebc..c9eecce 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -523,8 +523,7 @@ static int get_pcie_table(
if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
else
- pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
- Disregarding the excess entries... \n");
+ pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! Disregarding the excess entries...\n");
pcie_table->count = pcie_count;
for (i = 0; i < pcie_count; i++) {
@@ -563,8 +562,7 @@ static int get_pcie_table(
if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
else
- pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
- Disregarding the excess entries... \n");
+ pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! Disregarding the excess entries...\n");
pcie_table->count = pcie_count;
@@ -838,10 +836,10 @@ static int init_over_drive_limits(
hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
hwmgr->platform_descriptor.overdriveVDDCStep = 0;
- if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 \
- && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ACOverdriveSupport);
+ if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 \
+ || hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
+ hwmgr->od_enabled = false;
+ pr_debug("OverDrive feature not support by VBIOS\n");
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index afae32e..36ca7c4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -394,8 +394,8 @@ static int get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
dep_table->entries[i].clk =
((unsigned long)table->entries[i].ucClockHigh << 16) |
le16_to_cpu(table->entries[i].usClockLow);
- dep_table->entries[i].v =
- (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
+ dep_table->entries[i].v =
+ (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
}
*ptable = dep_table;
@@ -1042,7 +1042,7 @@ static int init_overdrive_limits_V2_1(struct pp_hwmgr *hwmgr,
static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
- int result;
+ int result = 0;
uint8_t frev, crev;
uint16_t size;
@@ -1074,12 +1074,11 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
powerplay_table,
(const ATOM_FIRMWARE_INFO_V2_1 *)fw_info);
- if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
- && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0
- && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OverdriveDisabledByPowerBudget))
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ACOverdriveSupport);
+ if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0
+ && hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
+ hwmgr->od_enabled = false;
+ pr_debug("OverDrive feature not support by VBIOS\n");
+ }
return result;
}
@@ -1697,9 +1696,6 @@ static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk);
hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
- kfree(hwmgr->dyn_state.vq_budgeting_table);
- hwmgr->dyn_state.vq_budgeting_table = NULL;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
deleted file mode 100644
index 3e0b267..0000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ /dev/null
@@ -1,956 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "pp_debug.h"
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "atom-types.h"
-#include "atombios.h"
-#include "processpptables.h"
-#include "cgs_common.h"
-#include "smumgr.h"
-#include "hwmgr.h"
-#include "hardwaremanager.h"
-#include "rv_ppsmc.h"
-#include "rv_hwmgr.h"
-#include "power_state.h"
-#include "rv_smumgr.h"
-#include "pp_soc15.h"
-
-#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
-#define SCLK_MIN_DIV_INTV_SHIFT 12
-#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
-#define SMC_RAM_END 0x40000
-
-static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic;
-
-
-int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
- struct pp_display_clock_request *clock_req);
-
-
-static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps)
-{
- if (PhwRaven_Magic != hw_ps->magic)
- return NULL;
-
- return (struct rv_power_state *)hw_ps;
-}
-
-static const struct rv_power_state *cast_const_rv_ps(
- const struct pp_hw_power_state *hw_ps)
-{
- if (PhwRaven_Magic != hw_ps->magic)
- return NULL;
-
- return (struct rv_power_state *)hw_ps;
-}
-
-static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
- struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend);
-
- rv_hwmgr->dce_slow_sclk_threshold = 30000;
- rv_hwmgr->thermal_auto_throttling_treshold = 0;
- rv_hwmgr->is_nb_dpm_enabled = 1;
- rv_hwmgr->dpm_flags = 1;
- rv_hwmgr->gfx_off_controled_by_driver = false;
- rv_hwmgr->need_min_deep_sleep_dcefclk = true;
- rv_hwmgr->num_active_display = 0;
- rv_hwmgr->deep_sleep_dcefclk = 0;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerPlaySupport);
- return 0;
-}
-
-static int rv_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
- struct phm_clock_and_voltage_limits *table)
-{
- return 0;
-}
-
-static int rv_init_dynamic_state_adjustment_rule_settings(
- struct pp_hwmgr *hwmgr)
-{
- uint32_t table_size =
- sizeof(struct phm_clock_voltage_dependency_table) +
- (7 * sizeof(struct phm_clock_voltage_dependency_record));
-
- struct phm_clock_voltage_dependency_table *table_clk_vlt =
- kzalloc(table_size, GFP_KERNEL);
-
- if (NULL == table_clk_vlt) {
- pr_err("Can not allocate memory!\n");
- return -ENOMEM;
- }
-
- table_clk_vlt->count = 8;
- table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
- table_clk_vlt->entries[0].v = 0;
- table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
- table_clk_vlt->entries[1].v = 1;
- table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
- table_clk_vlt->entries[2].v = 2;
- table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
- table_clk_vlt->entries[3].v = 3;
- table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
- table_clk_vlt->entries[4].v = 4;
- table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
- table_clk_vlt->entries[5].v = 5;
- table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
- table_clk_vlt->entries[6].v = 6;
- table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
- table_clk_vlt->entries[7].v = 7;
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
-
- return 0;
-}
-
-static int rv_get_system_info_data(struct pp_hwmgr *hwmgr)
-{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)hwmgr->backend;
-
- rv_data->sys_info.htc_hyst_lmt = 5;
- rv_data->sys_info.htc_tmp_lmt = 203;
-
- if (rv_data->thermal_auto_throttling_treshold == 0)
- rv_data->thermal_auto_throttling_treshold = 203;
-
- rv_construct_max_power_limits_table (hwmgr,
- &hwmgr->dyn_state.max_clock_voltage_on_ac);
-
- rv_init_dynamic_state_adjustment_rule_settings(hwmgr);
-
- return 0;
-}
-
-static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
-{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- struct PP_Clocks clocks = {0};
- struct pp_display_clock_request clock_req;
-
- clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
- clock_req.clock_type = amd_pp_dcf_clock;
- clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
-
- PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
- "Attempt to set DCF Clock Failed!", return -EINVAL);
-
- if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
- ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
- rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
- rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinVcn,
- (rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min);
- }
-
- if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
- ((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetHardMinSocclkByFreq,
- hwmgr->gfx_arbiter.sclk_hard_min / 100);
- rv_read_arg_from_smc(hwmgr, &rv_data->soc_actual_hard_min_freq);
- }
-
- if ((hwmgr->gfx_arbiter.gfxclk != 0) &&
- (rv_data->gfx_actual_soft_min_freq != (hwmgr->gfx_arbiter.gfxclk))) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinVideoGfxclkFreq,
- hwmgr->gfx_arbiter.gfxclk / 100);
- rv_read_arg_from_smc(hwmgr, &rv_data->gfx_actual_soft_min_freq);
- }
-
- if ((hwmgr->gfx_arbiter.fclk != 0) &&
- (rv_data->fabric_actual_soft_min_freq != (hwmgr->gfx_arbiter.fclk / 100))) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinVideoFclkFreq,
- hwmgr->gfx_arbiter.fclk / 100);
- rv_read_arg_from_smc(hwmgr, &rv_data->fabric_actual_soft_min_freq);
- }
-
- return 0;
-}
-
-static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
-{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
- if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) {
- rv_data->deep_sleep_dcefclk = clock/100;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinDeepSleepDcefclk,
- rv_data->deep_sleep_dcefclk);
- }
- return 0;
-}
-
-static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
-{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
- if (rv_data->num_active_display != count) {
- rv_data->num_active_display = count;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetDisplayCount,
- rv_data->num_active_display);
- }
-
- return 0;
-}
-
-static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
- return rv_set_clock_limit(hwmgr, input);
-}
-
-static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
- rv_data->vcn_power_gated = true;
- rv_data->isp_tileA_power_gated = true;
- rv_data->isp_tileB_power_gated = true;
-
- return 0;
-}
-
-
-static int rv_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
- return rv_init_power_gate_state(hwmgr);
-}
-
-static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr)
-{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
- rv_data->separation_time = 0;
- rv_data->cc6_disable = false;
- rv_data->pstate_disable = false;
- rv_data->cc6_setting_changed = false;
-
- return 0;
-}
-
-static int rv_power_off_asic(struct pp_hwmgr *hwmgr)
-{
- return rv_reset_cc6_data(hwmgr);
-}
-
-static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr)
-{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
- if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_DisableGfxOff);
-
- return 0;
-}
-
-static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- return rv_disable_gfx_off(hwmgr);
-}
-
-static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr)
-{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
-
- if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableGfxOff);
-
- return 0;
-}
-
-static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- return rv_enable_gfx_off(hwmgr);
-}
-
-static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
- struct pp_power_state *prequest_ps,
- const struct pp_power_state *pcurrent_ps)
-{
- return 0;
-}
-
-/* temporary hardcoded clock voltage breakdown tables */
-static const DpmClock_t VddDcfClk[]= {
- { 300, 2600},
- { 600, 3200},
- { 600, 3600},
-};
-
-static const DpmClock_t VddSocClk[]= {
- { 478, 2600},
- { 722, 3200},
- { 722, 3600},
-};
-
-static const DpmClock_t VddFClk[]= {
- { 400, 2600},
- {1200, 3200},
- {1200, 3600},
-};
-
-static const DpmClock_t VddDispClk[]= {
- { 435, 2600},
- { 661, 3200},
- {1086, 3600},
-};
-
-static const DpmClock_t VddDppClk[]= {
- { 435, 2600},
- { 661, 3200},
- { 661, 3600},
-};
-
-static const DpmClock_t VddPhyClk[]= {
- { 540, 2600},
- { 810, 3200},
- { 810, 3600},
-};
-
-static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
- struct rv_voltage_dependency_table **pptable,
- uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
-{
- uint32_t table_size, i;
- struct rv_voltage_dependency_table *ptable;
-
- table_size = sizeof(uint32_t) + sizeof(struct rv_voltage_dependency_table) * num_entry;
- ptable = kzalloc(table_size, GFP_KERNEL);
-
- if (NULL == ptable)
- return -ENOMEM;
-
- ptable->count = num_entry;
-
- for (i = 0; i < ptable->count; i++) {
- ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
- ptable->entries[i].vol = pclk_dependency_table->Vol;
- pclk_dependency_table++;
- }
-
- *pptable = ptable;
-
- return 0;
-}
-
-
-static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- DpmClocks_t *table = &(rv_data->clock_table);
- struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
-
- result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE);
-
- PP_ASSERT_WITH_CODE((0 == result),
- "Attempt to copy clock table from smc failed",
- return result);
-
- if (0 == result && table->DcefClocks[0].Freq != 0) {
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
- NUM_DCEFCLK_DPM_LEVELS,
- &rv_data->clock_table.DcefClocks[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
- NUM_SOCCLK_DPM_LEVELS,
- &rv_data->clock_table.SocClocks[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
- NUM_FCLK_DPM_LEVELS,
- &rv_data->clock_table.FClocks[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
- NUM_MEMCLK_DPM_LEVELS,
- &rv_data->clock_table.MemClocks[0]);
- } else {
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
- ARRAY_SIZE(VddDcfClk),
- &VddDcfClk[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
- ARRAY_SIZE(VddSocClk),
- &VddSocClk[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
- ARRAY_SIZE(VddFClk),
- &VddFClk[0]);
- }
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
- ARRAY_SIZE(VddDispClk),
- &VddDispClk[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
- ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
- rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
- ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
-
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetMinGfxclkFrequency),
- "Attempt to get min GFXCLK Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &result),
- "Attempt to get min GFXCLK Failed!",
- return -1);
- rv_data->gfx_min_freq_limit = result * 100;
-
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetMaxGfxclkFrequency),
- "Attempt to get max GFXCLK Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &result),
- "Attempt to get max GFXCLK Failed!",
- return -1);
- rv_data->gfx_max_freq_limit = result * 100;
-
- return 0;
-}
-
-static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- struct rv_hwmgr *data;
-
- data = kzalloc(sizeof(struct rv_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
-
- result = rv_initialize_dpm_defaults(hwmgr);
- if (result != 0) {
- pr_err("rv_initialize_dpm_defaults failed\n");
- return result;
- }
-
- rv_populate_clock_table(hwmgr);
-
- result = rv_get_system_info_data(hwmgr);
- if (result != 0) {
- pr_err("rv_get_system_info_data failed\n");
- return result;
- }
-
- rv_construct_boot_state(hwmgr);
-
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- RAVEN_MAX_HARDWARE_POWERLEVELS;
-
- hwmgr->platform_descriptor.hardwarePerformanceLevels =
- RAVEN_MAX_HARDWARE_POWERLEVELS;
-
- hwmgr->platform_descriptor.vbiosInterruptId = 0;
-
- hwmgr->platform_descriptor.clockStep.engineClock = 500;
-
- hwmgr->platform_descriptor.clockStep.memoryClock = 500;
-
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
- return result;
-}
-
-static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
-
- kfree(pinfo->vdd_dep_on_dcefclk);
- pinfo->vdd_dep_on_dcefclk = NULL;
- kfree(pinfo->vdd_dep_on_socclk);
- pinfo->vdd_dep_on_socclk = NULL;
- kfree(pinfo->vdd_dep_on_fclk);
- pinfo->vdd_dep_on_fclk = NULL;
- kfree(pinfo->vdd_dep_on_dispclk);
- pinfo->vdd_dep_on_dispclk = NULL;
- kfree(pinfo->vdd_dep_on_dppclk);
- pinfo->vdd_dep_on_dppclk = NULL;
- kfree(pinfo->vdd_dep_on_phyclk);
- pinfo->vdd_dep_on_phyclk = NULL;
-
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
-
- kfree(hwmgr->backend);
- hwmgr->backend = NULL;
-
- return 0;
-}
-
-static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
-{
- return 0;
-}
-
-static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
- return 0;
-}
-
-static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
- return 0;
-}
-
-static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps)
-{
- return 0;
-}
-
-static int rv_dpm_get_pp_table_entry_callback(
- struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps,
- unsigned int index,
- const void *clock_info)
-{
- struct rv_power_state *rv_ps = cast_rv_ps(hw_ps);
-
- rv_ps->levels[index].engine_clock = 0;
-
- rv_ps->levels[index].vddc_index = 0;
- rv_ps->level = index + 1;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- rv_ps->levels[index].ds_divider_index = 5;
- rv_ps->levels[index].ss_divider_index = 5;
- }
-
- return 0;
-}
-
-static int rv_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
-{
- int result;
- unsigned long ret = 0;
-
- result = pp_tables_get_num_of_entries(hwmgr, &ret);
-
- return result ? 0 : ret;
-}
-
-static int rv_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry, struct pp_power_state *ps)
-{
- int result;
- struct rv_power_state *rv_ps;
-
- ps->hardware.magic = PhwRaven_Magic;
-
- rv_ps = cast_rv_ps(&(ps->hardware));
-
- result = pp_tables_get_entry(hwmgr, entry, ps,
- rv_dpm_get_pp_table_entry_callback);
-
- rv_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
- rv_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
-
- return result;
-}
-
-static int rv_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
- return sizeof(struct rv_power_state);
-}
-
-static int rv_set_cpu_power_state(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-
-static int rv_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
- bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
-{
- return 0;
-}
-
-static int rv_get_dal_power_level(struct pp_hwmgr *hwmgr,
- struct amd_pp_simple_clock_info *info)
-{
- return -EINVAL;
-}
-
-static int rv_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask)
-{
- return 0;
-}
-
-static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, char *buf)
-{
- struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend);
- struct rv_voltage_dependency_table *mclk_table =
- data->clock_vol_info.vdd_dep_on_fclk;
- int i, now, size = 0;
-
- switch (type) {
- case PP_SCLK:
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetGfxclkFrequency),
- "Attempt to get current GFXCLK Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &now),
- "Attempt to get current GFXCLK Failed!",
- return -1);
-
- size += sprintf(buf + size, "0: %uMhz %s\n",
- data->gfx_min_freq_limit / 100,
- ((data->gfx_min_freq_limit / 100)
- == now) ? "*" : "");
- size += sprintf(buf + size, "1: %uMhz %s\n",
- data->gfx_max_freq_limit / 100,
- ((data->gfx_max_freq_limit / 100)
- == now) ? "*" : "");
- break;
- case PP_MCLK:
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetFclkFrequency),
- "Attempt to get current MEMCLK Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &now),
- "Attempt to get current MEMCLK Failed!",
- return -1);
-
- for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i,
- mclk_table->entries[i].clk / 100,
- ((mclk_table->entries[i].clk / 100)
- == now) ? "*" : "");
- break;
- default:
- break;
- }
-
- return size;
-}
-
-static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
- PHM_PerformanceLevelDesignation designation, uint32_t index,
- PHM_PerformanceLevel *level)
-{
- struct rv_hwmgr *data;
-
- if (level == NULL || hwmgr == NULL || state == NULL)
- return -EINVAL;
-
- data = (struct rv_hwmgr *)(hwmgr->backend);
-
- if (index == 0) {
- level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
- level->coreClock = data->gfx_min_freq_limit;
- } else {
- level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
- data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
- level->coreClock = data->gfx_max_freq_limit;
- }
-
- level->nonLocalMemoryFreq = 0;
- level->nonLocalMemoryWidth = 0;
-
- return 0;
-}
-
-static int rv_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
- const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
-{
- const struct rv_power_state *ps = cast_const_rv_ps(state);
-
- clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
- clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
-
- return 0;
-}
-
-#define MEM_FREQ_LOW_LATENCY 25000
-#define MEM_FREQ_HIGH_LATENCY 80000
-#define MEM_LATENCY_HIGH 245
-#define MEM_LATENCY_LOW 35
-#define MEM_LATENCY_ERR 0xFFFF
-
-
-static uint32_t rv_get_mem_latency(struct pp_hwmgr *hwmgr,
- uint32_t clock)
-{
- if (clock >= MEM_FREQ_LOW_LATENCY &&
- clock < MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_HIGH;
- else if (clock >= MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_LOW;
- else
- return MEM_LATENCY_ERR;
-}
-
-static int rv_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
- enum amd_pp_clock_type type,
- struct pp_clock_levels_with_latency *clocks)
-{
- uint32_t i;
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- struct rv_voltage_dependency_table *pclk_vol_table;
- bool latency_required = false;
-
- if (pinfo == NULL)
- return -EINVAL;
-
- switch (type) {
- case amd_pp_mem_clock:
- pclk_vol_table = pinfo->vdd_dep_on_mclk;
- latency_required = true;
- break;
- case amd_pp_f_clock:
- pclk_vol_table = pinfo->vdd_dep_on_fclk;
- latency_required = true;
- break;
- case amd_pp_dcf_clock:
- pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
- break;
- case amd_pp_disp_clock:
- pclk_vol_table = pinfo->vdd_dep_on_dispclk;
- break;
- case amd_pp_phy_clock:
- pclk_vol_table = pinfo->vdd_dep_on_phyclk;
- break;
- case amd_pp_dpp_clock:
- pclk_vol_table = pinfo->vdd_dep_on_dppclk;
- default:
- return -EINVAL;
- }
-
- if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
- return -EINVAL;
-
- clocks->num_levels = 0;
- for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
- clocks->data[i].latency_in_us = latency_required ?
- rv_get_mem_latency(hwmgr,
- pclk_vol_table->entries[i].clk) :
- 0;
- clocks->num_levels++;
- }
-
- return 0;
-}
-
-static int rv_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
- enum amd_pp_clock_type type,
- struct pp_clock_levels_with_voltage *clocks)
-{
- uint32_t i;
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- struct rv_voltage_dependency_table *pclk_vol_table = NULL;
-
- if (pinfo == NULL)
- return -EINVAL;
-
- switch (type) {
- case amd_pp_mem_clock:
- pclk_vol_table = pinfo->vdd_dep_on_mclk;
- break;
- case amd_pp_f_clock:
- pclk_vol_table = pinfo->vdd_dep_on_fclk;
- break;
- case amd_pp_dcf_clock:
- pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
- break;
- case amd_pp_soc_clock:
- pclk_vol_table = pinfo->vdd_dep_on_socclk;
- break;
- default:
- return -EINVAL;
- }
-
- if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
- return -EINVAL;
-
- clocks->num_levels = 0;
- for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
- clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
- clocks->num_levels++;
- }
-
- return 0;
-}
-
-int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
- struct pp_display_clock_request *clock_req)
-{
- int result = 0;
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- enum amd_pp_clock_type clk_type = clock_req->clock_type;
- uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
- PPSMC_Msg msg;
-
- switch (clk_type) {
- case amd_pp_dcf_clock:
- if (clk_freq == rv_data->dcf_actual_hard_min_freq)
- return 0;
- msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
- rv_data->dcf_actual_hard_min_freq = clk_freq;
- break;
- case amd_pp_soc_clock:
- msg = PPSMC_MSG_SetHardMinSocclkByFreq;
- break;
- case amd_pp_f_clock:
- if (clk_freq == rv_data->f_actual_hard_min_freq)
- return 0;
- rv_data->f_actual_hard_min_freq = clk_freq;
- msg = PPSMC_MSG_SetHardMinFclkByFreq;
- break;
- default:
- pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
- return -EINVAL;
- }
-
- result = smum_send_msg_to_smc_with_parameter(hwmgr, msg,
- clk_freq);
-
- return result;
-}
-
-static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
-{
- clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
- return 0;
-}
-
-static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
- uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
- mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
- uint32_t reg_value = cgs_read_register(hwmgr->device, reg_offset);
- int cur_temp =
- (reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
-
- if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
- cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- else
- cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
- return cur_temp;
-}
-
-static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
- void *value, int *size)
-{
- uint32_t sclk, mclk;
- int ret = 0;
-
- switch (idx) {
- case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
- if (!ret) {
- rv_read_arg_from_smc(hwmgr, &sclk);
- /* in units of 10KHZ */
- *((uint32_t *)value) = sclk * 100;
- *size = 4;
- }
- break;
- case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
- if (!ret) {
- rv_read_arg_from_smc(hwmgr, &mclk);
- /* in units of 10KHZ */
- *((uint32_t *)value) = mclk * 100;
- *size = 4;
- }
- break;
- case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = rv_thermal_get_temperature(hwmgr);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static const struct pp_hwmgr_func rv_hwmgr_funcs = {
- .backend_init = rv_hwmgr_backend_init,
- .backend_fini = rv_hwmgr_backend_fini,
- .asic_setup = NULL,
- .apply_state_adjust_rules = rv_apply_state_adjust_rules,
- .force_dpm_level = rv_dpm_force_dpm_level,
- .get_power_state_size = rv_get_power_state_size,
- .powerdown_uvd = NULL,
- .powergate_uvd = NULL,
- .powergate_vce = NULL,
- .get_mclk = rv_dpm_get_mclk,
- .get_sclk = rv_dpm_get_sclk,
- .patch_boot_state = rv_dpm_patch_boot_state,
- .get_pp_table_entry = rv_dpm_get_pp_table_entry,
- .get_num_of_pp_table_entries = rv_dpm_get_num_of_pp_table_entries,
- .set_cpu_power_state = rv_set_cpu_power_state,
- .store_cc6_data = rv_store_cc6_data,
- .force_clock_level = rv_force_clock_level,
- .print_clock_levels = rv_print_clock_levels,
- .get_dal_power_level = rv_get_dal_power_level,
- .get_performance_level = rv_get_performance_level,
- .get_current_shallow_sleep_clocks = rv_get_current_shallow_sleep_clocks,
- .get_clock_by_type_with_latency = rv_get_clock_by_type_with_latency,
- .get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage,
- .get_max_high_clocks = rv_get_max_high_clocks,
- .read_sensor = rv_read_sensor,
- .set_active_display_count = rv_set_active_display_count,
- .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk,
- .dynamic_state_management_enable = rv_enable_dpm_tasks,
- .power_off_asic = rv_power_off_asic,
- .asic_setup = rv_setup_asic_task,
- .power_state_set = rv_set_power_state_tasks,
- .dynamic_state_management_disable = rv_disable_dpm_tasks,
-};
-
-int rv_init_function_pointers(struct pp_hwmgr *hwmgr)
-{
- hwmgr->hwmgr_func = &rv_hwmgr_funcs;
- hwmgr->pptable_func = &pptable_funcs;
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
new file mode 100644
index 0000000..10253b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -0,0 +1,1042 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "pp_debug.h"
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "atom-types.h"
+#include "atombios.h"
+#include "processpptables.h"
+#include "cgs_common.h"
+#include "smumgr.h"
+#include "hwmgr.h"
+#include "hardwaremanager.h"
+#include "rv_ppsmc.h"
+#include "smu10_hwmgr.h"
+#include "power_state.h"
+#include "pp_soc15.h"
+
+#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define SMU10_MINIMUM_ENGINE_CLOCK	800	/* 8 MHz, the low boundary of engine clock allowed on this chip */
+#define SCLK_MIN_DIV_INTV_SHIFT 12
+#define SMU10_DISPCLK_BYPASS_THRESHOLD	10000	/* 100 MHz */
+#define SMC_RAM_END 0x40000
+
+static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
+
+
+static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+ struct pp_display_clock_request *clock_req);
+
+
+static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
+{
+ if (SMU10_Magic != hw_ps->magic)
+ return NULL;
+
+ return (struct smu10_power_state *)hw_ps;
+}
+
+static const struct smu10_power_state *cast_const_smu10_ps(
+ const struct pp_hw_power_state *hw_ps)
+{
+ if (SMU10_Magic != hw_ps->magic)
+ return NULL;
+
+ return (struct smu10_power_state *)hw_ps;
+}
+
+static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ smu10_data->dce_slow_sclk_threshold = 30000;
+ smu10_data->thermal_auto_throttling_treshold = 0;
+ smu10_data->is_nb_dpm_enabled = 1;
+ smu10_data->dpm_flags = 1;
+ smu10_data->gfx_off_controled_by_driver = false;
+ smu10_data->need_min_deep_sleep_dcefclk = true;
+ smu10_data->num_active_display = 0;
+ smu10_data->deep_sleep_dcefclk = 0;
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerPlaySupport);
+ return 0;
+}
+
+static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
+ struct phm_clock_and_voltage_limits *table)
+{
+ return 0;
+}
+
+static int smu10_init_dynamic_state_adjustment_rule_settings(
+ struct pp_hwmgr *hwmgr)
+{
+ uint32_t table_size =
+ sizeof(struct phm_clock_voltage_dependency_table) +
+ (7 * sizeof(struct phm_clock_voltage_dependency_record));
+
+ struct phm_clock_voltage_dependency_table *table_clk_vlt =
+ kzalloc(table_size, GFP_KERNEL);
+
+ if (NULL == table_clk_vlt) {
+		pr_err("Cannot allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ table_clk_vlt->count = 8;
+ table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
+ table_clk_vlt->entries[0].v = 0;
+ table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
+ table_clk_vlt->entries[1].v = 1;
+ table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
+ table_clk_vlt->entries[2].v = 2;
+ table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
+ table_clk_vlt->entries[3].v = 3;
+ table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
+ table_clk_vlt->entries[4].v = 4;
+ table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
+ table_clk_vlt->entries[5].v = 5;
+ table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
+ table_clk_vlt->entries[6].v = 6;
+ table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
+ table_clk_vlt->entries[7].v = 7;
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
+
+ return 0;
+}
+
+static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;
+
+ smu10_data->sys_info.htc_hyst_lmt = 5;
+ smu10_data->sys_info.htc_tmp_lmt = 203;
+
+ if (smu10_data->thermal_auto_throttling_treshold == 0)
+ smu10_data->thermal_auto_throttling_treshold = 203;
+
+	smu10_construct_max_power_limits_table(hwmgr,
+ &hwmgr->dyn_state.max_clock_voltage_on_ac);
+
+ smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);
+
+ return 0;
+}
+
+static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
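+/*
+ * Push the minimum DCEF clock required by the current display configuration
+ * to the SMU as a hard-minimum request. min_dcef_set_clk appears to be kept
+ * in 10 kHz units, hence the *10 conversion to kHz before issuing the request.
+ */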
+static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
+{
+ struct PP_Clocks clocks = {0};
+ struct pp_display_clock_request clock_req;
+
+ clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
+ clock_req.clock_type = amd_pp_dcf_clock;
+ clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
+
+ PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
+ "Attempt to set DCF Clock Failed!", return -EINVAL);
+
+ return 0;
+}
+
+static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) {
+ smu10_data->deep_sleep_dcefclk = clock/100;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinDeepSleepDcefclk,
+ smu10_data->deep_sleep_dcefclk);
+ }
+ return 0;
+}
+
+static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->num_active_display != count) {
+ smu10_data->num_active_display = count;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDisplayCount,
+ smu10_data->num_active_display);
+ }
+
+ return 0;
+}
+
+static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ return smu10_set_clock_limit(hwmgr, input);
+}
+
+static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ smu10_data->vcn_power_gated = true;
+ smu10_data->isp_tileA_power_gated = true;
+ smu10_data->isp_tileB_power_gated = true;
+
+ return 0;
+}
+
+
+static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ return smu10_init_power_gate_state(hwmgr);
+}
+
+static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ smu10_data->separation_time = 0;
+ smu10_data->cc6_disable = false;
+ smu10_data->pstate_disable = false;
+ smu10_data->cc6_setting_changed = false;
+
+ return 0;
+}
+
+static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ return smu10_reset_cc6_data(hwmgr);
+}
+
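+/*
+ * GFXOFF is only toggled from the driver side when the feature is marked as
+ * driver controlled (gfx_off_controled_by_driver); otherwise the SMU firmware
+ * manages it on its own and these helpers are no-ops.
+ */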
+static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->gfx_off_controled_by_driver)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
+
+ return 0;
+}
+
+static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ return smu10_disable_gfx_off(hwmgr);
+}
+
+static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->gfx_off_controled_by_driver)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
+
+ return 0;
+}
+
+static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ return smu10_enable_gfx_off(hwmgr);
+}
+
+static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *prequest_ps,
+ const struct pp_power_state *pcurrent_ps)
+{
+ return 0;
+}
+
+/* temporary hardcoded clock voltage breakdown tables */
+static const DpmClock_t VddDcfClk[]= {
+ { 300, 2600},
+ { 600, 3200},
+ { 600, 3600},
+};
+
+static const DpmClock_t VddSocClk[]= {
+ { 478, 2600},
+ { 722, 3200},
+ { 722, 3600},
+};
+
+static const DpmClock_t VddFClk[]= {
+ { 400, 2600},
+ {1200, 3200},
+ {1200, 3600},
+};
+
+static const DpmClock_t VddDispClk[]= {
+ { 435, 2600},
+ { 661, 3200},
+ {1086, 3600},
+};
+
+static const DpmClock_t VddDppClk[]= {
+ { 435, 2600},
+ { 661, 3200},
+ { 661, 3600},
+};
+
+static const DpmClock_t VddPhyClk[]= {
+ { 540, 2600},
+ { 810, 3200},
+ { 810, 3600},
+};
+
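+/*
+ * Build a driver-side clock/voltage dependency table from num_entry DpmClock_t
+ * entries reported by (or hardcoded for) the SMU. Each frequency is scaled
+ * (Freq * 100) into the internal clock units used by the rest of this file;
+ * the voltage field is copied through unchanged.
+ */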
+static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
+ struct smu10_voltage_dependency_table **pptable,
+ uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
+{
+ uint32_t table_size, i;
+ struct smu10_voltage_dependency_table *ptable;
+
+ table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
+ ptable = kzalloc(table_size, GFP_KERNEL);
+
+ if (NULL == ptable)
+ return -ENOMEM;
+
+ ptable->count = num_entry;
+
+ for (i = 0; i < ptable->count; i++) {
+ ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
+ ptable->entries[i].vol = pclk_dependency_table->Vol;
+ pclk_dependency_table++;
+ }
+
+ *pptable = ptable;
+
+ return 0;
+}
+
+
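+/*
+ * Fetch the DPM clock table from the SMU (SMU10_CLOCKTABLE) and derive the
+ * per-domain clock/voltage dependency tables from it. If the firmware table
+ * reports no DCEFCLK levels, fall back to the hardcoded tables above. The
+ * min/max GFXCLK limits are also queried here.
+ */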
+static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ DpmClocks_t *table = &(smu10_data->clock_table);
+ struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
+
+ result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Attempt to copy clock table from smc failed",
+ return result);
+
+ if (0 == result && table->DcefClocks[0].Freq != 0) {
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
+ NUM_DCEFCLK_DPM_LEVELS,
+ &smu10_data->clock_table.DcefClocks[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
+ NUM_SOCCLK_DPM_LEVELS,
+ &smu10_data->clock_table.SocClocks[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
+ NUM_FCLK_DPM_LEVELS,
+ &smu10_data->clock_table.FClocks[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
+ NUM_MEMCLK_DPM_LEVELS,
+ &smu10_data->clock_table.MemClocks[0]);
+ } else {
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
+ ARRAY_SIZE(VddDcfClk),
+ &VddDcfClk[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
+ ARRAY_SIZE(VddSocClk),
+ &VddSocClk[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
+ ARRAY_SIZE(VddFClk),
+ &VddFClk[0]);
+ }
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
+ ARRAY_SIZE(VddDispClk),
+ &VddDispClk[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
+ ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
+ smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
+ ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
+ result = smum_get_argument(hwmgr);
+ smu10_data->gfx_min_freq_limit = result * 100;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
+ result = smum_get_argument(hwmgr);
+ smu10_data->gfx_max_freq_limit = result * 100;
+
+ return 0;
+}
+
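+/*
+ * Allocate and initialize the smu10 backend: set DPM defaults, pull the clock
+ * table from the SMU, then fill in the platform descriptor limits and the
+ * default UMD pstate clocks.
+ */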
+static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ struct smu10_hwmgr *data;
+
+ data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
+ result = smu10_initialize_dpm_defaults(hwmgr);
+ if (result != 0) {
+ pr_err("smu10_initialize_dpm_defaults failed\n");
+ return result;
+ }
+
+ smu10_populate_clock_table(hwmgr);
+
+ result = smu10_get_system_info_data(hwmgr);
+ if (result != 0) {
+ pr_err("smu10_get_system_info_data failed\n");
+ return result;
+ }
+
+ smu10_construct_boot_state(hwmgr);
+
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ SMU10_MAX_HARDWARE_POWERLEVELS;
+
+ hwmgr->platform_descriptor.hardwarePerformanceLevels =
+ SMU10_MAX_HARDWARE_POWERLEVELS;
+
+ hwmgr->platform_descriptor.vbiosInterruptId = 0;
+
+ hwmgr->platform_descriptor.clockStep.engineClock = 500;
+
+ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+
+ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+ hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
+ hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
+
+ return result;
+}
+
+static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
+
+ kfree(pinfo->vdd_dep_on_dcefclk);
+ pinfo->vdd_dep_on_dcefclk = NULL;
+ kfree(pinfo->vdd_dep_on_socclk);
+ pinfo->vdd_dep_on_socclk = NULL;
+ kfree(pinfo->vdd_dep_on_fclk);
+ pinfo->vdd_dep_on_fclk = NULL;
+ kfree(pinfo->vdd_dep_on_dispclk);
+ pinfo->vdd_dep_on_dispclk = NULL;
+ kfree(pinfo->vdd_dep_on_dppclk);
+ pinfo->vdd_dep_on_dppclk = NULL;
+ kfree(pinfo->vdd_dep_on_phyclk);
+ pinfo->vdd_dep_on_phyclk = NULL;
+
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
+
+ return 0;
+}
+
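+/*
+ * Map a forced DPM level onto SMU hard-min/soft-max messages for GFXCLK,
+ * FCLK, SOCCLK and VCN. Peak/high levels pin both bounds to the peak UMD
+ * pstate values, "auto" spreads them across the full min..peak range, and the
+ * low/min-clock profiles pin the corresponding clock to its minimum.
+ */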
+static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+{
+ if (hwmgr->smu_version < 0x1E3700) {
+		pr_info("smu firmware version too old, cannot set dpm level\n");
+ return 0;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ SMU10_UMD_PSTATE_PEAK_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ SMU10_UMD_PSTATE_PEAK_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+ SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinVcn,
+ SMU10_UMD_PSTATE_VCE);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ SMU10_UMD_PSTATE_PEAK_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ SMU10_UMD_PSTATE_PEAK_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxSocclkByFreq,
+ SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxVcn,
+ SMU10_UMD_PSTATE_VCE);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ SMU10_UMD_PSTATE_MIN_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ SMU10_UMD_PSTATE_MIN_FCLK);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ SMU10_UMD_PSTATE_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ SMU10_UMD_PSTATE_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+ SMU10_UMD_PSTATE_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinVcn,
+ SMU10_UMD_PSTATE_VCE);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ SMU10_UMD_PSTATE_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ SMU10_UMD_PSTATE_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxSocclkByFreq,
+ SMU10_UMD_PSTATE_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxVcn,
+ SMU10_UMD_PSTATE_VCE);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ SMU10_UMD_PSTATE_MIN_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+ SMU10_UMD_PSTATE_MIN_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinVcn,
+ SMU10_UMD_PSTATE_MIN_VCE);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ SMU10_UMD_PSTATE_PEAK_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ SMU10_UMD_PSTATE_PEAK_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxSocclkByFreq,
+ SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxVcn,
+ SMU10_UMD_PSTATE_VCE);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ SMU10_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ SMU10_UMD_PSTATE_MIN_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ SMU10_UMD_PSTATE_MIN_FCLK);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return 0;
+}
+
+static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct smu10_hwmgr *data;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (low)
+ return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+ else
+ return data->clock_vol_info.vdd_dep_on_fclk->entries[
+ data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
+}
+
+static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct smu10_hwmgr *data;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (low)
+ return data->gfx_min_freq_limit;
+ else
+ return data->gfx_max_freq_limit;
+}
+
+static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+ struct pp_hw_power_state *hw_ps)
+{
+ return 0;
+}
+
+static int smu10_dpm_get_pp_table_entry_callback(
+ struct pp_hwmgr *hwmgr,
+ struct pp_hw_power_state *hw_ps,
+ unsigned int index,
+ const void *clock_info)
+{
+ struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);
+
+ smu10_ps->levels[index].engine_clock = 0;
+
+ smu10_ps->levels[index].vddc_index = 0;
+ smu10_ps->level = index + 1;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ smu10_ps->levels[index].ds_divider_index = 5;
+ smu10_ps->levels[index].ss_divider_index = 5;
+ }
+
+ return 0;
+}
+
+static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ unsigned long ret = 0;
+
+ result = pp_tables_get_num_of_entries(hwmgr, &ret);
+
+ return result ? 0 : ret;
+}
+
+static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+ unsigned long entry, struct pp_power_state *ps)
+{
+ int result;
+ struct smu10_power_state *smu10_ps;
+
+ ps->hardware.magic = SMU10_Magic;
+
+ smu10_ps = cast_smu10_ps(&(ps->hardware));
+
+ result = pp_tables_get_entry(hwmgr, entry, ps,
+ smu10_dpm_get_pp_table_entry_callback);
+
+ smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
+ smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
+
+ return result;
+}
+
+static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+ return sizeof(struct smu10_power_state);
+}
+
+static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+
+static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
+ bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
+{
+ return 0;
+}
+
+static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
+ struct amd_pp_simple_clock_info *info)
+{
+ return -EINVAL;
+}
+
+static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask)
+{
+ return 0;
+}
+
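+/*
+ * Print the available levels for the requested clock domain into buf: query
+ * the current GFXCLK or FCLK from the SMU and list each level in MHz, marking
+ * the level that matches the current frequency with '*'.
+ */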
+static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+{
+ struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct smu10_voltage_dependency_table *mclk_table =
+ data->clock_vol_info.vdd_dep_on_fclk;
+ int i, now, size = 0;
+
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
+ now = smum_get_argument(hwmgr);
+
+ size += sprintf(buf + size, "0: %uMhz %s\n",
+ data->gfx_min_freq_limit / 100,
+ ((data->gfx_min_freq_limit / 100)
+ == now) ? "*" : "");
+ size += sprintf(buf + size, "1: %uMhz %s\n",
+ data->gfx_max_freq_limit / 100,
+ ((data->gfx_max_freq_limit / 100)
+ == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
+ now = smum_get_argument(hwmgr);
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i,
+ mclk_table->entries[i].clk / 100,
+ ((mclk_table->entries[i].clk / 100)
+ == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+ PHM_PerformanceLevelDesignation designation, uint32_t index,
+ PHM_PerformanceLevel *level)
+{
+ struct smu10_hwmgr *data;
+
+ if (level == NULL || hwmgr == NULL || state == NULL)
+ return -EINVAL;
+
+ data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (index == 0) {
+ level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+ level->coreClock = data->gfx_min_freq_limit;
+ } else {
+ level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
+ data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
+ level->coreClock = data->gfx_max_freq_limit;
+ }
+
+ level->nonLocalMemoryFreq = 0;
+ level->nonLocalMemoryWidth = 0;
+
+ return 0;
+}
+
+static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
+ const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
+{
+ const struct smu10_power_state *ps = cast_const_smu10_ps(state);
+
+ clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
+ clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
+
+ return 0;
+}
+
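+/*
+ * Rough memory latency model (assumed units: thresholds in the same internal
+ * clock units as the dependency tables, latency in microseconds). Clocks
+ * between the two thresholds report the high latency figure, clocks above the
+ * high threshold the low one, and anything below is flagged with the error
+ * value.
+ */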
+#define MEM_FREQ_LOW_LATENCY 25000
+#define MEM_FREQ_HIGH_LATENCY 80000
+#define MEM_LATENCY_HIGH 245
+#define MEM_LATENCY_LOW 35
+#define MEM_LATENCY_ERR 0xFFFF
+
+
+static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
+ uint32_t clock)
+{
+ if (clock >= MEM_FREQ_LOW_LATENCY &&
+ clock < MEM_FREQ_HIGH_LATENCY)
+ return MEM_LATENCY_HIGH;
+ else if (clock >= MEM_FREQ_HIGH_LATENCY)
+ return MEM_LATENCY_LOW;
+ else
+ return MEM_LATENCY_ERR;
+}
+
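+/*
+ * Report the available levels for a given clock domain from the matching
+ * vdd dependency table. Memory and fabric clocks additionally get a latency
+ * estimate from smu10_get_mem_latency(); other domains report zero latency.
+ */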
+static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ uint32_t i;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
+ struct smu10_voltage_dependency_table *pclk_vol_table;
+ bool latency_required = false;
+
+ if (pinfo == NULL)
+ return -EINVAL;
+
+ switch (type) {
+ case amd_pp_mem_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_mclk;
+ latency_required = true;
+ break;
+ case amd_pp_f_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_fclk;
+ latency_required = true;
+ break;
+ case amd_pp_dcf_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
+ break;
+ case amd_pp_disp_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_dispclk;
+ break;
+ case amd_pp_phy_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_phyclk;
+ break;
+	case amd_pp_dpp_clock:
+		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+		break;
+	default:
+ return -EINVAL;
+ }
+
+ if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
+ return -EINVAL;
+
+ clocks->num_levels = 0;
+ for (i = 0; i < pclk_vol_table->count; i++) {
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].latency_in_us = latency_required ?
+ smu10_get_mem_latency(hwmgr,
+ pclk_vol_table->entries[i].clk) :
+ 0;
+ clocks->num_levels++;
+ }
+
+ return 0;
+}
+
+static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+{
+ uint32_t i;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
+ struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
+
+ if (pinfo == NULL)
+ return -EINVAL;
+
+ switch (type) {
+ case amd_pp_mem_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_mclk;
+ break;
+ case amd_pp_f_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_fclk;
+ break;
+ case amd_pp_dcf_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
+ break;
+ case amd_pp_soc_clock:
+ pclk_vol_table = pinfo->vdd_dep_on_socclk;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
+ return -EINVAL;
+
+ clocks->num_levels = 0;
+ for (i = 0; i < pclk_vol_table->count; i++) {
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
+ clocks->num_levels++;
+ }
+
+ return 0;
+}
+
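+/*
+ * Translate a display clock request into the matching SMU hard-minimum
+ * message. DCEF and fabric clock requests are cached so an unchanged
+ * frequency is not resent to the firmware.
+ */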
+static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+ struct pp_display_clock_request *clock_req)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ enum amd_pp_clock_type clk_type = clock_req->clock_type;
+ uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+ PPSMC_Msg msg;
+
+ switch (clk_type) {
+ case amd_pp_dcf_clock:
+ if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
+ return 0;
+ msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
+ smu10_data->dcf_actual_hard_min_freq = clk_freq;
+ break;
+ case amd_pp_soc_clock:
+ msg = PPSMC_MSG_SetHardMinSocclkByFreq;
+ break;
+ case amd_pp_f_clock:
+ if (clk_freq == smu10_data->f_actual_hard_min_freq)
+ return 0;
+ smu10_data->f_actual_hard_min_freq = clk_freq;
+ msg = PPSMC_MSG_SetHardMinFclkByFreq;
+ break;
+ default:
+		pr_info("[DisplayClockVoltageRequest] Invalid Clock Type!\n");
+ return -EINVAL;
+ }
+
+ smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
+
+ return 0;
+}
+
+static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+{
+ clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
+ return 0;
+}
+
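+/*
+ * Read the current temperature from the THM block. The raw value is in 1/8
+ * degree steps; when the range-select bit is set it is additionally offset by
+ * -49 C before being scaled to PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
+ */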
+static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
+ mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
+ uint32_t reg_value = cgs_read_register(hwmgr->device, reg_offset);
+ int cur_temp =
+ (reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
+
+ if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
+ cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ else
+ cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return cur_temp;
+}
+
+static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ void *value, int *size)
+{
+ uint32_t sclk, mclk;
+ int ret = 0;
+
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
+ sclk = smum_get_argument(hwmgr);
+ /* in units of 10KHZ */
+ *((uint32_t *)value) = sclk * 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
+ mclk = smum_get_argument(hwmgr);
+ /* in units of 10KHZ */
+ *((uint32_t *)value) = mclk * 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
+}
+
+static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
+ .backend_init = smu10_hwmgr_backend_init,
+ .backend_fini = smu10_hwmgr_backend_fini,
+ .asic_setup = NULL,
+ .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
+ .force_dpm_level = smu10_dpm_force_dpm_level,
+ .get_power_state_size = smu10_get_power_state_size,
+ .powerdown_uvd = NULL,
+ .powergate_uvd = NULL,
+ .powergate_vce = NULL,
+ .get_mclk = smu10_dpm_get_mclk,
+ .get_sclk = smu10_dpm_get_sclk,
+ .patch_boot_state = smu10_dpm_patch_boot_state,
+ .get_pp_table_entry = smu10_dpm_get_pp_table_entry,
+ .get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
+ .set_cpu_power_state = smu10_set_cpu_power_state,
+ .store_cc6_data = smu10_store_cc6_data,
+ .force_clock_level = smu10_force_clock_level,
+ .print_clock_levels = smu10_print_clock_levels,
+ .get_dal_power_level = smu10_get_dal_power_level,
+ .get_performance_level = smu10_get_performance_level,
+ .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
+ .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
+ .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
+ .get_max_high_clocks = smu10_get_max_high_clocks,
+ .read_sensor = smu10_read_sensor,
+ .set_active_display_count = smu10_set_active_display_count,
+ .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
+ .dynamic_state_management_enable = smu10_enable_dpm_tasks,
+ .power_off_asic = smu10_power_off_asic,
+ .asic_setup = smu10_setup_asic_task,
+ .power_state_set = smu10_set_power_state_tasks,
+ .dynamic_state_management_disable = smu10_disable_dpm_tasks,
+ .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+};
+
+int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
+{
+ hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
+ hwmgr->pptable_func = &pptable_funcs;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index 9dc5030..175c3a5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -21,17 +21,17 @@
*
*/
-#ifndef RAVEN_HWMGR_H
-#define RAVEN_HWMGR_H
+#ifndef SMU10_HWMGR_H
+#define SMU10_HWMGR_H
#include "hwmgr.h"
-#include "rv_inc.h"
+#include "smu10_inc.h"
#include "smu10_driver_if.h"
#include "rv_ppsmc.h"
-#define RAVEN_MAX_HARDWARE_POWERLEVELS 8
-#define PHMRAVEN_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
+#define SMU10_MAX_HARDWARE_POWERLEVELS 8
+#define SMU10_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
#define DPMFlags_SCLK_Enabled 0x00000001
#define DPMFlags_UVD_Enabled 0x00000002
@@ -47,10 +47,10 @@
#define SMU_PHYID_SHIFT 8
-#define RAVEN_PCIE_POWERGATING_TARGET_GFX 0
-#define RAVEN_PCIE_POWERGATING_TARGET_DDI 1
-#define RAVEN_PCIE_POWERGATING_TARGET_PLLCASCADE 2
-#define RAVEN_PCIE_POWERGATING_TARGET_PHY 3
+#define SMU10_PCIE_POWERGATING_TARGET_GFX 0
+#define SMU10_PCIE_POWERGATING_TARGET_DDI 1
+#define SMU10_PCIE_POWERGATING_TARGET_PLLCASCADE 2
+#define SMU10_PCIE_POWERGATING_TARGET_PHY 3
enum VQ_TYPE {
CLOCK_TYPE_DCLK = 0L,
@@ -65,14 +65,14 @@ enum VQ_TYPE {
#define SUSTAINABLE_CU_MASK 0xff000000
#define SUSTAINABLE_CU_SHIFT 24
-struct rv_dpm_entry {
+struct smu10_dpm_entry {
uint32_t soft_min_clk;
uint32_t hard_min_clk;
uint32_t soft_max_clk;
uint32_t hard_max_clk;
};
-struct rv_power_level {
+struct smu10_power_level {
uint32_t engine_clock;
uint8_t vddc_index;
uint8_t ds_divider_index;
@@ -86,14 +86,14 @@ struct rv_power_level {
uint8_t rsv[3];
};
-/*used for the nbpsFlags field in rv_power state*/
-#define RAVEN_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0)
-#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1)
-#define RAVEN_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2)
+/* used for the nbpsFlags field in smu10_power_state */
+#define SMU10_POWERSTATE_FLAGS_NBPS_FORCEHIGH (1<<0)
+#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOHIGH (1<<1)
+#define SMU10_POWERSTATE_FLAGS_NBPS_LOCKTOLOW (1<<2)
-#define RAVEN_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0)
+#define SMU10_POWERSTATE_FLAGS_BAPM_DISABLE (1<<0)
-struct rv_uvd_clocks {
+struct smu10_uvd_clocks {
uint32_t vclk;
uint32_t dclk;
uint32_t vclk_low_divider;
@@ -118,16 +118,16 @@ struct pp_disable_nbpslo_flags {
};
-enum rv_pstate_previous_action {
+enum smu10_pstate_previous_action {
DO_NOTHING = 1,
FORCE_HIGH,
CANCEL_FORCE_HIGH
};
-struct rv_power_state {
+struct smu10_power_state {
unsigned int magic;
uint32_t level;
- struct rv_uvd_clocks uvd_clocks;
+ struct smu10_uvd_clocks uvd_clocks;
uint32_t evclk;
uint32_t ecclk;
uint32_t samclk;
@@ -141,79 +141,79 @@ struct rv_power_state {
uint8_t dpm_x_nbps_low;
uint8_t dpm_x_nbps_high;
- enum rv_pstate_previous_action action;
+ enum smu10_pstate_previous_action action;
- struct rv_power_level levels[RAVEN_MAX_HARDWARE_POWERLEVELS];
+ struct smu10_power_level levels[SMU10_MAX_HARDWARE_POWERLEVELS];
struct pp_disable_nbpslo_flags nbpslo_flags;
};
-#define RAVEN_NUM_NBPSTATES 4
-#define RAVEN_NUM_NBPMEMORYCLOCK 2
+#define SMU10_NUM_NBPSTATES 4
+#define SMU10_NUM_NBPMEMORYCLOCK 2
-struct rv_display_phy_info_entry {
+struct smu10_display_phy_info_entry {
uint8_t phy_present;
uint8_t active_lane_mapping;
uint8_t display_config_type;
uint8_t active_num_of_lanes;
};
-#define RAVEN_MAX_DISPLAYPHY_IDS 10
+#define SMU10_MAX_DISPLAYPHY_IDS 10
-struct rv_display_phy_info {
+struct smu10_display_phy_info {
bool display_phy_access_initialized;
- struct rv_display_phy_info_entry entries[RAVEN_MAX_DISPLAYPHY_IDS];
+ struct smu10_display_phy_info_entry entries[SMU10_MAX_DISPLAYPHY_IDS];
};
#define MAX_DISPLAY_CLOCK_LEVEL 8
-struct rv_system_info{
+struct smu10_system_info{
uint8_t htc_tmp_lmt;
uint8_t htc_hyst_lmt;
};
#define MAX_REGULAR_DPM_NUMBER 8
-struct rv_mclk_latency_entries {
+struct smu10_mclk_latency_entries {
uint32_t frequency;
uint32_t latency;
};
-struct rv_mclk_latency_table {
+struct smu10_mclk_latency_table {
uint32_t count;
- struct rv_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
+ struct smu10_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
};
-struct rv_clock_voltage_dependency_record {
+struct smu10_clock_voltage_dependency_record {
uint32_t clk;
uint32_t vol;
};
-struct rv_voltage_dependency_table {
+struct smu10_voltage_dependency_table {
uint32_t count;
- struct rv_clock_voltage_dependency_record entries[1];
+ struct smu10_clock_voltage_dependency_record entries[1];
};
-struct rv_clock_voltage_information {
- struct rv_voltage_dependency_table *vdd_dep_on_dcefclk;
- struct rv_voltage_dependency_table *vdd_dep_on_socclk;
- struct rv_voltage_dependency_table *vdd_dep_on_fclk;
- struct rv_voltage_dependency_table *vdd_dep_on_mclk;
- struct rv_voltage_dependency_table *vdd_dep_on_dispclk;
- struct rv_voltage_dependency_table *vdd_dep_on_dppclk;
- struct rv_voltage_dependency_table *vdd_dep_on_phyclk;
+struct smu10_clock_voltage_information {
+ struct smu10_voltage_dependency_table *vdd_dep_on_dcefclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_socclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_fclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_mclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_dispclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_dppclk;
+ struct smu10_voltage_dependency_table *vdd_dep_on_phyclk;
};
-struct rv_hwmgr {
+struct smu10_hwmgr {
uint32_t disable_driver_thermal_policy;
uint32_t thermal_auto_throttling_treshold;
- struct rv_system_info sys_info;
- struct rv_mclk_latency_table mclk_latency_table;
+ struct smu10_system_info sys_info;
+ struct smu10_mclk_latency_table mclk_latency_table;
uint32_t ddi_power_gating_disabled;
- struct rv_display_phy_info_entry display_phy_info;
+ struct smu10_display_phy_info_entry display_phy_info;
uint32_t dce_slow_sclk_threshold;
bool disp_clk_bypass;
@@ -255,10 +255,10 @@ struct rv_hwmgr {
uint32_t fps_low_threshold;
uint32_t dpm_flags;
- struct rv_dpm_entry sclk_dpm;
- struct rv_dpm_entry uvd_dpm;
- struct rv_dpm_entry vce_dpm;
- struct rv_dpm_entry acp_dpm;
+ struct smu10_dpm_entry sclk_dpm;
+ struct smu10_dpm_entry uvd_dpm;
+ struct smu10_dpm_entry vce_dpm;
+ struct smu10_dpm_entry acp_dpm;
bool acp_power_up_no_dsp;
uint32_t max_sclk_level;
@@ -291,7 +291,7 @@ struct rv_hwmgr {
bool gfx_off_controled_by_driver;
Watermarks_t water_marks_table;
- struct rv_clock_voltage_information clock_vol_info;
+ struct smu10_clock_voltage_information clock_vol_info;
DpmClocks_t clock_table;
uint32_t active_process_mask;
@@ -302,6 +302,21 @@ struct rv_hwmgr {
struct pp_hwmgr;
-int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
+int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
+
+/* UMD PState SMU10 Msg Parameters in MHz */
+#define SMU10_UMD_PSTATE_GFXCLK 700
+#define SMU10_UMD_PSTATE_SOCCLK 626
+#define SMU10_UMD_PSTATE_FCLK 933
+#define SMU10_UMD_PSTATE_VCE 0x03C00320
+
+#define SMU10_UMD_PSTATE_PEAK_GFXCLK 1100
+#define SMU10_UMD_PSTATE_PEAK_SOCCLK 757
+#define SMU10_UMD_PSTATE_PEAK_FCLK 1200
+
+#define SMU10_UMD_PSTATE_MIN_GFXCLK 200
+#define SMU10_UMD_PSTATE_MIN_FCLK 400
+#define SMU10_UMD_PSTATE_MIN_SOCCLK 200
+#define SMU10_UMD_PSTATE_MIN_VCE 0x0190012C
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
index 9a01493..edb68e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
@@ -21,21 +21,21 @@
*
*/
-#ifndef RAVEN_INC_H
-#define RAVEN_INC_H
+#ifndef SMU10_INC_H
+#define SMU10_INC_H
-#include "asic_reg/raven1/MP/mp_10_0_default.h"
-#include "asic_reg/raven1/MP/mp_10_0_offset.h"
-#include "asic_reg/raven1/MP/mp_10_0_sh_mask.h"
+#include "asic_reg/mp/mp_10_0_default.h"
+#include "asic_reg/mp/mp_10_0_offset.h"
+#include "asic_reg/mp/mp_10_0_sh_mask.h"
-#include "asic_reg/raven1/NBIO/nbio_7_0_default.h"
-#include "asic_reg/raven1/NBIO/nbio_7_0_offset.h"
-#include "asic_reg/raven1/NBIO/nbio_7_0_sh_mask.h"
+#include "asic_reg/nbio/nbio_7_0_default.h"
+#include "asic_reg/nbio/nbio_7_0_offset.h"
+#include "asic_reg/nbio/nbio_7_0_sh_mask.h"
-#include "asic_reg/raven1/THM/thm_10_0_default.h"
-#include "asic_reg/raven1/THM/thm_10_0_offset.h"
-#include "asic_reg/raven1/THM/thm_10_0_sh_mask.h"
+#include "asic_reg/thm/thm_10_0_default.h"
+#include "asic_reg/thm/thm_10_0_offset.h"
+#include "asic_reg/thm/thm_10_0_sh_mask.h"
#define ixDDI_PHY_GEN_STATUS 0x3FCE8
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 69a0678..f4cbaee 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -162,7 +162,7 @@ void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
AMD_CG_STATE_UNGATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
+ AMD_PG_STATE_UNGATE);
smu7_update_uvd_dpm(hwmgr, false);
}
@@ -472,23 +472,12 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
*/
int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
{
- struct cgs_system_info sys_info = {0};
- uint32_t active_cus;
- int result;
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
-
- result = cgs_query_system_info(hwmgr->device, &sys_info);
-
- if (result)
- return -EINVAL;
-
- active_cus = sys_info.value;
+ struct amdgpu_device *adev = hwmgr->adev;
if (enable)
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
+ PPSMC_MSG_GFX_CU_PG_ENABLE,
+ adev->gfx.cu_info.number);
else
return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GFX_CU_PG_DISABLE);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 7b54d48..1ddce02 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -25,7 +25,6 @@
#define _SMU7_CLOCK_POWER_GATING_H_
#include "smu7_hwmgr.h"
-#include "pp_asicblocks.h"
void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
index f967613..3477d4d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
@@ -50,6 +50,6 @@
#define SMU7_CGULVCONTROL_DFLT 0x00007450
#define SMU7_TARGETACTIVITY_DFLT 50
#define SMU7_MCLK_TARGETACTIVITY_DFLT 10
-
+#define SMU7_SCLK_TARGETACTIVITY_DFLT 30
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index e33ec7f..2b0c366 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <asm/div64.h>
#include <drm/amdgpu_drm.h>
-#include "pp_acpi.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
@@ -41,13 +40,13 @@
#include "hwmgr.h"
#include "smu7_hwmgr.h"
-#include "smu7_smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
+#include "pp_thermal.h"
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -80,6 +79,13 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
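+/*
+ * Built-in power profile presets, presumably indexed by the
+ * PP_SMC_POWER_PROFILE_* mode (assuming profile_mode_setting lays out the
+ * sclk update flag, up/down hysteresis and activity target first, then the
+ * same four fields for mclk).
+ */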
+static const struct profile_mode_setting smu7_profiling[5] =
+ {{1, 0, 100, 30, 1, 0, 100, 10},
+ {1, 10, 0, 30, 0, 0, 0, 0},
+ {0, 0, 0, 0, 1, 10, 16, 31},
+ {1, 0, 11, 50, 1, 0, 100, 10},
+ {1, 0, 5, 30, 0, 0, 0, 0},
+ };
/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
@@ -90,7 +96,6 @@ enum DPM_EVENT_SRC {
DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};
-static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable);
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask);
@@ -792,6 +797,77 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
return 0;
}
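+/*
+ * Copy an allowed clock/voltage dependency table into a caller-provided
+ * table, field by field. Used below to seed the overdrive (ODN) defaults from
+ * the stock dependency tables.
+ */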
+static int smu7_get_voltage_dependency_table(
+ const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+ uint8_t i = 0;
+ PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
+ "Voltage Lookup Table empty",
+ return -EINVAL);
+
+ dep_table->count = allowed_dep_table->count;
+ for (i=0; i<dep_table->count; i++) {
+ dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
+ dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
+ dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
+ dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
+ dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
+ dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
+ dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
+ dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
+ dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
+ dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
+ }
+
+ return 0;
+}
+
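+/*
+ * Seed the overdrive (ODN) tables from the golden DPM tables and the pptable
+ * vddc dependency tables so that user overclocking starts from the stock
+ * sclk/mclk levels and voltages.
+ */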
+static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i;
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+ struct phm_odn_performance_level *entries;
+
+ if (table_info == NULL)
+ return -EINVAL;
+
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+
+ odn_table->odn_core_clock_dpm_levels.num_of_pl =
+ data->golden_dpm_table.sclk_table.count;
+ entries = odn_table->odn_core_clock_dpm_levels.entries;
+ for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
+ entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
+ entries[i].enabled = true;
+ entries[i].vddc = dep_sclk_table->entries[i].vddc;
+ }
+
+ smu7_get_voltage_dependency_table(dep_sclk_table,
+ (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
+
+ odn_table->odn_memory_clock_dpm_levels.num_of_pl =
+ data->golden_dpm_table.mclk_table.count;
+ entries = odn_table->odn_memory_clock_dpm_levels.entries;
+ for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
+ entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
+ entries[i].enabled = true;
+ entries[i].vddc = dep_mclk_table->entries[i].vddc;
+ }
+
+ smu7_get_voltage_dependency_table(dep_mclk_table,
+ (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
+
+ return 0;
+}
+
static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -808,31 +884,12 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
/* save a copy of the default DPM table */
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct smu7_dpm_table));
- return 0;
-}
-uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
-{
- uint32_t reference_clock, tmp;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info = {0};
-
- info.mode_info = &mode_info;
-
- tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
-
- if (tmp)
- return TCLK;
+ /* initialize ODN table */
+ if (hwmgr->od_enabled)
+ smu7_odn_initial_default_setting(hwmgr);
- cgs_get_active_displays_info(hwmgr->device, &info);
- reference_clock = mode_info.ref_clock;
-
- tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
-
- if (0 != tmp)
- return reference_clock / 4;
-
- return reference_clock;
+ return 0;
}
static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
@@ -1164,11 +1221,6 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
int tmp_result = 0;
int result = 0;
- tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is already running",
- );
-
if (smu7_voltage_control(hwmgr)) {
tmp_result = smu7_enable_voltage_control(hwmgr);
PP_ASSERT_WITH_CODE(tmp_result == 0,
@@ -1275,15 +1327,53 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return 0;
}
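+/*
+ * Toggle AVFS through the SMC, but only on parts that support it and only
+ * when the requested state differs from what FEATURE_STATUS currently
+ * reports.
+ */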
+static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
+{
+ if (!hwmgr->avfs_supported)
+ return 0;
+
+ if (enable) {
+ if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
+ hwmgr, PPSMC_MSG_EnableAvfs),
+ "Failed to enable AVFS!",
+ return -EINVAL);
+ }
+ } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
+ hwmgr, PPSMC_MSG_DisableAvfs),
+ "Failed to disable AVFS!",
+ return -EINVAL);
+ }
+
+ return 0;
+}
+
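+/*
+ * Keep AVFS consistent with pending overdrive changes: a VDDC override
+ * disables AVFS, an SCLK override restarts it so the fused voltage curve is
+ * re-evaluated, and otherwise it is simply (re)enabled.
+ */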
+static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (!hwmgr->avfs_supported)
+ return 0;
+
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ smu7_avfs_control(hwmgr, false);
+ } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+ smu7_avfs_control(hwmgr, false);
+ smu7_avfs_control(hwmgr, true);
+ } else {
+ smu7_avfs_control(hwmgr, true);
+ }
+
+ return 0;
+}
+
int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
- tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is not running right now, no need to disable DPM!",
- return 0);
-
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalController))
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -1352,12 +1442,10 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct cgs_system_info sys_info = {0};
- int result;
+ struct amdgpu_device *adev = hwmgr->adev;
data->dll_default_on = false;
data->mclk_dpm0_activity_target = 0xa;
- data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
data->vddc_vddgfx_delta = 300;
data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
@@ -1381,6 +1469,17 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->enable_pkg_pwr_tracking_feature = true;
data->force_pcie_gen = PP_PCIEGenInvalid;
data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
+ data->current_profile_setting.bupdate_sclk = 1;
+ data->current_profile_setting.sclk_up_hyst = 0;
+ data->current_profile_setting.sclk_down_hyst = 100;
+ data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
+ data->current_profile_setting.bupdate_sclk = 1;
+ data->current_profile_setting.mclk_up_hyst = 0;
+ data->current_profile_setting.mclk_down_hyst = 100;
+ data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
+ hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
+ hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
uint8_t tmp1, tmp2;
@@ -1467,17 +1566,13 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->pcie_lane_power_saving.max = 0;
data->pcie_lane_power_saving.min = 16;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (!result) {
- if (sys_info.value & AMD_PG_SUPPORT_UVD)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
- if (sys_info.value & AMD_PG_SUPPORT_VCE)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
- }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
}
/**
@@ -1912,7 +2007,7 @@ static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_voltage_lookup_table *lookup_table;
uint32_t i;
uint32_t hw_revision, sub_vendor_id, sub_sys_id;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
if (table_info != NULL) {
dep_mclk_table = table_info->vdd_dep_on_mclk;
@@ -1920,19 +2015,9 @@ static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
} else
return 0;
- sys_info.size = sizeof(struct cgs_system_info);
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- hw_revision = (uint32_t)sys_info.value;
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
- cgs_query_system_info(hwmgr->device, &sys_info);
- sub_sys_id = (uint32_t)sys_info.value;
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
- cgs_query_system_info(hwmgr->device, &sys_info);
- sub_vendor_id = (uint32_t)sys_info.value;
+ hw_revision = adev->pdev->revision;
+ sub_sys_id = adev->pdev->subsystem_device;
+ sub_vendor_id = adev->pdev->subsystem_vendor;
if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
@@ -2266,14 +2351,18 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
- "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
+ "VDDC dependency on SCLK table is missing. This table is mandatory",
+ return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
- "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
+			"VDDC dependency on SCLK table must contain at least one entry. This table is mandatory",
+ return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
- "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
+ "VDDC dependency on MCLK table is missing. This table is mandatory",
+ return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
- "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
+			"VDDC dependency on MCLK table must contain at least one entry. This table is mandatory",
+ return -EINVAL);
data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
@@ -2371,7 +2460,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
if (0 == result) {
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
data->is_tlu_enabled = false;
@@ -2380,22 +2469,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
- else
- data->pcie_gen_cap = (uint32_t)sys_info.value;
+ data->pcie_gen_cap = adev->pm.pcie_gen_mask;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
data->pcie_spc_cap = 20;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
- else
- data->pcie_lane_cap = (uint32_t)sys_info.value;
+ data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
@@ -2574,8 +2651,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
*sclk_mask = 0;
+ tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
+ }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
@@ -2590,8 +2669,10 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
*sclk_mask = 0;
+ tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+ }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
@@ -2603,6 +2684,9 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
*mclk_mask = golden_dpm_table->mclk_table.count - 1;
*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
+ hwmgr->pstate_sclk = tmp_sclk;
+ hwmgr->pstate_mclk = tmp_mclk;
+
return 0;
}
@@ -2614,6 +2698,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
+ if (hwmgr->pstate_sclk == 0)
+ smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
+
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
@@ -2722,9 +2809,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
}
- smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
cgs_get_active_displays_info(hwmgr->device, &info);
minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
@@ -2754,47 +2838,17 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
minimum_clocks.memoryClock = stable_pstate_mclk;
}
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- smu7_ps->performance_levels[1].engine_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- smu7_ps->performance_levels[1].memory_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
disable_mclk_switching_for_frame_lock = phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
- disable_mclk_switching = ((1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock ||
- smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
- (mode_info.refresh_rate > 120));
+ if (info.display_count == 0)
+ disable_mclk_switching = false;
+ else
+ disable_mclk_switching = ((1 < info.display_count) ||
+ disable_mclk_switching_for_frame_lock ||
+ smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us));
sclk = smu7_ps->performance_levels[0].engine_clock;
mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -3347,7 +3401,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
uint32_t sclk, mclk, activity_percent;
- uint32_t offset;
+ uint32_t offset, val_vid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
/* size must be at least 4 bytes for all sensors */
@@ -3395,6 +3449,16 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
return -EINVAL;
*size = sizeof(struct pp_gpu_power);
return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ if ((data->vr_config & 0xff) == 0x2)
+ val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
+ else
+ val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
+
+ *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
+ return 0;
default:
return -EINVAL;
}
@@ -3417,8 +3481,6 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
uint32_t i;
struct cgs_display_info info = {0};
- data->need_update_smu7_dpm_table = 0;
-
for (i = 0; i < sclk_table->count; i++) {
if (sclk == sclk_table->dpm_levels[i].value)
break;
@@ -3501,15 +3563,17 @@ static int smu7_request_link_speed_change_before_state_change(
if (target_link_speed > current_link_speed) {
switch (target_link_speed) {
+#ifdef CONFIG_ACPI
case PP_PCIEGen3:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
+ if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
break;
data->force_pcie_gen = PP_PCIEGen2;
if (current_link_speed == PP_PCIEGen2)
break;
case PP_PCIEGen2:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
+ if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
break;
+#endif
default:
data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
break;
@@ -3560,108 +3624,27 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
struct pp_hwmgr *hwmgr, const void *input)
{
int result = 0;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct smu7_power_state *smu7_ps =
- cast_const_phw_smu7_power_state(states->pnew_state);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t sclk = smu7_ps->performance_levels
- [smu7_ps->performance_level_count - 1].engine_clock;
- uint32_t mclk = smu7_ps->performance_levels
- [smu7_ps->performance_level_count - 1].memory_clock;
struct smu7_dpm_table *dpm_table = &data->dpm_table;
-
- struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
+ uint32_t count;
+ struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
+ struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
if (0 == data->need_update_smu7_dpm_table)
return 0;
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
- dpm_table->sclk_table.dpm_levels
- [dpm_table->sclk_table.count - 1].value = sclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
- /* Need to do calculation based on the golden DPM table
- * as the Heatmap GPU Clock axis is also based on the default values
- */
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count - 1].value != 0),
- "Divide by 0!",
- return -EINVAL);
- dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
-
- for (i = dpm_count; i > 1; i--) {
- if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
- clock_percent =
- ((sclk
- - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
- ) * 100)
- / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value +
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent)/100;
-
- } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
- clock_percent =
- ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
- - sclk) * 100)
- / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value -
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value;
- }
+ if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+ for (count = 0; count < dpm_table->sclk_table.count; count++) {
+ dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
+ dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
}
}
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
- dpm_table->mclk_table.dpm_levels
- [dpm_table->mclk_table.count - 1].value = mclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value != 0),
- "Divide by 0!",
- return -EINVAL);
- dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
- clock_percent = ((mclk -
- golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
- / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value +
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
-
- } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
- clock_percent = (
- (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
- * 100)
- / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value -
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value;
- }
+ if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+ for (count = 0; count < dpm_table->mclk_table.count; count++) {
+ dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
+ dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
}
}
@@ -3783,7 +3766,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
return -EINVAL);
}
- data->need_update_smu7_dpm_table = 0;
+ data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
return 0;
}
@@ -3811,12 +3794,14 @@ static int smu7_notify_link_speed_change_after_state_change(
smu7_get_current_pcie_speed(hwmgr) > 0)
return 0;
- if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
+#ifdef CONFIG_ACPI
+ if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
if (PP_PCIEGen2 == target_link_speed)
pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
else
pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
}
+#endif
}
return 0;
@@ -3860,6 +3845,11 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);
+ tmp_result = smu7_update_avfs(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to update avfs voltages!",
+ result = tmp_result);
+
tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to generate DPM level enabled mask!",
@@ -3957,7 +3947,8 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
- ref_clock = mode_info.ref_clock;
+ ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
+
refresh_rate = mode_info.refresh_rate;
if (0 == refresh_rate)
@@ -4008,9 +3999,35 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
-static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
- const void *thermal_interrupt_info)
+static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
+ .process = phm_irq_process,
+};
+
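+/* Allocate a shared amdgpu_irq_src backed by phm_irq_process and register it
+ * for the legacy IH sources 230/231 (thermal) and 83 (CTF, GPIO_19). */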
+static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_irq_src *source =
+ kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
+
+ if (!source)
+ return -ENOMEM;
+
+ source->funcs = &smu7_irq_funcs;
+
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ AMDGPU_IH_CLIENTID_LEGACY,
+ 230,
+ source);
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ AMDGPU_IH_CLIENTID_LEGACY,
+ 231,
+ source);
+
+ /* Register CTF(GPIO_19) interrupt */
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ AMDGPU_IH_CLIENTID_LEGACY,
+ 83,
+ source);
+
return 0;
}
@@ -4051,6 +4068,7 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
const struct smu7_power_state *psa;
const struct smu7_power_state *psb;
int i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
return -EINVAL;
@@ -4075,6 +4093,10 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
*equal &= (psa->sclk_threshold == psb->sclk_threshold);
+ /* For OD call, set value based on flag */
+ *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
+ DPMTABLE_OD_UPDATE_MCLK |
+ DPMTABLE_OD_UPDATE_VDDC));
return 0;
}
@@ -4246,9 +4268,7 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
+ if (mask == 0)
return -EINVAL;
switch (type) {
@@ -4267,15 +4287,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
case PP_PCIE:
{
uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
- while (tmp >>= 1)
- level++;
-
- if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr,
+ if (!data->pcie_dpm_key_disabled) {
+ if (fls(tmp) != ffs(tmp))
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ else
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
+ fls(tmp) - 1);
+ }
break;
}
default:
@@ -4292,6 +4312,9 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+ struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
+ struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
int i, now, size = 0;
uint32_t clock, pcie_speed;
@@ -4339,11 +4362,29 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
for (i = 0; i < pcie_table->count; i++)
size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
(i == now) ? "*" : "");
break;
+ case OD_SCLK:
+ if (hwmgr->od_enabled) {
+		size = sprintf(buf, "%s:\n", "OD_SCLK");
+ for (i = 0; i < odn_sclk_table->num_of_pl; i++)
+ size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
+ i, odn_sclk_table->entries[i].clock / 100,
+ odn_sclk_table->entries[i].vddc);
+ }
+ break;
+ case OD_MCLK:
+ if (hwmgr->od_enabled) {
+		size = sprintf(buf, "%s:\n", "OD_MCLK");
+ for (i = 0; i < odn_mclk_table->num_of_pl; i++)
+ size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
+ i, odn_mclk_table->entries[i].clock / 100,
+ odn_mclk_table->entries[i].vddc);
+ }
+ break;
default:
break;
}
@@ -4541,148 +4582,401 @@ static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
return 0;
}
-static void smu7_find_min_clock_masks(struct pp_hwmgr *hwmgr,
- uint32_t *sclk_mask, uint32_t *mclk_mask,
- uint32_t min_sclk, uint32_t min_mclk)
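+/* Write the CAC buffer (DRAM log) addresses and size into the SMU soft registers. */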
+static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_dpm_table *dpm_table = &(data->dpm_table);
- uint32_t i;
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- if (dpm_table->sclk_table.dpm_levels[i].enabled &&
- dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
- *sclk_mask |= 1 << i;
- }
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_ADDR_H),
+ mc_addr_hi);
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- if (dpm_table->mclk_table.dpm_levels[i].enabled &&
- dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
- *mclk_mask |= 1 << i;
- }
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_ADDR_L),
+ mc_addr_low);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
+ virtual_addr_hi);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
+ virtual_addr_low);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
+ size);
+ return 0;
}
-static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
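+/* Report the top engine and memory DPM clocks (falling back to level 0 when
+ * only one level exists). */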
+static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
+ struct amd_pp_simple_clock_info *clocks)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- int tmp_result, result = 0;
- uint32_t sclk_mask = 0, mclk_mask = 0;
+ struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+
+ if (clocks == NULL)
+ return -EINVAL;
+
+ clocks->memory_max_clock = mclk_table->count > 1 ?
+ mclk_table->dpm_levels[mclk_table->count-1].value :
+ mclk_table->dpm_levels[0].value;
+ clocks->engine_max_clock = sclk_table->count > 1 ?
+ sclk_table->dpm_levels[sclk_table->count-1].value :
+ sclk_table->dpm_levels[0].value;
+ return 0;
+}
+
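+/* Report the thermal range: start from the default SMU7 policy and override
+ * the maximum with the power-play table's software shutdown temperature. */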
+static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *thermal_data)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+
+ memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
+
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ else if (hwmgr->pp_table_version == PP_TABLE_V0)
+ thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
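+/* Validate a user OverDrive clock/voltage pair against the VBIOS boot clocks,
+ * the overdrive limits and the [min_vddc, 2000] mV voltage window. */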
+static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ uint32_t clk,
+ uint32_t voltage)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t min_vddc;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+
+ if (table_info == NULL)
+ return false;
+
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+ min_vddc = dep_sclk_table->entries[0].vddc;
- if (hwmgr->chip_id == CHIP_FIJI) {
- if (request->type == AMD_PP_GFX_PROFILE)
- smu7_enable_power_containment(hwmgr);
- else if (request->type == AMD_PP_COMPUTE_PROFILE)
- smu7_disable_power_containment(hwmgr);
+ if (voltage < min_vddc || voltage > 2000) {
+ pr_info("OD voltage is out of range [%d - 2000] mV\n", min_vddc);
+ return false;
}
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
- return -EINVAL;
+ if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
+ if (data->vbios_boot_state.sclk_bootup_value > clk ||
+ hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
+ pr_info("OD engine clock is out of range [%d - %d] MHz\n",
+				data->vbios_boot_state.sclk_bootup_value / 100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock / 100);
+ return false;
+ }
+ } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
+ if (data->vbios_boot_state.mclk_bootup_value > clk ||
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
+ pr_info("OD memory clock is out of range [%d - %d] MHz\n",
+				data->vbios_boot_state.mclk_bootup_value / 100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock / 100);
+ return false;
+ }
+ } else {
+ return false;
+ }
- tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to freeze SCLK MCLK DPM!",
- result = tmp_result);
+ return true;
+}
- tmp_result = smum_populate_requested_graphic_levels(hwmgr, request);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to populate requested graphic levels!",
- result = tmp_result);
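+/* Compare the ODN tables with the live DPM tables and flag which of SCLK,
+ * MCLK or VDDC needs the SMU tables to be rebuilt. */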
+static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i;
- tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to unfreeze SCLK MCLK DPM!",
- result = tmp_result);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
- smu7_find_min_clock_masks(hwmgr, &sclk_mask, &mclk_mask,
- request->min_sclk, request->min_mclk);
+ if (table_info == NULL)
+ return;
- if (sclk_mask) {
- if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.
- sclk_dpm_enable_mask &
- sclk_mask);
+	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
+ data->dpm_table.sclk_table.dpm_levels[i].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ break;
+ }
}
- if (mclk_mask) {
- if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.
- mclk_dpm_enable_mask &
- mclk_mask);
+	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
+ data->dpm_table.mclk_table.dpm_levels[i].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ break;
+ }
}
- return result;
+ dep_table = table_info->vdd_dep_on_mclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
+
+	for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+ break;
+ }
+ }
+ if (i == dep_table->count)
+ data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+
+ dep_table = table_info->vdd_dep_on_sclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
+	for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+ break;
+ }
+ }
+ if (i == dep_table->count)
+ data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
}
-static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
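+/* Handle pp_od_clk_voltage writes: apply "level clock voltage" triples to the
+ * ODN SCLK/MCLK tables, or restore the defaults / commit the edited tables. */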
+static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+ uint32_t i;
+ struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
+ struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t input_clk;
+ uint32_t input_vol;
+ uint32_t input_level;
- if (smu_data == NULL)
+ PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
+ return -EINVAL);
+
+ if (!hwmgr->od_enabled) {
+ pr_info("OverDrive feature not enabled\n");
return -EINVAL;
+ }
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
+ podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
+ podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
+ PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
+ "Failed to get ODN SCLK and Voltage tables",
+ return -EINVAL);
+ } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
+ podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
+ podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
+
+ PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
+ "Failed to get ODN MCLK and Voltage tables",
+ return -EINVAL);
+ } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
+ smu7_odn_initial_default_setting(hwmgr);
return 0;
+ } else if (PP_OD_COMMIT_DPM_TABLE == type) {
+ smu7_check_dpm_table_updated(hwmgr);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
- if (enable) {
- if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr, PPSMC_MSG_EnableAvfs),
- "Failed to enable AVFS!",
- return -EINVAL);
- } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr, PPSMC_MSG_DisableAvfs),
- "Failed to disable AVFS!",
- return -EINVAL);
+ for (i = 0; i < size; i += 3) {
+ if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
+			pr_info("invalid clock voltage input\n");
+ return 0;
+ }
+ input_level = input[i];
+ input_clk = input[i+1] * 100;
+ input_vol = input[i+2];
+
+ if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
+ podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
+ podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
+ podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
+ podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
+ } else {
+ return -EINVAL;
+ }
+ }
return 0;
}
-static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
- uint32_t virtual_addr_low,
- uint32_t virtual_addr_hi,
- uint32_t mc_addr_low,
- uint32_t mc_addr_hi,
- uint32_t size)
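+/* Print the predefined power profile modes plus the custom and currently
+ * active settings into the sysfs buffer. */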
+static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t i, size = 0;
+ uint32_t len;
+
+ static const char *profile_name[6] = {"3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+
+ static const char *title[8] = {"NUM",
+ "MODE_NAME",
+ "SCLK_UP_HYST",
+ "SCLK_DOWN_HYST",
+ "SCLK_ACTIVE_LEVEL",
+ "MCLK_UP_HYST",
+ "MCLK_DOWN_HYST",
+ "MCLK_ACTIVE_LEVEL"};
+
+ if (!buf)
+ return -EINVAL;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- smum_get_offsetof(hwmgr,
- SMU_SoftRegisters, DRAM_LOG_ADDR_H),
- mc_addr_hi);
+ size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
+ title[0], title[1], title[2], title[3],
+ title[4], title[5], title[6], title[7]);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- smum_get_offsetof(hwmgr,
- SMU_SoftRegisters, DRAM_LOG_ADDR_L),
- mc_addr_low);
+	len = ARRAY_SIZE(smu7_profiling);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- smum_get_offsetof(hwmgr,
- SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
- virtual_addr_hi);
+ for (i = 0; i < len; i++) {
+ if (smu7_profiling[i].bupdate_sclk)
+ size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
+ i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
+ smu7_profiling[i].sclk_down_hyst,
+ smu7_profiling[i].sclk_activity);
+ else
+ size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
+ i, profile_name[i], "-", "-", "-");
+
+ if (smu7_profiling[i].bupdate_mclk)
+ size += sprintf(buf + size, "%16d %16d %16d\n",
+ smu7_profiling[i].mclk_up_hyst,
+ smu7_profiling[i].mclk_down_hyst,
+ smu7_profiling[i].mclk_activity);
+ else
+ size += sprintf(buf + size, "%16s %16s %16s\n",
+ "-", "-", "-");
+ }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- smum_get_offsetof(hwmgr,
- SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
- virtual_addr_low);
+ size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n",
+ i, profile_name[i],
+ data->custom_profile_setting.sclk_up_hyst,
+ data->custom_profile_setting.sclk_down_hyst,
+ data->custom_profile_setting.sclk_activity,
+ data->custom_profile_setting.mclk_up_hyst,
+ data->custom_profile_setting.mclk_down_hyst,
+ data->custom_profile_setting.mclk_activity);
+
+ size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n",
+ "*", "CURRENT",
+ data->current_profile_setting.sclk_up_hyst,
+ data->current_profile_setting.sclk_down_hyst,
+ data->current_profile_setting.sclk_activity,
+ data->current_profile_setting.mclk_up_hyst,
+ data->current_profile_setting.mclk_down_hyst,
+ data->current_profile_setting.mclk_activity);
+
+ return size;
+}
+
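+/* Entering the COMPUTE profile forces the two highest enabled SCLK levels;
+ * leaving it restores the full SCLK enable mask. */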
+static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
+				enum PP_SMC_POWER_PROFILE request)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t tmp, level;
+
+	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
+ if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ level = 0;
+ tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ level++;
+ if (level > 0)
+ smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
+ }
+ } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
+ smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ }
+}
+
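+/* Switch the active power profile: CUSTOM takes eight user-supplied
+ * hysteresis/activity values, predefined modes copy the static
+ * smu7_profiling entry; both are pushed to the SMU via
+ * smum_update_dpm_settings(). */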
+static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct profile_mode_setting tmp;
+ enum PP_SMC_POWER_PROFILE mode;
+
+ if (input == NULL)
+ return -EINVAL;
+
+ mode = input[size];
+ switch (mode) {
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ if (size < 8)
+ return -EINVAL;
+
+ data->custom_profile_setting.bupdate_sclk = input[0];
+ data->custom_profile_setting.sclk_up_hyst = input[1];
+ data->custom_profile_setting.sclk_down_hyst = input[2];
+ data->custom_profile_setting.sclk_activity = input[3];
+ data->custom_profile_setting.bupdate_mclk = input[4];
+ data->custom_profile_setting.mclk_up_hyst = input[5];
+ data->custom_profile_setting.mclk_down_hyst = input[6];
+ data->custom_profile_setting.mclk_activity = input[7];
+ if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) {
+ memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting));
+ hwmgr->power_profile_mode = mode;
+ }
+ break;
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ case PP_SMC_POWER_PROFILE_POWERSAVING:
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ case PP_SMC_POWER_PROFILE_VR:
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ if (mode == hwmgr->power_profile_mode)
+ return 0;
+
+ memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
+ if (!smum_update_dpm_settings(hwmgr, &tmp)) {
+ if (tmp.bupdate_sclk) {
+ data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
+ data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
+ data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
+ data->current_profile_setting.sclk_activity = tmp.sclk_activity;
+ }
+ if (tmp.bupdate_mclk) {
+ data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
+ data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
+ data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
+ data->current_profile_setting.mclk_activity = tmp.mclk_activity;
+ }
+ smu7_patch_compute_profile_mode(hwmgr, mode);
+ hwmgr->power_profile_mode = mode;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- smum_get_offsetof(hwmgr,
- SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
- size);
return 0;
}
@@ -4709,7 +5003,6 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.display_config_changed = smu7_display_configuration_changed_task,
.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
- .get_temperature = smu7_thermal_get_temperature,
.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
@@ -4718,7 +5011,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
- .register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
+ .register_irq_handlers = smu7_register_irq_handlers,
.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
.check_states_equal = smu7_check_states_equal,
.set_fan_control_mode = smu7_set_fan_control_mode,
@@ -4733,11 +5026,16 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_clock_by_type = smu7_get_clock_by_type,
.read_sensor = smu7_read_sensor,
.dynamic_state_management_disable = smu7_disable_dpm_tasks,
- .set_power_profile_state = smu7_set_power_profile_state,
.avfs_control = smu7_avfs_control,
.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
.start_thermal_controller = smu7_start_thermal_controller,
.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
+ .get_max_high_clocks = smu7_get_max_high_clocks,
+ .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
+ .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
+ .set_power_limit = smu7_set_power_limit,
+ .get_power_profile_mode = smu7_get_power_profile_mode,
+ .set_power_profile_mode = smu7_set_power_profile_mode,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
@@ -4769,4 +5067,3 @@ int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
return ret;
}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index e021154..f40179c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -34,11 +34,6 @@
#define SMU7_VOLTAGE_CONTROL_BY_SVID2 0x2
#define SMU7_VOLTAGE_CONTROL_MERGED 0x3
-#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
-#define DPMTABLE_UPDATE_SCLK 0x00000004
-#define DPMTABLE_UPDATE_MCLK 0x00000008
-
enum gpu_pt_config_reg_type {
GPU_CONFIGREG_MMR = 0,
GPU_CONFIGREG_SMC_IND,
@@ -178,9 +173,34 @@ struct smu7_pcie_perf_range {
uint16_t min;
};
+struct smu7_odn_clock_voltage_dependency_table {
+ uint32_t count;
+ phm_ppt_v1_clock_voltage_dependency_record entries[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct smu7_odn_dpm_table {
+ struct phm_odn_clock_levels odn_core_clock_dpm_levels;
+ struct phm_odn_clock_levels odn_memory_clock_dpm_levels;
+ struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_sclk;
+ struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_mclk;
+ uint32_t odn_mclk_min_limit;
+};
+
+struct profile_mode_setting {
+ uint8_t bupdate_sclk;
+ uint8_t sclk_up_hyst;
+ uint8_t sclk_down_hyst;
+ uint16_t sclk_activity;
+ uint8_t bupdate_mclk;
+ uint8_t mclk_up_hyst;
+ uint8_t mclk_down_hyst;
+ uint16_t mclk_activity;
+};
+
struct smu7_hwmgr {
struct smu7_dpm_table dpm_table;
struct smu7_dpm_table golden_dpm_table;
+ struct smu7_odn_dpm_table odn_dpm_table;
uint32_t voting_rights_clients[8];
uint32_t static_screen_threshold_unit;
@@ -280,7 +300,6 @@ struct smu7_hwmgr {
struct smu7_pcie_perf_range pcie_lane_power_saving;
bool use_pcie_performance_levels;
bool use_pcie_power_saving_levels;
- uint32_t mclk_activity_target;
uint32_t mclk_dpm0_activity_target;
uint32_t low_sclk_interrupt_threshold;
uint32_t last_mclk_dpm_enable_mask;
@@ -305,6 +324,9 @@ struct smu7_hwmgr {
uint32_t frame_time_x2;
uint16_t mem_latency_high;
uint16_t mem_latency_low;
+ uint32_t vr_config;
+ struct profile_mode_setting custom_profile_setting;
+ struct profile_mode_setting current_profile_setting;
};
/* To convert to Q8.8 format for firmware */
@@ -339,7 +361,6 @@ enum SMU7_I2CLineID {
#define SMU7_I2C_DDCVGACLK 0x4d
#define SMU7_UNUSED_GPIO_PIN 0x7F
-uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
uint32_t clock_insr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 85ca16a..03bc745 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -731,14 +731,9 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
int result;
uint32_t num_se = 0;
uint32_t count, value, value2;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
-
- if (result == 0)
- num_se = sys_info.value;
+ num_se = adev->gfx.config.max_shader_engines;
if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
PP_CAP(PHM_PlatformCaps_DBRamping) ||
@@ -857,6 +852,8 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ n = (n & 0xff) << 8;
+
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -903,12 +900,12 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
- uint32_t default_limit =
- (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
+ hwmgr->default_power_limit = hwmgr->power_limit =
+ cac_table->usMaximumPowerDeliveryLimit;
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
- if (smu7_set_power_limit(hwmgr, default_limit))
+ if (smu7_set_power_limit(hwmgr, hwmgr->power_limit))
pr_err("Failed to set Default Power Limit in SMC!");
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index d7aa643..44527755 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -95,7 +95,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (tach_period == 0)
return -EINVAL;
- crystal_clock_freq = smu7_get_xclk(hwmgr);
+ crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
*speed = 60 * crystal_clock_freq * 10000 / tach_period;
@@ -267,7 +267,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
- crystal_clock_freq = smu7_get_xclk(hwmgr);
+ crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
@@ -308,11 +308,11 @@ int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr)
* @exception PP_Result_BadInput if the input data is not valid.
*/
static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- uint32_t low_temp, uint32_t high_temp)
+ int low_temp, int high_temp)
{
- uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
+ int low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
+ int high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (low < low_temp)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index ad1f6b5..75a465f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -35,9 +35,9 @@
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "cz_ppsmc.h"
-#include "cz_hwmgr.h"
+#include "smu8_hwmgr.h"
#include "power_state.h"
-#include "cz_clockpowergating.h"
+#include "pp_thermal.h"
#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
@@ -46,26 +46,26 @@
#define CURRENT_GFX_VID_MASK 0xff000000
#define CURRENT_GFX_VID__SHIFT 24
-static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic;
+static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
-static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps)
+static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
{
- if (PhwCz_Magic != hw_ps->magic)
+ if (smu8_magic != hw_ps->magic)
return NULL;
- return (struct cz_power_state *)hw_ps;
+ return (struct smu8_power_state *)hw_ps;
}
-static const struct cz_power_state *cast_const_PhwCzPowerState(
+static const struct smu8_power_state *cast_const_smu8_power_state(
const struct pp_hw_power_state *hw_ps)
{
- if (PhwCz_Magic != hw_ps->magic)
+ if (smu8_magic != hw_ps->magic)
return NULL;
- return (struct cz_power_state *)hw_ps;
+ return (struct smu8_power_state *)hw_ps;
}
-static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -96,7 +96,7 @@ static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
return i;
}
-static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
+static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -126,7 +126,7 @@ static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
return i;
}
-static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
+static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
@@ -157,47 +157,42 @@ static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
return i;
}
-static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
+static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- if (cz_hwmgr->max_sclk_level == 0) {
+ if (data->max_sclk_level == 0) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
- cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1;
+ data->max_sclk_level = smum_get_argument(hwmgr) + 1;
}
- return cz_hwmgr->max_sclk_level;
+ return data->max_sclk_level;
}
-static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- uint32_t i;
- struct cgs_system_info sys_info = {0};
- int result;
+ struct smu8_hwmgr *data = hwmgr->backend;
+ struct amdgpu_device *adev = hwmgr->adev;
- cz_hwmgr->gfx_ramp_step = 256*25/100;
- cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
-
- for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
- cz_hwmgr->activity_target[i] = CZ_AT_DFLT;
-
- cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
- cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
- cz_hwmgr->clock_slow_down_freq = 25000;
- cz_hwmgr->skip_clock_slow_down = 1;
- cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
- cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
- cz_hwmgr->voting_rights_clients = 0x00C00033;
- cz_hwmgr->static_screen_threshold = 8;
- cz_hwmgr->ddi_power_gating_disabled = 0;
- cz_hwmgr->bapm_enabled = 1;
- cz_hwmgr->voltage_drop_threshold = 0;
- cz_hwmgr->gfx_power_gating_threshold = 500;
- cz_hwmgr->vce_slow_sclk_threshold = 20000;
- cz_hwmgr->dce_slow_sclk_threshold = 30000;
- cz_hwmgr->disable_driver_thermal_policy = 1;
- cz_hwmgr->disable_nb_ps3_in_battery = 0;
+ data->gfx_ramp_step = 256*25/100;
+ data->gfx_ramp_delay = 1; /* by default, we delay 1us */
+
+ data->mgcg_cgtt_local0 = 0x00000000;
+ data->mgcg_cgtt_local1 = 0x00000000;
+ data->clock_slow_down_freq = 25000;
+ data->skip_clock_slow_down = 1;
+ data->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
+ data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
+ data->voting_rights_clients = 0x00C00033;
+ data->static_screen_threshold = 8;
+ data->ddi_power_gating_disabled = 0;
+ data->bapm_enabled = 1;
+ data->voltage_drop_threshold = 0;
+ data->gfx_power_gating_threshold = 500;
+ data->vce_slow_sclk_threshold = 20000;
+ data->dce_slow_sclk_threshold = 30000;
+ data->disable_driver_thermal_policy = 1;
+ data->disable_nb_ps3_in_battery = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ABM);
@@ -208,14 +203,14 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicM3Arbiter);
- cz_hwmgr->override_dynamic_mgpg = 1;
+ data->override_dynamic_mgpg = 1;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicPatchPowerState);
- cz_hwmgr->thermal_auto_throttling_treshold = 0;
- cz_hwmgr->tdr_clock = 0;
- cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
+ data->thermal_auto_throttling_treshold = 0;
+ data->tdr_clock = 0;
+ data->disable_gfx_power_gating_in_uvd = 0;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicUVDState);
@@ -225,10 +220,10 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEDPM);
- cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
- cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
- cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
- cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0;
+ data->cc6_settings.cpu_cc6_disable = false;
+ data->cc6_settings.cpu_pstate_disable = false;
+ data->cc6_settings.nb_pstate_switch_disable = false;
+ data->cc6_settings.cpu_pstate_separation_time = 0;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableVoltageIsland);
@@ -237,45 +232,42 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_UVDPowerGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (!result) {
- if (sys_info.value & AMD_PG_SUPPORT_UVD)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
- if (sys_info.value & AMD_PG_SUPPORT_VCE)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
- }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+
return 0;
}
-static uint32_t cz_convert_8Bit_index_to_voltage(
+static uint32_t smu8_convert_8Bit_index_to_voltage(
struct pp_hwmgr *hwmgr, uint16_t voltage)
{
return 6200 - (voltage * 25);
}
-static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
+static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
struct phm_clock_and_voltage_limits *table)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
- struct cz_sys_info *sys_info = &cz_hwmgr->sys_info;
+ struct smu8_hwmgr *data = hwmgr->backend;
+ struct smu8_sys_info *sys_info = &data->sys_info;
struct phm_clock_voltage_dependency_table *dep_table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
if (dep_table->count > 0) {
table->sclk = dep_table->entries[dep_table->count-1].clk;
- table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr,
+ table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
(uint16_t)dep_table->entries[dep_table->count-1].v);
}
table->mclk = sys_info->nbp_memory_clock[0];
return 0;
}
-static int cz_init_dynamic_state_adjustment_rule_settings(
+static int smu8_init_dynamic_state_adjustment_rule_settings(
struct pp_hwmgr *hwmgr,
ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
{
@@ -313,9 +305,9 @@ static int cz_init_dynamic_state_adjustment_rule_settings(
return 0;
}
-static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
+static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
+ struct smu8_hwmgr *data = hwmgr->backend;
ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
uint32_t i;
int result = 0;
@@ -337,67 +329,67 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
return -EINVAL;
}
- cz_hwmgr->sys_info.bootup_uma_clock =
+ data->sys_info.bootup_uma_clock =
le32_to_cpu(info->ulBootUpUMAClock);
- cz_hwmgr->sys_info.bootup_engine_clock =
+ data->sys_info.bootup_engine_clock =
le32_to_cpu(info->ulBootUpEngineClock);
- cz_hwmgr->sys_info.dentist_vco_freq =
+ data->sys_info.dentist_vco_freq =
le32_to_cpu(info->ulDentistVCOFreq);
- cz_hwmgr->sys_info.system_config =
+ data->sys_info.system_config =
le32_to_cpu(info->ulSystemConfig);
- cz_hwmgr->sys_info.bootup_nb_voltage_index =
+ data->sys_info.bootup_nb_voltage_index =
le16_to_cpu(info->usBootUpNBVoltage);
- cz_hwmgr->sys_info.htc_hyst_lmt =
+ data->sys_info.htc_hyst_lmt =
(info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
- cz_hwmgr->sys_info.htc_tmp_lmt =
+ data->sys_info.htc_tmp_lmt =
(info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
- if (cz_hwmgr->sys_info.htc_tmp_lmt <=
- cz_hwmgr->sys_info.htc_hyst_lmt) {
+ if (data->sys_info.htc_tmp_lmt <=
+ data->sys_info.htc_hyst_lmt) {
pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
return -EINVAL;
}
- cz_hwmgr->sys_info.nb_dpm_enable =
- cz_hwmgr->enable_nb_ps_policy &&
+ data->sys_info.nb_dpm_enable =
+ data->enable_nb_ps_policy &&
(le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
- for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
- if (i < CZ_NUM_NBPMEMORYCLOCK) {
- cz_hwmgr->sys_info.nbp_memory_clock[i] =
+ for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
+ if (i < SMU8_NUM_NBPMEMORYCLOCK) {
+ data->sys_info.nbp_memory_clock[i] =
le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
}
- cz_hwmgr->sys_info.nbp_n_clock[i] =
+ data->sys_info.nbp_n_clock[i] =
le32_to_cpu(info->ulNbpStateNClkFreq[i]);
}
for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
- cz_hwmgr->sys_info.display_clock[i] =
+ data->sys_info.display_clock[i] =
le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
}
	/* Here use 4 levels, make sure not to exceed */
- for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
- cz_hwmgr->sys_info.nbp_voltage_index[i] =
+ for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
+ data->sys_info.nbp_voltage_index[i] =
le16_to_cpu(info->usNBPStateVoltage[i]);
}
- if (!cz_hwmgr->sys_info.nb_dpm_enable) {
- for (i = 1; i < CZ_NUM_NBPSTATES; i++) {
- if (i < CZ_NUM_NBPMEMORYCLOCK) {
- cz_hwmgr->sys_info.nbp_memory_clock[i] =
- cz_hwmgr->sys_info.nbp_memory_clock[0];
+ if (!data->sys_info.nb_dpm_enable) {
+ for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
+ if (i < SMU8_NUM_NBPMEMORYCLOCK) {
+ data->sys_info.nbp_memory_clock[i] =
+ data->sys_info.nbp_memory_clock[0];
}
- cz_hwmgr->sys_info.nbp_n_clock[i] =
- cz_hwmgr->sys_info.nbp_n_clock[0];
- cz_hwmgr->sys_info.nbp_voltage_index[i] =
- cz_hwmgr->sys_info.nbp_voltage_index[0];
+ data->sys_info.nbp_n_clock[i] =
+ data->sys_info.nbp_n_clock[0];
+ data->sys_info.nbp_voltage_index[i] =
+ data->sys_info.nbp_voltage_index[0];
}
}
@@ -407,40 +399,40 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_EnableDFSBypass);
}
- cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber;
+ data->sys_info.uma_channel_number = info->ucUMAChannelNumber;
- cz_construct_max_power_limits_table (hwmgr,
+ smu8_construct_max_power_limits_table (hwmgr,
&hwmgr->dyn_state.max_clock_voltage_on_ac);
- cz_init_dynamic_state_adjustment_rule_settings(hwmgr,
+ smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
&info->sDISPCLK_Voltage[0]);
return result;
}
-static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
+static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->boot_power_level.engineClock =
- cz_hwmgr->sys_info.bootup_engine_clock;
+ data->boot_power_level.engineClock =
+ data->sys_info.bootup_engine_clock;
- cz_hwmgr->boot_power_level.vddcIndex =
- (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
+ data->boot_power_level.vddcIndex =
+ (uint8_t)data->sys_info.bootup_nb_voltage_index;
- cz_hwmgr->boot_power_level.dsDividerIndex = 0;
- cz_hwmgr->boot_power_level.ssDividerIndex = 0;
- cz_hwmgr->boot_power_level.allowGnbSlow = 1;
- cz_hwmgr->boot_power_level.forceNBPstate = 0;
- cz_hwmgr->boot_power_level.hysteresis_up = 0;
- cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
- cz_hwmgr->boot_power_level.display_wm = 0;
- cz_hwmgr->boot_power_level.vce_wm = 0;
+ data->boot_power_level.dsDividerIndex = 0;
+ data->boot_power_level.ssDividerIndex = 0;
+ data->boot_power_level.allowGnbSlow = 1;
+ data->boot_power_level.forceNBPstate = 0;
+ data->boot_power_level.hysteresis_up = 0;
+ data->boot_power_level.numSIMDToPowerDown = 0;
+ data->boot_power_level.display_wm = 0;
+ data->boot_power_level.vce_wm = 0;
return 0;
}
-static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
+static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
{
struct SMU8_Fusion_ClkTable *clock_table;
int ret;
@@ -470,18 +462,18 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
clock_table = (struct SMU8_Fusion_ClkTable *)table;
/* patch clock table */
- PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
+ PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
- for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
+ for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {
/* vddc_sclk */
clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
@@ -559,9 +551,9 @@ static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
return ret;
}
-static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
+static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
unsigned long clock = 0, level;
@@ -569,25 +561,25 @@ static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
- cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
+ data->sclk_dpm.soft_min_clk = table->entries[0].clk;
+ data->sclk_dpm.hard_min_clk = table->entries[0].clk;
- level = cz_get_max_sclk_level(hwmgr) - 1;
+ level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
clock = table->entries[level].clk;
else
clock = table->entries[table->count - 1].clk;
- cz_hwmgr->sclk_dpm.soft_max_clk = clock;
- cz_hwmgr->sclk_dpm.hard_max_clk = clock;
+ data->sclk_dpm.soft_max_clk = clock;
+ data->sclk_dpm.hard_max_clk = clock;
return 0;
}
-static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
+static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_uvd_clock_voltage_dependency_table *table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
unsigned long clock = 0, level;
@@ -595,8 +587,8 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->uvd_dpm.soft_min_clk = 0;
- cz_hwmgr->uvd_dpm.hard_min_clk = 0;
+ data->uvd_dpm.soft_min_clk = 0;
+ data->uvd_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
level = smum_get_argument(hwmgr);
@@ -606,15 +598,15 @@ static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
else
clock = table->entries[table->count - 1].vclk;
- cz_hwmgr->uvd_dpm.soft_max_clk = clock;
- cz_hwmgr->uvd_dpm.hard_max_clk = clock;
+ data->uvd_dpm.soft_max_clk = clock;
+ data->uvd_dpm.hard_max_clk = clock;
return 0;
}
-static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
+static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_vce_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
unsigned long clock = 0, level;
@@ -622,8 +614,8 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->vce_dpm.soft_min_clk = 0;
- cz_hwmgr->vce_dpm.hard_min_clk = 0;
+ data->vce_dpm.soft_min_clk = 0;
+ data->vce_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
level = smum_get_argument(hwmgr);
@@ -633,15 +625,15 @@ static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
else
clock = table->entries[table->count - 1].ecclk;
- cz_hwmgr->vce_dpm.soft_max_clk = clock;
- cz_hwmgr->vce_dpm.hard_max_clk = clock;
+ data->vce_dpm.soft_max_clk = clock;
+ data->vce_dpm.hard_max_clk = clock;
return 0;
}
-static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
+static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_acp_clock_voltage_dependency_table *table =
hwmgr->dyn_state.acp_clock_voltage_dependency_table;
unsigned long clock = 0, level;
@@ -649,8 +641,8 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->acp_dpm.soft_min_clk = 0;
- cz_hwmgr->acp_dpm.hard_min_clk = 0;
+ data->acp_dpm.soft_min_clk = 0;
+ data->acp_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
level = smum_get_argument(hwmgr);
@@ -660,32 +652,32 @@ static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
else
clock = table->entries[table->count - 1].acpclk;
- cz_hwmgr->acp_dpm.soft_max_clk = clock;
- cz_hwmgr->acp_dpm.hard_max_clk = clock;
+ data->acp_dpm.soft_max_clk = clock;
+ data->acp_dpm.hard_max_clk = clock;
return 0;
}
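
The four *_init_*_limit helpers above share the same shape: seed the soft/hard minimum from the lowest table entry (or zero), ask the SMU for its highest level, and clamp that level into the dependency table. A minimal sketch of the clamp step, with a hypothetical helper name, purely for illustration:

/* Hypothetical helper, illustration only: pick the clock for a level
 * reported by the SMU, falling back to the last table entry when the
 * reported level is out of range.
 */
static unsigned long smu8_clamp_level_to_clock(unsigned long level,
		unsigned long count, const unsigned long *clks)
{
	return (level < count) ? clks[level] : clks[count - 1];
}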
-static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr)
+static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->uvd_power_gated = false;
- cz_hwmgr->vce_power_gated = false;
- cz_hwmgr->samu_power_gated = false;
- cz_hwmgr->acp_power_gated = false;
- cz_hwmgr->pgacpinit = true;
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+ data->samu_power_gated = false;
+ data->acp_power_gated = false;
+ data->pgacpinit = true;
}
-static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->low_sclk_interrupt_threshold = 0;
+ data->low_sclk_interrupt_threshold = 0;
}
-static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
+static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -694,29 +686,29 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
unsigned long stable_pstate_sclk;
unsigned long percentage;
- cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
- level = cz_get_max_sclk_level(hwmgr) - 1;
+ data->sclk_dpm.soft_min_clk = table->entries[0].clk;
+ level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
- cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[level].clk;
+ data->sclk_dpm.soft_max_clk = table->entries[level].clk;
else
- cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
+ data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
clock = hwmgr->display_config.min_core_set_clock;
if (clock == 0)
pr_debug("min_core_set_clock not set\n");
- if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
- cz_hwmgr->sclk_dpm.hard_min_clk = clock;
+ if (data->sclk_dpm.hard_min_clk != clock) {
+ data->sclk_dpm.hard_min_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkHardMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.hard_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.hard_min_clk,
PPSMC_MSG_SetSclkHardMin));
}
- clock = cz_hwmgr->sclk_dpm.soft_min_clk;
+ clock = data->sclk_dpm.soft_min_clk;
/* update minimum clocks for Stable P-State feature */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -728,41 +720,38 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
if (clock < stable_pstate_sclk)
clock = stable_pstate_sclk;
- } else {
- if (clock < hwmgr->gfx_arbiter.sclk)
- clock = hwmgr->gfx_arbiter.sclk;
}
- if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
- cz_hwmgr->sclk_dpm.soft_min_clk = clock;
+ if (data->sclk_dpm.soft_min_clk != clock) {
+ data->sclk_dpm.soft_min_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState) &&
- cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
- cz_hwmgr->sclk_dpm.soft_max_clk = clock;
+ data->sclk_dpm.soft_max_clk != clock) {
+ data->sclk_dpm.soft_max_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));
}
return 0;
}
-static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
+static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
if (clks == 0)
- clks = CZ_MIN_DEEP_SLEEP_SCLK;
+ clks = SMU8_MIN_DEEP_SLEEP_SCLK;
PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
@@ -774,21 +763,21 @@ static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr)
+static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr =
- (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data =
+ hwmgr->backend;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetWatermarkFrequency,
- cz_hwmgr->sclk_dpm.soft_max_clk);
+ data->sclk_dpm.soft_max_clk);
return 0;
}
-static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
+static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
if (hw_data->is_nb_dpm_enabled) {
if (enable) {
@@ -809,35 +798,35 @@ static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, b
return 0;
}
-static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr)
+static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
unsigned long dpm_features = 0;
- if (cz_hwmgr->is_nb_dpm_enabled) {
- cz_nbdpm_pstate_enable_disable(hwmgr, true, true);
+ if (data->is_nb_dpm_enabled) {
+ smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
dpm_features);
if (ret == 0)
- cz_hwmgr->is_nb_dpm_enabled = false;
+ data->is_nb_dpm_enabled = false;
}
return ret;
}
-static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
+static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
unsigned long dpm_features = 0;
- if (!cz_hwmgr->is_nb_dpm_enabled) {
+ if (!data->is_nb_dpm_enabled) {
PP_DBG_LOG("enabling ALL SMU features.\n");
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
@@ -845,94 +834,94 @@ static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
PPSMC_MSG_EnableAllSmuFeatures,
dpm_features);
if (ret == 0)
- cz_hwmgr->is_nb_dpm_enabled = true;
+ data->is_nb_dpm_enabled = true;
}
return ret;
}
-static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
+static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
{
bool disable_switch;
bool enable_low_mem_state;
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
- const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state);
+ const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
if (hw_data->sys_info.nb_dpm_enable) {
disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
if (pnew_state->action == FORCE_HIGH)
- cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
+ smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
else if (pnew_state->action == CANCEL_FORCE_HIGH)
- cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
+ smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
else
- cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
+ smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
}
return 0;
}
-static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
int ret = 0;
- cz_update_sclk_limit(hwmgr);
- cz_set_deep_sleep_sclk_threshold(hwmgr);
- cz_set_watermark_threshold(hwmgr);
- ret = cz_enable_nb_dpm(hwmgr);
+ smu8_update_sclk_limit(hwmgr);
+ smu8_set_deep_sleep_sclk_threshold(hwmgr);
+ smu8_set_watermark_threshold(hwmgr);
+ ret = smu8_enable_nb_dpm(hwmgr);
if (ret)
return ret;
- cz_update_low_mem_pstate(hwmgr, input);
+ smu8_update_low_mem_pstate(hwmgr, input);
return 0;
};
-static int cz_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
{
int ret;
- ret = cz_upload_pptable_to_smu(hwmgr);
+ ret = smu8_upload_pptable_to_smu(hwmgr);
if (ret)
return ret;
- ret = cz_init_sclk_limit(hwmgr);
+ ret = smu8_init_sclk_limit(hwmgr);
if (ret)
return ret;
- ret = cz_init_uvd_limit(hwmgr);
+ ret = smu8_init_uvd_limit(hwmgr);
if (ret)
return ret;
- ret = cz_init_vce_limit(hwmgr);
+ ret = smu8_init_vce_limit(hwmgr);
if (ret)
return ret;
- ret = cz_init_acp_limit(hwmgr);
+ ret = smu8_init_acp_limit(hwmgr);
if (ret)
return ret;
- cz_init_power_gate_state(hwmgr);
- cz_init_sclk_threshold(hwmgr);
+ smu8_init_power_gate_state(hwmgr);
+ smu8_init_sclk_threshold(hwmgr);
return 0;
}
-static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
+static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
hw_data->disp_clk_bypass_pending = false;
hw_data->disp_clk_bypass = false;
}
-static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
+static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
hw_data->is_nb_dpm_enabled = false;
}
-static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
+static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
hw_data->cc6_settings.cc6_setting_changed = false;
hw_data->cc6_settings.cpu_pstate_separation_time = 0;
@@ -940,45 +929,47 @@ static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
hw_data->cc6_settings.cpu_pstate_disable = false;
}
-static int cz_power_off_asic(struct pp_hwmgr *hwmgr)
+static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
{
- cz_power_up_display_clock_sys_pll(hwmgr);
- cz_clear_nb_dpm_flag(hwmgr);
- cz_reset_cc6_data(hwmgr);
+ smu8_power_up_display_clock_sys_pll(hwmgr);
+ smu8_clear_nb_dpm_flag(hwmgr);
+ smu8_reset_cc6_data(hwmgr);
return 0;
};
-static void cz_program_voting_clients(struct pp_hwmgr *hwmgr)
+static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
{
- PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0,
- PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0,
+ SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
}
-static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
+static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
- PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
}
-static int cz_start_dpm(struct pp_hwmgr *hwmgr)
+static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
+ data->dpm_flags |= DPMFlags_SCLK_Enabled;
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
SCLK_DPM_MASK);
}
-static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
+static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
unsigned long dpm_features = 0;
- if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) {
+ if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
dpm_features |= SCLK_DPM_MASK;
- cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled;
+ data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
dpm_features);
@@ -986,118 +977,80 @@ static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
return ret;
}
-static int cz_program_bootup_state(struct pp_hwmgr *hwmgr)
+static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock;
- cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock;
+ data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
+ data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));
return 0;
}
-static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
- cz_hwmgr->acp_boot_level = 0xff;
-}
-
-static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
- unsigned long check_feature)
+static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
{
- int result;
- unsigned long features;
-
- result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
- if (result == 0) {
- features = smum_get_argument(hwmgr);
- if (features & check_feature)
- return true;
- }
+ struct smu8_hwmgr *data = hwmgr->backend;
- return false;
+ data->acp_boot_level = 0xff;
}
-static bool cz_check_for_dpm_enabled(struct pp_hwmgr *hwmgr)
+static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
- if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
- return true;
- return false;
-}
-
-static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- if (!cz_check_for_dpm_enabled(hwmgr)) {
- pr_info("dpm has been disabled\n");
- return 0;
- }
- cz_disable_nb_dpm(hwmgr);
+ smu8_disable_nb_dpm(hwmgr);
- cz_clear_voting_clients(hwmgr);
- if (cz_stop_dpm(hwmgr))
+ smu8_clear_voting_clients(hwmgr);
+ if (smu8_stop_dpm(hwmgr))
return -EINVAL;
return 0;
};
-static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
- if (cz_check_for_dpm_enabled(hwmgr)) {
- pr_info("dpm has been enabled\n");
- return 0;
- }
-
- cz_program_voting_clients(hwmgr);
- if (cz_start_dpm(hwmgr))
+ smu8_program_voting_clients(hwmgr);
+ if (smu8_start_dpm(hwmgr))
return -EINVAL;
- cz_program_bootup_state(hwmgr);
- cz_reset_acp_boot_level(hwmgr);
+ smu8_program_bootup_state(hwmgr);
+ smu8_reset_acp_boot_level(hwmgr);
return 0;
};
-static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
const struct pp_power_state *pcurrent_ps)
{
- struct cz_power_state *cz_ps =
- cast_PhwCzPowerState(&prequest_ps->hardware);
+ struct smu8_power_state *smu8_ps =
+ cast_smu8_power_state(&prequest_ps->hardware);
- const struct cz_power_state *cz_current_ps =
- cast_const_PhwCzPowerState(&pcurrent_ps->hardware);
+ const struct smu8_power_state *smu8_current_ps =
+ cast_const_smu8_power_state(&pcurrent_ps->hardware);
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct PP_Clocks clocks = {0, 0, 0, 0};
bool force_high;
uint32_t num_of_active_displays = 0;
struct cgs_display_info info = {0};
- cz_ps->evclk = hwmgr->vce_arbiter.evclk;
- cz_ps->ecclk = hwmgr->vce_arbiter.ecclk;
+ smu8_ps->need_dfs_bypass = true;
- cz_ps->need_dfs_bypass = true;
-
- cz_hwmgr->video_start = (hwmgr->uvd_arbiter.vclk != 0 || hwmgr->uvd_arbiter.dclk != 0 ||
- hwmgr->vce_arbiter.evclk != 0 || hwmgr->vce_arbiter.ecclk != 0);
-
- cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
+ data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
hwmgr->display_config.min_mem_set_clock :
- cz_hwmgr->sys_info.nbp_memory_clock[1];
+ data->sys_info.nbp_memory_clock[1];
cgs_get_active_displays_info(hwmgr->device, &info);
num_of_active_displays = info.display_count;
@@ -1105,59 +1058,56 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
- if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
+ force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
|| (num_of_active_displays >= 3);
- cz_ps->action = cz_current_ps->action;
+ smu8_ps->action = smu8_current_ps->action;
if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- cz_nbdpm_pstate_enable_disable(hwmgr, false, false);
+ smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
- cz_nbdpm_pstate_enable_disable(hwmgr, false, true);
- else if (!force_high && (cz_ps->action == FORCE_HIGH))
- cz_ps->action = CANCEL_FORCE_HIGH;
- else if (force_high && (cz_ps->action != FORCE_HIGH))
- cz_ps->action = FORCE_HIGH;
+ smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
+ else if (!force_high && (smu8_ps->action == FORCE_HIGH))
+ smu8_ps->action = CANCEL_FORCE_HIGH;
+ else if (force_high && (smu8_ps->action != FORCE_HIGH))
+ smu8_ps->action = FORCE_HIGH;
else
- cz_ps->action = DO_NOTHING;
+ smu8_ps->action = DO_NOTHING;
return 0;
}
-static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct cz_hwmgr *data;
+ struct smu8_hwmgr *data;
- data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
+ data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
- result = cz_initialize_dpm_defaults(hwmgr);
+ result = smu8_initialize_dpm_defaults(hwmgr);
if (result != 0) {
- pr_err("cz_initialize_dpm_defaults failed\n");
+ pr_err("smu8_initialize_dpm_defaults failed\n");
return result;
}
- result = cz_get_system_info_data(hwmgr);
+ result = smu8_get_system_info_data(hwmgr);
if (result != 0) {
- pr_err("cz_get_system_info_data failed\n");
+ pr_err("smu8_get_system_info_data failed\n");
return result;
}
- cz_construct_boot_state(hwmgr);
+ smu8_construct_boot_state(hwmgr);
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU8_MAX_HARDWARE_POWERLEVELS;
return result;
}
-static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
if (hwmgr != NULL) {
kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
@@ -1169,28 +1119,28 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
+static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMin));
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));
return 0;
}
-static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
unsigned long clock = 0, level;
@@ -1198,54 +1148,56 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
if (NULL == table || table->count <= 0)
return -EINVAL;
- cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
- cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
+ data->sclk_dpm.soft_min_clk = table->entries[0].clk;
+ data->sclk_dpm.hard_min_clk = table->entries[0].clk;
+ hwmgr->pstate_sclk = table->entries[0].clk;
+ hwmgr->pstate_mclk = 0;
- level = cz_get_max_sclk_level(hwmgr) - 1;
+ level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
clock = table->entries[level].clk;
else
clock = table->entries[table->count - 1].clk;
- cz_hwmgr->sclk_dpm.soft_max_clk = clock;
- cz_hwmgr->sclk_dpm.hard_max_clk = clock;
+ data->sclk_dpm.soft_max_clk = clock;
+ data->sclk_dpm.hard_max_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));
return 0;
}
-static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMax));
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_min_clk,
+ smu8_get_sclk_level(hwmgr,
+ data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
return 0;
}
-static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
@@ -1253,15 +1205,15 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = cz_phm_force_dpm_highest(hwmgr);
+ ret = smu8_phm_force_dpm_highest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = cz_phm_force_dpm_lowest(hwmgr);
+ ret = smu8_phm_force_dpm_lowest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = cz_phm_unforce_dpm_levels(hwmgr);
+ ret = smu8_phm_unforce_dpm_levels(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1272,14 +1224,14 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return ret;
}
-int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
return 0;
}
-int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
return smum_send_msg_to_smc_with_parameter(
@@ -1291,75 +1243,36 @@ int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- struct phm_uvd_clock_voltage_dependency_table *ptable =
- hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
- if (!bgate) {
- /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
- if (PP_CAP(PHM_PlatformCaps_StablePState) ||
- hwmgr->en_umd_pstate) {
- cz_hwmgr->uvd_dpm.hard_min_clk =
- ptable->entries[ptable->count - 1].vclk;
-
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetUvdHardMin,
- cz_get_uvd_level(hwmgr,
- cz_hwmgr->uvd_dpm.hard_min_clk,
- PPSMC_MSG_SetUvdHardMin));
-
- cz_enable_disable_uvd_dpm(hwmgr, true);
- } else {
- cz_enable_disable_uvd_dpm(hwmgr, true);
- }
- } else {
- cz_enable_disable_uvd_dpm(hwmgr, false);
- }
-
- return 0;
-}
-
-int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_vce_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
if (PP_CAP(PHM_PlatformCaps_StablePState) ||
hwmgr->en_umd_pstate) {
- cz_hwmgr->vce_dpm.hard_min_clk =
+ data->vce_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
+ smu8_get_eclk_level(hwmgr,
+ data->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
- /*Program HardMin based on the vce_arbiter.ecclk */
- if (hwmgr->vce_arbiter.ecclk == 0) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkHardMin, 0);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetEclkHardMin, 0);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
- smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkSoftMin, 1);
- } else {
- cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
- }
}
return 0;
}
-int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
@@ -1367,7 +1280,7 @@ int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
@@ -1375,17 +1288,17 @@ int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
- return cz_hwmgr->sys_info.bootup_uma_clock;
+ return data->sys_info.bootup_uma_clock;
}
-static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
- struct cz_power_state *cz_ps;
+ struct smu8_power_state *smu8_ps;
if (hwmgr == NULL)
return -EINVAL;
@@ -1395,59 +1308,59 @@ static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
if (ps == NULL)
return -EINVAL;
- cz_ps = cast_PhwCzPowerState(&ps->hardware);
+ smu8_ps = cast_smu8_power_state(&ps->hardware);
if (low)
- return cz_ps->levels[0].engineClock;
+ return smu8_ps->levels[0].engineClock;
else
- return cz_ps->levels[cz_ps->level-1].engineClock;
+ return smu8_ps->levels[smu8_ps->level-1].engineClock;
}
-static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
+ struct smu8_hwmgr *data = hwmgr->backend;
+ struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
- cz_ps->level = 1;
- cz_ps->nbps_flags = 0;
- cz_ps->bapm_flags = 0;
- cz_ps->levels[0] = cz_hwmgr->boot_power_level;
+ smu8_ps->level = 1;
+ smu8_ps->nbps_flags = 0;
+ smu8_ps->bapm_flags = 0;
+ smu8_ps->levels[0] = data->boot_power_level;
return 0;
}
-static int cz_dpm_get_pp_table_entry_callback(
+static int smu8_dpm_get_pp_table_entry_callback(
struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps,
unsigned int index,
const void *clock_info)
{
- struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
+ struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
- const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info;
+ const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
- uint8_t clock_info_index = cz_clock_info->index;
+ uint8_t clock_info_index = smu8_clock_info->index;
if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
- cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
- cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
+ smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
+ smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
- cz_ps->level = index + 1;
+ smu8_ps->level = index + 1;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- cz_ps->levels[index].dsDividerIndex = 5;
- cz_ps->levels[index].ssDividerIndex = 5;
+ smu8_ps->levels[index].dsDividerIndex = 5;
+ smu8_ps->levels[index].ssDividerIndex = 5;
}
return 0;
}
-static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
+static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
int result;
unsigned long ret = 0;
@@ -1457,31 +1370,31 @@ static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
return result ? 0 : ret;
}
-static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
unsigned long entry, struct pp_power_state *ps)
{
int result;
- struct cz_power_state *cz_ps;
+ struct smu8_power_state *smu8_ps;
- ps->hardware.magic = PhwCz_Magic;
+ ps->hardware.magic = smu8_magic;
- cz_ps = cast_PhwCzPowerState(&(ps->hardware));
+ smu8_ps = cast_smu8_power_state(&(ps->hardware));
result = pp_tables_get_entry(hwmgr, entry, ps,
- cz_dpm_get_pp_table_entry_callback);
+ smu8_dpm_get_pp_table_entry_callback);
- cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
- cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
+ smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
+ smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
return result;
}
-static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
+static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
{
- return sizeof(struct cz_power_state);
+ return sizeof(struct smu8_power_state);
}
-static void cz_hw_print_display_cfg(
+static void smu8_hw_print_display_cfg(
const struct cc6_settings *cc6_settings)
{
PP_DBG_LOG("New Display Configuration:\n");
@@ -1496,16 +1409,16 @@ static void cz_hw_print_display_cfg(
cc6_settings->cpu_pstate_separation_time);
}
- static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr)
+ static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
uint32_t data = 0;
if (hw_data->cc6_settings.cc6_setting_changed) {
hw_data->cc6_settings.cc6_setting_changed = false;
- cz_hw_print_display_cfg(&hw_data->cc6_settings);
+ smu8_hw_print_display_cfg(&hw_data->cc6_settings);
data |= (hw_data->cc6_settings.cpu_pstate_separation_time
& PWRMGT_SEPARATION_TIME_MASK)
@@ -1529,10 +1442,10 @@ static void cz_hw_print_display_cfg(
}
-static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
+static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *hw_data = hwmgr->backend;
if (separation_time !=
hw_data->cc6_settings.cpu_pstate_separation_time ||
@@ -1556,7 +1469,7 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
return 0;
}
-static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
+static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
struct amd_pp_simple_clock_info *info)
{
uint32_t i;
@@ -1577,12 +1490,9 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
-static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
+static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- return -EINVAL;
-
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -1599,9 +1509,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
+static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *sclk_table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
int i, now, size = 0;
@@ -1619,26 +1530,38 @@ static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
i, sclk_table->entries[i].clk / 100,
(i == now) ? "*" : "");
break;
+ case PP_MCLK:
+ now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ ixTARGET_AND_CURRENT_PROFILE_INDEX),
+ TARGET_AND_CURRENT_PROFILE_INDEX,
+ CURR_MCLK_INDEX);
+
+ for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
+ (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
+ break;
default:
break;
}
return size;
}
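
A minimal sketch, not part of the patch, of the index inversion the new PP_MCLK branch relies on: the loop walks nbp_memory_clock[] backwards so that printed level 0 corresponds to the last table entry. The helper name is hypothetical.

/* Hypothetical helper, illustration only: map a printed MCLK level to
 * its entry in nbp_memory_clock[], mirroring the loop above.
 */
static uint32_t smu8_mclk_for_printed_level(struct smu8_hwmgr *data, int level)
{
	return data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1 - level];
}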
-static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
- const struct cz_power_state *ps;
- struct cz_hwmgr *data;
+ const struct smu8_power_state *ps;
+ struct smu8_hwmgr *data;
uint32_t level_index;
uint32_t i;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
- data = (struct cz_hwmgr *)(hwmgr->backend);
- ps = cast_const_PhwCzPowerState(state);
+ data = hwmgr->backend;
+ ps = cast_const_smu8_power_state(state);
level_index = index > ps->level - 1 ? ps->level - 1 : index;
level->coreClock = ps->levels[level_index].engineClock;
@@ -1653,21 +1576,21 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
}
if (level_index == 0)
- level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1];
+ level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
else
level->memory_clock = data->sys_info.nbp_memory_clock[0];
- level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
+ level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
level->nonLocalMemoryFreq = 0;
level->nonLocalMemoryWidth = 0;
return 0;
}
-static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
+static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
- const struct cz_power_state *ps = cast_const_PhwCzPowerState(state);
+ const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
@@ -1675,14 +1598,14 @@ static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
+static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
struct amd_pp_clocks *clocks)
{
- struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
int i;
struct phm_clock_voltage_dependency_table *table;
- clocks->count = cz_get_max_sclk_level(hwmgr);
+ clocks->count = smu8_get_max_sclk_level(hwmgr);
switch (type) {
case amd_pp_disp_clock:
for (i = 0; i < clocks->count; i++)
@@ -1694,7 +1617,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t
clocks->clock[i] = table->entries[i].clk;
break;
case amd_pp_mem_clock:
- clocks->count = CZ_NUM_NBPMEMORYCLOCK;
+ clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
for (i = 0; i < clocks->count; i++)
clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
break;
@@ -1705,7 +1628,7 @@ static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type t
return 0;
}
-static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -1716,7 +1639,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
return -EINVAL;
- level = cz_get_max_sclk_level(hwmgr) - 1;
+ level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
clocks->engine_max_clock = table->entries[level].clk;
@@ -1728,7 +1651,7 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0;
}
-static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
int actual_temp = 0;
uint32_t val = cgs_read_ind_register(hwmgr->device,
@@ -1743,10 +1666,10 @@ static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
return actual_temp;
}
-static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
@@ -1784,18 +1707,18 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_VDDNB:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
+ vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
*((uint32_t *)value) = vddnb;
return 0;
case AMDGPU_PP_SENSOR_VDDGFX:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+ vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
*((uint32_t *)value) = vddgfx;
return 0;
case AMDGPU_PP_SENSOR_UVD_VCLK:
- if (!cz_hwmgr->uvd_power_gated) {
- if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ if (!data->uvd_power_gated) {
+ if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
vclk = uvd_table->entries[uvd_index].vclk;
@@ -1806,8 +1729,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_UVD_DCLK:
- if (!cz_hwmgr->uvd_power_gated) {
- if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ if (!data->uvd_power_gated) {
+ if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
dclk = uvd_table->entries[uvd_index].dclk;
@@ -1818,8 +1741,8 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_VCE_ECCLK:
- if (!cz_hwmgr->vce_power_gated) {
- if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ if (!data->vce_power_gated) {
+ if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
ecclk = vce_table->entries[vce_index].ecclk;
@@ -1840,20 +1763,20 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = activity_percent;
return 0;
case AMDGPU_PP_SENSOR_UVD_POWER:
- *((uint32_t *)value) = cz_hwmgr->uvd_power_gated ? 0 : 1;
+ *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
return 0;
case AMDGPU_PP_SENSOR_VCE_POWER:
- *((uint32_t *)value) = cz_hwmgr->vce_power_gated ? 0 : 1;
+ *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
return 0;
case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = cz_thermal_get_temperature(hwmgr);
+ *((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
return 0;
default:
return -EINVAL;
}
}
-static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
uint32_t virtual_addr_low,
uint32_t virtual_addr_hi,
uint32_t mc_addr_low,
@@ -1879,43 +1802,190 @@ static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
return 0;
}
+static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *thermal_data)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+
+ memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
+
+ thermal_data->max = (data->thermal_auto_throttling_treshold +
+ data->sys_info.htc_hyst_lmt) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
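+
A minimal sketch of the arithmetic used above, not part of the patch: the maximum trip point is the throttling threshold plus the hysteresis limit, scaled into the units used by the rest of powerplay. The helper name is hypothetical; the constant comes from the powerplay headers.

/* Hypothetical helper, illustration only: how thermal_data->max scales
 * with the threshold and hysteresis values.
 */
static int smu8_example_max_trip(uint32_t treshold, uint32_t hyst)
{
	return (treshold + hyst) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
}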
+
+static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+ uint32_t dpm_features = 0;
+
+ if (enable &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM)) {
+ data->dpm_flags |= DPMFlags_UVD_Enabled;
+ dpm_features |= UVD_DPM_MASK;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ } else {
+ dpm_features |= UVD_DPM_MASK;
+ data->dpm_flags &= ~DPMFlags_UVD_Enabled;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ }
+ return 0;
+}
+
+int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+ struct phm_uvd_clock_voltage_dependency_table *ptable =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+ if (!bgate) {
+ /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
+ if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+ hwmgr->en_umd_pstate) {
+ data->uvd_dpm.hard_min_clk =
+ ptable->entries[ptable->count - 1].vclk;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUvdHardMin,
+ smu8_get_uvd_level(hwmgr,
+ data->uvd_dpm.hard_min_clk,
+ PPSMC_MSG_SetUvdHardMin));
+
+ smu8_enable_disable_uvd_dpm(hwmgr, true);
+ } else {
+ smu8_enable_disable_uvd_dpm(hwmgr, true);
+ }
+ } else {
+ smu8_enable_disable_uvd_dpm(hwmgr, false);
+ }
+
+ return 0;
+}
+
+static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+ uint32_t dpm_features = 0;
+
+ if (enable && phm_cap_enabled(
+ hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEDPM)) {
+ data->dpm_flags |= DPMFlags_VCE_Enabled;
+ dpm_features |= VCE_DPM_MASK;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ } else {
+ dpm_features |= VCE_DPM_MASK;
+ data->dpm_flags &= ~DPMFlags_VCE_Enabled;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ }
+
+ return 0;
+}
+
+
+static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+
+ data->uvd_power_gated = bgate;
+
+ if (bgate) {
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
+ smu8_dpm_update_uvd_dpm(hwmgr, true);
+ smu8_dpm_powerdown_uvd(hwmgr);
+ } else {
+ smu8_dpm_powerup_uvd(hwmgr);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
+ smu8_dpm_update_uvd_dpm(hwmgr, false);
+ }
+
+}
+
+static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct smu8_hwmgr *data = hwmgr->backend;
+
+ if (bgate) {
+ cgs_set_powergating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ cgs_set_clockgating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ smu8_enable_disable_vce_dpm(hwmgr, false);
+ smu8_dpm_powerdown_vce(hwmgr);
+ data->vce_power_gated = true;
+ } else {
+ smu8_dpm_powerup_vce(hwmgr);
+ data->vce_power_gated = false;
+ cgs_set_clockgating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ smu8_dpm_update_vce_dpm(hwmgr);
+ smu8_enable_disable_vce_dpm(hwmgr, true);
+ }
+}
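
A minimal usage sketch, not part of the patch, assuming the function table below has been installed on hwmgr->hwmgr_func by smu8_init_function_pointers(); the wrapper name is hypothetical.

/* Hypothetical caller, illustration only: VCE gating is driven through
 * the hwmgr function table rather than by calling the static helpers
 * above directly.
 */
static void example_powergate_vce(struct pp_hwmgr *hwmgr, bool gate)
{
	if (hwmgr->hwmgr_func && hwmgr->hwmgr_func->powergate_vce)
		hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}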
-static const struct pp_hwmgr_func cz_hwmgr_funcs = {
- .backend_init = cz_hwmgr_backend_init,
- .backend_fini = cz_hwmgr_backend_fini,
- .apply_state_adjust_rules = cz_apply_state_adjust_rules,
- .force_dpm_level = cz_dpm_force_dpm_level,
- .get_power_state_size = cz_get_power_state_size,
- .powerdown_uvd = cz_dpm_powerdown_uvd,
- .powergate_uvd = cz_dpm_powergate_uvd,
- .powergate_vce = cz_dpm_powergate_vce,
- .get_mclk = cz_dpm_get_mclk,
- .get_sclk = cz_dpm_get_sclk,
- .patch_boot_state = cz_dpm_patch_boot_state,
- .get_pp_table_entry = cz_dpm_get_pp_table_entry,
- .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
- .set_cpu_power_state = cz_set_cpu_power_state,
- .store_cc6_data = cz_store_cc6_data,
- .force_clock_level = cz_force_clock_level,
- .print_clock_levels = cz_print_clock_levels,
- .get_dal_power_level = cz_get_dal_power_level,
- .get_performance_level = cz_get_performance_level,
- .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
- .get_clock_by_type = cz_get_clock_by_type,
- .get_max_high_clocks = cz_get_max_high_clocks,
- .get_temperature = cz_thermal_get_temperature,
- .read_sensor = cz_read_sensor,
- .power_off_asic = cz_power_off_asic,
- .asic_setup = cz_setup_asic_task,
- .dynamic_state_management_enable = cz_enable_dpm_tasks,
- .power_state_set = cz_set_power_state_tasks,
- .dynamic_state_management_disable = cz_disable_dpm_tasks,
- .notify_cac_buffer_info = cz_notify_cac_buffer_info,
+static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
+ .backend_init = smu8_hwmgr_backend_init,
+ .backend_fini = smu8_hwmgr_backend_fini,
+ .apply_state_adjust_rules = smu8_apply_state_adjust_rules,
+ .force_dpm_level = smu8_dpm_force_dpm_level,
+ .get_power_state_size = smu8_get_power_state_size,
+ .powerdown_uvd = smu8_dpm_powerdown_uvd,
+ .powergate_uvd = smu8_dpm_powergate_uvd,
+ .powergate_vce = smu8_dpm_powergate_vce,
+ .get_mclk = smu8_dpm_get_mclk,
+ .get_sclk = smu8_dpm_get_sclk,
+ .patch_boot_state = smu8_dpm_patch_boot_state,
+ .get_pp_table_entry = smu8_dpm_get_pp_table_entry,
+ .get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
+ .set_cpu_power_state = smu8_set_cpu_power_state,
+ .store_cc6_data = smu8_store_cc6_data,
+ .force_clock_level = smu8_force_clock_level,
+ .print_clock_levels = smu8_print_clock_levels,
+ .get_dal_power_level = smu8_get_dal_power_level,
+ .get_performance_level = smu8_get_performance_level,
+ .get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
+ .get_clock_by_type = smu8_get_clock_by_type,
+ .get_max_high_clocks = smu8_get_max_high_clocks,
+ .read_sensor = smu8_read_sensor,
+ .power_off_asic = smu8_power_off_asic,
+ .asic_setup = smu8_setup_asic_task,
+ .dynamic_state_management_enable = smu8_enable_dpm_tasks,
+ .power_state_set = smu8_set_power_state_tasks,
+ .dynamic_state_management_disable = smu8_disable_dpm_tasks,
+ .notify_cac_buffer_info = smu8_notify_cac_buffer_info,
+ .get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};
-int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
+int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
{
- hwmgr->hwmgr_func = &cz_hwmgr_funcs;
+ hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
index 508b422..05a0608 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
@@ -21,19 +21,18 @@
*
*/
-#ifndef _CZ_HWMGR_H_
-#define _CZ_HWMGR_H_
+#ifndef _SMU8_HWMGR_H_
+#define _SMU8_HWMGR_H_
#include "cgs_common.h"
#include "ppatomctrl.h"
-#define CZ_NUM_NBPSTATES 4
-#define CZ_NUM_NBPMEMORYCLOCK 2
+#define SMU8_NUM_NBPSTATES 4
+#define SMU8_NUM_NBPMEMORYCLOCK 2
#define MAX_DISPLAY_CLOCK_LEVEL 8
-#define CZ_AT_DFLT 30
-#define CZ_MAX_HARDWARE_POWERLEVELS 8
-#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
-#define CZ_MIN_DEEP_SLEEP_SCLK 800
+#define SMU8_MAX_HARDWARE_POWERLEVELS 8
+#define SMU8_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
+#define SMU8_MIN_DEEP_SLEEP_SCLK 800
/* Carrizo device IDs */
#define DEVICE_ID_CZ_9870 0x9870
@@ -42,24 +41,21 @@
#define DEVICE_ID_CZ_9876 0x9876
#define DEVICE_ID_CZ_9877 0x9877
-#define PHMCZ_WRITE_SMC_REGISTER(device, reg, value) \
- cgs_write_ind_register(device, CGS_IND_REG__SMC, ix##reg, value)
-
-struct cz_dpm_entry {
+struct smu8_dpm_entry {
uint32_t soft_min_clk;
uint32_t hard_min_clk;
uint32_t soft_max_clk;
uint32_t hard_max_clk;
};
-struct cz_sys_info {
+struct smu8_sys_info {
uint32_t bootup_uma_clock;
uint32_t bootup_engine_clock;
uint32_t dentist_vco_freq;
uint32_t nb_dpm_enable;
- uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK];
- uint32_t nbp_n_clock[CZ_NUM_NBPSTATES];
- uint16_t nbp_voltage_index[CZ_NUM_NBPSTATES];
+ uint32_t nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK];
+ uint32_t nbp_n_clock[SMU8_NUM_NBPSTATES];
+ uint16_t nbp_voltage_index[SMU8_NUM_NBPSTATES];
uint32_t display_clock[MAX_DISPLAY_CLOCK_LEVEL];
uint16_t bootup_nb_voltage_index;
uint8_t htc_tmp_lmt;
@@ -86,21 +82,21 @@ struct cz_sys_info {
((tx) ? DISPLAYPHY_TX_SELECT : 0) | \
((core) ? DISPLAYPHY_CORE_SELECT : 0))
-struct cz_display_phy_info_entry {
+struct smu8_display_phy_info_entry {
uint8_t phy_present;
uint8_t active_lane_mapping;
uint8_t display_config_type;
uint8_t active_number_of_lanes;
};
-#define CZ_MAX_DISPLAYPHY_IDS 10
+#define SMU8_MAX_DISPLAYPHY_IDS 10
-struct cz_display_phy_info {
+struct smu8_display_phy_info {
bool display_phy_access_initialized;
- struct cz_display_phy_info_entry entries[CZ_MAX_DISPLAYPHY_IDS];
+ struct smu8_display_phy_info_entry entries[SMU8_MAX_DISPLAYPHY_IDS];
};
-struct cz_power_level {
+struct smu8_power_level {
uint32_t engineClock;
uint8_t vddcIndex;
uint8_t dsDividerIndex;
@@ -114,7 +110,7 @@ struct cz_power_level {
uint8_t rsv[3];
};
-struct cz_uvd_clocks {
+struct smu8_uvd_clocks {
uint32_t vclk;
uint32_t dclk;
uint32_t vclk_low_divider;
@@ -123,7 +119,7 @@ struct cz_uvd_clocks {
uint32_t dclk_high_divider;
};
-enum cz_pstate_previous_action {
+enum smu8_pstate_previous_action {
DO_NOTHING = 1,
FORCE_HIGH,
CANCEL_FORCE_HIGH
@@ -144,10 +140,10 @@ struct pp_disable_nb_ps_flags {
};
};
-struct cz_power_state {
+struct smu8_power_state {
unsigned int magic;
uint32_t level;
- struct cz_uvd_clocks uvd_clocks;
+ struct smu8_uvd_clocks uvd_clocks;
uint32_t evclk;
uint32_t ecclk;
uint32_t samclk;
@@ -159,8 +155,8 @@ struct cz_power_state {
uint8_t dpm_0_pg_nb_ps_high;
uint8_t dpm_x_nb_ps_low;
uint8_t dpm_x_nb_ps_high;
- enum cz_pstate_previous_action action;
- struct cz_power_level levels[CZ_MAX_HARDWARE_POWERLEVELS];
+ enum smu8_pstate_previous_action action;
+ struct smu8_power_level levels[SMU8_MAX_HARDWARE_POWERLEVELS];
struct pp_disable_nb_ps_flags disable_nb_ps_flag;
};
@@ -172,7 +168,6 @@ struct cz_power_state {
#define DPMFlags_Debug 0x80000000
#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001 /* bit 0 */
-#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000 /* bit 23 */
#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000 /* bit 24 */
@@ -184,8 +179,7 @@ struct cc6_settings {
uint32_t cpu_pstate_separation_time;
};
-struct cz_hwmgr {
- uint32_t activity_target[CZ_MAX_HARDWARE_POWERLEVELS];
+struct smu8_hwmgr {
uint32_t dpm_interval;
uint32_t voltage_drop_threshold;
@@ -205,11 +199,11 @@ struct cz_hwmgr {
uint32_t thermal_auto_throttling_treshold;
- struct cz_sys_info sys_info;
+ struct smu8_sys_info sys_info;
- struct cz_power_level boot_power_level;
- struct cz_power_state *cz_current_ps;
- struct cz_power_state *cz_requested_ps;
+ struct smu8_power_level boot_power_level;
+ struct smu8_power_state *smu8_current_ps;
+ struct smu8_power_state *smu8_requested_ps;
uint32_t mgcg_cgtt_local0;
uint32_t mgcg_cgtt_local1;
@@ -222,7 +216,7 @@ struct cz_hwmgr {
uint32_t lock_nb_ps_in_uvd_play_back;
- struct cz_display_phy_info display_phy_info;
+ struct smu8_display_phy_info display_phy_info;
uint32_t vce_slow_sclk_threshold; /* default 200mhz */
uint32_t dce_slow_sclk_threshold; /* default 300mhz */
uint32_t min_sclk_did; /* minimum sclk divider */
@@ -273,10 +267,10 @@ struct cz_hwmgr {
uint32_t fps_low_threshold;
uint32_t dpm_flags;
- struct cz_dpm_entry sclk_dpm;
- struct cz_dpm_entry uvd_dpm;
- struct cz_dpm_entry vce_dpm;
- struct cz_dpm_entry acp_dpm;
+ struct smu8_dpm_entry sclk_dpm;
+ struct smu8_dpm_entry uvd_dpm;
+ struct smu8_dpm_entry vce_dpm;
+ struct smu8_dpm_entry acp_dpm;
uint8_t uvd_boot_level;
uint8_t vce_boot_level;
@@ -314,12 +308,4 @@ struct cz_hwmgr {
uint32_t num_of_clk_entries;
};
-struct pp_hwmgr;
-
-int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
-int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
-int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr);
-int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr);
-#endif /* _CZ_HWMGR_H_ */
+#endif /* _SMU8_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
new file mode 100644
index 0000000..5981228
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -0,0 +1,610 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "hwmgr.h"
+#include "pp_debug.h"
+#include "ppatomctrl.h"
+#include "ppsmc.h"
+
+uint8_t convert_to_vid(uint16_t vddc)
+{
+ return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
+
+uint16_t convert_to_vddc(uint8_t vid)
+{
+ return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
+}
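+
+/*
+ * Worked example (illustrative; assumes VOLTAGE_SCALE is 4): the two helpers
+ * above are inverses of each other for in-range values, e.g.
+ * convert_to_vid(1150) = (6200 - 1150 * 4) / 25 = 64 and
+ * convert_to_vddc(64) = (6200 - 64 * 25) / 4 = 1150.
+ */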
+
+uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
+{
+ u32 mask = 0;
+ u32 shift = 0;
+
+ shift = (offset % 4) << 3;
+ if (size == sizeof(uint8_t))
+ mask = 0xFF << shift;
+ else if (size == sizeof(uint16_t))
+ mask = 0xFFFF << shift;
+
+ original_data &= ~mask;
+ original_data |= (field << shift);
+ return original_data;
+}
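+
+/*
+ * Worked example (illustrative): for a 16-bit field stored at byte offset 6,
+ * shift = (6 % 4) << 3 = 16 and mask = 0xFFFF << 16, so
+ * phm_set_field_to_u32(6, 0x12345678, 0xABCD, sizeof(uint16_t)) returns
+ * 0xABCD5678.
+ */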
+
+/**
+ * Returns once the part of the register indicated by the mask has
+ * reached the given value.
+ */
+int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
+ uint32_t value, uint32_t mask)
+{
+ uint32_t i;
+ uint32_t cur_value;
+
+ if (hwmgr == NULL || hwmgr->device == NULL) {
+ pr_err("Invalid Hardware Manager!");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
+ cur_value = cgs_read_register(hwmgr->device, index);
+ if ((cur_value & mask) == (value & mask))
+ break;
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == hwmgr->usec_timeout)
+ return -1;
+ return 0;
+}
+
+/**
+ * Returns once the part of the register indicated by the mask has
+ * reached the given value. The indirect space is described by giving
+ * the memory-mapped index of the indirect index register.
+ */
+int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port,
+ uint32_t index,
+ uint32_t value,
+ uint32_t mask)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL) {
+ pr_err("Invalid Hardware Manager!");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, indirect_port, index);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
+}
+
+int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask)
+{
+ uint32_t i;
+ uint32_t cur_value;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
+ cur_value = cgs_read_register(hwmgr->device,
+ index);
+ if ((cur_value & mask) != (value & mask))
+ break;
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == hwmgr->usec_timeout)
+ return -ETIME;
+ return 0;
+}
+
+int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port,
+ uint32_t index,
+ uint32_t value,
+ uint32_t mask)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, indirect_port, index);
+ return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
+ value, mask);
+}
+
+bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
+{
+ return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
+}
+
+bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
+{
+ return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
+}
+
+int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
+{
+ uint32_t i, j;
+ uint16_t vvalue;
+ bool found = false;
+ struct pp_atomctrl_voltage_table *table;
+
+ PP_ASSERT_WITH_CODE((NULL != vol_table),
+ "Voltage Table empty.", return -EINVAL);
+
+ table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
+ GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ table->mask_low = vol_table->mask_low;
+ table->phase_delay = vol_table->phase_delay;
+
+ for (i = 0; i < vol_table->count; i++) {
+ vvalue = vol_table->entries[i].value;
+ found = false;
+
+ for (j = 0; j < table->count; j++) {
+ if (vvalue == table->entries[j].value) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ table->entries[table->count].value = vvalue;
+ table->entries[table->count].smio_low =
+ vol_table->entries[i].smio_low;
+ table->count++;
+ }
+ }
+
+ memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
+ kfree(table);
+ table = NULL;
+ return 0;
+}
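+
+/*
+ * Worked example (illustrative): a table with entries {900, 900, 950}
+ * (count 3) is trimmed in place to {900, 950} (count 2); only the first
+ * occurrence of each voltage value is kept.
+ */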
+
+int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
+ phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+ uint32_t i;
+ int result;
+
+ PP_ASSERT_WITH_CODE((0 != dep_table->count),
+ "Voltage Dependency Table empty.", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE((NULL != vol_table),
+ "vol_table empty.", return -EINVAL);
+
+ vol_table->mask_low = 0;
+ vol_table->phase_delay = 0;
+ vol_table->count = dep_table->count;
+
+ for (i = 0; i < dep_table->count; i++) {
+ vol_table->entries[i].value = dep_table->entries[i].mvdd;
+ vol_table->entries[i].smio_low = 0;
+ }
+
+ result = phm_trim_voltage_table(vol_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to trim MVDD table.", return result);
+
+ return 0;
+}
+
+int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
+ phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+ uint32_t i;
+ int result;
+
+ PP_ASSERT_WITH_CODE((0 != dep_table->count),
+ "Voltage Dependency Table empty.", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE((NULL != vol_table),
+ "vol_table empty.", return -EINVAL);
+
+ vol_table->mask_low = 0;
+ vol_table->phase_delay = 0;
+ vol_table->count = dep_table->count;
+
+ for (i = 0; i < dep_table->count; i++) {
+ vol_table->entries[i].value = dep_table->entries[i].vddci;
+ vol_table->entries[i].smio_low = 0;
+ }
+
+ result = phm_trim_voltage_table(vol_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to trim VDDCI table.", return result);
+
+ return 0;
+}
+
+int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
+ phm_ppt_v1_voltage_lookup_table *lookup_table)
+{
+ int i = 0;
+
+ PP_ASSERT_WITH_CODE((0 != lookup_table->count),
+ "Voltage Lookup Table empty.", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE((NULL != vol_table),
+ "vol_table empty.", return -EINVAL);
+
+ vol_table->mask_low = 0;
+ vol_table->phase_delay = 0;
+
+ vol_table->count = lookup_table->count;
+
+ for (i = 0; i < vol_table->count; i++) {
+ vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
+ vol_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
+void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
+ struct pp_atomctrl_voltage_table *vol_table)
+{
+ unsigned int i, diff;
+
+ if (vol_table->count <= max_vol_steps)
+ return;
+
+ diff = vol_table->count - max_vol_steps;
+
+ for (i = 0; i < max_vol_steps; i++)
+ vol_table->entries[i] = vol_table->entries[i + diff];
+
+ vol_table->count = max_vol_steps;
+}
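+
+/*
+ * Worked example (illustrative; assumes the table is sorted in ascending
+ * order): with count = 10 and max_vol_steps = 8, diff = 2 and entries 2..9
+ * are shifted down to 0..7, i.e. the two lowest voltage steps are dropped
+ * and count becomes 8.
+ */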
+
+int phm_reset_single_dpm_table(void *table,
+ uint32_t count, int max)
+{
+ int i;
+
+ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+
+ dpm_table->count = count > max ? max : count;
+
+ for (i = 0; i < dpm_table->count; i++)
+ dpm_table->dpm_level[i].enabled = false;
+
+ return 0;
+}
+
+void phm_setup_pcie_table_entry(
+ void *table,
+ uint32_t index, uint32_t pcie_gen,
+ uint32_t pcie_lanes)
+{
+ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+ dpm_table->dpm_level[index].value = pcie_gen;
+ dpm_table->dpm_level[index].param1 = pcie_lanes;
+ dpm_table->dpm_level[index].enabled = 1;
+}
+
+int32_t phm_get_dpm_level_enable_mask_value(void *table)
+{
+ int32_t i;
+ int32_t mask = 0;
+ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+
+ for (i = dpm_table->count; i > 0; i--) {
+ mask = mask << 1;
+ if (dpm_table->dpm_level[i - 1].enabled)
+ mask |= 0x1;
+ else
+ mask &= 0xFFFFFFFE;
+ }
+
+ return mask;
+}
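+
+/*
+ * Worked example (illustrative): for a 4-level table with levels 0, 1 and 2
+ * enabled and level 3 disabled, the loop builds the mask from the top level
+ * down and returns 0x7 (bit i is set when level i is enabled).
+ */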
+
+uint8_t phm_get_voltage_index(
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
+{
+ uint8_t count;
+ uint8_t i;
+
+ PP_ASSERT_WITH_CODE((NULL != lookup_table),
+ "Lookup Table empty.", return 0);
+
+ count = (uint8_t) (lookup_table->count);
+ PP_ASSERT_WITH_CODE((0 != count),
+ "Lookup Table empty.", return 0);
+
+ for (i = 0; i < lookup_table->count; i++) {
+ /* find first voltage equal or bigger than requested */
+ if (lookup_table->entries[i].us_vdd >= voltage)
+ return i;
+ }
+ /* voltage is bigger than max voltage in the table */
+ return i - 1;
+}
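+
+/*
+ * Worked example (illustrative; assumes an ascending lookup table): with
+ * us_vdd values {800, 900, 1000}, a request for 950 returns index 2 (the
+ * first entry >= 950) and a request for 1200 falls through the loop and
+ * also returns index 2 (the highest entry).
+ */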
+
+uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+ uint32_t voltage)
+{
+ uint8_t count;
+ uint8_t i = 0;
+
+ PP_ASSERT_WITH_CODE((NULL != voltage_table),
+ "Voltage Table empty.", return 0);
+
+ count = (uint8_t) (voltage_table->count);
+ PP_ASSERT_WITH_CODE((0 != count),
+ "Voltage Table empty.", return 0);
+
+ for (i = 0; i < count; i++) {
+ /* find first voltage bigger than requested */
+ if (voltage_table->entries[i].value >= voltage)
+ return i;
+ }
+
+ /* voltage is bigger than max voltage in the table */
+ return i - 1;
+}
+
+uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
+{
+ uint32_t i;
+
+ for (i = 0; i < vddci_table->count; i++) {
+ if (vddci_table->entries[i].value >= vddci)
+ return vddci_table->entries[i].value;
+ }
+
+ pr_debug("vddci is larger than max value in vddci_table\n");
+ return vddci_table->entries[i-1].value;
+}
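+
+/*
+ * Worked example (illustrative; assumes an ascending vddci table): with
+ * entries {850, 900, 950}, a request for 875 returns 900 (the first entry
+ * >= 875); a request for 1000 logs a debug message and returns 950 (the
+ * last entry).
+ */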
+
+int phm_find_boot_level(void *table,
+ uint32_t value, uint32_t *boot_level)
+{
+ int result = -EINVAL;
+ uint32_t i;
+ struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
+
+ for (i = 0; i < dpm_table->count; i++) {
+ if (value == dpm_table->dpm_level[i].value) {
+ *boot_level = i;
+ result = 0;
+ }
+ }
+
+ return result;
+}
+
+int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_voltage_lookup_table *lookup_table,
+ uint16_t virtual_voltage_id, int32_t *sclk)
+{
+ uint8_t entry_id;
+ uint8_t voltage_id;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
+
+ /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
+ for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
+ voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
+ if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
+ break;
+ }
+
+ if (entry_id >= table_info->vdd_dep_on_sclk->count) {
+ pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
+ return -EINVAL;
+ }
+
+ *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
+
+ return 0;
+}
+
+/**
+ * Initialize Dynamic State Adjustment Rule Settings
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ */
+int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
+{
+ uint32_t table_size;
+ struct phm_clock_voltage_dependency_table *table_clk_vlt;
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* initialize vddc_dep_on_dal_pwrl table */
+ table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
+ table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
+
+ if (NULL == table_clk_vlt) {
+ pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
+ return -ENOMEM;
+ } else {
+ table_clk_vlt->count = 4;
+ table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
+ table_clk_vlt->entries[0].v = 0;
+ table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
+ table_clk_vlt->entries[1].v = 720;
+ table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
+ table_clk_vlt->entries[2].v = 810;
+ table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
+ table_clk_vlt->entries[3].v = 900;
+ if (pptable_info != NULL)
+ pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
+ }
+
+ return 0;
+}
+
+uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
+{
+ uint32_t level = 0;
+
+ while (0 == (mask & (1 << level)))
+ level++;
+
+ return level;
+}
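+
+/*
+ * Worked example (illustrative): a mask of 0x0C (levels 2 and 3 enabled)
+ * returns 2. Callers are expected to pass a non-zero mask; a zero mask
+ * would never terminate the loop above.
+ */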
+
+void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_clock_voltage_dependency_table *table =
+ table_info->vddc_dep_on_dal_pwrl;
+ struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
+ enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
+ uint32_t req_vddc = 0, req_volt, i;
+
+ if (!table || table->count <= 0
+ || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
+ || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
+ return;
+
+ for (i = 0; i < table->count; i++) {
+ if (dal_power_level == table->entries[i].clk) {
+ req_vddc = table->entries[i].v;
+ break;
+ }
+ }
+
+ vddc_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < vddc_table->count; i++) {
+ if (req_vddc <= vddc_table->entries[i].vddc) {
+ req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VddC_Request, req_volt);
+ return;
+ }
+ }
+ pr_err("DAL requested level can not"
+ " found a available voltage in VDDC DPM Table \n");
+}
+
+int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t id, uint16_t *voltage)
+{
+ uint32_t vol;
+ int ret = 0;
+
+ if (hwmgr->chip_id < CHIP_TONGA) {
+ ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+ } else if (hwmgr->chip_id < CHIP_POLARIS10) {
+ ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+ if (*voltage >= 2000 || *voltage == 0)
+ *voltage = 1150;
+ } else {
+ ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
+ *voltage = (uint16_t)(vol/100);
+ }
+ return ret;
+}
+
+int phm_irq_process(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t client_id = entry->client_id;
+ uint32_t src_id = entry->src_id;
+
+ if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
+ if (src_id == 230)
+ pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+ PCI_SLOT(adev->pdev->devfn),
+ PCI_FUNC(adev->pdev->devfn));
+ else if (src_id == 231)
+ pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+ PCI_SLOT(adev->pdev->devfn),
+ PCI_FUNC(adev->pdev->devfn));
+ else if (src_id == 83)
+ pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+ PCI_SLOT(adev->pdev->devfn),
+ PCI_FUNC(adev->pdev->devfn));
+ } else if (client_id == SOC15_IH_CLIENTID_THM) {
+ if (src_id == 0)
+ pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+ PCI_SLOT(adev->pdev->devfn),
+ PCI_FUNC(adev->pdev->devfn));
+ else
+ pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+ PCI_SLOT(adev->pdev->devfn),
+ PCI_FUNC(adev->pdev->devfn));
+ } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO)
+ pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
+ PCI_BUS_NUM(adev->pdev->devfn),
+ PCI_SLOT(adev->pdev->devfn),
+ PCI_FUNC(adev->pdev->devfn));
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs smu9_irq_funcs = {
+ .process = phm_irq_process,
+};
+
+int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_irq_src *source =
+ kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
+
+ if (!source)
+ return -ENOMEM;
+
+ source->funcs = &smu9_irq_funcs;
+
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ SOC15_IH_CLIENTID_THM,
+ 0,
+ source);
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ SOC15_IH_CLIENTID_THM,
+ 1,
+ source);
+
+ /* Register CTF(GPIO_19) interrupt */
+ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
+ SOC15_IH_CLIENTID_ROM_SMUIO,
+ 83,
+ source);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
new file mode 100644
index 0000000..d37d16e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _SMU_HELPER_H_
+#define _SMU_HELPER_H_
+
+struct pp_atomctrl_voltage_table;
+struct pp_hwmgr;
+struct phm_ppt_v1_voltage_lookup_table;
+
+uint8_t convert_to_vid(uint16_t vddc);
+uint16_t convert_to_vddc(uint8_t vid);
+
+extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask);
+extern int phm_wait_for_indirect_register_unequal(
+ struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port, uint32_t index,
+ uint32_t value, uint32_t mask);
+
+
+extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
+extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
+extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
+
+extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
+extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
+extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
+extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
+extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
+extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
+extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
+ uint32_t voltage);
+extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
+extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
+extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
+extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
+ uint16_t virtual_voltage_id, int32_t *sclk);
+extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
+extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
+extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
+
+extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t id, uint16_t *voltage);
+
+extern uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size);
+
+extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
+ uint32_t value, uint32_t mask);
+
+extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port,
+ uint32_t index,
+ uint32_t value,
+ uint32_t mask);
+
+int phm_irq_process(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+
+int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr);
+
+#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
+#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
+
+#define PHM_SET_FIELD(origval, reg, field, fieldval) \
+ (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
+ (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
+
+#define PHM_GET_FIELD(value, reg, field) \
+ (((value) & PHM_FIELD_MASK(reg, field)) >> \
+ PHM_FIELD_SHIFT(reg, field))
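+
+/*
+ * Illustrative expansion, using a hypothetical register FOO with field BAR
+ * whose register headers define FOO__BAR__SHIFT and FOO__BAR_MASK:
+ *   PHM_GET_FIELD(val, FOO, BAR)
+ *     -> ((val) & FOO__BAR_MASK) >> FOO__BAR__SHIFT
+ *   PHM_SET_FIELD(val, FOO, BAR, 3)
+ *     -> ((val) & ~FOO__BAR_MASK) | (FOO__BAR_MASK & (3 << FOO__BAR__SHIFT))
+ */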
+
+
+/* Operations on named fields. */
+
+#define PHM_READ_FIELD(device, reg, field) \
+ PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
+
+#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
+ PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field)
+
+#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
+ PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field)
+
+#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
+ cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
+ cgs_read_register(device, mm##reg), reg, field, fieldval))
+
+#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
+ cgs_write_ind_register(device, port, ix##reg, \
+ PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field, fieldval))
+
+#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
+ cgs_write_ind_register(device, port, ix##reg, \
+ PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field, fieldval))
+
+#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+ phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
+
+
+#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
+ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX, index, value, mask)
+
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_on_indirect_register(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ index, value, mask) \
+ phm_wait_for_register_unequal(hwmgr, \
+ index, value, mask)
+
+#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
+ PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ mm##reg, value, mask)
+
+#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
+ PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+#endif /* _SMU_HELPER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f8d838c..7cbb56b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -28,7 +28,6 @@
#include "hwmgr.h"
#include "amd_powerplay.h"
-#include "vega10_smumgr.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
@@ -44,11 +43,13 @@
#include "vega10_pptable.h"
#include "vega10_thermal.h"
#include "pp_debug.h"
-#include "pp_acpi.h"
#include "amd_pcie_helpers.h"
-#include "cgs_linux.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
+#include "pp_thermal.h"
+
+#include "smuio/smuio_9_0_offset.h"
+#include "smuio/smuio_9_0_sh_mask.h"
#define VOLTAGE_SCALE 4
#define VOLTAGE_VID_OFFSET_SCALE1 625
@@ -105,8 +106,7 @@ const struct vega10_power_state *cast_const_phw_vega10_power_state(
static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
data->registry_data.sclk_dpm_key_disabled =
hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
@@ -183,12 +183,10 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
- struct cgs_system_info sys_info = {0};
- int result;
+ struct amdgpu_device *adev = hwmgr->adev;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
@@ -203,15 +201,11 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
-
- if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
- if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
@@ -299,8 +293,10 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
int i;
+ uint32_t sub_vendor_id, hw_revision;
+ struct amdgpu_device *adev = hwmgr->adev;
vega10_initialize_power_tune_defaults(hwmgr);
@@ -365,6 +361,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
FEATURE_FAN_CONTROL_BIT;
data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
+ data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;
if (!data->registry_data.prefetcher_dpm_key_disabled)
data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
@@ -426,14 +423,23 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_VR0HOT].supported = true;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- vega10_read_arg_from_smc(hwmgr, &(data->smu_version));
+ hwmgr->smu_version = smum_get_argument(hwmgr);
/* ACG firmware has major version 5 */
- if ((data->smu_version & 0xff000000) == 0x5000000)
+ if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
if (data->registry_data.didt_support)
data->smu_features[GNLD_DIDT].supported = true;
+ hw_revision = adev->pdev->revision;
+ sub_vendor_id = adev->pdev->subsystem_vendor;
+
+ if ((hwmgr->chip_id == 0x6862 ||
+ hwmgr->chip_id == 0x6861 ||
+ hwmgr->chip_id == 0x6868) &&
+ (hw_revision == 0) &&
+ (sub_vendor_id != 0x1002))
+ data->smu_features[GNLD_PCC_LIMIT].supported = true;
}
#ifdef PPLIB_VEGA10_EVV_SUPPORT
@@ -475,7 +481,7 @@ static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
*/
static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint16_t vv_id;
uint32_t vddc = 0;
uint16_t i, j;
@@ -546,8 +552,7 @@ static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- pr_info("Voltage value looks like a Leakage ID \
- but it's not patched\n");
+ pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
}
/**
@@ -667,7 +672,7 @@ static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
#ifdef PPLIB_VEGA10_EVV_SUPPORT
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
table_info->vddc_lookup_table, &(data->vddc_leakage));
@@ -701,18 +706,14 @@ static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_mclk;
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
- "VDD dependency on SCLK table is missing. \
- This table is mandatory", return -EINVAL);
+ "VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
- "VDD dependency on SCLK table is empty. \
- This table is mandatory", return -EINVAL);
+ "VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
- "VDD dependency on MCLK table is missing. \
- This table is mandatory", return -EINVAL);
+ "VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
- "VDD dependency on MCLK table is empty. \
- This table is mandatory", return -EINVAL);
+ "VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
table_info->max_clock_voltage_on_ac.sclk =
allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
@@ -752,7 +753,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data;
uint32_t config_telemetry = 0;
struct pp_atomfwctrl_voltage_table vol_table;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t reg;
data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
@@ -761,10 +762,12 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->backend = data;
- vega10_set_default_registry_data(hwmgr);
+ hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
+ hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
+ vega10_set_default_registry_data(hwmgr);
data->disable_dpm_mask = 0xff;
- data->workload_mask = 0xff;
/* need to set voltage control types before EVV patching */
data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
@@ -842,10 +845,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.clockStep.engineClock = 500;
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- data->total_active_cus = sys_info.value;
+ data->total_active_cus = adev->gfx.cu_info.number;
/* Setup default Overdrive Fan control settings */
data->odn_fan_table.target_fan_speed =
hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
@@ -875,8 +875,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
data->low_sclk_interrupt_threshold = 0;
@@ -885,8 +884,7 @@ static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_voltage_table table;
@@ -926,18 +924,9 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to set up led dpm config!",
return -EINVAL);
- return 0;
-}
-
-static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- uint32_t features_enabled;
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
- if (!vega10_get_smc_features(hwmgr, &features_enabled)) {
- if (features_enabled & SMC_DPM_FEATURES)
- return true;
- }
- return false;
+ return 0;
}
/**
@@ -1098,7 +1087,7 @@ static void vega10_trim_voltage_table_to_fit_state_table(
*/
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
int result;
@@ -1186,8 +1175,7 @@ static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
}
static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
@@ -1236,8 +1224,7 @@ static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
*/
static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct vega10_single_dpm_table *dpm_table;
@@ -1385,14 +1372,12 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
- data->odn_dpm_table.odn_core_clock_dpm_levels.
- number_of_performance_levels = data->dpm_table.gfx_table.count;
+ data->odn_dpm_table.odn_core_clock_dpm_levels.num_of_pl =
+ data->dpm_table.gfx_table.count;
for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
- data->odn_dpm_table.odn_core_clock_dpm_levels.
- performance_level_entries[i].clock =
+ data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].clock =
data->dpm_table.gfx_table.dpm_levels[i].value;
- data->odn_dpm_table.odn_core_clock_dpm_levels.
- performance_level_entries[i].enabled = true;
+ data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].enabled = true;
}
data->odn_dpm_table.vdd_dependency_on_sclk.count =
@@ -1408,14 +1393,12 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dep_gfx_table->entries[i].cks_voffset;
}
- data->odn_dpm_table.odn_memory_clock_dpm_levels.
- number_of_performance_levels = data->dpm_table.mem_table.count;
+ data->odn_dpm_table.odn_memory_clock_dpm_levels.num_of_pl =
+ data->dpm_table.mem_table.count;
for (i = 0; i < data->dpm_table.mem_table.count; i++) {
- data->odn_dpm_table.odn_memory_clock_dpm_levels.
- performance_level_entries[i].clock =
+ data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].clock =
data->dpm_table.mem_table.dpm_levels[i].value;
- data->odn_dpm_table.odn_memory_clock_dpm_levels.
- performance_level_entries[i].enabled = true;
+ data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].enabled = true;
}
data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
@@ -1441,8 +1424,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
*/
static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
@@ -1483,8 +1465,7 @@ static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
{
int result = -1;
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_pcie_table *pcie_table =
&(data->dpm_table.pcie_table);
@@ -1535,8 +1516,7 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
table_info->vdd_dep_on_sclk;
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t gfx_max_clock =
hwmgr->platform_descriptor.overdriveLimit.engineClock;
@@ -1648,8 +1628,7 @@ uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
*/
static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
@@ -1723,8 +1702,7 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
uint32_t mem_clock, uint8_t *current_mem_vid,
PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
@@ -1782,8 +1760,7 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
*/
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_single_dpm_table *dpm_table =
&(data->dpm_table.mem_table);
@@ -1826,8 +1803,7 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
DSPCLK_e disp_clock)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)
@@ -1922,8 +1898,7 @@ static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
int result = -EINVAL;
@@ -1986,8 +1961,7 @@ static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_single_dpm_table *vclk_dpm_table =
&(data->dpm_table.vclk_table);
@@ -2058,8 +2032,7 @@ static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
@@ -2078,8 +2051,7 @@ static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
@@ -2270,8 +2242,7 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t agc_btc_response;
if (data->smu_features[GNLD_ACG].supported) {
@@ -2282,7 +2253,7 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
- vega10_read_arg_from_smc(hwmgr, &agc_btc_response);
+ agc_btc_response = smum_get_argument(hwmgr);
if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)
@@ -2303,8 +2274,7 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_ACG].supported &&
data->smu_features[GNLD_ACG].enabled)
@@ -2317,8 +2287,7 @@ static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
int result;
@@ -2353,8 +2322,7 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_AVFS].supported) {
if (enable) {
@@ -2385,14 +2353,14 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
uint32_t top32, bottom32;
struct phm_fuses_default fuse;
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- vega10_read_arg_from_smc(hwmgr, &top32);
+ top32 = smum_get_argument(hwmgr);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- vega10_read_arg_from_smc(hwmgr, &bottom32);
+ bottom32 = smum_get_argument(hwmgr);
serial_number = ((uint64_t)bottom32 << 32) | top32;
@@ -2406,8 +2374,8 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
avfs_fuse_table->VFT2_b = fuse.VFT2_b;
avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
- result = vega10_copy_table_to_smc(hwmgr,
- (uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
+ result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table,
+ AVFSFUSETABLE, false);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload FuseOVerride!",
);
@@ -2416,34 +2384,6 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
return result;
}
-static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
- struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
- uint32_t min_level;
-
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available)
- */
- if (dpm_table->count > 2)
- min_level = dpm_table->count - 2;
- else if (dpm_table->count == 2)
- min_level = 1;
- else
- min_level = 0;
-
- hwmgr->default_compute_power_profile.min_sclk =
- dpm_table->dpm_levels[min_level].value;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-
- return 0;
-}
-
/**
* Initializes the SMC table and uploads it
*
@@ -2454,8 +2394,7 @@ static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
@@ -2578,8 +2517,8 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
vega10_populate_and_upload_avfs_fuse_override(hwmgr);
- result = vega10_copy_table_to_smc(hwmgr,
- (uint8_t *)pp_table, PPTABLE);
+ result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
+
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
@@ -2587,14 +2526,13 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
return result);
vega10_acg_enable(hwmgr);
- vega10_save_default_power_profile(hwmgr);
return 0;
}
static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_THERMAL].supported) {
if (data->smu_features[GNLD_THERMAL].enabled)
@@ -2614,7 +2552,7 @@ static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_THERMAL].supported) {
if (!data->smu_features[GNLD_THERMAL].enabled)
@@ -2634,8 +2572,7 @@ static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
if (data->smu_features[GNLD_VR0HOT].supported) {
@@ -2663,8 +2600,7 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->registry_data.ulv_support) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -2679,8 +2615,7 @@ static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->registry_data.ulv_support) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -2695,8 +2630,7 @@ static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -2735,8 +2669,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -2775,8 +2708,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, feature_mask = 0;
@@ -2813,8 +2745,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
*/
static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, feature_mask = 0;
for (i = 0; i < GNLD_DPM_MAX; i++) {
@@ -2864,34 +2795,37 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
return 0;
}
-static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
- int tmp_result, result = 0;
+ struct vega10_hwmgr *data = hwmgr->backend;
- tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to configure telemetry!",
- return tmp_result);
+ if (data->smu_features[GNLD_PCC_LIMIT].supported) {
+ if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
+ pr_info("GNLD_PCC_LIMIT has been %s \n", enable ? "enabled" : "disabled");
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+ enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
+ "Attempt to Enable PCC Limit feature Failed!",
+ return -EINVAL);
+ data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
+ }
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ return 0;
+}
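The hunk above factors the PCC-limit toggle into a small helper: it checks that the feature is supported, logs when the requested state already matches the cached one, asks the firmware to flip the feature bit, and then updates the cache. The sketch below is a standalone plain-C model of that pattern; the struct, the function names and the bit position are illustrative assumptions, not the driver's types.

/* Standalone model (not driver code) of the cached-state feature toggle. */
#include <stdbool.h>
#include <stdio.h>

struct feature {
	bool supported;
	bool enabled;
	unsigned int bitmap;	/* bit mask handed to the firmware (made up) */
};

/* Pretend firmware call: returns 0 on success. */
static int fw_set_features(unsigned int bitmap, bool enable)
{
	printf("%s features 0x%x\n", enable ? "enable" : "disable", bitmap);
	return 0;
}

static int toggle_feature(struct feature *f, bool enable)
{
	if (!f->supported)
		return 0;			/* nothing to do */
	if (enable == f->enabled)
		printf("feature already %s\n", enable ? "enabled" : "disabled");
	if (fw_set_features(f->bitmap, enable))
		return -1;
	f->enabled = enable;			/* remember the new state */
	return 0;
}

int main(void)
{
	struct feature pcc = { .supported = true, .enabled = false, .bitmap = 1u << 5 };

	toggle_feature(&pcc, true);
	toggle_feature(&pcc, true);		/* second call only reports the state */
	return 0;
}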
- tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(!tmp_result,
- "DPM is already running right , skipping re-enablement!",
- return 0);
+static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ int tmp_result, result = 0;
+
+ vega10_enable_disable_PCC_limit_feature(hwmgr, true);
- if ((data->smu_version == 0x001c2c00) ||
- (data->smu_version == 0x001c2d00)) {
- tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ if ((hwmgr->smu_version == 0x001c2c00) ||
+ (hwmgr->smu_version == 0x001c2d00))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
- PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to set package power PID!",
- return tmp_result);
- }
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
tmp_result = vega10_construct_voltage_tables(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3097,7 +3031,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct cgs_display_info info = {0};
const struct phm_clock_and_voltage_limits *max_limits;
uint32_t i;
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
int32_t count;
@@ -3129,9 +3063,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
}
- vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
cgs_get_active_displays_info(hwmgr->device, &info);
/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
@@ -3170,48 +3101,19 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
minimum_clocks.memoryClock = stable_pstate_mclk;
}
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- vega10_ps->performance_levels[1].gfx_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- vega10_ps->performance_levels[1].mem_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
- disable_mclk_switching_for_frame_lock = phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
- disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
+ disable_mclk_switching_for_frame_lock =
+ PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+ disable_mclk_switching_for_vr =
+ PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
- disable_mclk_switching = (info.display_count > 1) ||
- disable_mclk_switching_for_frame_lock ||
- disable_mclk_switching_for_vr ||
- force_mclk_high;
+ if (info.display_count == 0)
+ disable_mclk_switching = false;
+ else
+ disable_mclk_switching = (info.display_count > 1) ||
+ disable_mclk_switching_for_frame_lock ||
+ disable_mclk_switching_for_vr ||
+ force_mclk_high;
sclk = vega10_ps->performance_levels[0].gfx_clock;
mclk = vega10_ps->performance_levels[0].mem_clock;
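With this hunk the decision becomes: when no display is active, memory-clock switching stays allowed; otherwise it is disabled for more than one display or when any of the frame-lock, VR or force-high caps is set. A minimal standalone restatement of that predicate (parameter names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool mclk_switching_disabled(unsigned int display_count,
				    bool frame_lock, bool vr, bool force_high)
{
	if (display_count == 0)
		return false;			/* no display: switching is safe */
	return display_count > 1 || frame_lock || vr || force_high;
}

int main(void)
{
	printf("%d\n", mclk_switching_disabled(0, true, false, false));	/* 0 */
	printf("%d\n", mclk_switching_disabled(1, false, false, false));	/* 0 */
	printf("%d\n", mclk_switching_disabled(2, false, false, false));	/* 1 */
	return 0;
}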
@@ -3273,8 +3175,7 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
(const struct phm_set_power_state_input *)input;
const struct vega10_power_state *vega10_ps =
cast_const_phw_vega10_power_state(states->pnew_state);
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *sclk_table =
&(data->dpm_table.gfx_table);
uint32_t sclk = vega10_ps->performance_levels
@@ -3362,8 +3263,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
(const struct phm_set_power_state_input *)input;
const struct vega10_power_state *vega10_ps =
cast_const_phw_vega10_power_state(states->pnew_state);
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t sclk = vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock;
uint32_t mclk = vega10_ps->performance_levels
@@ -3388,11 +3288,9 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
dpm_count < dpm_table->gfx_table.count;
dpm_count++) {
dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
- data->odn_dpm_table.odn_core_clock_dpm_levels.
- performance_level_entries[dpm_count].enabled;
+ data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].enabled;
dpm_table->gfx_table.dpm_levels[dpm_count].value =
- data->odn_dpm_table.odn_core_clock_dpm_levels.
- performance_level_entries[dpm_count].clock;
+ data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].clock;
}
}
@@ -3402,11 +3300,9 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
dpm_count < dpm_table->mem_table.count;
dpm_count++) {
dpm_table->mem_table.dpm_levels[dpm_count].enabled =
- data->odn_dpm_table.odn_memory_clock_dpm_levels.
- performance_level_entries[dpm_count].enabled;
+ data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].enabled;
dpm_table->mem_table.dpm_levels[dpm_count].value =
- data->odn_dpm_table.odn_memory_clock_dpm_levels.
- performance_level_entries[dpm_count].clock;
+ data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].clock;
}
}
@@ -3416,8 +3312,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
DPMTABLE_OD_UPDATE_SCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
- "Failed to populate SCLK during \
- PopulateNewDPMClocksStates Function!",
+ "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
return result);
}
@@ -3426,8 +3321,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
DPMTABLE_OD_UPDATE_MCLK)){
result = vega10_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
- "Failed to populate MCLK during \
- PopulateNewDPMClocksStates Function!",
+ "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
return result);
}
} else {
@@ -3440,8 +3334,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
dpm_table->
gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
value = sclk;
- if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
- PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ if (hwmgr->od_enabled) {
/* Need to do calculation based on the golden DPM table
* as the Heatmap GPU Clock axis is also based on
* the default values
@@ -3495,9 +3388,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
mem_table.dpm_levels[dpm_table->mem_table.count - 1].
value = mclk;
- if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
- PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
+ if (hwmgr->od_enabled) {
PP_ASSERT_WITH_CODE(
golden_dpm_table->mem_table.dpm_levels
[golden_dpm_table->mem_table.count - 1].value,
@@ -3544,8 +3435,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
data->apply_optimized_settings) {
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
- "Failed to populate SCLK during \
- PopulateNewDPMClocksStates Function!",
+ "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
return result);
}
@@ -3553,8 +3443,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
result = vega10_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
- "Failed to populate MCLK during \
- PopulateNewDPMClocksStates Function!",
+ "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
return result);
}
}
@@ -3599,8 +3488,7 @@ static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
const struct vega10_power_state *vega10_ps)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t high_limit_count;
PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
@@ -3678,8 +3566,7 @@ static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t socclk_idx;
vega10_apply_dal_minimum_voltage_request(hwmgr);
@@ -3687,12 +3574,9 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (!data->registry_data.sclk_dpm_key_disabled) {
if (data->smc_state_table.gfx_boot_level !=
data->dpm_table.gfx_table.dpm_state.soft_min_level) {
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
- data->smc_state_table.gfx_boot_level),
- "Failed to set soft min sclk index!",
- return -EINVAL);
+ data->smc_state_table.gfx_boot_level);
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->smc_state_table.gfx_boot_level;
}
@@ -3703,19 +3587,13 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_min_level) {
if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
- socclk_idx),
- "Failed to set soft min uclk index!",
- return -EINVAL);
+ socclk_idx);
} else {
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
- data->smc_state_table.mem_boot_level),
- "Failed to set soft min uclk index!",
- return -EINVAL);
+ data->smc_state_table.mem_boot_level);
}
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->smc_state_table.mem_boot_level;
@@ -3727,20 +3605,16 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
vega10_apply_dal_minimum_voltage_request(hwmgr);
if (!data->registry_data.sclk_dpm_key_disabled) {
if (data->smc_state_table.gfx_max_level !=
- data->dpm_table.gfx_table.dpm_state.soft_max_level) {
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr,
+ data->dpm_table.gfx_table.dpm_state.soft_max_level) {
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxclkByIndex,
- data->smc_state_table.gfx_max_level),
- "Failed to set soft max sclk index!",
- return -EINVAL);
+ data->smc_state_table.gfx_max_level);
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->smc_state_table.gfx_max_level;
}
@@ -3748,13 +3622,10 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_max_level !=
- data->dpm_table.mem_table.dpm_state.soft_max_level) {
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr,
- PPSMC_MSG_SetSoftMaxUclkByIndex,
- data->smc_state_table.mem_max_level),
- "Failed to set soft max mclk index!",
- return -EINVAL);
+ data->dpm_table.mem_table.dpm_state.soft_max_level) {
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxUclkByIndex,
+ data->smc_state_table.mem_max_level);
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->smc_state_table.mem_max_level;
}
@@ -3766,8 +3637,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
static int vega10_generate_dpm_level_enable_mask(
struct pp_hwmgr *hwmgr, const void *input)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
const struct phm_set_power_state_input *states =
(const struct phm_set_power_state_input *)input;
const struct vega10_power_state *vega10_ps =
@@ -3805,8 +3675,7 @@ static int vega10_generate_dpm_level_enable_mask(
int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DPM_VCE].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -3822,16 +3691,11 @@ int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
- int result = 0;
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t low_sclk_interrupt_threshold = 0;
if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
- (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
@@ -3839,20 +3703,19 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
cpu_to_le32(low_sclk_interrupt_threshold);
/* This message will also enable SmcToHost Interrupt */
- result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetLowGfxclkInterruptThreshold,
(uint32_t)low_sclk_interrupt_threshold);
}
- return result;
+ return 0;
}
static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
const void *input)
{
int tmp_result, result = 0;
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
@@ -3875,8 +3738,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
"Failed to update SCLK threshold!",
result = tmp_result);
- result = vega10_copy_table_to_smc(hwmgr,
- (uint8_t *)pp_table, PPTABLE);
+ result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
@@ -3935,13 +3797,11 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
{
uint32_t value;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrPkgPwr),
- "Failed to get current package power!",
- return -EINVAL);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
+ value = smum_get_argument(hwmgr);
- vega10_read_arg_from_smc(hwmgr, &value);
/* power value is an integer */
+ memset(query, 0, sizeof *query);
query->average_gpu_power = value << 8;
return 0;
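The reworked query sends the message, reads the reply back with smum_get_argument(), clears the output struct and stores the value shifted left by 8. Treating that shift as a conversion from whole watts to a 24.8 fixed-point field is an assumption drawn from the "<< 8"; the standalone sketch below only illustrates the arithmetic.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gpu_power_query {
	uint32_t average_gpu_power;	/* assumed 24.8 fixed point */
};

int main(void)
{
	uint32_t watts_from_fw = 137;		/* pretend firmware reply */
	struct gpu_power_query q;

	memset(&q, 0, sizeof(q));		/* mirrors the added memset() */
	q.average_gpu_power = watts_from_fw << 8;

	printf("raw=%u -> %.2f W\n", (unsigned)q.average_gpu_power,
	       q.average_gpu_power / 256.0);
	return 0;
}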
@@ -3951,34 +3811,37 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
uint32_t sclk_idx, mclk_idx, activity_percent = 0;
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_dpm_table *dpm_table = &data->dpm_table;
int ret = 0;
+ uint32_t reg, val_vid;
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
- if (!ret) {
- vega10_read_arg_from_smc(hwmgr, &sclk_idx);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
+ sclk_idx = smum_get_argument(hwmgr);
+ if (sclk_idx < dpm_table->gfx_table.count) {
*((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
*size = 4;
+ } else {
+ ret = -EINVAL;
}
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
- if (!ret) {
- vega10_read_arg_from_smc(hwmgr, &mclk_idx);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
+ mclk_idx = smum_get_argument(hwmgr);
+ if (mclk_idx < dpm_table->mem_table.count) {
*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
*size = 4;
+ } else {
+ ret = -EINVAL;
}
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
- if (!ret) {
- vega10_read_arg_from_smc(hwmgr, &activity_percent);
- *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
- *size = 4;
- }
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
+ activity_percent = smum_get_argument(hwmgr);
+ *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
+ *size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
@@ -4000,17 +3863,27 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
}
break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ reg = soc15_get_register_offset(SMUIO_HWID, 0,
+ mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX,
+ mmSMUSVI0_PLANE0_CURRENTVID);
+ val_vid = (cgs_read_register(hwmgr->device, reg) &
+ SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
+ SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
+ *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
+ return 0;
default:
ret = -EINVAL;
break;
}
+
return ret;
}
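The new AMDGPU_PP_SENSOR_VDDGFX branch above reads the SVI0 plane-0 VID field out of an SMUIO register and converts the VID code to a voltage. The standalone sketch below shows the mask-and-shift plus a conversion assuming the common SVI2 encoding (V = 1.55 V - VID * 6.25 mV); the mask, shift and formula here are assumptions for illustration, not values taken from the register headers.

#include <stdint.h>
#include <stdio.h>

#define VID_MASK   0x000001FFu	/* hypothetical field mask */
#define VID_SHIFT  0		/* hypothetical field shift */

static uint32_t vid_to_mv(uint8_t vid)
{
	/* 1.55 V - vid * 6.25 mV, in millivolts (assumed SVI2 encoding) */
	return (6200u - (uint32_t)vid * 25u) / 4u;
}

int main(void)
{
	uint32_t reg_val = 0x00000040;		/* pretend register readback */
	uint8_t vid = (uint8_t)((reg_val & VID_MASK) >> VID_SHIFT);

	printf("VID=0x%02x -> %u mV\n", vid, (unsigned)vid_to_mv(vid));
	return 0;
}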
-static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
+static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
bool has_disp)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
has_disp ? 0 : 1);
}
@@ -4045,7 +3918,7 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
if (!result) {
clk_request = (clk_freq << 16) | clk_select;
- result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_RequestDisplayClockByFreq,
clk_request);
}
@@ -4076,8 +3949,7 @@ static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
static int vega10_notify_smc_display_config_after_ps_adjustment(
struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *dpm_table =
&data->dpm_table.dcef_table;
struct phm_ppt_v2_information *table_info =
@@ -4114,10 +3986,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
+ smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR /100),
- "Attempt to set divider for DCEFCLK Failed!",);
+ min_clocks.dcefClockInSR / 100);
} else {
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
@@ -4136,8 +4007,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
data->smc_state_table.gfx_boot_level =
data->smc_state_table.gfx_max_level =
@@ -4159,8 +4029,7 @@ static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
data->smc_state_table.gfx_boot_level =
data->smc_state_table.gfx_max_level =
@@ -4183,7 +4052,7 @@ static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
data->smc_state_table.gfx_boot_level =
vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
@@ -4216,6 +4085,8 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -4257,6 +4128,9 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
+ if (hwmgr->pstate_sclk == 0)
+ vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
+
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega10_force_dpm_highest(hwmgr);
@@ -4289,12 +4163,13 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
}
+
return ret;
}
static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
return AMD_FAN_CTRL_MANUAL;
@@ -4354,7 +4229,7 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_mclk;
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i;
clocks->num_levels = 0;
@@ -4478,7 +4353,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
int result = 0;
uint32_t i;
@@ -4534,27 +4409,12 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
- int i;
-
- if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
- return -EINVAL;
+ struct vega10_hwmgr *data = hwmgr->backend;
switch (type) {
case PP_SCLK:
- for (i = 0; i < 32; i++) {
- if (mask & (1 << i))
- break;
- }
- data->smc_state_table.gfx_boot_level = i;
-
- for (i = 31; i >= 0; i--) {
- if (mask & (1 << i))
- break;
- }
- data->smc_state_table.gfx_max_level = i;
+ data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
+ data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Failed to upload boot level to lowest!",
@@ -4566,17 +4426,8 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
break;
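In both the PP_SCLK case above and the PP_MCLK case below, the open-coded bit scans are replaced by ffs()/fls(): the boot level becomes the lowest set bit of the mask, the max level the highest, with a fallback of 0 for an empty mask. A standalone check of that arithmetic (kernel ffs()/fls() are 1-based, hence the "- 1"):

#include <stdio.h>

static int my_ffs(unsigned int x) { return x ? __builtin_ffs((int)x) : 0; }
static int my_fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

int main(void)
{
	unsigned int mask = 0x34;		/* levels 2, 4 and 5 selected */
	unsigned int boot = mask ? (unsigned int)(my_ffs(mask) - 1) : 0;
	unsigned int max  = mask ? (unsigned int)(my_fls(mask) - 1) : 0;

	printf("boot=%u max=%u\n", boot, max);	/* prints: boot=2 max=5 */
	return 0;
}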
case PP_MCLK:
- for (i = 0; i < 32; i++) {
- if (mask & (1 << i))
- break;
- }
- data->smc_state_table.mem_boot_level = i;
-
- for (i = 31; i >= 0; i--) {
- if (mask & (1 << i))
- break;
- }
- data->smc_state_table.mem_max_level = i;
+ data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
+ data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Failed to upload boot level to lowest!",
@@ -4599,7 +4450,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
@@ -4610,14 +4461,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.sclk_dpm_key_disabled)
break;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentGfxclkIndex),
- "Attempt to get current sclk index Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
- &now),
- "Attempt to read sclk index Failed!",
- return -1);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
+ now = smum_get_argument(hwmgr);
for (i = 0; i < sclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4628,14 +4473,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.mclk_dpm_key_disabled)
break;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentUclkIndex),
- "Attempt to get current mclk index Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
- &now),
- "Attempt to read mclk index Failed!",
- return -1);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
+ now = smum_get_argument(hwmgr);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4643,20 +4482,14 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_PCIE:
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentLinkIndex),
- "Attempt to get current mclk index Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
- &now),
- "Attempt to read mclk index Failed!",
- return -1);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
+ now = smum_get_argument(hwmgr);
for (i = 0; i < pcie_table->count; i++)
size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->pcie_gen[i] == 0) ? "2.5GB, x1" :
- (pcie_table->pcie_gen[i] == 1) ? "5.0GB, x16" :
- (pcie_table->pcie_gen[i] == 2) ? "8.0GB, x16" : "",
+ (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
+ (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
+ (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
(i == now) ? "*" : "");
break;
default:
@@ -4667,7 +4500,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
int result = 0;
uint32_t num_turned_on_displays = 1;
Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
@@ -4675,8 +4508,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
- result = vega10_copy_table_to_smc(hwmgr,
- (uint8_t *)wm_table, WMTABLE);
+ result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
}
@@ -4693,8 +4525,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DPM_UVD].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -4709,7 +4540,7 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
data->vce_power_gated = bgate;
vega10_enable_disable_vce_dpm(hwmgr, !bgate);
@@ -4717,7 +4548,7 @@ static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
data->uvd_power_gated = bgate;
vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
@@ -4770,7 +4601,7 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
static bool
vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
bool is_update_required = false;
struct cgs_display_info info = {0, 0, NULL};
@@ -4791,11 +4622,6 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
- tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is not running right now, no need to disable DPM!",
- return 0);
-
if (PP_CAP(PHM_PlatformCaps_ThermalController))
vega10_disable_thermal_protection(hwmgr);
@@ -4826,12 +4652,14 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
tmp_result = vega10_acg_disable(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable acg!", result = tmp_result);
+
+ vega10_enable_disable_PCC_limit_feature(hwmgr, false);
return result;
}
static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
int result;
result = vega10_disable_dpm_tasks(hwmgr);
@@ -4843,71 +4671,9 @@ static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
return result;
}
-static void vega10_find_min_clock_index(struct pp_hwmgr *hwmgr,
- uint32_t *sclk_idx, uint32_t *mclk_idx,
- uint32_t min_sclk, uint32_t min_mclk)
-{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
- struct vega10_dpm_table *dpm_table = &(data->dpm_table);
- uint32_t i;
-
- for (i = 0; i < dpm_table->gfx_table.count; i++) {
- if (dpm_table->gfx_table.dpm_levels[i].enabled &&
- dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
- *sclk_idx = i;
- break;
- }
- }
-
- for (i = 0; i < dpm_table->mem_table.count; i++) {
- if (dpm_table->mem_table.dpm_levels[i].enabled &&
- dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
- *mclk_idx = i;
- break;
- }
- }
-}
-
-static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
- uint32_t sclk_idx = ~0, mclk_idx = ~0;
-
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
- return -EINVAL;
-
- vega10_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
- request->min_sclk, request->min_mclk);
-
- if (sclk_idx != ~0) {
- if (!data->registry_data.sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- !smum_send_msg_to_smc_with_parameter(
- hwmgr,
- PPSMC_MSG_SetSoftMinGfxclkByIndex,
- sclk_idx),
- "Failed to set soft min sclk index!",
- return -EINVAL);
- }
-
- if (mclk_idx != ~0) {
- if (!data->registry_data.mclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- !smum_send_msg_to_smc_with_parameter(
- hwmgr,
- PPSMC_MSG_SetSoftMinUclkByIndex,
- mclk_idx),
- "Failed to set soft min mclk index!",
- return -EINVAL);
- }
-
- return 0;
-}
-
static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
@@ -4925,7 +4691,7 @@ static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
struct pp_power_state *ps;
@@ -4958,7 +4724,7 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
@@ -4977,7 +4743,7 @@ static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
struct pp_power_state *ps;
@@ -5035,34 +4801,87 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
return 0;
}
-static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
- const void *info)
+static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *thermal_data)
{
- struct cgs_irq_src_funcs *irq_src =
- (struct cgs_irq_src_funcs *)info;
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)hwmgr->pptable;
- if (hwmgr->thermal_controller.ucType ==
- ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 ||
- hwmgr->thermal_controller.ucType ==
- ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
- 0xf, /* AMDGPU_IH_CLIENTID_THM */
- 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
- "Failed to register high thermal interrupt!",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
- 0xf, /* AMDGPU_IH_CLIENTID_THM */
- 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
- "Failed to register low thermal interrupt!",
- return -EINVAL);
- }
+ memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
- /* Register CTF(GPIO_19) interrupt */
- PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
- 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
- 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
- "Failed to register CTF thermal interrupt!",
- return -EINVAL);
+ thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
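The new get_thermal_temperature_range callback starts from a default policy table and raises the maximum to the board's software shutdown temperature scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES. The standalone sketch below assumes that constant is 1000 (degrees C to millidegrees); treat all numbers as illustrative.

#include <stdio.h>

struct temp_range { int min; int max; };	/* millidegrees Celsius */

int main(void)
{
	const struct temp_range default_policy = { -273150, 99000 };
	unsigned short software_shutdown_temp = 118;	/* degrees C, pretend pptable value */

	struct temp_range r = default_policy;
	r.max = software_shutdown_temp * 1000;		/* assumed unit factor */

	printf("range: %d..%d mC\n", r.min, r.max);
	return 0;
}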
+
+static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t i, size = 0;
+ static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
+ {90, 60, 0, 0,},
+ {70, 60, 0, 0,},
+ {70, 90, 0, 0,},
+ {30, 60, 0, 6,},
+ };
+ static const char *profile_name[6] = {"3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ static const char *title[6] = {"NUM",
+ "MODE_NAME",
+ "BUSY_SET_POINT",
+ "FPS",
+ "USE_RLC_BUSY",
+ "MIN_ACTIVE_LEVEL"};
+
+ if (!buf)
+ return -EINVAL;
+
+ size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
+ title[1], title[2], title[3], title[4], title[5]);
+
+ for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
+ size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
+ i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ profile_mode_setting[i][0], profile_mode_setting[i][1],
+ profile_mode_setting[i][2], profile_mode_setting[i][3]);
+ size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
+ profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ data->custom_profile_mode[0], data->custom_profile_mode[1],
+ data->custom_profile_mode[2], data->custom_profile_mode[3]);
+ return size;
+}
+
+static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ uint8_t busy_set_point;
+ uint8_t FPS;
+ uint8_t use_rlc_busy;
+ uint8_t min_active_level;
+
+ hwmgr->power_profile_mode = input[size];
+
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ 1<<hwmgr->power_profile_mode);
+
+ if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (size == 0 || size > 4)
+ return -EINVAL;
+
+ data->custom_profile_mode[0] = busy_set_point = input[0];
+ data->custom_profile_mode[1] = FPS = input[1];
+ data->custom_profile_mode[2] = use_rlc_busy = input[2];
+ data->custom_profile_mode[3] = min_active_level = input[3];
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetCustomGfxDpmParameters,
+ busy_set_point | FPS<<8 |
+ use_rlc_busy << 16 | min_active_level<<24);
+ }
return 0;
}
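set_power_profile_mode above records the selected mode, sends a workload mask of 1 << mode, and for the CUSTOM entry packs the four 8-bit parameters into a single 32-bit firmware argument. A standalone check of that packing with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t busy_set_point   = 70;
	uint8_t fps              = 60;
	uint8_t use_rlc_busy     = 1;
	uint8_t min_active_level = 3;
	unsigned int power_profile_mode = 5;	/* e.g. the CUSTOM entry */

	uint32_t arg = (uint32_t)busy_set_point |
		       (uint32_t)fps << 8 |
		       (uint32_t)use_rlc_busy << 16 |
		       (uint32_t)min_active_level << 24;
	uint32_t workload_mask = 1u << power_profile_mode;

	printf("custom arg = 0x%08x, workload mask = 0x%x\n",
	       (unsigned)arg, (unsigned)workload_mask);
	return 0;
}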
@@ -5085,7 +4904,6 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.notify_smc_display_config_after_ps_adjustment =
vega10_notify_smc_display_config_after_ps_adjustment,
.force_dpm_level = vega10_dpm_force_dpm_level,
- .get_temperature = vega10_thermal_get_temperature,
.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
@@ -5114,17 +4932,30 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
vega10_check_smc_update_required_for_display_configuration,
.power_off_asic = vega10_power_off_asic,
.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
- .set_power_profile_state = vega10_set_power_profile_state,
.get_sclk_od = vega10_get_sclk_od,
.set_sclk_od = vega10_set_sclk_od,
.get_mclk_od = vega10_get_mclk_od,
.set_mclk_od = vega10_set_mclk_od,
.avfs_control = vega10_avfs_enable,
.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
- .register_internal_thermal_interrupt = vega10_register_thermal_interrupt,
+ .get_thermal_temperature_range = vega10_get_thermal_temperature_range,
+ .register_irq_handlers = smu9_register_irq_handlers,
.start_thermal_controller = vega10_start_thermal_controller,
+ .get_power_profile_mode = vega10_get_power_profile_mode,
+ .set_power_profile_mode = vega10_set_power_profile_mode,
+ .set_power_limit = vega10_set_power_limit,
};
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint32_t feature_mask)
+{
+ int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
+ PPSMC_MSG_DisableSmuFeatures;
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ msg, feature_mask);
+}
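The newly exported vega10_enable_smc_features() simply chooses between the enable and disable messages and forwards the feature bitmap. A trivial standalone model of that selection; the message IDs are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

enum { MSG_ENABLE_FEATURES = 0x10, MSG_DISABLE_FEATURES = 0x11 };

static int send_msg(int msg, unsigned int arg)
{
	printf("msg 0x%x arg 0x%x\n", msg, arg);
	return 0;
}

static int set_features(bool enable, unsigned int feature_mask)
{
	int msg = enable ? MSG_ENABLE_FEATURES : MSG_DISABLE_FEATURES;

	return send_msg(msg, feature_mask);
}

int main(void)
{
	return set_features(true, 1u << 7);
}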
+
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 8f7358c..5339ea1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -66,6 +66,7 @@ enum {
GNLD_FEATURE_FAST_PPT_BIT,
GNLD_DIDT,
GNLD_ACG,
+ GNLD_PCC_LIMIT,
GNLD_FEATURES_MAX
};
@@ -189,12 +190,6 @@ struct vega10_vbios_boot_state {
uint32_t dcef_clock;
};
-#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
-#define DPMTABLE_UPDATE_SCLK 0x00000004
-#define DPMTABLE_UPDATE_MCLK 0x00000008
-#define DPMTABLE_OD_UPDATE_VDDC 0x00000010
-
struct vega10_smc_state_table {
uint32_t soc_boot_level;
uint32_t gfx_boot_level;
@@ -379,17 +374,14 @@ struct vega10_hwmgr {
/* ---- Overdrive next setting ---- */
uint32_t apply_overdrive_next_settings_mask;
- /* ---- Workload Mask ---- */
- uint32_t workload_mask;
-
/* ---- SMU9 ---- */
struct smu_features smu_features[GNLD_FEATURES_MAX];
struct vega10_smc_state_table smc_state_table;
uint32_t config_telemetry;
- uint32_t smu_version;
uint32_t acg_loop_state;
uint32_t mem_channels;
+ uint8_t custom_profile_mode[4];
};
#define VEGA10_DPM2_NEAR_TDP_DEC 10
@@ -448,5 +440,7 @@ int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint32_t feature_mask);
#endif /* _VEGA10_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h
index 8c55eaa..faf7ac0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h
@@ -24,21 +24,20 @@
#ifndef VEGA10_INC_H
#define VEGA10_INC_H
-#include "asic_reg/vega10/THM/thm_9_0_default.h"
-#include "asic_reg/vega10/THM/thm_9_0_offset.h"
-#include "asic_reg/vega10/THM/thm_9_0_sh_mask.h"
+#include "asic_reg/thm/thm_9_0_default.h"
+#include "asic_reg/thm/thm_9_0_offset.h"
+#include "asic_reg/thm/thm_9_0_sh_mask.h"
-#include "asic_reg/vega10/MP/mp_9_0_default.h"
-#include "asic_reg/vega10/MP/mp_9_0_offset.h"
-#include "asic_reg/vega10/MP/mp_9_0_sh_mask.h"
+#include "asic_reg/mp/mp_9_0_offset.h"
+#include "asic_reg/mp/mp_9_0_sh_mask.h"
-#include "asic_reg/vega10/GC/gc_9_0_default.h"
-#include "asic_reg/vega10/GC/gc_9_0_offset.h"
-#include "asic_reg/vega10/GC/gc_9_0_sh_mask.h"
+#include "asic_reg/gc/gc_9_0_default.h"
+#include "asic_reg/gc/gc_9_0_offset.h"
+#include "asic_reg/gc/gc_9_0_sh_mask.h"
-#include "asic_reg/vega10/NBIO/nbio_6_1_default.h"
-#include "asic_reg/vega10/NBIO/nbio_6_1_offset.h"
-#include "asic_reg/vega10/NBIO/nbio_6_1_sh_mask.h"
+#include "asic_reg/nbio/nbio_6_1_default.h"
+#include "asic_reg/nbio/nbio_6_1_offset.h"
+#include "asic_reg/nbio/nbio_6_1_sh_mask.h"
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 598a194..ba63fae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -24,7 +24,6 @@
#include "hwmgr.h"
#include "vega10_hwmgr.h"
#include "vega10_powertune.h"
-#include "vega10_smumgr.h"
#include "vega10_ppsmc.h"
#include "vega10_inc.h"
#include "pp_debug.h"
@@ -850,7 +849,6 @@ static int vega10_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const
static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
{
uint32_t data;
- int result;
uint32_t en = (enable ? 1 : 0);
uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
@@ -924,24 +922,20 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
}
}
- if (enable) {
- /* For Vega10, SMC does not support any mask yet. */
- result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
- PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
- }
+ /* For Vega10, SMC does not support any mask yet. */
+ if (enable)
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+
}
static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
uint32_t num_se = 0, count, data;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t reg;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
- if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
- num_se = sys_info.value;
+ num_se = adev->gfx.config.max_shader_engines;
cgs_enter_safe_mode(hwmgr->device, true);
@@ -989,13 +983,10 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
uint32_t num_se = 0, count, data;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t reg;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
- if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
- num_se = sys_info.value;
+ num_se = adev->gfx.config.max_shader_engines;
cgs_enter_safe_mode(hwmgr->device, true);
@@ -1054,13 +1045,10 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
{
int result;
uint32_t num_se = 0, count, data;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t reg;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
- if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
- num_se = sys_info.value;
+ num_se = adev->gfx.config.max_shader_engines;
cgs_enter_safe_mode(hwmgr->device, true);
@@ -1105,13 +1093,10 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
int result;
uint32_t num_se = 0;
uint32_t count, data;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t reg;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
- if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
- num_se = sys_info.value;
+ num_se = adev->gfx.config.max_shader_engines;
cgs_enter_safe_mode(hwmgr->device, true);
@@ -1208,7 +1193,7 @@ static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
int vega10_enable_didt_config(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DIDT].supported) {
if (data->smu_features[GNLD_DIDT].enabled)
@@ -1255,7 +1240,7 @@ int vega10_enable_didt_config(struct pp_hwmgr *hwmgr)
int vega10_disable_didt_config(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DIDT].supported) {
if (!data->smu_features[GNLD_DIDT].enabled)
@@ -1301,7 +1286,7 @@ int vega10_disable_didt_config(struct pp_hwmgr *hwmgr)
void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_tdp_table *tdp_table = table_info->tdp_table;
@@ -1340,11 +1325,10 @@ void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->registry_data.enable_pkg_pwr_tracking_feature)
- return smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetPptLimit, n);
return 0;
@@ -1352,15 +1336,15 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_tdp_table *tdp_table = table_info->tdp_table;
- uint32_t default_pwr_limit =
- (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
int result = 0;
+ hwmgr->default_power_limit = hwmgr->power_limit =
+ (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
+
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
@@ -1374,7 +1358,7 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
"Attempt to enable PPT feature Failed!",
data->smu_features[GNLD_TDC].supported = false);
- result = vega10_set_power_limit(hwmgr, default_pwr_limit);
+ result = vega10_set_power_limit(hwmgr, hwmgr->power_limit);
PP_ASSERT_WITH_CODE(!result,
"Failed to set Default Power Limit in SMC!",
return result);
@@ -1385,8 +1369,7 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data =
- (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
@@ -1405,24 +1388,24 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
return 0;
}
-static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
+static void vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
}
int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
{
- int adjust_percent, result = 0;
+ int adjust_percent;
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
adjust_percent =
hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
- result = vega10_set_overdrive_target_percentage(hwmgr,
+ vega10_set_overdrive_target_percentage(hwmgr,
(uint32_t)adjust_percent);
}
- return result;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index f14c761..c61d074 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -267,10 +267,10 @@ static int init_over_drive_limits(
hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
hwmgr->platform_descriptor.overdriveVDDCStep = 0;
- if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 &&
- hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ACOverdriveSupport);
+ if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 ||
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
+ hwmgr->od_enabled = false;
+ pr_debug("OverDrive feature not support by VBIOS\n");
}
return 0;
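init_over_drive_limits now disables overdrive outright when the VBIOS reports a zero engine or memory clock limit, instead of setting a platform cap when both are non-zero. Restated as a standalone check with illustrative values:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int od_engine_clock_limit = 0;		/* pretend VBIOS pptable values */
	unsigned int od_memory_clock_limit = 120000;
	bool od_enabled = true;

	if (od_engine_clock_limit == 0 || od_memory_clock_limit == 0) {
		od_enabled = false;
		printf("OverDrive not supported by VBIOS\n");
	}
	printf("od_enabled=%d\n", od_enabled);
	return 0;
}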
@@ -688,9 +688,9 @@ static int get_dcefclk_voltage_dependency_table(
uint8_t num_entries;
struct phm_ppt_v1_clock_voltage_dependency_table
*clk_table;
- struct cgs_system_info sys_info = {0};
uint32_t dev_id;
uint32_t rev_id;
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0),
"Invalid PowerPlay Table!", return -1);
@@ -701,15 +701,8 @@ static int get_dcefclk_voltage_dependency_table(
* This DPM level was added to support 3DPM monitors @ 4K120Hz
*
*/
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- dev_id = (uint32_t)sys_info.value;
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- rev_id = (uint32_t)sys_info.value;
+ dev_id = adev->pdev->device;
+ rev_id = adev->pdev->revision;
if (dev_id == 0x6863 && rev_id == 0 &&
clk_dep_table->entries[clk_dep_table->ucNumEntries - 1].ulClk < 90000)
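Here dev_id and rev_id come straight from the PCI device instead of CGS queries, and the quirk keyed on device 0x6863 revision 0 adds a higher DCEFCLK level when the table tops out below 90000 (900 MHz, assuming the usual 10 kHz clock units). A standalone restatement of that check; the unit interpretation and the struct name are assumptions:

#include <stdint.h>
#include <stdio.h>

struct pci_ids { uint32_t device; uint32_t revision; };

int main(void)
{
	struct pci_ids pdev = { .device = 0x6863, .revision = 0 };
	uint32_t last_entry_clk = 60000;	/* pretend pptable value, 10 kHz units */

	/* Per the comment in the hunk above, an extra DCEF DPM level is patched
	 * in for this SKU to support 4K@120Hz monitors. */
	if (pdev.device == 0x6863 && pdev.revision == 0 && last_entry_clk < 90000)
		printf("apply quirk: add a higher DCEFCLK level\n");
	return 0;
}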
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index dc3761b..9f18226 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -23,7 +23,6 @@
#include "vega10_thermal.h"
#include "vega10_hwmgr.h"
-#include "vega10_smumgr.h"
#include "vega10_ppsmc.h"
#include "vega10_inc.h"
#include "pp_soc15.h"
@@ -31,14 +30,8 @@
static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentRpm),
- "Attempt to get current RPM from SMC Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
- current_rpm),
- "Attempt to read current RPM from SMC Failed!",
- return -1);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm);
+ *current_rpm = smum_get_argument(hwmgr);
return 0;
}
@@ -96,7 +89,7 @@ int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t tach_period;
uint32_t crystal_clock_freq;
int result = 0;
@@ -117,7 +110,7 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (tach_period == 0)
return -EINVAL;
- crystal_clock_freq = smu7_get_xclk(hwmgr);
+ crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
*speed = 60 * crystal_clock_freq * 10000 / tach_period;
}
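The RPM readback above now takes the reference clock from amdgpu_asic_get_xclk(). The standalone arithmetic below reproduces the formula with made-up numbers, assuming the clock is reported in 10 kHz units (so the "* 10000" yields Hz) and that the tach period counts reference-clock ticks per revolution; both units are assumptions.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crystal_clock_freq = 2700;	/* 27 MHz expressed in 10 kHz units */
	uint32_t tach_period = 1012500;		/* pretend register value */

	uint64_t speed = 60ULL * crystal_clock_freq * 10000 / tach_period;

	printf("fan speed = %llu RPM\n", (unsigned long long)speed);	/* 1600 */
	return 0;
}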
@@ -195,7 +188,7 @@ int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
*/
static int vega10_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(
@@ -212,7 +205,7 @@ static int vega10_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
static int vega10_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(
@@ -242,7 +235,7 @@ int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return -1;
@@ -338,7 +331,7 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
if (!result) {
- crystal_clock_freq = smu7_get_xclk(hwmgr);
+ crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
@@ -386,9 +379,9 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
- uint32_t low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
+ int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
+ int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
uint32_t val, reg;
@@ -409,7 +402,9 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
+ (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
+ (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
cgs_write_register(hwmgr->device, reg, val);
@@ -450,7 +445,7 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
*/
static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t val = 0;
uint32_t reg;
@@ -482,7 +477,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
*/
int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
uint32_t reg;
if (data->smu_features[GNLD_FW_CTF].supported) {
@@ -531,7 +526,7 @@ int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
int ret;
- struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *table = &(data->smc_state_table.pp_table);
if (!data->smu_features[GNLD_FAN_CONTROL].supported)
@@ -575,8 +570,9 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
table->FanStartTemp = hwmgr->thermal_controller.
advanceFanControlParameters.usZeroRPMStartTemperature;
- ret = vega10_copy_table_to_smc(hwmgr,
- (uint8_t *)(&(data->smc_state_table.pp_table)), PPTABLE);
+ ret = smum_smc_table_manager(hwmgr,
+ (uint8_t *)(&(data->smc_state_table.pp_table)),
+ PPTABLE, false);
if (ret)
pr_info("Failed to update Fan Control Table in PPTable!");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
index 82f10bd..21e7c4d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
@@ -73,7 +73,7 @@ extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
extern int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range);
-extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
+
#endif
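The fan-speed hunks above now obtain the reference (crystal) clock through amdgpu_asic_get_xclk() on hwmgr->adev instead of the removed smu7_get_xclk() helper. A minimal sketch of the RPM conversion those hunks perform, assuming the reference clock is reported in 10 kHz units and tach_period counts reference-clock cycles per tach pulse (the helper name is illustrative, not part of this patch):

/* Illustrative sketch: fan RPM from tachometer period and reference clock. */
static inline uint32_t fan_rpm_from_tach(uint32_t xclk_10khz, uint32_t tach_period)
{
	if (tach_period == 0)
		return 0;	/* avoid division by zero, as the driver does */
	return 60 * xclk_10khz * 10000 / tach_period;
}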
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
new file mode 100644
index 0000000..15ce1e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -0,0 +1,2090 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "hwmgr.h"
+#include "amd_powerplay.h"
+#include "vega12_smumgr.h"
+#include "hardwaremanager.h"
+#include "ppatomfwctrl.h"
+#include "atomfirmware.h"
+#include "cgs_common.h"
+#include "vega12_powertune.h"
+#include "vega12_inc.h"
+#include "pp_soc15.h"
+#include "pppcielanes.h"
+#include "vega12_hwmgr.h"
+#include "vega12_processpptables.h"
+#include "vega12_pptable.h"
+#include "vega12_thermal.h"
+#include "vega12_ppsmc.h"
+#include "pp_debug.h"
+#include "amd_pcie_helpers.h"
+#include "ppinterrupt.h"
+#include "pp_overdriver.h"
+#include "pp_thermal.h"
+
+
+static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask);
+static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
+ uint32_t *clock,
+ PPCLK_e clock_select,
+ bool max);
+
+static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->gfxclk_average_alpha = PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT;
+ data->socclk_average_alpha = PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT;
+ data->uclk_average_alpha = PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT;
+ data->gfx_activity_average_alpha = PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT;
+ data->lowest_uclk_reserved_for_ulv = PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT;
+
+ data->display_voltage_mode = PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT;
+ data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->disp_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->disp_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->disp_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->phy_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->phy_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+ data->phy_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
+
+ data->registry_data.disallowed_features = 0x0;
+ data->registry_data.od_state_in_dc_support = 0;
+ data->registry_data.skip_baco_hardware = 0;
+
+ data->registry_data.log_avfs_param = 0;
+ data->registry_data.sclk_throttle_low_notification = 1;
+ data->registry_data.force_dpm_high = 0;
+ data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
+
+ data->registry_data.didt_support = 0;
+ if (data->registry_data.didt_support) {
+ data->registry_data.didt_mode = 6;
+ data->registry_data.sq_ramping_support = 1;
+ data->registry_data.db_ramping_support = 0;
+ data->registry_data.td_ramping_support = 0;
+ data->registry_data.tcp_ramping_support = 0;
+ data->registry_data.dbr_ramping_support = 0;
+ data->registry_data.edc_didt_support = 1;
+ data->registry_data.gc_didt_support = 0;
+ data->registry_data.psm_didt_support = 0;
+ }
+
+ data->registry_data.pcie_lane_override = 0xff;
+ data->registry_data.pcie_speed_override = 0xff;
+ data->registry_data.pcie_clock_override = 0xffffffff;
+ data->registry_data.regulator_hot_gpio_support = 1;
+ data->registry_data.ac_dc_switch_gpio_support = 0;
+ data->registry_data.quick_transition_support = 0;
+ data->registry_data.zrpm_start_temp = 0xffff;
+ data->registry_data.zrpm_stop_temp = 0xffff;
+ data->registry_data.odn_feature_enable = 1;
+ data->registry_data.disable_water_mark = 0;
+ data->registry_data.disable_pp_tuning = 0;
+ data->registry_data.disable_xlpp_tuning = 0;
+ data->registry_data.disable_workload_policy = 0;
+ data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
+ data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
+ data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
+ data->registry_data.force_workload_policy_mask = 0;
+ data->registry_data.disable_3d_fs_detection = 0;
+ data->registry_data.fps_support = 1;
+ data->registry_data.disable_auto_wattman = 1;
+ data->registry_data.auto_wattman_debug = 0;
+ data->registry_data.auto_wattman_sample_period = 100;
+ data->registry_data.auto_wattman_threshold = 50;
+}
+
+static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ if (data->vddci_control == VEGA12_VOLTAGE_CONTROL_NONE)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDCI);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableSMU7ThermalManagement);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDynamicPowerGating);
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UnTabledHardwareInterface);
+
+ if (data->registry_data.odn_feature_enable)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ODNinACSupport);
+ else {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_OD6inACSupport);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_OD6PlusinACSupport);
+ }
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ActivityReporting);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+ if (data->registry_data.od_state_in_dc_support) {
+ if (data->registry_data.odn_feature_enable)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ODNinDCSupport);
+ else {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_OD6inDCSupport);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_OD6PlusinDCSupport);
+ }
+ }
+
+ if (data->registry_data.thermal_support
+ && data->registry_data.fuzzy_fan_control_support
+ && hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ODFuzzyFanControlSupport);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicPowerManagement);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SMC);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalPolicyDelay);
+
+ if (data->registry_data.force_dpm_high)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicUVDState);
+
+ if (data->registry_data.sclk_throttle_low_notification)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification);
+
+ /* power tune caps */
+ /* assume disabled */
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DiDtSupport);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DiDtEDCEnable);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_GCEDC);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PSM);
+
+ if (data->registry_data.didt_support) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
+ if (data->registry_data.sq_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
+ if (data->registry_data.db_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
+ if (data->registry_data.td_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
+ if (data->registry_data.tcp_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
+ if (data->registry_data.dbr_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
+ if (data->registry_data.edc_didt_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
+ if (data->registry_data.gc_didt_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
+ if (data->registry_data.psm_didt_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
+ }
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+
+ if (data->registry_data.ac_dc_switch_gpio_support) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
+ }
+
+ if (data->registry_data.quick_transition_support) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+ }
+
+ if (data->lowest_uclk_reserved_for_ulv != PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_LowestUclkReservedForUlv);
+ if (data->lowest_uclk_reserved_for_ulv == 1)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_LowestUclkReservedForUlv);
+ }
+
+ if (data->registry_data.custom_fan_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CustomFanControlSupport);
+
+ return 0;
+}
+
+static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int i;
+
+ data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
+ FEATURE_DPM_PREFETCHER_BIT;
+ data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
+ FEATURE_DPM_GFXCLK_BIT;
+ data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
+ FEATURE_DPM_UCLK_BIT;
+ data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
+ FEATURE_DPM_SOCCLK_BIT;
+ data->smu_features[GNLD_DPM_UVD].smu_feature_id =
+ FEATURE_DPM_UVD_BIT;
+ data->smu_features[GNLD_DPM_VCE].smu_feature_id =
+ FEATURE_DPM_VCE_BIT;
+ data->smu_features[GNLD_ULV].smu_feature_id =
+ FEATURE_ULV_BIT;
+ data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
+ FEATURE_DPM_MP0CLK_BIT;
+ data->smu_features[GNLD_DPM_LINK].smu_feature_id =
+ FEATURE_DPM_LINK_BIT;
+ data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
+ FEATURE_DPM_DCEFCLK_BIT;
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
+ FEATURE_DS_GFXCLK_BIT;
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
+ FEATURE_DS_SOCCLK_BIT;
+ data->smu_features[GNLD_DS_LCLK].smu_feature_id =
+ FEATURE_DS_LCLK_BIT;
+ data->smu_features[GNLD_PPT].smu_feature_id =
+ FEATURE_PPT_BIT;
+ data->smu_features[GNLD_TDC].smu_feature_id =
+ FEATURE_TDC_BIT;
+ data->smu_features[GNLD_THERMAL].smu_feature_id =
+ FEATURE_THERMAL_BIT;
+ data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
+ FEATURE_GFX_PER_CU_CG_BIT;
+ data->smu_features[GNLD_RM].smu_feature_id =
+ FEATURE_RM_BIT;
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
+ FEATURE_DS_DCEFCLK_BIT;
+ data->smu_features[GNLD_ACDC].smu_feature_id =
+ FEATURE_ACDC_BIT;
+ data->smu_features[GNLD_VR0HOT].smu_feature_id =
+ FEATURE_VR0HOT_BIT;
+ data->smu_features[GNLD_VR1HOT].smu_feature_id =
+ FEATURE_VR1HOT_BIT;
+ data->smu_features[GNLD_FW_CTF].smu_feature_id =
+ FEATURE_FW_CTF_BIT;
+ data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
+ FEATURE_LED_DISPLAY_BIT;
+ data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
+ FEATURE_FAN_CONTROL_BIT;
+ data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
+ data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
+ data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
+ data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
+
+ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
+ data->smu_features[i].smu_feature_bitmap =
+ (uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
+ data->smu_features[i].allowed =
+ ((data->registry_data.disallowed_features >> i) & 1) ?
+ false : true;
+ }
+}
+
+static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int vega12_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
+
+ return 0;
+}
+
+static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ struct vega12_hwmgr *data;
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ data = kzalloc(sizeof(struct vega12_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
+ vega12_set_default_registry_data(hwmgr);
+
+ data->disable_dpm_mask = 0xff;
+ data->workload_mask = 0xff;
+
+ /* need to set voltage control types before EVV patching */
+ data->vddc_control = VEGA12_VOLTAGE_CONTROL_NONE;
+ data->mvdd_control = VEGA12_VOLTAGE_CONTROL_NONE;
+ data->vddci_control = VEGA12_VOLTAGE_CONTROL_NONE;
+
+ data->water_marks_bitmap = 0;
+ data->avfs_exist = false;
+
+ vega12_set_features_platform_caps(hwmgr);
+
+ vega12_init_dpm_defaults(hwmgr);
+
+ /* Parse pptable data read from VBIOS */
+ vega12_set_private_data_based_on_pptable(hwmgr);
+
+ data->is_tlu_enabled = false;
+
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ VEGA12_MAX_HARDWARE_POWERLEVELS;
+ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+ /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+ hwmgr->platform_descriptor.clockStep.engineClock = 500;
+ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+
+ data->total_active_cus = adev->gfx.cu_info.number;
+ /* Setup default Overdrive Fan control settings */
+ data->odn_fan_table.target_fan_speed =
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
+ data->odn_fan_table.target_temperature =
+ hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
+ data->odn_fan_table.min_performance_clock =
+ hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
+ data->odn_fan_table.min_fan_limit =
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
+ hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+
+ return result;
+}
+
+static int vega12_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->low_sclk_interrupt_threshold = 0;
+
+ return 0;
+}
+
+static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr),
+ "Failed to init sclk threshold!",
+ return -EINVAL);
+
+ return 0;
+}
+
+/*
+ * @fn vega12_init_dpm_state
+ * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
+ *
+ * @param dpm_state - the address of the DPM Table to initialize.
+ * @return None.
+ */
+static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
+{
+ dpm_state->soft_min_level = 0xff;
+ dpm_state->soft_max_level = 0xff;
+ dpm_state->hard_min_level = 0xff;
+ dpm_state->hard_max_level = 0xff;
+}
+
+static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
+ PPCLK_e clkID, uint32_t *num_dpm_level)
+{
+ int result;
+ /*
+ * SMU expects the Clock ID to be in the top 16 bits.
+ * The lower 16 bits specify the level; however, 0xFF is a
+ * special argument that returns the total number of levels.
+ */
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
+ "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
+ return -EINVAL);
+
+ result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
+
+ PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
+ "[GetNumberDPMLevel] Number of DPM levels is greater than limit",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
+ "[GetNumberDPMLevel] Number of CLK Levels is zero!",
+ return -EINVAL);
+
+ return result;
+}
+
+static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
+ PPCLK_e clkID, uint32_t index, uint32_t *clock)
+{
+ int result;
+
+ /*
+ * SMU expects the Clock ID to be in the top 16 bits.
+ * Lower 16 bits specify the level.
+ */
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
+ "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
+ return -EINVAL);
+
+ result = vega12_read_arg_from_smc(hwmgr, clock);
+
+ PP_ASSERT_WITH_CODE(*clock != 0,
+ "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!",
+ return -EINVAL);
+
+ return result;
+}
+
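As the comments in the two helpers above note, the PPSMC_MSG_GetDpmFreqByIndex argument packs the clock ID into the upper 16 bits and the level index into the lower 16 bits, with 0xFF as a sentinel that asks for the level count. A hedged sketch of that encoding (the helper name is illustrative only, not part of this patch):

/* Illustrative sketch: pack a clock ID and level index for PPSMC_MSG_GetDpmFreqByIndex. */
static inline uint32_t dpm_freq_arg(PPCLK_e clk_id, uint32_t level)
{
	return ((uint32_t)clk_id << 16) | (level & 0xFFFF);
}

/* dpm_freq_arg(PPCLK_GFXCLK, 0xFF) would request the number of GFXCLK levels. */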
+/*
+ * This function initializes all DPM state tables for the SMU
+ * based on the dependency table.
+ * The dynamic state patching function will then trim these
+ * state tables to the allowed range based on the power policy
+ * or external client requests, such as UVD requests.
+ */
+static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+ uint32_t num_levels, i, clock;
+
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ struct vega12_single_dpm_table *dpm_table;
+
+ memset(&data->dpm_table, 0, sizeof(data->dpm_table));
+
+ /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
+ dpm_table = &(data->dpm_table.soc_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
+ &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_SOCCLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ dpm_table = &(data->dpm_table.gfx_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
+ &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_GFXCLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* Initialize Mclk DPM table based on allow Mclk values */
+ dpm_table = &(data->dpm_table.mem_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
+ &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_UCLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ dpm_table = &(data->dpm_table.eclk_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
+ &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_ECLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ dpm_table = &(data->dpm_table.vclk_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
+ &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_VCLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ dpm_table = &(data->dpm_table.dclk_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
+ &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_DCLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ /* Assume there is no headless Vega12 for now */
+ dpm_table = &(data->dpm_table.dcef_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
+ PPCLK_DCEFCLK, &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_DCEFCLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ dpm_table = &(data->dpm_table.pixel_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
+ PPCLK_PIXCLK, &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_PIXCLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ dpm_table = &(data->dpm_table.display_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
+ PPCLK_DISPCLK, &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_DISPCLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ dpm_table = &(data->dpm_table.phy_table);
+
+ PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
+ PPCLK_PHYCLK, &num_levels) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
+ return -EINVAL);
+
+ dpm_table->count = num_levels;
+
+ for (i = 0; i < num_levels; i++) {
+ PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
+ PPCLK_PHYCLK, i, &clock) == 0,
+ "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
+ return -EINVAL);
+
+ dpm_table->dpm_levels[i].value = clock;
+ }
+
+ vega12_init_dpm_state(&(dpm_table->dpm_state));
+
+ /* save a copy of the default DPM table */
+ memcpy(&(data->golden_dpm_table), &(data->dpm_table),
+ sizeof(struct vega12_dpm_table));
+
+ return 0;
+}
+
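Each clock domain above repeats the same query-count-then-fill pattern. A hypothetical helper (not part of this patch) that factors out one iteration of that pattern, using the two query functions defined earlier, might look like:

/* Hypothetical refactoring sketch: fill one DPM table from SMU-reported levels. */
static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
	uint32_t num_levels, i, clock;

	if (vega12_get_number_dpm_level(hwmgr, clk_id, &num_levels))
		return -EINVAL;

	dpm_table->count = num_levels;
	for (i = 0; i < num_levels; i++) {
		if (vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clock))
			return -EINVAL;
		dpm_table->dpm_levels[i].value = clock;
	}

	vega12_init_dpm_state(&dpm_table->dpm_state);
	return 0;
}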
+#if 0
+static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
+ uint32_t min_level;
+
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+ /* Optimize compute power profile: Use only highest
+ * 2 power levels (if more than 2 are available)
+ */
+ if (dpm_table->count > 2)
+ min_level = dpm_table->count - 2;
+ else if (dpm_table->count == 2)
+ min_level = 1;
+ else
+ min_level = 0;
+
+ hwmgr->default_compute_power_profile.min_sclk =
+ dpm_table->dpm_levels[min_level].value;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+
+ return 0;
+}
+#endif
+
+/**
+ * Initializes the SMC table and uploads it to the SMC.
+ *
+ * @param hwmgr  the address of the powerplay hardware manager.
+ * @return 0 on success; the PPTable upload error code otherwise.
+ */
+static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+
+ result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
+ if (!result) {
+ data->vbios_boot_state.vddc = boot_up_values.usVddc;
+ data->vbios_boot_state.vddci = boot_up_values.usVddci;
+ data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
+ data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
+ data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
+ data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
+ data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
+ data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinDeepSleepDcefclk,
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ }
+
+ memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
+
+ result = vega12_copy_table_to_smc(hwmgr,
+ (uint8_t *)pp_table, TABLE_PPTABLE);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload PPtable!", return result);
+
+ return 0;
+}
+
+static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ int i;
+ uint32_t allowed_features_low = 0, allowed_features_high = 0;
+
+ for (i = 0; i < GNLD_FEATURES_MAX; i++)
+ if (data->smu_features[i].allowed)
+ data->smu_features[i].smu_feature_id > 31 ?
+ (allowed_features_high |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF)) :
+ (allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
+
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
+ "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
+ "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
+ return -1);
+
+ return 0;
+}
+
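The loop above splits each allowed feature's 64-bit smu_feature_bitmap into the two 32-bit words the SMU messages expect. A minimal sketch of that split, assuming SMU_FEATURES_LOW_SHIFT is 0 and SMU_FEATURES_HIGH_SHIFT is 32 (assumed values, not confirmed by this patch):

/* Illustrative sketch: accumulate a 64-bit feature bitmap into low/high 32-bit masks. */
static inline void accumulate_feature_mask(uint64_t bitmap,
		uint32_t *low, uint32_t *high)
{
	*low  |= (uint32_t)(bitmap & 0xFFFFFFFF);
	*high |= (uint32_t)(bitmap >> 32);
}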
+static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint64_t features_enabled;
+ int i;
+ bool enabled;
+
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
+ "[EnableAllSMUFeatures] Failed to enable all smu features!",
+ return -1);
+
+ if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
+ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
+ enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
+ data->smu_features[i].enabled = enabled;
+ data->smu_features[i].supported = enabled;
+ PP_ASSERT(
+ !data->smu_features[i].allowed || enabled,
+ "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
+ }
+ }
+
+ return 0;
+}
+
+static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint64_t features_enabled;
+ int i;
+ bool enabled;
+
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
+ "[DisableAllSMUFeatures] Failed to disable all smu features!",
+ return -1);
+
+ if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
+ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
+ enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
+ data->smu_features[i].enabled = enabled;
+ data->smu_features[i].supported = enabled;
+ }
+ }
+
+ return 0;
+}
+
+static int vega12_odn_initialize_default_settings(
+ struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_NumOfDisplays, 0);
+
+ result = vega12_set_allowed_featuresmask(hwmgr);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
+ return result);
+
+ tmp_result = vega12_init_smc_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to initialize SMC table!",
+ result = tmp_result);
+
+ result = vega12_enable_all_smu_features(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to enable all smu features!",
+ return result);
+
+ tmp_result = vega12_power_control_set_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to power control set level!",
+ result = tmp_result);
+
+ result = vega12_odn_initialize_default_settings(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to power control set level!",
+ return result);
+
+ result = vega12_setup_default_dpm_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to setup default DPM tables!",
+ return result);
+ return result;
+}
+
+static int vega12_patch_boot_state(struct pp_hwmgr *hwmgr,
+ struct pp_hw_power_state *hw_ps)
+{
+ return 0;
+}
+
+static uint32_t vega12_find_lowest_dpm_level(
+ struct vega12_single_dpm_table *table)
+{
+ uint32_t i;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+
+ return i;
+}
+
+static uint32_t vega12_find_highest_dpm_level(
+ struct vega12_single_dpm_table *table)
+{
+ uint32_t i = 0;
+
+ if (table->count <= MAX_REGULAR_DPM_NUMBER) {
+ for (i = table->count; i > 0; i--) {
+ if (table->dpm_levels[i - 1].enabled)
+ return i - 1;
+ }
+ } else {
+ pr_info("DPM Table Has Too Many Entries!");
+ return MAX_REGULAR_DPM_NUMBER - 1;
+ }
+
+ return i;
+}
+
+static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+
+int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DPM_VCE].supported) {
+ PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
+ enable,
+ data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
+ "Attempt to Enable/Disable DPM VCE Failed!",
+ return -1);
+ data->smu_features[GNLD_DPM_VCE].enabled = enable;
+ }
+
+ return 0;
+}
+
+static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t gfx_clk;
+
+ if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
+ return -1;
+
+ if (low)
+ PP_ASSERT_WITH_CODE(
+ vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false) == 0,
+ "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
+ return -1);
+ else
+ PP_ASSERT_WITH_CODE(
+ vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true) == 0,
+ "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
+ return -1);
+
+ return (gfx_clk * 100);
+}
+
+static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t mem_clk;
+
+ if (!data->smu_features[GNLD_DPM_UCLK].enabled)
+ return -1;
+
+ if (low)
+ PP_ASSERT_WITH_CODE(
+ vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false) == 0,
+ "[GetMclks]: fail to get min PPCLK_UCLK\n",
+ return -1);
+ else
+ PP_ASSERT_WITH_CODE(
+ vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true) == 0,
+ "[GetMclks]: fail to get max PPCLK_UCLK\n",
+ return -1);
+
+ return (mem_clk * 100);
+}
+
+static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr,
+ struct pp_gpu_power *query)
+{
+#if 0
+ uint32_t value;
+
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetCurrPkgPwr),
+ "Failed to get current package power!",
+ return -EINVAL);
+
+ vega12_read_arg_from_smc(hwmgr, &value);
+ /* power value is an integer */
+ query->average_gpu_power = value << 8;
+#endif
+ return 0;
+}
+
+static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
+{
+ uint32_t gfx_clk = 0;
+
+ *gfx_freq = 0;
+
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(
+ vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
+ "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
+ return -1);
+
+ *gfx_freq = gfx_clk * 100;
+
+ return 0;
+}
+
+static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
+{
+ uint32_t mem_clk = 0;
+
+ *mclk_freq = 0;
+
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
+ "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(
+ vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
+ "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
+ return -1);
+
+ *mclk_freq = mem_clk * 100;
+
+ return 0;
+}
+
+static int vega12_get_current_activity_percent(
+ struct pp_hwmgr *hwmgr,
+ uint32_t *activity_percent)
+{
+ int ret = 0;
+ uint32_t current_activity = 50;
+
+#if 0
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
+ if (!ret) {
+ ret = vega12_read_arg_from_smc(hwmgr, &current_activity);
+ if (!ret) {
+ if (current_activity > 100) {
+ PP_ASSERT(false,
+ "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
+ current_activity = 100;
+ }
+ } else
+ PP_ASSERT(false,
+ "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!");
+ } else
+ PP_ASSERT(false,
+ "[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
+#endif
+ *activity_percent = current_activity;
+
+ return ret;
+}
+
+static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ void *value, int *size)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = vega12_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
+ if (!ret)
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = vega12_get_current_mclk_freq(hwmgr, (uint32_t *)value);
+ if (!ret)
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = vega12_get_current_activity_percent(hwmgr, (uint32_t *)value);
+ if (!ret)
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ *((uint32_t *)value) = vega12_thermal_get_temperature(hwmgr);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ if (*size < sizeof(struct pp_gpu_power))
+ ret = -EINVAL;
+ else {
+ *size = sizeof(struct pp_gpu_power);
+ ret = vega12_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
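The read_sensor switch above sets *size to the number of bytes written and, for the GPU power sensor, requires the caller to pass in a buffer at least sizeof(struct pp_gpu_power) long. A hedged usage sketch of that size contract (direct call shown for illustration only; the real caller is the powerplay sensor path):

/* Illustrative sketch: querying the GPU power sensor through the read_sensor hook. */
struct pp_gpu_power power;
int size = sizeof(power);

if (!vega12_read_sensor(hwmgr, AMDGPU_PP_SENSOR_GPU_POWER, &power, &size))
	/* power.average_gpu_power holds the reading (currently stubbed out above) */;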
+static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
+ bool has_disp)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled)
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUclkFastSwitch,
+ has_disp ? 0 : 1);
+
+ return 0;
+}
+
+int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
+ struct pp_display_clock_request *clock_req)
+{
+ int result = 0;
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ enum amd_pp_clock_type clk_type = clock_req->clock_type;
+ uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+ PPCLK_e clk_select = 0;
+ uint32_t clk_request = 0;
+
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ switch (clk_type) {
+ case amd_pp_dcef_clock:
+ clk_freq = clock_req->clock_freq_in_khz / 100;
+ clk_select = PPCLK_DCEFCLK;
+ break;
+ case amd_pp_disp_clock:
+ clk_select = PPCLK_DISPCLK;
+ break;
+ case amd_pp_pixel_clock:
+ clk_select = PPCLK_PIXCLK;
+ break;
+ case amd_pp_phy_clock:
+ clk_select = PPCLK_PHYCLK;
+ break;
+ default:
+ pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
+ result = -1;
+ break;
+ }
+
+ if (!result) {
+ clk_request = (clk_select << 16) | clk_freq;
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinByFreq,
+ clk_request);
+ }
+ }
+
+ return result;
+}
+
+static int vega12_notify_smc_display_config_after_ps_adjustment(
+ struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t num_active_disps = 0;
+ struct cgs_display_info info = {0};
+ struct PP_Clocks min_clocks = {0};
+ struct pp_display_clock_request clock_req;
+ uint32_t clk_request;
+
+ info.mode_info = NULL;
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ num_active_disps = info.display_count;
+ if (num_active_disps > 1)
+ vega12_notify_smc_display_change(hwmgr, false);
+ else
+ vega12_notify_smc_display_change(hwmgr, true);
+
+ min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
+ min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
+ min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+
+ if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
+ clock_req.clock_type = amd_pp_dcef_clock;
+ clock_req.clock_freq_in_khz = min_clocks.dcefClock;
+ if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
+ if (data->smu_features[GNLD_DS_DCEFCLK].supported)
+ PP_ASSERT_WITH_CODE(
+ !smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
+ min_clocks.dcefClockInSR / 100),
+ "Attempt to set divider for DCEFCLK Failed!",
+ return -1);
+ } else {
+ pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
+ }
+ }
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
+ "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
+ return -1);
+ data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
+ }
+
+ return 0;
+}
+
+static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->smc_state_table.gfx_boot_level =
+ data->smc_state_table.gfx_max_level =
+ vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+ data->smc_state_table.mem_boot_level =
+ data->smc_state_table.mem_max_level =
+ vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ "Failed to upload boot level to highest!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ "Failed to upload dpm max level to highest!",
+ return -1);
+
+ return 0;
+}
+
+static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->smc_state_table.gfx_boot_level =
+ data->smc_state_table.gfx_max_level =
+ vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+ data->smc_state_table.mem_boot_level =
+ data->smc_state_table.mem_max_level =
+ vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ "Failed to upload boot level to highest!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ "Failed to upload dpm max level to highest!",
+ return -1);
+
+ return 0;
+
+}
+
+static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->smc_state_table.gfx_boot_level =
+ vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+ data->smc_state_table.gfx_max_level =
+ vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+ data->smc_state_table.mem_boot_level =
+ vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+ data->smc_state_table.mem_max_level =
+ vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ "Failed to upload DPM Bootup Levels!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ "Failed to upload DPM Max Levels!",
+ return -1);
+ return 0;
+}
+
+#if 0
+static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
+{
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+
+ if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+ table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
+ table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+ *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
+ *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
+ *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+ *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
+ *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+ }
+ return 0;
+}
+#endif
+
+static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+ switch (mode) {
+ case AMD_FAN_CTRL_NONE:
+ break;
+ case AMD_FAN_CTRL_MANUAL:
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega12_fan_ctrl_stop_smc_fan_control(hwmgr);
+ break;
+ case AMD_FAN_CTRL_AUTO:
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega12_fan_ctrl_start_smc_fan_control(hwmgr);
+ break;
+ default:
+ break;
+ }
+}
+
+static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+#if 0
+ uint32_t sclk_mask = 0;
+ uint32_t mclk_mask = 0;
+ uint32_t soc_mask = 0;
+#endif
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = vega12_force_dpm_highest(hwmgr);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = vega12_force_dpm_lowest(hwmgr);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = vega12_unforce_dpm_levels(hwmgr);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+#if 0
+ ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
+ if (ret)
+ return ret;
+ vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
+ vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
+#endif
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+#if 0
+ if (!ret) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
+ }
+#endif
+ return ret;
+}
+
+static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
+ return AMD_FAN_CTRL_MANUAL;
+ else
+ return AMD_FAN_CTRL_AUTO;
+}
+
+static int vega12_get_dal_power_level(struct pp_hwmgr *hwmgr,
+ struct amd_pp_simple_clock_info *info)
+{
+#if 0
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)hwmgr->pptable;
+ struct phm_clock_and_voltage_limits *max_limits =
+ &table_info->max_clock_voltage_on_ac;
+
+ info->engine_max_clock = max_limits->sclk;
+ info->memory_max_clock = max_limits->mclk;
+#endif
+ return 0;
+}
+
+static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
+ uint32_t *clock,
+ PPCLK_e clock_select,
+ bool max)
+{
+ int result;
+ *clock = 0;
+
+ if (max) {
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
+ "[GetClockRanges] Failed to get max clock from SMC!",
+ return -1);
+ result = vega12_read_arg_from_smc(hwmgr, clock);
+ } else {
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
+ "[GetClockRanges] Failed to get min clock from SMC!",
+ return -1);
+ result = vega12_read_arg_from_smc(hwmgr, clock);
+ }
+
+ return result;
+}
+
+static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t ucount;
+ int i;
+ struct vega12_single_dpm_table *dpm_table;
+
+ if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
+ return -1;
+
+ dpm_table = &(data->dpm_table.gfx_table);
+ ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
+ VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 100;
+
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ clocks->num_levels = ucount;
+
+ return 0;
+}
+
+static uint32_t vega12_get_mem_latency(struct pp_hwmgr *hwmgr,
+ uint32_t clock)
+{
+ return 25;
+}
+
+static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t ucount;
+ int i;
+ struct vega12_single_dpm_table *dpm_table;
+ if (!data->smu_features[GNLD_DPM_UCLK].enabled)
+ return -1;
+
+ dpm_table = &(data->dpm_table.mem_table);
+ ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
+ VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 100;
+
+ clocks->data[i].latency_in_us =
+ data->mclk_latency_table.entries[i].latency =
+ vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
+ }
+
+ clocks->num_levels = data->mclk_latency_table.count = ucount;
+
+ return 0;
+}
+
+static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t ucount;
+ int i;
+ struct vega12_single_dpm_table *dpm_table;
+
+ if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
+ return -1;
+
+
+ dpm_table = &(data->dpm_table.dcef_table);
+ ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
+ VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 100;
+
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ clocks->num_levels = ucount;
+
+ return 0;
+}
+
+static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t ucount;
+ int i;
+ struct vega12_single_dpm_table *dpm_table;
+
+ if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
+ return -1;
+
+
+ dpm_table = &(data->dpm_table.soc_table);
+ ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
+ VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
+
+ for (i = 0; i < ucount; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 100;
+
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ clocks->num_levels = ucount;
+
+ return 0;
+
+}
+
+static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ int ret;
+
+ switch (type) {
+ case amd_pp_sys_clock:
+ ret = vega12_get_sclks(hwmgr, clocks);
+ break;
+ case amd_pp_mem_clock:
+ ret = vega12_get_memclocks(hwmgr, clocks);
+ break;
+ case amd_pp_dcef_clock:
+ ret = vega12_get_dcefclocks(hwmgr, clocks);
+ break;
+ case amd_pp_soc_clock:
+ ret = vega12_get_socclocks(hwmgr, clocks);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+{
+ clocks->num_levels = 0;
+
+ return 0;
+}
+
+static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ Watermarks_t *table = &(data->smc_state_table.water_marks_table);
+ int result = 0;
+ uint32_t i;
+
+ if (!data->registry_data.disable_water_mark &&
+ data->smu_features[GNLD_DPM_DCEFCLK].supported &&
+ data->smu_features[GNLD_DPM_SOCCLK].supported) {
+ for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+ table->WatermarkRow[WM_DCEFCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
+ 100);
+ table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+ 100);
+ table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
+ 100);
+ table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
+ 100);
+ table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
+ wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
+ 100);
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
+ 100);
+ table->WatermarkRow[WM_SOCCLK][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
+ 100);
+ table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
+ 100);
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+ wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+ }
+ data->water_marks_bitmap |= WaterMarksExist;
+ data->water_marks_bitmap &= ~WaterMarksLoaded;
+ }
+
+ return result;
+}
+
+static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ AMD_DPM_FORCED_LEVEL_LOW |
+ AMD_DPM_FORCED_LEVEL_HIGH))
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
+ data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ "Failed to upload boot level to lowest!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ "Failed to upload dpm max level to highest!",
+ return -EINVAL);
+ break;
+
+ case PP_MCLK:
+ data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
+ data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ "Failed to upload boot level to lowest!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ "Failed to upload dpm max level to highest!",
+ return -EINVAL);
+
+ break;
+
+ case PP_PCIE:
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
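For reference, the ffs()/fls() handling in vega12_force_clock_level() above maps the caller-supplied level bitmask to a boot (lowest set bit) and max (highest set bit) DPM level index. Below is a small, standalone sketch of that arithmetic using the GCC builtins instead of the kernel helpers; it is an illustration only, not part of this patch.

#include <stdio.h>
#include <stdint.h>

/* Mirror of the mask handling in vega12_force_clock_level():
 * lowest set bit selects the boot level, highest set bit the max level. */
static void mask_to_levels(uint32_t mask, uint32_t *boot, uint32_t *max)
{
	*boot = mask ? (uint32_t)(__builtin_ffs(mask) - 1) : 0;
	*max  = mask ? (uint32_t)(31 - __builtin_clz(mask)) : 0; /* fls(mask) - 1 */
}

int main(void)
{
	uint32_t boot, max;

	mask_to_levels(0x06, &boot, &max);	/* levels 1 and 2 requested */
	printf("boot level %u, max level %u\n", boot, max);	/* prints 1 and 2 */
	return 0;
}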
+
+static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+{
+ int i, now, size = 0;
+ struct pp_clock_levels_with_latency clocks;
+
+ switch (type) {
+ case PP_SCLK:
+ PP_ASSERT_WITH_CODE(
+ vega12_get_current_gfx_clk_freq(hwmgr, &now) == 0,
+ "Attempt to get current gfx clk Failed!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(
+ vega12_get_sclks(hwmgr, &clocks) == 0,
+ "Attempt to get gfx clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 100,
+ (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ break;
+
+ case PP_MCLK:
+ PP_ASSERT_WITH_CODE(
+ vega12_get_current_mclk_freq(hwmgr, &now) == 0,
+ "Attempt to get current mclk freq Failed!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(
+ vega12_get_memclocks(hwmgr, &clocks) == 0,
+ "Attempt to get memory clk levels Failed!",
+ return -1);
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 100,
+ (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ break;
+
+ case PP_PCIE:
+ break;
+
+ default:
+ break;
+ }
+ return size;
+}
+
+static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int result = 0;
+ uint32_t num_turned_on_displays = 1;
+ Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
+ struct cgs_display_info info = {0};
+
+ if ((data->water_marks_bitmap & WaterMarksExist) &&
+ !(data->water_marks_bitmap & WaterMarksLoaded)) {
+ result = vega12_copy_table_to_smc(hwmgr,
+ (uint8_t *)wm_table, TABLE_WATERMARKS);
+ PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
+ data->water_marks_bitmap |= WaterMarksLoaded;
+ }
+
+ if ((data->water_marks_bitmap & WaterMarksExist) &&
+ data->smu_features[GNLD_DPM_DCEFCLK].supported &&
+ data->smu_features[GNLD_DPM_SOCCLK].supported) {
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ num_turned_on_displays = info.display_count;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
+ }
+
+ return result;
+}
+
+int vega12_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DPM_UVD].supported) {
+ PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
+ enable,
+ data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
+ "Attempt to Enable/Disable DPM UVD Failed!",
+ return -1);
+ data->smu_features[GNLD_DPM_UVD].enabled = enable;
+ }
+
+ return 0;
+}
+
+static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->vce_power_gated = bgate;
+ vega12_enable_disable_vce_dpm(hwmgr, !bgate);
+}
+
+static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = bgate;
+ vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
+}
+
+static bool
+vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ bool is_update_required = false;
+ struct cgs_display_info info = {0, 0, NULL};
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ is_update_required = true;
+
+ if (data->registry_data.gfx_clk_deep_sleep_support) {
+ if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
+ is_update_required = true;
+ }
+
+ return is_update_required;
+}
+
+static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ tmp_result = vega12_disable_all_smu_features(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable all smu features!", result = tmp_result);
+
+ return result;
+}
+
+static int vega12_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int result;
+
+ result = vega12_disable_dpm_tasks(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "[disable_dpm_tasks] Failed to disable DPM!",
+ );
+ data->water_marks_bitmap &= ~(WaterMarksLoaded);
+
+ return result;
+}
+
+#if 0
+static void vega12_find_min_clock_index(struct pp_hwmgr *hwmgr,
+ uint32_t *sclk_idx, uint32_t *mclk_idx,
+ uint32_t min_sclk, uint32_t min_mclk)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_dpm_table *dpm_table = &(data->dpm_table);
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->gfx_table.count; i++) {
+ if (dpm_table->gfx_table.dpm_levels[i].enabled &&
+ dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
+ *sclk_idx = i;
+ break;
+ }
+ }
+
+ for (i = 0; i < dpm_table->mem_table.count; i++) {
+ if (dpm_table->mem_table.dpm_levels[i].enabled &&
+ dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
+ *mclk_idx = i;
+ break;
+ }
+ }
+}
+#endif
+
+#if 0
+static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ return 0;
+}
+
+static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
+ struct vega12_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.gfx_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int vega12_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ return 0;
+}
+
+static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+ struct vega12_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mem_table);
+ int value;
+
+ value = (mclk_table->dpm_levels
+ [mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels
+ [golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels
+ [golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int vega12_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ return 0;
+}
+#endif
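The disabled vega12_get_sclk_od()/vega12_get_mclk_od() helpers above compute overdrive as the percentage by which the current top DPM level exceeds the golden (default) top level. A standalone sketch of that formula with hypothetical clock values, for illustration only:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the disabled vega12_get_sclk_od() above: overdrive is
 * the percentage by which the top DPM level exceeds the golden top level. */
static int sclk_od_percent(uint32_t top, uint32_t golden_top)
{
	return (int)((top - golden_top) * 100 / golden_top);
}

int main(void)
{
	/* Hypothetical values: default top gfxclk 1500 MHz, raised to 1575 MHz. */
	printf("%d%%\n", sclk_od_percent(1575, 1500));	/* prints 5% */
	return 0;
}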
+
+static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size)
+{
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSystemVirtualDramAddrHigh,
+ virtual_addr_hi);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSystemVirtualDramAddrLow,
+ virtual_addr_low);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramLogSetDramAddrHigh,
+ mc_addr_hi);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramLogSetDramAddrLow,
+ mc_addr_low);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramLogSetDramSize,
+ size);
+ return 0;
+}
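vega12_notify_cac_buffer_info() takes the system-virtual and MC addresses already split into 32-bit halves. A minimal sketch of how a caller would derive those halves from a 64-bit address; the address value is hypothetical and the split itself is the only point being illustrated.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical 64-bit address of the CAC/DRAM log buffer. */
	uint64_t virtual_addr = 0x0000123456789000ULL;

	/* The hwmgr interface above takes the halves separately, so a caller
	 * splits them before invoking notify_cac_buffer_info(). */
	uint32_t hi = (uint32_t)(virtual_addr >> 32);
	uint32_t lo = (uint32_t)(virtual_addr & 0xffffffff);

	printf("hi 0x%08x lo 0x%08x\n", hi, lo);
	return 0;
}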
+
+static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *thermal_data)
+{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+
+ memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
+
+ thermal_data->max = pptable_information->us_software_shutdown_temp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
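vega12_get_thermal_temperature_range() scales the pptable's software-shutdown temperature (whole degrees C) into the units used by the thermal interface. Assuming PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (millidegrees), the conversion looks like the sketch below; both the constant value and the 105 C figure are assumptions for illustration.

#include <stdio.h>

/* Assumed value: the powerplay thermal interface works in millidegrees C. */
#define PP_TEMPERATURE_UNITS_PER_CENTIGRADES 1000

int main(void)
{
	unsigned short us_software_shutdown_temp = 105;	/* hypothetical 105 C */

	/* Same scaling as thermal_data->max above. */
	printf("max = %d\n",
	       us_software_shutdown_temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	return 0;
}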
+
+static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
+ .backend_init = vega12_hwmgr_backend_init,
+ .backend_fini = vega12_hwmgr_backend_fini,
+ .asic_setup = vega12_setup_asic_task,
+ .dynamic_state_management_enable = vega12_enable_dpm_tasks,
+ .dynamic_state_management_disable = vega12_disable_dpm_tasks,
+ .patch_boot_state = vega12_patch_boot_state,
+ .get_sclk = vega12_dpm_get_sclk,
+ .get_mclk = vega12_dpm_get_mclk,
+ .notify_smc_display_config_after_ps_adjustment =
+ vega12_notify_smc_display_config_after_ps_adjustment,
+ .force_dpm_level = vega12_dpm_force_dpm_level,
+ .stop_thermal_controller = vega12_thermal_stop_thermal_controller,
+ .get_fan_speed_info = vega12_fan_ctrl_get_fan_speed_info,
+ .reset_fan_speed_to_default =
+ vega12_fan_ctrl_reset_fan_speed_to_default,
+ .get_fan_speed_rpm = vega12_fan_ctrl_get_fan_speed_rpm,
+ .set_fan_control_mode = vega12_set_fan_control_mode,
+ .get_fan_control_mode = vega12_get_fan_control_mode,
+ .read_sensor = vega12_read_sensor,
+ .get_dal_power_level = vega12_get_dal_power_level,
+ .get_clock_by_type_with_latency = vega12_get_clock_by_type_with_latency,
+ .get_clock_by_type_with_voltage = vega12_get_clock_by_type_with_voltage,
+ .set_watermarks_for_clocks_ranges = vega12_set_watermarks_for_clocks_ranges,
+ .display_clock_voltage_request = vega12_display_clock_voltage_request,
+ .force_clock_level = vega12_force_clock_level,
+ .print_clock_levels = vega12_print_clock_levels,
+ .display_config_changed = vega12_display_configuration_changed_task,
+ .powergate_uvd = vega12_power_gate_uvd,
+ .powergate_vce = vega12_power_gate_vce,
+ .check_smc_update_required_for_display_configuration =
+ vega12_check_smc_update_required_for_display_configuration,
+ .power_off_asic = vega12_power_off_asic,
+ .disable_smc_firmware_ctf = vega12_thermal_disable_alert,
+#if 0
+ .set_power_profile_state = vega12_set_power_profile_state,
+ .get_sclk_od = vega12_get_sclk_od,
+ .set_sclk_od = vega12_set_sclk_od,
+ .get_mclk_od = vega12_get_mclk_od,
+ .set_mclk_od = vega12_set_mclk_od,
+#endif
+ .notify_cac_buffer_info = vega12_notify_cac_buffer_info,
+ .get_thermal_temperature_range = vega12_get_thermal_temperature_range,
+ .register_irq_handlers = smu9_register_irq_handlers,
+ .start_thermal_controller = vega12_start_thermal_controller,
+};
+
+int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
+{
+ hwmgr->hwmgr_func = &vega12_hwmgr_funcs;
+ hwmgr->pptable_func = &vega12_pptable_funcs;
+
+ return 0;
+}
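vega12_hwmgr_init() only installs the two function tables; the rest of powerplay then dispatches through pp_hwmgr_func pointers without caring which ASIC backend filled them in. A toy, self-contained version of that ops-table pattern is sketched below; the structs mirror the idea only, not the real pp_hwmgr_func layout.

#include <stdio.h>

struct hwmgr;

struct hwmgr_funcs {
	int (*backend_init)(struct hwmgr *hwmgr);
};

struct hwmgr {
	const struct hwmgr_funcs *funcs;
};

static int toy_backend_init(struct hwmgr *hwmgr)
{
	(void)hwmgr;
	printf("backend init\n");
	return 0;
}

static const struct hwmgr_funcs toy_funcs = {
	.backend_init = toy_backend_init,
};

int main(void)
{
	struct hwmgr hw = { .funcs = &toy_funcs };

	/* Core code dispatches blindly through the installed ops table. */
	return hw.funcs->backend_init(&hw);
}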
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
new file mode 100644
index 0000000..bc98b1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _VEGA12_HWMGR_H_
+#define _VEGA12_HWMGR_H_
+
+#include "hwmgr.h"
+#include "vega12/smu9_driver_if.h"
+#include "ppatomfwctrl.h"
+
+#define VEGA12_MAX_HARDWARE_POWERLEVELS 2
+
+#define WaterMarksExist 1
+#define WaterMarksLoaded 2
+
+#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 8
+#define VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS 8
+#define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8
+#define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS 4
+
+enum
+{
+ GNLD_DPM_PREFETCHER = 0,
+ GNLD_DPM_GFXCLK,
+ GNLD_DPM_UCLK,
+ GNLD_DPM_SOCCLK,
+ GNLD_DPM_UVD,
+ GNLD_DPM_VCE,
+ GNLD_ULV,
+ GNLD_DPM_MP0CLK,
+ GNLD_DPM_LINK,
+ GNLD_DPM_DCEFCLK,
+ GNLD_DS_GFXCLK,
+ GNLD_DS_SOCCLK,
+ GNLD_DS_LCLK,
+ GNLD_PPT,
+ GNLD_TDC,
+ GNLD_THERMAL,
+ GNLD_GFX_PER_CU_CG,
+ GNLD_RM,
+ GNLD_DS_DCEFCLK,
+ GNLD_ACDC,
+ GNLD_VR0HOT,
+ GNLD_VR1HOT,
+ GNLD_FW_CTF,
+ GNLD_LED_DISPLAY,
+ GNLD_FAN_CONTROL,
+ GNLD_DIDT,
+ GNLD_GFXOFF,
+ GNLD_CG,
+ GNLD_ACG,
+
+ GNLD_FEATURES_MAX
+};
+
+
+#define GNLD_DPM_MAX (GNLD_DPM_DCEFCLK + 1)
+
+#define SMC_DPM_FEATURES 0x30F
+
+struct smu_features {
+ bool supported;
+ bool enabled;
+ bool allowed;
+ uint32_t smu_feature_id;
+ uint64_t smu_feature_bitmap;
+};
+
+struct vega12_dpm_level {
+ bool enabled;
+ uint32_t value;
+ uint32_t param1;
+};
+
+#define VEGA12_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 16
+#define MAX_PCIE_CONF 2
+#define VEGA12_MINIMUM_ENGINE_CLOCK 2500
+
+struct vega12_dpm_state {
+ uint32_t soft_min_level;
+ uint32_t soft_max_level;
+ uint32_t hard_min_level;
+ uint32_t hard_max_level;
+};
+
+struct vega12_single_dpm_table {
+ uint32_t count;
+ struct vega12_dpm_state dpm_state;
+ struct vega12_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega12_odn_dpm_control {
+ uint32_t count;
+ uint32_t entries[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega12_pcie_table {
+ uint16_t count;
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint32_t lclk[MAX_PCIE_CONF];
+};
+
+struct vega12_dpm_table {
+ struct vega12_single_dpm_table soc_table;
+ struct vega12_single_dpm_table gfx_table;
+ struct vega12_single_dpm_table mem_table;
+ struct vega12_single_dpm_table eclk_table;
+ struct vega12_single_dpm_table vclk_table;
+ struct vega12_single_dpm_table dclk_table;
+ struct vega12_single_dpm_table dcef_table;
+ struct vega12_single_dpm_table pixel_table;
+ struct vega12_single_dpm_table display_table;
+ struct vega12_single_dpm_table phy_table;
+ struct vega12_pcie_table pcie_table;
+};
+
+#define VEGA12_MAX_LEAKAGE_COUNT 8
+struct vega12_leakage_voltage {
+ uint16_t count;
+ uint16_t leakage_id[VEGA12_MAX_LEAKAGE_COUNT];
+ uint16_t actual_voltage[VEGA12_MAX_LEAKAGE_COUNT];
+};
+
+struct vega12_display_timing {
+ uint32_t min_clock_in_sr;
+ uint32_t num_existing_displays;
+};
+
+struct vega12_dpmlevel_enable_mask {
+ uint32_t uvd_dpm_enable_mask;
+ uint32_t vce_dpm_enable_mask;
+ uint32_t samu_dpm_enable_mask;
+ uint32_t sclk_dpm_enable_mask;
+ uint32_t mclk_dpm_enable_mask;
+};
+
+struct vega12_vbios_boot_state {
+ bool bsoc_vddc_lock;
+ uint8_t uc_cooling_id;
+ uint16_t vddc;
+ uint16_t vddci;
+ uint16_t mvddc;
+ uint16_t vdd_gfx;
+ uint32_t gfx_clock;
+ uint32_t mem_clock;
+ uint32_t soc_clock;
+ uint32_t dcef_clock;
+};
+
+#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
+#define DPMTABLE_UPDATE_SCLK 0x00000004
+#define DPMTABLE_UPDATE_MCLK 0x00000008
+#define DPMTABLE_OD_UPDATE_VDDC 0x00000010
+
+struct vega12_smc_state_table {
+ uint32_t soc_boot_level;
+ uint32_t gfx_boot_level;
+ uint32_t dcef_boot_level;
+ uint32_t mem_boot_level;
+ uint32_t uvd_boot_level;
+ uint32_t vce_boot_level;
+ uint32_t gfx_max_level;
+ uint32_t mem_max_level;
+ uint8_t vr_hot_gpio;
+ uint8_t ac_dc_gpio;
+ uint8_t therm_out_gpio;
+ uint8_t therm_out_polarity;
+ uint8_t therm_out_mode;
+ PPTable_t pp_table;
+ Watermarks_t water_marks_table;
+ AvfsDebugTable_t avfs_debug_table;
+ AvfsFuseOverride_t avfs_fuse_override_table;
+ SmuMetrics_t smu_metrics;
+ DriverSmuConfig_t driver_smu_config;
+ DpmActivityMonitorCoeffInt_t dpm_activity_monitor_coeffint;
+ OverDriveTable_t overdrive_table;
+};
+
+struct vega12_mclk_latency_entries {
+ uint32_t frequency;
+ uint32_t latency;
+};
+
+struct vega12_mclk_latency_table {
+ uint32_t count;
+ struct vega12_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega12_registry_data {
+ uint64_t disallowed_features;
+ uint8_t ac_dc_switch_gpio_support;
+ uint8_t acg_loop_support;
+ uint8_t clock_stretcher_support;
+ uint8_t db_ramping_support;
+ uint8_t didt_mode;
+ uint8_t didt_support;
+ uint8_t edc_didt_support;
+ uint8_t force_dpm_high;
+ uint8_t fuzzy_fan_control_support;
+ uint8_t mclk_dpm_key_disabled;
+ uint8_t od_state_in_dc_support;
+ uint8_t pcie_lane_override;
+ uint8_t pcie_speed_override;
+ uint32_t pcie_clock_override;
+ uint8_t pcie_dpm_key_disabled;
+ uint8_t dcefclk_dpm_key_disabled;
+ uint8_t prefetcher_dpm_key_disabled;
+ uint8_t quick_transition_support;
+ uint8_t regulator_hot_gpio_support;
+ uint8_t master_deep_sleep_support;
+ uint8_t gfx_clk_deep_sleep_support;
+ uint8_t sclk_deep_sleep_support;
+ uint8_t lclk_deep_sleep_support;
+ uint8_t dce_fclk_deep_sleep_support;
+ uint8_t sclk_dpm_key_disabled;
+ uint8_t sclk_throttle_low_notification;
+ uint8_t skip_baco_hardware;
+ uint8_t socclk_dpm_key_disabled;
+ uint8_t sq_ramping_support;
+ uint8_t tcp_ramping_support;
+ uint8_t td_ramping_support;
+ uint8_t dbr_ramping_support;
+ uint8_t gc_didt_support;
+ uint8_t psm_didt_support;
+ uint8_t thermal_support;
+ uint8_t fw_ctf_enabled;
+ uint8_t led_dpm_enabled;
+ uint8_t fan_control_support;
+ uint8_t ulv_support;
+ uint8_t odn_feature_enable;
+ uint8_t disable_water_mark;
+ uint8_t disable_workload_policy;
+ uint32_t force_workload_policy_mask;
+ uint8_t disable_3d_fs_detection;
+ uint8_t disable_pp_tuning;
+ uint8_t disable_xlpp_tuning;
+ uint32_t perf_ui_tuning_profile_turbo;
+ uint32_t perf_ui_tuning_profile_powerSave;
+ uint32_t perf_ui_tuning_profile_xl;
+ uint16_t zrpm_stop_temp;
+ uint16_t zrpm_start_temp;
+ uint32_t stable_pstate_sclk_dpm_percentage;
+ uint8_t fps_support;
+ uint8_t vr0hot;
+ uint8_t vr1hot;
+ uint8_t disable_auto_wattman;
+ uint32_t auto_wattman_debug;
+ uint32_t auto_wattman_sample_period;
+ uint8_t auto_wattman_threshold;
+ uint8_t log_avfs_param;
+ uint8_t enable_enginess;
+ uint8_t custom_fan_support;
+ uint8_t disable_pcc_limit_control;
+};
+
+struct vega12_odn_clock_voltage_dependency_table {
+ uint32_t count;
+ struct phm_ppt_v1_clock_voltage_dependency_record
+ entries[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega12_odn_dpm_table {
+ struct vega12_odn_dpm_control control_gfxclk_state;
+ struct vega12_odn_dpm_control control_memclk_state;
+ struct phm_odn_clock_levels odn_core_clock_dpm_levels;
+ struct phm_odn_clock_levels odn_memory_clock_dpm_levels;
+ struct vega12_odn_clock_voltage_dependency_table vdd_dependency_on_sclk;
+ struct vega12_odn_clock_voltage_dependency_table vdd_dependency_on_mclk;
+ struct vega12_odn_clock_voltage_dependency_table vdd_dependency_on_socclk;
+ uint32_t odn_mclk_min_limit;
+};
+
+struct vega12_odn_fan_table {
+ uint32_t target_fan_speed;
+ uint32_t target_temperature;
+ uint32_t min_performance_clock;
+ uint32_t min_fan_limit;
+ bool force_fan_pwm;
+};
+
+struct vega12_hwmgr {
+ struct vega12_dpm_table dpm_table;
+ struct vega12_dpm_table golden_dpm_table;
+ struct vega12_registry_data registry_data;
+ struct vega12_vbios_boot_state vbios_boot_state;
+ struct vega12_mclk_latency_table mclk_latency_table;
+
+ struct vega12_leakage_voltage vddc_leakage;
+
+ uint32_t vddc_control;
+ struct pp_atomfwctrl_voltage_table vddc_voltage_table;
+ uint32_t mvdd_control;
+ struct pp_atomfwctrl_voltage_table mvdd_voltage_table;
+ uint32_t vddci_control;
+ struct pp_atomfwctrl_voltage_table vddci_voltage_table;
+
+ uint32_t active_auto_throttle_sources;
+ uint32_t water_marks_bitmap;
+
+ struct vega12_odn_dpm_table odn_dpm_table;
+ struct vega12_odn_fan_table odn_fan_table;
+
+ /* ---- General data ---- */
+ uint8_t need_update_dpm_table;
+
+ bool cac_enabled;
+ bool battery_state;
+ bool is_tlu_enabled;
+ bool avfs_exist;
+
+ uint32_t low_sclk_interrupt_threshold;
+
+ uint32_t total_active_cus;
+
+ struct vega12_display_timing display_timing;
+
+ /* ---- Vega12 Dyn Register Settings ---- */
+
+ uint32_t debug_settings;
+ uint32_t lowest_uclk_reserved_for_ulv;
+ uint32_t gfxclk_average_alpha;
+ uint32_t socclk_average_alpha;
+ uint32_t uclk_average_alpha;
+ uint32_t gfx_activity_average_alpha;
+ uint32_t display_voltage_mode;
+ uint32_t dcef_clk_quad_eqn_a;
+ uint32_t dcef_clk_quad_eqn_b;
+ uint32_t dcef_clk_quad_eqn_c;
+ uint32_t disp_clk_quad_eqn_a;
+ uint32_t disp_clk_quad_eqn_b;
+ uint32_t disp_clk_quad_eqn_c;
+ uint32_t pixel_clk_quad_eqn_a;
+ uint32_t pixel_clk_quad_eqn_b;
+ uint32_t pixel_clk_quad_eqn_c;
+ uint32_t phy_clk_quad_eqn_a;
+ uint32_t phy_clk_quad_eqn_b;
+ uint32_t phy_clk_quad_eqn_c;
+
+ /* ---- Thermal Temperature Setting ---- */
+ struct vega12_dpmlevel_enable_mask dpm_level_enable_mask;
+
+ /* ---- Power Gating States ---- */
+ bool uvd_power_gated;
+ bool vce_power_gated;
+ bool samu_power_gated;
+ bool need_long_memory_training;
+
+ /* Internal settings to apply the application power optimization parameters */
+ bool apply_optimized_settings;
+ uint32_t disable_dpm_mask;
+
+ /* ---- Overdrive next setting ---- */
+ uint32_t apply_overdrive_next_settings_mask;
+
+ /* ---- Workload Mask ---- */
+ uint32_t workload_mask;
+
+ /* ---- SMU9 ---- */
+ uint32_t smu_version;
+ struct smu_features smu_features[GNLD_FEATURES_MAX];
+ struct vega12_smc_state_table smc_state_table;
+};
+
+#define VEGA12_DPM2_NEAR_TDP_DEC 10
+#define VEGA12_DPM2_ABOVE_SAFE_INC 5
+#define VEGA12_DPM2_BELOW_SAFE_INC 20
+
+#define VEGA12_DPM2_LTA_WINDOW_SIZE 7
+
+#define VEGA12_DPM2_LTS_TRUNCATE 0
+
+#define VEGA12_DPM2_TDP_SAFE_LIMIT_PERCENT 80
+
+#define VEGA12_DPM2_MAXPS_PERCENT_M 90
+#define VEGA12_DPM2_MAXPS_PERCENT_H 90
+
+#define VEGA12_DPM2_PWREFFICIENCYRATIO_MARGIN 50
+
+#define VEGA12_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
+#define VEGA12_DPM2_SQ_RAMP_MIN_POWER 0x12
+#define VEGA12_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
+#define VEGA12_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
+#define VEGA12_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF
+
+#define VEGA12_VOLTAGE_CONTROL_NONE 0x0
+#define VEGA12_VOLTAGE_CONTROL_BY_GPIO 0x1
+#define VEGA12_VOLTAGE_CONTROL_BY_SVID2 0x2
+#define VEGA12_VOLTAGE_CONTROL_MERGED 0x3
+/* To convert to Q8.8 format for firmware */
+#define VEGA12_Q88_FORMAT_CONVERSION_UNIT 256
+
+#define VEGA12_UNUSED_GPIO_PIN 0x7F
+
+#define VEGA12_THERM_OUT_MODE_DISABLE 0x0
+#define VEGA12_THERM_OUT_MODE_THERM_ONLY 0x1
+#define VEGA12_THERM_OUT_MODE_THERM_VRHOT 0x2
+
+#define PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT 0xffffffff
+#define PPREGKEY_VEGA12QUADRATICEQUATION_DFLT 0xffffffff
+
+#define PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
+#define PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
+#define PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
+#define PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
+#define PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT 0xffffffff
+
+#define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3
+#define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3
+#define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2
+
+int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+
+#endif /* _VEGA12_HWMGR_H_ */
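The header above defines VEGA12_Q88_FORMAT_CONVERSION_UNIT as 256 for converting values into Q8.8 fixed point for the firmware. A standalone sketch of that encoding follows; the 1.5 sample value is hypothetical.

#include <stdio.h>
#include <stdint.h>

#define VEGA12_Q88_FORMAT_CONVERSION_UNIT 256	/* from vega12_hwmgr.h above */

/* Q8.8 fixed point: multiply by 256 to encode, divide by 256 to decode. */
static uint16_t to_q88(float value)
{
	return (uint16_t)(value * VEGA12_Q88_FORMAT_CONVERSION_UNIT);
}

int main(void)
{
	/* A hypothetical parameter of 1.5 encodes as 0x0180. */
	printf("0x%04x\n", to_q88(1.5f));
	return 0;
}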
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
new file mode 100644
index 0000000..30b278c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VEGA12_INC_H
+#define VEGA12_INC_H
+
+#include "asic_reg/thm/thm_9_0_default.h"
+#include "asic_reg/thm/thm_9_0_offset.h"
+#include "asic_reg/thm/thm_9_0_sh_mask.h"
+
+#include "asic_reg/mp/mp_9_0_offset.h"
+#include "asic_reg/mp/mp_9_0_sh_mask.h"
+
+#include "asic_reg/gc/gc_9_2_1_offset.h"
+#include "asic_reg/gc/gc_9_2_1_sh_mask.h"
+
+#include "asic_reg/nbio/nbio_6_1_offset.h"
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
new file mode 100644
index 0000000..76e60c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
@@ -0,0 +1,1364 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "hwmgr.h"
+#include "vega12_hwmgr.h"
+#include "vega12_powertune.h"
+#include "vega12_smumgr.h"
+#include "vega12_ppsmc.h"
+#include "vega12_inc.h"
+#include "pp_debug.h"
+#include "pp_soc15.h"
+
+static const struct vega12_didt_config_reg SEDiDtTuningCtrlConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853 },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153 },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
+ { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
+
+ { 0xFFFFFFFF } /* End of list */
+};
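Each DIDT table above is a list of { offset, mask, shift, value } entries terminated by an entry whose offset is 0xFFFFFFFF. The apply loop is not part of this hunk, so the sketch below only shows the plausible read-modify-write walk such a table implies; the local struct stands in for vega12_didt_config_reg (presumably declared in vega12_powertune.h) and regs[] stands in for the real indirect register file.

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the config-reg entry shape used by the tables above. */
struct cfg_reg {
	uint32_t offset;
	uint32_t mask;
	uint32_t shift;
	uint32_t value;
};

/* Plausible walk: each entry patches one field of one register, and the
 * 0xFFFFFFFF offset terminates the list. */
static void apply_table(uint32_t *regs, const struct cfg_reg *tbl)
{
	for (; tbl->offset != 0xFFFFFFFF; tbl++) {
		uint32_t v = regs[tbl->offset];

		v = (v & ~tbl->mask) | ((tbl->value << tbl->shift) & tbl->mask);
		regs[tbl->offset] = v;
	}
}

int main(void)
{
	uint32_t regs[4] = { 0 };
	const struct cfg_reg tbl[] = {
		{ 0, 0x00003fff, 0, 0x3853 },	/* hypothetical 14-bit field at bit 0 */
		{ 0xFFFFFFFF }			/* End of list */
	};

	apply_table(regs, tbl);
	printf("reg0 = 0x%08x\n", regs[0]);	/* prints 0x00003853 */
	return 0;
}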
+
+static const struct vega12_didt_config_reg SEDiDtCtrl3Config_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /*DIDT_SQ_CTRL3 */
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK, DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK, DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ /*DIDT_TCP_CTRL3 */
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK, DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ /*DIDT_TD_CTRL3 */
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__THROTTLE_POLICY_MASK, DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ /*DIDT_DB_CTRL3 */
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__THROTTLE_POLICY_MASK, DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK, DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEDiDtCtrl2Config_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853 },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000 },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK, DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
+ { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEDiDtCtrl1Config_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff },
+ /* DIDT_TD */
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff },
+ /* DIDT_TCP */
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff },
+ /* DIDT_DB */
+ { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MIN_POWER_MASK, DIDT_DB_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MAX_POWER_MASK, DIDT_DB_CTRL1__MAX_POWER__SHIFT, 0xffff },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega12_didt_config_reg SEDiDtWeightConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B363B1A },
+ { ixDIDT_SQ_WEIGHT4_7, 0xFFFFFFFF, 0, 0x270B2432 },
+ { ixDIDT_SQ_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000018 },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B1D220F },
+ { ixDIDT_TD_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00007558 },
+ { ixDIDT_TD_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_WEIGHT0_3, 0xFFFFFFFF, 0, 0x5ACE160D },
+ { ixDIDT_TCP_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_WEIGHT0_3, 0xFFFFFFFF, 0, 0x0E152A0F },
+ { ixDIDT_DB_WEIGHT4_7, 0xFFFFFFFF, 0, 0x09061813 },
+ { ixDIDT_DB_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000013 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEDiDtCtrl0Config_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+ /* DIDT_TD */
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+ /* DIDT_TCP */
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+ /* DIDT_DB */
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__PHASE_OFFSET_MASK, DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK, DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega12_didt_config_reg SEDiDtStallCtrlConfig_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEDiDtStallPatternConfig_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ_STALL_PATTERN_1_2 */
+ { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_SQ_STALL_PATTERN_3_4 */
+ { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_SQ_STALL_PATTERN_5_6 */
+ { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_SQ_STALL_PATTERN_7 */
+ { ixDIDT_SQ_STALL_PATTERN_7, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ /* DIDT_TCP_STALL_PATTERN_1_2 */
+ { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_TCP_STALL_PATTERN_3_4 */
+ { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_TCP_STALL_PATTERN_5_6 */
+ { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_TCP_STALL_PATTERN_7 */
+ { ixDIDT_TCP_STALL_PATTERN_7, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ /* DIDT_TD_STALL_PATTERN_1_2 */
+ { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_TD_STALL_PATTERN_3_4 */
+ { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_TD_STALL_PATTERN_5_6 */
+ { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_TD_STALL_PATTERN_7 */
+ { ixDIDT_TD_STALL_PATTERN_7, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ /* DIDT_DB_STALL_PATTERN_1_2 */
+ { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_DB_STALL_PATTERN_3_4 */
+ { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_DB_STALL_PATTERN_5_6 */
+ { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_DB_STALL_PATTERN_7 */
+ { ixDIDT_DB_STALL_PATTERN_7, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SELCacConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060021 },
+ /* TD */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x028E0020 },
+ /* TCP */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x001c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x009c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x011c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x019c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x021c0020 },
+ /* DB */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00200008 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00820008 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01020008 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01820008 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega12_didt_config_reg SEEDCStallPatternConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00030001 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x000F0007 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x003F001F },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x0000007F },
+ /* TD */
+ { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TCP */
+ { ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+ /* DB */
+ { ixDIDT_DB_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_DB_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_DB_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_DB_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEEDCForceStallPatternConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TD */
+ { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEEDCStallDelayConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TD */
+ { ixDIDT_TD_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TCP */
+ { ixDIDT_TCP_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
+ /* DB */
+ { ixDIDT_DB_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEEDCThresholdConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixDIDT_SQ_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0x0000010E },
+ { ixDIDT_TD_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
+ { ixDIDT_TCP_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
+ { ixDIDT_DB_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEEDCCtrlResetConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEEDCCtrlConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0004 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0006 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg SEEDCCtrlForceStallConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000C },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+
+ /* TD */
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg GCDiDtDroopCtrlConfig_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg GCDiDtCtrl0Config_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK, GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__PHASE_OFFSET_MASK, GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_SW_RST_MASK, GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega12_didt_config_reg PSMSEEDCStallPatternConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC STALL PATTERNs */
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT, 0x0101 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT, 0x0101 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT, 0x1111 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT, 0x1111 },
+
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT, 0x1515 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT, 0x1515 },
+
+ { ixDIDT_SQ_EDC_STALL_PATTERN_7, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT, 0x5555 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg PSMSEEDCStallDelayConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC STALL DELAYs */
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT, 0x0000 },
+
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg PSMSEEDCThresholdConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC THRESHOLD */
+ { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg PSMSEEDCCtrlResetConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC CTRL */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg PSMSEEDCCtrlConfig_Vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC CTRL */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg PSMGCEDCThresholdConfig_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg PSMGCEDCDroopCtrlConfig_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT, 0x0001 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT, 0x0384 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT, 0x0001 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK, GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT, 0x0001 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT, 0x0001 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg PSMGCEDCCtrlResetConfig_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg PSMGCEDCCtrlConfig_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg AvfsPSMResetConfig_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { 0x16A02, 0xFFFFFFFF, 0x0, 0x0000005F },
+ { 0x16A05, 0xFFFFFFFF, 0x0, 0x00000001 },
+ { 0x16A06, 0x00000001, 0x0, 0x02000000 },
+ { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega12_didt_config_reg AvfsPSMInitConfig_vega12[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { 0x16A05, 0xFFFFFFFF, 0x18, 0x00000001 },
+ { 0x16A05, 0xFFFFFFFF, 0x8, 0x00000003 },
+ { 0x16A05, 0xFFFFFFFF, 0xa, 0x00000006 },
+ { 0x16A05, 0xFFFFFFFF, 0x7, 0x00000000 },
+ { 0x16A06, 0xFFFFFFFF, 0x18, 0x00000001 },
+ { 0x16A06, 0xFFFFFFFF, 0x19, 0x00000001 },
+ { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
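+/*
+ * Walk a 0xFFFFFFFF-terminated table of config entries and apply each one
+ * as a read-modify-write to the indirect register space selected by
+ * reg_type (DIDT, GC CAC or SE CAC).
+ */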
+static int vega12_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs, enum vega12_didt_config_reg_type reg_type)
+{
+ uint32_t data;
+
+ PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega12_program_didt_config_registers] Invalid config register table!", return -EINVAL);
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ switch (reg_type) {
+ case VEGA12_CONFIGREG_DIDT:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
+ break;
+ case VEGA12_CONFIGREG_GCCAC:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
+ break;
+ case VEGA12_CONFIGREG_SECAC:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ config_regs++;
+ }
+
+ return 0;
+}
+
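+/*
+ * Same table walk as above, but the entries target directly addressed
+ * (MMIO) GC registers instead of an indirect register space.
+ */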
+static int vega12_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs)
+{
+ uint32_t data;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ data = cgs_read_register(hwmgr->device, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_register(hwmgr->device, config_regs->offset, data);
+ config_regs++;
+ }
+
+ return 0;
+}
+
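+/*
+ * Set or clear the DIDT_CTRL_EN bit of every ramping block (SQ/DB/TD/TCP)
+ * allowed by the platform caps, mirror the setting in the per-block EDC
+ * controls when EDC is enabled, and, on enable, report the resulting block
+ * mask to the SMC via PPSMC_MSG_ConfigureGfxDidt.
+ */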
+static void vega12_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
+{
+ uint32_t data;
+ int result;
+ uint32_t en = (enable ? 1 : 0);
+ uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+
+ if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_SQ_CTRL0, DIDT_CTRL_EN, en);
+ didt_block_info &= ~SQ_Enable_MASK;
+ didt_block_info |= en << SQ_Enable_SHIFT;
+ }
+
+ if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DB_CTRL0, DIDT_CTRL_EN, en);
+ didt_block_info &= ~DB_Enable_MASK;
+ didt_block_info |= en << DB_Enable_SHIFT;
+ }
+
+ if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TD_CTRL0, DIDT_CTRL_EN, en);
+ didt_block_info &= ~TD_Enable_MASK;
+ didt_block_info |= en << TD_Enable_SHIFT;
+ }
+
+ if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TCP_CTRL0, DIDT_CTRL_EN, en);
+ didt_block_info &= ~TCP_Enable_MASK;
+ didt_block_info |= en << TCP_Enable_SHIFT;
+ }
+
+#if 0
+ if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DBR_CTRL0, DIDT_CTRL_EN, en);
+ }
+#endif
+
+ if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
+ data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
+ }
+
+ if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
+ data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
+ }
+
+ if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
+ data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
+ }
+
+ if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
+ data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
+ }
+
+#if 0
+ if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
+ data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
+ }
+#endif
+ }
+
+ if (enable) {
+ /* For Vega12, SMC does not support any mask yet. */
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+ PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
+ }
+}
+
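+/*
+ * DiDt mode 0: select each shader engine in turn through GRBM_GFX_INDEX,
+ * program the per-SE DIDT and SE CAC tables, restore broadcast mode and
+ * then enable the configured blocks.
+ */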
+static int vega12_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0, count, data;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t reg;
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+
+ result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SELCacConfig_Vega12, VEGA12_CONFIGREG_SECAC);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
+
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega12_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega12_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+{
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega12_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
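+/*
+ * DiDt mode 2: program a reduced per-SE DIDT set, then the GC droop
+ * control table and, as allowed by the platform caps, the GC DIDT control
+ * and AVFS PSM init tables.
+ */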
+static int vega12_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0, count, data;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t reg;
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+
+ result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega12_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ vega12_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega12);
+ if (PP_CAP(PHM_PlatformCaps_GCEDC))
+ vega12_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega12);
+
+ if (PP_CAP(PHM_PlatformCaps_PSM))
+ vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
+
+ return 0;
+}
+
+static int vega12_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+{
+ uint32_t data;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega12_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+ cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
+ }
+
+ if (PP_CAP(PHM_PlatformCaps_PSM))
+ vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
+
+ return 0;
+}
+
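+/*
+ * DiDt mode 3: program the per-SE EDC stall pattern, stall delay,
+ * threshold and control tables on every shader engine.
+ */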
+static int vega12_enable_se_edc_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0, count, data;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t reg;
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+ result = vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega12_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega12_disable_se_edc_config(struct pp_hwmgr *hwmgr)
+{
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega12_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
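+/*
+ * DiDt modes 1, 4 and 5: reset the AVFS PSM state, program the PSM
+ * flavour of the per-SE EDC tables, then the GC EDC droop/control tables
+ * and the AVFS PSM init table as allowed by the platform caps.
+ */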
+static int vega12_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ uint32_t num_se = 0;
+ uint32_t count, data;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t reg;
+
+ num_se = adev->gfx.config.max_shader_engines;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+ result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega12_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega12);
+
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
+ vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega12);
+ vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega12);
+ }
+
+ if (PP_CAP(PHM_PlatformCaps_PSM))
+ vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
+
+ return 0;
+}
+
+static int vega12_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+{
+ uint32_t data;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega12_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+ cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
+ }
+
+ if (PP_CAP(PHM_PlatformCaps_PSM))
+ vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
+
+ return 0;
+}
+
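+/*
+ * DiDt mode 6: with GRBM_GFX_INDEX in broadcast mode, program the EDC
+ * force-stall pattern and control tables, leaving the DIDT block mask
+ * disabled.
+ */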
+static int vega12_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+ int result;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ result = vega12_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega12, VEGA12_CONFIGREG_DIDT);
+ if (0 != result) {
+ /* leave RLC safe mode on the error path as well */
+ cgs_enter_safe_mode(hwmgr->device, false);
+ return result;
+ }
+
+ vega12_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega12_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ result = vega12_disable_se_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Pre DIDT disable clock gating failed!", return result);
+
+ return 0;
+}
+
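+/*
+ * Top-level DiDt enable: dispatch to the configuration routine matching
+ * the didt_mode selected through the registry data.
+ */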
+int vega12_enable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DIDT].supported) {
+ if (data->smu_features[GNLD_DIDT].enabled)
+ PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n");
+
+ switch (data->registry_data.didt_mode) {
+ case 0:
+ result = vega12_enable_cac_driving_se_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result);
+ break;
+ case 2:
+ result = vega12_enable_psm_gc_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result);
+ break;
+ case 3:
+ result = vega12_enable_se_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result);
+ break;
+ case 1:
+ case 4:
+ case 5:
+ result = vega12_enable_psm_gc_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 5 Failed!", return result);
+ break;
+ case 6:
+ result = vega12_enable_se_edc_force_stall_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result);
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+#if 0
+ if (0 == result) {
+ result = vega12_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
+ data->smu_features[GNLD_DIDT].enabled = true;
+ }
+#endif
+ }
+
+ return result;
+}
+
+int vega12_disable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DIDT].supported) {
+ if (!data->smu_features[GNLD_DIDT].enabled)
+ PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n");
+
+ switch (data->registry_data.didt_mode) {
+ case 0:
+ result = vega12_disable_cac_driving_se_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result);
+ break;
+ case 2:
+ result = vega12_disable_psm_gc_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result);
+ break;
+ case 3:
+ result = vega12_disable_se_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result);
+ break;
+ case 1:
+ case 4:
+ case 5:
+ result = vega12_disable_psm_gc_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 5 Failed!", return result);
+ break;
+ case 6:
+ result = vega12_disable_se_edc_force_stall_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result);
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+ if (0 == result) {
+ result = vega12_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
+ data->smu_features[GNLD_DIDT].enabled = false;
+ }
+ }
+
+ return result;
+}
+
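+/*
+ * Forward the requested power limit to the SMC; a no-op when the PPT
+ * feature is not enabled.
+ */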
+int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_PPT].enabled)
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetPptLimit, n);
+
+ return 0;
+}
+
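+/*
+ * Enable the PPT and TDC SMC features when supported and program the
+ * default power limit from the TDP table; a feature that fails to enable
+ * is marked unsupported rather than failing the whole call.
+ */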
+int vega12_enable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct phm_tdp_table *tdp_table = table_info->tdp_table;
+ uint32_t default_pwr_limit =
+ (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
+ int result = 0;
+
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
+ if (data->smu_features[GNLD_PPT].supported)
+ PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
+ true, data->smu_features[GNLD_PPT].smu_feature_bitmap),
+ "Attempt to enable PPT feature Failed!",
+ data->smu_features[GNLD_PPT].supported = false);
+
+ if (data->smu_features[GNLD_TDC].supported)
+ PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
+ true, data->smu_features[GNLD_TDC].smu_feature_bitmap),
+ "Attempt to enable PPT feature Failed!",
+ data->smu_features[GNLD_TDC].supported = false);
+
+ result = vega12_set_power_limit(hwmgr, default_pwr_limit);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to set Default Power Limit in SMC!",
+ return result);
+ }
+
+ return result;
+}
+
+int vega12_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
+ if (data->smu_features[GNLD_PPT].supported)
+ PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
+ false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
+ "Attempt to disable PPT feature Failed!",
+ data->smu_features[GNLD_PPT].supported = false);
+
+ if (data->smu_features[GNLD_TDC].supported)
+ PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
+ false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
+ "Attempt to disable PPT feature Failed!",
+ data->smu_features[GNLD_TDC].supported = false);
+ }
+
+ return 0;
+}
+
+static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
+ uint32_t adjust_percent)
+{
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+}
+
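+/*
+ * Apply the platform TDP adjustment as a signed overdrive percentage, the
+ * sign being taken from TDPAdjustmentPolarity.
+ */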
+int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
+{
+ int adjust_percent, result = 0;
+
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
+ adjust_percent =
+ hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
+ hwmgr->platform_descriptor.TDPAdjustment :
+ (-1 * hwmgr->platform_descriptor.TDPAdjustment);
+ result = vega12_set_overdrive_target_percentage(hwmgr,
+ (uint32_t)adjust_percent);
+ }
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
new file mode 100644
index 0000000..78d31a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _VEGA12_POWERTUNE_H_
+#define _VEGA12_POWERTUNE_H_
+
+enum vega12_didt_config_reg_type {
+ VEGA12_CONFIGREG_DIDT = 0,
+ VEGA12_CONFIGREG_GCCAC,
+ VEGA12_CONFIGREG_SECAC
+};
+
+/* PowerContainment Features */
+#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+
+struct vega12_didt_config_reg {
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t value;
+};
+
+int vega12_enable_power_containment(struct pp_hwmgr *hwmgr);
+int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
+int vega12_power_control_set_level(struct pp_hwmgr *hwmgr);
+int vega12_disable_power_containment(struct pp_hwmgr *hwmgr);
+
+int vega12_enable_didt_config(struct pp_hwmgr *hwmgr);
+int vega12_disable_didt_config(struct pp_hwmgr *hwmgr);
+
+#endif /* _VEGA12_POWERTUNE_H_ */
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_pptable.h
new file mode 100644
index 0000000..bf4f509
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_pptable.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _VEGA12_PPTABLE_H_
+#define _VEGA12_PPTABLE_H_
+
+#pragma pack(push, 1)
+
+#define ATOM_VEGA12_PP_THERMALCONTROLLER_NONE 0
+#define ATOM_VEGA12_PP_THERMALCONTROLLER_VEGA12 25
+
+#define ATOM_VEGA12_PP_PLATFORM_CAP_POWERPLAY 0x1
+#define ATOM_VEGA12_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2
+#define ATOM_VEGA12_PP_PLATFORM_CAP_HARDWAREDC 0x4
+#define ATOM_VEGA12_PP_PLATFORM_CAP_BACO 0x8
+#define ATOM_VEGA12_PP_PLATFORM_CAP_BAMACO 0x10
+#define ATOM_VEGA12_PP_PLATFORM_CAP_ENABLESHADOWPSTATE 0x20
+
+#define ATOM_VEGA12_TABLE_REVISION_VEGA12 9
+
+enum ATOM_VEGA12_ODSETTING_ID {
+ ATOM_VEGA12_ODSETTING_GFXCLKFMAX = 0,
+ ATOM_VEGA12_ODSETTING_GFXCLKFMIN,
+ ATOM_VEGA12_ODSETTING_VDDGFXCURVEFREQ_P1,
+ ATOM_VEGA12_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1,
+ ATOM_VEGA12_ODSETTING_VDDGFXCURVEFREQ_P2,
+ ATOM_VEGA12_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2,
+ ATOM_VEGA12_ODSETTING_VDDGFXCURVEFREQ_P3,
+ ATOM_VEGA12_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3,
+ ATOM_VEGA12_ODSETTING_UCLKFMAX,
+ ATOM_VEGA12_ODSETTING_POWERPERCENTAGE,
+ ATOM_VEGA12_ODSETTING_FANRPMMIN,
+ ATOM_VEGA12_ODSETTING_FANRPMACOUSTICLIMIT,
+ ATOM_VEGA12_ODSETTING_FANTARGETTEMPERATURE,
+ ATOM_VEGA12_ODSETTING_OPERATINGTEMPMAX,
+ ATOM_VEGA12_ODSETTING_COUNT,
+};
+typedef enum ATOM_VEGA12_ODSETTING_ID ATOM_VEGA12_ODSETTING_ID;
+
+enum ATOM_VEGA12_PPCLOCK_ID {
+ ATOM_VEGA12_PPCLOCK_GFXCLK = 0,
+ ATOM_VEGA12_PPCLOCK_VCLK,
+ ATOM_VEGA12_PPCLOCK_DCLK,
+ ATOM_VEGA12_PPCLOCK_ECLK,
+ ATOM_VEGA12_PPCLOCK_SOCCLK,
+ ATOM_VEGA12_PPCLOCK_UCLK,
+ ATOM_VEGA12_PPCLOCK_DCEFCLK,
+ ATOM_VEGA12_PPCLOCK_DISPCLK,
+ ATOM_VEGA12_PPCLOCK_PIXCLK,
+ ATOM_VEGA12_PPCLOCK_PHYCLK,
+ ATOM_VEGA12_PPCLOCK_COUNT,
+};
+typedef enum ATOM_VEGA12_PPCLOCK_ID ATOM_VEGA12_PPCLOCK_ID;
+
+
+typedef struct _ATOM_VEGA12_POWERPLAYTABLE
+{
+ struct atom_common_table_header sHeader;
+ UCHAR ucTableRevision;
+ USHORT usTableSize;
+ ULONG ulGoldenPPID;
+ ULONG ulGoldenRevision;
+ USHORT usFormatID;
+
+ ULONG ulPlatformCaps;
+
+ UCHAR ucThermalControllerType;
+
+ USHORT usSmallPowerLimit1;
+ USHORT usSmallPowerLimit2;
+ USHORT usBoostPowerLimit;
+ USHORT usODTurboPowerLimit;
+ USHORT usODPowerSavePowerLimit;
+ USHORT usSoftwareShutdownTemp;
+
+ ULONG PowerSavingClockMax [ATOM_VEGA12_PPCLOCK_COUNT];
+ ULONG PowerSavingClockMin [ATOM_VEGA12_PPCLOCK_COUNT];
+
+ ULONG ODSettingsMax [ATOM_VEGA12_ODSETTING_COUNT];
+ ULONG ODSettingsMin [ATOM_VEGA12_ODSETTING_COUNT];
+
+ USHORT usReserve[5];
+
+ PPTable_t smcPPTable;
+
+} ATOM_Vega12_POWERPLAYTABLE;
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
new file mode 100644
index 0000000..e7d7949
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+
+#include "vega12/smu9_driver_if.h"
+#include "vega12_processpptables.h"
+#include "ppatomfwctrl.h"
+#include "atomfirmware.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "vega12_pptable.h"
+
+static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
+ enum phm_platform_caps cap)
+{
+ if (enable)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
+ else
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
+}
+
+static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+{
+ int index = GetIndexIntoMasterDataTable(powerplayinfo);
+
+ u16 size;
+ u8 frev, crev;
+ const void *table_address = hwmgr->soft_pp_table;
+
+ if (!table_address) {
+ table_address = (ATOM_Vega12_POWERPLAYTABLE *)
+ cgs_atom_get_data_table(hwmgr->device, index,
+ &size, &frev, &crev);
+
+ hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/
+ hwmgr->soft_pp_table_size = size;
+ }
+
+ return table_address;
+}
+
+static int check_powerplay_tables(
+ struct pp_hwmgr *hwmgr,
+ const ATOM_Vega12_POWERPLAYTABLE *powerplay_table)
+{
+ PP_ASSERT_WITH_CODE((powerplay_table->sHeader.format_revision >=
+ ATOM_VEGA12_TABLE_REVISION_VEGA12),
+ "Unsupported PPTable format!", return -1);
+ PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0,
+ "Invalid PowerPlay Table!", return -1);
+
+ return 0;
+}
+
+static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
+{
+ set_hw_cap(
+ hwmgr,
+ 0 != (powerplay_caps & ATOM_VEGA12_PP_PLATFORM_CAP_POWERPLAY),
+ PHM_PlatformCaps_PowerPlaySupport);
+
+ set_hw_cap(
+ hwmgr,
+ 0 != (powerplay_caps & ATOM_VEGA12_PP_PLATFORM_CAP_SBIOSPOWERSOURCE),
+ PHM_PlatformCaps_BiosPowerSourceControl);
+
+ set_hw_cap(
+ hwmgr,
+ 0 != (powerplay_caps & ATOM_VEGA12_PP_PLATFORM_CAP_BACO),
+ PHM_PlatformCaps_BACO);
+
+ set_hw_cap(
+ hwmgr,
+ 0 != (powerplay_caps & ATOM_VEGA12_PP_PLATFORM_CAP_BAMACO),
+ PHM_PlatformCaps_BAMACO);
+
+ return 0;
+}
+
+static int copy_clock_limits_array(
+ struct pp_hwmgr *hwmgr,
+ uint32_t **pptable_info_array,
+ const uint32_t *pptable_array)
+{
+ uint32_t array_size, i;
+ uint32_t *table;
+
+ array_size = sizeof(uint32_t) * ATOM_VEGA12_PPCLOCK_COUNT;
+
+ table = kzalloc(array_size, GFP_KERNEL);
+ if (NULL == table)
+ return -ENOMEM;
+
+ for (i = 0; i < ATOM_VEGA12_PPCLOCK_COUNT; i++)
+ table[i] = pptable_array[i];
+
+ *pptable_info_array = table;
+
+ return 0;
+}
+
+static int copy_overdrive_settings_limits_array(
+ struct pp_hwmgr *hwmgr,
+ uint32_t **pptable_info_array,
+ const uint32_t *pptable_array)
+{
+ uint32_t array_size, i;
+ uint32_t *table;
+
+ array_size = sizeof(uint32_t) * ATOM_VEGA12_ODSETTING_COUNT;
+
+ table = kzalloc(array_size, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ for (i = 0; i < ATOM_VEGA12_ODSETTING_COUNT; i++)
+ table[i] = pptable_array[i];
+
+ *pptable_info_array = table;
+
+ return 0;
+}
+
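+/*
+ * Overlay board-specific parameters (I2C routing, VR telemetry limits, GPIO
+ * assignments, spread spectrum settings) read from the VBIOS smc_dpm_info
+ * table onto the SMC PPTable.
+ */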
+static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable)
+{
+ struct pp_atomfwctrl_smc_dpm_parameters smc_dpm_table;
+
+ PP_ASSERT_WITH_CODE(
+ pp_atomfwctrl_get_smc_dpm_information(hwmgr, &smc_dpm_table) == 0,
+ "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!",
+ return -1);
+
+ ppsmc_pptable->Liquid1_I2C_address = smc_dpm_table.liquid1_i2c_address;
+ ppsmc_pptable->Liquid2_I2C_address = smc_dpm_table.liquid2_i2c_address;
+ ppsmc_pptable->Vr_I2C_address = smc_dpm_table.vr_i2c_address;
+ ppsmc_pptable->Plx_I2C_address = smc_dpm_table.plx_i2c_address;
+
+ ppsmc_pptable->Liquid_I2C_LineSCL = smc_dpm_table.liquid_i2c_linescl;
+ ppsmc_pptable->Liquid_I2C_LineSDA = smc_dpm_table.liquid_i2c_linesda;
+ ppsmc_pptable->Vr_I2C_LineSCL = smc_dpm_table.vr_i2c_linescl;
+ ppsmc_pptable->Vr_I2C_LineSDA = smc_dpm_table.vr_i2c_linesda;
+
+ ppsmc_pptable->Plx_I2C_LineSCL = smc_dpm_table.plx_i2c_linescl;
+ ppsmc_pptable->Plx_I2C_LineSDA = smc_dpm_table.plx_i2c_linesda;
+ ppsmc_pptable->VrSensorPresent = smc_dpm_table.vrsensorpresent;
+ ppsmc_pptable->LiquidSensorPresent = smc_dpm_table.liquidsensorpresent;
+
+ ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table.maxvoltagestepgfx;
+ ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table.maxvoltagestepsoc;
+
+ ppsmc_pptable->VddGfxVrMapping = smc_dpm_table.vddgfxvrmapping;
+ ppsmc_pptable->VddSocVrMapping = smc_dpm_table.vddsocvrmapping;
+ ppsmc_pptable->VddMem0VrMapping = smc_dpm_table.vddmem0vrmapping;
+ ppsmc_pptable->VddMem1VrMapping = smc_dpm_table.vddmem1vrmapping;
+
+ ppsmc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table.gfxulvphasesheddingmask;
+ ppsmc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table.soculvphasesheddingmask;
+
+ ppsmc_pptable->GfxMaxCurrent = smc_dpm_table.gfxmaxcurrent;
+ ppsmc_pptable->GfxOffset = smc_dpm_table.gfxoffset;
+ ppsmc_pptable->Padding_TelemetryGfx = smc_dpm_table.padding_telemetrygfx;
+
+ ppsmc_pptable->SocMaxCurrent = smc_dpm_table.socmaxcurrent;
+ ppsmc_pptable->SocOffset = smc_dpm_table.socoffset;
+ ppsmc_pptable->Padding_TelemetrySoc = smc_dpm_table.padding_telemetrysoc;
+
+ ppsmc_pptable->Mem0MaxCurrent = smc_dpm_table.mem0maxcurrent;
+ ppsmc_pptable->Mem0Offset = smc_dpm_table.mem0offset;
+ ppsmc_pptable->Padding_TelemetryMem0 = smc_dpm_table.padding_telemetrymem0;
+
+ ppsmc_pptable->Mem1MaxCurrent = smc_dpm_table.mem1maxcurrent;
+ ppsmc_pptable->Mem1Offset = smc_dpm_table.mem1offset;
+ ppsmc_pptable->Padding_TelemetryMem1 = smc_dpm_table.padding_telemetrymem1;
+
+ ppsmc_pptable->AcDcGpio = smc_dpm_table.acdcgpio;
+ ppsmc_pptable->AcDcPolarity = smc_dpm_table.acdcpolarity;
+ ppsmc_pptable->VR0HotGpio = smc_dpm_table.vr0hotgpio;
+ ppsmc_pptable->VR0HotPolarity = smc_dpm_table.vr0hotpolarity;
+
+ ppsmc_pptable->VR1HotGpio = smc_dpm_table.vr1hotgpio;
+ ppsmc_pptable->VR1HotPolarity = smc_dpm_table.vr1hotpolarity;
+ ppsmc_pptable->Padding1 = smc_dpm_table.padding1;
+ ppsmc_pptable->Padding2 = smc_dpm_table.padding2;
+
+ ppsmc_pptable->LedPin0 = smc_dpm_table.ledpin0;
+ ppsmc_pptable->LedPin1 = smc_dpm_table.ledpin1;
+ ppsmc_pptable->LedPin2 = smc_dpm_table.ledpin2;
+
+ ppsmc_pptable->GfxclkSpreadEnabled = smc_dpm_table.gfxclkspreadenabled;
+ ppsmc_pptable->GfxclkSpreadPercent = smc_dpm_table.gfxclkspreadpercent;
+ ppsmc_pptable->GfxclkSpreadFreq = smc_dpm_table.gfxclkspreadfreq;
+
+ ppsmc_pptable->UclkSpreadEnabled = 0;
+ ppsmc_pptable->UclkSpreadPercent = smc_dpm_table.uclkspreadpercent;
+ ppsmc_pptable->UclkSpreadFreq = smc_dpm_table.uclkspreadfreq;
+
+ ppsmc_pptable->SocclkSpreadEnabled = 0;
+ ppsmc_pptable->SocclkSpreadPercent = smc_dpm_table.socclkspreadpercent;
+ ppsmc_pptable->SocclkSpreadFreq = smc_dpm_table.socclkspreadfreq;
+
+ return 0;
+}
+
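+/* Hard ceiling applied to the GFXCLK overdrive maximum reported by the
+ * pptable (value is in the table's clock units).
+ */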
+#define VEGA12_ENGINECLOCK_HARDMAX 198000
+static int init_powerplay_table_information(
+ struct pp_hwmgr *hwmgr,
+ const ATOM_Vega12_POWERPLAYTABLE *powerplay_table)
+{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ uint32_t disable_power_control = 0;
+ int result;
+
+ hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
+ pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType;
+
+ set_hw_cap(hwmgr,
+ ATOM_VEGA12_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
+ PHM_PlatformCaps_ThermalController);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+
+ if (powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX] > VEGA12_ENGINECLOCK_HARDMAX)
+ hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA12_ENGINECLOCK_HARDMAX;
+ else
+ hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX];
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX];
+
+ copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->ODSettingsMax);
+ copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->ODSettingsMin);
+
+ /* hwmgr->platformDescriptor.minOverdriveVDDC = 0;
+ hwmgr->platformDescriptor.maxOverdriveVDDC = 0;
+ hwmgr->platformDescriptor.overdriveVDDCStep = 0; */
+
+ if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
+ && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport);
+
+ pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
+ pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
+ pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit;
+ pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit;
+ pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit;
+
+ pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
+
+ hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE];
+
+ disable_power_control = 0;
+ if (!disable_power_control) {
+ /* enable TDP overdrive (PowerControl) feature as well if supported */
+ if (hwmgr->platform_descriptor.TDPODLimit)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerControl);
+ }
+
+ copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax);
+ copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin);
+
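+ /* Take a private copy of the SMC-format PPTable embedded in the ROM table;
+ * append_vbios_pptable() then fills in the board parameters.
+ */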
+ pptable_information->smc_pptable = kmemdup(&(powerplay_table->smcPPTable),
+ sizeof(PPTable_t), GFP_KERNEL);
+ if (pptable_information->smc_pptable == NULL)
+ return -ENOMEM;
+
+ result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
+
+ return result;
+}
+
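+/*
+ * Installed as vega12_pptable_funcs.pptable_init: allocate the v3 pptable
+ * backend, fetch and validate the ATOM PowerPlay table, then translate it
+ * into the runtime structures above.
+ */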
+int vega12_pp_tables_initialize(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ const ATOM_Vega12_POWERPLAYTABLE *powerplay_table;
+
+ hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v3_information), GFP_KERNEL);
+ PP_ASSERT_WITH_CODE((hwmgr->pptable != NULL),
+ "Failed to allocate hwmgr->pptable!", return -ENOMEM);
+
+ powerplay_table = get_powerplay_table(hwmgr);
+ PP_ASSERT_WITH_CODE((powerplay_table != NULL),
+ "Missing PowerPlay Table!", return -1);
+
+ result = check_powerplay_tables(hwmgr, powerplay_table);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "check_powerplay_tables failed", return result);
+
+ result = set_platform_caps(hwmgr,
+ le32_to_cpu(powerplay_table->ulPlatformCaps));
+ PP_ASSERT_WITH_CODE((result == 0),
+ "set_platform_caps failed", return result);
+
+ result = init_powerplay_table_information(hwmgr, powerplay_table);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "init_powerplay_table_information failed", return result);
+
+ return result;
+}
+
+static int vega12_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v3_information *pp_table_info =
+ (struct phm_ppt_v3_information *)(hwmgr->pptable);
+
+ kfree(pp_table_info->power_saving_clock_max);
+ pp_table_info->power_saving_clock_max = NULL;
+
+ kfree(pp_table_info->power_saving_clock_min);
+ pp_table_info->power_saving_clock_min = NULL;
+
+ kfree(pp_table_info->od_settings_max);
+ pp_table_info->od_settings_max = NULL;
+
+ kfree(pp_table_info->od_settings_min);
+ pp_table_info->od_settings_min = NULL;
+
+ kfree(pp_table_info->smc_pptable);
+ pp_table_info->smc_pptable = NULL;
+
+ kfree(hwmgr->pptable);
+ hwmgr->pptable = NULL;
+
+ return 0;
+}
+
+const struct pp_table_func vega12_pptable_funcs = {
+ .pptable_init = vega12_pp_tables_initialize,
+ .pptable_fini = vega12_pp_tables_uninitialize,
+};
+
+#if 0
+static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
+ uint16_t classification, uint16_t classification2)
+{
+ uint32_t result = 0;
+
+ if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT)
+ result |= PP_StateClassificationFlag_Boot;
+
+ if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+ result |= PP_StateClassificationFlag_Thermal;
+
+ if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
+ result |= PP_StateClassificationFlag_LimitedPowerSource;
+
+ if (classification & ATOM_PPLIB_CLASSIFICATION_REST)
+ result |= PP_StateClassificationFlag_Rest;
+
+ if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED)
+ result |= PP_StateClassificationFlag_Forced;
+
+ if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI)
+ result |= PP_StateClassificationFlag_ACPI;
+
+ if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
+ result |= PP_StateClassificationFlag_LimitedPowerSource_2;
+
+ return result;
+}
+
+int vega12_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
+ uint32_t entry_index, struct pp_power_state *power_state,
+ int (*call_back_func)(struct pp_hwmgr *, void *,
+ struct pp_power_state *, void *, uint32_t))
+{
+ int result = 0;
+ const ATOM_Vega12_State_Array *state_arrays;
+ const ATOM_Vega12_State *state_entry;
+ const ATOM_Vega12_POWERPLAYTABLE *pp_table =
+ get_powerplay_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE(pp_table, "Missing PowerPlay Table!",
+ return -1;);
+ power_state->classification.bios_index = entry_index;
+
+ if (pp_table->sHeader.format_revision >=
+ ATOM_Vega12_TABLE_REVISION_VEGA12) {
+ state_arrays = (ATOM_Vega12_State_Array *)
+ (((unsigned long)pp_table) +
+ le16_to_cpu(pp_table->usStateArrayOffset));
+
+ PP_ASSERT_WITH_CODE(pp_table->usStateArrayOffset > 0,
+ "Invalid PowerPlay Table State Array Offset.",
+ return -1);
+ PP_ASSERT_WITH_CODE(state_arrays->ucNumEntries > 0,
+ "Invalid PowerPlay Table State Array.",
+ return -1);
+ PP_ASSERT_WITH_CODE((entry_index < state_arrays->ucNumEntries),
+ "Invalid PowerPlay Table State Array Entry.",
+ return -1);
+
+ state_entry = &(state_arrays->states[entry_index]);
+
+ result = call_back_func(hwmgr, (void *)state_entry, power_state,
+ (void *)pp_table,
+ make_classification_flags(hwmgr,
+ le16_to_cpu(state_entry->usClassification),
+ le16_to_cpu(state_entry->usClassification2)));
+ }
+
+ if (!result && (power_state->classification.flags &
+ PP_StateClassificationFlag_Boot))
+ result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
+
+ return result;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h
new file mode 100644
index 0000000..65652ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VEGA12_PROCESSPPTABLES_H
+#define VEGA12_PROCESSPPTABLES_H
+
+#include "hwmgr.h"
+
+enum Vega12_I2CLineID {
+ Vega12_I2CLineID_DDC1 = 0x90,
+ Vega12_I2CLineID_DDC2 = 0x91,
+ Vega12_I2CLineID_DDC3 = 0x92,
+ Vega12_I2CLineID_DDC4 = 0x93,
+ Vega12_I2CLineID_DDC5 = 0x94,
+ Vega12_I2CLineID_DDC6 = 0x95,
+ Vega12_I2CLineID_SCLSDA = 0x96,
+ Vega12_I2CLineID_DDCVGA = 0x97
+};
+
+#define Vega12_I2C_DDC1DATA 0
+#define Vega12_I2C_DDC1CLK 1
+#define Vega12_I2C_DDC2DATA 2
+#define Vega12_I2C_DDC2CLK 3
+#define Vega12_I2C_DDC3DATA 4
+#define Vega12_I2C_DDC3CLK 5
+#define Vega12_I2C_SDA 40
+#define Vega12_I2C_SCL 41
+#define Vega12_I2C_DDC4DATA 65
+#define Vega12_I2C_DDC4CLK 66
+#define Vega12_I2C_DDC5DATA 0x48
+#define Vega12_I2C_DDC5CLK 0x49
+#define Vega12_I2C_DDC6DATA 0x4a
+#define Vega12_I2C_DDC6CLK 0x4b
+#define Vega12_I2C_DDCVGADATA 0x4c
+#define Vega12_I2C_DDCVGACLK 0x4d
+
+extern const struct pp_table_func vega12_pptable_funcs;
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
new file mode 100644
index 0000000..df0fa81
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "vega12_thermal.h"
+#include "vega12_hwmgr.h"
+#include "vega12_smumgr.h"
+#include "vega12_ppsmc.h"
+#include "vega12_inc.h"
+#include "pp_soc15.h"
+#include "pp_debug.h"
+
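+/*
+ * Query the SMC for the current fan speed: send PPSMC_MSG_GetCurrentRpm and
+ * read the result back from the SMC argument register.
+ */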
+static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
+{
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetCurrentRpm),
+ "Attempt to get current RPM from SMC Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr,
+ current_rpm),
+ "Attempt to read current RPM from SMC Failed!",
+ return -1);
+ return 0;
+}
+
+int vega12_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+ struct phm_fan_speed_info *fan_speed_info)
+{
+ memset(fan_speed_info, 0, sizeof(*fan_speed_info));
+ fan_speed_info->supports_percent_read = false;
+ fan_speed_info->supports_percent_write = false;
+ fan_speed_info->supports_rpm_read = true;
+ fan_speed_info->supports_rpm_write = true;
+
+ return 0;
+}
+
+int vega12_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+{
+ *speed = 0;
+
+ return vega12_get_current_rpm(hwmgr, speed);
+}
+
+/**
+ * @fn vega12_enable_fan_control_feature
+ * @brief Enables the SMC Fan Control Feature.
+ *
+ * @param hwmgr - the address of the powerplay hardware manager.
+ * @return 0 on success. -1 otherwise.
+ */
+static int vega12_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
+{
+#if 0
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_FAN_CONTROL].supported) {
+ PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(
+ hwmgr, true,
+ data->smu_features[GNLD_FAN_CONTROL].
+ smu_feature_bitmap),
+ "Attempt to Enable FAN CONTROL feature Failed!",
+ return -1);
+ data->smu_features[GNLD_FAN_CONTROL].enabled = true;
+ }
+#endif
+ return 0;
+}
+
+static int vega12_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
+{
+#if 0
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_FAN_CONTROL].supported) {
+ PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(
+ hwmgr, false,
+ data->smu_features[GNLD_FAN_CONTROL].
+ smu_feature_bitmap),
+ "Attempt to Enable FAN CONTROL feature Failed!",
+ return -1);
+ data->smu_features[GNLD_FAN_CONTROL].enabled = false;
+ }
+#endif
+ return 0;
+}
+
+int vega12_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_FAN_CONTROL].supported)
+ PP_ASSERT_WITH_CODE(
+ !vega12_enable_fan_control_feature(hwmgr),
+ "Attempt to Enable SMC FAN CONTROL Feature Failed!",
+ return -1);
+
+ return 0;
+}
+
+
+int vega12_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_FAN_CONTROL].supported)
+ PP_ASSERT_WITH_CODE(!vega12_disable_fan_control_feature(hwmgr),
+ "Attempt to Disable SMC FAN CONTROL Feature Failed!",
+ return -1);
+
+ return 0;
+}
+
+/**
+* Reset Fan Speed to default.
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success; a negative value if SMC fan control could not be restarted.
+*/
+int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
+{
+ return vega12_fan_ctrl_start_smc_fan_control(hwmgr);
+}
+
+/**
+* Reads the current GPU temperature from the Vega12 thermal controller.
+*
+* @param hwmgr The address of the hardware manager.
+*/
+int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+ int temp = 0;
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(THM_HWID, 0,
+ mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
+
+ temp = cgs_read_register(hwmgr->device, reg);
+
+ temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
+
+ temp = temp & 0x1ff;
+
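+ /* CTF_TEMP is reported in degrees Celsius; scale it to the finer-grained
+ * PP temperature units used by the rest of the driver.
+ */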
+ temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ return temp;
+}
+
+/**
+* Set the requested temperature range for high and low alert signals
+*
+* @param hwmgr The address of the hardware manager.
+* @param range Temperature range to be programmed for
+* high and low alert signals
+* @return -EINVAL if the requested range is not valid.
+*/
+static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
+{
+ int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ uint32_t val, reg;
+
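+ /* Intersect the requested range with the alert window supported by the
+ * thermal controller before programming DIG_THERM_INTH/INTL (in degrees C).
+ */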
+ if (low < range->min)
+ low = range->min;
+ if (high > range->max)
+ high = range->max;
+
+ if (low > high)
+ return -EINVAL;
+
+ reg = soc15_get_register_offset(THM_HWID, 0,
+ mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
+
+ val = cgs_read_register(hwmgr->device, reg);
+
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
+ cgs_write_register(hwmgr->device, reg, val);
+
+ return 0;
+}
+
+/**
+* Enable thermal alerts on the Vega12 thermal controller.
+*
+* @param hwmgr The address of the hardware manager.
+*/
+static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+{
+ uint32_t val = 0;
+ uint32_t reg;
+
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
+
+ reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
+ cgs_write_register(hwmgr->device, reg, val);
+
+ return 0;
+}
+
+/**
+* Disable thermal alerts on the Vega12 thermal controller.
+* @param hwmgr The address of the hardware manager.
+*/
+int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
+ cgs_write_register(hwmgr->device, reg, 0);
+
+ return 0;
+}
+
+/**
+* Uninitialize the thermal controller.
+* Currently just disables alerts.
+* @param hwmgr The address of the hardware manager.
+*/
+int vega12_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+ int result = vega12_thermal_disable_alert(hwmgr);
+
+ return result;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param hwmgr the address of the powerplay hardware manager.
+* @return result of sending PPSMC_MSG_SetFanTemperatureTarget to the SMC.
+*/
+int vega12_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ PPTable_t *table = &(data->smc_state_table.pp_table);
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanTemperatureTarget,
+ (uint32_t)table->FanTargetTemperature);
+
+ return ret;
+}
+
+/**
+* Start the fan control on the SMC.
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0; fan control is handed to the SMC only when microcode fan control is enabled.
+*/
+int vega12_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+ /* If the fan table setup has failed we could have disabled
+ * PHM_PlatformCaps_MicrocodeFanControl even after
+ * this function was included in the table.
+ * Make sure that we still think controlling the fan is OK.
+ */
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega12_fan_ctrl_start_smc_fan_control(hwmgr);
+
+ return 0;
+}
+
+
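+/*
+ * Bring up the thermal controller: program the alert temperature range,
+ * enable the thermal interrupts, then hand fan control over to the SMC.
+ */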
+int vega12_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
+{
+ int ret = 0;
+
+ if (range == NULL)
+ return -EINVAL;
+
+ ret = vega12_thermal_set_temperature_range(hwmgr, range);
+ if (ret)
+ return -EINVAL;
+
+ vega12_thermal_enable_alert(hwmgr);
+ /* We should restrict performance levels to low before we halt the SMC.
+ * On the other hand we are still in boot state when we do this
+ * so it would be pointless.
+ * If this assumption changes we have to revisit this table.
+ */
+ ret = vega12_thermal_setup_fan_table(hwmgr);
+ if (ret)
+ return -EINVAL;
+
+ vega12_thermal_start_smc_fan_control(hwmgr);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h
new file mode 100644
index 0000000..0d8ed039
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VEGA12_THERMAL_H
+#define VEGA12_THERMAL_H
+
+#include "hwmgr.h"
+
+struct vega12_temperature {
+ uint16_t edge_temp;
+ uint16_t hot_spot_temp;
+ uint16_t hbm_temp;
+ uint16_t vr_soc_temp;
+ uint16_t vr_mem_temp;
+ uint16_t liquid1_temp;
+ uint16_t liquid2_temp;
+ uint16_t plx_temp;
+};
+
+#define VEGA12_THERMAL_HIGH_ALERT_MASK 0x1
+#define VEGA12_THERMAL_LOW_ALERT_MASK 0x2
+
+#define VEGA12_THERMAL_MINIMUM_TEMP_READING -256
+#define VEGA12_THERMAL_MAXIMUM_TEMP_READING 255
+
+#define VEGA12_THERMAL_MINIMUM_ALERT_TEMP 0
+#define VEGA12_THERMAL_MAXIMUM_ALERT_TEMP 255
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+extern int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int vega12_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int vega12_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+ struct phm_fan_speed_info *fan_speed_info);
+extern int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int vega12_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
+ uint32_t *speed);
+extern int vega12_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr);
+extern int vega12_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int vega12_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 95932cc..fe36659 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,280 +29,7 @@
#include "amd_shared.h"
#include "cgs_common.h"
#include "dm_pp_interface.h"
-
-extern const struct amd_ip_funcs pp_ip_funcs;
-extern const struct amd_pm_funcs pp_dpm_funcs;
-
-enum amd_pp_sensors {
- AMDGPU_PP_SENSOR_GFX_SCLK = 0,
- AMDGPU_PP_SENSOR_VDDNB,
- AMDGPU_PP_SENSOR_VDDGFX,
- AMDGPU_PP_SENSOR_UVD_VCLK,
- AMDGPU_PP_SENSOR_UVD_DCLK,
- AMDGPU_PP_SENSOR_VCE_ECCLK,
- AMDGPU_PP_SENSOR_GPU_LOAD,
- AMDGPU_PP_SENSOR_GFX_MCLK,
- AMDGPU_PP_SENSOR_GPU_TEMP,
- AMDGPU_PP_SENSOR_VCE_POWER,
- AMDGPU_PP_SENSOR_UVD_POWER,
- AMDGPU_PP_SENSOR_GPU_POWER,
-};
-
-enum amd_pp_task {
- AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
- AMD_PP_TASK_ENABLE_USER_STATE,
- AMD_PP_TASK_READJUST_POWER_STATE,
- AMD_PP_TASK_COMPLETE_INIT,
- AMD_PP_TASK_MAX
-};
-
-struct amd_pp_init {
- struct cgs_device *device;
- uint32_t chip_family;
- uint32_t chip_id;
- bool pm_en;
- uint32_t feature_mask;
-};
-
-enum amd_pp_display_config_type{
- AMD_PP_DisplayConfigType_None = 0,
- AMD_PP_DisplayConfigType_DP54 ,
- AMD_PP_DisplayConfigType_DP432 ,
- AMD_PP_DisplayConfigType_DP324 ,
- AMD_PP_DisplayConfigType_DP27,
- AMD_PP_DisplayConfigType_DP243,
- AMD_PP_DisplayConfigType_DP216,
- AMD_PP_DisplayConfigType_DP162,
- AMD_PP_DisplayConfigType_HDMI6G ,
- AMD_PP_DisplayConfigType_HDMI297 ,
- AMD_PP_DisplayConfigType_HDMI162,
- AMD_PP_DisplayConfigType_LVDS,
- AMD_PP_DisplayConfigType_DVI,
- AMD_PP_DisplayConfigType_WIRELESS,
- AMD_PP_DisplayConfigType_VGA
-};
-
-struct single_display_configuration
-{
- uint32_t controller_index;
- uint32_t controller_id;
- uint32_t signal_type;
- uint32_t display_state;
- /* phy id for the primary internal transmitter */
- uint8_t primary_transmitter_phyi_d;
- /* bitmap with the active lanes */
- uint8_t primary_transmitter_active_lanemap;
- /* phy id for the secondary internal transmitter (for dual-link dvi) */
- uint8_t secondary_transmitter_phy_id;
- /* bitmap with the active lanes */
- uint8_t secondary_transmitter_active_lanemap;
- /* misc phy settings for SMU. */
- uint32_t config_flags;
- uint32_t display_type;
- uint32_t view_resolution_cx;
- uint32_t view_resolution_cy;
- enum amd_pp_display_config_type displayconfigtype;
- uint32_t vertical_refresh; /* for active display */
-};
-
-#define MAX_NUM_DISPLAY 32
-
-struct amd_pp_display_configuration {
- bool nb_pstate_switch_disable;/* controls NB PState switch */
- bool cpu_cc6_disable; /* controls CPU CState switch ( on or off) */
- bool cpu_pstate_disable;
- uint32_t cpu_pstate_separation_time;
-
- uint32_t num_display; /* total number of display*/
- uint32_t num_path_including_non_display;
- uint32_t crossfire_display_index;
- uint32_t min_mem_set_clock;
- uint32_t min_core_set_clock;
- /* unit 10KHz x bit*/
- uint32_t min_bus_bandwidth;
- /* minimum required stutter sclk, in 10khz uint32_t ulMinCoreSetClk;*/
- uint32_t min_core_set_clock_in_sr;
-
- struct single_display_configuration displays[MAX_NUM_DISPLAY];
-
- uint32_t vrefresh; /* for active display*/
-
- uint32_t min_vblank_time; /* for active display*/
- bool multi_monitor_in_sync;
- /* Controller Index of primary display - used in MCLK SMC switching hang
- * SW Workaround*/
- uint32_t crtc_index;
- /* htotal*1000/pixelclk - used in MCLK SMC switching hang SW Workaround*/
- uint32_t line_time_in_us;
- bool invalid_vblank_time;
-
- uint32_t display_clk;
- /*
- * for given display configuration if multimonitormnsync == false then
- * Memory clock DPMS with this latency or below is allowed, DPMS with
- * higher latency not allowed.
- */
- uint32_t dce_tolerable_mclk_in_active_latency;
- uint32_t min_dcef_set_clk;
- uint32_t min_dcef_deep_sleep_set_clk;
-};
-
-struct amd_pp_simple_clock_info {
- uint32_t engine_max_clock;
- uint32_t memory_max_clock;
- uint32_t level;
-};
-
-enum PP_DAL_POWERLEVEL {
- PP_DAL_POWERLEVEL_INVALID = 0,
- PP_DAL_POWERLEVEL_ULTRALOW,
- PP_DAL_POWERLEVEL_LOW,
- PP_DAL_POWERLEVEL_NOMINAL,
- PP_DAL_POWERLEVEL_PERFORMANCE,
-
- PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW,
- PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW,
- PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL,
- PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE,
- PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1,
- PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1,
- PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1,
- PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1,
-};
-
-struct amd_pp_clock_info {
- uint32_t min_engine_clock;
- uint32_t max_engine_clock;
- uint32_t min_memory_clock;
- uint32_t max_memory_clock;
- uint32_t min_bus_bandwidth;
- uint32_t max_bus_bandwidth;
- uint32_t max_engine_clock_in_sr;
- uint32_t min_engine_clock_in_sr;
- enum PP_DAL_POWERLEVEL max_clocks_state;
-};
-
-enum amd_pp_clock_type {
- amd_pp_disp_clock = 1,
- amd_pp_sys_clock,
- amd_pp_mem_clock,
- amd_pp_dcef_clock,
- amd_pp_soc_clock,
- amd_pp_pixel_clock,
- amd_pp_phy_clock,
- amd_pp_dcf_clock,
- amd_pp_dpp_clock,
- amd_pp_f_clock = amd_pp_dcef_clock,
-};
-
-#define MAX_NUM_CLOCKS 16
-
-struct amd_pp_clocks {
- uint32_t count;
- uint32_t clock[MAX_NUM_CLOCKS];
- uint32_t latency[MAX_NUM_CLOCKS];
-};
-
-
-enum {
- PP_GROUP_UNKNOWN = 0,
- PP_GROUP_GFX = 1,
- PP_GROUP_SYS,
- PP_GROUP_MAX
-};
-
-struct pp_states_info {
- uint32_t nums;
- uint32_t states[16];
-};
-
-struct pp_gpu_power {
- uint32_t vddc_power;
- uint32_t vddci_power;
- uint32_t max_gpu_power;
- uint32_t average_gpu_power;
-};
-
-struct pp_display_clock_request {
- enum amd_pp_clock_type clock_type;
- uint32_t clock_freq_in_khz;
-};
-
-#define PP_GROUP_MASK 0xF0000000
-#define PP_GROUP_SHIFT 28
-
-#define PP_BLOCK_MASK 0x0FFFFF00
-#define PP_BLOCK_SHIFT 8
-
-#define PP_BLOCK_GFX_CG 0x01
-#define PP_BLOCK_GFX_MG 0x02
-#define PP_BLOCK_GFX_3D 0x04
-#define PP_BLOCK_GFX_RLC 0x08
-#define PP_BLOCK_GFX_CP 0x10
-#define PP_BLOCK_SYS_BIF 0x01
-#define PP_BLOCK_SYS_MC 0x02
-#define PP_BLOCK_SYS_ROM 0x04
-#define PP_BLOCK_SYS_DRM 0x08
-#define PP_BLOCK_SYS_HDP 0x10
-#define PP_BLOCK_SYS_SDMA 0x20
-
-#define PP_STATE_MASK 0x0000000F
-#define PP_STATE_SHIFT 0
-#define PP_STATE_SUPPORT_MASK 0x000000F0
-#define PP_STATE_SUPPORT_SHIFT 0
-
-#define PP_STATE_CG 0x01
-#define PP_STATE_LS 0x02
-#define PP_STATE_DS 0x04
-#define PP_STATE_SD 0x08
-#define PP_STATE_SUPPORT_CG 0x10
-#define PP_STATE_SUPPORT_LS 0x20
-#define PP_STATE_SUPPORT_DS 0x40
-#define PP_STATE_SUPPORT_SD 0x80
-
-#define PP_CG_MSG_ID(group, block, support, state) (group << PP_GROUP_SHIFT |\
- block << PP_BLOCK_SHIFT |\
- support << PP_STATE_SUPPORT_SHIFT |\
- state << PP_STATE_SHIFT)
-
-struct amd_powerplay {
- struct cgs_device *cgs_device;
- void *pp_handle;
- const struct amd_ip_funcs *ip_funcs;
- const struct amd_pm_funcs *pp_funcs;
-};
-
-int amd_powerplay_reset(void *handle);
-
-int amd_powerplay_display_configuration_change(void *handle,
- const struct amd_pp_display_configuration *input);
-
-int amd_powerplay_get_display_power_level(void *handle,
- struct amd_pp_simple_clock_info *output);
-
-int amd_powerplay_get_current_clocks(void *handle,
- struct amd_pp_clock_info *output);
-
-int amd_powerplay_get_clock_by_type(void *handle,
- enum amd_pp_clock_type type,
- struct amd_pp_clocks *clocks);
-
-int amd_powerplay_get_clock_by_type_with_latency(void *handle,
- enum amd_pp_clock_type type,
- struct pp_clock_levels_with_latency *clocks);
-
-int amd_powerplay_get_clock_by_type_with_voltage(void *handle,
- enum amd_pp_clock_type type,
- struct pp_clock_levels_with_voltage *clocks);
-
-int amd_powerplay_set_watermarks_for_clocks_ranges(void *handle,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
-
-int amd_powerplay_display_clock_voltage_request(void *handle,
- struct pp_display_clock_request *clock);
-
-int amd_powerplay_get_display_mode_validation_clocks(void *handle,
- struct amd_pp_simple_clock_info *output);
-
+#include "kgd_pp_interface.h"
+#include "amdgpu.h"
#endif /* _AMD_POWERPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 57a0467..8b78bbe 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -232,6 +232,20 @@ enum phm_platform_caps {
PHM_PlatformCaps_UVDClientMCTuning,
PHM_PlatformCaps_ODNinACSupport,
PHM_PlatformCaps_ODNinDCSupport,
+ PHM_PlatformCaps_UMDPState,
+ PHM_PlatformCaps_AutoWattmanSupport,
+ PHM_PlatformCaps_AutoWattmanEnable_CCCState,
+ PHM_PlatformCaps_FreeSyncActive,
+ PHM_PlatformCaps_EnableShadowPstate,
+ PHM_PlatformCaps_customThermalManagement,
+ PHM_PlatformCaps_staticFanControl,
+ PHM_PlatformCaps_Virtual_System,
+ PHM_PlatformCaps_LowestUclkReservedForUlv,
+ PHM_PlatformCaps_EnableBoostState,
+ PHM_PlatformCaps_AVFSSupport,
+ PHM_PlatformCaps_ThermalPolicyDelay,
+ PHM_PlatformCaps_CustomFanControlSupport,
+ PHM_PlatformCaps_BAMACO,
PHM_PlatformCaps_Max
};
@@ -358,6 +372,17 @@ struct phm_clocks {
uint32_t clock[MAX_NUM_CLOCKS];
};
+#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
+#define DPMTABLE_UPDATE_SCLK 0x00000004
+#define DPMTABLE_UPDATE_MCLK 0x00000008
+#define DPMTABLE_OD_UPDATE_VDDC 0x00000010
+
+/* To determine if sclk and mclk are in overdrive state */
+#define SCLK_OVERDRIVE_ENABLED 0x00000001
+#define MCLK_OVERDRIVE_ENABLED 0x00000002
+#define VDDC_OVERDRIVE_ENABLED 0x00000010
+
struct phm_odn_performance_level {
uint32_t clock;
uint32_t vddc;
@@ -368,9 +393,9 @@ struct phm_odn_clock_levels {
uint32_t size;
uint32_t options;
uint32_t flags;
- uint32_t number_of_performance_levels;
- /* variable-sized array, specify by ulNumberOfPerformanceLevels. */
- struct phm_odn_performance_level performance_level_entries[8];
+ uint32_t num_of_pl;
+ /* variable-sized array, specify by num_of_pl. */
+ struct phm_odn_performance_level entries[8];
};
extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
@@ -392,8 +417,8 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level);
extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr);
extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
-extern int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info);
-extern int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range);
+extern int phm_register_irq_handlers(struct pp_hwmgr *hwmgr);
+extern int phm_start_thermal_controller(struct pp_hwmgr *hwmgr);
extern int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 004a40e..17f811d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -25,24 +25,20 @@
#include <linux/seq_file.h>
#include "amd_powerplay.h"
-#include "pp_instance.h"
#include "hardwaremanager.h"
#include "pp_power_source.h"
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
#include "power_state.h"
-#include "cgs_linux.h"
+#include "smu_helper.h"
-struct pp_instance;
struct pp_hwmgr;
struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;
#define VOLTAGE_SCALE 4
-uint8_t convert_to_vid(uint16_t vddc);
-
enum DISPLAY_GAP {
DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */
DISPLAY_GAP_VBLANK = 1, /* Wait for vblank. */
@@ -83,6 +79,7 @@ enum PP_FEATURE_MASK {
PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800,
PP_SOCCLK_DPM_MASK = 0x1000,
PP_DCEFCLK_DPM_MASK = 0x2000,
+ PP_OVERDRIVE_MASK = 0x4000,
};
enum PHM_BackEnd_Magic {
@@ -105,36 +102,6 @@ struct phm_set_power_state_input {
const struct pp_hw_power_state *pnew_state;
};
-struct phm_acp_arbiter {
- uint32_t acpclk;
-};
-
-struct phm_uvd_arbiter {
- uint32_t vclk;
- uint32_t dclk;
- uint32_t vclk_ceiling;
- uint32_t dclk_ceiling;
- uint32_t vclk_soft_min;
- uint32_t dclk_soft_min;
-};
-
-struct phm_vce_arbiter {
- uint32_t evclk;
- uint32_t ecclk;
-};
-
-struct phm_gfx_arbiter {
- uint32_t sclk;
- uint32_t sclk_hard_min;
- uint32_t mclk;
- uint32_t sclk_over_drive;
- uint32_t mclk_over_drive;
- uint32_t sclk_threshold;
- uint32_t num_cus;
- uint32_t gfxclk;
- uint32_t fclk;
-};
-
struct phm_clock_array {
uint32_t count;
uint32_t values[1];
@@ -263,9 +230,9 @@ struct pp_smumgr_func {
uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
uint32_t (*get_mac_definition)(uint32_t value);
bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
- int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
+ int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
+ int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
};
struct pp_hwmgr_func {
@@ -307,7 +274,6 @@ struct pp_hwmgr_func {
const uint32_t *msg_id);
int (*set_max_fan_rpm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm);
int (*set_max_fan_pwm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm);
- int (*get_temperature)(struct pp_hwmgr *hwmgr);
int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr);
int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
@@ -318,8 +284,7 @@ struct pp_hwmgr_func {
int (*get_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t *speed);
int (*reset_fan_speed_to_default)(struct pp_hwmgr *hwmgr);
int (*uninitialize_thermal_controller)(struct pp_hwmgr *hwmgr);
- int (*register_internal_thermal_interrupt)(struct pp_hwmgr *hwmgr,
- const void *thermal_interrupt_info);
+ int (*register_irq_handlers)(struct pp_hwmgr *hwmgr);
bool (*check_smc_update_required_for_display_configuration)(struct pp_hwmgr *hwmgr);
int (*check_states_equal)(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *pstate1,
@@ -356,8 +321,6 @@ struct pp_hwmgr_func {
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, void *value, int *size);
- int (*set_power_profile_state)(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
@@ -369,6 +332,15 @@ struct pp_hwmgr_func {
uint32_t mc_addr_low,
uint32_t mc_addr_hi,
uint32_t size);
+ int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range);
+ int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
+ int (*set_power_profile_mode)(struct pp_hwmgr *hwmgr, long *input, uint32_t size);
+ int (*odn_edit_dpm_table)(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+ int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
+ int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
};
struct pp_table_func {
@@ -609,6 +581,27 @@ struct phm_ppt_v2_information {
uint8_t uc_dcef_dpm_voltage_mode;
};
+struct phm_ppt_v3_information
+{
+ uint8_t uc_thermal_controller_type;
+
+ uint16_t us_small_power_limit1;
+ uint16_t us_small_power_limit2;
+ uint16_t us_boost_power_limit;
+
+ uint16_t us_od_turbo_power_limit;
+ uint16_t us_od_powersave_power_limit;
+ uint16_t us_software_shutdown_temp;
+
+ uint32_t *power_saving_clock_max;
+ uint32_t *power_saving_clock_min;
+
+ uint32_t *od_settings_max;
+ uint32_t *od_settings_min;
+
+ void *smc_pptable;
+};
+
struct phm_dynamic_state_info {
struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk;
struct phm_clock_voltage_dependency_table *vddci_dependency_on_mclk;
@@ -638,7 +631,6 @@ struct phm_dynamic_state_info {
struct phm_ppm_table *ppm_parameter_table;
struct phm_cac_tdp_table *cac_dtp_table;
struct phm_clock_voltage_dependency_table *vdd_gfx_dependency_on_sclk;
- struct phm_vq_budgeting_table *vq_budgeting_table;
};
struct pp_fan_info {
@@ -719,9 +711,15 @@ enum PP_TABLE_VERSION {
/**
* The main hardware manager structure.
*/
+#define Workload_Policy_Max 5
+
struct pp_hwmgr {
+ void *adev;
uint32_t chip_family;
uint32_t chip_id;
+ uint32_t smu_version;
+ bool pm_en;
+ struct mutex smu_lock;
uint32_t pp_table_version;
void *device;
@@ -737,10 +735,6 @@ struct pp_hwmgr {
enum amd_dpm_forced_level dpm_level;
enum amd_dpm_forced_level saved_dpm_level;
enum amd_dpm_forced_level request_dpm_level;
- struct phm_gfx_arbiter gfx_arbiter;
- struct phm_acp_arbiter acp_arbiter;
- struct phm_uvd_arbiter uvd_arbiter;
- struct phm_vce_arbiter vce_arbiter;
uint32_t usec_timeout;
void *pptable;
struct phm_platform_descriptor platform_descriptor;
@@ -772,181 +766,32 @@ struct pp_hwmgr {
struct pp_power_state *uvd_ps;
struct amd_pp_display_configuration display_config;
uint32_t feature_mask;
-
+ bool avfs_supported;
/* UMD Pstate */
- struct amd_pp_profile gfx_power_profile;
- struct amd_pp_profile compute_power_profile;
- struct amd_pp_profile default_gfx_power_profile;
- struct amd_pp_profile default_compute_power_profile;
- enum amd_pp_profile_type current_power_profile;
bool en_umd_pstate;
-};
-
-struct cgs_irq_src_funcs {
- cgs_irq_source_set_func_t set;
- cgs_irq_handler_func_t handler;
-};
-
-extern int hwmgr_early_init(struct pp_instance *handle);
-extern int hwmgr_hw_init(struct pp_instance *handle);
-extern int hwmgr_hw_fini(struct pp_instance *handle);
-extern int hwmgr_hw_suspend(struct pp_instance *handle);
-extern int hwmgr_hw_resume(struct pp_instance *handle);
-extern int hwmgr_handle_task(struct pp_instance *handle,
+ uint32_t power_profile_mode;
+ uint32_t default_power_profile_mode;
+ uint32_t pstate_sclk;
+ uint32_t pstate_mclk;
+ bool od_enabled;
+ uint32_t power_limit;
+ uint32_t default_power_limit;
+ uint32_t workload_mask;
+ uint32_t workload_prority[Workload_Policy_Max];
+ uint32_t workload_setting[Workload_Policy_Max];
+};
+
+int hwmgr_early_init(struct pp_hwmgr *hwmgr);
+int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
+int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
+int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr);
+int hwmgr_hw_resume(struct pp_hwmgr *hwmgr);
+int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
enum amd_pp_task task_id,
- void *input, void *output);
-extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
- uint32_t value, uint32_t mask);
-
-extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask);
-
-extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t index,
- uint32_t value, uint32_t mask);
-extern int phm_wait_for_indirect_register_unequal(
- struct pp_hwmgr *hwmgr,
- uint32_t indirect_port, uint32_t index,
- uint32_t value, uint32_t mask);
-
-
-extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
-extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
-extern bool phm_cf_want_microcode_fan_ctrl(struct pp_hwmgr *hwmgr);
-
-extern int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table);
-extern int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
-extern int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_clock_voltage_dependency_table *dep_table);
-extern int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, phm_ppt_v1_voltage_lookup_table *lookup_table);
-extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table);
-extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
-extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
-extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
-extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
- uint32_t voltage);
-extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
-extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
-extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
-extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
- uint16_t virtual_voltage_id, int32_t *sclk);
-extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
-extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
-
-extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
-extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
-
-extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t id, uint16_t *voltage);
+ enum amd_pm_state_type *user_state);
-#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
-#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
-#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
-
-#define PHM_SET_FIELD(origval, reg, field, fieldval) \
- (((origval) & ~PHM_FIELD_MASK(reg, field)) | \
- (PHM_FIELD_MASK(reg, field) & ((fieldval) << PHM_FIELD_SHIFT(reg, field))))
-
-#define PHM_GET_FIELD(value, reg, field) \
- (((value) & PHM_FIELD_MASK(reg, field)) >> \
- PHM_FIELD_SHIFT(reg, field))
-
-
-/* Operations on named fields. */
-
-#define PHM_READ_FIELD(device, reg, field) \
- PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
-
-#define PHM_READ_INDIRECT_FIELD(device, port, reg, field) \
- PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define PHM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
- PHM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define PHM_WRITE_FIELD(device, reg, field, fieldval) \
- cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
- cgs_read_register(device, mm##reg), reg, field, fieldval))
-
-#define PHM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-#define PHM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
-
-
-#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
- PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_for_indirect_register_unequal(hwmgr, \
- mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field) )
-
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
- port, index, value, mask) \
- phm_wait_for_indirect_register_unequal(hwmgr, \
- mm##port##_INDEX_11, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
-
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
- port, index, value, mask) \
- phm_wait_on_indirect_register(hwmgr, \
- mm##port##_INDEX_11, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
- index, value, mask) \
- phm_wait_for_register_unequal(hwmgr, \
- index, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
- PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
- mm##reg, value, mask)
+#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
-#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
- PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
- (fieldval) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
#endif /* _HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
deleted file mode 100644
index b8f4b73..0000000
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
+++ /dev/null
@@ -1,412 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef POLARIS10_PP_SMC_H
-#define POLARIS10_PP_SMC_H
-
-
-#pragma pack(push, 1)
-
-#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
-
-#define PPSMC_SWSTATE_FLAG_DC 0x01
-#define PPSMC_SWSTATE_FLAG_UVD 0x02
-#define PPSMC_SWSTATE_FLAG_VCE 0x04
-
-#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
-#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
-#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
-
-#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
-#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
-#define PPSMC_SYSTEMFLAG_GDDR5 0x04
-
-#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
-
-#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
-#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
-
-#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
-#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
-
-#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
-#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
-
-
-#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
-#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
-#define PPSMC_DPM2FLAGS_OCP 0x04
-
-
-#define PPSMC_DISPLAY_WATERMARK_LOW 0
-#define PPSMC_DISPLAY_WATERMARK_HIGH 1
-
-
-#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
-#define PPSMC_STATEFLAG_POWERBOOST 0x02
-#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
-#define PPSMC_STATEFLAG_POWERSHIFT 0x08
-#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
-#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
-#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
-
-
-#define FDO_MODE_HARDWARE 0
-#define FDO_MODE_PIECE_WISE_LINEAR 1
-
-enum FAN_CONTROL {
- FAN_CONTROL_FUZZY,
- FAN_CONTROL_TABLE
-};
-
-
-#define PPSMC_Result_OK ((uint16_t)0x01)
-#define PPSMC_Result_NoMore ((uint16_t)0x02)
-
-#define PPSMC_Result_NotNow ((uint16_t)0x03)
-#define PPSMC_Result_Failed ((uint16_t)0xFF)
-#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
-#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
-
-typedef uint16_t PPSMC_Result;
-
-#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
-
-
-#define PPSMC_MSG_Halt ((uint16_t)0x10)
-#define PPSMC_MSG_Resume ((uint16_t)0x11)
-#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
-#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
-#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
-#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
-#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
-#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
-#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
-#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
-#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
-#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
-#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
-#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
-#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
-#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
-#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
-#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
-#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
-#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
-#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
-#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
-#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
-#define PPSMC_CACHistoryStart ((uint16_t)0x57)
-#define PPSMC_CACHistoryStop ((uint16_t)0x58)
-#define PPSMC_TDPClampingActive ((uint16_t)0x59)
-#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
-#define PPSMC_StartFanControl ((uint16_t)0x5B)
-#define PPSMC_StopFanControl ((uint16_t)0x5C)
-#define PPSMC_NoDisplay ((uint16_t)0x5D)
-#define PPSMC_HasDisplay ((uint16_t)0x5E)
-#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
-#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
-#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
-#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
-#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
-#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
-#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
-#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
-#define PPSMC_OCPActive ((uint16_t)0x6C)
-#define PPSMC_OCPInactive ((uint16_t)0x6D)
-#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
-#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
-#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
-#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
-#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
-#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
-#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
-#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
-#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
-#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
-#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
-#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
-#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
-#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
-#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
-#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
-
-#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
-#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
-#define PPSMC_FlushDataCache ((uint16_t)0x80)
-#define PPSMC_FlushInstrCache ((uint16_t)0x81)
-
-#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
-#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
-
-#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
-
-#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
-#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
-#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
-#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
-
-#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
-#define PPSM_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
-#define PPSM_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
-#define PPSM_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
-
-#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
-
-#define PPSMC_MSG_Test ((uint16_t) 0x100)
-#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101)
-#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102)
-#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103)
-#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
-#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105)
-#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106)
-#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107)
-#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108)
-#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109)
-#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a)
-#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b)
-#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e)
-#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f)
-#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110)
-#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111)
-#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112)
-#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113)
-#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114)
-#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117)
-#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118)
-#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119)
-#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a)
-#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b)
-#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c)
-#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
-#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e)
-#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f)
-#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120)
-#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121)
-#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122)
-#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123)
-#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124)
-#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125)
-#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126)
-#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127)
-#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128)
-
-#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129)
-#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A)
-#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B)
-#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C)
-#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
-#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
-#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
-#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
-#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
-#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
-#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
-#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134)
-#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
-#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
-#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
-#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
-#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
-#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
-#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b)
-#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c)
-#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
-#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e)
-#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f)
-#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
-#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
-#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142)
-#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143)
-#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144)
-#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
-#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
-#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
-#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
-#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
-#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
-#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b)
-#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c)
-#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d)
-
-#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
-#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
-#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
-#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
-#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152)
-#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153)
-#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
-#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
-#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
-#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
-#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
-#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
-#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
-#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
-#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c)
-#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d)
-#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e)
-#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
-#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160)
-#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161)
-#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
-#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163)
-#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164)
-#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165)
-#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166)
-#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
-#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168)
-#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
-#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
-#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b)
-#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c)
-#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d)
-#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e)
-#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f)
-#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170)
-#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171)
-#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172)
-#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173)
-#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174)
-#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175)
-#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176)
-#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177)
-#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178)
-#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179)
-#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a)
-#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b)
-#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c)
-#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d)
-#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e)
-#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f)
-#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180)
-#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181)
-#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182)
-#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184)
-#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
-#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
-#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
-#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
-#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
-#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
-#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
-#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
-#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D)
-#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E)
-#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
-#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
-#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
-#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192)
-#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193)
-#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194)
-#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195)
-#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207)
-#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196)
-#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208)
-#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197)
-#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198)
-#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199)
-#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
-#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B)
-#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
-#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
-
-#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
-#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
-#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202)
-#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203)
-#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204)
-#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
-#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206)
-#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209)
-#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A)
-
-#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240)
-#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241)
-#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242)
-#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243)
-#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244)
-#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245)
-#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246)
-
-#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
-#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
-#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
-#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
-#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
-#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255)
-#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256)
-#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257)
-#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258)
-#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259)
-#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A)
-#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B)
-#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C)
-#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D)
-#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260)
-#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261)
-#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262)
-#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263)
-#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264)
-#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
-#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266)
-#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267)
-#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268)
-#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269)
-#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A)
-#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B)
-
-#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C)
-#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275)
-#define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277)
-#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400)
-#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401)
-#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402)
-#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
-#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
-
-#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
-#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
-#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
-
-#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
-#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
-
-#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
-
-#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
-#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
-#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
-#define PPSMC_MSG_GetData ((uint16_t) 0x801)
-#define PPSMC_MSG_SetData ((uint16_t) 0x802)
-
-typedef uint16_t PPSMC_Msg;
-
-#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
-#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
-#define PPSMC_EVENT_STATUS_DC 0x00000004
-
-#pragma pack(pop)
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index 827860f..a99b5cbb 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -122,8 +122,8 @@ struct PP_StateSoftwareAlgorithmBlock {
* Type to hold a temperature range.
*/
struct PP_TemperatureRange {
- uint32_t min;
- uint32_t max;
+ int min;
+ int max;
};
struct PP_StateValidationBlock {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h b/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h
deleted file mode 100644
index 0faf6a2..0000000
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_feature.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _PP_FEATURE_H_
-#define _PP_FEATURE_H_
-
-/**
- * PowerPlay feature ids.
- */
-enum pp_feature {
- PP_Feature_PowerPlay = 0,
- PP_Feature_User2DPerformance,
- PP_Feature_User3DPerformance,
- PP_Feature_VariBright,
- PP_Feature_VariBrightOnPowerXpress,
- PP_Feature_ReducedRefreshRate,
- PP_Feature_GFXClockGating,
- PP_Feature_OverdriveTest,
- PP_Feature_OverDrive,
- PP_Feature_PowerBudgetWaiver,
- PP_Feature_PowerControl,
- PP_Feature_PowerControl_2,
- PP_Feature_MultiUVDState,
- PP_Feature_Force3DClock,
- PP_Feature_BACO,
- PP_Feature_PowerDown,
- PP_Feature_DynamicUVDState,
- PP_Feature_VCEDPM,
- PP_Feature_PPM,
- PP_Feature_ACP_POWERGATING,
- PP_Feature_FFC,
- PP_Feature_FPS,
- PP_Feature_ViPG,
- PP_Feature_Max
-};
-
-/**
- * Struct for PowerPlay feature info.
- */
-struct pp_feature_info {
- bool supported; /* feature supported by PowerPlay */
- bool enabled; /* feature enabled in PowerPlay */
- bool enabled_default; /* default enable status of the feature */
- uint32_t version; /* feature version */
-};
-
-#endif /* _PP_FEATURE_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
index a511611..214f370 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
@@ -23,7 +23,8 @@
#ifndef PP_SOC15_H
#define PP_SOC15_H
-#include "vega10/soc15ip.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
inline static uint32_t soc15_get_register_offset(
uint32_t hw_id,
@@ -43,7 +44,8 @@ inline static uint32_t soc15_get_register_offset(
reg = DF_BASE.instance[inst].segment[segment] + offset;
else if (hw_id == GC_HWID)
reg = GC_BASE.instance[inst].segment[segment] + offset;
-
+ else if (hw_id == SMUIO_HWID)
+ reg = SMUIO_BASE.instance[inst].segment[segment] + offset;
return reg;
}
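
With SMUIO_HWID handled, callers can resolve SMUIO registers through the same helper as the other IP blocks. A minimal sketch of a call site; the instance/segment/offset values are placeholders, and the argument list is inferred from the function body above rather than confirmed by this hunk:

	/* Hypothetical caller; instance 0 and the seg/offset names are
	 * illustrative only, not registers defined by this patch. */
	uint32_t reg = soc15_get_register_offset(SMUIO_HWID, 0,
						 seg_idx, reg_offset);
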
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
index 0c1593e..201d2b6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,28 +20,21 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef PP_ASICBLOCKS_H
-#define PP_ASICBLOCKS_H
+#ifndef PP_THERMAL_H
+#define PP_THERMAL_H
+#include "power_state.h"
-enum PHM_AsicBlock {
- PHM_AsicBlock_GFX,
- PHM_AsicBlock_UVD_MVC,
- PHM_AsicBlock_UVD,
- PHM_AsicBlock_UVD_HD,
- PHM_AsicBlock_UVD_SD,
- PHM_AsicBlock_Count
+static const struct PP_TemperatureRange SMU7ThermalWithDelayPolicy[] =
+{
+ {-273150, 99000},
+ { 120000, 120000},
};
-enum PHM_ClockGateSetting {
- PHM_ClockGateSetting_StaticOn,
- PHM_ClockGateSetting_StaticOff,
- PHM_ClockGateSetting_Dynamic
-};
-
-struct phm_asic_blocks {
- bool gfx : 1;
- bool uvd : 1;
+static const struct PP_TemperatureRange SMU7ThermalPolicy[] =
+{
+ {-273150, 99000},
+ { 120000, 120000},
};
#endif
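
The PP_TemperatureRange members switch from uint32_t to int above because these new policy tables express temperatures in millidegrees Celsius, and the -273150 floor (absolute zero) only fits in a signed type. A minimal sketch of how a requested range might be clamped against one of these rows; the helper name and calling convention are assumptions, not part of this patch:

	/* Hypothetical clamp helper; values are millidegrees Celsius. */
	static void clamp_thermal_range(struct PP_TemperatureRange *range,
					const struct PP_TemperatureRange *policy)
	{
		if (range->min < policy->min)
			range->min = policy->min;	/* e.g. -273150 */
		if (range->max > policy->max)
			range->max = policy->max;	/* e.g. 99000 */
	}
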
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 2b34971..426bff2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -75,7 +75,13 @@
#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C
#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D
#define PPSMC_MSG_SoftReset 0x2E
-#define PPSMC_Message_Count 0x2F
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x30
+#define PPSMC_MSG_SetHardMinGfxClk 0x31
+#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32
+#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33
+#define PPSMC_MSG_SetSoftMaxVcn 0x34
+#define PPSMC_MSG_PowerGateMmHub 0x35
+#define PPSMC_Message_Count 0x36
typedef uint16_t PPSMC_Result;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7.h b/drivers/gpu/drm/amd/powerplay/inc/smu7.h
index 75a380a..e14072d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7.h
@@ -82,6 +82,25 @@
#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+/* Voltage Regulator Configuration */
+/* VR Config info is contained in dpmTable */
+
+#define VRCONF_VDDC_MASK 0x000000FF
+#define VRCONF_VDDC_SHIFT 0
+#define VRCONF_VDDGFX_MASK 0x0000FF00
+#define VRCONF_VDDGFX_SHIFT 8
+#define VRCONF_VDDCI_MASK 0x00FF0000
+#define VRCONF_VDDCI_SHIFT 16
+#define VRCONF_MVDD_MASK 0xFF000000
+#define VRCONF_MVDD_SHIFT 24
+
+#define VR_MERGED_WITH_VDDC 0
+#define VR_SVI2_PLANE_1 1
+#define VR_SVI2_PLANE_2 2
+#define VR_SMIO_PATTERN_1 3
+#define VR_SMIO_PATTERN_2 4
+#define VR_STATIC_VOLTAGE 5
+
struct SMU7_PIDController
{
uint32_t Ki;
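
The VRCONF_* masks and shifts describe how the four voltage rails are packed into the single VRConfig word that SMU7_Discrete_DpmTable gains below, and the ci_populate_vr_config() hunk later in this patch ORs VR_SVI2_PLANE_*/VR_SMIO_PATTERN_* codes into it. A minimal packing sketch; the rail assignments here are illustrative, not what any particular board uses:

	/* Illustrative packing only; real code derives each code from
	 * the hwmgr voltage-control settings. */
	uint32_t vr_config = 0;
	vr_config |= (VR_SVI2_PLANE_1   << VRCONF_VDDC_SHIFT)   & VRCONF_VDDC_MASK;
	vr_config |= (VR_SVI2_PLANE_2   << VRCONF_VDDGFX_SHIFT) & VRCONF_VDDGFX_MASK;
	vr_config |= (VR_SMIO_PATTERN_1 << VRCONF_VDDCI_SHIFT)  & VRCONF_VDDCI_MASK;
	vr_config |= (VR_STATIC_VOLTAGE << VRCONF_MVDD_SHIFT)   & VRCONF_MVDD_MASK;
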
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h
index 0b0b404..ee87674 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h
@@ -316,7 +316,8 @@ struct SMU7_Discrete_DpmTable
uint8_t AcpLevelCount;
uint8_t SamuLevelCount;
uint8_t MasterDeepSleepControl;
- uint32_t Reserved[5];
+ uint32_t VRConfig;
+ uint32_t Reserved[4];
// uint32_t SamuDefaultLevel;
SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS];
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9.h b/drivers/gpu/drm/amd/powerplay/inc/smu9.h
index 550ed67..70ac4d4 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9.h
@@ -58,7 +58,7 @@
#define FEATURE_FAST_PPT_BIT 26
#define FEATURE_GFX_EDC_BIT 27
#define FEATURE_ACG_BIT 28
-#define FEATURE_SPARE_29_BIT 29
+#define FEATURE_PCC_LIMIT_CONTROL_BIT 29
#define FEATURE_SPARE_30_BIT 30
#define FEATURE_SPARE_31_BIT 31
@@ -94,7 +94,7 @@
#define FEATURE_FAST_PPT_MASK (1 << FAST_PPT_BIT )
#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT )
-#define FFEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT )
+#define FEATURE_PCC_LIMIT_CONTROL_MASK (1 << FEATURE_PCC_LIMIT_CONTROL_BIT )
#define FFEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT )
#define FFEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT )
/* Workload types */
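
Spare feature bit 29 is repurposed as FEATURE_PCC_LIMIT_CONTROL, and the matching PPSMC_MSG_SetPccThrottleLevel message is added to vega10_ppsmc.h further down. A hedged sketch of gating that message on the reported feature mask; the enabled_features variable and level value are assumptions:

	/* Hypothetical: only program a PCC throttle level when the SMU
	 * firmware reports the feature as enabled. */
	if (enabled_features & FEATURE_PCC_LIMIT_CONTROL_MASK)
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetPccThrottleLevel, level);
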
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index b1b27b2..6c22ed9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -26,32 +26,6 @@
#include "amd_powerplay.h"
#include "hwmgr.h"
-#define smu_lower_32_bits(n) ((uint32_t)(n))
-#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
-
-
-
-enum AVFS_BTC_STATUS {
- AVFS_BTC_BOOT = 0,
- AVFS_BTC_BOOT_STARTEDSMU,
- AVFS_LOAD_VIRUS,
- AVFS_BTC_VIRUS_LOADED,
- AVFS_BTC_VIRUS_FAIL,
- AVFS_BTC_COMPLETED_PREVIOUSLY,
- AVFS_BTC_ENABLEAVFS,
- AVFS_BTC_STARTED,
- AVFS_BTC_FAILED,
- AVFS_BTC_RESTOREVFT_FAILED,
- AVFS_BTC_SAVEVFT_FAILED,
- AVFS_BTC_DPMTABLESETUP_FAILED,
- AVFS_BTC_COMPLETED_UNSAVED,
- AVFS_BTC_COMPLETED_SAVED,
- AVFS_BTC_COMPLETED_RESTORED,
- AVFS_BTC_DISABLED,
- AVFS_BTC_NOTSUPPORTED,
- AVFS_BTC_SMUMSG_ERROR
-};
-
enum SMU_TABLE {
SMU_UVD_TABLE = 0,
SMU_VCE_TABLE,
@@ -95,6 +69,19 @@ enum SMU_MAC_DEFINITION {
SMU_UVD_MCLK_HANDSHAKE_DISABLE,
};
+enum SMU9_TABLE_ID {
+ PPTABLE = 0,
+ WMTABLE,
+ AVFSTABLE,
+ TOOLSTABLE,
+ AVFSFUSETABLE
+};
+
+enum SMU10_TABLE_ID {
+ SMU10_WMTABLE = 0,
+ SMU10_CLOCKTABLE,
+};
+
extern int smum_get_argument(struct pp_hwmgr *hwmgr);
extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
@@ -106,13 +93,6 @@ extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
-extern int smu_allocate_memory(void *device, uint32_t size,
- enum cgs_gpu_mem_type type,
- uint32_t byte_align, uint64_t *mc_addr,
- void **kptr, void *handle);
-
-extern int smu_free_memory(void *device, void *handle);
-
extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
@@ -129,10 +109,10 @@ extern uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value);
extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
-extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
-
extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr);
+extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting);
+
+extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
#endif
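
smum_smc_table_manager() replaces the per-ASIC copy-to/from-SMC helpers and takes one of the new table IDs declared above. A hedged usage sketch; the direction encoded by the rw flag (false meaning "write to the SMU" here) is inferred from call sites, not spelled out by this header:

	/* Hypothetical call site: upload a watermark table. */
	ret = smum_smc_table_manager(hwmgr, (uint8_t *)&wm_table,
				     WMTABLE, false /* driver -> SMU */);
	if (ret)
		pr_err("uploading watermark table failed\n");
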
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index 247c973..c3ed737 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -131,6 +131,7 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_RunAcgInOpenLoop 0x5E
#define PPSMC_MSG_InitializeAcg 0x5F
#define PPSMC_MSG_GetCurrPkgPwr 0x61
+#define PPSMC_MSG_SetPccThrottleLevel 0x67
#define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68
#define PPSMC_Message_Count 0x69
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
new file mode 100644
index 0000000..cd2e503
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -0,0 +1,758 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VEGA12_SMU9_DRIVER_IF_H
+#define VEGA12_SMU9_DRIVER_IF_H
+
+/**** IMPORTANT ***
+ * SMU TEAM: Always increment the interface version if
+ * any structure is changed in this file
+ */
+#define SMU9_DRIVER_IF_VERSION 0x10
+
+#define PPTABLE_V12_SMU_VERSION 1
+
+#define NUM_GFXCLK_DPM_LEVELS 16
+#define NUM_VCLK_DPM_LEVELS 8
+#define NUM_DCLK_DPM_LEVELS 8
+#define NUM_ECLK_DPM_LEVELS 8
+#define NUM_MP0CLK_DPM_LEVELS 2
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 8
+#define NUM_DCEFCLK_DPM_LEVELS 8
+#define NUM_DISPCLK_DPM_LEVELS 8
+#define NUM_PIXCLK_DPM_LEVELS 8
+#define NUM_PHYCLK_DPM_LEVELS 8
+#define NUM_LINK_LEVELS 2
+
+#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1)
+#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1)
+#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1)
+#define MAX_ECLK_DPM_LEVEL (NUM_ECLK_DPM_LEVELS - 1)
+#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1)
+#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1)
+#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1)
+#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1)
+#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1)
+#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1)
+#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1)
+#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1)
+
+
+#define PPSMC_GeminiModeNone 0
+#define PPSMC_GeminiModeMaster 1
+#define PPSMC_GeminiModeSlave 2
+
+
+#define FEATURE_DPM_PREFETCHER_BIT 0
+#define FEATURE_DPM_GFXCLK_BIT 1
+#define FEATURE_DPM_UCLK_BIT 2
+#define FEATURE_DPM_SOCCLK_BIT 3
+#define FEATURE_DPM_UVD_BIT 4
+#define FEATURE_DPM_VCE_BIT 5
+#define FEATURE_ULV_BIT 6
+#define FEATURE_DPM_MP0CLK_BIT 7
+#define FEATURE_DPM_LINK_BIT 8
+#define FEATURE_DPM_DCEFCLK_BIT 9
+#define FEATURE_DS_GFXCLK_BIT 10
+#define FEATURE_DS_SOCCLK_BIT 11
+#define FEATURE_DS_LCLK_BIT 12
+#define FEATURE_PPT_BIT 13
+#define FEATURE_TDC_BIT 14
+#define FEATURE_THERMAL_BIT 15
+#define FEATURE_GFX_PER_CU_CG_BIT 16
+#define FEATURE_RM_BIT 17
+#define FEATURE_DS_DCEFCLK_BIT 18
+#define FEATURE_ACDC_BIT 19
+#define FEATURE_VR0HOT_BIT 20
+#define FEATURE_VR1HOT_BIT 21
+#define FEATURE_FW_CTF_BIT 22
+#define FEATURE_LED_DISPLAY_BIT 23
+#define FEATURE_FAN_CONTROL_BIT 24
+#define FEATURE_GFX_EDC_BIT 25
+#define FEATURE_GFXOFF_BIT 26
+#define FEATURE_CG_BIT 27
+#define FEATURE_ACG_BIT 28
+#define FEATURE_SPARE_29_BIT 29
+#define FEATURE_SPARE_30_BIT 30
+#define FEATURE_SPARE_31_BIT 31
+
+#define NUM_FEATURES 32
+
+#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT )
+#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT )
+#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT )
+#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT )
+#define FEATURE_DPM_UVD_MASK (1 << FEATURE_DPM_UVD_BIT )
+#define FEATURE_DPM_VCE_MASK (1 << FEATURE_DPM_VCE_BIT )
+#define FEATURE_ULV_MASK (1 << FEATURE_ULV_BIT )
+#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT )
+#define FEATURE_DPM_LINK_MASK (1 << FEATURE_DPM_LINK_BIT )
+#define FEATURE_DPM_DCEFCLK_MASK (1 << FEATURE_DPM_DCEFCLK_BIT )
+#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT )
+#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT )
+#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
+#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT )
+#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT )
+#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT )
+#define FEATURE_GFX_PER_CU_CG_MASK (1 << FEATURE_GFX_PER_CU_CG_BIT )
+#define FEATURE_RM_MASK (1 << FEATURE_RM_BIT )
+#define FEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT )
+#define FEATURE_ACDC_MASK (1 << FEATURE_ACDC_BIT )
+#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT )
+#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT )
+#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
+#define FEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT )
+#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
+#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
+#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT )
+#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT )
+#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT )
+#define FEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT )
+#define FEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT )
+#define FEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT )
+
+
+#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
+#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
+#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_SOCCLK 0x00000004
+#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_UCLK 0x00000008
+#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000010
+#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_UCLK 0x00000020
+#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000040
+#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_UCLK 0x00000080
+#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_SOCCLK 0x00000100
+#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_UCLK 0x00000200
+#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_SOCCLK 0x00000400
+#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_UCLK 0x00000800
+#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00001000
+#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00002000
+#define DPM_OVERRIDE_ENABLE_GFXOFF_GFXCLK_SWITCH 0x00004000
+#define DPM_OVERRIDE_ENABLE_GFXOFF_SOCCLK_SWITCH 0x00008000
+#define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000
+
+
+#define VR_MAPPING_VR_SELECT_MASK 0x01
+#define VR_MAPPING_VR_SELECT_SHIFT 0x00
+
+#define VR_MAPPING_PLANE_SELECT_MASK 0x02
+#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01
+
+
+#define PSI_SEL_VR0_PLANE0_PSI0 0x01
+#define PSI_SEL_VR0_PLANE0_PSI1 0x02
+#define PSI_SEL_VR0_PLANE1_PSI0 0x04
+#define PSI_SEL_VR0_PLANE1_PSI1 0x08
+#define PSI_SEL_VR1_PLANE0_PSI0 0x10
+#define PSI_SEL_VR1_PLANE0_PSI1 0x20
+#define PSI_SEL_VR1_PLANE1_PSI0 0x40
+#define PSI_SEL_VR1_PLANE1_PSI1 0x80
+
+
+#define THROTTLER_STATUS_PADDING_BIT 0
+#define THROTTLER_STATUS_TEMP_EDGE_BIT 1
+#define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2
+#define THROTTLER_STATUS_TEMP_HBM_BIT 3
+#define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4
+#define THROTTLER_STATUS_TEMP_VR_MEM_BIT 5
+#define THROTTLER_STATUS_TEMP_LIQUID_BIT 6
+#define THROTTLER_STATUS_TEMP_PLX_BIT 7
+#define THROTTLER_STATUS_TEMP_SKIN_BIT 8
+#define THROTTLER_STATUS_TDC_GFX_BIT 9
+#define THROTTLER_STATUS_TDC_SOC_BIT 10
+#define THROTTLER_STATUS_PPT_BIT 11
+#define THROTTLER_STATUS_FIT_BIT 12
+#define THROTTLER_STATUS_PPM_BIT 13
+
+
+#define TABLE_TRANSFER_OK 0x0
+#define TABLE_TRANSFER_FAILED 0xFF
+
+
+#define WORKLOAD_DEFAULT_BIT 0
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
+#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
+#define WORKLOAD_PPLIB_VIDEO_BIT 3
+#define WORKLOAD_PPLIB_VR_BIT 4
+#define WORKLOAD_PPLIB_COMPUTE_BIT 5
+#define WORKLOAD_PPLIB_CUSTOM_BIT 6
+#define WORKLOAD_PPLIB_COUNT 7
+
+typedef struct {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+} QuadraticInt_t;
+
+typedef struct {
+ uint32_t m;
+ uint32_t b;
+} LinearInt_t;
+
+typedef struct {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+} DroopInt_t;
+
+typedef enum {
+ PPCLK_GFXCLK,
+ PPCLK_VCLK,
+ PPCLK_DCLK,
+ PPCLK_ECLK,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_DCEFCLK,
+ PPCLK_DISPCLK,
+ PPCLK_PIXCLK,
+ PPCLK_PHYCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+enum {
+ VOLTAGE_MODE_AVFS,
+ VOLTAGE_MODE_AVFS_SS,
+ VOLTAGE_MODE_SS,
+ VOLTAGE_MODE_COUNT,
+};
+
+typedef struct {
+ uint8_t VoltageMode;
+ uint8_t SnapToDiscrete;
+ uint8_t NumDiscreteLevels;
+ uint8_t padding;
+ LinearInt_t ConversionToAvfsClk;
+ QuadraticInt_t SsCurve;
+} DpmDescriptor_t;
+
+typedef struct {
+ uint32_t Version;
+
+
+ uint32_t FeaturesToRun[2];
+
+
+ uint16_t SocketPowerLimitAc0;
+ uint16_t SocketPowerLimitAc0Tau;
+ uint16_t SocketPowerLimitAc1;
+ uint16_t SocketPowerLimitAc1Tau;
+ uint16_t SocketPowerLimitAc2;
+ uint16_t SocketPowerLimitAc2Tau;
+ uint16_t SocketPowerLimitAc3;
+ uint16_t SocketPowerLimitAc3Tau;
+ uint16_t SocketPowerLimitDc;
+ uint16_t SocketPowerLimitDcTau;
+ uint16_t TdcLimitSoc;
+ uint16_t TdcLimitSocTau;
+ uint16_t TdcLimitGfx;
+ uint16_t TdcLimitGfxTau;
+
+ uint16_t TedgeLimit;
+ uint16_t ThotspotLimit;
+ uint16_t ThbmLimit;
+ uint16_t Tvr_gfxLimit;
+ uint16_t Tvr_memLimit;
+ uint16_t Tliquid1Limit;
+ uint16_t Tliquid2Limit;
+ uint16_t TplxLimit;
+ uint32_t FitLimit;
+
+ uint16_t PpmPowerLimit;
+ uint16_t PpmTemperatureThreshold;
+
+ uint8_t MemoryOnPackage;
+ uint8_t padding8_limits[3];
+
+
+ uint16_t UlvVoltageOffsetSoc;
+ uint16_t UlvVoltageOffsetGfx;
+
+ uint8_t UlvSmnclkDid;
+ uint8_t UlvMp1clkDid;
+ uint8_t UlvGfxclkBypass;
+ uint8_t Padding234;
+
+
+ uint16_t MinVoltageGfx;
+ uint16_t MinVoltageSoc;
+ uint16_t MaxVoltageGfx;
+ uint16_t MaxVoltageSoc;
+
+ uint16_t LoadLineResistance;
+ uint16_t LoadLine_padding;
+
+
+ DpmDescriptor_t DpmDescriptor[PPCLK_COUNT];
+
+ uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ];
+ uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ];
+ uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ];
+ uint16_t FreqTableEclk [NUM_ECLK_DPM_LEVELS ];
+ uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ];
+ uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ];
+ uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ];
+ uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ];
+ uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ];
+ uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ];
+
+ uint16_t DcModeMaxFreq [PPCLK_COUNT ];
+
+
+ uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS];
+ uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS];
+
+
+ uint16_t GfxclkFidle;
+ uint16_t GfxclkSlewRate;
+ uint16_t CksEnableFreq;
+ uint16_t Padding789;
+ QuadraticInt_t CksVoltageOffset;
+ uint16_t AcgThresholdFreqHigh;
+ uint16_t AcgThresholdFreqLow;
+ uint16_t GfxclkDsMaxFreq;
+ uint8_t Padding456[2];
+
+
+ uint8_t LowestUclkReservedForUlv;
+ uint8_t Padding8_Uclk[3];
+
+
+ uint8_t PcieGenSpeed[NUM_LINK_LEVELS];
+ uint8_t PcieLaneCount[NUM_LINK_LEVELS];
+ uint16_t LclkFreq[NUM_LINK_LEVELS];
+
+
+ uint16_t EnableTdpm;
+ uint16_t TdpmHighHystTemperature;
+ uint16_t TdpmLowHystTemperature;
+ uint16_t GfxclkFreqHighTempLimit;
+
+
+ uint16_t FanStopTemp;
+ uint16_t FanStartTemp;
+
+ uint16_t FanGainEdge;
+ uint16_t FanGainHotspot;
+ uint16_t FanGainLiquid;
+ uint16_t FanGainVrVddc;
+ uint16_t FanGainVrMvdd;
+ uint16_t FanGainPlx;
+ uint16_t FanGainHbm;
+ uint16_t FanPwmMin;
+ uint16_t FanAcousticLimitRpm;
+ uint16_t FanThrottlingRpm;
+ uint16_t FanMaximumRpm;
+ uint16_t FanTargetTemperature;
+ uint16_t FanTargetGfxclk;
+ uint8_t FanZeroRpmEnable;
+ uint8_t FanTachEdgePerRev;
+
+
+
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t FuzzyFan_Reserved;
+
+
+
+
+ uint8_t OverrideAvfsGb;
+ uint8_t Padding8_Avfs[3];
+
+ QuadraticInt_t qAvfsGb;
+ DroopInt_t dBtcGbGfxCksOn;
+ DroopInt_t dBtcGbGfxCksOff;
+ DroopInt_t dBtcGbGfxAcg;
+ DroopInt_t dBtcGbSoc;
+ LinearInt_t qAgingGbGfx;
+ LinearInt_t qAgingGbSoc;
+
+ QuadraticInt_t qStaticVoltageOffsetGfx;
+ QuadraticInt_t qStaticVoltageOffsetSoc;
+
+ uint16_t DcTolGfx;
+ uint16_t DcTolSoc;
+
+ uint8_t DcBtcGfxEnabled;
+ uint8_t DcBtcSocEnabled;
+ uint8_t Padding8_GfxBtc[2];
+
+ uint16_t DcBtcGfxMin;
+ uint16_t DcBtcGfxMax;
+
+ uint16_t DcBtcSocMin;
+ uint16_t DcBtcSocMax;
+
+
+
+ uint32_t DebugOverrides;
+ QuadraticInt_t ReservedEquation0;
+ QuadraticInt_t ReservedEquation1;
+ QuadraticInt_t ReservedEquation2;
+ QuadraticInt_t ReservedEquation3;
+
+
+ uint32_t Reserved[15];
+
+
+
+ uint8_t Liquid1_I2C_address;
+ uint8_t Liquid2_I2C_address;
+ uint8_t Vr_I2C_address;
+ uint8_t Plx_I2C_address;
+
+ uint8_t Liquid_I2C_LineSCL;
+ uint8_t Liquid_I2C_LineSDA;
+ uint8_t Vr_I2C_LineSCL;
+ uint8_t Vr_I2C_LineSDA;
+
+ uint8_t Plx_I2C_LineSCL;
+ uint8_t Plx_I2C_LineSDA;
+ uint8_t VrSensorPresent;
+ uint8_t LiquidSensorPresent;
+
+ uint16_t MaxVoltageStepGfx;
+ uint16_t MaxVoltageStepSoc;
+
+ uint8_t VddGfxVrMapping;
+ uint8_t VddSocVrMapping;
+ uint8_t VddMem0VrMapping;
+ uint8_t VddMem1VrMapping;
+
+ uint8_t GfxUlvPhaseSheddingMask;
+ uint8_t SocUlvPhaseSheddingMask;
+ uint8_t ExternalSensorPresent;
+ uint8_t Padding8_V;
+
+
+ uint16_t GfxMaxCurrent;
+ int8_t GfxOffset;
+ uint8_t Padding_TelemetryGfx;
+
+ uint16_t SocMaxCurrent;
+ int8_t SocOffset;
+ uint8_t Padding_TelemetrySoc;
+
+ uint16_t Mem0MaxCurrent;
+ int8_t Mem0Offset;
+ uint8_t Padding_TelemetryMem0;
+
+ uint16_t Mem1MaxCurrent;
+ int8_t Mem1Offset;
+ uint8_t Padding_TelemetryMem1;
+
+
+ uint8_t AcDcGpio;
+ uint8_t AcDcPolarity;
+ uint8_t VR0HotGpio;
+ uint8_t VR0HotPolarity;
+
+ uint8_t VR1HotGpio;
+ uint8_t VR1HotPolarity;
+ uint8_t Padding1;
+ uint8_t Padding2;
+
+
+
+ uint8_t LedPin0;
+ uint8_t LedPin1;
+ uint8_t LedPin2;
+ uint8_t padding8_4;
+
+
+ uint8_t GfxclkSpreadEnabled;
+ uint8_t GfxclkSpreadPercent;
+ uint16_t GfxclkSpreadFreq;
+
+ uint8_t UclkSpreadEnabled;
+ uint8_t UclkSpreadPercent;
+ uint16_t UclkSpreadFreq;
+
+ uint8_t SocclkSpreadEnabled;
+ uint8_t SocclkSpreadPercent;
+ uint16_t SocclkSpreadFreq;
+
+ uint32_t BoardReserved[3];
+
+
+ uint32_t MmHubPadding[7];
+
+} PPTable_t;
+
+typedef struct {
+
+ uint16_t GfxclkAverageLpfTau;
+ uint16_t SocclkAverageLpfTau;
+ uint16_t UclkAverageLpfTau;
+ uint16_t GfxActivityLpfTau;
+ uint16_t UclkActivityLpfTau;
+
+
+ uint32_t MmHubPadding[7];
+} DriverSmuConfig_t;
+
+typedef struct {
+
+ uint16_t GfxclkFmin;
+ uint16_t GfxclkFmax;
+ uint16_t GfxclkFreq1;
+ uint16_t GfxclkOffsetVolt1;
+ uint16_t GfxclkFreq2;
+ uint16_t GfxclkOffsetVolt2;
+ uint16_t GfxclkFreq3;
+ uint16_t GfxclkOffsetVolt3;
+ uint16_t UclkFmax;
+ int16_t OverDrivePct;
+ uint16_t FanMaximumRpm;
+ uint16_t FanMinimumPwm;
+ uint16_t FanTargetTemperature;
+ uint16_t MaxOpTemp;
+
+} OverDriveTable_t;
+
+typedef struct {
+ uint16_t CurrClock[PPCLK_COUNT];
+ uint16_t AverageGfxclkFrequency;
+ uint16_t AverageSocclkFrequency;
+ uint16_t AverageUclkFrequency ;
+ uint16_t AverageGfxActivity ;
+ uint16_t AverageUclkActivity ;
+ uint8_t CurrSocVoltageOffset ;
+ uint8_t CurrGfxVoltageOffset ;
+ uint8_t CurrMemVidOffset ;
+ uint8_t Padding8 ;
+ uint16_t CurrSocketPower ;
+ uint16_t TemperatureEdge ;
+ uint16_t TemperatureHotspot ;
+ uint16_t TemperatureHBM ;
+ uint16_t TemperatureVrGfx ;
+ uint16_t TemperatureVrMem ;
+ uint16_t TemperatureLiquid ;
+ uint16_t TemperaturePlx ;
+ uint32_t ThrottlerStatus ;
+
+ uint8_t LinkDpmLevel;
+ uint8_t Padding[3];
+
+
+ uint32_t MmHubPadding[7];
+} SmuMetrics_t;
+
+typedef struct {
+ uint16_t MinClock;
+ uint16_t MaxClock;
+ uint16_t MinUclk;
+ uint16_t MaxUclk;
+
+ uint8_t WmSetting;
+ uint8_t Padding[3];
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+
+typedef enum {
+ WM_SOCCLK = 0,
+ WM_DCEFCLK,
+ WM_COUNT_PP,
+} WM_CLOCK_e;
+
+typedef struct {
+
+ WatermarkRowGeneric_t WatermarkRow[WM_COUNT_PP][NUM_WM_RANGES];
+
+ uint32_t MmHubPadding[7];
+} Watermarks_t;
+
+typedef struct {
+ uint16_t avgPsmCount[30];
+ uint16_t minPsmCount[30];
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
+
+ uint32_t MmHubPadding[7];
+} AvfsDebugTable_t;
+
+typedef struct {
+ uint8_t AvfsEn;
+ uint8_t AvfsVersion;
+ uint8_t OverrideVFT;
+ uint8_t OverrideAvfsGb;
+
+ uint8_t OverrideTemperatures;
+ uint8_t OverrideVInversion;
+ uint8_t OverrideP2V;
+ uint8_t OverrideP2VCharzFreq;
+
+ int32_t VFT0_m1;
+ int32_t VFT0_m2;
+ int32_t VFT0_b;
+
+ int32_t VFT1_m1;
+ int32_t VFT1_m2;
+ int32_t VFT1_b;
+
+ int32_t VFT2_m1;
+ int32_t VFT2_m2;
+ int32_t VFT2_b;
+
+ int32_t AvfsGb0_m1;
+ int32_t AvfsGb0_m2;
+ int32_t AvfsGb0_b;
+
+ int32_t AcBtcGb_m1;
+ int32_t AcBtcGb_m2;
+ int32_t AcBtcGb_b;
+
+ uint32_t AvfsTempCold;
+ uint32_t AvfsTempMid;
+ uint32_t AvfsTempHot;
+
+ uint32_t GfxVInversion;
+ uint32_t SocVInversion;
+
+ int32_t P2V_m1;
+ int32_t P2V_m2;
+ int32_t P2V_b;
+
+ uint32_t P2VCharzFreq;
+
+ uint32_t EnabledAvfsModules;
+
+ uint32_t MmHubPadding[7];
+} AvfsFuseOverride_t;
+
+typedef struct {
+
+ uint8_t Gfx_ActiveHystLimit;
+ uint8_t Gfx_IdleHystLimit;
+ uint8_t Gfx_FPS;
+ uint8_t Gfx_MinActiveFreqType;
+ uint8_t Gfx_BoosterFreqType;
+ uint8_t Gfx_UseRlcBusy;
+ uint16_t Gfx_MinActiveFreq;
+ uint16_t Gfx_BoosterFreq;
+ uint16_t Gfx_PD_Data_time_constant;
+ uint32_t Gfx_PD_Data_limit_a;
+ uint32_t Gfx_PD_Data_limit_b;
+ uint32_t Gfx_PD_Data_limit_c;
+ uint32_t Gfx_PD_Data_error_coeff;
+ uint32_t Gfx_PD_Data_error_rate_coeff;
+
+ uint8_t Soc_ActiveHystLimit;
+ uint8_t Soc_IdleHystLimit;
+ uint8_t Soc_FPS;
+ uint8_t Soc_MinActiveFreqType;
+ uint8_t Soc_BoosterFreqType;
+ uint8_t Soc_UseRlcBusy;
+ uint16_t Soc_MinActiveFreq;
+ uint16_t Soc_BoosterFreq;
+ uint16_t Soc_PD_Data_time_constant;
+ uint32_t Soc_PD_Data_limit_a;
+ uint32_t Soc_PD_Data_limit_b;
+ uint32_t Soc_PD_Data_limit_c;
+ uint32_t Soc_PD_Data_error_coeff;
+ uint32_t Soc_PD_Data_error_rate_coeff;
+
+ uint8_t Mem_ActiveHystLimit;
+ uint8_t Mem_IdleHystLimit;
+ uint8_t Mem_FPS;
+ uint8_t Mem_MinActiveFreqType;
+ uint8_t Mem_BoosterFreqType;
+ uint8_t Mem_UseRlcBusy;
+ uint16_t Mem_MinActiveFreq;
+ uint16_t Mem_BoosterFreq;
+ uint16_t Mem_PD_Data_time_constant;
+ uint32_t Mem_PD_Data_limit_a;
+ uint32_t Mem_PD_Data_limit_b;
+ uint32_t Mem_PD_Data_limit_c;
+ uint32_t Mem_PD_Data_error_coeff;
+ uint32_t Mem_PD_Data_error_rate_coeff;
+
+} DpmActivityMonitorCoeffInt_t;
+
+
+
+
+#define TABLE_PPTABLE 0
+#define TABLE_WATERMARKS 1
+#define TABLE_AVFS 2
+#define TABLE_AVFS_PSM_DEBUG 3
+#define TABLE_AVFS_FUSE_OVERRIDE 4
+#define TABLE_PMSTATUSLOG 5
+#define TABLE_SMU_METRICS 6
+#define TABLE_DRIVER_SMU_CONFIG 7
+#define TABLE_ACTIVITY_MONITOR_COEFF 8
+#define TABLE_OVERDRIVE 9
+#define TABLE_COUNT 10
+
+
+#define UCLK_SWITCH_SLOW 0
+#define UCLK_SWITCH_FAST 1
+
+
+#define SQ_Enable_MASK 0x1
+#define SQ_IR_MASK 0x2
+#define SQ_PCC_MASK 0x4
+#define SQ_EDC_MASK 0x8
+
+#define TCP_Enable_MASK 0x100
+#define TCP_IR_MASK 0x200
+#define TCP_PCC_MASK 0x400
+#define TCP_EDC_MASK 0x800
+
+#define TD_Enable_MASK 0x10000
+#define TD_IR_MASK 0x20000
+#define TD_PCC_MASK 0x40000
+#define TD_EDC_MASK 0x80000
+
+#define DB_Enable_MASK 0x1000000
+#define DB_IR_MASK 0x2000000
+#define DB_PCC_MASK 0x4000000
+#define DB_EDC_MASK 0x8000000
+
+#define SQ_Enable_SHIFT 0
+#define SQ_IR_SHIFT 1
+#define SQ_PCC_SHIFT 2
+#define SQ_EDC_SHIFT 3
+
+#define TCP_Enable_SHIFT 8
+#define TCP_IR_SHIFT 9
+#define TCP_PCC_SHIFT 10
+#define TCP_EDC_SHIFT 11
+
+#define TD_Enable_SHIFT 16
+#define TD_IR_SHIFT 17
+#define TD_PCC_SHIFT 18
+#define TD_EDC_SHIFT 19
+
+#define DB_Enable_SHIFT 24
+#define DB_IR_SHIFT 25
+#define DB_PCC_SHIFT 26
+#define DB_EDC_SHIFT 27
+
+#define REMOVE_FMAX_MARGIN_BIT 0x0
+#define REMOVE_DCTOL_MARGIN_BIT 0x1
+#define REMOVE_PLATFORM_MARGIN_BIT 0x2
+
+#endif
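
The THROTTLER_STATUS_*_BIT values decode the ThrottlerStatus word reported in SmuMetrics_t above. A minimal sketch, assuming a SmuMetrics_t has already been fetched via a TABLE_SMU_METRICS transfer:

	/* Sketch: report which limiters are currently active. */
	uint32_t status = metrics.ThrottlerStatus;
	if (status & (1 << THROTTLER_STATUS_TEMP_HOTSPOT_BIT))
		pr_info("throttling on hotspot temperature\n");
	if (status & (1 << THROTTLER_STATUS_PPT_BIT))
		pr_info("throttling on package power limit\n");
	if (status & (1 << THROTTLER_STATUS_TDC_GFX_BIT))
		pr_info("throttling on GFX TDC\n");
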
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega12_ppsmc.h
new file mode 100644
index 0000000..f985c78
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12_ppsmc.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VEGA12_PP_SMC_H
+#define VEGA12_PP_SMC_H
+
+#pragma pack(push, 1)
+
+#define SMU_UCODE_VERSION 0x00270a00
+
+/* SMU Response Codes: */
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
+#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
+#define PPSMC_MSG_EnableAllSmuFeatures 0x6
+#define PPSMC_MSG_DisableAllSmuFeatures 0x7
+#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
+#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
+#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
+#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD
+#define PPSMC_MSG_SetWorkloadMask 0xE
+#define PPSMC_MSG_SetPptLimit 0xF
+#define PPSMC_MSG_SetDriverDramAddrHigh 0x10
+#define PPSMC_MSG_SetDriverDramAddrLow 0x11
+#define PPSMC_MSG_SetToolsDramAddrHigh 0x12
+#define PPSMC_MSG_SetToolsDramAddrLow 0x13
+#define PPSMC_MSG_TransferTableSmu2Dram 0x14
+#define PPSMC_MSG_TransferTableDram2Smu 0x15
+#define PPSMC_MSG_UseDefaultPPTable 0x16
+#define PPSMC_MSG_UseBackupPPTable 0x17
+#define PPSMC_MSG_RunBtc 0x18
+#define PPSMC_MSG_RequestI2CBus 0x19
+#define PPSMC_MSG_ReleaseI2CBus 0x1A
+#define PPSMC_MSG_SetFloorSocVoltage 0x21
+#define PPSMC_MSG_SoftReset 0x22
+#define PPSMC_MSG_StartBacoMonitor 0x23
+#define PPSMC_MSG_CancelBacoMonitor 0x24
+#define PPSMC_MSG_EnterBaco 0x25
+#define PPSMC_MSG_SetSoftMinByFreq 0x26
+#define PPSMC_MSG_SetSoftMaxByFreq 0x27
+#define PPSMC_MSG_SetHardMinByFreq 0x28
+#define PPSMC_MSG_SetHardMaxByFreq 0x29
+#define PPSMC_MSG_GetMinDpmFreq 0x2A
+#define PPSMC_MSG_GetMaxDpmFreq 0x2B
+#define PPSMC_MSG_GetDpmFreqByIndex 0x2C
+#define PPSMC_MSG_GetDpmClockFreq 0x2D
+#define PPSMC_MSG_GetSsVoltageByDpm 0x2E
+#define PPSMC_MSG_SetMemoryChannelConfig 0x2F
+#define PPSMC_MSG_SetGeminiMode 0x30
+#define PPSMC_MSG_SetGeminiApertureHigh 0x31
+#define PPSMC_MSG_SetGeminiApertureLow 0x32
+#define PPSMC_MSG_SetMinLinkDpmByIndex 0x33
+#define PPSMC_MSG_OverridePcieParameters 0x34
+#define PPSMC_MSG_OverDriveSetPercentage 0x35
+#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x36
+#define PPSMC_MSG_ReenableAcDcInterrupt 0x37
+#define PPSMC_MSG_NotifyPowerSource 0x38
+#define PPSMC_MSG_SetUclkFastSwitch 0x39
+#define PPSMC_MSG_SetUclkDownHyst 0x3A
+#define PPSMC_MSG_GfxDeviceDriverReset 0x3B
+#define PPSMC_MSG_GetCurrentRpm 0x3C
+#define PPSMC_MSG_SetVideoFps 0x3D
+#define PPSMC_MSG_SetTjMax 0x3E
+#define PPSMC_MSG_SetFanTemperatureTarget 0x3F
+#define PPSMC_MSG_PrepareMp1ForUnload 0x40
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x41
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x42
+#define PPSMC_MSG_DramLogSetDramSize 0x43
+#define PPSMC_MSG_SetFanMaxRpm 0x44
+#define PPSMC_MSG_SetFanMinPwm 0x45
+#define PPSMC_MSG_ConfigureGfxDidt 0x46
+#define PPSMC_MSG_NumOfDisplays 0x47
+#define PPSMC_MSG_RemoveMargins 0x48
+#define PPSMC_MSG_ReadSerialNumTop32 0x49
+#define PPSMC_MSG_ReadSerialNumBottom32 0x4A
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C
+#define PPSMC_MSG_RunAcgBtc 0x4D
+#define PPSMC_MSG_InitializeAcg 0x4E
+#define PPSMC_MSG_EnableAcgBtcTestMode 0x4F
+#define PPSMC_MSG_EnableAcgSpreadSpectrum 0x50
+#define PPSMC_MSG_AllowGfxOff 0x51
+#define PPSMC_MSG_DisallowGfxOff 0x52
+#define PPSMC_MSG_GetPptLimit 0x53
+#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x54
+#define PPSMC_Message_Count 0x56
+
+typedef uint16_t PPSMC_Result;
+typedef int PPSMC_Msg;
+
+#pragma pack(pop)
+
+#endif
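
These Vega12 message IDs are driven through smum_send_msg_to_smc_with_parameter() declared in smumgr.h above. A hedged example; the parameter's unit and the 180 value are illustrative, as this header does not define them:

	/* Hypothetical: request a new package power limit on Vega12. */
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetPptLimit, 180);
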
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 98e701e..9587550 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -23,9 +23,10 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
polaris10_smumgr.o iceland_smumgr.o \
- smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o
+ smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
+ vega12_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 4d672cd..08d0001 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -236,13 +236,10 @@ static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t dev_id;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- dev_id = (uint32_t)sys_info.value;
+ dev_id = adev->pdev->device;
switch (dev_id) {
case 0x67BA:
@@ -411,8 +408,7 @@ static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
}
static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU7_Discrete_GraphicsLevel *level)
+ uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
{
int result;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -438,14 +434,14 @@ static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
clock,
&level->MinVddcPhases);
- level->ActivityLevel = sclk_al_threshold;
+ level->ActivityLevel = data->current_profile_setting.sclk_activity;
level->CcPwrDynRm = 0;
level->CcPwrDynRm1 = 0;
level->EnabledForActivity = 0;
/* this level can be used for throttling.*/
level->EnabledForThrottle = 1;
- level->UpH = 0;
- level->DownH = 0;
+ level->UpH = data->current_profile_setting.sclk_up_hyst;
+ level->DownH = data->current_profile_setting.sclk_down_hyst;
level->VoltageDownH = 0;
level->PowerThrottle = 0;
@@ -492,7 +488,6 @@ static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
for (i = 0; i < dpm_table->sclk_table.count; i++) {
result = ci_populate_single_graphic_level(hwmgr,
dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
&levels[i]);
if (result)
return result;
@@ -860,10 +855,13 @@ static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
/* GPIO voltage control */
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
- table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
- else
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
+ table->VddcLevel[count].Smio = (uint8_t) count;
+ table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
+ table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
+ } else {
table->VddcLevel[count].Smio = 0;
+ }
}
CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
@@ -885,10 +883,13 @@ static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
&(data->vddci_voltage_table.entries[count]),
&(table->VddciLevel[count]));
PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
- else
- table->VddciLevel[count].Smio |= 0;
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ table->VddciLevel[count].Smio = (uint8_t) count;
+ table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
+ table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
+ } else {
+ table->VddciLevel[count].Smio = 0;
+ }
}
CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
@@ -910,10 +911,13 @@ static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
&(data->mvdd_voltage_table.entries[count]),
&table->MvddLevel[count]);
PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
- table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
- else
- table->MvddLevel[count].Smio |= 0;
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ table->MvddLevel[count].Smio = (uint8_t) count;
+ table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
+ table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
+ } else {
+ table->MvddLevel[count].Smio = 0;
+ }
}
CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
@@ -1217,12 +1221,12 @@ static int ci_populate_single_memory_level(
memory_level->EnabledForThrottle = 1;
memory_level->EnabledForActivity = 1;
- memory_level->UpH = 0;
- memory_level->DownH = 100;
+ memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
+ memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
memory_level->VoltageDownH = 0;
/* Indicates maximum activity level for this performance level.*/
- memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
memory_level->StutterEnable = 0;
memory_level->StrobeEnable = 0;
memory_level->EdcReadEnable = 0;
@@ -1302,7 +1306,7 @@ static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
int result;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t dev_id;
uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
@@ -1323,10 +1327,7 @@ static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- dev_id = (uint32_t)sys_info.value;
+ dev_id = adev->pdev->device;
if ((dpm_table->mclk_table.count >= 2)
&& ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
@@ -1506,7 +1507,7 @@ static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.DownH = 100;
table->MemoryACPILevel.VoltageDownH = 0;
/* Indicates maximum activity level for this performance level.*/
- table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+ table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
table->MemoryACPILevel.StutterEnable = 0;
table->MemoryACPILevel.StrobeEnable = 0;
@@ -1732,8 +1733,7 @@ static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
if (0 != result) {
smu_data->smc_state_table.GraphicsBootLevel = 0;
- pr_err("VBIOS did not find boot engine clock value \
- in dependency table. Using Graphics DPM level 0!");
+ pr_err("VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!\n");
result = 0;
}
@@ -1743,8 +1743,7 @@ static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
if (0 != result) {
smu_data->smc_state_table.MemoryBootLevel = 0;
- pr_err("VBIOS did not find boot engine clock value \
- in dependency table. Using Memory DPM level 0!");
+ pr_err("VBIOS did not find boot engine clock value in dependency table. Using Memory DPM level 0!\n");
result = 0;
}
@@ -1943,6 +1942,37 @@ static int ci_start_smc(struct pp_hwmgr *hwmgr)
return 0;
}
+static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= config;
+ } else {
+ pr_info("VDDCshould be on SVI2 controller!");
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
@@ -2066,6 +2096,11 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
table->PCIeGenInterval = 1;
+ result = ci_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+ data->vr_config = table->VRConfig;
+
ci_populate_smc_svi2_config(hwmgr, table);
for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
@@ -2086,6 +2121,7 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
@@ -2186,7 +2222,7 @@ static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
fan_table.TempRespLim = cpu_to_be16(5);
- reference_clock = smu7_get_xclk(hwmgr);
+ reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
@@ -2220,10 +2256,7 @@ static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
@@ -2321,6 +2354,7 @@ static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
hwmgr->is_kicker = info.is_kicker;
+ hwmgr->smu_version = info.version;
byte_count = info.image_size;
src = (uint8_t *)info.kptr;
start_addr = info.ucode_start_address;
@@ -2602,9 +2636,9 @@ static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
}
j++;
+
PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
-
temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
@@ -2617,10 +2651,10 @@ static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ if (!data->is_memory_gddr5) {
+ PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
@@ -2628,8 +2662,6 @@ static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
}
break;
@@ -2644,8 +2676,6 @@ static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
break;
default:
@@ -2736,35 +2766,8 @@ static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
return ci_is_smc_ram_running(hwmgr);
}
-static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct ci_smumgr *smu_data = (struct ci_smumgr *)
- (hwmgr->smu_backend);
- struct SMU7_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
- SMU7_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpH = request->up_hyst;
- levels[i].DownH = request->down_hyst;
- }
-
- return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
-
-
static int ci_smu_init(struct pp_hwmgr *hwmgr)
{
- int i;
struct ci_smumgr *ci_priv = NULL;
ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
@@ -2772,9 +2775,6 @@ static int ci_smu_init(struct pp_hwmgr *hwmgr)
if (ci_priv == NULL)
return -ENOMEM;
- for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
- ci_priv->activity_target[i] = 30;
-
hwmgr->smu_backend = ci_priv;
return 0;
@@ -2793,6 +2793,102 @@ static int ci_start_smu(struct pp_hwmgr *hwmgr)
return 0;
}
+static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
+ void *profile_setting)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)
+ (hwmgr->smu_backend);
+ struct profile_mode_setting *setting;
+ struct SMU7_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t mclk_array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+ struct SMU7_Discrete_MemoryLevel *mclk_levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+ uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
+
+ if (profile_setting == NULL)
+ return -EINVAL;
+
+ setting = (struct profile_mode_setting *)profile_setting;
+
+ if (setting->bupdate_sclk) {
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ if (levels[i].ActivityLevel !=
+ cpu_to_be16(setting->sclk_activity)) {
+ levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
+
+ clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel);
+ offset = clk_activity_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+
+ }
+ if (levels[i].UpH != setting->sclk_up_hyst ||
+ levels[i].DownH != setting->sclk_down_hyst) {
+ levels[i].UpH = setting->sclk_up_hyst;
+ levels[i].DownH = setting->sclk_down_hyst;
+ up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU7_Discrete_GraphicsLevel, UpH);
+ down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU7_Discrete_GraphicsLevel, DownH);
+ offset = up_hyst_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t));
+ tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+ }
+ }
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ }
+
+ if (setting->bupdate_mclk) {
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
+ if (mclk_levels[i].ActivityLevel !=
+ cpu_to_be16(setting->mclk_activity)) {
+ mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
+
+ clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
+ + offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel);
+ offset = clk_activity_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+
+ }
+ if (mclk_levels[i].UpH != setting->mclk_up_hyst ||
+ mclk_levels[i].DownH != setting->mclk_down_hyst) {
+ mclk_levels[i].UpH = setting->mclk_up_hyst;
+ mclk_levels[i].DownH = setting->mclk_down_hyst;
+ up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
+ + offsetof(SMU7_Discrete_MemoryLevel, UpH);
+ down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
+ + offsetof(SMU7_Discrete_MemoryLevel, DownH);
+ offset = up_hyst_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t));
+ tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+ }
+ }
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ }
+ return 0;
+}
+
const struct pp_smumgr_func ci_smu_funcs = {
.smu_init = ci_smu_init,
.smu_fini = ci_smu_fini,
@@ -2814,5 +2910,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
.get_mac_definition = ci_get_mac_definition,
.initialize_mc_reg_table = ci_initialize_mc_reg_table,
.is_dpm_running = ci_is_dpm_running,
- .populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
+ .update_dpm_settings = ci_update_dpm_settings,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
index 8189cfa..a828270 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
@@ -70,8 +70,6 @@ struct ci_smumgr {
const struct ci_pt_defaults *power_tune_defaults;
SMU7_Discrete_MCRegisters mc_regs;
struct ci_mc_reg_table mc_reg_table;
- uint32_t activity_target[SMU7_MAX_LEVELS_GRAPHICS];
-
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
deleted file mode 100644
index 78ab055..0000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ /dev/null
@@ -1,858 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include "cgs_common.h"
-#include "smu/smu_8_0_d.h"
-#include "smu/smu_8_0_sh_mask.h"
-#include "smu8.h"
-#include "smu8_fusion.h"
-#include "cz_smumgr.h"
-#include "cz_ppsmc.h"
-#include "smu_ucode_xfer_cz.h"
-#include "gca/gfx_8_0_d.h"
-#include "gca/gfx_8_0_sh_mask.h"
-#include "smumgr.h"
-
-#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
-
-static const enum cz_scratch_entry firmware_list[] = {
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
-};
-
-static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- return cgs_read_register(hwmgr->device,
- mmSMU_MP1_SRBM2P_ARG_0);
-}
-
-static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- int result = 0;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
- SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
- if (result != 0) {
- pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
- return result;
- }
-
- cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
- cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
-
- return 0;
-}
-
-/* Send a message to the SMC, and wait for its response.*/
-static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- int result = 0;
-
- result = cz_send_msg_to_smc_async(hwmgr, msg);
- if (result != 0)
- return result;
-
- return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
- SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-}
-
-static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
- uint32_t smc_address, uint32_t limit)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- if (0 != (3 & smc_address)) {
- pr_err("SMC address must be 4 byte aligned\n");
- return -EINVAL;
- }
-
- if (limit <= (smc_address + 3)) {
- pr_err("SMC address beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
- SMN_MP1_SRAM_START_ADDR + smc_address);
-
- return 0;
-}
-
-static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
- uint32_t smc_address, uint32_t value, uint32_t limit)
-{
- int result;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
- if (!result)
- cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
-
- return result;
-}
-
-static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
-
- return cz_send_msg_to_smc(hwmgr, msg);
-}
-
-static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
- uint32_t firmware)
-{
- int i;
- uint32_t index = SMN_MP1_SRAM_START_ADDR +
- SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
-
- for (i = 0; i < hwmgr->usec_timeout; i++) {
- if (firmware ==
- (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
- break;
- udelay(1);
- }
-
- if (i >= hwmgr->usec_timeout) {
- pr_err("SMU check loaded firmware failed.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
-{
- uint32_t reg_data;
- uint32_t tmp;
- int ret = 0;
- struct cgs_firmware_info info = {0};
- struct cz_smumgr *cz_smu;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- ret = cgs_get_firmware_info(hwmgr->device,
- CGS_UCODE_ID_CP_MEC, &info);
-
- if (ret)
- return -EINVAL;
-
- /* Disable MEC parsing/prefetching */
- tmp = cgs_read_register(hwmgr->device,
- mmCP_MEC_CNTL);
- tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
- tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
- cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
-
- tmp = cgs_read_register(hwmgr->device,
- mmCP_CPC_IC_BASE_CNTL);
-
- tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
- tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
- tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
- tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
- cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
-
- reg_data = smu_lower_32_bits(info.mc_addr) &
- PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
- cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
-
- reg_data = smu_upper_32_bits(info.mc_addr) &
- PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
- cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
-
- return 0;
-}
-
-static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
- enum cz_scratch_entry firmware_enum)
-{
- uint8_t ret = 0;
-
- switch (firmware_enum) {
- case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
- ret = UCODE_ID_SDMA0;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
- if (hwmgr->chip_id == CHIP_STONEY)
- ret = UCODE_ID_SDMA0;
- else
- ret = UCODE_ID_SDMA1;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
- ret = UCODE_ID_CP_CE;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
- ret = UCODE_ID_CP_PFP;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
- ret = UCODE_ID_CP_ME;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
- ret = UCODE_ID_CP_MEC_JT1;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
- if (hwmgr->chip_id == CHIP_STONEY)
- ret = UCODE_ID_CP_MEC_JT1;
- else
- ret = UCODE_ID_CP_MEC_JT2;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
- ret = UCODE_ID_GMCON_RENG;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
- ret = UCODE_ID_RLC_G;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
- ret = UCODE_ID_RLC_SCRATCH;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
- ret = UCODE_ID_RLC_SRM_ARAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
- ret = UCODE_ID_RLC_SRM_DRAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
- ret = UCODE_ID_DMCU_ERAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
- ret = UCODE_ID_DMCU_IRAM;
- break;
- case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
- ret = TASK_ARG_INIT_MM_PWR_LOG;
- break;
- case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
- case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
- case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
- case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
- case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
- case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
- ret = TASK_ARG_REG_MMIO;
- break;
- case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
- ret = TASK_ARG_INIT_CLK_TABLE;
- break;
- }
-
- return ret;
-}
-
-static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
-{
- enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- result = CGS_UCODE_ID_SDMA0;
- break;
- case UCODE_ID_SDMA1:
- result = CGS_UCODE_ID_SDMA1;
- break;
- case UCODE_ID_CP_CE:
- result = CGS_UCODE_ID_CP_CE;
- break;
- case UCODE_ID_CP_PFP:
- result = CGS_UCODE_ID_CP_PFP;
- break;
- case UCODE_ID_CP_ME:
- result = CGS_UCODE_ID_CP_ME;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = CGS_UCODE_ID_CP_MEC_JT1;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = CGS_UCODE_ID_CP_MEC_JT2;
- break;
- case UCODE_ID_RLC_G:
- result = CGS_UCODE_ID_RLC_G;
- break;
- default:
- break;
- }
-
- return result;
-}
-
-static int cz_smu_populate_single_scratch_task(
- struct pp_hwmgr *hwmgr,
- enum cz_scratch_entry fw_enum,
- uint8_t type, bool is_last)
-{
- uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
- struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
-
- task->type = type;
- task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
- task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
-
- for (i = 0; i < cz_smu->scratch_buffer_length; i++)
- if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
- break;
-
- if (i >= cz_smu->scratch_buffer_length) {
- pr_err("Invalid Firmware Type\n");
- return -EINVAL;
- }
-
- task->addr.low = cz_smu->scratch_buffer[i].mc_addr_low;
- task->addr.high = cz_smu->scratch_buffer[i].mc_addr_high;
- task->size_bytes = cz_smu->scratch_buffer[i].data_size;
-
- if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
- struct cz_ih_meta_data *pIHReg_restore =
- (struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
- pIHReg_restore->command =
- METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
- }
-
- return 0;
-}
-
-static int cz_smu_populate_single_ucode_load_task(
- struct pp_hwmgr *hwmgr,
- enum cz_scratch_entry fw_enum,
- bool is_last)
-{
- uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
- struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
-
- task->type = TASK_TYPE_UCODE_LOAD;
- task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
- task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
-
- for (i = 0; i < cz_smu->driver_buffer_length; i++)
- if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
- break;
-
- if (i >= cz_smu->driver_buffer_length) {
- pr_err("Invalid Firmware Type\n");
- return -EINVAL;
- }
-
- task->addr.low = cz_smu->driver_buffer[i].mc_addr_low;
- task->addr.high = cz_smu->driver_buffer[i].mc_addr_high;
- task->size_bytes = cz_smu->driver_buffer[i].data_size;
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- TASK_TYPE_UCODE_SAVE, true);
-
- return 0;
-}
-
-static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-
- for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
- toc->JobList[i] = (uint8_t)IGNORE_JOB;
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-
- toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- TASK_TYPE_UCODE_SAVE, false);
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- TASK_TYPE_UCODE_SAVE, true);
-
- return 0;
-}
-
-
-static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
-
- toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
-
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-
- if (hwmgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- else
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
-
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
-
- /* populate scratch */
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- TASK_TYPE_UCODE_LOAD, false);
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- TASK_TYPE_UCODE_LOAD, false);
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- TASK_TYPE_UCODE_LOAD, true);
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
- TASK_TYPE_INITIALIZE, true);
- return 0;
-}
-
-static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
-
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- if (hwmgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (hwmgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
-
- return 0;
-}
-
-static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
-
- cz_smu_populate_single_scratch_task(hwmgr,
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
- TASK_TYPE_INITIALIZE, true);
-
- return 0;
-}
-
-static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-
- cz_smu->toc_entry_used_count = 0;
- cz_smu_initialize_toc_empty_job_list(hwmgr);
- cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
- cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
- cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
- cz_smu_construct_toc_for_power_profiling(hwmgr);
- cz_smu_construct_toc_for_bootup(hwmgr);
- cz_smu_construct_toc_for_clock_table(hwmgr);
-
- return 0;
-}
-
-static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- uint32_t firmware_type;
- uint32_t i;
- int ret;
- enum cgs_ucode_id ucode_id;
- struct cgs_firmware_info info = {0};
-
- cz_smu->driver_buffer_length = 0;
-
- for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
-
- firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
- firmware_list[i]);
-
- ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
-
- ret = cgs_get_firmware_info(hwmgr->device,
- ucode_id, &info);
-
- if (ret == 0) {
- cz_smu->driver_buffer[i].mc_addr_high =
- smu_upper_32_bits(info.mc_addr);
-
- cz_smu->driver_buffer[i].mc_addr_low =
- smu_lower_32_bits(info.mc_addr);
-
- cz_smu->driver_buffer[i].data_size = info.image_size;
-
- cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
- cz_smu->driver_buffer_length++;
- }
- }
-
- return 0;
-}
-
-static int cz_smu_populate_single_scratch_entry(
- struct pp_hwmgr *hwmgr,
- enum cz_scratch_entry scratch_type,
- uint32_t ulsize_byte,
- struct cz_buffer_entry *entry)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- long long mc_addr =
- ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
- | cz_smu->smu_buffer.mc_addr_low;
-
- uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
-
- mc_addr += cz_smu->smu_buffer_used_bytes;
-
- entry->data_size = ulsize_byte;
- entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
- cz_smu->smu_buffer_used_bytes;
- entry->mc_addr_low = smu_lower_32_bits(mc_addr);
- entry->mc_addr_high = smu_upper_32_bits(mc_addr);
- entry->firmware_ID = scratch_type;
-
- cz_smu->smu_buffer_used_bytes += ulsize_aligned;
-
- return 0;
-}
-
-static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- unsigned long i;
-
- for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
- if (cz_smu->scratch_buffer[i].firmware_ID
- == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
- break;
- }
-
- *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetClkTableAddrHi,
- cz_smu->scratch_buffer[i].mc_addr_high);
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetClkTableAddrLo,
- cz_smu->scratch_buffer[i].mc_addr_low);
-
- cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_clock_table);
-
- cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
-
- return 0;
-}
-
-static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- unsigned long i;
-
- for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
- if (cz_smu->scratch_buffer[i].firmware_ID
- == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
- break;
- }
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetClkTableAddrHi,
- cz_smu->scratch_buffer[i].mc_addr_high);
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetClkTableAddrLo,
- cz_smu->scratch_buffer[i].mc_addr_low);
-
- cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_clock_table);
-
- cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
-
- return 0;
-}
-
-static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
- uint32_t smc_address;
-
- if (!hwmgr->reload_fw) {
- pr_info("skip reloading...\n");
- return 0;
- }
-
- cz_smu_populate_firmware_entries(hwmgr);
-
- cz_smu_construct_toc(hwmgr);
-
- smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
- offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
-
- cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DriverDramAddrHi,
- cz_smu->toc_buffer.mc_addr_high);
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DriverDramAddrLo,
- cz_smu->toc_buffer.mc_addr_low);
-
- cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
-
- cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_aram);
- cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_power_profiling_index);
-
- return cz_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ExecuteJob,
- cz_smu->toc_entry_initialize_index);
-}
-
-static int cz_start_smu(struct pp_hwmgr *hwmgr)
-{
- int ret = 0;
- uint32_t fw_to_check = 0;
-
- fw_to_check = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_JT1_MASK |
- UCODE_ID_CP_MEC_JT2_MASK;
-
- if (hwmgr->chip_id == CHIP_STONEY)
- fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
-
- ret = cz_request_smu_load_fw(hwmgr);
- if (ret)
- pr_err("SMU firmware load failed\n");
-
- cz_check_fw_load_finish(hwmgr, fw_to_check);
-
- ret = cz_load_mec_firmware(hwmgr);
- if (ret)
- pr_err("Mec Firmware load failed\n");
-
- return ret;
-}
-
-static int cz_smu_init(struct pp_hwmgr *hwmgr)
-{
- uint64_t mc_addr = 0;
- int ret = 0;
- struct cz_smumgr *cz_smu;
-
- cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
- if (cz_smu == NULL)
- return -ENOMEM;
-
- hwmgr->smu_backend = cz_smu;
-
- cz_smu->toc_buffer.data_size = 4096;
- cz_smu->smu_buffer.data_size =
- ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
- ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
- ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
- ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
- ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
-
- ret = smu_allocate_memory(hwmgr->device,
- cz_smu->toc_buffer.data_size,
- CGS_GPU_MEM_TYPE__GART_CACHEABLE,
- PAGE_SIZE,
- &mc_addr,
- &cz_smu->toc_buffer.kaddr,
- &cz_smu->toc_buffer.handle);
- if (ret != 0)
- return -1;
-
- cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- ret = smu_allocate_memory(hwmgr->device,
- cz_smu->smu_buffer.data_size,
- CGS_GPU_MEM_TYPE__GART_CACHEABLE,
- PAGE_SIZE,
- &mc_addr,
- &cz_smu->smu_buffer.kaddr,
- &cz_smu->smu_buffer.handle);
- if (ret != 0)
- return -1;
-
- cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
-
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
-
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
- sizeof(struct SMU8_MultimediaPowerLogData),
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
-
- if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
- sizeof(struct SMU8_Fusion_ClkTable),
- &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
- pr_err("Error when Populate Firmware Entry.\n");
- return -1;
- }
-
- return 0;
-}
-
-static int cz_smu_fini(struct pp_hwmgr *hwmgr)
-{
- struct cz_smumgr *cz_smu;
-
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
- if (cz_smu) {
- cgs_free_gpu_mem(hwmgr->device,
- cz_smu->toc_buffer.handle);
- cgs_free_gpu_mem(hwmgr->device,
- cz_smu->smu_buffer.handle);
- kfree(cz_smu);
- }
-
- return 0;
-}
-
-const struct pp_smumgr_func cz_smu_funcs = {
- .smu_init = cz_smu_init,
- .smu_fini = cz_smu_fini,
- .start_smu = cz_start_smu,
- .check_fw_load_finish = cz_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_load_specific_fw = NULL,
- .get_argument = cz_smum_get_argument,
- .send_msg_to_smc = cz_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
- .download_pptable_settings = cz_download_pptable_settings,
- .upload_pptable_settings = cz_upload_pptable_settings,
-};
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
deleted file mode 100644
index 7c3a290..0000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _CZ_SMUMGR_H_
-#define _CZ_SMUMGR_H_
-
-
-#define MAX_NUM_FIRMWARE 8
-#define MAX_NUM_SCRATCH 11
-#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
-#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
-#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024
-#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4)
-
-enum cz_scratch_entry {
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
- CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
- CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
- CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
- CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
- CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
- CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
- CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
- CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
- CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
-};
-
-struct cz_buffer_entry {
- uint32_t data_size;
- uint32_t mc_addr_low;
- uint32_t mc_addr_high;
- void *kaddr;
- enum cz_scratch_entry firmware_ID;
- unsigned long handle; /* as bo handle used when release bo */
-};
-
-struct cz_register_index_data_pair {
- uint32_t offset;
- uint32_t value;
-};
-
-struct cz_ih_meta_data {
- uint32_t command;
- struct cz_register_index_data_pair register_index_value_pair[1];
-};
-
-struct cz_smumgr {
- uint8_t driver_buffer_length;
- uint8_t scratch_buffer_length;
- uint16_t toc_entry_used_count;
- uint16_t toc_entry_initialize_index;
- uint16_t toc_entry_power_profiling_index;
- uint16_t toc_entry_aram;
- uint16_t toc_entry_ih_register_restore_task_index;
- uint16_t toc_entry_clock_table;
- uint16_t ih_register_restore_task_size;
- uint16_t smu_buffer_used_bytes;
-
- struct cz_buffer_entry toc_buffer;
- struct cz_buffer_entry smu_buffer;
- struct cz_buffer_entry firmware_buffer;
- struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
- struct cz_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
- struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index f572bef..faef783 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -205,9 +205,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
int result = 0;
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- if (0 != smu_data->avfs.avfs_btc_param) {
+ if (0 != smu_data->avfs_btc_param) {
if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
result = -EINVAL;
}
@@ -261,43 +261,24 @@ static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
return 0;
}
-static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+ if (!hwmgr->avfs_supported)
+ return 0;
- switch (smu_data->avfs.avfs_btc_status) {
- case AVFS_BTC_COMPLETED_PREVIOUSLY:
- break;
+ PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
+ "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
+ " table over to SMU",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
+ "[AVFS][fiji_avfs_event_mgr] Could not setup "
+ "Pwr Virus for AVFS ",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
+ "[AVFS][fiji_avfs_event_mgr] Failure at "
+ "fiji_start_avfs_btc. AVFS Disabled",
+ return -EINVAL);
- case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/
- if (!smu_started)
- break;
- smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
- "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
- " table over to SMU",
- return -EINVAL;);
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
- "[AVFS][fiji_avfs_event_mgr] Could not setup "
- "Pwr Virus for AVFS ",
- return -EINVAL;);
- smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
- "[AVFS][fiji_avfs_event_mgr] Failure at "
- "fiji_start_avfs_btc. AVFS Disabled",
- return -EINVAL;);
-
- smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
- break;
- case AVFS_BTC_DISABLED: /* Do nothing */
- case AVFS_BTC_NOTSUPPORTED: /* Do nothing */
- case AVFS_BTC_ENABLEAVFS:
- break;
- default:
- pr_err("AVFS failed status is %x !\n", smu_data->avfs.avfs_btc_status);
- break;
- }
return 0;
}
@@ -309,8 +290,6 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
/* Only start SMC if SMC RAM is not running */
if (!(smu7_is_smc_ram_running(hwmgr)
|| cgs_is_virtualization_enabled(hwmgr->device))) {
- fiji_avfs_event_mgr(hwmgr, false);
-
/* Check if SMU is running in protected mode */
if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC,
@@ -323,7 +302,8 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
if (result)
return result;
}
- fiji_avfs_event_mgr(hwmgr, true);
+ if (fiji_avfs_event_mgr(hwmgr))
+ hwmgr->avfs_supported = false;
}
/* To initialize all clock gating before RLC loaded and running.*/
@@ -368,7 +348,6 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
static int fiji_smu_init(struct pp_hwmgr *hwmgr)
{
- int i;
struct fiji_smumgr *fiji_priv = NULL;
fiji_priv = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL);
@@ -378,11 +357,10 @@ static int fiji_smu_init(struct pp_hwmgr *hwmgr)
hwmgr->smu_backend = fiji_priv;
- if (smu7_init(hwmgr))
+ if (smu7_init(hwmgr)) {
+ kfree(fiji_priv);
return -EINVAL;
-
- for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
- fiji_priv->activity_target[i] = 30;
+ }
return 0;
}
@@ -972,8 +950,7 @@ static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
}
static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU73_Discrete_GraphicsLevel *level)
+ uint32_t clock, struct SMU73_Discrete_GraphicsLevel *level)
{
int result;
/* PP_Clocks minClocks; */
@@ -981,12 +958,18 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
result = fiji_calculate_sclk_params(hwmgr, clock, level);
+ if (hwmgr->od_enabled)
+ vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk;
+ else
+ vdd_dep_table = table_info->vdd_dep_on_sclk;
+
/* populate graphics levels */
result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
+ vdd_dep_table, clock,
(uint32_t *)(&level->MinVoltage), &mvdd);
PP_ASSERT_WITH_CODE((0 == result),
"can not find VDDC voltage value for "
@@ -994,13 +977,13 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
return result);
level->SclkFrequency = clock;
- level->ActivityLevel = sclk_al_threshold;
+ level->ActivityLevel = data->current_profile_setting.sclk_activity;
level->CcPwrDynRm = 0;
level->CcPwrDynRm1 = 0;
level->EnabledForActivity = 0;
level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
+ level->UpHyst = data->current_profile_setting.sclk_up_hyst;
+ level->DownHyst = data->current_profile_setting.sclk_down_hyst;
level->VoltageDownHyst = 0;
level->PowerThrottle = 0;
@@ -1057,7 +1040,6 @@ static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
for (i = 0; i < dpm_table->sclk_table.count; i++) {
result = fiji_populate_single_graphic_level(hwmgr,
dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
&levels[i]);
if (result)
return result;
@@ -1202,10 +1184,16 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
(struct phm_ppt_v1_information *)(hwmgr->pptable);
int result = 0;
uint32_t mclk_stutter_mode_threshold = 60000;
+ phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
- if (table_info->vdd_dep_on_mclk) {
+ if (hwmgr->od_enabled)
+ vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
+ else
+ vdd_dep_table = table_info->vdd_dep_on_mclk;
+
+ if (vdd_dep_table) {
result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
+ vdd_dep_table, clock,
(uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
PP_ASSERT_WITH_CODE((0 == result),
"can not find MinVddc voltage value from memory "
@@ -1214,10 +1202,10 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
mem_level->EnabledForThrottle = 1;
mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
+ mem_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
+ mem_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->ActivityLevel = data->current_profile_setting.mclk_activity;
mem_level->StutterEnable = false;
mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
@@ -1435,7 +1423,7 @@ static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.DownHyst = 100;
table->MemoryACPILevel.VoltageDownHyst = 0;
table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+ PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
table->MemoryACPILevel.StutterEnable = false;
CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
@@ -1799,7 +1787,7 @@ static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
+ "Stretch Amount in PPTable not supported",
return -EINVAL);
}
@@ -1954,44 +1942,6 @@ static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
}
-static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
- struct SMU73_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-
- return 0;
-}
-
static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
{
pp_atomctrl_voltage_table param_led_dpm;
@@ -2141,7 +2091,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
result = fiji_populate_vr_config(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to populate VRConfig setting!", return result);
-
+ data->vr_config = table->VRConfig;
table->ThermGpio = 17;
table->SclkStepSize = 0x4000;
@@ -2232,8 +2182,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to setup dpm led config", return result);
- fiji_save_default_power_profile(hwmgr);
-
return 0;
}
@@ -2309,7 +2257,7 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
fan_table.TempRespLim = cpu_to_be16(5);
- reference_clock = smu7_get_xclk(hwmgr);
+ reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
thermal_controller.advanceFanControlParameters.ulCycleDelay *
@@ -2349,19 +2297,12 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
- int ret;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-
- if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+ if (!hwmgr->avfs_supported)
return 0;
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
- return ret;
+ return 0;
}
static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
@@ -2385,10 +2326,7 @@ static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
@@ -2691,29 +2629,100 @@ static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
? true : false;
}
-static int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
+static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
+ void *profile_setting)
{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
(hwmgr->smu_backend);
+ struct profile_mode_setting *setting;
struct SMU73_Discrete_GraphicsLevel *levels =
smu_data->smc_state_table.GraphicsLevel;
uint32_t array = smu_data->smu7_data.dpm_table_start +
offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
+
+ uint32_t mclk_array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
+ struct SMU73_Discrete_MemoryLevel *mclk_levels =
+ smu_data->smc_state_table.MemoryLevel;
uint32_t i;
+ uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
+ if (profile_setting == NULL)
+ return -EINVAL;
+
+ setting = (struct profile_mode_setting *)profile_setting;
+
+ if (setting->bupdate_sclk) {
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ if (levels[i].ActivityLevel !=
+ cpu_to_be16(setting->sclk_activity)) {
+ levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
+
+ clk_activity_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU73_Discrete_GraphicsLevel, ActivityLevel);
+ offset = clk_activity_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+
+ }
+ if (levels[i].UpHyst != setting->sclk_up_hyst ||
+ levels[i].DownHyst != setting->sclk_down_hyst) {
+ levels[i].UpHyst = setting->sclk_up_hyst;
+ levels[i].DownHyst = setting->sclk_down_hyst;
+ up_hyst_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU73_Discrete_GraphicsLevel, UpHyst);
+ down_hyst_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU73_Discrete_GraphicsLevel, DownHyst);
+ offset = up_hyst_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t));
+ tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+ }
+ }
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}
- return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
+ if (setting->bupdate_mclk) {
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
+ if (mclk_levels[i].ActivityLevel !=
+ cpu_to_be16(setting->mclk_activity)) {
+ mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
+
+ clk_activity_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i)
+ + offsetof(SMU73_Discrete_MemoryLevel, ActivityLevel);
+ offset = clk_activity_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+
+ }
+ if (mclk_levels[i].UpHyst != setting->mclk_up_hyst ||
+ mclk_levels[i].DownHyst != setting->mclk_down_hyst) {
+ mclk_levels[i].UpHyst = setting->mclk_up_hyst;
+ mclk_levels[i].DownHyst = setting->mclk_down_hyst;
+ up_hyst_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i)
+ + offsetof(SMU73_Discrete_MemoryLevel, UpHyst);
+ down_hyst_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i)
+ + offsetof(SMU73_Discrete_MemoryLevel, DownHyst);
+ offset = up_hyst_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t));
+ tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+ }
+ }
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ }
+ return 0;
}
const struct pp_smumgr_func fiji_smu_funcs = {
@@ -2739,6 +2748,6 @@ const struct pp_smumgr_func fiji_smu_funcs = {
.get_mac_definition = fiji_get_mac_definition,
.initialize_mc_reg_table = fiji_initialize_mc_reg_table,
.is_dpm_running = fiji_is_dpm_running,
- .populate_requested_graphic_levels = fiji_populate_requested_graphic_levels,
.is_hw_avfs_present = fiji_is_hw_avfs_present,
+ .update_dpm_settings = fiji_update_dpm_settings,
};
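The per-field writes in fiji_update_dpm_settings above all follow one pattern: SMC RAM is only reachable as aligned 32-bit words through the CGS indirect-register interface, so a 1- or 2-byte level field is updated by reading the dword that contains it, splicing the new value into the right byte lane, and writing the dword back. A minimal sketch of that pattern, factored into a hypothetical helper (patch_smc_field is not part of the patch; the calls inside it are the ones used in the hunk above):

static void patch_smc_field(struct pp_hwmgr *hwmgr, uint32_t field_offset,
			    uint32_t value, uint32_t field_size)
{
	/* SMC RAM is dword addressed: locate the aligned word holding the field. */
	uint32_t dword_offset = field_offset & ~0x3;
	uint32_t tmp;

	/* Read the current dword and byte-swap it into host order. */
	tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device,
						      CGS_IND_REG__SMC,
						      dword_offset));
	/* Splice the 8- or 16-bit field value into its byte lane. */
	tmp = phm_set_field_to_u32(field_offset, tmp, value, field_size);
	/* Swap back to SMC order and write the patched dword. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			       dword_offset, PP_HOST_TO_SMC_UL(tmp));
}

With such a helper, the ActivityLevel update above would reduce to patch_smc_field(hwmgr, clk_activity_offset, levels[i].ActivityLevel, sizeof(uint16_t)).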
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index 2796477..6d37462 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -43,8 +43,6 @@ struct fiji_smumgr {
struct SMU73_Discrete_Ulv ulv_setting;
struct SMU73_Discrete_PmFuses power_tune_table;
const struct fiji_pt_defaults *power_tune_defaults;
- uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
-
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 3412882..d4bb934 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -204,7 +204,7 @@ static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr)
pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n");
return -EINVAL;
}
-
+ hwmgr->smu_version = info.version;
/* wait for smc boot up */
PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
@@ -262,7 +262,6 @@ static int iceland_start_smu(struct pp_hwmgr *hwmgr)
static int iceland_smu_init(struct pp_hwmgr *hwmgr)
{
- int i;
struct iceland_smumgr *iceland_priv = NULL;
iceland_priv = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
@@ -272,11 +271,10 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr)
hwmgr->smu_backend = iceland_priv;
- if (smu7_init(hwmgr))
+ if (smu7_init(hwmgr)) {
+ kfree(iceland_priv);
return -EINVAL;
-
- for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
- iceland_priv->activity_target[i] = 30;
+ }
return 0;
}
@@ -285,13 +283,10 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr)
static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t dev_id;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- dev_id = (uint32_t)sys_info.value;
+ dev_id = adev->pdev->device;
switch (dev_id) {
case DEVICE_ID_VI_ICELAND_M_6900:
@@ -546,7 +541,7 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
/* SCLK/VDDC Dependency Table has to exist. */
PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
- "The SCLK/VDDC Dependency Table does not exist.\n",
+ "The SCLK/VDDC Dependency Table does not exist.",
return -EINVAL);
if (NULL == hwmgr->dyn_state.cac_leakage_table) {
@@ -898,7 +893,6 @@ static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
- uint16_t sclk_activity_level_threshold,
SMU71_Discrete_GraphicsLevel *graphic_level)
{
int result;
@@ -911,8 +905,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
&graphic_level->MinVddc);
PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for VDDC \
- engine clock dependency table", return result);
+ "can not find VDDC voltage value for VDDC engine clock dependency table", return result);
/* SCLK frequency in units of 10KHz*/
graphic_level->SclkFrequency = engine_clock;
@@ -925,7 +918,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
&graphic_level->MinVddcPhases);
/* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
+ graphic_level->ActivityLevel = data->current_profile_setting.sclk_activity;
graphic_level->CcPwrDynRm = 0;
graphic_level->CcPwrDynRm1 = 0;
@@ -933,8 +926,8 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
graphic_level->EnabledForActivity = 0;
/* this level can be used for throttling.*/
graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 100;
+ graphic_level->UpHyst = data->current_profile_setting.sclk_up_hyst;
+ graphic_level->DownHyst = data->current_profile_setting.sclk_down_hyst;
graphic_level->VoltageDownHyst = 0;
graphic_level->PowerThrottle = 0;
@@ -990,7 +983,6 @@ static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
for (i = 0; i < dpm_table->sclk_table.count; i++) {
result = iceland_populate_single_graphic_level(hwmgr,
dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
&(smu_data->smc_state_table.GraphicsLevel[i]));
if (result != 0)
return result;
@@ -1276,12 +1268,12 @@ static int iceland_populate_single_memory_level(
memory_level->EnabledForThrottle = 1;
memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
+ memory_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
+ memory_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
memory_level->VoltageDownHyst = 0;
/* Indicates maximum activity level for this performance level.*/
- memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
memory_level->StutterEnable = 0;
memory_level->StrobeEnable = 0;
memory_level->EdcReadEnable = 0;
@@ -1562,7 +1554,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.DownHyst = 100;
table->MemoryACPILevel.VoltageDownHyst = 0;
/* Indicates maximum activity level for this performance level.*/
- table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+ table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
table->MemoryACPILevel.StutterEnable = 0;
table->MemoryACPILevel.StrobeEnable = 0;
@@ -1678,8 +1670,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
if (0 != result) {
smu_data->smc_state_table.GraphicsBootLevel = 0;
- pr_err("VBIOS did not find boot engine clock value \
- in dependency table. Using Graphics DPM level 0!");
+ pr_err("VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!\n");
result = 0;
}
@@ -1689,8 +1680,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
if (0 != result) {
smu_data->smc_state_table.MemoryBootLevel = 0;
- pr_err("VBIOS did not find boot engine clock value \
- in dependency table. Using Memory DPM level 0!");
+ pr_err("VBIOS did not find boot engine clock value in dependency table. Using Memory DPM level 0!\n");
result = 0;
}
@@ -2168,7 +2158,7 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
fan_table.TempRespLim = cpu_to_be16(5);
- reference_clock = smu7_get_xclk(hwmgr);
+ reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
@@ -2205,10 +2195,7 @@ static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
@@ -2552,9 +2539,9 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
}
j++;
+
PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
-
temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
@@ -2568,10 +2555,10 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
}
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ if (!data->is_memory_gddr5) {
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
@@ -2579,8 +2566,6 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
}
break;
@@ -2595,8 +2580,6 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
break;
default:
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
index 8024725..f32c506 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -65,7 +65,6 @@ struct iceland_smumgr {
const struct iceland_pt_defaults *power_tune_defaults;
SMU71_Discrete_MCRegisters mc_regs;
struct iceland_mc_reg_table mc_reg_table;
- uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];
};
#endif
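Fiji and Iceland above (and Polaris10 below) consume the new struct profile_mode_setting either through the void *profile_setting argument of update_dpm_settings or as data->current_profile_setting when populating the default level tables. Its definition is not part of these hunks; reconstructed from the fields referenced here (a sketch, not the verbatim declaration), it has roughly this shape:

struct profile_mode_setting {
	uint8_t  bupdate_sclk;		/* non-zero: apply the SCLK fields below */
	uint8_t  sclk_up_hyst;		/* hysteresis before stepping a level up */
	uint8_t  sclk_down_hyst;	/* hysteresis before stepping a level down */
	uint16_t sclk_activity;		/* busy threshold, kept in host byte order */
	uint8_t  bupdate_mclk;		/* non-zero: apply the MCLK fields below */
	uint8_t  mclk_up_hyst;
	uint8_t  mclk_down_hyst;
	uint16_t mclk_activity;
};

The activity values are held in host order and converted with cpu_to_be16() only at the point they are compared with or written into the SMC level tables, which is why the update paths above convert the setting rather than the cached level.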
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index bd6be77..997a777 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -99,13 +99,13 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
int result = 0;
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ if (0 != smu_data->avfs_btc_param) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
}
- if (smu_data->avfs.avfs_btc_param > 1) {
+ if (smu_data->avfs_btc_param > 1) {
/* Soft-Reset to reset the engine before loading uCode */
/* halt */
cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
@@ -172,47 +172,28 @@ static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
}
-static int
-polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT)
+static int polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- switch (smu_data->avfs.avfs_btc_status) {
- case AVFS_BTC_COMPLETED_PREVIOUSLY:
- break;
-
- case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
-
- smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
- "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
- return -EINVAL);
-
- if (smu_data->avfs.avfs_btc_param > 1) {
- pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
- "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
- return -EINVAL);
- }
-
- smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
- "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
- return -EINVAL);
- smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
- break;
+ if (!hwmgr->avfs_supported)
+ return 0;
- case AVFS_BTC_DISABLED:
- case AVFS_BTC_ENABLEAVFS:
- case AVFS_BTC_NOTSUPPORTED:
- break;
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
+ "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
+ return -EINVAL);
- default:
- pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status);
- break;
+ if (smu_data->avfs_btc_param > 1) {
+		pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be issues with this setting.");
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
+ "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
+ return -EINVAL);
}
+ PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
+ "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
+ return -EINVAL);
+
return 0;
}
@@ -312,11 +293,10 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- bool SMU_VFT_INTACT;
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(hwmgr)) {
- SMU_VFT_INTACT = false;
+ if (!(smu7_is_smc_ram_running(hwmgr)
+ || cgs_is_virtualization_enabled(hwmgr->device))) {
smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
@@ -337,11 +317,9 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
if (result != 0)
PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
- polaris10_avfs_event_mgr(hwmgr, true);
- } else
- SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
+ polaris10_avfs_event_mgr(hwmgr);
+ }
- polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT);
/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
&(smu_data->smu7_data.soft_regs_start), 0x40000);
@@ -366,7 +344,6 @@ static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data;
- int i;
smu_data = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
if (smu_data == NULL)
@@ -374,11 +351,10 @@ static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
hwmgr->smu_backend = smu_data;
- if (smu7_init(hwmgr))
+ if (smu7_init(hwmgr)) {
+ kfree(smu_data);
return -EINVAL;
-
- for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
- smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+ }
return 0;
}
@@ -837,7 +813,7 @@ static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
- ref_clk = smu7_get_xclk(hwmgr);
+ ref_clk = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
for (i = 0; i < NUM_SCLK_RANGE; i++) {
@@ -902,7 +878,7 @@ static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
return result;
}
- ref_clock = smu7_get_xclk(hwmgr);
+ ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
for (i = 0; i < NUM_SCLK_RANGE; i++) {
if (clock > smu_data->range_table[i].trans_lower_frequency
@@ -938,8 +914,7 @@ static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
}
static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU74_Discrete_GraphicsLevel *level)
+ uint32_t clock, struct SMU74_Discrete_GraphicsLevel *level)
{
int result;
/* PP_Clocks minClocks; */
@@ -948,26 +923,32 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
SMU_SclkSetting curr_sclk_setting = { 0 };
+ phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+ if (hwmgr->od_enabled)
+ vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk;
+ else
+ vdd_dep_table = table_info->vdd_dep_on_sclk;
+
/* populate graphics levels */
result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
+ vdd_dep_table, clock,
&level->MinVoltage, &mvdd);
PP_ASSERT_WITH_CODE((0 == result),
"can not find VDDC voltage value for "
"VDDC engine clock dependency table",
return result);
- level->ActivityLevel = sclk_al_threshold;
+ level->ActivityLevel = data->current_profile_setting.sclk_activity;
level->CcPwrDynRm = 0;
level->CcPwrDynRm1 = 0;
level->EnabledForActivity = 0;
level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
+ level->UpHyst = data->current_profile_setting.sclk_up_hyst;
+ level->DownHyst = data->current_profile_setting.sclk_down_hyst;
level->VoltageDownHyst = 0;
level->PowerThrottle = 0;
data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
@@ -1031,7 +1012,6 @@ static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
result = polaris10_populate_single_graphic_level(hwmgr,
dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
&(smu_data->smc_state_table.GraphicsLevel[i]));
if (result)
return result;
@@ -1107,12 +1087,18 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
int result = 0;
struct cgs_display_info info = {0, 0, NULL};
uint32_t mclk_stutter_mode_threshold = 40000;
+ phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
cgs_get_active_displays_info(hwmgr->device, &info);
- if (table_info->vdd_dep_on_mclk) {
+ if (hwmgr->od_enabled)
+ vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
+ else
+ vdd_dep_table = table_info->vdd_dep_on_mclk;
+
+ if (vdd_dep_table) {
result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
+ vdd_dep_table, clock,
&mem_level->MinVoltage, &mem_level->MinMvdd);
PP_ASSERT_WITH_CODE((0 == result),
"can not find MinVddc voltage value from memory "
@@ -1122,10 +1108,10 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
mem_level->MclkFrequency = clock;
mem_level->EnabledForThrottle = 1;
mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
+ mem_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
+ mem_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->ActivityLevel = data->current_profile_setting.mclk_activity;
mem_level->StutterEnable = false;
mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
@@ -1306,7 +1292,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.DownHyst = 100;
table->MemoryACPILevel.VoltageDownHyst = 0;
table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+ PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
@@ -1652,7 +1638,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
+ "Stretch Amount in PPTable not supported",
return -EINVAL);
}
@@ -1726,8 +1712,8 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_sclk;
- if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return result;
+ if (!hwmgr->avfs_supported)
+ return 0;
result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
@@ -1834,42 +1820,6 @@ static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
}
-static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- struct SMU74_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
@@ -1991,7 +1941,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
result = polaris10_populate_vr_config(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to populate VRConfig setting!", return result);
-
+ hw_data->vr_config = table->VRConfig;
table->ThermGpio = 17;
table->SclkStepSize = 0x4000;
@@ -2084,8 +2034,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to populate PM fuses to SMC memory!", return result);
- polaris10_save_default_power_profile(hwmgr);
-
return 0;
}
@@ -2102,24 +2050,17 @@ static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
- int ret;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ if (!hwmgr->avfs_supported)
return 0;
- ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
- ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
- 0 : -1;
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
- return ret;
+ return 0;
}
static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
@@ -2193,7 +2134,7 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
fan_table.TempRespLim = cpu_to_be16(5);
- reference_clock = smu7_get_xclk(hwmgr);
+ reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
thermal_controller.advanceFanControlParameters.ulCycleDelay *
@@ -2369,10 +2310,7 @@ static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
@@ -2547,29 +2485,100 @@ static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
? true : false;
}
-static int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
+static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
+ void *profile_setting)
{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
(hwmgr->smu_backend);
+ struct profile_mode_setting *setting;
struct SMU74_Discrete_GraphicsLevel *levels =
smu_data->smc_state_table.GraphicsLevel;
uint32_t array = smu_data->smu7_data.dpm_table_start +
offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
- SMU74_MAX_LEVELS_GRAPHICS;
+
+ uint32_t mclk_array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+ struct SMU74_Discrete_MemoryLevel *mclk_levels =
+ smu_data->smc_state_table.MemoryLevel;
uint32_t i;
+ uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
+ if (profile_setting == NULL)
+ return -EINVAL;
+
+ setting = (struct profile_mode_setting *)profile_setting;
+
+ if (setting->bupdate_sclk) {
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ if (levels[i].ActivityLevel !=
+ cpu_to_be16(setting->sclk_activity)) {
+ levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
+
+ clk_activity_offset = array + (sizeof(SMU74_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU74_Discrete_GraphicsLevel, ActivityLevel);
+ offset = clk_activity_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+
+ }
+ if (levels[i].UpHyst != setting->sclk_up_hyst ||
+ levels[i].DownHyst != setting->sclk_down_hyst) {
+ levels[i].UpHyst = setting->sclk_up_hyst;
+ levels[i].DownHyst = setting->sclk_down_hyst;
+ up_hyst_offset = array + (sizeof(SMU74_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU74_Discrete_GraphicsLevel, UpHyst);
+ down_hyst_offset = array + (sizeof(SMU74_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU74_Discrete_GraphicsLevel, DownHyst);
+ offset = up_hyst_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t));
+ tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+ }
+ }
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}
- return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
+ if (setting->bupdate_mclk) {
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
+ if (mclk_levels[i].ActivityLevel !=
+ cpu_to_be16(setting->mclk_activity)) {
+ mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
+
+ clk_activity_offset = mclk_array + (sizeof(SMU74_Discrete_MemoryLevel) * i)
+ + offsetof(SMU74_Discrete_MemoryLevel, ActivityLevel);
+ offset = clk_activity_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+
+ }
+ if (mclk_levels[i].UpHyst != setting->mclk_up_hyst ||
+ mclk_levels[i].DownHyst != setting->mclk_down_hyst) {
+ mclk_levels[i].UpHyst = setting->mclk_up_hyst;
+ mclk_levels[i].DownHyst = setting->mclk_down_hyst;
+ up_hyst_offset = mclk_array + (sizeof(SMU74_Discrete_MemoryLevel) * i)
+ + offsetof(SMU74_Discrete_MemoryLevel, UpHyst);
+ down_hyst_offset = mclk_array + (sizeof(SMU74_Discrete_MemoryLevel) * i)
+ + offsetof(SMU74_Discrete_MemoryLevel, DownHyst);
+ offset = up_hyst_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t));
+ tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+ }
+ }
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ }
+ return 0;
}
const struct pp_smumgr_func polaris10_smu_funcs = {
@@ -2594,6 +2603,6 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
.populate_all_memory_levels = polaris10_populate_all_memory_levels,
.get_mac_definition = polaris10_get_mac_definition,
.is_dpm_running = polaris10_is_dpm_running,
- .populate_requested_graphic_levels = polaris10_populate_requested_graphic_levels,
.is_hw_avfs_present = polaris10_is_hw_avfs_present,
+ .update_dpm_settings = polaris10_update_dpm_settings,
};
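polaris10_update_dpm_settings, like the Fiji version, is reached through the new update_dpm_settings entry in the smumgr function table; it freezes the affected DPM levels, patches only the fields that changed, and unfreezes them again. An illustrative caller (the dispatch through hwmgr->smumgr_funcs and the profile values are assumptions, not part of this patch):

static int set_compute_like_profile(struct pp_hwmgr *hwmgr)
{
	/* Example values: react immediately to load, linger a little before
	 * dropping, and step up once the GPU is about 70% busy. */
	struct profile_mode_setting setting = {
		.bupdate_sclk   = 1,
		.sclk_up_hyst   = 0,
		.sclk_down_hyst = 5,
		.sclk_activity  = 70,
	};

	if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->update_dpm_settings)
		return hwmgr->smumgr_funcs->update_dpm_settings(hwmgr, &setting);

	return -EINVAL;
}

Because bupdate_mclk is left zero here, only the graphics levels are frozen and patched; the memory levels are untouched.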
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
index 5e19c24..1ec425d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -59,7 +59,6 @@ struct polaris10_smumgr {
struct SMU74_Discrete_PmFuses power_tune_table;
struct polaris10_range_table range_table[NUM_SCLK_RANGE];
const struct polaris10_pt_defaults *power_tune_defaults;
- uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
deleted file mode 100644
index b98ade6..0000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "smumgr.h"
-#include "rv_inc.h"
-#include "pp_soc15.h"
-#include "rv_smumgr.h"
-#include "ppatomctrl.h"
-#include "rv_ppsmc.h"
-#include "smu10_driver_if.h"
-#include "smu10.h"
-#include "ppatomctrl.h"
-#include "pp_debug.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu7_smumgr.h"
-
-#define VOLTAGE_SCALE 4
-
-#define BUFFER_SIZE 80000
-#define MAX_STRING_SIZE 15
-#define BUFFER_SIZETWO 131072
-
-#define MP0_Public 0x03800000
-#define MP0_SRAM 0x03900000
-#define MP1_Public 0x03b00000
-#define MP1_SRAM 0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS 0x3010028
-
-
-bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
- uint32_t mp1_fw_flags, reg;
-
- reg = soc15_get_register_offset(NBIF_HWID, 0,
- mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
-
- cgs_write_register(hwmgr->device, reg,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- reg = soc15_get_register_offset(NBIF_HWID, 0,
- mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
-
- mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
-
- if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
- return true;
-
- return false;
-}
-
-static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
-{
- uint32_t reg;
-
- if (!rv_is_smc_ram_running(hwmgr))
- return -EINVAL;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
-
- phm_wait_for_register_unequal(hwmgr, reg,
- 0, MP1_C2PMSG_90__CONTENT_MASK);
-
- return cgs_read_register(hwmgr->device, reg);
-}
-
-int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg)
-{
- uint32_t reg;
-
- if (!rv_is_smc_ram_running(hwmgr))
- return -EINVAL;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(hwmgr->device, reg, msg);
-
- return 0;
-}
-
-int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
-{
- uint32_t reg;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-
- *arg = cgs_read_register(hwmgr->device, reg);
-
- return 0;
-}
-
-int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- uint32_t reg;
-
- rv_wait_for_response(hwmgr);
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
-
- rv_send_msg_to_smc_without_waiting(hwmgr, msg);
-
- if (rv_wait_for_response(hwmgr) == 0)
- printk("Failed to send Message %x.\n", msg);
-
- return 0;
-}
-
-
-int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- uint32_t reg;
-
- rv_wait_for_response(hwmgr);
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(hwmgr->device, reg, parameter);
-
- rv_send_msg_to_smc_without_waiting(hwmgr, msg);
-
-
- if (rv_wait_for_response(hwmgr) == 0)
- printk("Failed to send Message %x.\n", msg);
-
- return 0;
-}
-
-int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id)
-{
- struct rv_smumgr *priv =
- (struct rv_smumgr *)(hwmgr->smu_backend);
-
- PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
- "Invalid SMU Table ID!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
- "Invalid SMU Table version!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
- "Invalid SMU Table Length!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetDriverDramAddrHigh,
- priv->smu_tables.entry[table_id].table_addr_high) == 0,
- "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetDriverDramAddrLow,
- priv->smu_tables.entry[table_id].table_addr_low) == 0,
- "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
- return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableSmu2Dram,
- priv->smu_tables.entry[table_id].table_id) == 0,
- "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
- return -EINVAL;);
-
- memcpy(table, priv->smu_tables.entry[table_id].table,
- priv->smu_tables.entry[table_id].size);
-
- return 0;
-}
-
-int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id)
-{
- struct rv_smumgr *priv =
- (struct rv_smumgr *)(hwmgr->smu_backend);
-
- PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
- "Invalid SMU Table ID!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
- "Invalid SMU Table version!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
- "Invalid SMU Table Length!", return -EINVAL;);
-
- memcpy(priv->smu_tables.entry[table_id].table, table,
- priv->smu_tables.entry[table_id].size);
-
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetDriverDramAddrHigh,
- priv->smu_tables.entry[table_id].table_addr_high) == 0,
- "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
- return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetDriverDramAddrLow,
- priv->smu_tables.entry[table_id].table_addr_low) == 0,
- "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
- return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableDram2Smu,
- priv->smu_tables.entry[table_id].table_id) == 0,
- "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
- return -EINVAL;);
-
- return 0;
-}
-
-static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
-{
- uint32_t smc_driver_if_version;
-
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion),
- "Attempt to get SMC IF Version Number Failed!",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
- &smc_driver_if_version),
- "Attempt to read SMC IF Version Number Failed!",
- return -EINVAL);
-
- if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION)
- return -EINVAL;
-
- return 0;
-}
-
-/* sdma is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PowerUpSdma),
- "Attempt to power up sdma Failed!",
- return -EINVAL);
-
- return 0;
-}
-
-static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PowerDownSdma),
- "Attempt to power down sdma Failed!",
- return -EINVAL);
-
- return 0;
-}
-
-/* vcn is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerUpVcn, 0),
- "Attempt to power up vcn Failed!",
- return -EINVAL);
-
- return 0;
-}
-
-static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerDownVcn, 0),
- "Attempt to power down vcn Failed!",
- return -EINVAL);
-
- return 0;
-}
-
-static int rv_smu_fini(struct pp_hwmgr *hwmgr)
-{
- struct rv_smumgr *priv =
- (struct rv_smumgr *)(hwmgr->smu_backend);
-
- if (priv) {
- rv_smc_disable_sdma(hwmgr);
- rv_smc_disable_vcn(hwmgr);
- cgs_free_gpu_mem(hwmgr->device,
- priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- priv->smu_tables.entry[CLOCKTABLE].handle);
- kfree(hwmgr->smu_backend);
- hwmgr->smu_backend = NULL;
- }
-
- return 0;
-}
-
-static int rv_start_smu(struct pp_hwmgr *hwmgr)
-{
- if (rv_verify_smc_interface(hwmgr))
- return -EINVAL;
- if (rv_smc_enable_sdma(hwmgr))
- return -EINVAL;
- if (rv_smc_enable_vcn(hwmgr))
- return -EINVAL;
-
- return 0;
-}
-
-static int rv_smu_init(struct pp_hwmgr *hwmgr)
-{
- struct rv_smumgr *priv;
- uint64_t mc_addr;
- void *kaddr = NULL;
- unsigned long handle;
-
- priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL);
-
- if (!priv)
- return -ENOMEM;
-
- hwmgr->smu_backend = priv;
-
- /* allocate space for watermarks table */
- smu_allocate_memory(hwmgr->device,
- sizeof(Watermarks_t),
- CGS_GPU_MEM_TYPE__GART_CACHEABLE,
- PAGE_SIZE,
- &mc_addr,
- &kaddr,
- &handle);
-
- PP_ASSERT_WITH_CODE(kaddr,
- "[rv_smu_init] Out of memory for wmtable.",
- kfree(hwmgr->smu_backend);
- hwmgr->smu_backend = NULL;
- return -EINVAL);
-
- priv->smu_tables.entry[WMTABLE].version = 0x01;
- priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
- priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
- priv->smu_tables.entry[WMTABLE].table_addr_high =
- smu_upper_32_bits(mc_addr);
- priv->smu_tables.entry[WMTABLE].table_addr_low =
- smu_lower_32_bits(mc_addr);
- priv->smu_tables.entry[WMTABLE].table = kaddr;
- priv->smu_tables.entry[WMTABLE].handle = handle;
-
- /* allocate space for watermarks table */
- smu_allocate_memory(hwmgr->device,
- sizeof(DpmClocks_t),
- CGS_GPU_MEM_TYPE__GART_CACHEABLE,
- PAGE_SIZE,
- &mc_addr,
- &kaddr,
- &handle);
-
- PP_ASSERT_WITH_CODE(kaddr,
- "[rv_smu_init] Out of memory for CLOCKTABLE.",
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- kfree(hwmgr->smu_backend);
- hwmgr->smu_backend = NULL;
- return -EINVAL);
-
- priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
- priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t);
- priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
- priv->smu_tables.entry[CLOCKTABLE].table_addr_high =
- smu_upper_32_bits(mc_addr);
- priv->smu_tables.entry[CLOCKTABLE].table_addr_low =
- smu_lower_32_bits(mc_addr);
- priv->smu_tables.entry[CLOCKTABLE].table = kaddr;
- priv->smu_tables.entry[CLOCKTABLE].handle = handle;
-
- return 0;
-}
-
-const struct pp_smumgr_func rv_smu_funcs = {
- .smu_init = &rv_smu_init,
- .smu_fini = &rv_smu_fini,
- .start_smu = &rv_start_smu,
- .request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = &rv_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &rv_send_msg_to_smc_with_parameter,
- .download_pptable_settings = NULL,
- .upload_pptable_settings = NULL,
-};
-
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
new file mode 100644
index 0000000..bc53f2b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "smu10_inc.h"
+#include "pp_soc15.h"
+#include "smu10_smumgr.h"
+#include "ppatomctrl.h"
+#include "rv_ppsmc.h"
+#include "smu10_driver_if.h"
+#include "smu10.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+
+
+#define VOLTAGE_SCALE 4
+
+#define BUFFER_SIZE 80000
+#define MAX_STRING_SIZE 15
+#define BUFFER_SIZETWO 131072
+
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+
+
+static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+
+ phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_90__CONTENT_MASK);
+
+ return cgs_read_register(hwmgr->device, reg);
+}
+
+static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+ uint16_t msg)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
+ cgs_write_register(hwmgr->device, reg, msg);
+
+ return 0;
+}
+
+static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+
+ return cgs_read_register(hwmgr->device, reg);
+}
+
+static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ uint32_t reg;
+
+ smu10_wait_for_response(hwmgr);
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+ cgs_write_register(hwmgr->device, reg, 0);
+
+ smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ if (smu10_wait_for_response(hwmgr) == 0)
+		pr_err("Failed to send Message %x.\n", msg);
+
+ return 0;
+}
+
+
+static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ uint32_t reg;
+
+ smu10_wait_for_response(hwmgr);
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+ cgs_write_register(hwmgr->device, reg, 0);
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+ cgs_write_register(hwmgr->device, reg, parameter);
+
+ smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+
+ if (smu10_wait_for_response(hwmgr) == 0)
+		pr_err("Failed to send Message %x.\n", msg);
+
+ return 0;
+}
+
+static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
+{
+ struct smu10_smumgr *priv =
+ (struct smu10_smumgr *)(hwmgr->smu_backend);
+
+ PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
+ "Invalid SMU Table ID!", return -EINVAL;);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
+ "Invalid SMU Table version!", return -EINVAL;);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+ "Invalid SMU Table Length!", return -EINVAL;);
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableSmu2Dram,
+ priv->smu_tables.entry[table_id].table_id);
+
+ memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
+ priv->smu_tables.entry[table_id].size);
+
+ return 0;
+}
+
+static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
+{
+ struct smu10_smumgr *priv =
+ (struct smu10_smumgr *)(hwmgr->smu_backend);
+
+ PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
+ "Invalid SMU Table ID!", return -EINVAL;);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
+ "Invalid SMU Table version!", return -EINVAL;);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+ "Invalid SMU Table Length!", return -EINVAL;);
+
+ memcpy(priv->smu_tables.entry[table_id].table, table,
+ priv->smu_tables.entry[table_id].size);
+
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu,
+ priv->smu_tables.entry[table_id].table_id);
+
+ return 0;
+}
+
+static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
+{
+ uint32_t smc_driver_if_version;
+
+ smu10_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion);
+ smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
+
+ if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION) {
+		pr_err("SMU10 driver and firmware interface versions do not match!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* sdma is disabled by default in vbios, need to re-enable in driver */
+static void smu10_smc_enable_sdma(struct pp_hwmgr *hwmgr)
+{
+ smu10_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_PowerUpSdma);
+}
+
+static void smu10_smc_disable_sdma(struct pp_hwmgr *hwmgr)
+{
+ smu10_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_PowerDownSdma);
+}
+
+/* vcn is disabled by default in vbios, need to re-enable in driver */
+static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr)
+{
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_PowerUpVcn, 0);
+}
+
+static void smu10_smc_disable_vcn(struct pp_hwmgr *hwmgr)
+{
+ smu10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_PowerDownVcn, 0);
+}
+
+static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_smumgr *priv =
+ (struct smu10_smumgr *)(hwmgr->smu_backend);
+
+ if (priv) {
+ smu10_smc_disable_sdma(hwmgr);
+ smu10_smc_disable_vcn(hwmgr);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
+ &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_WMTABLE].table);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ }
+
+ return 0;
+}
+
+static int smu10_start_smu(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
+ adev->pm.fw_version = hwmgr->smu_version >> 8;
+
+ if (smu10_verify_smc_interface(hwmgr))
+ return -EINVAL;
+ smu10_smc_enable_sdma(hwmgr);
+ smu10_smc_enable_vcn(hwmgr);
+ return 0;
+}
+
+static int smu10_smu_init(struct pp_hwmgr *hwmgr)
+{
+ struct smu10_smumgr *priv;
+ int r;
+
+ priv = kzalloc(sizeof(struct smu10_smumgr), GFP_KERNEL);
+
+ if (!priv)
+ return -ENOMEM;
+
+ hwmgr->smu_backend = priv;
+
+ /* allocate space for watermarks table */
+ r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ sizeof(Watermarks_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[SMU10_WMTABLE].handle,
+ &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_WMTABLE].table);
+
+ if (r)
+ goto err0;
+
+ priv->smu_tables.entry[SMU10_WMTABLE].version = 0x01;
+ priv->smu_tables.entry[SMU10_WMTABLE].size = sizeof(Watermarks_t);
+ priv->smu_tables.entry[SMU10_WMTABLE].table_id = TABLE_WATERMARKS;
+
+	/* allocate space for DPM clocks table */
+ r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ sizeof(DpmClocks_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
+
+ if (r)
+ goto err1;
+
+ priv->smu_tables.entry[SMU10_CLOCKTABLE].version = 0x01;
+ priv->smu_tables.entry[SMU10_CLOCKTABLE].size = sizeof(DpmClocks_t);
+ priv->smu_tables.entry[SMU10_CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
+
+ return 0;
+
+err1:
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
+ &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
+ &priv->smu_tables.entry[SMU10_WMTABLE].table);
+err0:
+ kfree(priv);
+ return -EINVAL;
+}
+
+static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
+{
+ int ret;
+
+ if (rw)
+ ret = smu10_copy_table_from_smc(hwmgr, table, table_id);
+ else
+ ret = smu10_copy_table_to_smc(hwmgr, table, table_id);
+
+ return ret;
+}
+
+
+const struct pp_smumgr_func smu10_smu_funcs = {
+ .smu_init = &smu10_smu_init,
+ .smu_fini = &smu10_smu_fini,
+ .start_smu = &smu10_start_smu,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = &smu10_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu10_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .get_argument = smu10_read_arg_from_smc,
+ .smc_table_manager = smu10_smc_table_manager,
+};
+
+
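The new smu10_smumgr.c drives the MP1 firmware through three mailbox registers: C2PMSG_66 carries the message ID, C2PMSG_82 the argument or return value, and C2PMSG_90 the response. A condensed, commented sketch of the send path built from the helpers above (the combined function itself is not part of the patch):

static int smu10_mailbox_sketch(struct pp_hwmgr *hwmgr,
				uint16_t msg, uint32_t arg, uint32_t *out)
{
	uint32_t reg90 = soc15_get_register_offset(MP1_HWID, 0,
			mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
	uint32_t reg82 = soc15_get_register_offset(MP1_HWID, 0,
			mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
	uint32_t reg66 = soc15_get_register_offset(MP1_HWID, 0,
			mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);

	/* 1. Wait for any previous exchange to complete (C2PMSG_90 != 0). */
	smu10_wait_for_response(hwmgr);
	/* 2. Arm the response register by clearing it. */
	cgs_write_register(hwmgr->device, reg90, 0);
	/* 3. Stage the argument. */
	cgs_write_register(hwmgr->device, reg82, arg);
	/* 4. Writing the message ID kicks the firmware. */
	cgs_write_register(hwmgr->device, reg66, msg);
	/* 5. A zero response after the wait means the firmware rejected the message. */
	if (smu10_wait_for_response(hwmgr) == 0)
		return -EIO;
	/* 6. Any return payload comes back through C2PMSG_82. */
	if (out)
		*out = cgs_read_register(hwmgr->device, reg82);

	return 0;
}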
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
index 5888840..9c2be74 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
@@ -21,42 +21,30 @@
*
*/
-#ifndef PP_RAVEN_SMUMANAGER_H
-#define PP_RAVEN_SMUMANAGER_H
+#ifndef PP_SMU10_SMUMANAGER_H
+#define PP_SMU10_SMUMANAGER_H
#include "rv_ppsmc.h"
#include "smu10_driver_if.h"
-enum SMU_TABLE_ID {
- WMTABLE = 0,
- CLOCKTABLE,
- MAX_SMU_TABLE,
-};
+#define MAX_SMU_TABLE 2
struct smu_table_entry {
uint32_t version;
uint32_t size;
uint32_t table_id;
- uint32_t table_addr_high;
- uint32_t table_addr_low;
- uint8_t *table;
- uint32_t handle;
+ uint64_t mc_addr;
+ void *table;
+ struct amdgpu_bo *handle;
};
struct smu_table_array {
struct smu_table_entry entry[MAX_SMU_TABLE];
};
-struct rv_smumgr {
+struct smu10_smumgr {
struct smu_table_array smu_tables;
};
-int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
-bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
-int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
-int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
-
#endif
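The header above keeps only the table bookkeeping: a single 64-bit mc_addr replaces the old high/low address pair, and the exported rv_copy_table_* helpers are gone, so table transfers now go through the new .smc_table_manager hook. An illustrative caller (the dispatch through hwmgr->smumgr_funcs is an assumption; rw == true reads a table out of the SMU, false writes the driver copy back, and SMU10_CLOCKTABLE is the index used by the .c file above):

	DpmClocks_t clock_table;

	/* Pull the firmware's DPM clock table into a driver-side copy. */
	if (hwmgr->smumgr_funcs->smc_table_manager)
		hwmgr->smumgr_funcs->smc_table_manager(hwmgr,
				(uint8_t *)&clock_table,
				SMU10_CLOCKTABLE, true);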
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 7f5359a..0399c10 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -369,8 +369,8 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
if (!result) {
entry->version = info.fw_version;
entry->id = (uint16_t)fw_type;
- entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
- entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
+ entry->image_addr_high = upper_32_bits(info.mc_addr);
+ entry->image_addr_low = lower_32_bits(info.mc_addr);
entry->meta_data_addr_high = 0;
entry->meta_data_addr_low = 0;
@@ -412,10 +412,10 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
if (!cgs_is_virtualization_enabled(hwmgr->device)) {
smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI,
- smu_data->smu_buffer.mc_addr_high);
+ upper_32_bits(smu_data->smu_buffer.mc_addr));
smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_LO,
- smu_data->smu_buffer.mc_addr_low);
+ lower_32_bits(smu_data->smu_buffer.mc_addr));
}
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
@@ -472,8 +472,8 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
pr_err("Fail to Request SMU Load uCode");
@@ -535,7 +535,7 @@ int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
hwmgr->is_kicker = info.is_kicker;
-
+ hwmgr->smu_version = info.version;
result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
return result;
@@ -585,9 +585,8 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
int smu7_init(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data;
- uint8_t *internal_buf;
uint64_t mc_addr = 0;
-
+ int r;
/* Allocate memory for backend private data */
smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
smu_data->header_buffer.data_size =
@@ -595,52 +594,42 @@ int smu7_init(struct pp_hwmgr *hwmgr)
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
- smu_allocate_memory(hwmgr->device,
+ r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
smu_data->header_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &smu_data->header_buffer.handle,
&mc_addr,
- &smu_data->header_buffer.kaddr,
- &smu_data->header_buffer.handle);
+ &smu_data->header_buffer.kaddr);
- smu_data->header = smu_data->header_buffer.kaddr;
- smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+ if (r)
+ return -EINVAL;
- PP_ASSERT_WITH_CODE((NULL != smu_data->header),
- "Out of memory.",
- kfree(hwmgr->smu_backend);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)smu_data->header_buffer.handle);
- return -EINVAL);
+ smu_data->header = smu_data->header_buffer.kaddr;
+ smu_data->header_buffer.mc_addr = mc_addr;
if (cgs_is_virtualization_enabled(hwmgr->device))
return 0;
smu_data->smu_buffer.data_size = 200*4096;
- smu_allocate_memory(hwmgr->device,
+ r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
smu_data->smu_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &smu_data->smu_buffer.handle,
&mc_addr,
- &smu_data->smu_buffer.kaddr,
- &smu_data->smu_buffer.handle);
-
- internal_buf = smu_data->smu_buffer.kaddr;
- smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+ &smu_data->smu_buffer.kaddr);
- PP_ASSERT_WITH_CODE((NULL != internal_buf),
- "Out of memory.",
- kfree(hwmgr->smu_backend);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)smu_data->smu_buffer.handle);
- return -EINVAL);
+ if (r) {
+ amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
+ &smu_data->header_buffer.mc_addr,
+ &smu_data->header_buffer.kaddr);
+ return -EINVAL;
+ }
+ smu_data->smu_buffer.mc_addr = mc_addr;
if (smum_is_hw_avfs_present(hwmgr))
- smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
- else
- smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+ hwmgr->avfs_supported = true;
return 0;
}
@@ -648,6 +637,17 @@ int smu7_init(struct pp_hwmgr *hwmgr)
int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+ amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
+ &smu_data->header_buffer.mc_addr,
+ &smu_data->header_buffer.kaddr);
+
+ if (!cgs_is_virtualization_enabled(hwmgr->device))
+ amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
+ &smu_data->smu_buffer.mc_addr,
+ &smu_data->smu_buffer.kaddr);
+
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index c87263b..126d300 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -31,15 +31,9 @@
struct smu7_buffer_entry {
uint32_t data_size;
- uint32_t mc_addr_low;
- uint32_t mc_addr_high;
+ uint64_t mc_addr;
void *kaddr;
- unsigned long handle;
-};
-
-struct smu7_avfs {
- enum AVFS_BTC_STATUS avfs_btc_status;
- uint32_t avfs_btc_param;
+ struct amdgpu_bo *handle;
};
struct smu7_smumgr {
@@ -56,7 +50,7 @@ struct smu7_smumgr {
uint32_t ulv_setting_starts;
uint8_t security_hard_key;
uint32_t acpi_optimization;
- struct smu7_avfs avfs;
+ uint32_t avfs_btc_param;
};
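With smu7_buffer_entry now carrying a single 64-bit mc_addr (plus an amdgpu_bo handle) instead of separate mc_addr_high/mc_addr_low words, the 32-bit SMC message parameters are produced at the call sites with upper_32_bits()/lower_32_bits(), as in the PPSMC_MSG_SMU_DRAM_ADDR_HI/LO and PPSMC_MSG_DRV_DRAM_ADDR_HI/LO sends above. A stand-alone sketch of that split; the UPPER_32/LOWER_32 macros simply mirror the kernel helpers:

/* Stand-alone illustration of splitting a 64-bit MC address into the two
 * 32-bit message parameters sent to the SMC. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define UPPER_32(x) ((uint32_t)(((uint64_t)(x)) >> 32))
#define LOWER_32(x) ((uint32_t)((uint64_t)(x)))

int main(void)
{
        uint64_t mc_addr = 0x0000008012345000ULL;  /* example GPU address */

        printf("DRAM_ADDR_HI = 0x%08" PRIx32 "\n", UPPER_32(mc_addr));
        printf("DRAM_ADDR_LO = 0x%08" PRIx32 "\n", LOWER_32(mc_addr));
        return 0;
}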
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
new file mode 100644
index 0000000..c861d30
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -0,0 +1,891 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "cgs_common.h"
+#include "smu/smu_8_0_d.h"
+#include "smu/smu_8_0_sh_mask.h"
+#include "smu8.h"
+#include "smu8_fusion.h"
+#include "smu8_smumgr.h"
+#include "cz_ppsmc.h"
+#include "smu_ucode_xfer_cz.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "smumgr.h"
+
+#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
+
+static const enum smu8_scratch_entry firmware_list[] = {
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
+};
+
+static int smu8_get_argument(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ return cgs_read_register(hwmgr->device,
+ mmSMU_MP1_SRBM2P_ARG_0);
+}
+
+static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ int result = 0;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+ SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+ if (result != 0) {
+ pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
+ return result;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
+
+ return 0;
+}
+
+/* Send a message to the SMC, and wait for its response. */
+static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ int result = 0;
+
+ result = smu8_send_msg_to_smc_async(hwmgr, msg);
+ if (result != 0)
+ return result;
+
+ return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+ SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+}
+
+static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
+ uint32_t smc_address, uint32_t limit)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ if (0 != (3 & smc_address)) {
+ pr_err("SMC address must be 4 byte aligned\n");
+ return -EINVAL;
+ }
+
+ if (limit <= (smc_address + 3)) {
+ pr_err("SMC address beyond the SMC RAM area\n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
+ SMN_MP1_SRAM_START_ADDR + smc_address);
+
+ return 0;
+}
+
+static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
+ uint32_t smc_address, uint32_t value, uint32_t limit)
+{
+ int result;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ result = smu8_set_smc_sram_address(hwmgr, smc_address, limit);
+ if (!result)
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
+
+ return result;
+}
+
+static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
+
+ return smu8_send_msg_to_smc(hwmgr, msg);
+}
+
+static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
+ uint32_t firmware)
+{
+ int i;
+ uint32_t index = SMN_MP1_SRAM_START_ADDR +
+ SMU8_FIRMWARE_HEADER_LOCATION +
+ offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
+
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
+ if (firmware ==
+ (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
+ break;
+ udelay(1);
+ }
+
+ if (i >= hwmgr->usec_timeout) {
+ pr_err("SMU check loaded firmware failed.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg_data;
+ uint32_t tmp;
+ int ret = 0;
+ struct cgs_firmware_info info = {0};
+ struct smu8_smumgr *smu8_smu;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ smu8_smu = hwmgr->smu_backend;
+ ret = cgs_get_firmware_info(hwmgr->device,
+ CGS_UCODE_ID_CP_MEC, &info);
+
+ if (ret)
+ return -EINVAL;
+
+ /* Disable MEC parsing/prefetching */
+ tmp = cgs_read_register(hwmgr->device,
+ mmCP_MEC_CNTL);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
+
+ tmp = cgs_read_register(hwmgr->device,
+ mmCP_CPC_IC_BASE_CNTL);
+
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
+
+ reg_data = lower_32_bits(info.mc_addr) &
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
+
+ reg_data = upper_32_bits(info.mc_addr) &
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
+
+ return 0;
+}
+
+static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
+ enum smu8_scratch_entry firmware_enum)
+{
+ uint8_t ret = 0;
+
+ switch (firmware_enum) {
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0:
+ ret = UCODE_ID_SDMA0;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1:
+ if (hwmgr->chip_id == CHIP_STONEY)
+ ret = UCODE_ID_SDMA0;
+ else
+ ret = UCODE_ID_SDMA1;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE:
+ ret = UCODE_ID_CP_CE;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
+ ret = UCODE_ID_CP_PFP;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME:
+ ret = UCODE_ID_CP_ME;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
+ ret = UCODE_ID_CP_MEC_JT1;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
+ if (hwmgr->chip_id == CHIP_STONEY)
+ ret = UCODE_ID_CP_MEC_JT1;
+ else
+ ret = UCODE_ID_CP_MEC_JT2;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
+ ret = UCODE_ID_GMCON_RENG;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G:
+ ret = UCODE_ID_RLC_G;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
+ ret = UCODE_ID_RLC_SCRATCH;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
+ ret = UCODE_ID_RLC_SRM_ARAM;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
+ ret = UCODE_ID_RLC_SRM_DRAM;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
+ ret = UCODE_ID_DMCU_ERAM;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
+ ret = UCODE_ID_DMCU_IRAM;
+ break;
+ case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
+ ret = TASK_ARG_INIT_MM_PWR_LOG;
+ break;
+ case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START:
+ case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
+ ret = TASK_ARG_REG_MMIO;
+ break;
+ case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
+ ret = TASK_ARG_INIT_CLK_TABLE;
+ break;
+ }
+
+ return ret;
+}
+
+static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+ enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+ switch (fw_type) {
+ case UCODE_ID_SDMA0:
+ result = CGS_UCODE_ID_SDMA0;
+ break;
+ case UCODE_ID_SDMA1:
+ result = CGS_UCODE_ID_SDMA1;
+ break;
+ case UCODE_ID_CP_CE:
+ result = CGS_UCODE_ID_CP_CE;
+ break;
+ case UCODE_ID_CP_PFP:
+ result = CGS_UCODE_ID_CP_PFP;
+ break;
+ case UCODE_ID_CP_ME:
+ result = CGS_UCODE_ID_CP_ME;
+ break;
+ case UCODE_ID_CP_MEC_JT1:
+ result = CGS_UCODE_ID_CP_MEC_JT1;
+ break;
+ case UCODE_ID_CP_MEC_JT2:
+ result = CGS_UCODE_ID_CP_MEC_JT2;
+ break;
+ case UCODE_ID_RLC_G:
+ result = CGS_UCODE_ID_RLC_G;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static int smu8_smu_populate_single_scratch_task(
+ struct pp_hwmgr *hwmgr,
+ enum smu8_scratch_entry fw_enum,
+ uint8_t type, bool is_last)
+{
+ uint8_t i;
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+ struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
+
+ task->type = type;
+ task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
+ task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
+
+ for (i = 0; i < smu8_smu->scratch_buffer_length; i++)
+ if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum)
+ break;
+
+ if (i >= smu8_smu->scratch_buffer_length) {
+ pr_err("Invalid Firmware Type\n");
+ return -EINVAL;
+ }
+
+ task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
+ task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
+ task->size_bytes = smu8_smu->scratch_buffer[i].data_size;
+
+ if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
+ struct smu8_ih_meta_data *pIHReg_restore =
+ (struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr;
+ pIHReg_restore->command =
+ METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
+ }
+
+ return 0;
+}
+
+static int smu8_smu_populate_single_ucode_load_task(
+ struct pp_hwmgr *hwmgr,
+ enum smu8_scratch_entry fw_enum,
+ bool is_last)
+{
+ uint8_t i;
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+ struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
+
+ task->type = TASK_TYPE_UCODE_LOAD;
+ task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
+ task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;
+
+ for (i = 0; i < smu8_smu->driver_buffer_length; i++)
+ if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum)
+ break;
+
+ if (i >= smu8_smu->driver_buffer_length) {
+ pr_err("Invalid Firmware Type\n");
+ return -EINVAL;
+ }
+
+ task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
+ task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
+ task->size_bytes = smu8_smu->driver_buffer[i].data_size;
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count;
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+ TASK_TYPE_UCODE_SAVE, true);
+
+ return 0;
+}
+
+static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+
+ for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
+ toc->JobList[i] = (uint8_t)IGNORE_JOB;
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+
+ toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count;
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+ TASK_TYPE_UCODE_SAVE, false);
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+ TASK_TYPE_UCODE_SAVE, true);
+
+ return 0;
+}
+
+
+static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
+
+ toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count;
+
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+
+ if (hwmgr->chip_id == CHIP_STONEY)
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+ else
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
+
+ /* populate scratch */
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+ TASK_TYPE_UCODE_LOAD, false);
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+ TASK_TYPE_UCODE_LOAD, false);
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+ TASK_TYPE_UCODE_LOAD, true);
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count;
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+ TASK_TYPE_INITIALIZE, true);
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count;
+
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
+ if (hwmgr->chip_id != CHIP_STONEY)
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+ if (hwmgr->chip_id != CHIP_STONEY)
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+ smu8_smu_populate_single_ucode_load_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count;
+
+ smu8_smu_populate_single_scratch_task(hwmgr,
+ SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
+ TASK_TYPE_INITIALIZE, true);
+
+ return 0;
+}
+
+static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+
+ smu8_smu->toc_entry_used_count = 0;
+ smu8_smu_initialize_toc_empty_job_list(hwmgr);
+ smu8_smu_construct_toc_for_rlc_aram_save(hwmgr);
+ smu8_smu_construct_toc_for_vddgfx_enter(hwmgr);
+ smu8_smu_construct_toc_for_vddgfx_exit(hwmgr);
+ smu8_smu_construct_toc_for_power_profiling(hwmgr);
+ smu8_smu_construct_toc_for_bootup(hwmgr);
+ smu8_smu_construct_toc_for_clock_table(hwmgr);
+
+ return 0;
+}
+
+static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ uint32_t firmware_type;
+ uint32_t i;
+ int ret;
+ enum cgs_ucode_id ucode_id;
+ struct cgs_firmware_info info = {0};
+
+ smu8_smu->driver_buffer_length = 0;
+
+ for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
+
+ firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr,
+ firmware_list[i]);
+
+ ucode_id = smu8_convert_fw_type_to_cgs(firmware_type);
+
+ ret = cgs_get_firmware_info(hwmgr->device,
+ ucode_id, &info);
+
+ if (ret == 0) {
+ smu8_smu->driver_buffer[i].mc_addr = info.mc_addr;
+
+ smu8_smu->driver_buffer[i].data_size = info.image_size;
+
+ smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i];
+ smu8_smu->driver_buffer_length++;
+ }
+ }
+
+ return 0;
+}
+
+static int smu8_smu_populate_single_scratch_entry(
+ struct pp_hwmgr *hwmgr,
+ enum smu8_scratch_entry scratch_type,
+ uint32_t ulsize_byte,
+ struct smu8_buffer_entry *entry)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
+
+ entry->data_size = ulsize_byte;
+ entry->kaddr = (char *) smu8_smu->smu_buffer.kaddr +
+ smu8_smu->smu_buffer_used_bytes;
+ entry->mc_addr = smu8_smu->smu_buffer.mc_addr + smu8_smu->smu_buffer_used_bytes;
+ entry->firmware_ID = scratch_type;
+
+ smu8_smu->smu_buffer_used_bytes += ulsize_aligned;
+
+ return 0;
+}
+
+static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ unsigned long i;
+
+ for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
+ if (smu8_smu->scratch_buffer[i].firmware_ID
+ == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
+ break;
+ }
+
+ *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetClkTableAddrHi,
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetClkTableAddrLo,
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table);
+
+ smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
+
+ return 0;
+}
+
+static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ unsigned long i;
+
+ for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
+ if (smu8_smu->scratch_buffer[i].firmware_ID
+ == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
+ break;
+ }
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetClkTableAddrHi,
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetClkTableAddrLo,
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table);
+
+ smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
+
+ return 0;
+}
+
+static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
+ uint32_t smc_address;
+
+ if (!hwmgr->reload_fw) {
+ pr_info("skip reloading...\n");
+ return 0;
+ }
+
+ smu8_smu_populate_firmware_entries(hwmgr);
+
+ smu8_smu_construct_toc(hwmgr);
+
+ smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
+ offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
+
+ smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DriverDramAddrHi,
+ upper_32_bits(smu8_smu->toc_buffer.mc_addr));
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DriverDramAddrLo,
+ lower_32_bits(smu8_smu->toc_buffer.mc_addr));
+
+ smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
+
+ smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_aram);
+ smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_power_profiling_index);
+
+ return smu8_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_initialize_index);
+}
+
+static int smu8_start_smu(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+ uint32_t fw_to_check = 0;
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ uint32_t index = SMN_MP1_SRAM_START_ADDR +
+ SMU8_FIRMWARE_HEADER_LOCATION +
+ offsetof(struct SMU8_Firmware_Header, Version);
+
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
+ hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+ adev->pm.fw_version = hwmgr->smu_version >> 8;
+
+ fw_to_check = UCODE_ID_RLC_G_MASK |
+ UCODE_ID_SDMA0_MASK |
+ UCODE_ID_SDMA1_MASK |
+ UCODE_ID_CP_CE_MASK |
+ UCODE_ID_CP_ME_MASK |
+ UCODE_ID_CP_PFP_MASK |
+ UCODE_ID_CP_MEC_JT1_MASK |
+ UCODE_ID_CP_MEC_JT2_MASK;
+
+ if (hwmgr->chip_id == CHIP_STONEY)
+ fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
+ ret = smu8_request_smu_load_fw(hwmgr);
+ if (ret)
+ pr_err("SMU firmware load failed\n");
+
+ smu8_check_fw_load_finish(hwmgr, fw_to_check);
+
+ ret = smu8_load_mec_firmware(hwmgr);
+ if (ret)
+ pr_err("Mec Firmware load failed\n");
+
+ return ret;
+}
+
+static int smu8_smu_init(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+ struct smu8_smumgr *smu8_smu;
+
+ smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL);
+ if (smu8_smu == NULL)
+ return -ENOMEM;
+
+ hwmgr->smu_backend = smu8_smu;
+
+ smu8_smu->toc_buffer.data_size = 4096;
+ smu8_smu->smu_buffer.data_size =
+ ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
+ ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
+ ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
+ ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
+ ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
+
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ smu8_smu->toc_buffer.data_size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &smu8_smu->toc_buffer.handle,
+ &smu8_smu->toc_buffer.mc_addr,
+ &smu8_smu->toc_buffer.kaddr);
+ if (ret)
+ goto err2;
+
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ smu8_smu->smu_buffer.data_size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &smu8_smu->smu_buffer.handle,
+ &smu8_smu->smu_buffer.mc_addr,
+ &smu8_smu->smu_buffer.kaddr);
+ if (ret)
+ goto err1;
+
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+ UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+ UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+ UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+ sizeof(struct SMU8_MultimediaPowerLogData),
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+
+ if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
+ SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
+ sizeof(struct SMU8_Fusion_ClkTable),
+ &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
+ pr_err("Error when Populate Firmware Entry.\n");
+ goto err0;
+ }
+
+ return 0;
+
+err0:
+ amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
+ &smu8_smu->smu_buffer.mc_addr,
+ &smu8_smu->smu_buffer.kaddr);
+err1:
+ amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
+ &smu8_smu->toc_buffer.mc_addr,
+ &smu8_smu->toc_buffer.kaddr);
+err2:
+ kfree(smu8_smu);
+ return -EINVAL;
+}
+
+static int smu8_smu_fini(struct pp_hwmgr *hwmgr)
+{
+ struct smu8_smumgr *smu8_smu;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ smu8_smu = hwmgr->smu_backend;
+ if (smu8_smu) {
+ amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
+ &smu8_smu->toc_buffer.mc_addr,
+ &smu8_smu->toc_buffer.kaddr);
+ amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
+ &smu8_smu->smu_buffer.mc_addr,
+ &smu8_smu->smu_buffer.kaddr);
+ kfree(smu8_smu);
+ }
+
+ return 0;
+}
+
+static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
+ unsigned long check_feature)
+{
+ int result;
+ unsigned long features;
+
+ result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
+ if (result == 0) {
+ features = smum_get_argument(hwmgr);
+ if (features & check_feature)
+ return true;
+ }
+
+ return false;
+}
+
+static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
+ return true;
+ return false;
+}
+
+const struct pp_smumgr_func smu8_smu_funcs = {
+ .smu_init = smu8_smu_init,
+ .smu_fini = smu8_smu_fini,
+ .start_smu = smu8_start_smu,
+ .check_fw_load_finish = smu8_check_fw_load_finish,
+ .request_smu_load_fw = NULL,
+ .request_smu_load_specific_fw = NULL,
+ .get_argument = smu8_get_argument,
+ .send_msg_to_smc = smu8_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = smu8_download_pptable_settings,
+ .upload_pptable_settings = smu8_upload_pptable_settings,
+ .is_dpm_running = smu8_is_dpm_running,
+};
+
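smu8_smu_populate_single_scratch_entry() above does not allocate per entry; it carves each scratch region out of the single smu_buffer BO and advances smu_buffer_used_bytes by the requested size rounded up to 32 bytes (SIZE_ALIGN_32). A stand-alone sketch of that sub-allocation arithmetic, using plain offsets in place of the real GPU/CPU addresses:

/* Stand-alone sketch of the 32-byte-aligned sub-allocation. */
#include <stdint.h>
#include <stdio.h>

#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)

struct scratch_entry {
        uint32_t data_size;
        uint32_t offset;      /* offset into the shared SMU buffer */
};

static uint32_t used_bytes;

static void sub_alloc(struct scratch_entry *e, uint32_t size)
{
        e->data_size = size;
        e->offset = used_bytes;
        used_bytes += SIZE_ALIGN_32(size);
}

int main(void)
{
        struct scratch_entry a, b;

        sub_alloc(&a, 100);   /* uses bytes 0..99, reserves 128 */
        sub_alloc(&b, 40);    /* starts at offset 128 */

        printf("a at %u, b at %u, total reserved %u\n",
               a.offset, b.offset, used_bytes);
        return 0;
}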
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
new file mode 100644
index 0000000..c7b6122
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _SMU8_SMUMGR_H_
+#define _SMU8_SMUMGR_H_
+
+
+#define MAX_NUM_FIRMWARE 8
+#define MAX_NUM_SCRATCH 11
+#define SMU8_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024
+#define SMU8_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048
+#define SMU8_SCRATCH_SIZE_SDMA_METADATA 1024
+#define SMU8_SCRATCH_SIZE_IH ((2*256+1)*4)
+
+#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
+
+enum smu8_scratch_entry {
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
+ SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
+ SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
+ SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
+ SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
+ SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
+ SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START,
+ SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
+ SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
+};
+
+struct smu8_buffer_entry {
+ uint32_t data_size;
+ uint64_t mc_addr;
+ void *kaddr;
+ enum smu8_scratch_entry firmware_ID;
+ struct amdgpu_bo *handle; /* bo handle, used when releasing the bo */
+};
+
+struct smu8_register_index_data_pair {
+ uint32_t offset;
+ uint32_t value;
+};
+
+struct smu8_ih_meta_data {
+ uint32_t command;
+ struct smu8_register_index_data_pair register_index_value_pair[1];
+};
+
+struct smu8_smumgr {
+ uint8_t driver_buffer_length;
+ uint8_t scratch_buffer_length;
+ uint16_t toc_entry_used_count;
+ uint16_t toc_entry_initialize_index;
+ uint16_t toc_entry_power_profiling_index;
+ uint16_t toc_entry_aram;
+ uint16_t toc_entry_ih_register_restore_task_index;
+ uint16_t toc_entry_clock_table;
+ uint16_t ih_register_restore_task_size;
+ uint16_t smu_buffer_used_bytes;
+
+ struct smu8_buffer_entry toc_buffer;
+ struct smu8_buffer_entry smu_buffer;
+ struct smu8_buffer_entry firmware_buffer;
+ struct smu8_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
+ struct smu8_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE];
+ struct smu8_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
+};
+
+#endif
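The smu8_smu_init() error path above follows the usual reverse-order unwind: err2 frees only the backend struct, err1 additionally releases the TOC buffer, and err0 additionally releases the SMU buffer, so each label undoes exactly what succeeded before the failure. A stand-alone sketch of that pattern with plain malloc/free standing in for amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel():

/* Stand-alone sketch of the goto-unwind ordering; resource names are
 * hypothetical. */
#include <stdlib.h>

struct backend {
        void *toc;
        void *smu;
};

static int backend_init(struct backend **out)
{
        struct backend *b = calloc(1, sizeof(*b));

        if (!b)
                return -1;

        b->toc = malloc(4096);
        if (!b->toc)
                goto err2;

        b->smu = malloc(4096);
        if (!b->smu)
                goto err1;

        *out = b;
        return 0;

err1:
        free(b->toc);
err2:
        free(b);
        return -1;
}

int main(void)
{
        struct backend *b = NULL;

        if (backend_init(&b) == 0) {
                free(b->smu);
                free(b->toc);
                free(b);
        }
        return 0;
}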
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 8673884..c28b60a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -28,7 +28,6 @@
#include <linux/types.h>
#include <drm/amdgpu_drm.h>
#include "smumgr.h"
-#include "cgs_common.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
@@ -44,6 +43,7 @@ MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
+MODULE_FIRMWARE("amdgpu/vega12_smc.bin");
int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
@@ -144,57 +144,6 @@ int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
hwmgr, msg, parameter);
}
-int smu_allocate_memory(void *device, uint32_t size,
- enum cgs_gpu_mem_type type,
- uint32_t byte_align, uint64_t *mc_addr,
- void **kptr, void *handle)
-{
- int ret = 0;
- cgs_handle_t cgs_handle;
-
- if (device == NULL || handle == NULL ||
- mc_addr == NULL || kptr == NULL)
- return -EINVAL;
-
- ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
- (cgs_handle_t *)handle);
- if (ret)
- return -ENOMEM;
-
- cgs_handle = *(cgs_handle_t *)handle;
-
- ret = cgs_gmap_gpu_mem(device, cgs_handle, mc_addr);
- if (ret)
- goto error_gmap;
-
- ret = cgs_kmap_gpu_mem(device, cgs_handle, kptr);
- if (ret)
- goto error_kmap;
-
- return 0;
-
-error_kmap:
- cgs_gunmap_gpu_mem(device, cgs_handle);
-
-error_gmap:
- cgs_free_gpu_mem(device, cgs_handle);
- return ret;
-}
-
-int smu_free_memory(void *device, void *handle)
-{
- cgs_handle_t cgs_handle = (cgs_handle_t)handle;
-
- if (device == NULL || handle == NULL)
- return -EINVAL;
-
- cgs_kunmap_gpu_mem(device, cgs_handle);
- cgs_gunmap_gpu_mem(device, cgs_handle);
- cgs_free_gpu_mem(device, cgs_handle);
-
- return 0;
-}
-
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr_funcs->init_smc_table)
@@ -236,16 +185,6 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
return true;
}
-int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- if (hwmgr->smumgr_funcs->populate_requested_graphic_levels)
- return hwmgr->smumgr_funcs->populate_requested_graphic_levels(
- hwmgr, request);
-
- return 0;
-}
-
bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
if (hwmgr->smumgr_funcs->is_hw_avfs_present)
@@ -253,3 +192,19 @@ bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
return false;
}
+
+int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting)
+{
+ if (hwmgr->smumgr_funcs->update_dpm_settings)
+ return hwmgr->smumgr_funcs->update_dpm_settings(hwmgr, profile_setting);
+
+ return -EINVAL;
+}
+
+int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
+{
+ if (hwmgr->smumgr_funcs->smc_table_manager)
+ return hwmgr->smumgr_funcs->smc_table_manager(hwmgr, table, table_id, rw);
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 0a8e48b..b51d746 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -222,7 +222,6 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
static int tonga_smu_init(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *tonga_priv = NULL;
- int i;
tonga_priv = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL);
if (tonga_priv == NULL)
@@ -230,11 +229,10 @@ static int tonga_smu_init(struct pp_hwmgr *hwmgr)
hwmgr->smu_backend = tonga_priv;
- if (smu7_init(hwmgr))
+ if (smu7_init(hwmgr)) {
+ kfree(tonga_priv);
return -EINVAL;
-
- for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
- tonga_priv->activity_target[i] = 30;
+ }
return 0;
}
@@ -416,7 +414,7 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
}
- if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
/* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
for (count = 0; count < vddgfx_level_count; count++) {
index = phm_get_voltage_index(vddgfx_lookup_table,
@@ -612,7 +610,6 @@ static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
- uint16_t sclk_activity_level_threshold,
SMU72_Discrete_GraphicsLevel *graphic_level)
{
int result;
@@ -620,12 +617,18 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *pptable_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+ if (hwmgr->od_enabled)
+ vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk;
+ else
+ vdd_dep_table = pptable_info->vdd_dep_on_sclk;
+
/* populate graphics levels*/
result = tonga_get_dependency_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_sclk, engine_clock,
+ vdd_dep_table, engine_clock,
&graphic_level->MinVoltage, &mvdd);
PP_ASSERT_WITH_CODE((!result),
"can not find VDDC voltage value for VDDC "
@@ -634,7 +637,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
/* SCLK frequency in units of 10KHz*/
graphic_level->SclkFrequency = engine_clock;
/* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
+ graphic_level->ActivityLevel = data->current_profile_setting.sclk_activity;
graphic_level->CcPwrDynRm = 0;
graphic_level->CcPwrDynRm1 = 0;
@@ -642,8 +645,8 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
graphic_level->EnabledForActivity = 0;
/* this level can be used for throttling.*/
graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 0;
+ graphic_level->UpHyst = data->current_profile_setting.sclk_up_hyst;
+ graphic_level->DownHyst = data->current_profile_setting.sclk_down_hyst;
graphic_level->VoltageDownHyst = 0;
graphic_level->PowerThrottle = 0;
@@ -702,7 +705,6 @@ static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
for (i = 0; i < dpm_table->sclk_table.count; i++) {
result = tonga_populate_single_graphic_level(hwmgr,
dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
&(smu_data->smc_state_table.GraphicsLevel[i]));
if (result != 0)
return result;
@@ -966,10 +968,16 @@ static int tonga_populate_single_memory_level(
uint32_t mclk_stutter_mode_threshold = 30000;
uint32_t mclk_edc_enable_threshold = 40000;
uint32_t mclk_strobe_mode_threshold = 40000;
+ phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
+
+ if (hwmgr->od_enabled)
+ vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
+ else
+ vdd_dep_table = pptable_info->vdd_dep_on_mclk;
- if (NULL != pptable_info->vdd_dep_on_mclk) {
+ if (NULL != vdd_dep_table) {
result = tonga_get_dependency_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_mclk,
+ vdd_dep_table,
memory_clock,
&memory_level->MinVoltage, &mvdd);
PP_ASSERT_WITH_CODE(
@@ -986,12 +994,12 @@ static int tonga_populate_single_memory_level(
memory_level->EnabledForThrottle = 1;
memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
+ memory_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
+ memory_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
memory_level->VoltageDownHyst = 0;
/* Indicates maximum activity level for this performance level.*/
- memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
memory_level->StutterEnable = 0;
memory_level->StrobeEnable = 0;
memory_level->EdcReadEnable = 0;
@@ -1281,7 +1289,7 @@ static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.VoltageDownHyst = 0;
/* Indicates maximum activity level for this performance level.*/
table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+ PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
table->MemoryACPILevel.StutterEnable = 0;
table->MemoryACPILevel.StrobeEnable = 0;
@@ -1617,19 +1625,12 @@ static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
table_info->vdd_dep_on_sclk;
uint32_t hw_revision, dev_id;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
- sys_info.size = sizeof(struct cgs_system_info);
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- hw_revision = (uint32_t)sys_info.value;
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- dev_id = (uint32_t)sys_info.value;
+ hw_revision = adev->pdev->revision;
+ dev_id = adev->pdev->device;
/* Read SMU_Eefuse to read and calculate RO and determine
* if the part is SS or FF. if RO >= 1660MHz, part is FF.
@@ -1699,7 +1700,7 @@ static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
+ "Stretch Amount in PPTable not supported",
return -EINVAL);
}
@@ -2257,42 +2258,6 @@ static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
}
-static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
- struct SMU72_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
@@ -2434,7 +2399,7 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
result = tonga_populate_vr_config(hwmgr, table);
PP_ASSERT_WITH_CODE(!result,
"Failed to populate VRConfig setting !", return result);
-
+ data->vr_config = table->VRConfig;
table->ThermGpio = 17;
table->SclkStepSize = 0x4000;
@@ -2501,7 +2466,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
@@ -2535,8 +2499,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((!result),
"Failed to populate initialize MC Reg table !", return result);
- tonga_save_default_power_profile(hwmgr);
-
return 0;
}
@@ -2612,7 +2574,7 @@ static int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
fan_table.TempRespLim = cpu_to_be16(5);
- reference_clock = smu7_get_xclk(hwmgr);
+ reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
@@ -2654,10 +2616,7 @@ static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
@@ -3106,9 +3065,9 @@ static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
}
j++;
+
PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
-
temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
@@ -3121,18 +3080,16 @@ static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
if (!data->is_memory_gddr5) {
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++)
table->mc_reg_table_entry[k].mc_data[j] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
}
break;
@@ -3147,8 +3104,6 @@ static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
break;
default:
@@ -3261,29 +3216,100 @@ static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
? true : false;
}
-static int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
+static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
+ void *profile_setting)
{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
(hwmgr->smu_backend);
+ struct profile_mode_setting *setting;
struct SMU72_Discrete_GraphicsLevel *levels =
smu_data->smc_state_table.GraphicsLevel;
uint32_t array = smu_data->smu7_data.dpm_table_start +
offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
- SMU72_MAX_LEVELS_GRAPHICS;
+
+ uint32_t mclk_array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
+ struct SMU72_Discrete_MemoryLevel *mclk_levels =
+ smu_data->smc_state_table.MemoryLevel;
uint32_t i;
+ uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
+
+ if (profile_setting == NULL)
+ return -EINVAL;
+
+ setting = (struct profile_mode_setting *)profile_setting;
+
+ if (setting->bupdate_sclk) {
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ if (levels[i].ActivityLevel !=
+ cpu_to_be16(setting->sclk_activity)) {
+ levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
+
+ clk_activity_offset = array + (sizeof(SMU72_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU72_Discrete_GraphicsLevel, ActivityLevel);
+ offset = clk_activity_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
+ }
+ if (levels[i].UpHyst != setting->sclk_up_hyst ||
+ levels[i].DownHyst != setting->sclk_down_hyst) {
+ levels[i].UpHyst = setting->sclk_up_hyst;
+ levels[i].DownHyst = setting->sclk_down_hyst;
+ up_hyst_offset = array + (sizeof(SMU72_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU72_Discrete_GraphicsLevel, UpHyst);
+ down_hyst_offset = array + (sizeof(SMU72_Discrete_GraphicsLevel) * i)
+ + offsetof(SMU72_Discrete_GraphicsLevel, DownHyst);
+ offset = up_hyst_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t));
+ tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+ }
+ }
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}
- return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
+ if (setting->bupdate_mclk) {
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
+ if (mclk_levels[i].ActivityLevel !=
+ cpu_to_be16(setting->mclk_activity)) {
+ mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
+
+ clk_activity_offset = mclk_array + (sizeof(SMU72_Discrete_MemoryLevel) * i)
+ + offsetof(SMU72_Discrete_MemoryLevel, ActivityLevel);
+ offset = clk_activity_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+
+ }
+ if (mclk_levels[i].UpHyst != setting->mclk_up_hyst ||
+ mclk_levels[i].DownHyst != setting->mclk_down_hyst) {
+ mclk_levels[i].UpHyst = setting->mclk_up_hyst;
+ mclk_levels[i].DownHyst = setting->mclk_down_hyst;
+ up_hyst_offset = mclk_array + (sizeof(SMU72_Discrete_MemoryLevel) * i)
+ + offsetof(SMU72_Discrete_MemoryLevel, UpHyst);
+ down_hyst_offset = mclk_array + (sizeof(SMU72_Discrete_MemoryLevel) * i)
+ + offsetof(SMU72_Discrete_MemoryLevel, DownHyst);
+ offset = up_hyst_offset & ~0x3;
+ tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
+ tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t));
+ tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t));
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
+ }
+ }
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ }
+ return 0;
}
const struct pp_smumgr_func tonga_smu_funcs = {
@@ -3308,5 +3334,5 @@ const struct pp_smumgr_func tonga_smu_funcs = {
.get_mac_definition = tonga_get_mac_definition,
.initialize_mc_reg_table = tonga_initialize_mc_reg_table,
.is_dpm_running = tonga_is_dpm_running,
- .populate_requested_graphic_levels = tonga_populate_requested_graphic_levels,
+ .update_dpm_settings = tonga_update_dpm_settings,
};
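tonga_update_dpm_settings() above patches individual ActivityLevel/UpHyst/DownHyst fields in place in SMC SRAM: it rounds the field's byte offset down to the containing dword (offset & ~0x3), reads that dword through the indirect SMC register interface, splices the new value in with phm_set_field_to_u32(), and writes the dword back. A stand-alone sketch of the splice step only; set_field_u32() is a hypothetical stand-in for phm_set_field_to_u32(), and the PP_HOST_TO_SMC_UL byte-order conversions of the real code are omitted:

/* Stand-alone sketch of patching a 1- or 2-byte field inside its
 * 4-byte-aligned containing dword. */
#include <stdint.h>
#include <stdio.h>

static uint32_t set_field_u32(uint32_t byte_offset, uint32_t dword,
                              uint32_t value, uint32_t field_size)
{
        uint32_t shift = (byte_offset & 0x3) * 8;
        uint32_t mask = ((field_size == 1) ? 0xffu : 0xffffu) << shift;

        return (dword & ~mask) | ((value << shift) & mask);
}

int main(void)
{
        uint32_t sram[2] = { 0x11223344, 0x55667788 };  /* fake SMC SRAM */
        uint32_t field_offset = 6;                      /* 2-byte field */
        uint32_t aligned = field_offset & ~0x3u;        /* containing dword */

        sram[aligned / 4] = set_field_u32(field_offset, sram[aligned / 4],
                                          0xabcd, sizeof(uint16_t));

        printf("dword at offset %u is now 0x%08x\n", aligned, sram[aligned / 4]);
        return 0;
}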
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 5d70a00..d664fed 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -69,9 +69,6 @@ struct tonga_smumgr {
const struct tonga_pt_defaults *power_tune_defaults;
SMU72_Discrete_MCRegisters mc_regs;
struct tonga_mc_reg_table mc_reg_table;
-
- uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS];
-
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 2f979fb..4aafb04 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -25,13 +25,12 @@
#include "vega10_inc.h"
#include "pp_soc15.h"
#include "vega10_smumgr.h"
+#include "vega10_hwmgr.h"
#include "vega10_ppsmc.h"
#include "smu9_driver_if.h"
-
#include "ppatomctrl.h"
#include "pp_debug.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu7_smumgr.h"
+
#define AVFS_EN_MSB 1568
#define AVFS_EN_LSB 1568
@@ -83,16 +82,17 @@ static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
-
- if (!vega10_is_smc_ram_running(hwmgr))
- return -EINVAL;
+ uint32_t ret;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- phm_wait_for_register_unequal(hwmgr, reg,
+ ret = phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
+ if (ret)
+ pr_err("No response from smu\n");
+
return cgs_read_register(hwmgr->device, reg);
}
@@ -102,14 +102,11 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
* @param msg the message to send.
* @return Always return 0.
*/
-int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(hwmgr))
- return -EINVAL;
-
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
cgs_write_register(hwmgr->device, reg, msg);
@@ -123,12 +120,10 @@ int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
* @param msg the message to send.
* @return Always return 0.
*/
-int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
uint32_t reg;
-
- if (!vega10_is_smc_ram_running(hwmgr))
- return -EINVAL;
+ uint32_t ret;
vega10_wait_for_response(hwmgr);
@@ -138,8 +133,9 @@ int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(hwmgr) != 1)
- pr_err("Failed to send message: 0x%x\n", msg);
+ ret = vega10_wait_for_response(hwmgr);
+ if (ret != 1)
+ pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
return 0;
}
@@ -151,13 +147,11 @@ int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
* @param parameter: the parameter to send
* @return Always return 0.
*/
-int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
uint32_t reg;
-
- if (!vega10_is_smc_ram_running(hwmgr))
- return -EINVAL;
+ uint32_t ret;
vega10_wait_for_response(hwmgr);
@@ -171,60 +165,27 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(hwmgr) != 1)
- pr_err("Failed to send message: 0x%x\n", msg);
+ ret = vega10_wait_for_response(hwmgr);
+ if (ret != 1)
+ pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
return 0;
}
-
-/*
- * Send a message to the SMC with parameter, do not wait for response
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return The response that came from the SMC.
- */
-int vega10_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
+static int vega10_get_argument(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(hwmgr->device, reg, parameter);
- return vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
-}
-
-/*
- * Retrieve an argument from SMC.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param arg pointer to store the argument from SMC.
- * @return Always return 0.
- */
-int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
-{
- uint32_t reg;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
-
- *arg = cgs_read_register(hwmgr->device, reg);
-
- return 0;
+ return cgs_read_register(hwmgr->device, reg);
}
-/*
- * Copy table from SMC into driver FB
- * @param hwmgr the address of the HW manager
- * @param table_id the driver's table ID to copy from
- */
-int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
- struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(hwmgr->smu_backend);
+ struct vega10_smumgr *priv = hwmgr->smu_backend;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -232,20 +193,15 @@ int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- priv->smu_tables.entry[table_id].table_addr_high) == 0,
- "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- priv->smu_tables.entry[table_id].table_addr_low) == 0,
- "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- priv->smu_tables.entry[table_id].table_id) == 0,
- "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
- return -EINVAL);
+ priv->smu_tables.entry[table_id].table_id);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -253,16 +209,10 @@ int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return 0;
}
-/*
- * Copy table from Driver FB into SMC
- * @param hwmgr the address of the HW manager
- * @param table_id the table to copy from
- */
-int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
- struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(hwmgr->smu_backend);
+ struct vega10_smumgr *priv = hwmgr->smu_backend;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -274,81 +224,54 @@ int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- priv->smu_tables.entry[table_id].table_addr_high) == 0,
- "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
- return -EINVAL;);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- priv->smu_tables.entry[table_id].table_addr_low) == 0,
- "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- priv->smu_tables.entry[table_id].table_id) == 0,
- "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
- return -EINVAL);
+ priv->smu_tables.entry[table_id].table_id);
return 0;
}
-int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
-{
- PP_ASSERT_WITH_CODE(avfs_table,
- "No access to SMC AVFS Table",
- return -EINVAL);
-
- return vega10_copy_table_from_smc(hwmgr, avfs_table, AVFSTABLE);
-}
-
-int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
+static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
+ uint32_t *features_enabled)
{
- PP_ASSERT_WITH_CODE(avfs_table,
- "No access to SMC AVFS Table",
- return -EINVAL);
-
- return vega10_copy_table_to_smc(hwmgr, avfs_table, AVFSTABLE);
-}
+ if (features_enabled == NULL)
+ return -EINVAL;
-int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
- bool enable, uint32_t feature_mask)
-{
- int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
- PPSMC_MSG_DisableSmuFeatures;
+ vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
+ *features_enabled = vega10_get_argument(hwmgr);
- return vega10_send_msg_to_smc_with_parameter(hwmgr,
- msg, feature_mask);
+ return 0;
}
-int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
- uint32_t *features_enabled)
+static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- if (features_enabled == NULL)
- return -EINVAL;
+ uint32_t features_enabled = 0;
- if (!vega10_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeatures)) {
- vega10_read_arg_from_smc(hwmgr, features_enabled);
- return 0;
- }
+ vega10_get_smc_features(hwmgr, &features_enabled);
- return -EINVAL;
+ if (features_enabled & SMC_DPM_FEATURES)
+ return true;
+ else
+ return false;
}
-int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
+static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
{
- struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(hwmgr->smu_backend);
+ struct vega10_smumgr *priv = hwmgr->smu_backend;
- if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high ||
- priv->smu_tables.entry[TOOLSTABLE].table_addr_low) {
- if (!vega10_send_msg_to_smc_with_parameter(hwmgr,
+ if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- priv->smu_tables.entry[TOOLSTABLE].table_addr_high))
- vega10_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetToolsDramAddrLow,
- priv->smu_tables.entry[TOOLSTABLE].table_addr_low);
+ upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetToolsDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
}
return 0;
}
@@ -356,7 +279,7 @@ int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
- struct cgs_system_info sys_info = {0};
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t dev_id;
uint32_t rev_id;
@@ -364,27 +287,18 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version);
+ smc_driver_if_version = vega10_get_argument(hwmgr);
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- dev_id = (uint32_t)sys_info.value;
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- rev_id = (uint32_t)sys_info.value;
+ dev_id = adev->pdev->device;
+ rev_id = adev->pdev->revision;
if (!((dev_id == 0x687f) &&
((rev_id == 0xc0) ||
(rev_id == 0xc1) ||
(rev_id == 0xc3)))) {
if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
- pr_err("Your firmware(0x%x) doesn't match \
- SMU9_DRIVER_IF_VERSION(0x%x). \
- Please update your firmware!\n",
- smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
+ pr_err("Your firmware(0x%x) doesn't match SMU9_DRIVER_IF_VERSION(0x%x). Please update your firmware!\n",
+ smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
return -EINVAL;
}
}
@@ -395,14 +309,12 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
static int vega10_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv;
- uint64_t mc_addr;
- void *kaddr = NULL;
- unsigned long handle, tools_size;
+ unsigned long tools_size;
int ret;
struct cgs_firmware_info info = {0};
ret = cgs_get_firmware_info(hwmgr->device,
- smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
+ CGS_UCODE_ID_SMU,
&info);
if (ret || !info.kptr)
return -EINVAL;
@@ -415,166 +327,130 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
hwmgr->smu_backend = priv;
/* allocate space for pptable */
- smu_allocate_memory(hwmgr->device,
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
sizeof(PPTable_t),
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
- &mc_addr,
- &kaddr,
- &handle);
-
- PP_ASSERT_WITH_CODE(kaddr,
- "[vega10_smu_init] Out of memory for pptable.",
- kfree(hwmgr->smu_backend);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)handle);
- return -EINVAL);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[PPTABLE].handle,
+ &priv->smu_tables.entry[PPTABLE].mc_addr,
+ &priv->smu_tables.entry[PPTABLE].table);
+ if (ret)
+ goto free_backend;
priv->smu_tables.entry[PPTABLE].version = 0x01;
priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t);
priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE;
- priv->smu_tables.entry[PPTABLE].table_addr_high =
- smu_upper_32_bits(mc_addr);
- priv->smu_tables.entry[PPTABLE].table_addr_low =
- smu_lower_32_bits(mc_addr);
- priv->smu_tables.entry[PPTABLE].table = kaddr;
- priv->smu_tables.entry[PPTABLE].handle = handle;
/* allocate space for watermarks table */
- smu_allocate_memory(hwmgr->device,
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
sizeof(Watermarks_t),
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
- &mc_addr,
- &kaddr,
- &handle);
-
- PP_ASSERT_WITH_CODE(kaddr,
- "[vega10_smu_init] Out of memory for wmtable.",
- kfree(hwmgr->smu_backend);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)handle);
- return -EINVAL);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[WMTABLE].handle,
+ &priv->smu_tables.entry[WMTABLE].mc_addr,
+ &priv->smu_tables.entry[WMTABLE].table);
+
+ if (ret)
+ goto err0;
priv->smu_tables.entry[WMTABLE].version = 0x01;
priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
- priv->smu_tables.entry[WMTABLE].table_addr_high =
- smu_upper_32_bits(mc_addr);
- priv->smu_tables.entry[WMTABLE].table_addr_low =
- smu_lower_32_bits(mc_addr);
- priv->smu_tables.entry[WMTABLE].table = kaddr;
- priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for AVFS table */
- smu_allocate_memory(hwmgr->device,
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
sizeof(AvfsTable_t),
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
- &mc_addr,
- &kaddr,
- &handle);
-
- PP_ASSERT_WITH_CODE(kaddr,
- "[vega10_smu_init] Out of memory for avfs table.",
- kfree(hwmgr->smu_backend);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)handle);
- return -EINVAL);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[AVFSTABLE].handle,
+ &priv->smu_tables.entry[AVFSTABLE].mc_addr,
+ &priv->smu_tables.entry[AVFSTABLE].table);
+
+ if (ret)
+ goto err1;
priv->smu_tables.entry[AVFSTABLE].version = 0x01;
priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t);
priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS;
- priv->smu_tables.entry[AVFSTABLE].table_addr_high =
- smu_upper_32_bits(mc_addr);
- priv->smu_tables.entry[AVFSTABLE].table_addr_low =
- smu_lower_32_bits(mc_addr);
- priv->smu_tables.entry[AVFSTABLE].table = kaddr;
- priv->smu_tables.entry[AVFSTABLE].handle = handle;
tools_size = 0x19000;
if (tools_size) {
- smu_allocate_memory(hwmgr->device,
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
tools_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
- &mc_addr,
- &kaddr,
- &handle);
-
- if (kaddr) {
- priv->smu_tables.entry[TOOLSTABLE].version = 0x01;
- priv->smu_tables.entry[TOOLSTABLE].size = tools_size;
- priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG;
- priv->smu_tables.entry[TOOLSTABLE].table_addr_high =
- smu_upper_32_bits(mc_addr);
- priv->smu_tables.entry[TOOLSTABLE].table_addr_low =
- smu_lower_32_bits(mc_addr);
- priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
- priv->smu_tables.entry[TOOLSTABLE].handle = handle;
- }
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[TOOLSTABLE].handle,
+ &priv->smu_tables.entry[TOOLSTABLE].mc_addr,
+ &priv->smu_tables.entry[TOOLSTABLE].table);
+ if (ret)
+ goto err2;
+ priv->smu_tables.entry[TOOLSTABLE].version = 0x01;
+ priv->smu_tables.entry[TOOLSTABLE].size = tools_size;
+ priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG;
}
/* allocate space for AVFS Fuse table */
- smu_allocate_memory(hwmgr->device,
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
sizeof(AvfsFuseOverride_t),
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
- &mc_addr,
- &kaddr,
- &handle);
-
- PP_ASSERT_WITH_CODE(kaddr,
- "[vega10_smu_init] Out of memory for avfs fuse table.",
- kfree(hwmgr->smu_backend);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)handle);
- return -EINVAL);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[AVFSFUSETABLE].handle,
+ &priv->smu_tables.entry[AVFSFUSETABLE].mc_addr,
+ &priv->smu_tables.entry[AVFSFUSETABLE].table);
+ if (ret)
+ goto err3;
priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01;
priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t);
priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE;
- priv->smu_tables.entry[AVFSFUSETABLE].table_addr_high =
- smu_upper_32_bits(mc_addr);
- priv->smu_tables.entry[AVFSFUSETABLE].table_addr_low =
- smu_lower_32_bits(mc_addr);
- priv->smu_tables.entry[AVFSFUSETABLE].table = kaddr;
- priv->smu_tables.entry[AVFSFUSETABLE].handle = handle;
+
return 0;
+
+err3:
+ if (priv->smu_tables.entry[TOOLSTABLE].table)
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle,
+ &priv->smu_tables.entry[TOOLSTABLE].mc_addr,
+ &priv->smu_tables.entry[TOOLSTABLE].table);
+err2:
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle,
+ &priv->smu_tables.entry[AVFSTABLE].mc_addr,
+ &priv->smu_tables.entry[AVFSTABLE].table);
+err1:
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+ &priv->smu_tables.entry[WMTABLE].mc_addr,
+ &priv->smu_tables.entry[WMTABLE].table);
+err0:
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
+ &priv->smu_tables.entry[PPTABLE].mc_addr,
+ &priv->smu_tables.entry[PPTABLE].table);
+free_backend:
+ kfree(hwmgr->smu_backend);
+
+ return -EINVAL;
}
static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
{
- struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(hwmgr->smu_backend);
+ struct vega10_smumgr *priv = hwmgr->smu_backend;
if (priv) {
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
+ &priv->smu_tables.entry[PPTABLE].mc_addr,
+ &priv->smu_tables.entry[PPTABLE].table);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+ &priv->smu_tables.entry[WMTABLE].mc_addr,
+ &priv->smu_tables.entry[WMTABLE].table);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle,
+ &priv->smu_tables.entry[AVFSTABLE].mc_addr,
+ &priv->smu_tables.entry[AVFSTABLE].table);
if (priv->smu_tables.entry[TOOLSTABLE].table)
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(hwmgr->device,
- (cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle,
+ &priv->smu_tables.entry[TOOLSTABLE].mc_addr,
+ &priv->smu_tables.entry[TOOLSTABLE].table);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSFUSETABLE].handle,
+ &priv->smu_tables.entry[AVFSFUSETABLE].mc_addr,
+ &priv->smu_tables.entry[AVFSFUSETABLE].table);
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
}
@@ -583,6 +459,9 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
static int vega10_start_smu(struct pp_hwmgr *hwmgr)
{
+ if (!vega10_is_smc_ram_running(hwmgr))
+ return -EINVAL;
+
PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
"Failed to verify SMC interface!",
return -EINVAL);
@@ -592,6 +471,18 @@ static int vega10_start_smu(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
+{
+ int ret;
+
+ if (rw)
+ ret = vega10_copy_table_from_smc(hwmgr, table, table_id);
+ else
+ ret = vega10_copy_table_to_smc(hwmgr, table, table_id);
+
+ return ret;
+}
+
const struct pp_smumgr_func vega10_smu_funcs = {
.smu_init = &vega10_smu_init,
.smu_fini = &vega10_smu_fini,
@@ -601,4 +492,7 @@ const struct pp_smumgr_func vega10_smu_funcs = {
.send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
+ .is_dpm_running = vega10_is_dpm_running,
+ .get_argument = vega10_get_argument,
+ .smc_table_manager = vega10_smc_table_manager,
};
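
For reference, every table copy path above hands the SMC the 64-bit DRAM address of the staging buffer as two 32-bit messages before requesting the transfer. A minimal sketch of that handshake, using only the message IDs and helpers visible in this diff and assuming it sits alongside the static helpers in vega10_smumgr.c (the wrapper name is hypothetical, not part of the patch):

static int example_push_table_to_smc(struct pp_hwmgr *hwmgr,
		struct smu_table_entry *entry)
{
	/* publish the staging buffer's GPU (MC) address, high half then low half */
	vega10_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(entry->mc_addr));
	vega10_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(entry->mc_addr));

	/* ask the SMC to pull the table from DRAM into its own RAM */
	return vega10_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu, entry->table_id);
}

The SMC-to-driver direction uses the same two address messages followed by PPSMC_MSG_TransferTableSmu2Dram, with the memcpy() into the caller's buffer done after the transfer instead of before.
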
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index 0695455..424e868 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -23,25 +23,15 @@
#ifndef _VEGA10_SMUMANAGER_H_
#define _VEGA10_SMUMANAGER_H_
-#include "vega10_hwmgr.h"
-
-enum smu_table_id {
- PPTABLE = 0,
- WMTABLE,
- AVFSTABLE,
- TOOLSTABLE,
- AVFSFUSETABLE,
- MAX_SMU_TABLE,
-};
+#define MAX_SMU_TABLE 5
struct smu_table_entry {
uint32_t version;
uint32_t size;
uint32_t table_id;
- uint32_t table_addr_high;
- uint32_t table_addr_low;
- uint8_t *table;
- unsigned long handle;
+ uint64_t mc_addr;
+ void *table;
+ struct amdgpu_bo *handle;
};
struct smu_table_array {
@@ -52,19 +42,6 @@ struct vega10_smumgr {
struct smu_table_array smu_tables;
};
-int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
-int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
-int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
- uint8_t *table, int16_t table_id);
-int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
- bool enable, uint32_t feature_mask);
-int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
- uint32_t *features_enabled);
-int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
-int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
-
-int vega10_set_tools_address(struct pp_hwmgr *hwmgr);
#endif
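
A reduced sketch of the allocate-and-unwind pattern that vega10_smu_init() now follows, assuming only the amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() calls and table indices shown above (the two-table helper itself is hypothetical):

static int example_alloc_two_tables(struct pp_hwmgr *hwmgr,
		struct vega10_smumgr *priv)
{
	int ret;

	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(PPTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[PPTABLE].handle,
			&priv->smu_tables.entry[PPTABLE].mc_addr,
			&priv->smu_tables.entry[PPTABLE].table);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(Watermarks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[WMTABLE].handle,
			&priv->smu_tables.entry[WMTABLE].mc_addr,
			&priv->smu_tables.entry[WMTABLE].table);
	if (ret)
		goto free_pptable;	/* unwind already-created buffers in reverse order */

	return 0;

free_pptable:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
			&priv->smu_tables.entry[PPTABLE].mc_addr,
			&priv->smu_tables.entry[PPTABLE].table);
	return ret;
}
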
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
new file mode 100644
index 0000000..55cd204
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "vega12_inc.h"
+#include "pp_soc15.h"
+#include "vega12_smumgr.h"
+#include "vega12_ppsmc.h"
+#include "vega12/smu9_driver_if.h"
+
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "smu_ucode_xfer_vi.h"
+#include "smu7_smumgr.h"
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+#define smnMP0_FW_INTF 0x3010104
+#define smnMP1_PUB_CTRL 0x3010b14
+
+static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+ uint32_t mp1_fw_flags, reg;
+
+ reg = soc15_get_register_offset(NBIF_HWID, 0,
+ mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
+
+ cgs_write_register(hwmgr->device, reg,
+ (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
+
+ reg = soc15_get_register_offset(NBIF_HWID, 0,
+ mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
+
+ mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
+
+ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return true;
+
+ return false;
+}
+
+/*
+ * Wait until the SMC has responded to the previous message.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return the response value read back from the SMC.
+ */
+static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+
+ phm_wait_for_register_unequal(hwmgr, reg,
+ 0, MP1_C2PMSG_90__CONTENT_MASK);
+
+ return cgs_read_register(hwmgr->device, reg);
+}
+
+/*
+ * Send a message to the SMC, and do not wait for its response.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return Always return 0.
+ */
+int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+ uint16_t msg)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
+ cgs_write_register(hwmgr->device, reg, msg);
+
+ return 0;
+}
+
+/*
+ * Send a message to the SMC, and wait for its response.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return Always return 0.
+ */
+int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ uint32_t reg;
+
+ vega12_wait_for_response(hwmgr);
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+ cgs_write_register(hwmgr->device, reg, 0);
+
+ vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ if (vega12_wait_for_response(hwmgr) != 1)
+ pr_err("Failed to send message: 0x%x\n", msg);
+
+ return 0;
+}
+
+/*
+ * Send a message to the SMC with parameter
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @param msg: the message to send.
+ * @param parameter: the parameter to send
+ * @return Always return 0.
+ */
+int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ uint32_t reg;
+
+ vega12_wait_for_response(hwmgr);
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+ cgs_write_register(hwmgr->device, reg, 0);
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+ cgs_write_register(hwmgr->device, reg, parameter);
+
+ vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+ if (vega12_wait_for_response(hwmgr) != 1)
+ pr_err("Failed to send message: 0x%x\n", msg);
+
+ return 0;
+}
+
+
+/*
+ * Send a message to the SMC with parameter, do not wait for response
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @param msg: the message to send.
+ * @param parameter: the parameter to send
+ * @return Always return 0.
+ */
+int vega12_send_msg_to_smc_with_parameter_without_waiting(
+ struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
+ cgs_write_register(hwmgr->device, reg, parameter);
+
+ return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
+}
+
+/*
+ * Retrieve an argument from SMC.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param arg pointer to store the argument from SMC.
+ * @return Always return 0.
+ */
+int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
+{
+ uint32_t reg;
+
+ reg = soc15_get_register_offset(MP1_HWID, 0,
+ mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+
+ *arg = cgs_read_register(hwmgr->device, reg);
+
+ return 0;
+}
+
+/*
+ * Copy table from SMC into driver FB
+ * @param hwmgr the address of the HW manager
+ * @param table_id the driver's table ID to copy from
+ */
+int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
+{
+ struct vega12_smumgr *priv =
+ (struct vega12_smumgr *)(hwmgr->smu_backend);
+
+ PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
+ "Invalid SMU Table ID!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
+ "Invalid SMU Table version!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+ "Invalid SMU Table Length!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableSmu2Dram,
+ table_id) == 0,
+ "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
+ return -EINVAL);
+
+ memcpy(table, priv->smu_tables.entry[table_id].table,
+ priv->smu_tables.entry[table_id].size);
+
+ return 0;
+}
+
+/*
+ * Copy table from Driver FB into SMC
+ * @param hwmgr the address of the HW manager
+ * @param table_id the table to copy from
+ */
+int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id)
+{
+ struct vega12_smumgr *priv =
+ (struct vega12_smumgr *)(hwmgr->smu_backend);
+
+ PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
+ "Invalid SMU Table ID!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
+ "Invalid SMU Table version!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
+ "Invalid SMU Table Length!", return -EINVAL);
+
+ memcpy(priv->smu_tables.entry[table_id].table, table,
+ priv->smu_tables.entry[table_id].size);
+
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu,
+ table_id) == 0,
+ "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
+ return -EINVAL);
+
+ return 0;
+}
+
+int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint64_t feature_mask)
+{
+ uint32_t smu_features_low, smu_features_high;
+
+ smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
+ smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
+
+ if (enable) {
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
+ "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
+ "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
+ return -EINVAL);
+ } else {
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
+ "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
+ "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
+ return -EINVAL);
+ }
+
+ return 0;
+}
+
+int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ uint64_t *features_enabled)
+{
+ uint32_t smc_features_low, smc_features_high;
+
+ if (features_enabled == NULL)
+ return -EINVAL;
+
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
+ "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
+ &smc_features_low) == 0,
+ "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
+ "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
+ &smc_features_high) == 0,
+ "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
+ return -EINVAL);
+
+ *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
+ (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
+
+ return 0;
+}
+
+static bool vega12_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ uint64_t features_enabled = 0;
+
+ vega12_get_enabled_smc_features(hwmgr, &features_enabled);
+
+ if (features_enabled & SMC_DPM_FEATURES)
+ return true;
+ else
+ return false;
+}
+
+static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_smumgr *priv =
+ (struct vega12_smumgr *)(hwmgr->smu_backend);
+
+ if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
+ if (!vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
+ vega12_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetToolsDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ }
+ return 0;
+}
+
+#if 0 /* tentatively remove */
+static int vega12_verify_smc_interface(struct pp_hwmgr *hwmgr)
+{
+ uint32_t smc_driver_if_version;
+
+ PP_ASSERT_WITH_CODE(!vega12_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion),
+ "Attempt to get SMC IF Version Number Failed!",
+ return -EINVAL);
+ vega12_read_arg_from_smc(hwmgr, &smc_driver_if_version);
+
+ if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
+ pr_err("Your firmware(0x%x) doesn't match \
+ SMU9_DRIVER_IF_VERSION(0x%x). \
+ Please update your firmware!\n",
+ smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+static int vega12_smu_init(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_smumgr *priv;
+ unsigned long tools_size;
+ struct cgs_firmware_info info = {0};
+ int ret;
+
+ ret = cgs_get_firmware_info(hwmgr->device,
+ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
+ &info);
+ if (ret || !info.kptr)
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(struct vega12_smumgr), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ hwmgr->smu_backend = priv;
+
+ /* allocate space for pptable */
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ sizeof(PPTable_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[TABLE_PPTABLE].handle,
+ &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
+ &priv->smu_tables.entry[TABLE_PPTABLE].table);
+ if (ret)
+ goto free_backend;
+
+ priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01;
+ priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t);
+
+ /* allocate space for watermarks table */
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ sizeof(Watermarks_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[TABLE_WATERMARKS].handle,
+ &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
+ &priv->smu_tables.entry[TABLE_WATERMARKS].table);
+
+ if (ret)
+ goto err0;
+
+ priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01;
+ priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);
+
+ tools_size = 0x19000;
+ if (tools_size) {
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ tools_size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
+ if (ret)
+ goto err1;
+
+ priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
+ priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;
+ }
+
+ /* allocate space for AVFS Fuse table */
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ sizeof(AvfsFuseOverride_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].handle,
+ &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].mc_addr,
+ &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].table);
+
+ if (ret)
+ goto err2;
+
+ priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].version = 0x01;
+ priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].size = sizeof(AvfsFuseOverride_t);
+
+ /* allocate space for OverDrive table */
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ sizeof(OverDriveTable_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
+ &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
+ &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
+ if (ret)
+ goto err3;
+
+ priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
+ priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);
+
+ return 0;
+
+err3:
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].handle,
+ &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].mc_addr,
+ &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].table);
+err2:
+ if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].table)
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
+err1:
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
+ &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
+ &priv->smu_tables.entry[TABLE_WATERMARKS].table);
+err0:
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
+ &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
+ &priv->smu_tables.entry[TABLE_PPTABLE].table);
+free_backend:
+ kfree(hwmgr->smu_backend);
+
+ return -EINVAL;
+}
+
+static int vega12_smu_fini(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_smumgr *priv =
+ (struct vega12_smumgr *)(hwmgr->smu_backend);
+
+ if (priv) {
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
+ &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
+ &priv->smu_tables.entry[TABLE_PPTABLE].table);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
+ &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
+ &priv->smu_tables.entry[TABLE_WATERMARKS].table);
+ if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].table)
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].handle,
+ &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].mc_addr,
+ &priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].table);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
+ &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
+ &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ }
+ return 0;
+}
+
+static int vega12_start_smu(struct pp_hwmgr *hwmgr)
+{
+ PP_ASSERT_WITH_CODE(vega12_is_smc_ram_running(hwmgr),
+ "SMC is not running!",
+ return -EINVAL);
+
+#if 0 /* tentatively remove */
+ PP_ASSERT_WITH_CODE(!vega12_verify_smc_interface(hwmgr),
+ "Failed to verify SMC interface!",
+ return -EINVAL);
+#endif
+
+ vega12_set_tools_address(hwmgr);
+
+ return 0;
+}
+
+const struct pp_smumgr_func vega12_smu_funcs = {
+ .smu_init = &vega12_smu_init,
+ .smu_fini = &vega12_smu_fini,
+ .start_smu = &vega12_start_smu,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = &vega12_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &vega12_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .is_dpm_running = vega12_is_dpm_running,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
new file mode 100644
index 0000000..2810d38
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _VEGA12_SMUMANAGER_H_
+#define _VEGA12_SMUMANAGER_H_
+
+#include "hwmgr.h"
+#include "vega12/smu9_driver_if.h"
+#include "vega12_hwmgr.h"
+
+struct smu_table_entry {
+ uint32_t version;
+ uint32_t size;
+ uint64_t mc_addr;
+ void *table;
+ struct amdgpu_bo *handle;
+};
+
+struct smu_table_array {
+ struct smu_table_entry entry[TABLE_COUNT];
+};
+
+struct vega12_smumgr {
+ struct smu_table_array smu_tables;
+};
+
+#define SMU_FEATURES_LOW_MASK 0x00000000FFFFFFFF
+#define SMU_FEATURES_LOW_SHIFT 0
+#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
+#define SMU_FEATURES_HIGH_SHIFT 32
+
+int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id);
+int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+ uint8_t *table, int16_t table_id);
+int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
+ bool enable, uint64_t feature_mask);
+int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ uint64_t *features_enabled);
+
+#endif
+
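
Since the Vega12 SMC accepts the feature mask only 32 bits at a time, vega12_enable_smc_features() and vega12_get_enabled_smc_features() split and reassemble a 64-bit mask with the SMU_FEATURES_* macros declared above. A minimal sketch of that split and recombine, assuming only those macros (the helper names are hypothetical, not part of the patch):

static void example_split_features(uint64_t mask, uint32_t *low, uint32_t *high)
{
	/* bits 0..31 feed the *SmuFeaturesLow message, bits 32..63 the *High one */
	*low  = (uint32_t)((mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
	*high = (uint32_t)((mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
}

static uint64_t example_join_features(uint32_t low, uint32_t high)
{
	/* inverse of the split, as done in vega12_get_enabled_smc_features() */
	return (((uint64_t)low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
	       (((uint64_t)high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK);
}
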
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
deleted file mode 100644
index 07129e6..0000000
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _GPU_SCHED_TRACE_H_
-
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <linux/tracepoint.h>
-
-#include <drm/drmP.h>
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM gpu_sched
-#define TRACE_INCLUDE_FILE gpu_sched_trace
-
-TRACE_EVENT(amd_sched_job,
- TP_PROTO(struct amd_sched_job *sched_job),
- TP_ARGS(sched_job),
- TP_STRUCT__entry(
- __field(struct amd_sched_entity *, entity)
- __field(struct dma_fence *, fence)
- __field(const char *, name)
- __field(uint64_t, id)
- __field(u32, job_count)
- __field(int, hw_job_count)
- ),
-
- TP_fast_assign(
- __entry->entity = sched_job->s_entity;
- __entry->id = sched_job->id;
- __entry->fence = &sched_job->s_fence->finished;
- __entry->name = sched_job->sched->name;
- __entry->job_count = kfifo_len(
- &sched_job->s_entity->job_queue) / sizeof(sched_job);
- __entry->hw_job_count = atomic_read(
- &sched_job->sched->hw_rq_count);
- ),
- TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->id,
- __entry->fence, __entry->name,
- __entry->job_count, __entry->hw_job_count)
-);
-
-TRACE_EVENT(amd_sched_process_job,
- TP_PROTO(struct amd_sched_fence *fence),
- TP_ARGS(fence),
- TP_STRUCT__entry(
- __field(struct dma_fence *, fence)
- ),
-
- TP_fast_assign(
- __entry->fence = &fence->finished;
- ),
- TP_printk("fence=%p signaled", __entry->fence)
-);
-
-#endif
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
deleted file mode 100644
index 52c8e54..0000000
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _GPU_SCHEDULER_H_
-#define _GPU_SCHEDULER_H_
-
-#include <linux/kfifo.h>
-#include <linux/dma-fence.h>
-
-struct amd_gpu_scheduler;
-struct amd_sched_rq;
-
-/**
- * A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their
- * job queues to corresponding hardware ring based on scheduling
- * policy.
-*/
-struct amd_sched_entity {
- struct list_head list;
- struct amd_sched_rq *rq;
- spinlock_t rq_lock;
- struct amd_gpu_scheduler *sched;
-
- spinlock_t queue_lock;
- struct kfifo job_queue;
-
- atomic_t fence_seq;
- uint64_t fence_context;
-
- struct dma_fence *dependency;
- struct dma_fence_cb cb;
-};
-
-/**
- * Run queue is a set of entities scheduling command submissions for
- * one specific ring. It implements the scheduling policy that selects
- * the next entity to emit commands from.
-*/
-struct amd_sched_rq {
- spinlock_t lock;
- struct list_head entities;
- struct amd_sched_entity *current_entity;
-};
-
-struct amd_sched_fence {
- struct dma_fence scheduled;
- struct dma_fence finished;
- struct dma_fence_cb cb;
- struct dma_fence *parent;
- struct amd_gpu_scheduler *sched;
- spinlock_t lock;
- void *owner;
-};
-
-struct amd_sched_job {
- struct amd_gpu_scheduler *sched;
- struct amd_sched_entity *s_entity;
- struct amd_sched_fence *s_fence;
- struct dma_fence_cb finish_cb;
- struct work_struct finish_work;
- struct list_head node;
- struct delayed_work work_tdr;
- uint64_t id;
- atomic_t karma;
-};
-
-extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
-extern const struct dma_fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
-{
- if (f->ops == &amd_sched_fence_ops_scheduled)
- return container_of(f, struct amd_sched_fence, scheduled);
-
- if (f->ops == &amd_sched_fence_ops_finished)
- return container_of(f, struct amd_sched_fence, finished);
-
- return NULL;
-}
-
-static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold)
-{
- return (s_job && atomic_inc_return(&s_job->karma) > threshold);
-}
-
-/**
- * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side
-*/
-struct amd_sched_backend_ops {
- struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
- struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
- void (*timedout_job)(struct amd_sched_job *sched_job);
- void (*free_job)(struct amd_sched_job *sched_job);
-};
-
-enum amd_sched_priority {
- AMD_SCHED_PRIORITY_MIN,
- AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
- AMD_SCHED_PRIORITY_NORMAL,
- AMD_SCHED_PRIORITY_HIGH_SW,
- AMD_SCHED_PRIORITY_HIGH_HW,
- AMD_SCHED_PRIORITY_KERNEL,
- AMD_SCHED_PRIORITY_MAX,
- AMD_SCHED_PRIORITY_INVALID = -1,
- AMD_SCHED_PRIORITY_UNSET = -2
-};
-
-/**
- * One scheduler is implemented for each hardware ring
-*/
-struct amd_gpu_scheduler {
- const struct amd_sched_backend_ops *ops;
- uint32_t hw_submission_limit;
- long timeout;
- const char *name;
- struct amd_sched_rq sched_rq[AMD_SCHED_PRIORITY_MAX];
- wait_queue_head_t wake_up_worker;
- wait_queue_head_t job_scheduled;
- atomic_t hw_rq_count;
- atomic64_t job_id_count;
- struct task_struct *thread;
- struct list_head ring_mirror_list;
- spinlock_t job_list_lock;
-};
-
-int amd_sched_init(struct amd_gpu_scheduler *sched,
- const struct amd_sched_backend_ops *ops,
- uint32_t hw_submission, long timeout, const char *name);
-void amd_sched_fini(struct amd_gpu_scheduler *sched);
-
-int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- struct amd_sched_rq *rq,
- uint32_t jobs);
-void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity);
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
-void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
- struct amd_sched_rq *rq);
-
-int amd_sched_fence_slab_init(void);
-void amd_sched_fence_slab_fini(void);
-
-struct amd_sched_fence *amd_sched_fence_create(
- struct amd_sched_entity *s_entity, void *owner);
-void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_finished(struct amd_sched_fence *fence);
-int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void *owner);
-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
-bool amd_sched_dependency_optimized(struct dma_fence* fence,
- struct amd_sched_entity *entity);
-void amd_sched_job_kickout(struct amd_sched_job *s_job);
-
-static inline enum amd_sched_priority
-amd_sched_get_job_priority(struct amd_sched_job *job)
-{
- return (job->s_entity->rq - job->sched->sched_rq);
-}
-
-#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 074fd4e..f067de4 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -155,7 +155,6 @@ static int arcpgu_show_pxlclock(struct seq_file *m, void *arg)
static struct drm_info_list arcpgu_debugfs_list[] = {
{ "clocks", arcpgu_show_pxlclock, 0 },
- { "fb", drm_fb_cma_debugfs_show, 0 },
};
static int arcpgu_debugfs_init(struct drm_minor *minor)
@@ -180,6 +179,7 @@ static struct drm_driver arcpgu_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_print_info = drm_gem_cma_print_info,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 0ce7f39..977dfa5 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -15,7 +15,8 @@
*/
#include <drm/drm_crtc.h>
-#include <drm/drm_encoder_slave.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_device.h>
#include "arcpgu.h"
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
index bca3a67..b8f6f9a 100644
--- a/drivers/gpu/drm/arc/arcpgu_sim.c
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -15,7 +15,6 @@
*/
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_atomic_helper.h>
#include "arcpgu.h"
@@ -29,7 +28,6 @@
struct arcpgu_drm_connector {
struct drm_connector connector;
- struct drm_encoder_slave *encoder_slave;
};
static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
@@ -68,7 +66,7 @@ static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
{
struct arcpgu_drm_connector *arcpgu_connector;
- struct drm_encoder_slave *encoder;
+ struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
@@ -76,10 +74,10 @@ int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
if (encoder == NULL)
return -ENOMEM;
- encoder->base.possible_crtcs = 1;
- encoder->base.possible_clones = 0;
+ encoder->possible_crtcs = 1;
+ encoder->possible_clones = 0;
- ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+ ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
@@ -101,21 +99,19 @@ int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
goto error_encoder_cleanup;
}
- ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
if (ret < 0) {
dev_err(drm->dev, "could not attach connector to encoder\n");
drm_connector_unregister(connector);
goto error_connector_cleanup;
}
- arcpgu_connector->encoder_slave = encoder;
-
return 0;
error_connector_cleanup:
drm_connector_cleanup(connector);
error_encoder_cleanup:
- drm_encoder_cleanup(&encoder->base);
+ drm_encoder_cleanup(encoder);
return ret;
}
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 5a5427b..cf5cbd6 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -229,7 +229,6 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct drm_rect clip = { 0 };
struct drm_crtc_state *crtc_state;
u32 src_h = state->src_h >> 16;
@@ -249,13 +248,10 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
-
- return drm_plane_helper_check_state(state, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 0afb53b..feaa8bc 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -231,7 +231,6 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
static struct drm_info_list hdlcd_debugfs_list[] = {
{ "interrupt_count", hdlcd_show_underrun_count, 0 },
{ "clocks", hdlcd_show_pxlclock, 0 },
- { "fb", drm_fb_cma_debugfs_show, 0 },
};
static int hdlcd_debugfs_init(struct drm_minor *minor)
@@ -253,6 +252,7 @@ static struct drm_driver hdlcd_driver = {
.irq_postinstall = hdlcd_irq_postinstall,
.irq_uninstall = hdlcd_irq_uninstall,
.gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_print_info = drm_gem_cma_print_info,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 904fff8..fcc62bc 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -288,8 +288,14 @@ static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 ||
(v_upscale_factor >> 16) >= 2);
- s->input_w = pstate->src_w >> 16;
- s->input_h = pstate->src_h >> 16;
+ if (pstate->rotation & MALIDP_ROTATED_MASK) {
+ s->input_w = pstate->src_h >> 16;
+ s->input_h = pstate->src_w >> 16;
+ } else {
+ s->input_w = pstate->src_w >> 16;
+ s->input_h = pstate->src_h >> 16;
+ }
+
s->output_w = pstate->crtc_w;
s->output_h = pstate->crtc_h;
@@ -525,14 +531,13 @@ int malidp_crtc_init(struct drm_device *drm)
if (!primary) {
DRM_ERROR("no primary plane found\n");
- ret = -EINVAL;
- goto crtc_cleanup_planes;
+ return -EINVAL;
}
ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
&malidp_crtc_funcs, NULL);
if (ret)
- goto crtc_cleanup_planes;
+ return ret;
drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE);
@@ -542,9 +547,4 @@ int malidp_crtc_init(struct drm_device *drm)
malidp_se_set_enh_coeffs(malidp->dev);
return 0;
-
-crtc_cleanup_planes:
- malidp_de_planes_destroy(drm);
-
- return ret;
}
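Editor's note on the scaler-input swap introduced above: when a plane is rotated by 90 or 270 degrees the scaling engine consumes the source with its axes transposed, so the size handed to the scaler must be swapped as well. A minimal sketch of that selection, assuming MALIDP_ROTATED_MASK covers the 90/270-degree rotations (names here are illustrative, not driver code):

#include <linux/types.h>

/* illustrative only: pick the scaler input size for a possibly rotated plane */
static void example_scaler_input(u32 src_w, u32 src_h, bool rotated,
				 u32 *in_w, u32 *in_h)
{
	/* a 90/270-degree rotation transposes the source rectangle */
	*in_w = rotated ? src_h : src_w;
	*in_h = rotated ? src_w : src_h;
}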
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 91f2b01..8d20faa 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/console.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
@@ -24,9 +23,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include "malidp_drv.h"
@@ -182,34 +183,31 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
return (ret > 0) ? 0 : -ETIMEDOUT;
}
-static void malidp_output_poll_changed(struct drm_device *drm)
-{
- struct malidp_drm *malidp = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(malidp->fbdev);
-}
-
static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
- struct drm_pending_vblank_event *event;
struct drm_device *drm = state->dev;
struct malidp_drm *malidp = drm->dev_private;
- if (malidp->crtc.enabled) {
- /* only set config_valid if the CRTC is enabled */
- if (malidp_set_and_wait_config_valid(drm))
- DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
- }
+ malidp->event = malidp->crtc.state->event;
+ malidp->crtc.state->event = NULL;
- event = malidp->crtc.state->event;
- if (event) {
- malidp->crtc.state->event = NULL;
+ if (malidp->crtc.state->active) {
+ /*
+ * if we have an event to deliver to userspace, make sure
+ * the vblank is enabled as we are sending it from the IRQ
+ * handler.
+ */
+ if (malidp->event)
+ drm_crtc_vblank_get(&malidp->crtc);
+ /* only set config_valid if the CRTC is enabled */
+ if (malidp_set_and_wait_config_valid(drm) < 0)
+ DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+ } else if (malidp->event) {
+ /* CRTC inactive means vblank IRQ is disabled, send event directly */
spin_lock_irq(&drm->event_lock);
- if (drm_crtc_vblank_get(&malidp->crtc) == 0)
- drm_crtc_arm_vblank_event(&malidp->crtc, event);
- else
- drm_crtc_send_vblank_event(&malidp->crtc, event);
+ drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
+ malidp->event = NULL;
spin_unlock_irq(&drm->event_lock);
}
drm_atomic_helper_commit_hw_done(state);
@@ -238,8 +236,6 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
malidp_atomic_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(drm, state);
-
pm_runtime_put(drm->dev);
drm_atomic_helper_cleanup_planes(drm, state);
@@ -251,7 +247,7 @@ static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = malidp_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -282,7 +278,7 @@ static int malidp_init(struct drm_device *drm)
static void malidp_fini(struct drm_device *drm)
{
- malidp_de_planes_destroy(drm);
+ drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
}
@@ -316,22 +312,28 @@ static int malidp_irq_init(struct platform_device *pdev)
return 0;
}
-static void malidp_lastclose(struct drm_device *drm)
+DEFINE_DRM_GEM_CMA_FOPS(fops);
+
+static int malidp_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
{
struct malidp_drm *malidp = drm->dev_private;
+ /* allocate for the worst-case scenario, i.e. rotated buffers */
+ u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 1);
- drm_fbdev_cma_restore_mode(malidp->fbdev);
-}
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
static struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
DRIVER_PRIME,
- .lastclose = malidp_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = malidp_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -622,14 +624,9 @@ static int malidp_bind(struct device *dev)
drm_mode_config_reset(drm);
- malidp->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
-
- if (IS_ERR(malidp->fbdev)) {
- ret = PTR_ERR(malidp->fbdev);
- malidp->fbdev = NULL;
+ ret = drm_fb_cma_fbdev_init(drm, 32, 0);
+ if (ret)
goto fbdev_fail;
- }
drm_kms_helper_poll_init(drm);
@@ -640,10 +637,7 @@ static int malidp_bind(struct device *dev)
return 0;
register_fail:
- if (malidp->fbdev) {
- drm_fbdev_cma_fini(malidp->fbdev);
- malidp->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
fbdev_fail:
pm_runtime_get_sync(dev);
@@ -680,14 +674,13 @@ static void malidp_unbind(struct device *dev)
struct malidp_drm *malidp = drm->dev_private;
drm_dev_unregister(drm);
- if (malidp->fbdev) {
- drm_fbdev_cma_fini(malidp->fbdev);
- malidp->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
+ drm_crtc_vblank_off(&malidp->crtc);
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
+ drm->irq_enabled = false;
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
@@ -745,34 +738,15 @@ static int malidp_platform_remove(struct platform_device *pdev)
static int __maybe_unused malidp_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct malidp_drm *malidp = drm->dev_private;
-
- drm_kms_helper_poll_disable(drm);
- console_lock();
- drm_fbdev_cma_set_suspend(malidp->fbdev, 1);
- console_unlock();
- malidp->pm_state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(malidp->pm_state)) {
- console_lock();
- drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
- console_unlock();
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(malidp->pm_state);
- }
- return 0;
+ return drm_mode_config_helper_suspend(drm);
}
static int __maybe_unused malidp_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct malidp_drm *malidp = drm->dev_private;
- drm_atomic_helper_resume(drm, malidp->pm_state);
- console_lock();
- drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
- console_unlock();
- drm_kms_helper_poll_enable(drm);
+ drm_mode_config_helper_resume(drm);
return 0;
}
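Editor's note: the new malidp_dumb_create() above over-aligns the dumb-buffer pitch so the buffer can later be scanned out rotated. A minimal sketch of the same calculation in isolation, assuming the alignment rule mirrors malidp_hw_get_pitch_align() (hardware with 8-byte bus alignment needs no extra headroom; everything else needs four times its bus alignment when rotated):

#include <linux/kernel.h>

/* illustrative only: worst-case (rotated) pitch for a dumb buffer */
static u32 example_rotated_pitch(u32 width, u32 bpp, u32 bus_align_bytes)
{
	/* bytes needed for one line of pixels */
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	/* 8-byte-capable hardware has no extra rotation constraint */
	u32 align = (bus_align_bytes == 8) ? 8 : bus_align_bytes << 2;

	return ALIGN(pitch, align);
}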
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 2e20331..c70989b 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -20,11 +20,10 @@
struct malidp_drm {
struct malidp_hw_device *dev;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc crtc;
wait_queue_head_t wq;
+ struct drm_pending_vblank_event *event;
atomic_t config_valid;
- struct drm_atomic_state *pm_state;
u32 core_id;
};
@@ -61,7 +60,6 @@ struct malidp_crtc_state {
#define to_malidp_crtc_state(x) container_of(x, struct malidp_crtc_state, base)
int malidp_de_planes_init(struct drm_device *drm);
-void malidp_de_planes_destroy(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
/* often used combination of rotational bits */
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 2bfb542..d789b46 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -75,16 +75,16 @@ static const struct malidp_format_id malidp550_de_formats[] = {
};
static const struct malidp_layer malidp500_layers[] = {
- { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
- { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, MALIDP_DE_LG_STRIDE },
- { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, MALIDP_DE_LG_STRIDE },
+ { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB },
+ { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, MALIDP_DE_LG_STRIDE, 0 },
+ { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, MALIDP_DE_LG_STRIDE, 0 },
};
static const struct malidp_layer malidp550_layers[] = {
- { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
- { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
- { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
- { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
+ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB },
+ { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE, 0 },
+ { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE, 0 },
};
#define SE_N_SCALING_COEFFS 96
@@ -782,9 +782,15 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
/* first handle the config valid IRQ */
dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
if (dc_status & hw->map.dc_irq_map.vsync_irq) {
- /* we have a page flip event */
- atomic_set(&malidp->config_valid, 1);
malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
+ /* do we have a page flip event? */
+ if (malidp->event != NULL) {
+ spin_lock(&drm->event_lock);
+ drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
+ malidp->event = NULL;
+ spin_unlock(&drm->event_lock);
+ }
+ atomic_set(&malidp->config_valid, 1);
ret = IRQ_WAKE_THREAD;
}
@@ -794,7 +800,7 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
status &= mask;
- if (status & de->vsync_irq)
+ if ((status & de->vsync_irq) && malidp->crtc.enabled)
drm_crtc_handle_vblank(&malidp->crtc);
malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
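Editor's note: the reworked malidp_de_irq() above now delivers the page-flip event from the config-valid interrupt rather than from the vblank path. The underlying pattern, sketched with invented names (the commit code stashes the pending event, and the hard-IRQ handler sends it under the event lock once the hardware has latched the new configuration):

#include <drm/drmP.h>

/* illustrative only: deliver a stashed flip event from hard-IRQ context */
static void example_deliver_flip_event(struct drm_device *drm,
				       struct drm_crtc *crtc,
				       struct drm_pending_vblank_event **slot)
{
	spin_lock(&drm->event_lock);	/* plain spin_lock: already in IRQ */
	if (*slot) {
		drm_crtc_send_vblank_event(crtc, *slot);
		*slot = NULL;
	}
	spin_unlock(&drm->event_lock);
}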
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index b0690eb..b5dd6c7 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -58,7 +58,8 @@ struct malidp_layer {
u16 id; /* layer ID */
u16 base; /* address offset for the register bank */
u16 ptr; /* address offset for the pointer register */
- u16 stride_offset; /* Offset to the first stride register. */
+ u16 stride_offset; /* offset to the first stride register. */
+ s16 yuv2rgb_offset; /* offset to the YUV->RGB matrix entries */
};
enum malidp_scaling_coeff_set {
@@ -285,10 +286,16 @@ void malidp_se_irq_fini(struct drm_device *drm);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format);
-static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
- unsigned int pitch)
+static inline u8 malidp_hw_get_pitch_align(struct malidp_hw_device *hwdev, bool rotated)
{
- return !(pitch & (hwdev->hw->map.bus_align_bytes - 1));
+ /*
+ * only hardware that cannot do 8-byte bus alignment has further
+ * constraints on rotated planes
+ */
+ if (hwdev->hw->map.bus_align_bytes == 8)
+ return 8;
+ else
+ return hwdev->hw->map.bus_align_bytes << (rotated ? 2 : 0);
}
/* U16.16 */
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index e741979..7a44897 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -35,6 +35,9 @@
#define LAYER_COMP_MASK (0x3 << 12)
#define LAYER_COMP_PIXEL (0x3 << 12)
#define LAYER_COMP_PLANE (0x2 << 12)
+#define LAYER_ALPHA_OFFSET (16)
+#define LAYER_ALPHA_MASK (0xff)
+#define LAYER_ALPHA(x) (((x) & LAYER_ALPHA_MASK) << LAYER_ALPHA_OFFSET)
#define MALIDP_LAYER_COMPOSE 0x008
#define MALIDP_LAYER_SIZE 0x00c
#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
@@ -56,12 +59,8 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
{
struct malidp_plane *mp = to_malidp_plane(plane);
- if (mp->base.fb)
- drm_framebuffer_put(mp->base.fb);
-
- drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
- devm_kfree(plane->dev->dev, mp);
+ kfree(mp);
}
/*
@@ -141,21 +140,27 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
struct drm_crtc_state *crtc_state =
drm_atomic_get_existing_crtc_state(state->state, state->crtc);
struct malidp_crtc_state *mc;
- struct drm_rect clip = { 0 };
u32 src_w, src_h;
int ret;
if (!crtc_state)
return -EINVAL;
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
- ret = drm_plane_helper_check_state(state, &clip, 0, INT_MAX, true, true);
+ mc = to_malidp_crtc_state(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ 0, INT_MAX, true, true);
if (ret)
return ret;
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ if (state->rotation & MALIDP_ROTATED_MASK) {
+ src_w = state->src_h >> 16;
+ src_h = state->src_w >> 16;
+ } else {
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+ }
+
if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
/* Scaling not necessary for this plane. */
mc->scaled_planes_mask &= ~(mp->layer->id);
@@ -165,8 +170,6 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
return -EINVAL;
- mc = to_malidp_crtc_state(crtc_state);
-
mc->scaled_planes_mask |= mp->layer->id;
/* Defer scaling requirements calculation to the crtc check. */
return 0;
@@ -177,6 +180,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
{
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
+ bool rotated = state->rotation & MALIDP_ROTATED_MASK;
struct drm_framebuffer *fb;
int i, ret;
@@ -193,7 +197,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
ms->n_planes = fb->format->num_planes;
for (i = 0; i < ms->n_planes; i++) {
- if (!malidp_hw_pitch_valid(mp->hwdev, fb->pitches[i])) {
+ u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
+ if (fb->pitches[i] & (alignment - 1)) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
fb->pitches[i], i);
return -EINVAL;
@@ -261,6 +266,60 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
mp->layer->stride_offset + i * 4);
}
+static const s16
+malidp_yuv2rgb_coeffs[][DRM_COLOR_RANGE_MAX][MALIDP_COLORADJ_NUM_COEFFS] = {
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 1192, 0, 1634,
+ 1192, -401, -832,
+ 1192, 2066, 0,
+ 64, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 1024, 0, 1436,
+ 1024, -352, -731,
+ 1024, 1815, 0,
+ 0, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 1192, 0, 1836,
+ 1192, -218, -546,
+ 1192, 2163, 0,
+ 64, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 1024, 0, 1613,
+ 1024, -192, -479,
+ 1024, 1900, 0,
+ 0, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 1024, 0, 1476,
+ 1024, -165, -572,
+ 1024, 1884, 0,
+ 0, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 1024, 0, 1510,
+ 1024, -168, -585,
+ 1024, 1927, 0,
+ 0, 512, 512
+ }
+};
+
+static void malidp_de_set_color_encoding(struct malidp_plane *plane,
+ enum drm_color_encoding enc,
+ enum drm_color_range range)
+{
+ unsigned int i;
+
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+ /* coefficients are signed, two's complement values */
+ malidp_hw_write(plane->hwdev, malidp_yuv2rgb_coeffs[enc][range][i],
+ plane->layer->base + plane->layer->yuv2rgb_offset +
+ i * 4);
+ }
+}
+
static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -268,6 +327,7 @@ static void malidp_de_plane_update(struct drm_plane *plane,
struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
u32 src_w, src_h, dest_w, dest_h, val;
int i;
+ bool format_has_alpha = plane->state->fb->format->has_alpha;
mp = to_malidp_plane(plane);
@@ -291,6 +351,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
malidp_de_set_plane_pitches(mp, ms->n_planes,
plane->state->fb->pitches);
+ if ((plane->state->color_encoding != old_state->color_encoding) ||
+ (plane->state->color_range != old_state->color_range))
+ malidp_de_set_color_encoding(mp, plane->state->color_encoding,
+ plane->state->color_range);
+
malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
mp->layer->base + MALIDP_LAYER_SIZE);
@@ -319,12 +384,25 @@ static void malidp_de_plane_update(struct drm_plane *plane,
if (plane->state->rotation & DRM_MODE_REFLECT_Y)
val |= LAYER_V_FLIP;
- /*
- * always enable pixel alpha blending until we have a way to change
- * blend modes
- */
val &= ~LAYER_COMP_MASK;
- val |= LAYER_COMP_PIXEL;
+ if (format_has_alpha) {
+
+ /*
+ * always enable pixel alpha blending until we have a way
+ * to change blend modes
+ */
+ val |= LAYER_COMP_PIXEL;
+ } else {
+
+ /*
+ * do not enable pixel alpha blending as the color channel
+ * does not have any alpha information
+ */
+ val |= LAYER_COMP_PLANE;
+
+ /* Set layer alpha coefficient to 0xff, i.e. fully opaque */
+ val |= LAYER_ALPHA(0xff);
+ }
val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
if (plane->state->crtc) {
@@ -419,6 +497,26 @@ int malidp_de_planes_init(struct drm_device *drm)
drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, flags);
malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
plane->layer->base + MALIDP_LAYER_COMPOSE);
+
+ /* Attach the YUV->RGB property only to video layers */
+ if (id & (DE_VIDEO1 | DE_VIDEO2)) {
+ /* default encoding for YUV->RGB is BT601 NARROW */
+ enum drm_color_encoding enc = DRM_COLOR_YCBCR_BT601;
+ enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+
+ ret = drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) | \
+ BIT(DRM_COLOR_YCBCR_BT709) | \
+ BIT(DRM_COLOR_YCBCR_BT2020),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | \
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ enc, range);
+ if (!ret)
+ /* program the HW registers */
+ malidp_de_set_color_encoding(plane, enc, range);
+ else
+ DRM_WARN("Failed to create video layer %d color properties\n", id);
+ }
}
kfree(formats);
@@ -426,18 +524,7 @@ int malidp_de_planes_init(struct drm_device *drm)
return 0;
cleanup:
- malidp_de_planes_destroy(drm);
kfree(formats);
return ret;
}
-
-void malidp_de_planes_destroy(struct drm_device *drm)
-{
- struct drm_plane *p, *pt;
-
- list_for_each_entry_safe(p, pt, &drm->mode_config.plane_list, head) {
- drm_plane_cleanup(p);
- kfree(p);
- }
-}
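Editor's note: the malidp_yuv2rgb_coeffs table added above stores each conversion as a 3x3 matrix scaled by 1024 followed by three input offsets, matching the usual BT.601/709/2020 equations (e.g. 1192 ≈ 1.164 * 1024 for limited range). A sketch of how such a coefficient set could be applied in software; the 10-bit clamp range and applying the offsets before the matrix are assumptions here, not taken from the hardware documentation:

#include <linux/kernel.h>

/* illustrative only: apply one 12-entry coefficient set to a YCbCr sample */
static void example_yuv2rgb(const s16 m[12], int y, int cb, int cr, int rgb[3])
{
	/* entries 9..11 are the per-channel input offsets */
	int in[3] = { y - m[9], cb - m[10], cr - m[11] };
	int i;

	for (i = 0; i < 3; i++)
		rgb[i] = clamp((m[3 * i + 0] * in[0] +
				m[3 * i + 1] * in[1] +
				m[3 * i + 2] * in[2]) / 1024, 0, 1023);
}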
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 2039f85..149024f 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -170,10 +170,7 @@
#define MALIDP500_CONFIG_3D 0x00038
#define MALIDP500_BGND_COLOR 0x0003c
#define MALIDP500_OUTPUT_DEPTH 0x00044
-#define MALIDP500_YUV_RGB_COEF 0x00048
-#define MALIDP500_COLOR_ADJ_COEF 0x00078
-#define MALIDP500_COEF_TABLE_ADDR 0x000a8
-#define MALIDP500_COEF_TABLE_DATA 0x000ac
+#define MALIDP500_COEFFS_BASE 0x00078
/*
* The YUV2RGB coefficients on the DP500 are not in the video layer's register
@@ -181,11 +178,6 @@
* the negative offset.
*/
#define MALIDP500_LV_YUV2RGB ((s16)(-0xB8))
-/*
- * To match DP550/650, the start of the coeffs registers is
- * at COLORADJ_COEFF0 instead of at YUV_RGB_COEF1.
- */
-#define MALIDP500_COEFFS_BASE 0x00078
#define MALIDP500_DE_LV_BASE 0x00100
#define MALIDP500_DE_LV_PTR_BASE 0x00124
#define MALIDP500_DE_LG1_BASE 0x00200
@@ -213,6 +205,7 @@
#define MALIDP550_DE_BGND_COLOR 0x00044
#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c
#define MALIDP550_COEFFS_BASE 0x00050
+#define MALIDP550_LV_YUV2RGB 0x00084
#define MALIDP550_DE_LV1_BASE 0x00100
#define MALIDP550_DE_LV1_PTR_BASE 0x00124
#define MALIDP550_DE_LV2_BASE 0x00200
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index a0f4d2a..03eeee1 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -13,6 +13,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
@@ -20,13 +21,6 @@
#include "armada_hw.h"
#include "armada_trace.h"
-struct armada_frame_work {
- struct armada_plane_work work;
- struct drm_pending_vblank_event *event;
- struct armada_regs regs[4];
- struct drm_framebuffer *old_fb;
-};
-
enum csc_mode {
CSC_AUTO = 0,
CSC_YUV_CCIR601 = 1,
@@ -216,6 +210,38 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
return i;
}
+static void armada_drm_plane_work_call(struct armada_crtc *dcrtc,
+ struct armada_plane_work *work,
+ void (*fn)(struct armada_crtc *, struct armada_plane_work *))
+{
+ struct armada_plane *dplane = drm_to_armada_plane(work->plane);
+ struct drm_pending_vblank_event *event;
+ struct drm_framebuffer *fb;
+
+ if (fn)
+ fn(dcrtc, work);
+ drm_crtc_vblank_put(&dcrtc->crtc);
+
+ event = work->event;
+ fb = work->old_fb;
+ if (event || fb) {
+ struct drm_device *dev = dcrtc->crtc.dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (event)
+ drm_crtc_send_vblank_event(&dcrtc->crtc, event);
+ if (fb)
+ __armada_drm_queue_unref_work(dev, fb);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ if (work->need_kfree)
+ kfree(work);
+
+ wake_up(&dplane->frame_wait);
+}
+
static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
struct drm_plane *plane)
{
@@ -223,24 +249,19 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
struct armada_plane_work *work = xchg(&dplane->work, NULL);
/* Handle any pending frame work. */
- if (work) {
- work->fn(dcrtc, dplane, work);
- drm_crtc_vblank_put(&dcrtc->crtc);
- }
-
- wake_up(&dplane->frame_wait);
+ if (work)
+ armada_drm_plane_work_call(dcrtc, work, work->fn);
}
int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane *plane, struct armada_plane_work *work)
+ struct armada_plane_work *work)
{
+ struct armada_plane *plane = drm_to_armada_plane(work->plane);
int ret;
ret = drm_crtc_vblank_get(&dcrtc->crtc);
- if (ret) {
- DRM_ERROR("failed to acquire vblank counter\n");
+ if (ret)
return ret;
- }
ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
if (ret)
@@ -254,51 +275,60 @@ int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
}
-struct armada_plane_work *armada_drm_plane_work_cancel(
- struct armada_crtc *dcrtc, struct armada_plane *plane)
+void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
+ struct armada_plane *dplane)
{
- struct armada_plane_work *work = xchg(&plane->work, NULL);
+ struct armada_plane_work *work = xchg(&dplane->work, NULL);
if (work)
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- return work;
+ armada_drm_plane_work_call(dcrtc, work, work->cancel);
}
-static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
- struct armada_frame_work *work)
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
+ struct armada_plane_work *work)
{
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
+ unsigned long flags;
- return armada_drm_plane_work_queue(dcrtc, plane, &work->work);
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ armada_drm_crtc_update_regs(dcrtc, work->regs);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
-static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
- struct armada_plane *plane, struct armada_plane_work *work)
+static void armada_drm_crtc_complete_disable_work(struct armada_crtc *dcrtc,
+ struct armada_plane_work *work)
{
- struct armada_frame_work *fwork = container_of(work, struct armada_frame_work, work);
- struct drm_device *dev = dcrtc->crtc.dev;
unsigned long flags;
+ if (dcrtc->plane == work->plane)
+ dcrtc->plane = NULL;
+
spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, fwork->regs);
+ armada_drm_crtc_update_regs(dcrtc, work->regs);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
- if (fwork->event) {
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
+static struct armada_plane_work *
+armada_drm_crtc_alloc_plane_work(struct drm_plane *plane)
+{
+ struct armada_plane_work *work;
+ int i = 0;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return NULL;
- /* Finally, queue the process-half of the cleanup. */
- __armada_drm_queue_unref_work(dcrtc->crtc.dev, fwork->old_fb);
- kfree(fwork);
+ work->plane = plane;
+ work->fn = armada_drm_crtc_complete_frame_work;
+ work->need_kfree = true;
+ armada_reg_queue_end(work->regs, i);
+
+ return work;
}
static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
struct drm_framebuffer *fb, bool force)
{
- struct armada_frame_work *work;
+ struct armada_plane_work *work;
if (!fb)
return;
@@ -309,15 +339,11 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
return;
}
- work = kmalloc(sizeof(*work), GFP_KERNEL);
+ work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
if (work) {
- int i = 0;
- work->work.fn = armada_drm_crtc_complete_frame_work;
- work->event = NULL;
work->old_fb = fb;
- armada_reg_queue_end(work->regs, i);
- if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
+ if (armada_drm_plane_work_queue(dcrtc, work) == 0)
return;
kfree(work);
@@ -380,8 +406,11 @@ static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
* the new mode parameters.
*/
plane = dcrtc->plane;
- if (plane)
+ if (plane) {
drm_plane_force_disable(plane);
+ WARN_ON(!armada_drm_plane_work_wait(drm_to_armada_plane(plane),
+ HZ));
+ }
}
/* The mode_config.mutex will be held for this call */
@@ -447,11 +476,11 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
if (stat & VSYNC_IRQ)
drm_crtc_handle_vblank(&dcrtc->crtc);
- spin_lock(&dcrtc->irq_lock);
ovl_plane = dcrtc->plane;
if (ovl_plane)
armada_drm_plane_work_run(dcrtc, ovl_plane);
+ spin_lock(&dcrtc->irq_lock);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
uint32_t val;
@@ -543,18 +572,14 @@ static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
return val;
}
-static void armada_drm_primary_set(struct drm_crtc *crtc,
- struct drm_plane *plane, int x, int y)
+static void armada_drm_gra_plane_regs(struct armada_regs *regs,
+ struct drm_framebuffer *fb, struct armada_plane_state *state,
+ int x, int y, bool interlaced)
{
- struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[8];
- bool interlaced = dcrtc->interlaced;
- unsigned i;
+ unsigned int i;
u32 ctrl0;
- i = armada_drm_crtc_calc_fb(plane->fb, x, y, regs, interlaced);
-
+ i = armada_drm_crtc_calc_fb(fb, x, y, regs, interlaced);
armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
@@ -566,9 +591,21 @@ static void armada_drm_primary_set(struct drm_crtc *crtc,
armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
+ CFG_GRA_HSMOOTH | CFG_GRA_ENA,
LCD_SPU_DMA_CTRL0);
armada_reg_queue_end(regs, i);
+}
+
+static void armada_drm_primary_set(struct drm_crtc *crtc,
+ struct drm_plane *plane, int x, int y)
+{
+ struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[8];
+ bool interlaced = dcrtc->interlaced;
+
+ armada_drm_gra_plane_regs(regs, plane->fb, state, x, y, interlaced);
armada_drm_crtc_update_regs(dcrtc, regs);
}
@@ -588,7 +625,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
- val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+ val = CFG_GRA_ENA;
val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
@@ -640,8 +677,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
/* Now compute the divider for real */
dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
- /* Ensure graphic fifo is enabled */
- armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
if (interlaced ^ dcrtc->interlaced) {
@@ -654,6 +689,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ /* Ensure graphic fifo is enabled */
+ armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+
/* Even interlaced/progressive frame */
dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
adj->crtc_htotal;
@@ -736,46 +774,13 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
- struct drm_plane *plane)
-{
- u32 sram_para1, dma_ctrl0_mask;
-
- /*
- * Drop our reference on any framebuffer attached to this plane.
- * We don't need to NULL this out as drm_plane_force_disable(),
- * and __setplane_internal() will do so for an overlay plane, and
- * __drm_helper_disable_unused_functions() will do so for the
- * primary plane.
- */
- if (plane->fb)
- drm_framebuffer_put(plane->fb);
-
- /* Power down most RAMs and FIFOs if this is the primary plane */
- if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
- dma_ctrl0_mask = CFG_GRA_ENA;
- } else {
- /* Power down the Y/U/V FIFOs */
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
- dma_ctrl0_mask = CFG_DMA_ENA;
- }
-
- spin_lock_irq(&dcrtc->irq_lock);
- armada_updatel(0, dma_ctrl0_mask, dcrtc->base + LCD_SPU_DMA_CTRL0);
- spin_unlock_irq(&dcrtc->irq_lock);
-
- armada_updatel(sram_para1, 0, dcrtc->base + LCD_SPU_SRAM_PARA1);
-}
-
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_disable(struct drm_crtc *crtc)
{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-
armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- armada_drm_crtc_plane_disable(dcrtc, crtc->primary);
+
+ /* Disable our primary plane when we disable the CRTC. */
+ crtc->primary->funcs->disable_plane(crtc->primary, NULL);
}
static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
@@ -885,9 +890,11 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
return 0;
}
+ spin_lock_irq(&dcrtc->irq_lock);
para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
dcrtc->base + LCD_SPU_SRAM_PARA1);
+ spin_unlock_irq(&dcrtc->irq_lock);
/*
* Initialize the transparency if the SRAM was powered down.
@@ -1027,7 +1034,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_frame_work *work;
+ struct armada_plane_work *work;
unsigned i;
int ret;
@@ -1035,11 +1042,10 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
if (fb->format != crtc->primary->fb->format)
return -EINVAL;
- work = kmalloc(sizeof(*work), GFP_KERNEL);
+ work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
if (!work)
return -ENOMEM;
- work->work.fn = armada_drm_crtc_complete_frame_work;
work->event = event;
work->old_fb = dcrtc->crtc.primary->fb;
@@ -1053,7 +1059,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
*/
drm_framebuffer_get(fb);
- ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
+ ret = armada_drm_plane_work_queue(dcrtc, work);
if (ret) {
/* Undo our reference above */
drm_framebuffer_put(fb);
@@ -1133,14 +1139,196 @@ static const struct drm_crtc_funcs armada_crtc_funcs = {
.disable_vblank = armada_drm_crtc_disable_vblank,
};
+static void armada_drm_primary_update_state(struct drm_plane_state *state,
+ struct armada_regs *regs)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(state->plane);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(state->crtc);
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
+ bool was_disabled;
+ unsigned int idx = 0;
+ u32 val;
+
+ val = CFG_GRA_FMT(dfb->fmt) | CFG_GRA_MOD(dfb->mod);
+ if (dfb->fmt > CFG_420)
+ val |= CFG_PALETTE_ENA;
+ if (state->visible)
+ val |= CFG_GRA_ENA;
+ if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
+ val |= CFG_GRA_HSMOOTH;
+
+ was_disabled = !(dplane->state.ctrl0 & CFG_GRA_ENA);
+ if (was_disabled)
+ armada_reg_queue_mod(regs, idx,
+ 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+
+ dplane->state.ctrl0 = val;
+ dplane->state.src_hw = (drm_rect_height(&state->src) & 0xffff0000) |
+ drm_rect_width(&state->src) >> 16;
+ dplane->state.dst_hw = drm_rect_height(&state->dst) << 16 |
+ drm_rect_width(&state->dst);
+ dplane->state.dst_yx = state->dst.y1 << 16 | state->dst.x1;
+
+ armada_drm_gra_plane_regs(regs + idx, &dfb->fb, &dplane->state,
+ state->src.x1 >> 16, state->src.y1 >> 16,
+ dcrtc->interlaced);
+
+ dplane->state.vsync_update = !was_disabled;
+ dplane->state.changed = true;
+}
+
+static int armada_drm_primary_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_plane_work *work;
+ struct drm_plane_state state = {
+ .plane = plane,
+ .crtc = crtc,
+ .fb = fb,
+ .src_x = src_x,
+ .src_y = src_y,
+ .src_w = src_w,
+ .src_h = src_h,
+ .crtc_x = crtc_x,
+ .crtc_y = crtc_y,
+ .crtc_w = crtc_w,
+ .crtc_h = crtc_h,
+ .rotation = DRM_MODE_ROTATE_0,
+ };
+ struct drm_crtc_state crtc_state = {
+ .crtc = crtc,
+ .enable = crtc->enabled,
+ .mode = crtc->mode,
+ };
+ int ret;
+
+ ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
+ INT_MAX, true, false);
+ if (ret)
+ return ret;
+
+ work = &dplane->works[dplane->next_work];
+ work->fn = armada_drm_crtc_complete_frame_work;
+
+ if (plane->fb != fb) {
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ drm_framebuffer_reference(fb);
+
+ work->old_fb = plane->fb;
+ } else {
+ work->old_fb = NULL;
+ }
+
+ armada_drm_primary_update_state(&state, work->regs);
+
+ if (!dplane->state.changed)
+ return 0;
+
+ /* Wait for pending work to complete */
+ if (armada_drm_plane_work_wait(dplane, HZ / 10) == 0)
+ armada_drm_plane_work_cancel(dcrtc, dplane);
+
+ if (!dplane->state.vsync_update) {
+ work->fn(dcrtc, work);
+ if (work->old_fb)
+ drm_framebuffer_unreference(work->old_fb);
+ return 0;
+ }
+
+ /* Queue it for update on the next interrupt if we are enabled */
+ ret = armada_drm_plane_work_queue(dcrtc, work);
+ if (ret) {
+ work->fn(dcrtc, work);
+ if (work->old_fb)
+ drm_framebuffer_unreference(work->old_fb);
+ }
+
+ dplane->next_work = !dplane->next_work;
+
+ return 0;
+}
+
+int armada_drm_plane_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_crtc *dcrtc;
+ struct armada_plane_work *work;
+ unsigned int idx = 0;
+ u32 sram_para1, enable_mask;
+
+ if (!plane->crtc)
+ return 0;
+
+ /*
+ * Arrange to power down most RAMs and FIFOs if this is the primary
+ * plane, otherwise just the YUV FIFOs for the overlay plane.
+ */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN64x66;
+ enable_mask = CFG_GRA_ENA;
+ } else {
+ sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
+ enable_mask = CFG_DMA_ENA;
+ }
+
+ dplane->state.ctrl0 &= ~enable_mask;
+
+ dcrtc = drm_to_armada_crtc(plane->crtc);
+
+ /*
+ * Try to disable the plane and drop our ref on the framebuffer
+ * at the next frame update. If we fail for any reason, disable
+ * the plane immediately.
+ */
+ work = &dplane->works[dplane->next_work];
+ work->fn = armada_drm_crtc_complete_disable_work;
+ work->cancel = armada_drm_crtc_complete_disable_work;
+ work->old_fb = plane->fb;
+
+ armada_reg_queue_mod(work->regs, idx,
+ 0, enable_mask, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(work->regs, idx,
+ sram_para1, 0, LCD_SPU_SRAM_PARA1);
+ armada_reg_queue_end(work->regs, idx);
+
+ /* Wait for any preceding work to complete, but don't wedge */
+ if (WARN_ON(!armada_drm_plane_work_wait(dplane, HZ)))
+ armada_drm_plane_work_cancel(dcrtc, dplane);
+
+ if (armada_drm_plane_work_queue(dcrtc, work)) {
+ work->fn(dcrtc, work);
+ if (work->old_fb)
+ drm_framebuffer_unreference(work->old_fb);
+ }
+
+ dplane->next_work = !dplane->next_work;
+
+ return 0;
+}
+
static const struct drm_plane_funcs armada_primary_plane_funcs = {
- .update_plane = drm_primary_helper_update,
- .disable_plane = drm_primary_helper_disable,
+ .update_plane = armada_drm_primary_update,
+ .disable_plane = armada_drm_plane_disable,
.destroy = drm_primary_helper_destroy,
};
int armada_drm_plane_init(struct armada_plane *plane)
{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(plane->works); i++)
+ plane->works[i].plane = &plane->base;
+
init_waitqueue_head(&plane->frame_wait);
return 0;
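Editor's note: the armada_crtc.c rework above keeps the single-slot, lock-free hand-off between armada_drm_plane_work_queue() and the vblank interrupt, now paired with two preallocated work entries that the update paths toggle between via next_work. The hand-off itself, reduced to a sketch with invented names:

#include <linux/atomic.h>
#include <linux/errno.h>

/* illustrative only: one pending item, published and consumed atomically */
struct example_slot {
	void *pending;
};

static int example_queue(struct example_slot *s, void *work)
{
	/* publish only if the slot is currently empty */
	return cmpxchg(&s->pending, NULL, work) ? -EBUSY : 0;
}

static void *example_take(struct example_slot *s)
{
	/* the IRQ or cancel path atomically claims whatever is pending */
	return xchg(&s->pending, NULL);
}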
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index bfd3514..445829b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -36,9 +36,13 @@ struct armada_plane;
struct armada_variant;
struct armada_plane_work {
- void (*fn)(struct armada_crtc *,
- struct armada_plane *,
- struct armada_plane_work *);
+ void (*fn)(struct armada_crtc *, struct armada_plane_work *);
+ void (*cancel)(struct armada_crtc *, struct armada_plane_work *);
+ bool need_kfree;
+ struct drm_plane *plane;
+ struct drm_framebuffer *old_fb;
+ struct drm_pending_vblank_event *event;
+ struct armada_regs regs[14];
};
struct armada_plane_state {
@@ -48,11 +52,15 @@ struct armada_plane_state {
u32 dst_hw;
u32 dst_yx;
u32 ctrl0;
+ bool changed;
+ bool vsync_update;
};
struct armada_plane {
struct drm_plane base;
wait_queue_head_t frame_wait;
+ bool next_work;
+ struct armada_plane_work works[2];
struct armada_plane_work *work;
struct armada_plane_state state;
};
@@ -60,10 +68,10 @@ struct armada_plane {
int armada_drm_plane_init(struct armada_plane *plane);
int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane *plane, struct armada_plane_work *work);
+ struct armada_plane_work *work);
int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
-struct armada_plane_work *armada_drm_plane_work_cancel(
- struct armada_crtc *dcrtc, struct armada_plane *plane);
+void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
+ struct armada_plane *plane);
void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
int x, int y);
@@ -106,8 +114,8 @@ struct armada_crtc {
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
-void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
- struct drm_plane *plane);
+int armada_drm_plane_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
extern struct platform_driver armada_lcd_platform_driver;
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index b064879..cc4c557 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -84,7 +84,6 @@ void armada_drm_queue_unref_work(struct drm_device *,
extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
int armada_fbdev_init(struct drm_device *);
-void armada_fbdev_lastclose(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index e857b88..4b11b6b 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/of_graph.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include "armada_crtc.h"
#include "armada_drm.h"
@@ -54,15 +55,10 @@ static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0),
};
-static void armada_drm_lastclose(struct drm_device *dev)
-{
- armada_fbdev_lastclose(dev);
-}
-
DEFINE_DRM_GEM_FOPS(armada_drm_fops);
static struct drm_driver armada_drm_driver = {
- .lastclose = armada_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index a38d5a0..ac92bce 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -154,16 +154,7 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
return ERR_PTR(ret);
}
-static void armada_output_poll_changed(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
- struct drm_fb_helper *fbh = priv->fbdev;
-
- if (fbh)
- drm_fb_helper_hotplug_event(fbh);
-}
-
const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
.fb_create = armada_fb_create,
- .output_poll_changed = armada_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
};
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index a2ce83f..2a59db0 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -159,14 +159,6 @@ int armada_fbdev_init(struct drm_device *dev)
return ret;
}
-void armada_fbdev_lastclose(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- if (priv->fbdev)
- drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
-}
-
void armada_fbdev_fini(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index aba9476..c391955 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,7 +7,7 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
@@ -32,11 +32,6 @@ struct armada_ovl_plane_properties {
struct armada_ovl_plane {
struct armada_plane base;
- struct drm_framebuffer *old_fb;
- struct {
- struct armada_plane_work work;
- struct armada_regs regs[13];
- } vbl;
struct armada_ovl_plane_properties prop;
};
#define drm_to_armada_ovl_plane(p) \
@@ -67,218 +62,205 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
spin_unlock_irq(&dcrtc->irq_lock);
}
-static void armada_ovl_retire_fb(struct armada_ovl_plane *dplane,
- struct drm_framebuffer *fb)
-{
- struct drm_framebuffer *old_fb;
-
- old_fb = xchg(&dplane->old_fb, fb);
-
- if (old_fb)
- armada_drm_queue_unref_work(dplane->base.base.dev, old_fb);
-}
-
/* === Plane support === */
static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
- struct armada_plane *plane, struct armada_plane_work *work)
+ struct armada_plane_work *work)
{
- struct armada_ovl_plane *dplane = container_of(plane, struct armada_ovl_plane, base);
+ unsigned long flags;
- trace_armada_ovl_plane_work(&dcrtc->crtc, &plane->base);
+ trace_armada_ovl_plane_work(&dcrtc->crtc, work->plane);
- armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
- armada_ovl_retire_fb(dplane, NULL);
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ armada_drm_crtc_update_regs(dcrtc, work->regs);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
-static int
-armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
- uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
+static void armada_ovl_plane_update_state(struct drm_plane_state *state,
+ struct armada_regs *regs)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(state->plane);
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
const struct drm_format_info *format;
- struct drm_rect src = {
- .x1 = src_x,
- .y1 = src_y,
- .x2 = src_x + src_w,
- .y2 = src_y + src_h,
- };
- struct drm_rect dest = {
- .x1 = crtc_x,
- .y1 = crtc_y,
- .x2 = crtc_x + crtc_w,
- .y2 = crtc_y + crtc_h,
- };
- const struct drm_rect clip = {
- .x2 = crtc->mode.hdisplay,
- .y2 = crtc->mode.vdisplay,
- };
- uint32_t val, ctrl0;
- unsigned idx = 0;
- bool visible, fb_changed;
- int ret;
-
- trace_armada_ovl_plane_update(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
-
- ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
- DRM_MODE_ROTATE_0,
- 0, INT_MAX, true, false, &visible);
- if (ret)
- return ret;
-
- ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
- CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
- CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
+ unsigned int idx = 0;
+ bool fb_changed;
+ u32 val, ctrl0;
+ u16 src_x, src_y;
- /* Does the position/size result in nothing to display? */
- if (!visible)
- ctrl0 &= ~CFG_DMA_ENA;
+ ctrl0 = CFG_DMA_FMT(dfb->fmt) | CFG_DMA_MOD(dfb->mod) | CFG_CBSH_ENA;
+ if (state->visible)
+ ctrl0 |= CFG_DMA_ENA;
+ if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
+ ctrl0 |= CFG_DMA_HSMOOTH;
/*
* Shifting a YUV packed format image by one pixel causes the U/V
* planes to swap. Compensate for it by also toggling the UV swap.
*/
- format = fb->format;
- if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1))
+ format = dfb->fb.format;
+ if (format->num_planes == 1 && state->src.x1 >> 16 & (format->hsub - 1))
ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
- fb_changed = plane->fb != fb ||
- dplane->base.state.src_x != src.x1 >> 16 ||
- dplane->base.state.src_y != src.y1 >> 16;
-
- if (!dcrtc->plane) {
- dcrtc->plane = plane;
- armada_ovl_update_attr(&dplane->prop, dcrtc);
- }
-
- /* FIXME: overlay on an interlaced display */
- /* Just updating the position/size? */
- if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) {
- val = (drm_rect_height(&src) & 0xffff0000) |
- drm_rect_width(&src) >> 16;
- dplane->base.state.src_hw = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
-
- val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- dplane->base.state.dst_hw = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
-
- val = dest.y1 << 16 | dest.x1;
- dplane->base.state.dst_yx = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
-
- return 0;
- } else if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
+ if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
- armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
- dcrtc->base + LCD_SPU_SRAM_PARA1);
+ armada_reg_queue_mod(regs, idx,
+ 0, CFG_PDWN16x66 | CFG_PDWN32x66,
+ LCD_SPU_SRAM_PARA1);
}
- if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
- armada_drm_plane_work_cancel(dcrtc, &dplane->base);
+ fb_changed = dplane->base.base.fb != &dfb->fb ||
+ dplane->base.state.src_x != state->src.x1 >> 16 ||
+ dplane->base.state.src_y != state->src.y1 >> 16;
+
+ dplane->base.state.vsync_update = fb_changed;
+ /* FIXME: overlay on an interlaced display */
if (fb_changed) {
u32 addrs[3];
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_get(fb);
-
- if (plane->fb)
- armada_ovl_retire_fb(dplane, plane->fb);
+ dplane->base.state.src_y = src_y = state->src.y1 >> 16;
+ dplane->base.state.src_x = src_x = state->src.x1 >> 16;
- dplane->base.state.src_y = src_y = src.y1 >> 16;
- dplane->base.state.src_x = src_x = src.x1 >> 16;
+ armada_drm_plane_calc_addrs(addrs, &dfb->fb, src_x, src_y);
- armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
-
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V1);
- val = fb->pitches[0] << 16 | fb->pitches[0];
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ val = dfb->fb.pitches[0] << 16 | dfb->fb.pitches[0];
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DMA_PITCH_YC);
- val = fb->pitches[1] << 16 | fb->pitches[2];
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ val = dfb->fb.pitches[1] << 16 | dfb->fb.pitches[2];
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DMA_PITCH_UV);
}
- val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
+ val = (drm_rect_height(&state->src) & 0xffff0000) |
+ drm_rect_width(&state->src) >> 16;
if (dplane->base.state.src_hw != val) {
dplane->base.state.src_hw = val;
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
- val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
+ val = drm_rect_height(&state->dst) << 16 | drm_rect_width(&state->dst);
if (dplane->base.state.dst_hw != val) {
dplane->base.state.dst_hw = val;
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
- val = dest.y1 << 16 | dest.x1;
+ val = state->dst.y1 << 16 | state->dst.x1;
if (dplane->base.state.dst_yx != val) {
dplane->base.state.dst_yx = val;
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
if (dplane->base.state.ctrl0 != ctrl0) {
dplane->base.state.ctrl0 = ctrl0;
- armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
+ armada_reg_queue_mod(regs, idx, ctrl0,
CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
CFG_YUV2RGB) | CFG_DMA_ENA,
LCD_SPU_DMA_CTRL0);
+ dplane->base.state.vsync_update = true;
}
- if (idx) {
- armada_reg_queue_end(dplane->vbl.regs, idx);
- armada_drm_plane_work_queue(dcrtc, &dplane->base,
- &dplane->vbl.work);
- }
- return 0;
+
+ dplane->base.state.changed = idx != 0;
+
+ armada_reg_queue_end(regs, idx);
}
-static int armada_ovl_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
+static int
+armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- struct drm_framebuffer *fb;
- struct armada_crtc *dcrtc;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_plane_work *work;
+ struct drm_plane_state state = {
+ .plane = plane,
+ .crtc = crtc,
+ .fb = fb,
+ .src_x = src_x,
+ .src_y = src_y,
+ .src_w = src_w,
+ .src_h = src_h,
+ .crtc_x = crtc_x,
+ .crtc_y = crtc_y,
+ .crtc_w = crtc_w,
+ .crtc_h = crtc_h,
+ .rotation = DRM_MODE_ROTATE_0,
+ };
+ struct drm_crtc_state crtc_state = {
+ .crtc = crtc,
+ .enable = crtc->enabled,
+ .mode = crtc->mode,
+ };
+ int ret;
- if (!dplane->base.base.crtc)
+ trace_armada_ovl_plane_update(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+
+ ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
+ INT_MAX, true, false);
+ if (ret)
+ return ret;
+
+ work = &dplane->base.works[dplane->base.next_work];
+
+ if (plane->fb != fb) {
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ drm_framebuffer_reference(fb);
+
+ work->old_fb = plane->fb;
+ } else {
+ work->old_fb = NULL;
+ }
+
+ armada_ovl_plane_update_state(&state, work->regs);
+
+ if (!dplane->base.state.changed)
return 0;
- dcrtc = drm_to_armada_crtc(dplane->base.base.crtc);
+ /* Wait for pending work to complete */
+ if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
+ armada_drm_plane_work_cancel(dcrtc, &dplane->base);
- armada_drm_plane_work_cancel(dcrtc, &dplane->base);
- armada_drm_crtc_plane_disable(dcrtc, plane);
+ /* Just updating the position/size? */
+ if (!dplane->base.state.vsync_update) {
+ armada_ovl_plane_work(dcrtc, work);
+ return 0;
+ }
- dcrtc->plane = NULL;
- dplane->base.state.ctrl0 = 0;
+ if (!dcrtc->plane) {
+ dcrtc->plane = plane;
+ armada_ovl_update_attr(&dplane->prop, dcrtc);
+ }
+
+ /* Queue it for update on the next interrupt if we are enabled */
+ ret = armada_drm_plane_work_queue(dcrtc, work);
+ if (ret)
+ DRM_ERROR("failed to queue plane work: %d\n", ret);
- fb = xchg(&dplane->old_fb, NULL);
- if (fb)
- drm_framebuffer_put(fb);
+ dplane->base.next_work = !dplane->base.next_work;
return 0;
}
@@ -362,7 +344,7 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
.update_plane = armada_ovl_plane_update,
- .disable_plane = armada_ovl_plane_disable,
+ .disable_plane = armada_drm_plane_disable,
.destroy = armada_ovl_plane_destroy,
.set_property = armada_ovl_plane_set_property,
};
@@ -454,7 +436,8 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
return ret;
}
- dplane->vbl.work.fn = armada_ovl_plane_work;
+ dplane->base.works[0].fn = armada_ovl_plane_work;
+ dplane->base.works[1].fn = armada_ovl_plane_work;
ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
&armada_ovl_plane_funcs,
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
index 8dbfea7..f03a56b 100644
--- a/drivers/gpu/drm/armada/armada_trace.h
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -34,14 +34,34 @@ TRACE_EVENT(armada_ovl_plane_update,
__field(struct drm_plane *, plane)
__field(struct drm_crtc *, crtc)
__field(struct drm_framebuffer *, fb)
+ __field(int, crtc_x)
+ __field(int, crtc_y)
+ __field(unsigned int, crtc_w)
+ __field(unsigned int, crtc_h)
+ __field(u32, src_x)
+ __field(u32, src_y)
+ __field(u32, src_w)
+ __field(u32, src_h)
),
TP_fast_assign(
__entry->plane = plane;
__entry->crtc = crtc;
__entry->fb = fb;
+ __entry->crtc_x = crtc_x;
+ __entry->crtc_y = crtc_y;
+ __entry->crtc_w = crtc_w;
+ __entry->crtc_h = crtc_h;
+ __entry->src_x = src_x;
+ __entry->src_y = src_y;
+ __entry->src_w = src_w;
+ __entry->src_h = src_h;
),
- TP_printk("plane %p crtc %p fb %p",
- __entry->plane, __entry->crtc, __entry->fb)
+ TP_printk("plane %p crtc %p fb %p crtc @ (%d,%d, %ux%u) src @ (%u,%u, %ux%u)",
+ __entry->plane, __entry->crtc, __entry->fb,
+ __entry->crtc_x, __entry->crtc_y,
+ __entry->crtc_w, __entry->crtc_h,
+ __entry->src_x >> 16, __entry->src_y >> 16,
+ __entry->src_w >> 16, __entry->src_h >> 16)
);
TRACE_EVENT(armada_ovl_plane_work,
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9555a35..831b733 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -644,6 +644,7 @@ static void ast_crtc_commit(struct drm_crtc *crtc)
{
struct ast_private *ast = crtc->dev->dev_private;
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+ ast_crtc_load_lut(crtc);
}
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 5f4c2e8..d665dd5 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = {
{0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
{0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
{0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
- {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */
+ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
{0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
{0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
{0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
@@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
{0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
{0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
{0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
- {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */
+ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
{0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
{0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
{0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 696a15d..fe354eb 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -199,9 +199,8 @@ static struct ttm_backend_func ast_tt_backend_func = {
};
-static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *ast_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct ttm_tt *tt;
@@ -209,27 +208,15 @@ static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
if (tt == NULL)
return NULL;
tt->func = &ast_tt_backend_func;
- if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
return tt;
}
-static int ast_ttm_tt_populate(struct ttm_tt *ttm)
-{
- return ttm_pool_populate(ttm);
-}
-
-static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate(ttm);
-}
-
struct ttm_bo_driver ast_bo_driver = {
.ttm_tt_create = ast_ttm_tt_create,
- .ttm_tt_populate = ast_ttm_tt_populate,
- .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
.init_mem_type = ast_bo_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = ast_bo_evict_flags,
@@ -237,7 +224,6 @@ struct ttm_bo_driver ast_bo_driver = {
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int ast_mm_init(struct ast_private *ast)
@@ -335,7 +321,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
ttm_bo_type_device, &astbo->placement,
- align >> PAGE_SHIFT, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, acc_size,
NULL, NULL, ast_bo_ttm_destroy);
if (ret)
goto error;
@@ -354,6 +340,7 @@ static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
@@ -365,7 +352,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
@@ -377,6 +364,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
int ast_bo_unpin(struct ast_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
@@ -388,11 +376,12 @@ int ast_bo_unpin(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
}
int ast_bo_push_sysram(struct ast_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
@@ -409,7 +398,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
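Both the ast hunks above and the bochs hunks further down convert ttm_bo_validate() callers from the old pair of bool arguments to the new struct ttm_operation_ctx. A minimal sketch of the new calling convention, assuming only the TTM API visible in these hunks (the helper name and its arguments are placeholders):

#include <drm/ttm/ttm_bo_api.h>

/* Validate a buffer into the given placement, uninterruptible,
 * waiting for the GPU if necessary. */
static int example_validate(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	/* { .interruptible = false, .no_wait_gpu = false } */
	struct ttm_operation_ctx ctx = { false, false };

	return ttm_bo_validate(bo, placement, &ctx);
}

The same on-stack context is what the pin, unpin and push-to-sysram paths above now pass through instead of the two booleans.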
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c6e8061..c1ea5c3 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -461,13 +461,6 @@ static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
-static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
-{
- struct atmel_hlcdc_dc *dc = dev->dev_private;
-
- drm_fbdev_cma_hotplug_event(dc->fbdev);
-}
-
struct atmel_hlcdc_dc_commit {
struct work_struct work;
struct drm_device *dev;
@@ -563,7 +556,7 @@ error:
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = atmel_hlcdc_fb_create,
- .output_poll_changed = atmel_hlcdc_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = atmel_hlcdc_dc_atomic_commit,
};
@@ -665,10 +658,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
platform_set_drvdata(pdev, dev);
- dc->fbdev = drm_fbdev_cma_init(dev, 24,
- dev->mode_config.num_connector);
- if (IS_ERR(dc->fbdev))
- dc->fbdev = NULL;
+ drm_fb_cma_fbdev_init(dev, 24, 0);
drm_kms_helper_poll_init(dev);
@@ -688,8 +678,7 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
- if (dc->fbdev)
- drm_fbdev_cma_fini(dc->fbdev);
+ drm_fb_cma_fbdev_fini(dev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
@@ -705,13 +694,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
destroy_workqueue(dc->wq);
}
-static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
-{
- struct atmel_hlcdc_dc *dc = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(dc->fbdev);
-}
-
static int atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -744,7 +726,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = atmel_hlcdc_dc_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = atmel_hlcdc_dc_irq_handler,
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
.irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 6833ee2..ab32d5b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -32,6 +32,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -374,7 +375,6 @@ struct atmel_hlcdc_dc {
const struct atmel_hlcdc_dc_desc *desc;
struct dma_pool *dscrpool;
struct atmel_hlcdc *hlcdc;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc *crtc;
struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
struct workqueue_struct *wq;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 703c2d1..e18800e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -194,20 +194,6 @@ static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode)
return 0;
}
-static bool atmel_hlcdc_format_embeds_alpha(u32 format)
-{
- int i;
-
- for (i = 0; i < sizeof(format); i++) {
- char tmp = (format >> (8 * i)) & 0xff;
-
- if (tmp == 'A')
- return true;
- }
-
- return false;
-}
-
static u32 heo_downscaling_xcoef[] = {
0x11343311,
0x000000f7,
@@ -377,13 +363,13 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
{
unsigned int cfg = ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16 | state->ahb_id;
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
- u32 format = state->base.fb->format->format;
+ const struct drm_format_info *format = state->base.fb->format;
/*
* Rotation optimization is not working on RGB888 (rotation is still
* working but without any optimization).
*/
- if (format == DRM_FORMAT_RGB888)
+ if (format->format == DRM_FORMAT_RGB888)
cfg |= ATMEL_HLCDC_LAYER_DMA_ROTDIS;
atmel_hlcdc_layer_write_cfg(&plane->layer, ATMEL_HLCDC_LAYER_DMA_CFG,
@@ -395,7 +381,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
ATMEL_HLCDC_LAYER_ITER;
- if (atmel_hlcdc_format_embeds_alpha(format))
+ if (format->has_alpha)
cfg |= ATMEL_HLCDC_LAYER_LAEN;
else
cfg |= ATMEL_HLCDC_LAYER_GAEN |
@@ -566,7 +552,7 @@ atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
ovl_state = drm_plane_state_to_atmel_hlcdc_plane_state(ovl_s);
if (!ovl_s->fb ||
- atmel_hlcdc_format_embeds_alpha(ovl_s->fb->format->format) ||
+ ovl_s->fb->format->has_alpha ||
ovl_state->alpha != 255)
continue;
@@ -769,7 +755,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) &&
(!desc->layout.memsize ||
- atmel_hlcdc_format_embeds_alpha(state->base.fb->format->format)))
+ state->base.fb->format->has_alpha))
return -EINVAL;
if (state->crtc_x < 0 || state->crtc_y < 0)
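The atmel-hlcdc hunks replace the hand-rolled fourcc character scan with the has_alpha flag carried by struct drm_format_info. A minimal sketch of answering the same question from a bare fourcc code, assuming only the core drm_fourcc helpers (the wrapper function is a placeholder):

#include <drm/drm_fourcc.h>

static bool format_has_alpha(u32 fourcc)
{
	const struct drm_format_info *info = drm_format_info(fourcc);

	/* drm_format_info() returns NULL for unknown fourcc codes */
	return info && info->has_alpha;
}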
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index c4cadb6..39cd084 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -176,10 +176,8 @@ static struct ttm_backend_func bochs_tt_backend_func = {
.destroy = &bochs_ttm_backend_destroy,
};
-static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size,
- uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *bochs_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct ttm_tt *tt;
@@ -187,17 +185,15 @@ static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev,
if (tt == NULL)
return NULL;
tt->func = &bochs_tt_backend_func;
- if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
return tt;
}
-struct ttm_bo_driver bochs_bo_driver = {
+static struct ttm_bo_driver bochs_bo_driver = {
.ttm_tt_create = bochs_ttm_tt_create,
- .ttm_tt_populate = ttm_pool_populate,
- .ttm_tt_unpopulate = ttm_pool_unpopulate,
.init_mem_type = bochs_bo_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bochs_bo_evict_flags,
@@ -205,7 +201,6 @@ struct ttm_bo_driver bochs_bo_driver = {
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int bochs_mm_init(struct bochs_device *bochs)
@@ -283,6 +278,7 @@ static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
@@ -295,7 +291,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
bochs_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
@@ -307,6 +303,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
int bochs_bo_unpin(struct bochs_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
@@ -320,7 +317,7 @@ int bochs_bo_unpin(struct bochs_bo *bo)
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
@@ -370,7 +367,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
ttm_bo_type_device, &bochsbo->placement,
- align >> PAGE_SHIFT, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, acc_size,
NULL, NULL, bochs_bo_ttm_destroy);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 3b99d5a..3aa65bd 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -30,7 +30,8 @@ config DRM_DUMB_VGA_DAC
depends on OF
select DRM_KMS_HELPER
help
- Support for RGB to VGA DAC based bridges
+ Support for non-programmable RGB to VGA DAC bridges, such as ADI
+ ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
config DRM_LVDS_ENCODER
tristate "Transparent parallel to LVDS encoder support"
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index 9385eb0..b490438 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -977,8 +977,6 @@ static int anx78xx_get_modes(struct drm_connector *connector)
}
num_modes = drm_add_edid_modes(connector, anx78xx->edid);
- /* Store the ELD */
- drm_edid_to_eld(connector, anx78xx->edid);
unlock:
mutex_unlock(&anx78xx->lock);
@@ -1303,8 +1301,7 @@ static void unregister_i2c_dummy_clients(struct anx78xx *anx78xx)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(anx78xx->i2c_dummy); i++)
- if (anx78xx->i2c_dummy[i])
- i2c_unregister_device(anx78xx->i2c_dummy[i]);
+ i2c_unregister_device(anx78xx->i2c_dummy[i]);
}
static const struct regmap_config anx78xx_regmap_config = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index a890504..5c52307 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -35,6 +36,8 @@
#define to_dp(nm) container_of(nm, struct analogix_dp_device, nm)
+static const bool verify_fast_training;
+
struct bridge_init {
struct i2c_client *client;
struct device_node *node;
@@ -98,20 +101,18 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
return 0;
}
-int analogix_dp_psr_supported(struct device *dev)
+int analogix_dp_psr_enabled(struct analogix_dp_device *dp)
{
- struct analogix_dp_device *dp = dev_get_drvdata(dev);
- return dp->psr_support;
+ return dp->psr_enable;
}
-EXPORT_SYMBOL_GPL(analogix_dp_psr_supported);
+EXPORT_SYMBOL_GPL(analogix_dp_psr_enabled);
-int analogix_dp_enable_psr(struct device *dev)
+int analogix_dp_enable_psr(struct analogix_dp_device *dp)
{
- struct analogix_dp_device *dp = dev_get_drvdata(dev);
struct edp_vsc_psr psr_vsc;
- if (!dp->psr_support)
+ if (!dp->psr_enable)
return 0;
/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
@@ -124,18 +125,16 @@ int analogix_dp_enable_psr(struct device *dev)
psr_vsc.DB0 = 0;
psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
- analogix_dp_send_psr_spd(dp, &psr_vsc);
- return 0;
+ return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
}
EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
-int analogix_dp_disable_psr(struct device *dev)
+int analogix_dp_disable_psr(struct analogix_dp_device *dp)
{
- struct analogix_dp_device *dp = dev_get_drvdata(dev);
struct edp_vsc_psr psr_vsc;
int ret;
- if (!dp->psr_support)
+ if (!dp->psr_enable)
return 0;
/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
@@ -152,8 +151,7 @@ int analogix_dp_disable_psr(struct device *dev)
if (ret != 1)
dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret);
- analogix_dp_send_psr_spd(dp, &psr_vsc);
- return 0;
+ return analogix_dp_send_psr_spd(dp, &psr_vsc, false);
}
EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
@@ -533,7 +531,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
{
int lane, lane_count, retval;
u32 reg;
- u8 link_align, link_status[2], adjust_request[2];
+ u8 link_align, link_status[2], adjust_request[2], spread;
usleep_range(400, 401);
@@ -576,6 +574,20 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
dev_dbg(dp->dev, "final lane count = %.2x\n",
dp->link_train.lane_count);
+ retval = drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD,
+ &spread);
+ if (retval != 1) {
+ dev_err(dp->dev, "failed to read downspread %d\n",
+ retval);
+ dp->fast_train_support = false;
+ } else {
+ dp->fast_train_support =
+ (spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING) ?
+ true : false;
+ }
+ dev_dbg(dp->dev, "fast link training %s\n",
+ dp->fast_train_support ? "supported" : "unsupported");
+
/* set enhanced mode if available */
analogix_dp_set_enhanced_mode(dp);
dp->link_train.lt_state = FINISHED;
@@ -632,10 +644,12 @@ static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp,
*lane_count = DPCD_MAX_LANE_COUNT(data);
}
-static void analogix_dp_init_training(struct analogix_dp_device *dp,
- enum link_lane_count_type max_lane,
- int max_rate)
+static int analogix_dp_full_link_train(struct analogix_dp_device *dp,
+ u32 max_lanes, u32 max_rate)
{
+ int retval = 0;
+ bool training_finished = false;
+
/*
* MACRO_RST must be applied after the PLL_LOCK to avoid
* the DP inter pair skew issue for at least 10 us
@@ -661,18 +675,13 @@ static void analogix_dp_init_training(struct analogix_dp_device *dp,
}
/* Setup TX lane count & rate */
- if (dp->link_train.lane_count > max_lane)
- dp->link_train.lane_count = max_lane;
+ if (dp->link_train.lane_count > max_lanes)
+ dp->link_train.lane_count = max_lanes;
if (dp->link_train.link_rate > max_rate)
dp->link_train.link_rate = max_rate;
/* All DP analog module power up */
analogix_dp_set_analog_power_down(dp, POWER_ALL, 0);
-}
-
-static int analogix_dp_sw_link_training(struct analogix_dp_device *dp)
-{
- int retval = 0, training_finished = 0;
dp->link_train.lt_state = START;
@@ -707,27 +716,92 @@ static int analogix_dp_sw_link_training(struct analogix_dp_device *dp)
return retval;
}
-static int analogix_dp_set_link_train(struct analogix_dp_device *dp,
- u32 count, u32 bwtype)
+static int analogix_dp_fast_link_train(struct analogix_dp_device *dp)
{
- int i;
- int retval;
+ int i, ret;
+ u8 link_align, link_status[2];
+ enum pll_status status;
- for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) {
- analogix_dp_init_training(dp, count, bwtype);
- retval = analogix_dp_sw_link_training(dp);
- if (retval == 0)
- break;
+ analogix_dp_reset_macro(dp);
+
+ analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
+ analogix_dp_set_lane_count(dp, dp->link_train.lane_count);
- usleep_range(100, 110);
+ for (i = 0; i < dp->link_train.lane_count; i++) {
+ analogix_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[i], i);
}
- return retval;
+ ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status,
+ status != PLL_UNLOCKED, 120,
+ 120 * DP_TIMEOUT_LOOP_COUNT);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Wait for pll lock failed %d\n", ret);
+ return ret;
+ }
+
+ /* source Set training pattern 1 */
+ analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
+ /* From DP spec, pattern must be on-screen for a minimum 500us */
+ usleep_range(500, 600);
+
+ analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
+ /* From DP spec, pattern must be on-screen for a minimum 500us */
+ usleep_range(500, 600);
+
+ /* TODO: enhanced_mode?*/
+ analogix_dp_set_training_pattern(dp, DP_NONE);
+
+ /*
+ * Useful for debugging issues with fast link training, disable for more
+ * speed
+ */
+ if (verify_fast_training) {
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_ALIGN_STATUS_UPDATED,
+ &link_align);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "Read align status failed %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status,
+ 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "Read link status failed %d\n",
+ ret);
+ return ret;
+ }
+
+ if (analogix_dp_clock_recovery_ok(link_status,
+ dp->link_train.lane_count)) {
+ DRM_DEV_ERROR(dp->dev, "Clock recovery failed\n");
+ analogix_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+
+ if (analogix_dp_channel_eq_ok(link_status, link_align,
+ dp->link_train.lane_count)) {
+ DRM_DEV_ERROR(dp->dev, "Channel EQ failed\n");
+ analogix_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int analogix_dp_train_link(struct analogix_dp_device *dp)
+{
+ if (dp->fast_train_support)
+ return analogix_dp_fast_link_train(dp);
+
+ return analogix_dp_full_link_train(dp, dp->video_info.max_lane_count,
+ dp->video_info.max_link_rate);
}
static int analogix_dp_config_video(struct analogix_dp_device *dp)
{
- int retval = 0;
int timeout_loop = 0;
int done_count = 0;
@@ -783,10 +857,7 @@ static int analogix_dp_config_video(struct analogix_dp_device *dp)
usleep_range(1000, 1001);
}
- if (retval != 0)
- dev_err(dp->dev, "Video stream is not detected!\n");
-
- return retval;
+ return 0;
}
static void analogix_dp_enable_scramble(struct analogix_dp_device *dp,
@@ -855,10 +926,10 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
DRM_ERROR("failed to disable the panel\n");
}
- ret = analogix_dp_set_link_train(dp, dp->video_info.max_lane_count,
- dp->video_info.max_link_rate);
+ ret = readx_poll_timeout(analogix_dp_train_link, dp, ret, !ret, 100,
+ DP_TIMEOUT_TRAINING_US * 5);
if (ret) {
- dev_err(dp->dev, "unable to do link train\n");
+ dev_err(dp->dev, "unable to do link train, ret=%d\n", ret);
return;
}
@@ -880,8 +951,8 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
/* Enable video */
analogix_dp_start_video(dp);
- dp->psr_support = analogix_dp_detect_sink_psr(dp);
- if (dp->psr_support)
+ dp->psr_enable = analogix_dp_detect_sink_psr(dp);
+ if (dp->psr_enable)
analogix_dp_enable_sink_psr(dp);
}
@@ -1019,27 +1090,30 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
{
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_encoder *encoder = dp->encoder;
- struct drm_connector *connector = &dp->connector;
- int ret;
+ struct drm_connector *connector = NULL;
+ int ret = 0;
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
- connector->polled = DRM_CONNECTOR_POLL_HPD;
+ if (!dp->plat_data->skip_connector) {
+ connector = &dp->connector;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(dp->drm_dev, connector,
- &analogix_dp_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
- if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
- }
+ ret = drm_connector_init(dp->drm_dev, connector,
+ &analogix_dp_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
- drm_connector_helper_add(connector,
- &analogix_dp_connector_helper_funcs);
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_helper_add(connector,
+ &analogix_dp_connector_helper_funcs);
+ drm_mode_connector_attach_encoder(connector, encoder);
+ }
/*
* NOTE: the connector registration is implemented in analogix
@@ -1123,6 +1197,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
if (ret)
DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ dp->psr_enable = false;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
@@ -1283,8 +1358,9 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
return analogix_dp_transfer(dp, msg);
}
-int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
- struct analogix_dp_plat_data *plat_data)
+struct analogix_dp_device *
+analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
+ struct analogix_dp_plat_data *plat_data)
{
struct platform_device *pdev = to_platform_device(dev);
struct analogix_dp_device *dp;
@@ -1294,14 +1370,12 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
if (!plat_data) {
dev_err(dev, "Invalided input plat_data\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL);
if (!dp)
- return -ENOMEM;
-
- dev_set_drvdata(dev, dp);
+ return ERR_PTR(-ENOMEM);
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1318,7 +1392,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
ret = analogix_dp_dt_parse_pdata(dp);
if (ret)
- return ret;
+ return ERR_PTR(ret);
dp->phy = devm_phy_get(dp->dev, "dp");
if (IS_ERR(dp->phy)) {
@@ -1332,14 +1406,14 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
if (ret == -ENOSYS || ret == -ENODEV)
dp->phy = NULL;
else
- return ret;
+ return ERR_PTR(ret);
}
}
dp->clock = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
dev_err(&pdev->dev, "failed to get clock\n");
- return PTR_ERR(dp->clock);
+ return ERR_CAST(dp->clock);
}
clk_prepare_enable(dp->clock);
@@ -1348,7 +1422,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dp->reg_base))
- return PTR_ERR(dp->reg_base);
+ return ERR_CAST(dp->reg_base);
dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
@@ -1369,7 +1443,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
"hpd_gpio");
if (ret) {
dev_err(&pdev->dev, "failed to get hpd gpio\n");
- return ret;
+ return ERR_PTR(ret);
}
dp->irq = gpio_to_irq(dp->hpd_gpio);
irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@@ -1381,16 +1455,9 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
- pm_runtime_enable(dev);
-
- pm_runtime_get_sync(dev);
- phy_power_on(dp->phy);
-
- analogix_dp_init_dp(dp);
-
ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
analogix_dp_hardirq,
analogix_dp_irq_thread,
@@ -1410,38 +1477,30 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
ret = drm_dp_aux_register(&dp->aux);
if (ret)
- goto err_disable_pm_runtime;
+ return ERR_PTR(ret);
+
+ pm_runtime_enable(dev);
ret = analogix_dp_create_bridge(drm_dev, dp);
if (ret) {
DRM_ERROR("failed to create bridge (%d)\n", ret);
- drm_encoder_cleanup(dp->encoder);
goto err_disable_pm_runtime;
}
- phy_power_off(dp->phy);
- pm_runtime_put(dev);
-
- return 0;
+ return dp;
err_disable_pm_runtime:
- phy_power_off(dp->phy);
- pm_runtime_put(dev);
pm_runtime_disable(dev);
- return ret;
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(analogix_dp_bind);
-void analogix_dp_unbind(struct device *dev, struct device *master,
- void *data)
+void analogix_dp_unbind(struct analogix_dp_device *dp)
{
- struct analogix_dp_device *dp = dev_get_drvdata(dev);
-
analogix_dp_bridge_disable(dp->bridge);
dp->connector.funcs->destroy(&dp->connector);
- dp->encoder->funcs->destroy(dp->encoder);
if (dp->plat_data->panel) {
if (drm_panel_unprepare(dp->plat_data->panel))
@@ -1451,16 +1510,14 @@ void analogix_dp_unbind(struct device *dev, struct device *master,
}
drm_dp_aux_unregister(&dp->aux);
- pm_runtime_disable(dev);
+ pm_runtime_disable(dp->dev);
clk_disable_unprepare(dp->clock);
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
#ifdef CONFIG_PM
-int analogix_dp_suspend(struct device *dev)
+int analogix_dp_suspend(struct analogix_dp_device *dp)
{
- struct analogix_dp_device *dp = dev_get_drvdata(dev);
-
clk_disable_unprepare(dp->clock);
if (dp->plat_data->panel) {
@@ -1472,9 +1529,8 @@ int analogix_dp_suspend(struct device *dev)
}
EXPORT_SYMBOL_GPL(analogix_dp_suspend);
-int analogix_dp_resume(struct device *dev)
+int analogix_dp_resume(struct analogix_dp_device *dp)
{
- struct analogix_dp_device *dp = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(dp->clock);
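With this change analogix_dp_bind() hands back the device pointer directly and encodes failures with ERR_PTR()/ERR_CAST(), so the platform glue is expected to test the result with IS_ERR() and keep the handle for the later unbind/suspend/resume calls. A minimal sketch of that caller side, under the assumption of a simple bind callback (the wrapper function and output parameter are placeholders):

#include <linux/err.h>

static int example_dp_bind(struct device *dev, struct drm_device *drm_dev,
			   struct analogix_dp_plat_data *plat_data,
			   struct analogix_dp_device **out)
{
	struct analogix_dp_device *dp;

	dp = analogix_dp_bind(dev, drm_dev, plat_data);
	if (IS_ERR(dp))
		return PTR_ERR(dp);	/* errno is encoded in the pointer */

	/* keep the handle: unbind/suspend/resume now take it directly */
	*out = dp;
	return 0;
}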
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 5c6a288..6a96ef7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -20,6 +20,10 @@
#define MAX_CR_LOOP 5
#define MAX_EQ_LOOP 5
+/* Training takes 22ms if AUX channel comm fails. Use this as retry interval */
+#define DP_TIMEOUT_TRAINING_US 22000
+#define DP_TIMEOUT_PSR_LOOP_MS 300
+
/* DP_MAX_LANE_COUNT */
#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1)
#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f)
@@ -168,7 +172,8 @@ struct analogix_dp_device {
int dpms_mode;
int hpd_gpio;
bool force_hpd;
- bool psr_support;
+ bool psr_enable;
+ bool fast_train_support;
struct mutex panel_lock;
bool panel_is_modeset;
@@ -247,8 +252,8 @@ void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp);
void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp);
-void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
- struct edp_vsc_psr *vsc);
+int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+ struct edp_vsc_psr *vsc, bool blocking);
ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
struct drm_dp_aux_msg *msg);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 303083a..9df2f3e 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -10,10 +10,11 @@
* option) any later version.
*/
-#include <linux/device.h>
-#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
#include <drm/bridge/analogix_dp.h>
@@ -992,10 +993,25 @@ void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp)
writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON);
}
-void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
- struct edp_vsc_psr *vsc)
+static ssize_t analogix_dp_get_psr_status(struct analogix_dp_device *dp)
+{
+ ssize_t val;
+ u8 status;
+
+ val = drm_dp_dpcd_readb(&dp->aux, DP_PSR_STATUS, &status);
+ if (val < 0) {
+ dev_err(dp->dev, "PSR_STATUS read failed ret=%zd", val);
+ return val;
+ }
+ return status;
+}
+
+int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+ struct edp_vsc_psr *vsc, bool blocking)
{
unsigned int val;
+ int ret;
+ ssize_t psr_status;
/* don't send info frame */
val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
@@ -1036,6 +1052,20 @@ void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
val |= IF_EN;
writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+ if (!blocking)
+ return 0;
+
+ ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status,
+ psr_status >= 0 &&
+ ((vsc->DB1 && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
+ (!vsc->DB1 && psr_status == DP_PSR_SINK_INACTIVE)), 1500,
+ DP_TIMEOUT_PSR_LOOP_MS * 1000);
+ if (ret) {
+ dev_warn(dp->dev, "Failed to apply PSR %d\n", ret);
+ return ret;
+ }
+ return 0;
}
ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
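The blocking PSR path above is built on readx_poll_timeout() from <linux/iopoll.h>, which repeatedly calls a one-argument getter until a condition on its return value holds or a timeout expires. A minimal sketch of that idiom; my_get_status() and MY_EXPECTED_STATE are placeholders, and the intervals are illustrative:

#include <linux/iopoll.h>

static int example_wait_for_state(struct analogix_dp_device *dp)
{
	ssize_t status;
	int ret;

	/* call my_get_status(dp) every 1500us, give up after 300ms */
	ret = readx_poll_timeout(my_get_status, dp, status,
				 status >= 0 && status == MY_EXPECTED_STATE,
				 1500, 300 * 1000);
	if (ret)
		dev_warn(dp->dev, "state did not settle: %d\n", ret);

	return ret;
}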
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index de5e7de..498d594 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
@@ -204,6 +205,7 @@ static int dumb_vga_probe(struct platform_device *pdev)
vga->bridge.funcs = &dumb_vga_bridge_funcs;
vga->bridge.of_node = pdev->dev.of_node;
+ vga->bridge.timings = of_device_get_match_data(&pdev->dev);
drm_bridge_add(&vga->bridge);
@@ -222,10 +224,61 @@ static int dumb_vga_remove(struct platform_device *pdev)
return 0;
}
+/*
+ * We assume the ADV7123 DAC is the "default" for historical reasons
+ * Information taken from the ADV7123 datasheet, revision D.
+ * NOTE: the ADV7123EP seems to have other timings and need a new timings
+ * set if used.
+ */
+static const struct drm_bridge_timings default_dac_timings = {
+ /* Timing specifications, datasheet page 7 */
+ .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .setup_time_ps = 500,
+ .hold_time_ps = 1500,
+};
+
+/*
+ * Information taken from the THS8134, THS8134A, THS8134B datasheet named
+ * "SLVS205D", dated May 1990, revised March 2000.
+ */
+static const struct drm_bridge_timings ti_ths8134_dac_timings = {
+ /* From timing diagram, datasheet page 9 */
+ .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ /* From datasheet, page 12 */
+ .setup_time_ps = 3000,
+ /* I guess this means latched input */
+ .hold_time_ps = 0,
+};
+
+/*
+ * Information taken from the THS8135 datasheet named "SLAS343B", dated
+ * May 2001, revised April 2013.
+ */
+static const struct drm_bridge_timings ti_ths8135_dac_timings = {
+ /* From timing diagram, datasheet page 14 */
+ .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ /* From datasheet, page 16 */
+ .setup_time_ps = 2000,
+ .hold_time_ps = 500,
+};
+
static const struct of_device_id dumb_vga_match[] = {
- { .compatible = "dumb-vga-dac" },
- { .compatible = "adi,adv7123" },
- { .compatible = "ti,ths8135" },
+ {
+ .compatible = "dumb-vga-dac",
+ .data = NULL,
+ },
+ {
+ .compatible = "adi,adv7123",
+ .data = &default_dac_timings,
+ },
+ {
+ .compatible = "ti,ths8135",
+ .data = &ti_ths8135_dac_timings,
+ },
+ {
+ .compatible = "ti,ths8134",
+ .data = &ti_ths8134_dac_timings,
+ },
{},
};
MODULE_DEVICE_TABLE(of, dumb_vga_match);
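The dumb-vga-dac change hangs a per-compatible timings structure off of_device_id.data and retrieves it with of_device_get_match_data() at probe time. A minimal sketch of that lookup pattern; the my_timings structure, the compatibles and the probe function are all placeholders:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_timings {
	unsigned int setup_time_ps;
	unsigned int hold_time_ps;
};

static const struct my_timings my_fast_dac_timings = {
	.setup_time_ps = 500,
	.hold_time_ps = 1500,
};

static const struct of_device_id my_dac_match[] = {
	{ .compatible = "vendor,fast-dac", .data = &my_fast_dac_timings },
	{ .compatible = "vendor,plain-dac", .data = NULL },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_dac_match);

static int my_dac_probe(struct platform_device *pdev)
{
	/* NULL when the matched entry carries no per-device data */
	const struct my_timings *t = of_device_get_match_data(&pdev->dev);

	if (t)
		dev_info(&pdev->dev, "setup %u ps, hold %u ps\n",
			 t->setup_time_ps, t->hold_time_ps);
	return 0;
}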
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index b1ab4ab..60373d7 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -137,7 +137,9 @@ static int sii902x_get_modes(struct drm_connector *connector)
struct sii902x *sii902x = connector_to_sii902x(connector);
struct regmap *regmap = sii902x->regmap;
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ struct device *dev = &sii902x->i2c->dev;
unsigned long timeout;
+ unsigned int retries;
unsigned int status;
struct edid *edid;
int num = 0;
@@ -159,7 +161,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
time_before(jiffies, timeout));
if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
- dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n");
+ dev_err(dev, "failed to acquire the i2c bus\n");
return -ETIMEDOUT;
}
@@ -179,9 +181,19 @@ static int sii902x_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+ /*
+ * Sometimes the I2C bus can stall after failure to use the
+ * EDID channel. Retry a few times to see if things clear
+ * up, else continue anyway.
+ */
+ retries = 5;
+ do {
+ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA,
+ &status);
+ retries--;
+ } while (ret && retries);
if (ret)
- return ret;
+ dev_err(dev, "failed to read status (%d)\n", ret);
ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
SII902X_SYS_CTRL_DDC_BUS_REQ |
@@ -201,7 +213,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
- dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n");
+ dev_err(dev, "failed to release the i2c bus\n");
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index b7eb704..7ab3604 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/extcon.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -25,6 +26,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -81,6 +83,10 @@ struct sii8620 {
struct edid *edid;
unsigned int gen2_write_burst:1;
enum sii8620_mt_state mt_state;
+ struct extcon_dev *extcon;
+ struct notifier_block extcon_nb;
+ struct work_struct extcon_wq;
+ int cable_state;
struct list_head mt_queue;
struct {
int r_size;
@@ -1169,8 +1175,18 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
sii8620_write_buf(ctx, REG_TPI_INFO_B0, buf, ret);
}
-static void sii8620_start_hdmi(struct sii8620 *ctx)
+static void sii8620_start_video(struct sii8620 *ctx)
{
+ if (!sii8620_is_mhl3(ctx))
+ sii8620_stop_video(ctx);
+
+ if (ctx->sink_type == SINK_DVI && !sii8620_is_mhl3(ctx)) {
+ sii8620_write(ctx, REG_RX_HDMI_CTRL2,
+ VAL_RX_HDMI_CTRL2_DEFVAL);
+ sii8620_write(ctx, REG_TPI_SC, 0);
+ return;
+ }
+
sii8620_write_seq_static(ctx,
REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL
| BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
@@ -1229,21 +1245,6 @@ static void sii8620_start_hdmi(struct sii8620 *ctx)
sii8620_set_infoframes(ctx);
}
-static void sii8620_start_video(struct sii8620 *ctx)
-{
- if (!sii8620_is_mhl3(ctx))
- sii8620_stop_video(ctx);
-
- switch (ctx->sink_type) {
- case SINK_HDMI:
- sii8620_start_hdmi(ctx);
- break;
- case SINK_DVI:
- default:
- break;
- }
-}
-
static void sii8620_disable_hpd(struct sii8620 *ctx)
{
sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0);
@@ -1945,8 +1946,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
if (stat & BIT_INTR_SCDT_CHANGE) {
u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
- if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
- sii8620_scdt_high(ctx);
+ if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
+ if (ctx->sink_type == SINK_HDMI)
+ /* enable infoframe interrupt */
+ sii8620_scdt_high(ctx);
+ else
+ sii8620_start_video(ctx);
+ }
}
sii8620_write(ctx, REG_INTR5, stat);
@@ -2170,6 +2176,77 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
ctx->rc_dev = rc_dev;
}
+static void sii8620_cable_out(struct sii8620 *ctx)
+{
+ disable_irq(to_i2c_client(ctx->dev)->irq);
+ sii8620_hw_off(ctx);
+}
+
+static void sii8620_extcon_work(struct work_struct *work)
+{
+ struct sii8620 *ctx =
+ container_of(work, struct sii8620, extcon_wq);
+ int state = extcon_get_state(ctx->extcon, EXTCON_DISP_MHL);
+
+ if (state == ctx->cable_state)
+ return;
+
+ ctx->cable_state = state;
+
+ if (state > 0)
+ sii8620_cable_in(ctx);
+ else
+ sii8620_cable_out(ctx);
+}
+
+static int sii8620_extcon_notifier(struct notifier_block *self,
+ unsigned long event, void *ptr)
+{
+ struct sii8620 *ctx =
+ container_of(self, struct sii8620, extcon_nb);
+
+ schedule_work(&ctx->extcon_wq);
+
+ return NOTIFY_DONE;
+}
+
+static int sii8620_extcon_init(struct sii8620 *ctx)
+{
+ struct extcon_dev *edev;
+ struct device_node *musb, *muic;
+ int ret;
+
+ /* get micro-USB connector node */
+ musb = of_graph_get_remote_node(ctx->dev->of_node, 1, -1);
+ /* next get micro-USB Interface Controller node */
+ muic = of_get_next_parent(musb);
+
+ if (!muic) {
+ dev_info(ctx->dev, "no extcon found, switching to 'always on' mode\n");
+ return 0;
+ }
+
+ edev = extcon_find_edev_by_node(muic);
+ of_node_put(muic);
+ if (IS_ERR(edev)) {
+ if (PTR_ERR(edev) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(ctx->dev, "Invalid or missing extcon\n");
+ return PTR_ERR(edev);
+ }
+
+ ctx->extcon = edev;
+ ctx->extcon_nb.notifier_call = sii8620_extcon_notifier;
+ INIT_WORK(&ctx->extcon_wq, sii8620_extcon_work);
+ ret = extcon_register_notifier(edev, EXTCON_DISP_MHL, &ctx->extcon_nb);
+ if (ret) {
+ dev_err(ctx->dev, "failed to register notifier for MHL\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
{
return container_of(bridge, struct sii8620, bridge);
@@ -2191,6 +2268,19 @@ static void sii8620_detach(struct drm_bridge *bridge)
rc_unregister_device(ctx->rc_dev);
}
+static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
+ MHL_DCAP_VID_LINK_PPIXEL;
+ unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
+ MHL1_MAX_LCLK;
+ max_pclk /= can_pack ? 2 : 3;
+
+ return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
+}
+
static bool sii8620_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -2220,8 +2310,9 @@ end:
union hdmi_infoframe frm;
u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
+ /* FIXME: We need the connector here */
drm_hdmi_vendor_infoframe_from_display_mode(
- &frm.vendor.hdmi, adjusted_mode);
+ &frm.vendor.hdmi, NULL, adjusted_mode);
vic = frm.vendor.hdmi.vic;
if (vic >= ARRAY_SIZE(mhl_vic))
vic = 0;
@@ -2238,6 +2329,7 @@ static const struct drm_bridge_funcs sii8620_bridge_funcs = {
.attach = sii8620_attach,
.detach = sii8620_detach,
.mode_fixup = sii8620_mode_fixup,
+ .mode_valid = sii8620_mode_valid,
};
static int sii8620_probe(struct i2c_client *client,
@@ -2287,13 +2379,20 @@ static int sii8620_probe(struct i2c_client *client,
if (ret)
return ret;
+ ret = sii8620_extcon_init(ctx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "failed to initialize EXTCON\n");
+ return ret;
+ }
+
i2c_set_clientdata(client, ctx);
ctx->bridge.funcs = &sii8620_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
- sii8620_cable_in(ctx);
+ if (!ctx->extcon)
+ sii8620_cable_in(ctx);
return 0;
}
@@ -2302,8 +2401,15 @@ static int sii8620_remove(struct i2c_client *client)
{
struct sii8620 *ctx = i2c_get_clientdata(client);
- disable_irq(to_i2c_client(ctx->dev)->irq);
- sii8620_hw_off(ctx);
+ if (ctx->extcon) {
+ extcon_unregister_notifier(ctx->extcon, EXTCON_DISP_MHL,
+ &ctx->extcon_nb);
+ flush_work(&ctx->extcon_wq);
+ if (ctx->cable_state > 0)
+ sii8620_cable_out(ctx);
+ } else {
+ sii8620_cable_out(ctx);
+ }
drm_bridge_remove(&ctx->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index b72259b..ec8d000 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -147,7 +147,6 @@ struct dw_hdmi {
int vic;
u8 edid[HDMI_EDID_LEN];
- bool cable_plugin;
struct {
const struct dw_hdmi_phy_ops *ops;
@@ -1037,19 +1036,21 @@ static void dw_hdmi_phy_enable_svsret(struct dw_hdmi *hdmi, u8 enable)
HDMI_PHY_CONF0_SVSRET_MASK);
}
-static void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable)
+void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable)
{
hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET,
HDMI_PHY_CONF0_GEN2_PDDQ_MASK);
}
+EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen2_pddq);
-static void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable)
+void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable)
{
hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET,
HDMI_PHY_CONF0_GEN2_TXPWRON_MASK);
}
+EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen2_txpwron);
static void dw_hdmi_phy_sel_data_en_pol(struct dw_hdmi *hdmi, u8 enable)
{
@@ -1065,6 +1066,22 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable)
HDMI_PHY_CONF0_SELDIPIF_MASK);
}
+void dw_hdmi_phy_reset(struct dw_hdmi *hdmi)
+{
+ /* PHY reset. The reset signal is active high on Gen2 PHYs. */
+ hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ);
+ hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_phy_reset);
+
+void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address)
+{
+ hdmi_phy_test_clear(hdmi, 1);
+ hdmi_writeb(hdmi, address, HDMI_PHY_I2CM_SLAVE_ADDR);
+ hdmi_phy_test_clear(hdmi, 0);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_set_addr);
+
static void dw_hdmi_phy_power_off(struct dw_hdmi *hdmi)
{
const struct dw_hdmi_phy_data *phy = hdmi->phy.data;
@@ -1203,16 +1220,11 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi)
if (phy->has_svsret)
dw_hdmi_phy_enable_svsret(hdmi, 1);
- /* PHY reset. The reset signal is active high on Gen2 PHYs. */
- hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ);
- hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ);
+ dw_hdmi_phy_reset(hdmi);
hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);
- hdmi_phy_test_clear(hdmi, 1);
- hdmi_writeb(hdmi, HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2,
- HDMI_PHY_I2CM_SLAVE_ADDR);
- hdmi_phy_test_clear(hdmi, 0);
+ dw_hdmi_phy_i2c_set_addr(hdmi, HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2);
/* Write to the PHY as configured by the platform */
if (pdata->configure_phy)
@@ -1251,15 +1263,16 @@ static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi, void *data)
dw_hdmi_phy_power_off(hdmi);
}
-static enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
- void *data)
+enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
+ void *data)
{
return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ?
connector_status_connected : connector_status_disconnected;
}
+EXPORT_SYMBOL_GPL(dw_hdmi_phy_read_hpd);
-static void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
- bool force, bool disabled, bool rxsense)
+void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
+ bool force, bool disabled, bool rxsense)
{
u8 old_mask = hdmi->phy_mask;
@@ -1271,8 +1284,9 @@ static void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
if (old_mask != hdmi->phy_mask)
hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
}
+EXPORT_SYMBOL_GPL(dw_hdmi_phy_update_hpd);
-static void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data)
+void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data)
{
/*
* Configure the PHY RX SENSE and HPD interrupts polarities and clear
@@ -1291,6 +1305,7 @@ static void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data)
hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
HDMI_IH_MUTE_PHY_STAT0);
}
+EXPORT_SYMBOL_GPL(dw_hdmi_phy_setup_hpd);
static const struct dw_hdmi_phy_ops dw_hdmi_synopsys_phy_ops = {
.init = dw_hdmi_phy_init,
@@ -1438,7 +1453,9 @@ static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi,
u8 buffer[10];
ssize_t err;
- err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&frame,
+ &hdmi->connector,
+ mode);
if (err < 0)
/*
* Going into that statement does not means vendor infoframe
@@ -1632,9 +1649,12 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
* then write one of the FC registers several times.
*
* The number of iterations matters and depends on the HDMI TX revision
- * (and possibly on the platform). So far only i.MX6Q (v1.30a) and
- * i.MX6DL (v1.31a) have been identified as needing the workaround, with
- * 4 and 1 iterations respectively.
+ * (and possibly on the platform). So far i.MX6Q (v1.30a), i.MX6DL
+ * (v1.31a) and multiple Allwinner SoCs (v1.32a) have been identified
+ * as needing the workaround, with 4 iterations for v1.30a and 1
+ * iteration for others.
+ * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
+ * the workaround with a single iteration.
*/
switch (hdmi->version) {
@@ -1642,6 +1662,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
count = 4;
break;
case 0x131a:
+ case 0x132a:
+ case 0x201a:
count = 1;
break;
default:
@@ -1656,12 +1678,6 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF);
}
-static void hdmi_enable_overflow_interrupts(struct dw_hdmi *hdmi)
-{
- hdmi_writeb(hdmi, 0, HDMI_FC_MASK2);
- hdmi_writeb(hdmi, 0, HDMI_IH_MUTE_FC_STAT2);
-}
-
static void hdmi_disable_overflow_interrupts(struct dw_hdmi *hdmi)
{
hdmi_writeb(hdmi, HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK,
@@ -1751,8 +1767,6 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
hdmi_tx_hdcp_config(hdmi);
dw_hdmi_clear_overflow(hdmi);
- if (hdmi->cable_plugin && hdmi->sink_is_hdmi)
- hdmi_enable_overflow_interrupts(hdmi);
return 0;
}
@@ -1911,8 +1925,6 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
ret = drm_add_edid_modes(connector, edid);
- /* Store the ELD */
- drm_edid_to_eld(connector, edid);
kfree(edid);
} else {
dev_dbg(hdmi->dev, "failed to get edid\n");
@@ -2525,8 +2537,6 @@ __dw_hdmi_probe(struct platform_device *pdev,
if (hdmi->i2c)
dw_hdmi_i2c_init(hdmi);
- platform_set_drvdata(pdev, hdmi);
-
return hdmi;
err_iahb:
@@ -2576,25 +2586,23 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
/* -----------------------------------------------------------------------------
* Probe/remove API, used from platforms based on the DRM bridge API.
*/
-int dw_hdmi_probe(struct platform_device *pdev,
- const struct dw_hdmi_plat_data *plat_data)
+struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
+ const struct dw_hdmi_plat_data *plat_data)
{
struct dw_hdmi *hdmi;
hdmi = __dw_hdmi_probe(pdev, plat_data);
if (IS_ERR(hdmi))
- return PTR_ERR(hdmi);
+ return hdmi;
drm_bridge_add(&hdmi->bridge);
- return 0;
+ return hdmi;
}
EXPORT_SYMBOL_GPL(dw_hdmi_probe);
-void dw_hdmi_remove(struct platform_device *pdev)
+void dw_hdmi_remove(struct dw_hdmi *hdmi)
{
- struct dw_hdmi *hdmi = platform_get_drvdata(pdev);
-
drm_bridge_remove(&hdmi->bridge);
__dw_hdmi_remove(hdmi);
@@ -2604,31 +2612,30 @@ EXPORT_SYMBOL_GPL(dw_hdmi_remove);
/* -----------------------------------------------------------------------------
* Bind/unbind API, used from platforms based on the component framework.
*/
-int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
- const struct dw_hdmi_plat_data *plat_data)
+struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
+ struct drm_encoder *encoder,
+ const struct dw_hdmi_plat_data *plat_data)
{
struct dw_hdmi *hdmi;
int ret;
hdmi = __dw_hdmi_probe(pdev, plat_data);
if (IS_ERR(hdmi))
- return PTR_ERR(hdmi);
+ return hdmi;
ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL);
if (ret) {
- dw_hdmi_remove(pdev);
+ dw_hdmi_remove(hdmi);
DRM_ERROR("Failed to initialize bridge with drm\n");
- return ret;
+ return ERR_PTR(ret);
}
- return 0;
+ return hdmi;
}
EXPORT_SYMBOL_GPL(dw_hdmi_bind);
-void dw_hdmi_unbind(struct device *dev)
+void dw_hdmi_unbind(struct dw_hdmi *hdmi)
{
- struct dw_hdmi *hdmi = dev_get_drvdata(dev);
-
__dw_hdmi_remove(hdmi);
}
EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
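dw_hdmi_probe()/dw_hdmi_bind() now return the struct dw_hdmi handle and the core no longer calls platform_set_drvdata(), so glue drivers are expected to keep the pointer themselves for their remove/unbind paths. A minimal sketch of the caller side under those assumptions (the encoder, plat_data and wrapper functions belong to a hypothetical glue driver):

#include <linux/err.h>
#include <linux/platform_device.h>

static int example_hdmi_bind(struct platform_device *pdev,
			     struct drm_encoder *encoder,
			     const struct dw_hdmi_plat_data *plat_data)
{
	struct dw_hdmi *hdmi;

	hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
	if (IS_ERR(hdmi))
		return PTR_ERR(hdmi);

	/* the core no longer sets drvdata; keep the handle ourselves so
	 * the unbind path can hand it back to dw_hdmi_unbind() */
	platform_set_drvdata(pdev, hdmi);
	return 0;
}

static void example_hdmi_unbind(struct platform_device *pdev)
{
	dw_hdmi_unbind(platform_get_drvdata(pdev));
}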
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index d9cca4f..226171a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -29,7 +29,10 @@
#include <drm/bridge/dw_mipi_dsi.h>
#include <video/mipi_display.h>
+#define HWVER_131 0x31333100 /* IP version 1.31 */
+
#define DSI_VERSION 0x00
+#define VERSION GENMASK(31, 8)
#define DSI_PWR_UP 0x04
#define RESET 0
@@ -136,10 +139,6 @@
GEN_SW_0P_TX_LP)
#define DSI_GEN_HDR 0x6c
-/* TODO These 2 defines will be reworked thanks to mipi_dsi_create_packet() */
-#define GEN_HDATA(data) (((data) & 0xffff) << 8)
-#define GEN_HTYPE(type) (((type) & 0xff) << 0)
-
#define DSI_GEN_PLD_DATA 0x70
#define DSI_CMD_PKT_STATUS 0x74
@@ -169,11 +168,12 @@
#define PHY_CLKHS2LP_TIME(lbcc) (((lbcc) & 0x3ff) << 16)
#define PHY_CLKLP2HS_TIME(lbcc) ((lbcc) & 0x3ff)
-/* TODO Next register is slightly different between 1.30 & 1.31 IP version */
#define DSI_PHY_TMR_CFG 0x9c
#define PHY_HS2LP_TIME(lbcc) (((lbcc) & 0xff) << 24)
#define PHY_LP2HS_TIME(lbcc) (((lbcc) & 0xff) << 16)
#define MAX_RD_TIME(lbcc) ((lbcc) & 0x7fff)
+#define PHY_HS2LP_TIME_V131(lbcc) (((lbcc) & 0x3ff) << 16)
+#define PHY_LP2HS_TIME_V131(lbcc) ((lbcc) & 0x3ff)
#define DSI_PHY_RSTZ 0xa0
#define PHY_DISFORCEPLL 0
@@ -212,7 +212,9 @@
#define DSI_INT_ST1 0xc0
#define DSI_INT_MSK0 0xc4
#define DSI_INT_MSK1 0xc8
+
#define DSI_PHY_TMR_RD_CFG 0xf4
+#define MAX_RD_TIME_V131(lbcc) ((lbcc) & 0x7fff)
#define PHY_STATUS_TIMEOUT_US 10000
#define CMD_PKT_STATUS_TIMEOUT_US 20000
@@ -340,7 +342,7 @@ static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
val, !(val & GEN_CMD_FULL), 1000,
CMD_PKT_STATUS_TIMEOUT_US);
- if (ret < 0) {
+ if (ret) {
dev_err(dsi->dev, "failed to get available command FIFO\n");
return ret;
}
@@ -351,7 +353,7 @@ static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
val, (val & mask) == mask,
1000, CMD_PKT_STATUS_TIMEOUT_US);
- if (ret < 0) {
+ if (ret) {
dev_err(dsi->dev, "failed to write command FIFO\n");
return ret;
}
@@ -359,52 +361,23 @@ static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
return 0;
}
-static int dw_mipi_dsi_dcs_short_write(struct dw_mipi_dsi *dsi,
- const struct mipi_dsi_msg *msg)
+static int dw_mipi_dsi_write(struct dw_mipi_dsi *dsi,
+ const struct mipi_dsi_packet *packet)
{
- const u8 *tx_buf = msg->tx_buf;
- u16 data = 0;
+ const u8 *tx_buf = packet->payload;
+ int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret;
+ __le32 word;
u32 val;
- if (msg->tx_len > 0)
- data |= tx_buf[0];
- if (msg->tx_len > 1)
- data |= tx_buf[1] << 8;
-
- if (msg->tx_len > 2) {
- dev_err(dsi->dev, "too long tx buf length %zu for short write\n",
- msg->tx_len);
- return -EINVAL;
- }
-
- val = GEN_HDATA(data) | GEN_HTYPE(msg->type);
- return dw_mipi_dsi_gen_pkt_hdr_write(dsi, val);
-}
-
-static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
- const struct mipi_dsi_msg *msg)
-{
- const u8 *tx_buf = msg->tx_buf;
- int len = msg->tx_len, pld_data_bytes = sizeof(u32), ret;
- u32 hdr_val = GEN_HDATA(msg->tx_len) | GEN_HTYPE(msg->type);
- u32 remainder;
- u32 val;
-
- if (msg->tx_len < 3) {
- dev_err(dsi->dev, "wrong tx buf length %zu for long write\n",
- msg->tx_len);
- return -EINVAL;
- }
-
- while (DIV_ROUND_UP(len, pld_data_bytes)) {
+ while (len) {
if (len < pld_data_bytes) {
- remainder = 0;
- memcpy(&remainder, tx_buf, len);
- dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
+ word = 0;
+ memcpy(&word, tx_buf, len);
+ dsi_write(dsi, DSI_GEN_PLD_DATA, le32_to_cpu(word));
len = 0;
} else {
- memcpy(&remainder, tx_buf, pld_data_bytes);
- dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
+ memcpy(&word, tx_buf, pld_data_bytes);
+ dsi_write(dsi, DSI_GEN_PLD_DATA, le32_to_cpu(word));
tx_buf += pld_data_bytes;
len -= pld_data_bytes;
}
@@ -412,47 +385,81 @@ static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
val, !(val & GEN_PLD_W_FULL), 1000,
CMD_PKT_STATUS_TIMEOUT_US);
- if (ret < 0) {
+ if (ret) {
dev_err(dsi->dev,
"failed to get available write payload FIFO\n");
return ret;
}
}
- return dw_mipi_dsi_gen_pkt_hdr_write(dsi, hdr_val);
+ word = 0;
+ memcpy(&word, packet->header, sizeof(packet->header));
+ return dw_mipi_dsi_gen_pkt_hdr_write(dsi, le32_to_cpu(word));
+}
+
+static int dw_mipi_dsi_read(struct dw_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ int i, j, ret, len = msg->rx_len;
+ u8 *buf = msg->rx_buf;
+ u32 val;
+
+ /* Wait for the end of the read operation */
+ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_RD_CMD_BUSY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(dsi->dev, "Timeout during read operation\n");
+ return ret;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ /* Read fifo must not be empty before all bytes are read */
+ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_R_EMPTY),
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(dsi->dev, "Read payload FIFO is empty\n");
+ return ret;
+ }
+
+ val = dsi_read(dsi, DSI_GEN_PLD_DATA);
+ for (j = 0; j < 4 && j + i < len; j++)
+ buf[i + j] = val >> (8 * j);
+ }
+
+ return ret;
}
static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct dw_mipi_dsi *dsi = host_to_dsi(host);
- int ret;
+ struct mipi_dsi_packet packet;
+ int ret, nb_bytes;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ dev_err(dsi->dev, "failed to create packet: %d\n", ret);
+ return ret;
+ }
- /*
- * TODO dw drv improvements
- * use mipi_dsi_create_packet() instead of all following
- * functions and code (no switch cases, no
- * dw_mipi_dsi_dcs_short_write(), only the loop in long_write...)
- * and use packet.header...
- */
dw_mipi_message_config(dsi, msg);
- switch (msg->type) {
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
- ret = dw_mipi_dsi_dcs_short_write(dsi, msg);
- break;
- case MIPI_DSI_DCS_LONG_WRITE:
- ret = dw_mipi_dsi_dcs_long_write(dsi, msg);
- break;
- default:
- dev_err(dsi->dev, "unsupported message type 0x%02x\n",
- msg->type);
- ret = -EINVAL;
+ ret = dw_mipi_dsi_write(dsi, &packet);
+ if (ret)
+ return ret;
+
+ if (msg->rx_buf && msg->rx_len) {
+ ret = dw_mipi_dsi_read(dsi, msg);
+ if (ret)
+ return ret;
+ nb_bytes = msg->rx_len;
+ } else {
+ nb_bytes = packet.size;
}
- return ret;
+ return nb_bytes;
}
static const struct mipi_dsi_host_ops dw_mipi_dsi_host_ops = {
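The rework above relies on the core helper mipi_dsi_create_packet() to build the 4-byte header that dw_mipi_dsi_write() finally pushes into DSI_GEN_HDR. A hedged sketch of the flow (the payload bytes and local variable names are illustrative, not part of the patch):

struct mipi_dsi_msg msg = {
        .type   = MIPI_DSI_DCS_LONG_WRITE,
        .tx_buf = (u8 []){ 0x2c, 0x01, 0x02, 0x03, 0x04 },
        .tx_len = 5,
};
struct mipi_dsi_packet packet;
int ret;

ret = mipi_dsi_create_packet(&packet, &msg);    /* fills header[4] + payload */
if (!ret)
        ret = dw_mipi_dsi_write(dsi, &packet);  /* payload words, then DSI_GEN_HDR */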
@@ -658,6 +665,8 @@ static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
{
+ u32 hw_version;
+
/*
* TODO dw drv improvements
* data & clock lane timers should be computed according to panel
@@ -665,8 +674,17 @@ static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
* note: DSI_PHY_TMR_CFG.MAX_RD_TIME should be in line with
* DSI_CMD_MODE_CFG.MAX_RD_PKT_SIZE_LP (see CMD_MODE_ALL_LP)
*/
- dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40)
- | PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
+
+ hw_version = dsi_read(dsi, DSI_VERSION) & VERSION;
+
+ if (hw_version >= HWVER_131) {
+ dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME_V131(0x40) |
+ PHY_LP2HS_TIME_V131(0x40));
+ dsi_write(dsi, DSI_PHY_TMR_RD_CFG, MAX_RD_TIME_V131(10000));
+ } else {
+ dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40) |
+ PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
+ }
dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
| PHY_CLKLP2HS_TIME(0x40));
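A hedged note on the version check above: the VERSION field (bits 31:8 of DSI_VERSION) appears to hold the IP version as ASCII digits, so HWVER_131 == 0x31333100 is simply '1' '3' '1' left-aligned in the field, and a plain numeric compare selects the 1.31 register layout (split PHY timer registers). A decode sketch, assuming that encoding:

u32 ver = dsi_read(dsi, DSI_VERSION) & VERSION;          /* e.g. 0x31333100 */

pr_debug("dw-mipi-dsi IP version %c.%c%c\n",
         ver >> 24, (ver >> 16) & 0xff, (ver >> 8) & 0xff);      /* "1.31" */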
@@ -703,13 +721,13 @@ static void dw_mipi_dsi_dphy_enable(struct dw_mipi_dsi *dsi)
ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS, val,
val & PHY_LOCK, 1000, PHY_STATUS_TIMEOUT_US);
- if (ret < 0)
+ if (ret)
DRM_DEBUG_DRIVER("failed to wait phy lock state\n");
ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
val, val & PHY_STOP_STATE_CLK_LANE, 1000,
PHY_STATUS_TIMEOUT_US);
- if (ret < 0)
+ if (ret)
DRM_DEBUG_DRIVER("failed to wait phy clk lane stop state\n");
}
@@ -746,9 +764,9 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
pm_runtime_put(dsi->dev);
}
-void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
@@ -922,8 +940,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
dsi->bridge.of_node = pdev->dev.of_node;
#endif
- dev_set_drvdata(dev, dsi);
-
return dsi;
}
@@ -935,23 +951,16 @@ static void __dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
/*
* Probe/remove API, used from platforms based on the DRM bridge API.
*/
-int dw_mipi_dsi_probe(struct platform_device *pdev,
- const struct dw_mipi_dsi_plat_data *plat_data)
+struct dw_mipi_dsi *
+dw_mipi_dsi_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi_plat_data *plat_data)
{
- struct dw_mipi_dsi *dsi;
-
- dsi = __dw_mipi_dsi_probe(pdev, plat_data);
- if (IS_ERR(dsi))
- return PTR_ERR(dsi);
-
- return 0;
+ return __dw_mipi_dsi_probe(pdev, plat_data);
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_probe);
-void dw_mipi_dsi_remove(struct platform_device *pdev)
+void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
{
- struct dw_mipi_dsi *dsi = platform_get_drvdata(pdev);
-
mipi_dsi_host_unregister(&dsi->dsi_host);
__dw_mipi_dsi_remove(dsi);
@@ -961,31 +970,30 @@ EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove);
/*
* Bind/unbind API, used from platforms based on the component framework.
*/
-int dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
- const struct dw_mipi_dsi_plat_data *plat_data)
+struct dw_mipi_dsi *
+dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
+ const struct dw_mipi_dsi_plat_data *plat_data)
{
struct dw_mipi_dsi *dsi;
int ret;
dsi = __dw_mipi_dsi_probe(pdev, plat_data);
if (IS_ERR(dsi))
- return PTR_ERR(dsi);
+ return dsi;
ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
if (ret) {
- dw_mipi_dsi_remove(pdev);
+ dw_mipi_dsi_remove(dsi);
DRM_ERROR("Failed to initialize bridge with drm\n");
- return ret;
+ return ERR_PTR(ret);
}
- return 0;
+ return dsi;
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_bind);
-void dw_mipi_dsi_unbind(struct device *dev)
+void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi)
{
- struct dw_mipi_dsi *dsi = dev_get_drvdata(dev);
-
__dw_mipi_dsi_remove(dsi);
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_unbind);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8636e7e..08ab7d6a 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -6,6 +6,8 @@
*
* Copyright (C) 2016 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
*
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
* Initially based on: drivers/gpu/drm/i2c/tda998x_drv.c
*
* Copyright (C) 2012 Texas Instruments
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index b5f5285..26df1e8 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -13,6 +13,14 @@
#include "cirrus_drv.h"
+static int cirrus_create_handle(struct drm_framebuffer *fb,
+ struct drm_file* file_priv,
+ unsigned int* handle)
+{
+ struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
+
+ return drm_gem_handle_create(file_priv, cirrus_fb->obj, handle);
+}
static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
@@ -24,6 +32,7 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
}
static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
+ .create_handle = cirrus_create_handle,
.destroy = cirrus_user_framebuffer_destroy,
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index cd23b1b..c91b9b0 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc)
{
}
-/*
- * This is called after a mode is programmed. It should reverse anything done
- * by the prepare function
- */
-static void cirrus_crtc_commit(struct drm_crtc *crtc)
-{
-}
-
-/*
- * The core can pass us a set of gamma values to program. We actually only
- * use this for 8-bit mode so can't perform smooth fades on deeper modes,
- * but it's a requirement that we provide the function
- */
-static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct cirrus_device *cdev = dev->dev_private;
@@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
int i;
if (!crtc->enabled)
- return 0;
+ return;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
@@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
WREG8(PALETTE_DATA, *g++ >> 8);
WREG8(PALETTE_DATA, *b++ >> 8);
}
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+ cirrus_crtc_load_lut(crtc);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ cirrus_crtc_load_lut(crtc);
return 0;
}
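For context on the refactor above, a hedged sketch of the crtc->gamma_store layout that the new cirrus_crtc_load_lut() walks; the DRM core keeps the legacy gamma ramp as three consecutive arrays of gamma_size u16 entries:

u16 *r = crtc->gamma_store;             /* red[gamma_size]   */
u16 *g = r + crtc->gamma_size;          /* green[gamma_size] */
u16 *b = g + crtc->gamma_size;          /* blue[gamma_size]  */

/* 16 bits per channel; the Cirrus palette takes only the top 8 bits */
WREG8(PALETTE_DATA, *r >> 8);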
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1ff1838..f219532 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -199,9 +199,8 @@ static struct ttm_backend_func cirrus_tt_backend_func = {
};
-static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct ttm_tt *tt;
@@ -209,27 +208,15 @@ static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
if (tt == NULL)
return NULL;
tt->func = &cirrus_tt_backend_func;
- if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
return tt;
}
-static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
-{
- return ttm_pool_populate(ttm);
-}
-
-static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate(ttm);
-}
-
struct ttm_bo_driver cirrus_bo_driver = {
.ttm_tt_create = cirrus_ttm_tt_create,
- .ttm_tt_populate = cirrus_ttm_tt_populate,
- .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
.init_mem_type = cirrus_bo_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = cirrus_bo_evict_flags,
@@ -237,7 +224,6 @@ struct ttm_bo_driver cirrus_bo_driver = {
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int cirrus_mm_init(struct cirrus_device *cirrus)
@@ -342,7 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
ttm_bo_type_device, &cirrusbo->placement,
- align >> PAGE_SHIFT, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, acc_size,
NULL, NULL, cirrus_bo_ttm_destroy);
if (ret)
return ret;
@@ -358,6 +344,7 @@ static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
@@ -369,7 +356,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
@@ -381,6 +368,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
int cirrus_bo_push_sysram(struct cirrus_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
@@ -397,7 +385,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
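The two hunks above follow the TTM API change that replaced ttm_bo_validate()'s separate bool arguments with a struct ttm_operation_ctx. A hedged sketch of what the positional initializer corresponds to (assuming the contemporary field order):

struct ttm_operation_ctx ctx = {
        .interruptible = false,         /* first positional member */
        .no_wait_gpu   = false,         /* second positional member */
};

ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);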
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c2da558..7d25c42 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -33,6 +33,7 @@
#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
void __drm_crtc_commit_free(struct kref *kref)
{
@@ -49,7 +50,8 @@ EXPORT_SYMBOL(__drm_crtc_commit_free);
* @state: atomic state
*
* Free all the memory allocated by drm_atomic_state_init.
- * This is useful for drivers that subclass the atomic state.
+ * This should only be used by drivers which are still subclassing
+ * &drm_atomic_state and haven't switched to &drm_private_state yet.
*/
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
@@ -66,7 +68,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_release);
* @state: atomic state
*
* Default implementation for filling in a new atomic state.
- * This is useful for drivers that subclass the atomic state.
+ * This should only be used by drivers which are still subclassing
+ * &drm_atomic_state and haven't switched to &drm_private_state yet.
*/
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
@@ -131,7 +134,8 @@ EXPORT_SYMBOL(drm_atomic_state_alloc);
* @state: atomic state
*
* Default implementation for clearing atomic state.
- * This is useful for drivers that subclass the atomic state.
+ * This should only be used by drivers which are still subclassing
+ * &drm_atomic_state and haven't switched to &drm_private_state yet.
*/
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
@@ -386,9 +390,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
if (blob) {
if (blob->length != sizeof(struct drm_mode_modeinfo) ||
- drm_mode_convert_umode(&state->mode,
- (const struct drm_mode_modeinfo *)
- blob->data))
+ drm_mode_convert_umode(state->crtc->dev, &state->mode,
+ blob->data))
return -EINVAL;
state->mode_blob = drm_property_blob_get(blob);
@@ -405,11 +408,36 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
+/**
+ * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it
+ * @dev: DRM device
+ * @blob: a pointer to the member blob to be replaced
+ * @blob_id: ID of the new blob
+ * @expected_size: total expected size of the blob data (in bytes)
+ * @expected_elem_size: expected element size of the blob data (in bytes)
+ * @replaced: did the blob get replaced?
+ *
+ * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero
+ * @blob becomes NULL.
+ *
+ * If @expected_size is positive the new blob length is expected to be equal
+ * to @expected_size bytes. If @expected_elem_size is positive the new blob
+ * length is expected to be a multiple of @expected_elem_size bytes. Otherwise
+ * an error is returned.
+ *
+ * @replaced will indicate to the caller whether the blob was replaced or not.
+ * If the old and new blobs were in fact the same blob, @replaced will be
+ * false; otherwise it will be true.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
struct drm_property_blob **blob,
uint64_t blob_id,
ssize_t expected_size,
+ ssize_t expected_elem_size,
bool *replaced)
{
struct drm_property_blob *new_blob = NULL;
@@ -419,7 +447,13 @@ drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
if (new_blob == NULL)
return -EINVAL;
- if (expected_size > 0 && expected_size != new_blob->length) {
+ if (expected_size > 0 &&
+ new_blob->length != expected_size) {
+ drm_property_blob_put(new_blob);
+ return -EINVAL;
+ }
+ if (expected_elem_size > 0 &&
+ new_blob->length % expected_elem_size != 0) {
drm_property_blob_put(new_blob);
return -EINVAL;
}
@@ -467,7 +501,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->degamma_lut,
val,
- -1,
+ -1, sizeof(struct drm_color_lut),
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
@@ -475,7 +509,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->ctm,
val,
- sizeof(struct drm_color_ctm),
+ sizeof(struct drm_color_ctm), -1,
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
@@ -483,7 +517,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->gamma_lut,
val,
- -1,
+ -1, sizeof(struct drm_color_lut),
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
@@ -755,6 +789,10 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
+ } else if (property == plane->color_encoding_property) {
+ state->color_encoding = val;
+ } else if (property == plane->color_range_property) {
+ state->color_range = val;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
@@ -814,6 +852,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->rotation;
} else if (property == plane->zpos_property) {
*val = state->zpos;
+ } else if (property == plane->color_encoding_property) {
+ *val = state->color_encoding;
+ } else if (property == plane->color_range_property) {
+ *val = state->color_range;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
@@ -859,10 +901,10 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
int ret;
/* either *both* CRTC and FB must be set, or neither */
- if (WARN_ON(state->crtc && !state->fb)) {
+ if (state->crtc && !state->fb) {
DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
return -EINVAL;
- } else if (WARN_ON(state->fb && !state->crtc)) {
+ } else if (state->fb && !state->crtc) {
DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
return -EINVAL;
}
@@ -878,12 +920,14 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
}
/* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, state->fb->format->format);
+ ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
+ state->fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
- drm_get_format_name(state->fb->format->format,
- &format_name));
+ DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n",
+ drm_get_format_name(state->fb->format->format,
+ &format_name),
+ state->fb->modifier);
return ret;
}
@@ -907,11 +951,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
DRM_DEBUG_ATOMIC("Invalid source coordinates "
- "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
- state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
+ state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
+ state->fb->width, state->fb->height);
return -ENOSPC;
}
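A hedged aside on the %u.%06u prints in this hunk: the source rectangle is in 16.16 fixed point, and ((x & 0xffff) * 15625) >> 10 converts the fractional 16 bits to millionths, since 15625 / 1024 == 1000000 / 65536.

/*
 * Worked example (sketch): src_w == 0x00018000 means 1.5 in 16.16 fixed point.
 *   integer part:      0x00018000 >> 16        == 1
 *   fractional part:  (0x8000 * 15625) >> 10   == 500000    -> "1.500000"
 */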
@@ -934,30 +979,57 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
- if (state->fb) {
- struct drm_framebuffer *fb = state->fb;
- int i, n = fb->format->num_planes;
- struct drm_format_name_buf format_name;
-
- drm_printf(p, "\t\tformat=%s\n",
- drm_get_format_name(fb->format->format, &format_name));
- drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
- drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
- drm_printf(p, "\t\tlayers:\n");
- for (i = 0; i < n; i++) {
- drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
- drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
- }
- }
+ if (state->fb)
+ drm_framebuffer_print_info(p, 2, state->fb);
drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
drm_printf(p, "\trotation=%x\n", state->rotation);
+ drm_printf(p, "\tcolor-encoding=%s\n",
+ drm_get_color_encoding_name(state->color_encoding));
+ drm_printf(p, "\tcolor-range=%s\n",
+ drm_get_color_range_name(state->color_range));
if (plane->funcs->atomic_print_state)
plane->funcs->atomic_print_state(p, state);
}
/**
+ * DOC: handling driver private state
+ *
+ * Very often the DRM objects exposed to userspace in the atomic modeset api
+ * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
+ * underlying hardware. Especially for any kind of shared resources (e.g. shared
+ * clocks, scaler units, bandwidth and fifo limits shared among a group of
+ * planes or CRTCs, and so on) it makes sense to model these as independent
+ * objects. Drivers then need to do similar state tracking and commit ordering for
+ * such private (since not exposed to userspace) objects as the atomic core and
+ * helpers already provide for connectors, planes and CRTCs.
+ *
+ * To make this easier on drivers the atomic core provides some support to track
+ * driver private state objects using struct &drm_private_obj, with the
+ * associated state struct &drm_private_state.
+ *
+ * Similar to userspace-exposed objects, private state structures can be
+ * acquired by calling drm_atomic_get_private_obj_state(). Since this function
+ * does not take care of locking, drivers should wrap it for each type of
+ * private state object they have with the required call to drm_modeset_lock()
+ * for the corresponding &drm_modeset_lock.
+ *
+ * All private state structures contained in a &drm_atomic_state update can be
+ * iterated using for_each_oldnew_private_obj_in_state(),
+ * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
+ * Drivers are recommended to wrap these for each type of driver private state
+ * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
+ * least if they want to iterate over all objects of a given type.
+ *
+ * An earlier way to handle driver private state was by subclassing struct
+ * &drm_atomic_state. But since that encourages non-standard ways to implement
+ * the check/commit split atomic requires (by using e.g. "check and rollback or
+ * commit instead" of "duplicate state, check, then either commit or release
+ * duplicated state") it is deprecated in favour of using &drm_private_state.
+ */
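To make the DOC text above concrete, a hedged sketch of the per-type wrapper it recommends; struct foo_private_state, foo->private_lock and foo->private_obj are illustrative driver names, not part of this patch:

struct foo_private_state {
        struct drm_private_state base;
        /* driver-specific shared state follows */
};

static struct foo_private_state *
foo_get_private_state(struct drm_atomic_state *state, struct foo_device *foo)
{
        struct drm_private_state *priv;
        int ret;

        /* take the driver lock guarding this private object */
        ret = drm_modeset_lock(&foo->private_lock, state->acquire_ctx);
        if (ret)
                return ERR_PTR(ret);

        priv = drm_atomic_get_private_obj_state(state, &foo->private_obj);
        if (IS_ERR(priv))
                return ERR_CAST(priv);

        return container_of(priv, struct foo_private_state, base);
}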
+
+/**
* drm_atomic_private_obj_init - initialize private object
* @obj: private object
* @state: initial private object state
@@ -1196,6 +1268,12 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->picture_aspect_ratio = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
+ } else if (property == connector->content_protection_property) {
+ if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
+ return -EINVAL;
+ }
+ state->content_protection = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
@@ -1275,6 +1353,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->picture_aspect_ratio;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
+ } else if (property == connector->content_protection_property) {
+ *val = state->content_protection;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
@@ -1808,7 +1888,7 @@ int drm_atomic_debugfs_init(struct drm_minor *minor)
#endif
/*
- * The big monstor ioctl
+ * The big monster ioctl
*/
static struct drm_pending_vblank_event *create_vblank_event(
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index b16f1d6..c356545 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -696,6 +696,102 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
+ * drm_atomic_helper_check_plane_state() - Check plane state for validity
+ * @plane_state: plane state to check
+ * @crtc_state: crtc state to check
+ * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
+ * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
+ * @can_position: is it legal to position the plane such that it
+ * doesn't cover the entire crtc? This will generally
+ * only be false for primary planes.
+ * @can_update_disabled: can the plane be updated while the crtc
+ * is disabled?
+ *
+ * Checks that a desired plane update is valid, and updates various
+ * bits of derived state (clipped coordinates etc.). Drivers that provide
+ * their own plane handling rather than helper-provided implementations may
+ * still wish to call this function to avoid duplication of error checking
+ * code.
+ *
+ * RETURNS:
+ * Zero if update appears valid, error code on failure
+ */
+int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
+ const struct drm_crtc_state *crtc_state,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_rect *src = &plane_state->src;
+ struct drm_rect *dst = &plane_state->dst;
+ unsigned int rotation = plane_state->rotation;
+ struct drm_rect clip = {};
+ int hscale, vscale;
+
+ WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
+
+ *src = drm_plane_state_src(plane_state);
+ *dst = drm_plane_state_dest(plane_state);
+
+ if (!fb) {
+ plane_state->visible = false;
+ return 0;
+ }
+
+ /* crtc should only be NULL when disabling (i.e., !fb) */
+ if (WARN_ON(!plane_state->crtc)) {
+ plane_state->visible = false;
+ return 0;
+ }
+
+ if (!crtc_state->enable && !can_update_disabled) {
+ DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
+ return -EINVAL;
+ }
+
+ drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
+
+ /* Check scaling */
+ hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
+ if (hscale < 0 || vscale < 0) {
+ DRM_DEBUG_KMS("Invalid scaling of plane\n");
+ drm_rect_debug_print("src: ", &plane_state->src, true);
+ drm_rect_debug_print("dst: ", &plane_state->dst, false);
+ return -ERANGE;
+ }
+
+ if (crtc_state->enable)
+ drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
+
+ plane_state->visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale);
+
+ drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
+
+ if (!plane_state->visible)
+ /*
+ * Plane isn't visible; some drivers can handle this
+ * so we just return success here. Drivers that can't
+ * (including those that use the primary plane helper's
+ * update function) will return an error from their
+ * update_plane handler.
+ */
+ return 0;
+
+ if (!can_position && !drm_rect_equals(dst, &clip)) {
+ DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
+ drm_rect_debug_print("dst: ", dst, false);
+ drm_rect_debug_print("clip: ", &clip, false);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
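A hedged usage sketch for the new helper, as a driver's plane ->atomic_check() might call it; foo_plane_atomic_check is illustrative, DRM_PLANE_HELPER_NO_SCALING comes from drm_plane_helper.h, and the scaling/positioning choices are example values only:

static int foo_plane_atomic_check(struct drm_plane *plane,
                                  struct drm_plane_state *state)
{
        struct drm_crtc_state *crtc_state;

        if (!state->crtc)
                return 0;       /* plane is being disabled, nothing to clip */

        crtc_state = drm_atomic_get_existing_crtc_state(state->state,
                                                        state->crtc);
        if (WARN_ON(!crtc_state))
                return -EINVAL;

        /* no scaling, must cover the CRTC, may be updated while the CRTC is off */
        return drm_atomic_helper_check_plane_state(state, crtc_state,
                                                   DRM_PLANE_HELPER_NO_SCALING,
                                                   DRM_PLANE_HELPER_NO_SCALING,
                                                   false, true);
}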
+
+/**
* drm_atomic_helper_check_planes - validate state object for planes changes
* @dev: DRM device
* @state: the driver state object
@@ -907,6 +1003,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
*
* Drivers can use this for building their own atomic commit if they don't have
* a pure helper-based modeset implementation.
+ *
+ * Since these updates are not protected by any locking, only code paths
+ * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
+ * legacy state filled out by this helper. De facto this means this helper and
+ * the legacy state pointers are only really useful for transitioning an
+ * existing driver to the atomic world.
*/
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
@@ -1778,6 +1880,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
new_crtc_state->event->base.completion = &commit->flip_done;
new_crtc_state->event->base.completion_release = release_crtc_commit;
drm_crtc_commit_get(commit);
+
+ commit->abort_completion = true;
}
for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
@@ -1787,11 +1891,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
!try_wait_for_completion(&old_conn_state->commit->flip_done))
return -EBUSY;
- /* commit tracked through new_crtc_state->commit, no need to do it explicitly */
- if (new_conn_state->crtc)
- continue;
-
- commit = crtc_or_fake_commit(state, old_conn_state->crtc);
+ /* Always track connectors explicitly for e.g. link retraining. */
+ commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
if (!commit)
return -ENOMEM;
@@ -1805,10 +1906,7 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
!try_wait_for_completion(&old_plane_state->commit->flip_done))
return -EBUSY;
- /*
- * Unlike connectors, always track planes explicitly for
- * async pageflip support.
- */
+ /* Always track planes explicitly for async pageflip support. */
commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
if (!commit)
return -ENOMEM;
@@ -3327,8 +3425,21 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
{
if (state->commit) {
+ /*
+ * In the event that a non-blocking commit returns
+ * -ERESTARTSYS before the commit_tail work is queued, we will
+ * have an extra reference to the commit object. Release it, if
+ * the event has not been consumed by the worker.
+ *
+ * state->event may be freed, so we can't directly look at
+ * state->event->base.completion.
+ */
+ if (state->event && state->commit->abort_completion)
+ drm_crtc_commit_put(state->commit);
+
kfree(state->commit->event);
state->commit->event = NULL;
+
drm_crtc_commit_put(state->commit);
}
@@ -3707,7 +3818,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
}
/* Prepare GAMMA_LUT with the legacy values. */
- blob_data = (struct drm_color_lut *) blob->data;
+ blob_data = blob->data;
for (i = 0; i < size; i++) {
blob_data[i].red = red[i];
blob_data[i].green = green[i];
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index aad468d..d9c0f75 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -230,6 +230,12 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
if (!dev->master)
goto out_unlock;
+ if (file_priv->master->lessor != NULL) {
+ DRM_DEBUG_LEASE("Attempt to drop lessee %d as master\n", file_priv->master->lessee_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = 0;
drm_drop_master(dev, file_priv);
out_unlock:
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 2e5e089..5a81e1b 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -88,15 +88,17 @@
* On top of this basic transformation additional properties can be exposed by
* the driver:
*
- * - Rotation is set up with drm_plane_create_rotation_property(). It adds a
- * rotation and reflection step between the source and destination rectangles.
- * Without this property the rectangle is only scaled, but not rotated or
- * reflected.
+ * rotation:
+ * Rotation is set up with drm_plane_create_rotation_property(). It adds a
+ * rotation and reflection step between the source and destination rectangles.
+ * Without this property the rectangle is only scaled, but not rotated or
+ * reflected.
*
- * - Z position is set up with drm_plane_create_zpos_immutable_property() and
- * drm_plane_create_zpos_property(). It controls the visibility of overlapping
- * planes. Without this property the primary plane is always below the cursor
- * plane, and ordering between all other planes is undefined.
+ * zpos:
+ * Z position is set up with drm_plane_create_zpos_immutable_property() and
+ * drm_plane_create_zpos_property(). It controls the visibility of overlapping
+ * planes. Without this property the primary plane is always below the cursor
+ * plane, and ordering between all other planes is undefined.
*
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
@@ -214,9 +216,11 @@ EXPORT_SYMBOL(drm_rotation_simplify);
* This function initializes generic mutable zpos property and enables support
* for it in drm core. Drivers can then attach this property to planes to enable
* support for configurable planes arrangement during blending operation.
- * Once mutable zpos property has been enabled, the DRM core will automatically
- * calculate &drm_plane_state.normalized_zpos values. Usually min should be set
- * to 0 and max to maximal number of planes for given crtc - 1.
+ * Drivers that attach a mutable zpos property to any plane should call the
+ * drm_atomic_normalize_zpos() helper during their implementation of
+ * &drm_mode_config_funcs.atomic_check(), which will update the normalized zpos
+ * values and store them in &drm_plane_state.normalized_zpos. Usually min
+ * should be set to 0 and max to maximal number of planes for given crtc - 1.
*
* If zpos of some planes cannot be changed (like fixed background or
* cursor/topmost planes), driver should adjust min/max values and assign those
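Following the reworded guidance above, a hedged sketch of where drm_atomic_normalize_zpos() fits in a driver's &drm_mode_config_funcs.atomic_check() implementation; foo_atomic_check is illustrative:

static int foo_atomic_check(struct drm_device *dev,
                            struct drm_atomic_state *state)
{
        int ret;

        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                return ret;

        /* recompute drm_plane_state.normalized_zpos for all planes in @state */
        ret = drm_atomic_normalize_zpos(dev, state);
        if (ret)
                return ret;

        return drm_atomic_helper_check_planes(dev, state);
}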
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 1ee84dd..ba8cfe6 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -129,10 +129,10 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
* applicable and if supported by the kernel.
*/
-static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
+static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags,
- struct drm_map_list ** maplist)
+ struct drm_map_list **maplist)
{
struct drm_local_map *map;
struct drm_map_list *list;
@@ -224,7 +224,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
if (list != NULL) {
- if(list->map->size != map->size) {
+ if (list->map->size != map->size) {
DRM_DEBUG("Matching maps of type %d with "
"mismatched sizes, (%ld vs %ld)\n",
map->type, map->size, list->map->size);
@@ -361,7 +361,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return 0;
}
-int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
+int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
@@ -637,8 +637,8 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
*
* Frees any pages and buffers associated with the given entry.
*/
-static void drm_cleanup_buf_error(struct drm_device * dev,
- struct drm_buf_entry * entry)
+static void drm_cleanup_buf_error(struct drm_device *dev,
+ struct drm_buf_entry *entry)
{
int i;
@@ -1446,8 +1446,8 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
void __user **v,
int (*f)(void *, int, unsigned long,
- struct drm_buf *),
- struct drm_file *file_priv)
+ struct drm_buf *),
+ struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int retcode = 0;
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 0d002b0..4ff0646 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -88,6 +88,20 @@
* drm_mode_crtc_set_gamma_size(). Drivers which support both should use
* drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the
* "GAMMA_LUT" property above.
+ *
+ * Support for different non RGB color encodings is controlled through
+ * &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They
+ * are set up by calling drm_plane_create_color_properties().
+ *
+ * "COLOR_ENCODING"
+ * Optional plane enum property to support different non RGB
+ * color encodings. The driver can provide a subset of standard
+ * enum values supported by the DRM plane.
+ *
+ * "COLOR_RANGE"
+ * Optional plane enum property to support different non RGB
+ * color parameter ranges. The driver can provide a subset of
+ * standard enum values supported by the DRM plane.
*/
/**
@@ -339,3 +353,122 @@ out:
drm_modeset_unlock(&crtc->mutex);
return ret;
}
+
+static const char * const color_encoding_name[] = {
+ [DRM_COLOR_YCBCR_BT601] = "ITU-R BT.601 YCbCr",
+ [DRM_COLOR_YCBCR_BT709] = "ITU-R BT.709 YCbCr",
+ [DRM_COLOR_YCBCR_BT2020] = "ITU-R BT.2020 YCbCr",
+};
+
+static const char * const color_range_name[] = {
+ [DRM_COLOR_YCBCR_FULL_RANGE] = "YCbCr full range",
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = "YCbCr limited range",
+};
+
+/**
+ * drm_get_color_encoding_name - return a string for color encoding
+ * @encoding: color encoding to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions, this one returns a
+ * const pointer and is hence thread-safe.
+ */
+const char *drm_get_color_encoding_name(enum drm_color_encoding encoding)
+{
+ if (WARN_ON(encoding >= ARRAY_SIZE(color_encoding_name)))
+ return "unknown";
+
+ return color_encoding_name[encoding];
+}
+
+/**
+ * drm_get_color_range_name - return a string for color range
+ * @range: color range to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions, this one returns a
+ * const pointer and is hence thread-safe.
+ */
+const char *drm_get_color_range_name(enum drm_color_range range)
+{
+ if (WARN_ON(range >= ARRAY_SIZE(color_range_name)))
+ return "unknown";
+
+ return color_range_name[range];
+}
+
+/**
+ * drm_plane_create_color_properties - color encoding related plane properties
+ * @plane: plane object
+ * @supported_encodings: bitfield indicating supported color encodings
+ * @supported_ranges: bitfield indicating supported color ranges
+ * @default_encoding: default color encoding
+ * @default_range: default color range
+ *
+ * Create and attach plane specific COLOR_ENCODING and COLOR_RANGE
+ * properties to @plane. The supported encodings and ranges should
+ * be provided in supported_encodings and supported_ranges bitmasks.
+ * Each bit set in a bitmask marks the enum value at that bit position
+ * as supported.
+ */
+int drm_plane_create_color_properties(struct drm_plane *plane,
+ u32 supported_encodings,
+ u32 supported_ranges,
+ enum drm_color_encoding default_encoding,
+ enum drm_color_range default_range)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_property *prop;
+ struct drm_prop_enum_list enum_list[max(DRM_COLOR_ENCODING_MAX,
+ DRM_COLOR_RANGE_MAX)];
+ int i, len;
+
+ if (WARN_ON(supported_encodings == 0 ||
+ (supported_encodings & -BIT(DRM_COLOR_ENCODING_MAX)) != 0 ||
+ (supported_encodings & BIT(default_encoding)) == 0))
+ return -EINVAL;
+
+ if (WARN_ON(supported_ranges == 0 ||
+ (supported_ranges & -BIT(DRM_COLOR_RANGE_MAX)) != 0 ||
+ (supported_ranges & BIT(default_range)) == 0))
+ return -EINVAL;
+
+ len = 0;
+ for (i = 0; i < DRM_COLOR_ENCODING_MAX; i++) {
+ if ((supported_encodings & BIT(i)) == 0)
+ continue;
+
+ enum_list[len].type = i;
+ enum_list[len].name = color_encoding_name[i];
+ len++;
+ }
+
+ prop = drm_property_create_enum(dev, 0, "COLOR_ENCODING",
+ enum_list, len);
+ if (!prop)
+ return -ENOMEM;
+ plane->color_encoding_property = prop;
+ drm_object_attach_property(&plane->base, prop, default_encoding);
+ if (plane->state)
+ plane->state->color_encoding = default_encoding;
+
+ len = 0;
+ for (i = 0; i < DRM_COLOR_RANGE_MAX; i++) {
+ if ((supported_ranges & BIT(i)) == 0)
+ continue;
+
+ enum_list[len].type = i;
+ enum_list[len].name = color_range_name[i];
+ len++;
+ }
+
+ prop = drm_property_create_enum(dev, 0, "COLOR_RANGE",
+ enum_list, len);
+ if (!prop)
+ return -ENOMEM;
+ plane->color_range_property = prop;
+ drm_object_attach_property(&plane->base, prop, default_range);
+ if (plane->state)
+ plane->state->color_range = default_range;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_color_properties);
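A hedged usage sketch for drm_plane_create_color_properties(), assuming a plane that can decode BT.601 and BT.709 YCbCr in both ranges and defaults to BT.709 limited range:

ret = drm_plane_create_color_properties(plane,
                                        BIT(DRM_COLOR_YCBCR_BT601) |
                                        BIT(DRM_COLOR_YCBCR_BT709),
                                        BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
                                        BIT(DRM_COLOR_YCBCR_FULL_RANGE),
                                        DRM_COLOR_YCBCR_BT709,
                                        DRM_COLOR_YCBCR_LIMITED_RANGE);
if (ret)
        return ret;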
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9ae2360..b3cde89 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -24,6 +24,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_utils.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -204,9 +205,14 @@ int drm_connector_init(struct drm_device *dev,
connector->dev = dev;
connector->funcs = funcs;
- ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
- if (ret < 0)
+ /* connector index is used with 32-bit bitmasks */
+ ret = ida_simple_get(&config->connector_ida, 0, 32, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Failed to allocate %s connector index: %d\n",
+ drm_connector_enum_list[connector_type].name,
+ ret);
goto out_put;
+ }
connector->index = ret;
ret = 0;
@@ -231,6 +237,8 @@ int drm_connector_init(struct drm_device *dev,
mutex_init(&connector->mutex);
connector->edid_blob_ptr = NULL;
connector->status = connector_status_unknown;
+ connector->display_info.panel_orientation =
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
drm_connector_get_cmdline_mode(connector);
@@ -712,6 +720,13 @@ static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
};
+static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = {
+ { DRM_MODE_PANEL_ORIENTATION_NORMAL, "Normal" },
+ { DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down" },
+ { DRM_MODE_PANEL_ORIENTATION_LEFT_UP, "Left Side Up" },
+ { DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, "Right Side Up" },
+};
+
static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
@@ -746,6 +761,13 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
+static struct drm_prop_enum_list drm_cp_enum_list[] = {
+ { DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
+ { DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
+ { DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" },
+};
+DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
+
/**
* DOC: standard connector properties
*
@@ -807,19 +829,91 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* should update this value using drm_mode_connector_set_tile_property().
* Userspace cannot change this property.
* link-status:
- * Connector link-status property to indicate the status of link. The default
- * value of link-status is "GOOD". If something fails during or after modeset,
- * the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers
- * should update this value using drm_mode_connector_set_link_status_property().
+ * Connector link-status property to indicate the status of link. The
+ * default value of link-status is "GOOD". If something fails during or
+ * after modeset, the kernel driver may set this to "BAD" and issue a
+ * hotplug uevent. Drivers should update this value using
+ * drm_mode_connector_set_link_status_property().
* non_desktop:
* Indicates the output should be ignored for purposes of displaying a
* standard desktop environment or console. This is most likely because
* the output device is not rectilinear.
+ * Content Protection:
+ * This property is used by userspace to request the kernel protect future
+ * content communicated over the link. When requested, kernel will apply
+ * the appropriate means of protection (most often HDCP), and use the
+ * property to tell userspace the protection is active.
+ *
+ * Drivers can set this up by calling
+ * drm_connector_attach_content_protection_property() on initialization.
+ *
+ * The value of this property can be one of the following:
+ *
+ * DRM_MODE_CONTENT_PROTECTION_UNDESIRED = 0
+ * The link is not protected, content is transmitted in the clear.
+ * DRM_MODE_CONTENT_PROTECTION_DESIRED = 1
+ * Userspace has requested content protection, but the link is not
+ * currently protected. When in this state, kernel should enable
+ * Content Protection as soon as possible.
+ * DRM_MODE_CONTENT_PROTECTION_ENABLED = 2
+ * Userspace has requested content protection, and the link is
+ * protected. Only the driver can set the property to this value.
+ * If userspace attempts to set to ENABLED, kernel will return
+ * -EINVAL.
+ *
+ * A few guidelines:
+ *
+ * - DESIRED state should be preserved until userspace de-asserts it by
+ * setting the property to UNDESIRED. This means ENABLED should only
+ * transition to UNDESIRED when the user explicitly requests it.
+ * - If the state is DESIRED, kernel should attempt to re-authenticate the
+ * link whenever possible. This includes across disable/enable, dpms,
+ * hotplug, downstream device changes, link status failures, etc.
+ * - Userspace is responsible for polling the property to determine when
+ * the value transitions from ENABLED to DESIRED. This signifies the link
+ * is no longer protected and userspace should take appropriate action
+ * (whatever that might be).
*
* Connectors also have one standardized atomic property:
*
* CRTC_ID:
* Mode object ID of the &drm_crtc this connector should be connected to.
+ *
+ * Connectors for LCD panels may also have one standardized property:
+ *
+ * panel orientation:
+ * On some devices the LCD panel is mounted in the casing in such a way
+ * that the up/top side of the panel does not match with the top side of
+ * the device. Userspace can use this property to check for this.
+ * Note that input coordinates from touchscreens (input devices with
+ * INPUT_PROP_DIRECT) will still map 1:1 to the actual LCD panel
+ * coordinates, so if userspace rotates the picture to adjust for
+ * the orientation it must also apply the same transformation to the
+ * touchscreen input coordinates. This property is initialized by calling
+ * drm_connector_init_panel_orientation_property().
+ *
+ * scaling mode:
+ * This property defines how a non-native mode is upscaled to the native
+ * mode of an LCD panel:
+ *
+ * None:
+ * No upscaling happens, scaling is left to the panel. Not all
+ * drivers expose this mode.
+ * Full:
+ * The output is upscaled to the full resolution of the panel,
+ * ignoring the aspect ratio.
+ * Center:
+ * No upscaling happens, the output is centered within the native
+ * resolution of the panel.
+ * Full aspect:
+ * The output is upscaled to maximize either the width or height
+ * while retaining the aspect ratio.
+ *
+ * This property should be set up by calling
+ * drm_connector_attach_scaling_mode_property(). Note that drivers
+ * can also expose this property to external outputs, in which case they
+ * must support "None", which should be the default (since external screens
+ * have a built-in scaler).
*/
int drm_connector_create_standard_properties(struct drm_device *dev)
@@ -1104,6 +1198,42 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_connector_attach_scaling_mode_property);
/**
+ * drm_connector_attach_content_protection_property - attach content protection
+ * property
+ *
+ * @connector: connector to attach CP property on.
+ *
+ * This is used to add support for content protection on select connectors.
+ * Content Protection is intentionally vague to allow for different underlying
+ * technologies, however it is most implemented by HDCP.
+ *
+ * The content protection will be set to &drm_connector_state.content_protection
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_content_protection_property(
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ prop = drm_property_create_enum(dev, 0, "Content Protection",
+ drm_cp_enum_list,
+ ARRAY_SIZE(drm_cp_enum_list));
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&connector->base, prop,
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
+ connector->content_protection_property = prop;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
+
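A hedged sketch of how a driver would wire this up for an HDCP-capable connector; the point at which authentication succeeds is driver-specific and only indicated by a comment here:

/* at connector init time */
ret = drm_connector_attach_content_protection_property(connector);
if (ret)
        return ret;

/* later, from driver code, once the link is actually protected */
connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;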
+/**
* drm_mode_create_aspect_ratio_property - create aspect ratio property
* @dev: DRM device
*
@@ -1308,6 +1438,57 @@ void drm_mode_connector_set_link_status_property(struct drm_connector *connector
}
EXPORT_SYMBOL(drm_mode_connector_set_link_status_property);
+/**
+ * drm_connector_init_panel_orientation_property -
+ * initialize the connector's panel_orientation property
+ * @connector: connector for which to init the panel-orientation property.
+ * @width: width in pixels of the panel, used for panel quirk detection
+ * @height: height in pixels of the panel, used for panel quirk detection
+ *
+ * This function should only be called for built-in panels, after setting
+ * connector->display_info.panel_orientation first (if known).
+ *
+ * This function will check for platform specific (e.g. DMI based) quirks
+ * overriding display_info.panel_orientation first, then if panel_orientation
+ * is not DRM_MODE_PANEL_ORIENTATION_UNKNOWN it will attach the
+ * "panel orientation" property to the connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_init_panel_orientation_property(
+ struct drm_connector *connector, int width, int height)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_property *prop;
+ int orientation_quirk;
+
+ orientation_quirk = drm_get_panel_orientation_quirk(width, height);
+ if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ info->panel_orientation = orientation_quirk;
+
+ if (info->panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return 0;
+
+ prop = dev->mode_config.panel_orientation_property;
+ if (!prop) {
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "panel orientation",
+ drm_panel_orientation_enum_list,
+ ARRAY_SIZE(drm_panel_orientation_enum_list));
+ if (!prop)
+ return -ENOMEM;
+
+ dev->mode_config.panel_orientation_property = prop;
+ }
+
+ drm_object_attach_property(&connector->base, prop,
+ info->panel_orientation);
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
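A hedged sketch of the intended call sequence for a built-in panel; the orientation value and native_mode are illustrative:

/* report the mounting orientation first, if the driver knows it */
connector->display_info.panel_orientation =
        DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;

/* then attach the property, passing the native mode size for quirk lookup */
ret = drm_connector_init_panel_orientation_property(connector,
                                                    native_mode->hdisplay,
                                                    native_mode->vdisplay);
if (ret)
        return ret;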
+
int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f0556e6..0358388 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -282,6 +282,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY);
WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR);
+ /* crtc index is used with 32-bit bitmasks */
+ if (WARN_ON(config->num_crtc >= 32))
+ return -EINVAL;
+
crtc->dev = dev;
crtc->funcs = funcs;
@@ -610,7 +614,7 @@ retry:
goto out;
}
- ret = drm_mode_convert_umode(mode, &crtc_req->mode);
+ ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode\n");
goto out;
@@ -625,12 +629,14 @@ retry:
*/
if (!crtc->primary->format_default) {
ret = drm_plane_check_pixel_format(crtc->primary,
- fb->format->format);
+ fb->format->format,
+ fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
+ drm_get_format_name(fb->format->format,
+ &format_name),
+ fb->modifier);
goto out;
}
}
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index af00f42..3c2b828 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -71,6 +71,8 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
/* drm_color_mgmt.c */
+const char *drm_get_color_encoding_name(enum drm_color_encoding encoding);
+const char *drm_get_color_range_name(enum drm_color_range range);
/* IOCTLs */
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
@@ -196,8 +198,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
/* drm_plane.c */
int drm_plane_register_all(struct drm_device *dev);
void drm_plane_unregister_all(struct drm_device *dev);
-int drm_plane_check_pixel_format(const struct drm_plane *plane,
- u32 format);
+int drm_plane_check_pixel_format(struct drm_plane *plane,
+ u32 format, u64 modifier);
/* drm_bridge.c */
void drm_bridge_detach(struct drm_bridge *bridge);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index c1807d5..b248281 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -158,6 +158,14 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
}
}
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_framebuffer_debugfs_init(minor);
+ if (ret) {
+ DRM_ERROR("Failed to create framebuffer debugfs file\n");
+ return ret;
+ }
+ }
+
if (dev->driver->debugfs_init) {
ret = dev->driver->debugfs_init(minor);
if (ret) {
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 9dd8795..9f83121 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -307,10 +307,29 @@ static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
return LINE_LEN(crc->values_cnt);
}
+static unsigned int crtc_crc_poll(struct file *file, poll_table *wait)
+{
+ struct drm_crtc *crtc = file->f_inode->i_private;
+ struct drm_crtc_crc *crc = &crtc->crc;
+ unsigned ret;
+
+ poll_wait(file, &crc->wq, wait);
+
+ spin_lock_irq(&crc->lock);
+ if (crc->source && crtc_crc_data_count(crc))
+ ret = POLLIN | POLLRDNORM;
+ else
+ ret = 0;
+ spin_unlock_irq(&crc->lock);
+
+ return ret;
+}
+
static const struct file_operations drm_crtc_crc_data_fops = {
.owner = THIS_MODULE,
.open = crtc_crc_open,
.read = crtc_crc_read,
+ .poll = crtc_crc_poll,
.release = crtc_crc_release,
};
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 0530442..0e4f25d 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -177,8 +177,9 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
res = pos - iocb->ki_pos;
iocb->ki_pos = pos;
- atomic_dec(&aux_dev->usecount);
- wake_up_atomic_t(&aux_dev->usecount);
+ if (atomic_dec_and_test(&aux_dev->usecount))
+ wake_up_var(&aux_dev->usecount);
+
return res;
}
@@ -218,8 +219,9 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
res = pos - iocb->ki_pos;
iocb->ki_pos = pos;
- atomic_dec(&aux_dev->usecount);
- wake_up_atomic_t(&aux_dev->usecount);
+ if (atomic_dec_and_test(&aux_dev->usecount))
+ wake_up_var(&aux_dev->usecount);
+
return res;
}
@@ -277,8 +279,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
mutex_unlock(&aux_idr_mutex);
atomic_dec(&aux_dev->usecount);
- wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait,
- TASK_UNINTERRUPTIBLE);
+ wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount));
minor = aux_dev->index;
if (aux_dev->dev)
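The three hunks above convert drm_dp_aux_dev from the removed atomic_t wait helpers to the wait_var_event()/wake_up_var() pair: readers and writers drop their reference and wake the waiter only when the count reaches zero, while unregister blocks until no users remain. A minimal sketch of the same pattern, with struct foo_dev standing in for the real structure:

	#include <linux/atomic.h>
	#include <linux/wait_bit.h>

	struct foo_dev {
		atomic_t usecount;	/* starts at 1, held by the registration */
	};

	static void foo_dev_put(struct foo_dev *fdev)
	{
		if (atomic_dec_and_test(&fdev->usecount))
			wake_up_var(&fdev->usecount);
	}

	static void foo_dev_unregister(struct foo_dev *fdev)
	{
		/* Drop the registration reference, then wait for all users. */
		atomic_dec(&fdev->usecount);
		wait_var_event(&fdev->usecount, !atomic_read(&fdev->usecount));
	}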
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index b3d6896..ffe14ec 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -146,6 +146,8 @@ u8 drm_dp_link_rate_to_bw_code(int link_rate)
return DP_LINK_BW_2_7;
case 540000:
return DP_LINK_BW_5_4;
+ case 810000:
+ return DP_LINK_BW_8_1;
}
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
@@ -161,6 +163,8 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
return 270000;
case DP_LINK_BW_5_4:
return 540000;
+ case DP_LINK_BW_8_1:
+ return 810000;
}
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
@@ -1097,7 +1101,6 @@ int drm_dp_aux_register(struct drm_dp_aux *aux)
aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
aux->ddc.dev.parent = aux->dev;
- aux->ddc.dev.of_node = aux->dev->of_node;
strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
sizeof(aux->ddc.name));
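With the DP_LINK_BW_8_1 entries added above, the bandwidth-code conversion helpers now cover HBR3 as well; the 810000 value follows the scale of the existing 162000/270000/540000 entries. An illustrative round trip:

	/* Illustrative only: round-tripping the new HBR3 entry. */
	u8 bw_code = drm_dp_link_rate_to_bw_code(810000);	/* DP_LINK_BW_8_1 */
	int link_rate = drm_dp_bw_code_to_link_rate(bw_code);	/* 810000 */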
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 70dcfa5..6fac412 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1082,10 +1082,12 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
lct = drm_dp_calculate_rad(port, rad);
port->mstb = drm_dp_add_mst_branch_device(lct, rad);
- port->mstb->mgr = port->mgr;
- port->mstb->port_parent = port;
+ if (port->mstb) {
+ port->mstb->mgr = port->mgr;
+ port->mstb->port_parent = port;
- send_link = true;
+ send_link = true;
+ }
break;
}
return send_link;
@@ -2087,6 +2089,9 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
case DP_LINK_BW_5_4:
*out = 10 * dp_link_count;
break;
+ case DP_LINK_BW_8_1:
+ *out = 15 * dp_link_count;
+ break;
}
return true;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a934fd5..a1b9338 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -75,53 +75,6 @@ static bool drm_core_init_complete = false;
static struct dentry *drm_debugfs_root;
-#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
-
-void drm_dev_printk(const struct device *dev, const char *level,
- unsigned int category, const char *function_name,
- const char *prefix, const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
-
- if (category != DRM_UT_NONE && !(drm_debug & category))
- return;
-
- va_start(args, format);
- vaf.fmt = format;
- vaf.va = &args;
-
- if (dev)
- dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
- &vaf);
- else
- printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(drm_dev_printk);
-
-void drm_printk(const char *level, unsigned int category,
- const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
-
- if (category != DRM_UT_NONE && !(drm_debug & category))
- return;
-
- va_start(args, format);
- vaf.fmt = format;
- vaf.va = &args;
-
- printk("%s" "[" DRM_NAME ":%ps]%s %pV",
- level, __builtin_return_address(0),
- strcmp(level, KERN_ERR) == 0 ? " *ERROR*" : "", &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(drm_printk);
-
/*
* DRM Minors
* A DRM device can provide several char-dev interfaces on the DRM-Major. Each
@@ -146,7 +99,7 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
case DRM_MINOR_CONTROL:
return &dev->control;
default:
- return NULL;
+ BUG();
}
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index cb48714..134069f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -113,6 +113,9 @@ static const struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+ { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -162,6 +165,24 @@ static const struct edid_quirk {
/* HTC Vive VR Headset */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+
+ /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+ { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+
+ /* Windows Mixed Reality Headsets */
+ { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+ { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP },
+ { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP },
+ { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP },
+ { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP },
+ { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+ { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP },
+ { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP },
+
+ /* Sony PlayStation VR Headset */
+ { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
};
/*
@@ -1554,8 +1575,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
struct edid *override = NULL;
if (connector->override_edid)
- override = drm_edid_duplicate((const struct edid *)
- connector->edid_blob_ptr->data);
+ override = drm_edid_duplicate(connector->edid_blob_ptr->data);
if (!override)
override = drm_load_edid_firmware(connector);
@@ -2083,6 +2103,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
+ if (!mode)
+ return NULL;
mode->hdisplay = 1366;
mode->hsync_start = mode->hsync_start - 1;
mode->hsync_end = mode->hsync_end - 1;
@@ -2767,7 +2789,7 @@ do_detailed_mode(struct detailed_timing *timing, void *c)
drm_mode_probed_add(closure->connector, newmode);
closure->modes++;
- closure->preferred = 0;
+ closure->preferred = false;
}
}
@@ -2784,7 +2806,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
- .preferred = 1,
+ .preferred = true,
.quirks = quirks,
};
@@ -3398,6 +3420,7 @@ static int
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
const u8 *video_db, u8 video_len)
{
+ struct drm_display_info *info = &connector->display_info;
int modes = 0, offset = 0, i, multi_present = 0, multi_len;
u8 vic_len, hdmi_3d_len = 0;
u16 mask;
@@ -3525,6 +3548,8 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
}
out:
+ if (modes > 0)
+ info->has_hdmi_infoframe = true;
return modes;
}
@@ -3761,8 +3786,8 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
{
u8 len = cea_db_payload_len(db);
- if (len >= 6)
- connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
+ if (len >= 6 && (db[6] & (1 << 7)))
+ connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_SUPPORTS_AI;
if (len >= 8) {
connector->latency_present[0] = db[8] >> 7;
connector->latency_present[1] = (db[8] >> 6) & 1;
@@ -3834,16 +3859,27 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name, int bufsize)
}
EXPORT_SYMBOL(drm_edid_get_monitor_name);
-/**
+static void clear_eld(struct drm_connector *connector)
+{
+ memset(connector->eld, 0, sizeof(connector->eld));
+
+ connector->latency_present[0] = false;
+ connector->latency_present[1] = false;
+ connector->video_latency[0] = 0;
+ connector->audio_latency[0] = 0;
+ connector->video_latency[1] = 0;
+ connector->audio_latency[1] = 0;
+}
+
+/*
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
* @edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
- * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
- * fill in.
+ * HDCP and Port_ID ELD fields are left for the graphics driver to fill in.
*/
-void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
uint8_t *eld = connector->eld;
u8 *cea;
@@ -3852,14 +3888,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
int mnl;
int dbl;
- memset(eld, 0, sizeof(connector->eld));
-
- connector->latency_present[0] = false;
- connector->latency_present[1] = false;
- connector->video_latency[0] = 0;
- connector->audio_latency[0] = 0;
- connector->video_latency[1] = 0;
- connector->audio_latency[1] = 0;
+ clear_eld(connector);
if (!edid)
return;
@@ -3870,17 +3899,18 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
return;
}
- mnl = get_monitor_name(edid, eld + 20);
+ mnl = get_monitor_name(edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
+ DRM_DEBUG_KMS("ELD monitor %s\n", &eld[DRM_ELD_MONITOR_NAME_STRING]);
- eld[4] = (cea[1] << 5) | mnl;
- DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+ eld[DRM_ELD_CEA_EDID_VER_MNL] = cea[1] << DRM_ELD_CEA_EDID_VER_SHIFT;
+ eld[DRM_ELD_CEA_EDID_VER_MNL] |= mnl;
- eld[0] = 2 << 3; /* ELD version: 2 */
+ eld[DRM_ELD_VER] = DRM_ELD_VER_CEA861D;
- eld[16] = edid->mfg_id[0];
- eld[17] = edid->mfg_id[1];
- eld[18] = edid->prod_code[0];
- eld[19] = edid->prod_code[1];
+ eld[DRM_ELD_MANUFACTURER_NAME0] = edid->mfg_id[0];
+ eld[DRM_ELD_MANUFACTURER_NAME1] = edid->mfg_id[1];
+ eld[DRM_ELD_PRODUCT_CODE0] = edid->prod_code[0];
+ eld[DRM_ELD_PRODUCT_CODE1] = edid->prod_code[1];
if (cea_revision(cea) >= 3) {
int i, start, end;
@@ -3901,14 +3931,14 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
/* Audio Data Block, contains SADs */
sad_count = min(dbl / 3, 15 - total_sad_count);
if (sad_count >= 1)
- memcpy(eld + 20 + mnl + total_sad_count * 3,
+ memcpy(&eld[DRM_ELD_CEA_SAD(mnl, total_sad_count)],
&db[1], sad_count * 3);
total_sad_count += sad_count;
break;
case SPEAKER_BLOCK:
/* Speaker Allocation Data Block */
if (dbl >= 1)
- eld[7] = db[1];
+ eld[DRM_ELD_SPEAKER] = db[1];
break;
case VENDOR_BLOCK:
/* HDMI Vendor-Specific Data Block */
@@ -3920,7 +3950,13 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
}
}
}
- eld[5] |= total_sad_count << 4;
+ eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= total_sad_count << DRM_ELD_SAD_COUNT_SHIFT;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP;
+ else
+ eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI;
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
@@ -3928,7 +3964,6 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
drm_eld_size(eld), total_sad_count);
}
-EXPORT_SYMBOL(drm_edid_to_eld);
/**
* drm_edid_to_sad - extracts SADs from EDID
@@ -4238,6 +4273,8 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
struct drm_display_info *display = &connector->display_info;
struct drm_hdmi_info *hdmi = &display->hdmi;
+ display->has_hdmi_infoframe = true;
+
if (hf_vsdb[6] & 0x80) {
hdmi->scdc.supported = true;
if (hf_vsdb[6] & 0x40)
@@ -4413,6 +4450,7 @@ drm_reset_display_info(struct drm_connector *connector)
info->cea_rev = 0;
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->has_hdmi_infoframe = false;
info->non_desktop = 0;
}
@@ -4433,6 +4471,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
info->cea_rev = 0;
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->has_hdmi_infoframe = false;
info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
@@ -4634,8 +4673,8 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
* @edid: EDID data
*
* Add the specified modes to the connector's mode list. Also fills out the
- * &drm_display_info structure in @connector with any information which can be
- * derived from the edid.
+ * &drm_display_info structure and ELD in @connector with any information which
+ * can be derived from the edid.
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
@@ -4645,14 +4684,18 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
u32 quirks;
if (edid == NULL) {
+ clear_eld(connector);
return 0;
}
if (!drm_edid_is_valid(edid)) {
+ clear_eld(connector);
dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
connector->name);
return 0;
}
+ drm_edid_to_eld(connector, edid);
+
/*
* CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
* To avoid multiple parsing of same block, lets parse that map
@@ -4850,6 +4893,11 @@ EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
* @mode: DRM display mode
* @rgb_quant_range: RGB quantization range (Q)
* @rgb_quant_range_selectable: Sink support selectable RGB quantization range (QS)
+ * @is_hdmi2_sink: HDMI 2.0 sink, which has different default recommendations
+ *
+ * Note that @is_hdmi2_sink can be derived by looking at the
+ * &drm_scdc.supported flag stored in &drm_hdmi_info.scdc,
+ * &drm_display_info.hdmi, which can be found in &drm_connector.display_info.
*/
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
@@ -4928,6 +4976,7 @@ s3d_structure_from_display_mode(const struct drm_display_mode *mode)
* drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
* data from a DRM display mode
* @frame: HDMI vendor infoframe
+ * @connector: the connector
* @mode: DRM display mode
*
* Note that there is a need to send HDMI vendor infoframes only when using a
@@ -4938,8 +4987,15 @@ s3d_structure_from_display_mode(const struct drm_display_mode *mode)
*/
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+ struct drm_connector *connector,
const struct drm_display_mode *mode)
{
+ /*
+ * FIXME: sil-sii8620 doesn't have a connector around when
+ * we need one, so we have to be prepared for a NULL connector.
+ */
+ bool has_hdmi_infoframe = connector ?
+ connector->display_info.has_hdmi_infoframe : false;
int err;
u32 s3d_flags;
u8 vic;
@@ -4947,11 +5003,21 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
if (!frame || !mode)
return -EINVAL;
+ if (!has_hdmi_infoframe)
+ return -EINVAL;
+
vic = drm_match_hdmi_mode(mode);
s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
- if (!vic && !s3d_flags)
- return -EINVAL;
+ /*
+ * Even if it's not absolutely necessary to send the infoframe
+ * (i.e. vic==0 and s3d_struct==0) we will still send it if we
+ * know that the sink can handle it. This is based on a
+ * suggestion in HDMI 2.0 Appendix F. Apparently some sinks
+ * have trouble realizing that they should switch from 3D to 2D
+ * mode if the source simply stops sending the infoframe when
+ * it wants to switch from 3D to 2D.
+ */
if (vic && s3d_flags)
return -EINVAL;
@@ -4960,10 +5026,8 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
if (err < 0)
return err;
- if (vic)
- frame->vic = vic;
- else
- frame->s3d_struct = s3d_structure_from_display_mode(mode);
+ frame->vic = vic;
+ frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
}
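Because the helper now consults connector->display_info.has_hdmi_infoframe (with a NULL-connector escape hatch for sil-sii8620), every caller gains a connector argument. A hedged sketch of an updated call site; the surrounding encoder code and the foo_ name are assumptions:

	static void foo_write_vendor_infoframe(struct drm_connector *connector,
					       const struct drm_display_mode *mode)
	{
		struct hdmi_vendor_infoframe frame;

		/* Returns -EINVAL when the sink cannot accept an HDMI infoframe. */
		if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, connector,
								mode) < 0)
			return;

		/* ... pack and upload the frame through the encoder hardware ... */
	}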
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 59e0ebe..273e1c5 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -110,6 +110,10 @@ int drm_encoder_init(struct drm_device *dev,
{
int ret;
+ /* encoder index is used with 32bit bitmasks */
+ if (WARN_ON(dev->mode_config.num_encoder >= 32))
+ return -EINVAL;
+
ret = drm_mode_object_add(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 0e3c141..186d00a 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -23,6 +23,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_print.h>
#include <linux/module.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -42,7 +43,7 @@ struct drm_fbdev_cma {
* callback function to create a cma backed framebuffer.
*
* An fbdev framebuffer backed by cma is also available by calling
- * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
+ * drm_fb_cma_fbdev_init(). drm_fb_cma_fbdev_fini() tears it down.
* If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
* set up automatically. &drm_framebuffer_funcs.dirty is called by
* drm_fb_helper_deferred_io() in process context (&struct delayed_work).
@@ -68,7 +69,7 @@ struct drm_fbdev_cma {
*
* Initialize::
*
- * fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
+ * fbdev = drm_fb_cma_fbdev_init_with_funcs(dev, 16,
* dev->mode_config.num_crtc,
* dev->mode_config.num_connector,
* &driver_fb_funcs);
@@ -130,43 +131,6 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
-#ifdef CONFIG_DEBUG_FS
-static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
-{
- int i;
-
- seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
- (char *)&fb->format->format);
-
- for (i = 0; i < fb->format->num_planes; i++) {
- seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
- i, fb->offsets[i], fb->pitches[i]);
- drm_gem_cma_describe(drm_fb_cma_get_gem_obj(fb, i), m);
- }
-}
-
-/**
- * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
- * in debugfs.
- * @m: output file
- * @arg: private data for the callback
- */
-int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_framebuffer *fb;
-
- mutex_lock(&dev->mode_config.fb_lock);
- drm_for_each_fb(fb, dev)
- drm_fb_cma_describe(fb, m);
- mutex_unlock(&dev->mode_config.fb_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
-#endif
-
static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
return dma_mmap_writecombine(info->device, vma, info->screen_base,
@@ -293,7 +257,7 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
fbi->screen_size = size;
fbi->fix.smem_len = size;
- if (fbdev_cma->fb_funcs->dirty) {
+ if (fb->funcs->dirty) {
ret = drm_fbdev_cma_defio_init(fbi, obj);
if (ret)
goto err_cma_destroy;
@@ -315,6 +279,118 @@ static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
};
/**
+ * drm_fb_cma_fbdev_init_with_funcs() - Allocate and initialize fbdev emulation
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ * @max_conn_count: Maximum number of connectors.
+ * @dev->mode_config.num_connector is used if this is zero.
+ * @funcs: Framebuffer functions, in particular a custom dirty() callback.
+ * Can be NULL.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int max_conn_count,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_fbdev_cma *fbdev_cma;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ if (!preferred_bpp)
+ preferred_bpp = dev->mode_config.preferred_depth;
+ if (!preferred_bpp)
+ preferred_bpp = 32;
+
+ if (!max_conn_count)
+ max_conn_count = dev->mode_config.num_connector;
+
+ fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+ if (!fbdev_cma)
+ return -ENOMEM;
+
+ fbdev_cma->fb_funcs = funcs;
+ fb_helper = &fbdev_cma->fb_helper;
+
+ drm_fb_helper_prepare(dev, fb_helper, &drm_fb_cma_helper_funcs);
+
+ ret = drm_fb_helper_init(dev, fb_helper, max_conn_count);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_free:
+ kfree(fbdev_cma);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init_with_funcs);
+
+/**
+ * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ * @max_conn_count: Maximum number of connectors.
+ * @dev->mode_config.num_connector is used if this is zero.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
+ unsigned int max_conn_count)
+{
+ return drm_fb_cma_fbdev_init_with_funcs(dev, preferred_bpp,
+ max_conn_count, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
+
+/**
+ * drm_fb_cma_fbdev_fini() - Teardown fbdev emulation
+ * @dev: DRM device
+ */
+void drm_fb_cma_fbdev_fini(struct drm_device *dev)
+{
+ struct drm_fb_helper *fb_helper = dev->fb_helper;
+
+ if (!fb_helper)
+ return;
+
+ /* Unregister if it hasn't been done already */
+ if (fb_helper->fbdev && fb_helper->fbdev->dev)
+ drm_fb_helper_unregister_fbi(fb_helper);
+
+ if (fb_helper->fbdev)
+ drm_fbdev_cma_defio_fini(fb_helper->fbdev);
+
+ if (fb_helper->fb)
+ drm_framebuffer_remove(fb_helper->fb);
+
+ drm_fb_helper_fini(fb_helper);
+ kfree(to_fbdev_cma(fb_helper));
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_fini);
+
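For CMA-based drivers the new pair shrinks fbdev handling to a single init and a single fini call; a hedged sketch (the foo_ wrappers are assumptions):

	/* Hedged sketch; the foo_* wrappers are assumptions. */
	static int foo_fbdev_setup(struct drm_device *drm)
	{
		/* 32 bpp preferred; 0 falls back to mode_config.num_connector. */
		return drm_fb_cma_fbdev_init(drm, 32, 0);
	}

	static void foo_fbdev_cleanup(struct drm_device *drm)
	{
		drm_fb_cma_fbdev_fini(drm);
	}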
+/**
* drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e561663..0646b10 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -41,6 +41,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include "drm_crtc_internal.h"
#include "drm_crtc_helper_internal.h"
static bool drm_fbdev_emulation = true;
@@ -65,19 +66,23 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
- * Initialization is done as a four-step process with drm_fb_helper_prepare(),
- * drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and
- * drm_fb_helper_initial_config(). Drivers with fancier requirements than the
- * default behaviour can override the third step with their own code.
- * Teardown is done with drm_fb_helper_fini() after the fbdev device is
- * unregisters using drm_fb_helper_unregister_fbi().
+ * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
+ * down by calling drm_fb_helper_fbdev_teardown().
*
- * At runtime drivers should restore the fbdev console by calling
- * drm_fb_helper_restore_fbdev_mode_unlocked() from their &drm_driver.lastclose
- * callback. They should also notify the fb helper code from updates to the
- * output configuration by calling drm_fb_helper_hotplug_event(). For easier
- * integration with the output polling code in drm_crtc_helper.c the modeset
- * code provides a &drm_mode_config_funcs.output_poll_changed callback.
+ * Drivers that need to handle connector hotplugging (e.g. dp mst) can't use
+ * the setup helper and will need to do the whole four-step setup process with
+ * drm_fb_helper_prepare(), drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors(), enable hotplugging and
+ * drm_fb_helper_initial_config() to avoid a possible race window.
+ *
+ * At runtime drivers should restore the fbdev console by using
+ * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
+ * They should also notify the fb helper code from updates to the output
+ * configuration by using drm_fb_helper_output_poll_changed() as their
+ * &drm_mode_config_funcs.output_poll_changed callback.
+ *
+ * For suspend/resume consider using drm_mode_config_helper_suspend() and
+ * drm_mode_config_helper_resume() which takes care of fbdev as well.
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
@@ -102,7 +107,8 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
- * mmap page writes.
+ * mmap page writes. Drivers can use drm_fb_helper_defio_init() to set up
+ * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
*/
#define drm_fb_helper_for_each_connector(fbh, i__) \
@@ -150,6 +156,9 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
{
int err;
+ if (!fb_helper)
+ return 0;
+
mutex_lock(&fb_helper->lock);
err = __drm_fb_helper_add_one_connector(fb_helper, connector);
mutex_unlock(&fb_helper->lock);
@@ -161,7 +170,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
/**
* drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
* emulation helper
- * @fb_helper: fbdev initialized with drm_fb_helper_init
+ * @fb_helper: fbdev initialized with drm_fb_helper_init, can be NULL
*
* This functions adds all the available connectors for use with the given
* fb_helper. This is a separate step to allow drivers to freely assign
@@ -174,14 +183,16 @@ EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
*/
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
- struct drm_device *dev = fb_helper->dev;
+ struct drm_device *dev;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
int i, ret = 0;
- if (!drm_fbdev_emulation)
+ if (!drm_fbdev_emulation || !fb_helper)
return 0;
+ dev = fb_helper->dev;
+
mutex_lock(&fb_helper->lock);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
@@ -245,6 +256,9 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
{
int err;
+ if (!fb_helper)
+ return 0;
+
mutex_lock(&fb_helper->lock);
err = __drm_fb_helper_remove_one_connector(fb_helper, connector);
mutex_unlock(&fb_helper->lock);
@@ -350,6 +364,7 @@ EXPORT_SYMBOL(drm_fb_helper_debug_leave);
static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool active)
{
struct drm_device *dev = fb_helper->dev;
+ struct drm_plane_state *plane_state;
struct drm_plane *plane;
struct drm_atomic_state *state;
int i, ret;
@@ -368,8 +383,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
retry:
plane_mask = 0;
drm_for_each_plane(plane, dev) {
- struct drm_plane_state *plane_state;
-
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
@@ -392,6 +405,11 @@ retry:
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+ struct drm_plane *primary = mode_set->crtc->primary;
+
+ /* Cannot fail as we've already gotten the plane state above */
+ plane_state = drm_atomic_get_new_plane_state(state, primary);
+ plane_state->rotation = fb_helper->crtc_info[i].rotation;
ret = __drm_atomic_helper_set_config(mode_set, state);
if (ret != 0)
@@ -484,7 +502,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
- * @fb_helper: fbcon to restore
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
*
* This should be called from driver's drm &drm_driver.lastclose callback
* when implementing an fbcon on top of kms using this helper. This ensures that
@@ -498,7 +516,7 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
bool do_delayed;
int ret;
- if (!drm_fbdev_emulation)
+ if (!drm_fbdev_emulation || !fb_helper)
return -ENODEV;
if (READ_ONCE(fb_helper->deferred_setup))
@@ -793,8 +811,10 @@ int drm_fb_helper_init(struct drm_device *dev,
struct drm_mode_config *config = &dev->mode_config;
int i;
- if (!drm_fbdev_emulation)
+ if (!drm_fbdev_emulation) {
+ dev->fb_helper = fb_helper;
return 0;
+ }
if (!max_conn_count)
return -EINVAL;
@@ -821,6 +841,7 @@ int drm_fb_helper_init(struct drm_device *dev,
if (!fb_helper->crtc_info[i].mode_set.connectors)
goto out_free;
fb_helper->crtc_info[i].mode_set.num_connectors = 0;
+ fb_helper->crtc_info[i].rotation = DRM_MODE_ROTATE_0;
}
i = 0;
@@ -829,6 +850,8 @@ int drm_fb_helper_init(struct drm_device *dev,
i++;
}
+ dev->fb_helper = fb_helper;
+
return 0;
out_free:
drm_fb_helper_crtc_free(fb_helper);
@@ -883,7 +906,7 @@ EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);
/**
* drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
- * @fb_helper: driver-allocated fbdev helper
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
*
* A wrapper around unregister_framebuffer, to release the fb_info
* framebuffer device. This must be called before releasing all resources for
@@ -898,7 +921,7 @@ EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
/**
* drm_fb_helper_fini - finalize a &struct drm_fb_helper
- * @fb_helper: driver-allocated fbdev helper
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
*
* This cleans up all remaining resources associated with @fb_helper. Must be
* called after drm_fb_helper_unlink_fbi() was called.
@@ -907,7 +930,12 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
struct fb_info *info;
- if (!drm_fbdev_emulation || !fb_helper)
+ if (!fb_helper)
+ return;
+
+ fb_helper->dev->fb_helper = NULL;
+
+ if (!drm_fbdev_emulation)
return;
cancel_work_sync(&fb_helper->resume_work);
@@ -937,7 +965,7 @@ EXPORT_SYMBOL(drm_fb_helper_fini);
/**
* drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
- * @fb_helper: driver-allocated fbdev helper
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
*
* A wrapper around unlink_framebuffer implemented by fbdev core
*/
@@ -1002,6 +1030,49 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
/**
+ * drm_fb_helper_defio_init - fbdev deferred I/O initialization
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * This function allocates &fb_deferred_io, sets the callback to
+ * drm_fb_helper_deferred_io(), sets the delay to 50 ms and calls fb_deferred_io_init().
+ * It should be called from the &drm_fb_helper_funcs->fb_probe callback.
+ * drm_fb_helper_fbdev_teardown() cleans up deferred I/O.
+ *
+ * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done
+ * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby
+ * affect other instances of that &fb_ops.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
+{
+ struct fb_info *info = fb_helper->fbdev;
+ struct fb_deferred_io *fbdefio;
+ struct fb_ops *fbops;
+
+ fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
+ fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+ if (!fbdefio || !fbops) {
+ kfree(fbdefio);
+ kfree(fbops);
+ return -ENOMEM;
+ }
+
+ info->fbdefio = fbdefio;
+ fbdefio->delay = msecs_to_jiffies(50);
+ fbdefio->deferred_io = drm_fb_helper_deferred_io;
+
+ *fbops = *info->fbops;
+ info->fbops = fbops;
+
+ fb_deferred_io_init(info);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_defio_init);
+
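drm_fb_helper_defio_init() is intended to run from the driver's &drm_fb_helper_funcs.fb_probe hook once the fb_info exists; a hedged sketch (the foo_ name and the elided allocation code are assumptions):

	static int foo_fb_probe(struct drm_fb_helper *fb_helper,
				struct drm_fb_helper_surface_size *sizes)
	{
		int ret;

		/* ... drm_fb_helper_alloc_fbi(), framebuffer creation, fix/var setup ... */

		ret = drm_fb_helper_defio_init(fb_helper);
		if (ret)
			return ret;

		return 0;
	}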
+/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
* @buf: userspace buffer to read from framebuffer memory
@@ -1138,7 +1209,7 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
/**
* drm_fb_helper_set_suspend - wrapper around fb_set_suspend
- * @fb_helper: driver-allocated fbdev helper
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
* @suspend: whether to suspend or resume
*
* A wrapper around fb_set_suspend implemented by fbdev core.
@@ -1155,7 +1226,7 @@ EXPORT_SYMBOL(drm_fb_helper_set_suspend);
/**
* drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also
* takes the console lock
- * @fb_helper: driver-allocated fbdev helper
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
* @suspend: whether to suspend or resume
*
* A wrapper around fb_set_suspend() that takes the console lock. If the lock
@@ -1280,7 +1351,7 @@ static struct drm_property_blob *setcmap_new_gamma_lut(struct drm_crtc *crtc,
if (IS_ERR(gamma_lut))
return gamma_lut;
- lut = (struct drm_color_lut *)gamma_lut->data;
+ lut = gamma_lut->data;
if (cmap->start || cmap->len != size) {
u16 *r = crtc->gamma_store;
u16 *g = r + crtc->gamma_size;
@@ -1825,6 +1896,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (ret < 0)
return ret;
+ strcpy(fb_helper->fb->comm, "[fbcon]");
return 0;
}
@@ -2342,6 +2414,62 @@ out:
return best_score;
}
+/*
+ * This function checks if rotation is necessary because of panel orientation
+ * and if it is, if it is supported.
+ * If rotation is necessary and supported, it gets set in fb_crtc.rotation.
+ * If rotation is necessary but not supported, a DRM_MODE_ROTATE_* flag gets
+ * or-ed into fb_helper->sw_rotations. In drm_setup_crtcs_fb() we check if only
+ * one bit is set and then we set fb_info.fbcon_rotate_hint to make fbcon do
+ * the unsupported rotation.
+ */
+static void drm_setup_crtc_rotation(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc *fb_crtc,
+ struct drm_connector *connector)
+{
+ struct drm_plane *plane = fb_crtc->mode_set.crtc->primary;
+ uint64_t valid_mask = 0;
+ int i, rotation;
+
+ fb_crtc->rotation = DRM_MODE_ROTATE_0;
+
+ switch (connector->display_info.panel_orientation) {
+ case DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP:
+ rotation = DRM_MODE_ROTATE_180;
+ break;
+ case DRM_MODE_PANEL_ORIENTATION_LEFT_UP:
+ rotation = DRM_MODE_ROTATE_90;
+ break;
+ case DRM_MODE_PANEL_ORIENTATION_RIGHT_UP:
+ rotation = DRM_MODE_ROTATE_270;
+ break;
+ default:
+ rotation = DRM_MODE_ROTATE_0;
+ }
+
+ /*
+ * TODO: support 90 / 270 degree hardware rotation,
+ * depending on the hardware this may require the framebuffer
+ * to be in a specific tiling format.
+ */
+ if (rotation != DRM_MODE_ROTATE_180 || !plane->rotation_property) {
+ fb_helper->sw_rotations |= rotation;
+ return;
+ }
+
+ for (i = 0; i < plane->rotation_property->num_values; i++)
+ valid_mask |= (1ULL << plane->rotation_property->values[i]);
+
+ if (!(rotation & valid_mask)) {
+ fb_helper->sw_rotations |= rotation;
+ return;
+ }
+
+ fb_crtc->rotation = rotation;
+ /* Rotating in hardware, fbcon should not rotate */
+ fb_helper->sw_rotations |= DRM_MODE_ROTATE_0;
+}
+
static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
u32 width, u32 height)
{
@@ -2401,6 +2529,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
drm_fb_helper_modeset_release(fb_helper,
&fb_helper->crtc_info[i].mode_set);
+ fb_helper->sw_rotations = 0;
drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
@@ -2420,6 +2549,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
drm_connector_get(connector);
+ drm_setup_crtc_rotation(fb_helper, fb_crtc, connector);
modeset->connectors[modeset->num_connectors++] = connector;
modeset->x = offset->x;
modeset->y = offset->y;
@@ -2461,6 +2591,28 @@ static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper)
}
}
mutex_unlock(&fb_helper->dev->mode_config.mutex);
+
+ switch (fb_helper->sw_rotations) {
+ case DRM_MODE_ROTATE_0:
+ info->fbcon_rotate_hint = FB_ROTATE_UR;
+ break;
+ case DRM_MODE_ROTATE_90:
+ info->fbcon_rotate_hint = FB_ROTATE_CCW;
+ break;
+ case DRM_MODE_ROTATE_180:
+ info->fbcon_rotate_hint = FB_ROTATE_UD;
+ break;
+ case DRM_MODE_ROTATE_270:
+ info->fbcon_rotate_hint = FB_ROTATE_CW;
+ break;
+ default:
+ /*
+ * Multiple bits are set / multiple rotations requested;
+ * fbcon cannot handle separate rotation settings per
+ * output, so fall back to unrotated.
+ */
+ info->fbcon_rotate_hint = FB_ROTATE_UR;
+ }
}
/* Note: Drops fb_helper->lock before returning. */
@@ -2576,7 +2728,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
* probing all the outputs attached to the fb
- * @fb_helper: the drm_fb_helper
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
*
* Scan the connectors attached to the fb_helper and try to put together a
* setup after notification of a change in output configuration.
@@ -2598,7 +2750,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
int err = 0;
- if (!drm_fbdev_emulation)
+ if (!drm_fbdev_emulation || !fb_helper)
return 0;
mutex_lock(&fb_helper->lock);
@@ -2626,6 +2778,148 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+/**
+ * drm_fb_helper_fbdev_setup() - Setup fbdev emulation
+ * @dev: DRM device
+ * @fb_helper: fbdev helper structure to set up
+ * @funcs: fbdev helper functions
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ * @max_conn_count: Maximum number of connectors.
+ * @dev->mode_config.num_connector is used if this is zero.
+ *
+ * This function sets up fbdev emulation and registers fbdev for access by
+ * userspace. If all connectors are disconnected, setup is deferred to the next
+ * time drm_fb_helper_hotplug_event() is called.
+ * The caller must provide a &drm_fb_helper_funcs->fb_probe callback
+ * function.
+ *
+ * See also: drm_fb_helper_initial_config()
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_helper_fbdev_setup(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ const struct drm_fb_helper_funcs *funcs,
+ unsigned int preferred_bpp,
+ unsigned int max_conn_count)
+{
+ int ret;
+
+ if (!preferred_bpp)
+ preferred_bpp = dev->mode_config.preferred_depth;
+ if (!preferred_bpp)
+ preferred_bpp = 32;
+
+ if (!max_conn_count)
+ max_conn_count = dev->mode_config.num_connector;
+ if (!max_conn_count) {
+ DRM_DEV_ERROR(dev->dev, "No connectors\n");
+ return -EINVAL;
+ }
+
+ drm_fb_helper_prepare(dev, fb_helper, funcs);
+
+ ret = drm_fb_helper_init(dev, fb_helper, max_conn_count);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper\n");
+ return ret;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to add connectors\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_fbdev_setup);
+
+/**
+ * drm_fb_helper_fbdev_teardown - Tear down fbdev emulation
+ * @dev: DRM device
+ *
+ * This function unregisters fbdev if not already done and cleans up the
+ * associated resources including the &drm_framebuffer.
+ * The driver is responsible for freeing the &drm_fb_helper structure which is
+ * stored in &drm_device->fb_helper. Do note that this pointer has been cleared
+ * when this function returns.
+ *
+ * In order to support device removal/unplug while file handles are still open,
+ * drm_fb_helper_unregister_fbi() should be called on device removal and
+ * drm_fb_helper_fbdev_teardown() in the &drm_driver->release callback when
+ * file handles are closed.
+ */
+void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
+{
+ struct drm_fb_helper *fb_helper = dev->fb_helper;
+ struct fb_ops *fbops = NULL;
+
+ if (!fb_helper)
+ return;
+
+ /* Unregister if it hasn't been done already */
+ if (fb_helper->fbdev && fb_helper->fbdev->dev)
+ drm_fb_helper_unregister_fbi(fb_helper);
+
+ if (fb_helper->fbdev && fb_helper->fbdev->fbdefio) {
+ fb_deferred_io_cleanup(fb_helper->fbdev);
+ kfree(fb_helper->fbdev->fbdefio);
+ fbops = fb_helper->fbdev->fbops;
+ }
+
+ drm_fb_helper_fini(fb_helper);
+ kfree(fbops);
+
+ if (fb_helper->fb)
+ drm_framebuffer_remove(fb_helper->fb);
+}
+EXPORT_SYMBOL(drm_fb_helper_fbdev_teardown);
+
+/**
+ * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
+ * @dev: DRM device
+ *
+ * This function can be used as the &drm_driver->lastclose callback for drivers
+ * that only need to call drm_fb_helper_restore_fbdev_mode_unlocked().
+ */
+void drm_fb_helper_lastclose(struct drm_device *dev)
+{
+ drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
+}
+EXPORT_SYMBOL(drm_fb_helper_lastclose);
+
+/**
+ * drm_fb_helper_output_poll_changed - DRM mode config \.output_poll_changed
+ * helper for fbdev emulation
+ * @dev: DRM device
+ *
+ * This function can be used as the
+ * &drm_mode_config_funcs.output_poll_changed callback for drivers that only
+ * need to call drm_fb_helper_hotplug_event().
+ */
+void drm_fb_helper_output_poll_changed(struct drm_device *dev)
+{
+ drm_fb_helper_hotplug_event(dev->fb_helper);
+}
+EXPORT_SYMBOL(drm_fb_helper_output_poll_changed);
+
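Taken together, the new entry points let a simple driver hand nearly all fbdev plumbing to the core: drm_fb_helper_lastclose() as the lastclose callback, drm_fb_helper_output_poll_changed() as the output_poll_changed callback, and one drm_fb_helper_fbdev_setup()/teardown() pair. A hedged sketch; the foo_ names and foo_fb_helper_funcs are assumptions:

	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
		/* ... .fb_create, .atomic_check, .atomic_commit ... */
		.output_poll_changed = drm_fb_helper_output_poll_changed,
	};

	static struct drm_driver foo_driver = {
		/* ... */
		.lastclose = drm_fb_helper_lastclose,
	};

	static int foo_fbdev_setup(struct drm_device *drm,
				   struct drm_fb_helper *fb_helper)
	{
		/* 0, 0: fall back to preferred_depth and num_connector. */
		return drm_fb_helper_fbdev_setup(drm, fb_helper,
						 &foo_fb_helper_funcs, 0, 0);
	}

	static void foo_unbind(struct drm_device *drm)
	{
		drm_fb_helper_fbdev_teardown(drm);
	}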
/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 9a17bd3..e394799 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -567,7 +567,7 @@ __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
poll_wait(filp, &file_priv->event_wait, wait);
if (!list_empty(&file_priv->event_list))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 9c0152d..5ca6395 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -112,18 +112,18 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_XBGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
@@ -132,26 +132,26 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_RGB565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_BGR565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_BGR565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1 },
- { .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
{ .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
{ .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
@@ -172,7 +172,7 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
- { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
};
unsigned int i;
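The new .has_alpha flag lets core code and drivers ask the format info directly whether a fourcc carries per-pixel alpha, instead of maintaining separate format lists; a short illustrative helper:

	/* Illustrative only: querying the new per-format alpha flag. */
	static bool format_has_per_pixel_alpha(u32 fourcc)
	{
		const struct drm_format_info *info = drm_format_info(fourcc);

		return info && info->has_alpha;
	}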
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 279c103..ad67203 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -25,7 +25,9 @@
#include <drm/drm_auth.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+#include "drm_internal.h"
#include "drm_crtc_internal.h"
/**
@@ -78,11 +80,12 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
src_h > fb_height ||
src_y > fb_height - src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
- "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
- src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
+ src_y >> 16, ((src_y & 0xffff) * 15625) >> 10,
+ fb->width, fb->height);
return -ENOSPC;
}
@@ -118,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev,
r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
r.handles[0] = or->handle;
+ if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
+ dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
+ r.pixel_format = DRM_FORMAT_XBGR2101010;
+
ret = drm_mode_addfb2(dev, &r, file_priv);
if (ret)
return ret;
@@ -155,9 +162,10 @@ static int framebuffer_check(struct drm_device *dev,
info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
if (!info) {
struct drm_format_name_buf format_name;
+
DRM_DEBUG_KMS("bad framebuffer format %s\n",
- drm_get_format_name(r->pixel_format,
- &format_name));
+ drm_get_format_name(r->pixel_format,
+ &format_name));
return -EINVAL;
}
@@ -454,6 +462,12 @@ int drm_mode_getfb(struct drm_device *dev,
if (!fb)
return -ENOENT;
+ /* Multi-planar framebuffers need getfb2. */
+ if (fb->format->num_planes > 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
r->height = fb->height;
r->width = fb->width;
r->depth = fb->format->depth;
@@ -477,6 +491,7 @@ int drm_mode_getfb(struct drm_device *dev,
ret = -ENODEV;
}
+out:
drm_framebuffer_put(fb);
return ret;
@@ -661,6 +676,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
INIT_LIST_HEAD(&fb->filp_head);
fb->funcs = funcs;
+ strcpy(fb->comm, current->comm);
ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
false, drm_framebuffer_free);
@@ -766,14 +782,18 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
struct drm_plane *plane;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
- int i, ret = 0;
+ int i, ret;
unsigned plane_mask;
+ bool disable_crtcs = false;
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
-
+retry_disable:
drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
+ }
state->acquire_ctx = &ctx;
retry:
@@ -794,7 +814,7 @@ retry:
goto unlock;
}
- if (plane_state->crtc->primary == plane) {
+ if (disable_crtcs && plane_state->crtc->primary == plane) {
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
@@ -819,6 +839,7 @@ retry:
plane->old_fb = plane->fb;
}
+ /* This list is only filled when disable_crtcs is set. */
for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
@@ -841,9 +862,15 @@ unlock:
drm_atomic_state_put(state);
+out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
+ if (ret == -EINVAL && !disable_crtcs) {
+ disable_crtcs = true;
+ goto retry_disable;
+ }
+
return ret;
}
@@ -957,3 +984,61 @@ int drm_framebuffer_plane_height(int height,
return fb_plane_height(height, fb->format, plane);
}
EXPORT_SYMBOL(drm_framebuffer_plane_height);
+
+void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_framebuffer *fb)
+{
+ struct drm_format_name_buf format_name;
+ unsigned int i;
+
+ drm_printf_indent(p, indent, "allocated by = %s\n", fb->comm);
+ drm_printf_indent(p, indent, "refcount=%u\n",
+ drm_framebuffer_read_refcount(fb));
+ drm_printf_indent(p, indent, "format=%s\n",
+ drm_get_format_name(fb->format->format, &format_name));
+ drm_printf_indent(p, indent, "modifier=0x%llx\n", fb->modifier);
+ drm_printf_indent(p, indent, "size=%ux%u\n", fb->width, fb->height);
+ drm_printf_indent(p, indent, "layers:\n");
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ drm_printf_indent(p, indent + 1, "size[%u]=%dx%d\n", i,
+ drm_framebuffer_plane_width(fb->width, fb, i),
+ drm_framebuffer_plane_height(fb->height, fb, i));
+ drm_printf_indent(p, indent + 1, "pitch[%u]=%u\n", i, fb->pitches[i]);
+ drm_printf_indent(p, indent + 1, "offset[%u]=%u\n", i, fb->offsets[i]);
+ drm_printf_indent(p, indent + 1, "obj[%u]:%s\n", i,
+ fb->obj[i] ? "" : "(null)");
+ if (fb->obj[i])
+ drm_gem_print_info(p, indent + 2, fb->obj[i]);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ drm_for_each_fb(fb, dev) {
+ drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
+ drm_framebuffer_print_info(&p, 1, fb);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static const struct drm_info_list drm_framebuffer_debugfs_list[] = {
+ { "framebuffer", drm_framebuffer_info, 0 },
+};
+
+int drm_framebuffer_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(drm_framebuffer_debugfs_list,
+ ARRAY_SIZE(drm_framebuffer_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif
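For illustration, given the format strings above, the new debugfs "framebuffer" file would emit output along these lines (all values below are made up):

framebuffer[35]:
	allocated by = Xorg
	refcount=2
	format=XR24 little-endian (0x34325258)
	modifier=0x0
	size=1920x1080
	layers:
		size[0]=1920x1080
		pitch[0]=7680
		offset[0]=0
		obj[0]:
			name=0
			refcount=3
			start=00010000
			size=8294400
			imported=no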
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 55d6182..4975ba9 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -40,6 +40,7 @@
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
#include "drm_internal.h"
/** @file drm_gem.c
@@ -97,7 +98,7 @@ drm_gem_init(struct drm_device *dev)
struct drm_vma_offset_manager *vma_offset_manager;
mutex_init(&dev->object_name_lock);
- idr_init(&dev->object_name_idr);
+ idr_init_base(&dev->object_name_idr, 1);
vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
if (!vma_offset_manager) {
@@ -348,7 +349,7 @@ EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
* @file: drm file-private structure to remove the dumb handle from
* @dev: corresponding drm_device
* @handle: the dumb handle to remove
- *
+ *
* This implements the &drm_driver.dumb_destroy kms driver callback for drivers
* which use gem to manage their backing storage.
*/
@@ -365,7 +366,7 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
* @handlep: pointer to return the created handle to the caller
- *
+ *
* This expects the &drm_device.object_name_lock to be held already and will
* drop it before returning. Used to avoid races in establishing new handles
* when importing an object from either an flink name or a dma-buf.
@@ -775,7 +776,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
- idr_init(&file_private->object_idr);
+ idr_init_base(&file_private->object_idr, 1);
spin_lock_init(&file_private->table_lock);
}
@@ -1040,3 +1041,19 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
+
+void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ drm_printf_indent(p, indent, "name=%d\n", obj->name);
+ drm_printf_indent(p, indent, "refcount=%u\n",
+ kref_read(&obj->refcount));
+ drm_printf_indent(p, indent, "start=%08lx\n",
+ drm_vma_node_start(&obj->vma_node));
+ drm_printf_indent(p, indent, "size=%zu\n", obj->size);
+ drm_printf_indent(p, indent, "imported=%s\n",
+ obj->import_attach ? "yes" : "no");
+
+ if (obj->dev->driver->gem_print_info)
+ obj->dev->driver->gem_print_info(p, indent, obj);
+}
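A minimal sketch of the per-driver hook that drm_gem_print_info() calls at the end; the driver name, object struct and fields are purely illustrative, only the &drm_driver.gem_print_info callback itself comes from this patch:

struct foo_gem_object {
	struct drm_gem_object base;
	dma_addr_t dma_addr;
	bool pinned;
};

static void foo_gem_print_info(struct drm_printer *p, unsigned int indent,
			       const struct drm_gem_object *obj)
{
	const struct foo_gem_object *bo =
		container_of(obj, struct foo_gem_object, base);

	/* Driver-specific state appended below the core fields */
	drm_printf_indent(p, indent, "pinned=%s\n", bo->pinned ? "yes" : "no");
	drm_printf_indent(p, indent, "dma_addr=%pad\n", &bo->dma_addr);
}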
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 020e766..80a5115 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -397,31 +397,24 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
-#ifdef CONFIG_DEBUG_FS
/**
- * drm_gem_cma_describe - describe a CMA GEM object for debugfs
- * @cma_obj: CMA GEM object
- * @m: debugfs file handle
+ * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
*
- * This function can be used to dump a human-readable representation of the
- * CMA GEM object into a synthetic file.
+ * This function can be used as the &drm_driver->gem_print_info callback.
+ * It prints paddr and vaddr for use in e.g. debugfs output.
*/
-void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
- struct seq_file *m)
+void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
{
- struct drm_gem_object *obj = &cma_obj->base;
- uint64_t off;
-
- off = drm_vma_node_start(&obj->vma_node);
-
- seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
- obj->name, kref_read(&obj->refcount),
- off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
+ const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
- seq_printf(m, "\n");
+ drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
+ drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
-#endif
+EXPORT_SYMBOL(drm_gem_cma_print_info);
/**
* drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
@@ -482,8 +475,26 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
{
struct drm_gem_cma_object *cma_obj;
- if (sgt->nents != 1)
- return ERR_PTR(-EINVAL);
+ if (sgt->nents != 1) {
+ /* check if the entries in the sg_table are contiguous */
+ dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ /*
+ * sg_dma_address(s) is only valid for entries
+ * that have sg_dma_len(s) != 0
+ */
+ if (!sg_dma_len(s))
+ continue;
+
+ if (sg_dma_address(s) != next_addr)
+ return ERR_PTR(-EINVAL);
+
+ next_addr = sg_dma_address(s) + sg_dma_len(s);
+ }
+ }
/* Create a CMA GEM buffer. */
cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
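For CMA-based drivers, wiring up the conversion above is typically a one-line change in the &drm_driver definition (sketch, the driver struct name is illustrative):

static struct drm_driver bar_drm_driver = {
	/* ... existing GEM/CMA callbacks ... */
	.dumb_create		= drm_gem_cma_dumb_create,
	.gem_print_info		= drm_gem_cma_print_info,
};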
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index aa8cb9b..4d682a6 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -272,7 +272,8 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
* @sizes: fbdev size description
* @pitch_align: Optional pitch alignment
* @obj: GEM object backing the framebuffer
- * @funcs: vtable to be used for the new framebuffer object
+ * @funcs: Optional vtable to be used for the new framebuffer object when the
+ * dirty callback is needed.
*
* This function creates a framebuffer from a &drm_fb_helper_surface_size
* description for use in the &drm_fb_helper_funcs.fb_probe callback.
@@ -300,6 +301,9 @@ drm_gem_fbdev_fb_create(struct drm_device *dev,
if (obj->size < mode_cmd.pitches[0] * mode_cmd.height)
return ERR_PTR(-EINVAL);
+ if (!funcs)
+ funcs = &drm_gem_fb_funcs;
+
return drm_gem_fb_alloc(dev, &mode_cmd, &obj, 1, funcs);
}
EXPORT_SYMBOL(drm_gem_fbdev_fb_create);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index c9d5a6c..b72242e 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -111,6 +111,8 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj);
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
@@ -178,3 +180,8 @@ int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+
+/* drm_framebuffer.c */
+void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_framebuffer *fb);
+int drm_framebuffer_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 4aafe48..af78291 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -509,7 +509,7 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return -EACCES;
/* MASTER is only for master or control clients */
- if (unlikely((flags & DRM_MASTER) &&
+ if (unlikely((flags & DRM_MASTER) &&
!drm_is_current_master(file_priv) &&
!drm_is_control_client(file_priv)))
return -EACCES;
@@ -704,7 +704,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
*
* ##define DRM_IOCTL_MY_DRIVER_OPERATION \
* DRM_IOW(DRM_COMMAND_BASE, struct my_driver_operation)
- *
+ *
* DRM driver private IOCTL must be in the range from DRM_COMMAND_BASE to
* DRM_COMMAND_END. Finally you need an array of &struct drm_ioctl_desc to wire
* up the handlers and set the access rights::
@@ -848,7 +848,7 @@ long drm_ioctl(struct file *filp,
if (kdata != stack_kdata)
kfree(kdata);
if (retcode)
- DRM_DEBUG("ret = %d\n", retcode);
+ DRM_DEBUG("pid=%d, ret = %d\n", task_pid_nr(current), retcode);
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 1402c0e..d345563 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(drm_lease_owner);
/**
* _drm_find_lessee - find lessee by id (idr_mutex held)
* @master: drm_master of lessor
- * @id: lessee_id
+ * @lessee_id: id of the lessee to find
*
* RETURN:
*
@@ -101,7 +101,7 @@ static bool _drm_has_leased(struct drm_master *master, int id)
/**
* _drm_lease_held - check drm_mode_object lease status (idr_mutex held)
- * @master: the drm_master
+ * @file_priv: the master drm_file
* @id: the object id
*
* Checks if the specified master holds a lease on the object. Return
@@ -121,7 +121,7 @@ EXPORT_SYMBOL(_drm_lease_held);
/**
* drm_lease_held - check drm_mode_object lease status (idr_mutex not held)
- * @master: the drm_master
+ * @file_priv: the master drm_file
* @id: the object id
*
* Checks if the specified master holds a lease on the object. Return
@@ -149,7 +149,7 @@ EXPORT_SYMBOL(drm_lease_held);
/**
* drm_lease_filter_crtcs - restricted crtc set to leased values (idr_mutex not held)
* @file_priv: requestor file
- * @crtcs: bitmask of crtcs to check
+ * @crtcs_in: bitmask of crtcs to check
*
* Reconstructs a crtc mask based on the crtcs which are visible
* through the specified file.
@@ -305,7 +305,7 @@ void drm_lease_destroy(struct drm_master *master)
/**
* _drm_lease_revoke - revoke access to all leased objects (idr_mutex held)
- * @master: the master losing its lease
+ * @top: the master losing its lease
*/
static void _drm_lease_revoke(struct drm_master *top)
{
@@ -482,7 +482,7 @@ out_free_objects:
* drm_mode_create_lease_ioctl - create a new lease
* @dev: the drm device
* @data: pointer to struct drm_mode_create_lease
- * @file_priv: the file being manipulated
+ * @lessor_priv: the file being manipulated
*
* The master associated with the specified file will have a lease
* created containing the objects specified in the ioctl structure.
@@ -662,7 +662,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
* drm_mode_get_lease_ioctl - list leased objects
* @dev: the drm device
* @data: pointer to struct drm_mode_get_lease
- * @file_priv: the file being manipulated
+ * @lessee_priv: the file being manipulated
*
* Return the list of leased objects for the specified lessee
*/
@@ -722,7 +722,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
* drm_mode_revoke_lease_ioctl - revoke lease
* @dev: the drm device
* @data: pointer to struct drm_mode_revoke_lease
- * @file_priv: the file being manipulated
+ * @lessor_priv: the file being manipulated
*
* This removes all of the objects from the lease without
* actually getting rid of the lease itself; that way all
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index fc0ebd2..3c54044 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -149,3 +149,16 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
+
+u64 drm_get_max_iomem(void)
+{
+ struct resource *tmp;
+ resource_size_t max_iomem = 0;
+
+ for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
+ max_iomem = max(max_iomem, tmp->end);
+ }
+
+ return max_iomem;
+}
+EXPORT_SYMBOL(drm_get_max_iomem);
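drm_get_max_iomem() simply returns the highest end address among the top-level iomem resources; a caller might use it, for instance, to decide whether a narrow DMA mask can still reach all MMIO regions (sketch, the 40-bit threshold is invented for illustration):

	/* Hypothetical check in a driver's init path */
	if (drm_get_max_iomem() > DMA_BIT_MASK(40))
		dev_info(dev, "MMIO above 40 bits, keeping a 64-bit DMA mask\n");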
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 4b47226..bc73b7f 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -498,8 +498,9 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi)
.tx_buf = (u8 [2]) { 0, 0 },
.tx_len = 2,
};
+ int ret = mipi_dsi_device_transfer(dsi, &msg);
- return mipi_dsi_device_transfer(dsi, &msg);
+ return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral);
@@ -517,8 +518,9 @@ int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi)
.tx_buf = (u8 [2]) { 0, 0 },
.tx_len = 2,
};
+ int ret = mipi_dsi_device_transfer(dsi, &msg);
- return mipi_dsi_device_transfer(dsi, &msg);
+ return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral);
@@ -541,8 +543,9 @@ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
.tx_len = sizeof(tx),
.tx_buf = tx,
};
+ int ret = mipi_dsi_device_transfer(dsi, &msg);
- return mipi_dsi_device_transfer(dsi, &msg);
+ return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c3c79ee..3166026 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -92,7 +92,7 @@
* some basic allocator dumpers for debugging.
*
* Note that this range allocator is not thread-safe, drivers need to protect
- * modifications with their on locking. The idea behind this is that for a full
+ * modifications with their own locking. The idea behind this is that for a full
* memory manager additional data needs to be protected anyway, hence internal
* locking would be fully redundant.
*/
@@ -180,7 +180,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
struct drm_mm *mm = hole_node->mm;
struct rb_node **link, *rb;
struct drm_mm_node *parent;
- bool leftmost = true;
+ bool leftmost;
node->__subtree_last = LAST(node);
@@ -201,6 +201,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
} else {
rb = NULL;
link = &mm->interval_tree.rb_root.rb_node;
+ leftmost = true;
}
while (*link) {
@@ -208,11 +209,11 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
parent = rb_entry(rb, struct drm_mm_node, rb);
if (parent->__subtree_last < node->__subtree_last)
parent->__subtree_last = node->__subtree_last;
- if (node->start < parent->start)
+ if (node->start < parent->start) {
link = &parent->rb.rb_left;
- else {
+ } else {
link = &parent->rb.rb_right;
- leftmost = true;
+ leftmost = false;
}
}
@@ -836,9 +837,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
if (!mm->color_adjust)
return NULL;
- hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
- hole_start = __drm_mm_hole_node_start(hole);
- hole_end = hole_start + hole->hole_size;
+ /*
+ * The hole found during scanning should ideally be the first element
+ * in the hole_stack list, but due to side-effects in the driver it
+ * may not be.
+ */
+ list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
+ hole_start = __drm_mm_hole_node_start(hole);
+ hole_end = hole_start + hole->hole_size;
+
+ if (hole_start <= scan->hit_start &&
+ hole_end >= scan->hit_end)
+ break;
+ }
+
+ /* We should only be called after we found the hole previously */
+ DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
+ if (unlikely(&hole->hole_stack == &mm->hole_stack))
+ return NULL;
DRM_MM_BUG_ON(hole_start > scan->hit_start);
DRM_MM_BUG_ON(hole_end < scan->hit_end);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 256de73..e5c6533 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -472,6 +472,9 @@ void drm_mode_config_cleanup(struct drm_device *dev)
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ struct drm_printer p = drm_debug_printer("[leaked fb]");
+ drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
+ drm_framebuffer_print_info(&p, 1, fb);
drm_framebuffer_free(&fb->base.refcount);
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 4a3f68a..e82b61e 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -773,24 +773,23 @@ EXPORT_SYMBOL(drm_mode_hsync);
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
- unsigned int calc_val;
if (mode->vrefresh > 0)
refresh = mode->vrefresh;
else if (mode->htotal > 0 && mode->vtotal > 0) {
- int vtotal;
- vtotal = mode->vtotal;
- /* work out vrefresh the value will be x1000 */
- calc_val = (mode->clock * 1000);
- calc_val /= mode->htotal;
- refresh = (calc_val + vtotal / 2) / vtotal;
+ unsigned int num, den;
+
+ num = mode->clock * 1000;
+ den = mode->htotal * mode->vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- refresh *= 2;
+ num *= 2;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- refresh /= 2;
+ den *= 2;
if (mode->vscan > 1)
- refresh /= mode->vscan;
+ den *= mode->vscan;
+
+ refresh = DIV_ROUND_CLOSEST(num, den);
}
return refresh;
}
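As a quick sanity check of the new arithmetic, take a common 1080p timing (values purely illustrative): clock = 148500 kHz, htotal = 2200, vtotal = 1125, no interlace, doublescan or vscan. Then num = 148500 * 1000 = 148500000, den = 2200 * 1125 = 2475000, and refresh = DIV_ROUND_CLOSEST(148500000, 2475000) = 60, as expected.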
@@ -833,7 +832,7 @@ EXPORT_SYMBOL(drm_mode_get_hv_timing);
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
- if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+ if (!p)
return;
p->crtc_clock = p->clock;
@@ -1023,19 +1022,18 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
}
EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
-/**
- * drm_mode_validate_basic - make sure the mode is somewhat sane
- * @mode: mode to check
- *
- * Check that the mode timings are at least somewhat reasonable.
- * Any hardware specific limits are left up for each driver to check.
- *
- * Returns:
- * The mode status
- */
-enum drm_mode_status
+static enum drm_mode_status
drm_mode_validate_basic(const struct drm_display_mode *mode)
{
+ if (mode->type & ~DRM_MODE_TYPE_ALL)
+ return MODE_BAD;
+
+ if (mode->flags & ~DRM_MODE_FLAG_ALL)
+ return MODE_BAD;
+
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+ return MODE_BAD;
+
if (mode->clock == 0)
return MODE_CLOCK_LOW;
@@ -1053,7 +1051,35 @@ drm_mode_validate_basic(const struct drm_display_mode *mode)
return MODE_OK;
}
-EXPORT_SYMBOL(drm_mode_validate_basic);
+
+/**
+ * drm_mode_validate_driver - make sure the mode is somewhat sane
+ * @dev: drm device
+ * @mode: mode to check
+ *
+ * First do basic validation on the mode, and then allow the driver
+ * to check for device/driver specific limitations via the optional
+ * &drm_mode_config_funcs.mode_valid hook.
+ *
+ * Returns:
+ * The mode status
+ */
+enum drm_mode_status
+drm_mode_validate_driver(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ enum drm_mode_status status;
+
+ status = drm_mode_validate_basic(mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (dev->mode_config.funcs->mode_valid)
+ return dev->mode_config.funcs->mode_valid(dev, mode);
+ else
+ return MODE_OK;
+}
+EXPORT_SYMBOL(drm_mode_validate_driver);
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
@@ -1319,9 +1345,9 @@ EXPORT_SYMBOL(drm_mode_connector_list_update);
* modeline in fb_mode_option will be parsed instead.
*
* This uses the same parameters as the fb modedb.c, except for an extra
- * force-enable, force-enable-digital and force-disable bit at the end:
+ * force-enable, force-enable-digital and force-disable bit at the end::
*
- * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
*
* The intermediate drm_cmdline_mode structure is required to store additional
* options from the command line modline like the force-enable/disable flag.
@@ -1555,6 +1581,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
/**
* drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @dev: drm device
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
@@ -1564,18 +1591,12 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_convert_umode(struct drm_display_mode *out,
+int drm_mode_convert_umode(struct drm_device *dev,
+ struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
{
- int ret = -EINVAL;
-
- if (in->clock > INT_MAX || in->vrefresh > INT_MAX) {
- ret = -ERANGE;
- goto out;
- }
-
- if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
- goto out;
+ if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
+ return -ERANGE;
out->clock = in->clock;
out->hdisplay = in->hdisplay;
@@ -1590,20 +1611,23 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
out->vscan = in->vscan;
out->vrefresh = in->vrefresh;
out->flags = in->flags;
- out->type = in->type;
+ /*
+ * Old xf86-video-vmware (possibly others too) used to
+ * leave 'type' uninitialized. Just ignore any bits we
+ * don't like. It's just a hint after all, and more
+ * useful for the kernel->userspace direction anyway.
+ */
+ out->type = in->type & DRM_MODE_TYPE_ALL;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
- out->status = drm_mode_validate_basic(out);
+ out->status = drm_mode_validate_driver(dev, out);
if (out->status != MODE_OK)
- goto out;
+ return -EINVAL;
drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
- ret = 0;
-
-out:
- return ret;
+ return 0;
}
/**
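A minimal sketch of the optional mode_valid hook that drm_mode_validate_driver() consults; the driver name and the 150 MHz pixel-clock limit are invented for illustration:

static enum drm_mode_status
foo_mode_config_mode_valid(struct drm_device *dev,
			   const struct drm_display_mode *mode)
{
	/* Hypothetical device-wide limit on the scanout clock */
	if (mode->clock > 150000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create	= drm_gem_fb_create,
	.mode_valid	= foo_mode_config_mode_valid,
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};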
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 9cb1eed..f1c24ab 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -20,6 +20,9 @@
* OF THIS SOFTWARE.
*/
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_plane_helper.h>
@@ -156,3 +159,76 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
NULL);
}
EXPORT_SYMBOL(drm_crtc_init);
+
+/**
+ * drm_mode_config_helper_suspend - Modeset suspend helper
+ * @dev: DRM device
+ *
+ * This helper function takes care of suspending the modeset side. It disables
+ * output polling if initialized, suspends fbdev if used and finally calls
+ * drm_atomic_helper_suspend().
+ * If suspending fails, fbdev and polling are re-enabled.
+ *
+ * Returns:
+ * Zero on success, negative error code on error.
+ *
+ * See also:
+ * drm_kms_helper_poll_disable() and drm_fb_helper_set_suspend_unlocked().
+ */
+int drm_mode_config_helper_suspend(struct drm_device *dev)
+{
+ struct drm_atomic_state *state;
+
+ if (!dev)
+ return 0;
+
+ drm_kms_helper_poll_disable(dev);
+ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
+ state = drm_atomic_helper_suspend(dev);
+ if (IS_ERR(state)) {
+ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+ drm_kms_helper_poll_enable(dev);
+ return PTR_ERR(state);
+ }
+
+ dev->mode_config.suspend_state = state;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_config_helper_suspend);
+
+/**
+ * drm_mode_config_helper_resume - Modeset resume helper
+ * @dev: DRM device
+ *
+ * This helper function takes care of resuming the modeset side. It calls
+ * drm_atomic_helper_resume(), resumes fbdev if used and enables output polling
+ * if initialized.
+ *
+ * Returns:
+ * Zero on success, negative error code on error.
+ *
+ * See also:
+ * drm_fb_helper_set_suspend_unlocked() and drm_kms_helper_poll_enable().
+ */
+int drm_mode_config_helper_resume(struct drm_device *dev)
+{
+ int ret;
+
+ if (!dev)
+ return 0;
+
+ if (WARN_ON(!dev->mode_config.suspend_state))
+ return -EINVAL;
+
+ ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state);
+ if (ret)
+ DRM_ERROR("Failed to resume (%d)\n", ret);
+ dev->mode_config.suspend_state = NULL;
+
+ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+ drm_kms_helper_poll_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_config_helper_resume);
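A sketch of the intended usage from a driver's system-sleep callbacks (device and driver names are placeholders):

static int __maybe_unused foo_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int __maybe_unused foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);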
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 963e23d..8a51006 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -113,6 +113,7 @@ retry:
kfree(ctx);
return;
}
+ ww_acquire_done(&ctx->ww_ctx);
WARN_ON(config->acquire_ctx);
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 4c191c0..1fe1224 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -122,12 +122,10 @@ int drm_of_component_probe(struct device *dev,
if (!port)
break;
- if (!of_device_is_available(port->parent)) {
- of_node_put(port);
- continue;
- }
+ if (of_device_is_available(port->parent))
+ drm_of_component_match_add(dev, &match, compare_of,
+ port);
- drm_of_component_match_add(dev, &match, compare_of, port);
of_node_put(port);
}
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
new file mode 100644
index 0000000..902cc1a
--- /dev/null
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * drm_panel_orientation_quirks.c -- Quirks for non-normal panel orientation
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Note the quirks in this file are shared with fbdev/efifb and as such
+ * must not depend on other drm code.
+ */
+
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_utils.h>
+
+#ifdef CONFIG_DMI
+
+/*
+ * Some x86 clamshell design devices use portrait tablet screens and a display
+ * engine which cannot rotate in hardware, so we need to rotate the fbcon to
+ * compensate. Unfortunately these (cheap) devices also typically have quite
+ * generic DMI data, so we match on a combination of DMI data, screen resolution
+ * and a list of known BIOS dates to avoid false positives.
+ */
+
+struct drm_dmi_panel_orientation_data {
+ int width;
+ int height;
+ const char * const *bios_dates;
+ int orientation;
+};
+
+static const struct drm_dmi_panel_orientation_data asus_t100ha = {
+ .width = 800,
+ .height = 1280,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
+static const struct drm_dmi_panel_orientation_data gpd_pocket = {
+ .width = 1200,
+ .height = 1920,
+ .bios_dates = (const char * const []){ "05/26/2017", "06/28/2017",
+ "07/05/2017", "08/07/2017", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
+static const struct drm_dmi_panel_orientation_data gpd_win = {
+ .width = 720,
+ .height = 1280,
+ .bios_dates = (const char * const []){
+ "10/25/2016", "11/18/2016", "12/23/2016", "12/26/2016",
+ "02/21/2017", "03/20/2017", "05/25/2017", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
+static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
+ .width = 800,
+ .height = 1280,
+ .bios_dates = (const char * const []){ "10/16/2015", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
+static const struct drm_dmi_panel_orientation_data vios_lth17 = {
+ .width = 800,
+ .height = 1280,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
+static const struct dmi_system_id orientation_data[] = {
+ { /* Asus T100HA */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
+ },
+ .driver_data = (void *)&asus_t100ha,
+ }, { /*
+ * GPD Pocket, note that the DMI data is less generic than
+ * it seems, devices with a board-vendor of "AMI Corporation"
+ * are quite rare, as are devices which have both board- *and*
+ * product-id set to "Default String"
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_pocket,
+ }, { /* GPD Win (same note on DMI match as GPD Pocket) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_win,
+ }, { /* I.T.Works TW891 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TW891"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
+ },
+ .driver_data = (void *)&itworks_tw891,
+ }, { /* VIOS LTH17 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
+ },
+ .driver_data = (void *)&vios_lth17,
+ },
+ {}
+};
+
+/**
+ * drm_get_panel_orientation_quirk - Check for panel orientation quirks
+ * @width: width in pixels of the panel
+ * @height: height in pixels of the panel
+ *
+ * This function checks for platform specific (e.g. DMI based) quirks
+ * providing info on panel_orientation for systems where this cannot be
+ * probed from the hard-/firm-ware. To avoid false positives this function
+ * takes the panel resolution as argument and checks that against the
+ * resolution expected by the quirk-table entry.
+ *
+ * Note this function is also used outside of the drm-subsys, by for example
+ * the efifb code. Because of this, this function gets compiled into its own
+ * kernel-module when built as a module.
+ *
+ * Returns:
+ * A DRM_MODE_PANEL_ORIENTATION_* value if there is a quirk for this system,
+ * or DRM_MODE_PANEL_ORIENTATION_UNKNOWN if there is no quirk.
+ */
+int drm_get_panel_orientation_quirk(int width, int height)
+{
+ const struct dmi_system_id *match;
+ const struct drm_dmi_panel_orientation_data *data;
+ const char *bios_date;
+ int i;
+
+ for (match = dmi_first_match(orientation_data);
+ match;
+ match = dmi_first_match(match + 1)) {
+ data = match->driver_data;
+
+ if (data->width != width ||
+ data->height != height)
+ continue;
+
+ if (!data->bios_dates)
+ return data->orientation;
+
+ bios_date = dmi_get_system_info(DMI_BIOS_DATE);
+ if (!bios_date)
+ continue;
+
+ for (i = 0; data->bios_dates[i]; i++) {
+ if (!strcmp(data->bios_dates[i], bios_date))
+ return data->orientation;
+ }
+ }
+
+ return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+}
+EXPORT_SYMBOL(drm_get_panel_orientation_quirk);
+
+#else
+
+/* There are no quirks for non x86 devices yet */
+int drm_get_panel_orientation_quirk(int width, int height)
+{
+ return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+}
+EXPORT_SYMBOL(drm_get_panel_orientation_quirk);
+
+#endif
+
+MODULE_LICENSE("Dual MIT/GPL");
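A sketch of how a KMS driver is expected to consume the quirk; only drm_get_panel_orientation_quirk() itself comes from this file, the surrounding plumbing and foo_apply_panel_orientation() are hypothetical:

	int orientation;

	/* Use the native panel mode, e.g. 800x1280 on the affected devices */
	orientation = drm_get_panel_orientation_quirk(native_mode->hdisplay,
						      native_mode->vdisplay);
	if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
		foo_apply_panel_orientation(connector, orientation);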
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 2c90519..6d2a6e4 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -104,7 +104,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
if (IS_ERR(blob))
return -1;
- blob_data = (struct drm_format_modifier_blob *)blob->data;
+ blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
blob_data->count_formats = plane->format_count;
blob_data->formats_offset = sizeof(struct drm_format_modifier_blob);
@@ -173,6 +173,10 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned int format_modifier_count = 0;
int ret;
+ /* plane index is used with 32bit bitmasks */
+ if (WARN_ON(config->num_total_plane >= 32))
+ return -EINVAL;
+
ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
return ret;
@@ -545,16 +549,33 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
return 0;
}
-int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
+int drm_plane_check_pixel_format(struct drm_plane *plane,
+ u32 format, u64 modifier)
{
unsigned int i;
for (i = 0; i < plane->format_count; i++) {
if (format == plane->format_types[i])
- return 0;
+ break;
+ }
+ if (i == plane->format_count)
+ return -EINVAL;
+
+ if (!plane->modifier_count)
+ return 0;
+
+ for (i = 0; i < plane->modifier_count; i++) {
+ if (modifier == plane->modifiers[i])
+ break;
}
+ if (i == plane->modifier_count)
+ return -EINVAL;
- return -EINVAL;
+ if (plane->funcs->format_mod_supported &&
+ !plane->funcs->format_mod_supported(plane, format, modifier))
+ return -EINVAL;
+
+ return 0;
}
/*
@@ -598,12 +619,14 @@ static int __setplane_internal(struct drm_plane *plane,
}
/* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, fb->format->format);
+ ret = drm_plane_check_pixel_format(plane, fb->format->format,
+ fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
+ drm_get_format_name(fb->format->format,
+ &format_name),
+ fb->modifier);
goto out;
}
@@ -944,7 +967,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if (r)
return r;
- current_vblank = drm_crtc_vblank_count(crtc);
+ current_vblank = (u32)drm_crtc_vblank_count(crtc);
switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) {
case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
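Sketch of the optional &drm_plane_funcs.format_mod_supported hook that drm_plane_check_pixel_format() now consults; the driver name and the linear-only policy are invented:

static bool foo_plane_format_mod_supported(struct drm_plane *plane,
					   u32 format, u64 modifier)
{
	/* Illustrative policy: accept any listed format, but only linear layout */
	return modifier == DRM_FORMAT_MOD_LINEAR;
}

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	/* ... reset/duplicate/destroy state hooks ... */
	.format_mod_supported	= foo_plane_format_mod_supported,
};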
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 759ed93..f88f681 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -100,104 +100,12 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
}
/**
- * drm_plane_helper_check_state() - Check plane state for validity
- * @state: plane state to check
- * @clip: integer clipping coordinates
- * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
- * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
- * @can_position: is it legal to position the plane such that it
- * doesn't cover the entire crtc? This will generally
- * only be false for primary planes.
- * @can_update_disabled: can the plane be updated while the crtc
- * is disabled?
- *
- * Checks that a desired plane update is valid, and updates various
- * bits of derived state (clipped coordinates etc.). Drivers that provide
- * their own plane handling rather than helper-provided implementations may
- * still wish to call this function to avoid duplication of error checking
- * code.
- *
- * RETURNS:
- * Zero if update appears valid, error code on failure
- */
-int drm_plane_helper_check_state(struct drm_plane_state *state,
- const struct drm_rect *clip,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled)
-{
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
- struct drm_rect *src = &state->src;
- struct drm_rect *dst = &state->dst;
- unsigned int rotation = state->rotation;
- int hscale, vscale;
-
- *src = drm_plane_state_src(state);
- *dst = drm_plane_state_dest(state);
-
- if (!fb) {
- state->visible = false;
- return 0;
- }
-
- /* crtc should only be NULL when disabling (i.e., !fb) */
- if (WARN_ON(!crtc)) {
- state->visible = false;
- return 0;
- }
-
- if (!crtc->enabled && !can_update_disabled) {
- DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
- return -EINVAL;
- }
-
- drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
-
- /* Check scaling */
- hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
- vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
- if (hscale < 0 || vscale < 0) {
- DRM_DEBUG_KMS("Invalid scaling of plane\n");
- drm_rect_debug_print("src: ", &state->src, true);
- drm_rect_debug_print("dst: ", &state->dst, false);
- return -ERANGE;
- }
-
- state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
-
- drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
-
- if (!state->visible)
- /*
- * Plane isn't visible; some drivers can handle this
- * so we just return success here. Drivers that can't
- * (including those that use the primary plane helper's
- * update function) will return an error from their
- * update_plane handler.
- */
- return 0;
-
- if (!can_position && !drm_rect_equals(dst, clip)) {
- DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
- drm_rect_debug_print("dst: ", dst, false);
- drm_rect_debug_print("clip: ", clip, false);
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_plane_helper_check_state);
-
-/**
* drm_plane_helper_check_update() - Check plane update for validity
* @plane: plane object to update
* @crtc: owning CRTC of owning plane
* @fb: framebuffer to flip onto plane
* @src: source coordinates in 16.16 fixed point
* @dst: integer destination coordinates
- * @clip: integer clipping coordinates
* @rotation: plane rotation
* @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
* @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
@@ -222,7 +130,6 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct drm_rect *src,
struct drm_rect *dst,
- const struct drm_rect *clip,
unsigned int rotation,
int min_scale,
int max_scale,
@@ -230,7 +137,7 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
bool can_update_disabled,
bool *visible)
{
- struct drm_plane_state state = {
+ struct drm_plane_state plane_state = {
.plane = plane,
.crtc = crtc,
.fb = fb,
@@ -245,18 +152,23 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
.rotation = rotation,
.visible = *visible,
};
+ struct drm_crtc_state crtc_state = {
+ .crtc = crtc,
+ .enable = crtc->enabled,
+ .mode = crtc->mode,
+ };
int ret;
- ret = drm_plane_helper_check_state(&state, clip,
- min_scale, max_scale,
- can_position,
- can_update_disabled);
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ min_scale, max_scale,
+ can_position,
+ can_update_disabled);
if (ret)
return ret;
- *src = state.src;
- *dst = state.dst;
- *visible = state.visible;
+ *src = plane_state.src;
+ *dst = plane_state.dst;
+ *visible = plane_state.visible;
return 0;
}
@@ -326,16 +238,12 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
- const struct drm_rect clip = {
- .x2 = crtc->mode.hdisplay,
- .y2 = crtc->mode.vdisplay,
- };
struct drm_connector **connector_list;
int num_connectors, ret;
bool visible;
ret = drm_plane_helper_check_update(plane, crtc, fb,
- &src, &dest, &clip,
+ &src, &dest,
DRM_MODE_ROTATE_0,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 8de93a2..7856a9b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -73,6 +73,9 @@
* Drivers should detect this situation and return back the gem object
* from the dma-buf private. Prime will do this automatically for drivers that
* use the drm_gem_prime_{import,export} helpers.
+ *
+ * GEM struct &dma_buf_ops symbols are now exported. They can be reused by
+ * drivers which implement the GEM interface.
*/
struct drm_prime_member {
@@ -180,9 +183,20 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-static int drm_gem_map_attach(struct dma_buf *dma_buf,
- struct device *target_dev,
- struct dma_buf_attachment *attach)
+/**
+ * drm_gem_map_attach - dma_buf attach implementation for GEM
+ * @dma_buf: buffer to attach device to
+ * @target_dev: not used
+ * @attach: buffer attachment data
+ *
+ * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
+ * device specific attachment. This can be used as the &dma_buf_ops.attach
+ * callback.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+ struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach;
struct drm_gem_object *obj = dma_buf->priv;
@@ -200,33 +214,44 @@ static int drm_gem_map_attach(struct dma_buf *dma_buf,
return dev->driver->gem_prime_pin(obj);
}
+EXPORT_SYMBOL(drm_gem_map_attach);
-static void drm_gem_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+/**
+ * drm_gem_map_detach - dma_buf detach implementation for GEM
+ * @dma_buf: buffer to detach from
+ * @attach: attachment to be detached
+ *
+ * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach
+ * callback.
+ */
+void drm_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
- struct sg_table *sgt;
- if (dev->driver->gem_prime_unpin)
- dev->driver->gem_prime_unpin(obj);
+ if (prime_attach) {
+ struct sg_table *sgt = prime_attach->sgt;
- if (!prime_attach)
- return;
-
- sgt = prime_attach->sgt;
- if (sgt) {
- if (prime_attach->dir != DMA_NONE)
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
- prime_attach->dir);
- sg_free_table(sgt);
+ if (sgt) {
+ if (prime_attach->dir != DMA_NONE)
+ dma_unmap_sg_attrs(attach->dev, sgt->sgl,
+ sgt->nents,
+ prime_attach->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ }
+
+ kfree(sgt);
+ kfree(prime_attach);
+ attach->priv = NULL;
}
- kfree(sgt);
- kfree(prime_attach);
- attach->priv = NULL;
+ if (dev->driver->gem_prime_unpin)
+ dev->driver->gem_prime_unpin(obj);
}
+EXPORT_SYMBOL(drm_gem_map_detach);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf)
@@ -253,8 +278,20 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
}
}
-static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
+/**
+ * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
+ * @attach: attachment whose scatterlist is to be returned
+ * @dir: direction of DMA transfer
+ *
+ * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
+ * can be used as the &dma_buf_ops.map_dma_buf callback.
+ *
+ * Returns the sg_table containing the scatterlist, or an ERR_PTR on error.
+ * May return -EINTR if it is interrupted by a signal.
+ */
+
+struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
{
struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = attach->dmabuf->priv;
@@ -277,7 +314,8 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
if (!IS_ERR(sgt)) {
- if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
sg_free_table(sgt);
kfree(sgt);
sgt = ERR_PTR(-ENOMEM);
@@ -289,13 +327,21 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
return sgt;
}
+EXPORT_SYMBOL(drm_gem_map_dma_buf);
-static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt,
- enum dma_data_direction dir)
+/**
+ * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
+ *
+ * Not implemented. The unmap is done at drm_gem_map_detach(). This can be
+ * used as the &dma_buf_ops.unmap_dma_buf callback.
+ */
+void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
{
/* nothing to be done here */
}
+EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
/**
* drm_gem_dmabuf_export - dma_buf export implementation for GEM
@@ -346,47 +392,99 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
-static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+/**
+ * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ *
+ * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
+ * callback.
+ *
+ * Returns the kernel virtual address.
+ */
+void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
return dev->driver->gem_prime_vmap(obj);
}
+EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
-static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+/**
+ * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @vaddr: the virtual address of the buffer
+ *
+ * Releases a kernel virtual mapping. This can be used as the
+ * &dma_buf_ops.vunmap callback.
+ */
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
dev->driver->gem_prime_vunmap(obj, vaddr);
}
+EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
-static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num)
+/**
+ * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
+ */
+void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
{
return NULL;
}
+EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
+/**
+ * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
+ */
+void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
{
}
-static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
- unsigned long page_num)
+EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
+
+/**
+ * drm_gem_dmabuf_kmap - map implementation for GEM
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.map callback.
+ */
+void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
}
+EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
-static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr)
+/**
+ * drm_gem_dmabuf_kunmap - unmap implementation for GEM
+ *
+ * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
+ */
+void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
+ void *addr)
{
}
+EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);
-static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
- struct vm_area_struct *vma)
+/**
+ * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @vma: virtual address range
+ *
+ * Provides memory mapping for the buffer. This can be used as the
+ * &dma_buf_ops.mmap callback.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct drm_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->dev;
@@ -396,6 +494,7 @@ static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
return dev->driver->gem_prime_mmap(obj, vma);
}
+EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.attach = drm_gem_map_attach,
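With the helpers exported, a driver that needs a customized &dma_buf_ops (here only the mmap callback is overridden; foo_dmabuf_mmap is hypothetical) can reuse the GEM implementations rather than open-coding them:

static const struct dma_buf_ops foo_dmabuf_ops = {
	.attach		= drm_gem_map_attach,
	.detach		= drm_gem_map_detach,
	.map_dma_buf	= drm_gem_map_dma_buf,
	.unmap_dma_buf	= drm_gem_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map		= drm_gem_dmabuf_kmap,
	.map_atomic	= drm_gem_dmabuf_kmap_atomic,
	.unmap		= drm_gem_dmabuf_kunmap,
	.unmap_atomic	= drm_gem_dmabuf_kunmap_atomic,
	.mmap		= foo_dmabuf_mmap,	/* driver-specific, hypothetical */
	.vmap		= drm_gem_dmabuf_vmap,
	.vunmap		= drm_gem_dmabuf_vunmap,
};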
@@ -823,40 +922,40 @@ EXPORT_SYMBOL(drm_prime_pages_to_sg);
/**
* drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
* @sgt: scatter-gather table to convert
- * @pages: array of page pointers to store the page array in
+ * @pages: optional array of page pointers to store the page array in
* @addrs: optional array to store the dma bus address of each page
- * @max_pages: size of both the passed-in arrays
+ * @max_entries: size of both the passed-in arrays
*
* Exports an sg table into an array of pages and addresses. This is currently
* required by the TTM driver in order to do correct fault handling.
*/
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_pages)
+ dma_addr_t *addrs, int max_entries)
{
unsigned count;
struct scatterlist *sg;
struct page *page;
- u32 len;
- int pg_index;
+ u32 len, index;
dma_addr_t addr;
- pg_index = 0;
+ index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
len = sg->length;
page = sg_page(sg);
addr = sg_dma_address(sg);
while (len > 0) {
- if (WARN_ON(pg_index >= max_pages))
+ if (WARN_ON(index >= max_entries))
return -1;
- pages[pg_index] = page;
+ if (pages)
+ pages[index] = page;
if (addrs)
- addrs[pg_index] = addr;
+ addrs[index] = addr;
page++;
addr += PAGE_SIZE;
len -= PAGE_SIZE;
- pg_index++;
+ index++;
}
}
return 0;
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 74c466a..b25f98f 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -23,6 +23,8 @@
* Rob Clark <robdclark@gmail.com>
*/
+#define DEBUG /* for pr_debug() */
+
#include <stdarg.h>
#include <linux/seq_file.h>
#include <drm/drmP.h>
@@ -53,13 +55,90 @@ EXPORT_SYMBOL(__drm_printfn_debug);
*/
void drm_printf(struct drm_printer *p, const char *f, ...)
{
- struct va_format vaf;
va_list args;
va_start(args, f);
- vaf.fmt = f;
- vaf.va = &args;
- p->printfn(p, &vaf);
+ drm_vprintf(p, f, &args);
va_end(args);
}
EXPORT_SYMBOL(drm_printf);
+
+void drm_dev_printk(const struct device *dev, const char *level,
+ const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ if (dev)
+ dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+ else
+ printk("%s" "[" DRM_NAME ":%ps] %pV",
+ level, __builtin_return_address(0), &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_dev_printk);
+
+void drm_dev_dbg(const struct device *dev, unsigned int category,
+ const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (!(drm_debug & category))
+ return;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ if (dev)
+ dev_printk(KERN_DEBUG, dev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+ else
+ printk(KERN_DEBUG "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_dev_dbg);
+
+void drm_dbg(unsigned int category, const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (!(drm_debug & category))
+ return;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_dbg);
+
+void drm_err(const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_err);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6dc2dde..52774339 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -216,8 +216,7 @@ enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
* suspend/resume.
*
* Drivers can call this helper from their device resume implementation. It is
- * an error to call this when the output polling support has not yet been set
- * up.
+ * not an error to call this even when output polling isn't enabled.
*
* Note that calls to enable and disable polling must be strictly ordered, which
* is automatically the case when they're only called from suspend/resume
@@ -500,7 +499,7 @@ retry:
list_for_each_entry(mode, &connector->modes, head) {
if (mode->status == MODE_OK)
- mode->status = drm_mode_validate_basic(mode);
+ mode->status = drm_mode_validate_driver(dev, mode);
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_size(mode, maxX, maxY);
@@ -655,6 +654,26 @@ out:
}
/**
+ * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
+ *
+ * Determine if %current task is an output poll worker. This can be used
+ * to select distinct code paths for output polling versus other contexts.
+ *
+ * One use case is to avoid a deadlock between the output poll worker and
+ * the autosuspend worker wherein the latter waits for polling to finish
+ * upon calling drm_kms_helper_poll_disable(), while the former waits for
+ * runtime suspend to finish upon calling pm_runtime_get_sync() in a
+ * connector ->detect hook.
+ */
+bool drm_kms_helper_is_poll_worker(void)
+{
+ struct work_struct *work = current_work();
+
+ return work && work->func == output_poll_execute;
+}
+EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+
+/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
*
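The deadlock avoidance described in the new kerneldoc typically ends up looking like this in a runtime-PM aware connector ->detect hook (sketch, foo_probe_ddc() is hypothetical):

static enum drm_connector_status
foo_connector_detect(struct drm_connector *connector, bool force)
{
	struct device *dev = connector->dev->dev;
	enum drm_connector_status status;

	/*
	 * The poll worker only runs while the device is resumed, so skip
	 * runtime PM there and avoid deadlocking against autosuspend.
	 */
	if (!drm_kms_helper_is_poll_worker())
		pm_runtime_get_sync(dev);

	status = foo_probe_ddc(connector);

	if (!drm_kms_helper_is_poll_worker()) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}

	return status;
}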
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index bae50e6..8f4672d 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -50,11 +50,27 @@
* IOCTL and in the get/set property IOCTL.
*/
-static bool drm_property_type_valid(struct drm_property *property)
+static bool drm_property_flags_valid(u32 flags)
{
- if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
- return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
- return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+ u32 legacy_type = flags & DRM_MODE_PROP_LEGACY_TYPE;
+ u32 ext_type = flags & DRM_MODE_PROP_EXTENDED_TYPE;
+
+ /* Reject undefined/deprecated flags */
+ if (flags & ~(DRM_MODE_PROP_LEGACY_TYPE |
+ DRM_MODE_PROP_EXTENDED_TYPE |
+ DRM_MODE_PROP_IMMUTABLE |
+ DRM_MODE_PROP_ATOMIC))
+ return false;
+
+ /* We want either a legacy type or an extended type, but not both */
+ if (!legacy_type == !ext_type)
+ return false;
+
+ /* Only one legacy type at a time please */
+ if (legacy_type && !is_power_of_2(legacy_type))
+ return false;
+
+ return true;
}
/**
@@ -72,12 +88,19 @@ static bool drm_property_type_valid(struct drm_property *property)
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
- const char *name, int num_values)
+struct drm_property *drm_property_create(struct drm_device *dev,
+ u32 flags, const char *name,
+ int num_values)
{
struct drm_property *property = NULL;
int ret;
+ if (WARN_ON(!drm_property_flags_valid(flags)))
+ return NULL;
+
+ if (WARN_ON(strlen(name) >= DRM_PROP_NAME_LEN))
+ return NULL;
+
property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
if (!property)
return NULL;
@@ -99,15 +122,11 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_list);
- if (name) {
- strncpy(property->name, name, DRM_PROP_NAME_LEN);
- property->name[DRM_PROP_NAME_LEN-1] = '\0';
- }
+ strncpy(property->name, name, DRM_PROP_NAME_LEN);
+ property->name[DRM_PROP_NAME_LEN-1] = '\0';
list_add_tail(&property->head, &dev->mode_config.property_list);
- WARN_ON(!drm_property_type_valid(property));
-
return property;
fail:
kfree(property->values);
@@ -135,10 +154,10 @@ EXPORT_SYMBOL(drm_property_create);
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
-struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
- const char *name,
- const struct drm_prop_enum_list *props,
- int num_values)
+struct drm_property *drm_property_create_enum(struct drm_device *dev,
+ u32 flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
{
struct drm_property *property;
int i, ret;
@@ -184,10 +203,10 @@ EXPORT_SYMBOL(drm_property_create_enum);
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
- int flags, const char *name,
- const struct drm_prop_enum_list *props,
- int num_props,
- uint64_t supported_bits)
+ u32 flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_props,
+ uint64_t supported_bits)
{
struct drm_property *property;
int i, ret, index = 0;
@@ -221,8 +240,8 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
EXPORT_SYMBOL(drm_property_create_bitmask);
static struct drm_property *property_create_range(struct drm_device *dev,
- int flags, const char *name,
- uint64_t min, uint64_t max)
+ u32 flags, const char *name,
+ uint64_t min, uint64_t max)
{
struct drm_property *property;
@@ -255,9 +274,9 @@ static struct drm_property *property_create_range(struct drm_device *dev,
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
- const char *name,
- uint64_t min, uint64_t max)
+struct drm_property *drm_property_create_range(struct drm_device *dev,
+ u32 flags, const char *name,
+ uint64_t min, uint64_t max)
{
return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
name, min, max);
@@ -284,8 +303,8 @@ EXPORT_SYMBOL(drm_property_create_range);
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
- int flags, const char *name,
- int64_t min, int64_t max)
+ u32 flags, const char *name,
+ int64_t min, int64_t max)
{
return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
name, I642U64(min), I642U64(max));
@@ -311,7 +330,7 @@ EXPORT_SYMBOL(drm_property_create_signed_range);
* A pointer to the newly created property on success, NULL on failure.
*/
struct drm_property *drm_property_create_object(struct drm_device *dev,
- int flags, const char *name,
+ u32 flags, const char *name,
uint32_t type)
{
struct drm_property *property;
@@ -347,8 +366,8 @@ EXPORT_SYMBOL(drm_property_create_object);
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
- const char *name)
+struct drm_property *drm_property_create_bool(struct drm_device *dev,
+ u32 flags, const char *name)
{
return drm_property_create_range(dev, flags, name, 0, 1);
}
@@ -374,26 +393,24 @@ int drm_property_add_enum(struct drm_property *property, int index,
{
struct drm_property_enum *prop_enum;
- if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
- drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
+ if (WARN_ON(strlen(name) >= DRM_PROP_NAME_LEN))
+ return -EINVAL;
+
+ if (WARN_ON(!drm_property_type_is(property, DRM_MODE_PROP_ENUM) &&
+ !drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
return -EINVAL;
/*
* Bitmask enum properties have the additional constraint of values
* from 0 to 63
*/
- if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
- (value > 63))
+ if (WARN_ON(drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
+ value > 63))
return -EINVAL;
- if (!list_empty(&property->enum_list)) {
- list_for_each_entry(prop_enum, &property->enum_list, head) {
- if (prop_enum->value == value) {
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
- return 0;
- }
- }
+ list_for_each_entry(prop_enum, &property->enum_list, head) {
+ if (WARN_ON(prop_enum->value == value))
+ return -EINVAL;
}
prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
@@ -550,6 +567,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
/* This must be explicitly initialised, so we can safely call list_del
* on it in the removal handler, even if it isn't in a file list. */
INIT_LIST_HEAD(&blob->head_file);
+ blob->data = (void *)blob + sizeof(*blob);
blob->length = length;
blob->dev = dev;
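
A minimal sketch (not part of this patch) of how a driver creates and attaches an enum property under the new u32 flags convention; the demo_* identifiers, property name and enum values are invented for illustration, while drm_property_create_enum() and drm_object_attach_property() are existing helpers. With the WARN_ON checks added above, invalid flags or an over-long name now fail the create instead of producing a broken property.

        static const struct drm_prop_enum_list demo_enum_list[] = {
                { 0, "off" },
                { 1, "on" },
        };

        static int demo_attach_mode_prop(struct drm_device *dev,
                                         struct drm_connector *connector)
        {
                struct drm_property *prop;

                /* flags is u32 now; DRM_MODE_PROP_ENUM is ORed in by the helper */
                prop = drm_property_create_enum(dev, 0, "demo-mode",
                                                demo_enum_list,
                                                ARRAY_SIZE(demo_enum_list));
                if (!prop)
                        return -ENOMEM;

                drm_object_attach_property(&connector->base, prop, 0);
                return 0;
        }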
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index dc9fd10..987a353 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -34,6 +34,20 @@ static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static enum drm_mode_status
+drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
+ if (!pipe->funcs || !pipe->funcs->mode_valid)
+ /* Anything goes */
+ return MODE_OK;
+
+ return pipe->funcs->mode_valid(crtc, mode);
+}
+
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -72,11 +86,34 @@ static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
+ .mode_valid = drm_simple_kms_crtc_mode_valid,
.atomic_check = drm_simple_kms_crtc_check,
.atomic_enable = drm_simple_kms_crtc_enable,
.atomic_disable = drm_simple_kms_crtc_disable,
};
+static int drm_simple_kms_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
+ if (!pipe->funcs || !pipe->funcs->enable_vblank)
+ return 0;
+
+ return pipe->funcs->enable_vblank(pipe);
+}
+
+static void drm_simple_kms_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
+ if (!pipe->funcs || !pipe->funcs->disable_vblank)
+ return;
+
+ pipe->funcs->disable_vblank(pipe);
+}
+
static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
@@ -84,12 +121,13 @@ static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = drm_simple_kms_crtc_enable_vblank,
+ .disable_vblank = drm_simple_kms_crtc_disable_vblank,
};
static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
- struct drm_rect clip = { 0 };
struct drm_simple_display_pipe *pipe;
struct drm_crtc_state *crtc_state;
int ret;
@@ -97,21 +135,16 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
crtc_state = drm_atomic_get_new_crtc_state(plane_state->state,
&pipe->crtc);
- if (!crtc_state->enable)
- return 0; /* nothing to check when disabling or disabled */
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
-
- ret = drm_plane_helper_check_state(plane_state, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
if (ret)
return ret;
if (!plane_state->visible)
- return -EINVAL;
+ return 0;
if (!pipe->funcs || !pipe->funcs->check)
return 0;
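
With the forwarding shims above, a simple-KMS driver can reject display modes and wire up vblank interrupts purely through struct drm_simple_display_pipe_funcs, assuming the matching hooks added to that struct in the header (not shown in this hunk). A hypothetical driver-side sketch; the demo_* functions and the 148.5 MHz limit are invented:

        static enum drm_mode_status
        demo_pipe_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
        {
                /* e.g. a scanout engine limited to a 148.5 MHz pixel clock */
                if (mode->clock > 148500)
                        return MODE_CLOCK_HIGH;

                return MODE_OK;
        }

        static int demo_pipe_enable_vblank(struct drm_simple_display_pipe *pipe)
        {
                /* unmask the hardware vblank interrupt here */
                return 0;
        }

        static void demo_pipe_disable_vblank(struct drm_simple_display_pipe *pipe)
        {
                /* mask the hardware vblank interrupt here */
        }

        static const struct drm_simple_display_pipe_funcs demo_pipe_funcs = {
                .mode_valid = demo_pipe_mode_valid,
                .enable_vblank = demo_pipe_enable_vblank,
                .disable_vblank = demo_pipe_disable_vblank,
                /* .enable, .disable, .update, ... unchanged */
        };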
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index cb4d09c..d4f4ce4 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -29,9 +29,9 @@
/**
* DOC: Overview
*
- * DRM synchronisation objects (syncobj) are a persistent objects,
- * that contain an optional fence. The fence can be updated with a new
- * fence, or be NULL.
+ * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
+ * persistent objects that contain an optional fence. The fence can be updated
+ * with a new fence, or be NULL.
*
* syncobj's can be waited upon, where it will wait for the underlying
* fence.
@@ -61,7 +61,8 @@
* @file_private: drm file private pointer
* @handle: sync object handle to lookup.
*
- * Returns a reference to the syncobj pointed to by handle or NULL.
+ * Returns a reference to the syncobj pointed to by handle or NULL. The
+ * reference must be released by calling drm_syncobj_put().
*/
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle)
@@ -106,7 +107,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
* callback when a fence has already been set.
*/
if (syncobj->fence) {
- *fence = dma_fence_get(syncobj->fence);
+ *fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
+ lockdep_is_held(&syncobj->lock)));
ret = 1;
} else {
*fence = NULL;
@@ -168,8 +170,9 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
spin_lock(&syncobj->lock);
- old_fence = syncobj->fence;
- syncobj->fence = fence;
+ old_fence = rcu_dereference_protected(syncobj->fence,
+ lockdep_is_held(&syncobj->lock));
+ rcu_assign_pointer(syncobj->fence, fence);
if (fence != old_fence) {
list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
@@ -227,6 +230,19 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
return 0;
}
+/**
+ * drm_syncobj_find_fence - lookup and reference the fence in a sync object
+ * @file_private: drm file private pointer
+ * @handle: sync object handle to lookup.
+ * @fence: out parameter for the fence
+ *
+ * This is just a convenience function that combines drm_syncobj_find() and
+ * drm_syncobj_fence_get().
+ *
+ * Returns 0 on success or a negative error value on failure. On success @fence
+ * contains a reference to the fence, which must be released by calling
+ * dma_fence_put().
+ */
int drm_syncobj_find_fence(struct drm_file *file_private,
u32 handle,
struct dma_fence **fence)
@@ -267,6 +283,12 @@ EXPORT_SYMBOL(drm_syncobj_free);
* @out_syncobj: returned syncobj
* @flags: DRM_SYNCOBJ_* flags
* @fence: if non-NULL, the syncobj will represent this fence
+ *
+ * This is the first function to create a sync object. After creating, drivers
+ * probably want to make it available to userspace, either through
+ * drm_syncobj_get_handle() or drm_syncobj_get_fd().
+ *
+ * Returns 0 on success or a negative error value on failure.
*/
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
struct dma_fence *fence)
@@ -300,6 +322,14 @@ EXPORT_SYMBOL(drm_syncobj_create);
/**
* drm_syncobj_get_handle - get a handle from a syncobj
+ * @file_private: drm file private pointer
+ * @syncobj: Sync object to export
+ * @handle: out parameter with the new handle
+ *
+ * Exports a sync object created with drm_syncobj_create() as a handle on
+ * @file_private to userspace.
+ *
+ * Returns 0 on success or a negative error value on failure.
*/
int drm_syncobj_get_handle(struct drm_file *file_private,
struct drm_syncobj *syncobj, u32 *handle)
@@ -369,6 +399,15 @@ static const struct file_operations drm_syncobj_file_fops = {
.release = drm_syncobj_file_release,
};
+/**
+ * drm_syncobj_get_fd - get a file descriptor from a syncobj
+ * @syncobj: Sync object to export
+ * @p_fd: out parameter with the new file descriptor
+ *
+ * Exports a sync object created with drm_syncobj_create() as a file descriptor.
+ *
+ * Returns 0 on success or a negative error value on failure.
+ */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
struct file *file;
@@ -507,7 +546,7 @@ err_put_fd:
void
drm_syncobj_open(struct drm_file *file_private)
{
- idr_init(&file_private->syncobj_idr);
+ idr_init_base(&file_private->syncobj_idr, 1);
spin_lock_init(&file_private->syncobj_table_lock);
}
@@ -640,7 +679,8 @@ static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
container_of(cb, struct syncobj_wait_entry, syncobj_cb);
/* This happens inside the syncobj lock */
- wait->fence = dma_fence_get(syncobj->fence);
+ wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
+ lockdep_is_held(&syncobj->lock)));
wake_up_process(wait->task);
}
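
The reference rules spelled out in the new kernel-doc can be illustrated with a small, hypothetical caller (demo_wait_on_syncobj is not in the tree): drm_syncobj_find_fence() returns a fence reference that the caller must drop with dma_fence_put().

        static int demo_wait_on_syncobj(struct drm_file *file_priv, u32 handle)
        {
                struct dma_fence *fence;
                int ret;

                ret = drm_syncobj_find_fence(file_priv, handle, &fence);
                if (ret)
                        return ret;

                /* use the fence, e.g. wait on it or add it as a job dependency */
                ret = dma_fence_wait(fence, true);

                dma_fence_put(fence);   /* drop the reference taken by the lookup */
                return ret;
        }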
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 3717b3df..28cdcf7 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -120,6 +120,9 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ if (WARN_ON(!crtc))
+ return 0;
+
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
}
@@ -271,7 +274,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
}
-static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -292,11 +295,11 @@ static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
* This is mostly useful for hardware that can obtain the scanout position, but
* doesn't have a hardware frame counter.
*/
-u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
+u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
- u32 vblank;
+ u64 vblank;
unsigned long flags;
WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
@@ -318,6 +321,9 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ if (WARN_ON(!crtc))
+ return;
+
if (crtc->funcs->disable_vblank) {
crtc->funcs->disable_vblank(crtc);
return;
@@ -347,23 +353,25 @@ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/*
- * Only disable vblank interrupts if they're enabled. This avoids
- * calling the ->disable_vblank() operation in atomic context with the
- * hardware potentially runtime suspended.
+ * Update vblank count and disable vblank interrupts only if the
+ * interrupts were enabled. This avoids calling the ->disable_vblank()
+ * operation in atomic context with the hardware potentially runtime
+ * suspended.
*/
- if (vblank->enabled) {
- __disable_vblank(dev, pipe);
- vblank->enabled = false;
- }
+ if (!vblank->enabled)
+ goto out;
/*
- * Always update the count and timestamp to maintain the
+ * Update the count and timestamp to maintain the
* appearance that the counter has been ticking all along until
* this time. This makes the count account for the entire time
* between drm_crtc_vblank_on() and drm_crtc_vblank_off().
*/
drm_update_vblank_count(dev, pipe, false);
+ __disable_vblank(dev, pipe);
+ vblank->enabled = false;
+out:
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
@@ -663,14 +671,16 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
mode->crtc_clock);
- /* save this only for debugging purposes */
- ts_etime = ktime_to_timespec64(etime);
- ts_vblank_time = ktime_to_timespec64(*vblank_time);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- etime = ktime_sub_ns(etime, delta_ns);
- *vblank_time = etime;
+ *vblank_time = ktime_sub_ns(etime, delta_ns);
+
+ if ((drm_debug & DRM_UT_VBL) == 0)
+ return true;
+
+ ts_etime = ktime_to_timespec64(etime);
+ ts_vblank_time = ktime_to_timespec64(*vblank_time);
DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
pipe, hpos, vpos,
@@ -916,6 +926,9 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ if (WARN_ON(!crtc))
+ return 0;
+
if (crtc->funcs->enable_vblank)
return crtc->funcs->enable_vblank(crtc);
}
@@ -1053,7 +1066,7 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int ret;
- u32 last;
+ u64 last;
if (WARN_ON(pipe >= dev->num_crtcs))
return;
@@ -1233,6 +1246,71 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_on);
+/**
+ * drm_vblank_restore - estimate missed vblanks and update vblank count.
+ * @dev: DRM device
+ * @pipe: CRTC index
+ *
+ * Power management features can cause frame counter resets between vblank
+ * disable and enable. Drivers can use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
+ * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
+ * vblank counter.
+ *
+ * This function is the legacy version of drm_crtc_vblank_restore().
+ */
+void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
+{
+ ktime_t t_vblank;
+ struct drm_vblank_crtc *vblank;
+ int framedur_ns;
+ u64 diff_ns;
+ u32 cur_vblank, diff = 1;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ assert_spin_locked(&dev->vbl_lock);
+ assert_spin_locked(&dev->vblank_time_lock);
+
+ vblank = &dev->vblank[pipe];
+ WARN_ONCE((drm_debug & DRM_UT_VBL) && !vblank->framedur_ns,
+ "Cannot compute missed vblanks without frame duration\n");
+ framedur_ns = vblank->framedur_ns;
+
+ do {
+ cur_vblank = __get_vblank_counter(dev, pipe);
+ drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false);
+ } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
+
+ diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
+ if (framedur_ns)
+ diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
+
+ DRM_DEBUG_VBL("missed %d vblanks in %lld ns, frame duration=%d ns, hw_diff=%d\n",
+ diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
+ store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
+}
+EXPORT_SYMBOL(drm_vblank_restore);
+
+/**
+ * drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
+ * @crtc: CRTC in question
+ *
+ * Power management features can cause frame counter resets between vblank
+ * disable and enable. Drivers can use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
+ * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
+ * vblank counter.
+ */
+void drm_crtc_vblank_restore(struct drm_crtc *crtc)
+{
+ drm_vblank_restore(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_restore);
+
static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
unsigned int pipe)
{
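
A hedged driver-side sketch of the intended use of the new helper (the demo_ function is invented): hardware whose frame counter resets across runtime suspend calls drm_crtc_vblank_restore() from its &drm_crtc_funcs.enable_vblank hook, where the vblank locks are already held, so the software counter is advanced by roughly elapsed_ns / framedur_ns before the interrupt is re-enabled.

        static int demo_crtc_enable_vblank(struct drm_crtc *crtc)
        {
                /* re-estimate vblanks missed while the interrupt was off */
                drm_crtc_vblank_restore(crtc);

                /* then unmask the hardware vblank interrupt */
                return 0;
        }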
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 28f1226..23c749c 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -203,21 +203,16 @@ EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node, unsigned long pages)
{
- int ret;
+ int ret = 0;
write_lock(&mgr->vm_lock);
- if (drm_mm_node_allocated(&node->vm_node)) {
- ret = 0;
- goto out_unlock;
- }
+ if (!drm_mm_node_allocated(&node->vm_node))
+ ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
+ &node->vm_node, pages);
- ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
- if (ret)
- goto out_unlock;
-
-out_unlock:
write_unlock(&mgr->vm_lock);
+
return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
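
Usage is unchanged by this cleanup; a typical caller looks like the mmap-offset setup below (a sketch modelled on the GEM helpers, with demo_ naming). drm_vma_offset_add() stays idempotent, returning 0 when the node is already allocated.

        static int demo_gem_create_mmap_offset(struct drm_gem_object *obj)
        {
                struct drm_device *dev = obj->dev;

                return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                          obj->size >> PAGE_SHIFT);
        }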
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index a29b8f5..e5bfeca 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -6,13 +6,23 @@ config DRM_ETNAVIV
depends on MMU
select SHMEM
select SYNC_FILE
+ select THERMAL if DRM_ETNAVIV_THERMAL
select TMPFS
select WANT_DEV_COREDUMP
select CMA if HAVE_DMA_CONTIGUOUS
select DMA_CMA if HAVE_DMA_CONTIGUOUS
+ select DRM_SCHED
help
DRM driver for Vivante GPUs.
+config DRM_ETNAVIV_THERMAL
+ bool "enable ETNAVIV thermal throttling"
+ depends on DRM_ETNAVIV
+ default y
+ help
+ Compile in support for thermal throttling.
+ Say Y unless you want to risk burning your SoC.
+
config DRM_ETNAVIV_REGISTER_LOGGING
bool "enable ETNAVIV register logging"
depends on DRM_ETNAVIV
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index 1281c8d..46e5ffa 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -9,9 +9,11 @@ etnaviv-y := \
etnaviv_gem_submit.o \
etnaviv_gem.o \
etnaviv_gpu.o \
+ etnaviv_hwdb.o \
etnaviv_iommu_v2.o \
etnaviv_iommu.o \
etnaviv_mmu.o \
- etnaviv_perfmon.o
+ etnaviv_perfmon.o \
+ etnaviv_sched.o
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
index 207f45c..001faea 100644
--- a/drivers/gpu/drm/etnaviv/common.xml.h
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
@@ -8,15 +8,12 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state.xml ( 19930 bytes, from 2017-03-09 15:43:43)
-- common.xml ( 23473 bytes, from 2017-03-09 15:43:43)
-- state_hi.xml ( 26403 bytes, from 2017-03-09 15:43:43)
-- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
-- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
-- state_3d.xml ( 66957 bytes, from 2017-03-09 15:43:43)
-- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+- texdesc_3d.xml ( 3183 bytes, from 2017-12-18 16:51:59)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
-Copyright (C) 2012-2017 by the following authors:
+Copyright (C) 2012-2018 by the following authors:
- Wladimir J. van der Laan <laanwj@gmail.com>
- Christian Gmeiner <christian.gmeiner@gmail.com>
- Lucas Stach <l.stach@pengutronix.de>
@@ -49,12 +46,7 @@ DEALINGS IN THE SOFTWARE.
#define SYNC_RECIPIENT_RA 0x00000005
#define SYNC_RECIPIENT_PE 0x00000007
#define SYNC_RECIPIENT_DE 0x0000000b
-#define SYNC_RECIPIENT_VG 0x0000000f
-#define SYNC_RECIPIENT_TESSELATOR 0x00000010
-#define SYNC_RECIPIENT_VG2 0x00000011
-#define SYNC_RECIPIENT_TESSELATOR2 0x00000012
-#define SYNC_RECIPIENT_VG3 0x00000013
-#define SYNC_RECIPIENT_TESSELATOR3 0x00000014
+#define SYNC_RECIPIENT_BLT 0x00000010
#define ENDIAN_MODE_NO_SWAP 0x00000000
#define ENDIAN_MODE_SWAP_16 0x00000001
#define ENDIAN_MODE_SWAP_32 0x00000002
@@ -77,6 +69,7 @@ DEALINGS IN THE SOFTWARE.
#define chipModel_GC800 0x00000800
#define chipModel_GC860 0x00000860
#define chipModel_GC880 0x00000880
+#define chipModel_GC900 0x00000900
#define chipModel_GC1000 0x00001000
#define chipModel_GC1500 0x00001500
#define chipModel_GC2000 0x00002000
@@ -88,6 +81,12 @@ DEALINGS IN THE SOFTWARE.
#define chipModel_GC5000 0x00005000
#define chipModel_GC5200 0x00005200
#define chipModel_GC6400 0x00006400
+#define chipModel_GC7000 0x00007000
+#define chipModel_GC7400 0x00007400
+#define chipModel_GC8000 0x00008000
+#define chipModel_GC8100 0x00008100
+#define chipModel_GC8200 0x00008200
+#define chipModel_GC8400 0x00008400
#define RGBA_BITS_R 0x00000001
#define RGBA_BITS_G 0x00000002
#define RGBA_BITS_B 0x00000004
@@ -203,7 +202,7 @@ DEALINGS IN THE SOFTWARE.
#define chipMinorFeatures2_RGB888 0x00001000
#define chipMinorFeatures2_TX__YUV_ASSEMBLER 0x00002000
#define chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING 0x00004000
-#define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000
+#define chipMinorFeatures2_TX_FILTER 0x00008000
#define chipMinorFeatures2_FULL_DIRECTFB 0x00010000
#define chipMinorFeatures2_2D_TILING 0x00020000
#define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000
@@ -242,36 +241,36 @@ DEALINGS IN THE SOFTWARE.
#define chipMinorFeatures3_TX_ENHANCEMENTS1 0x00080000
#define chipMinorFeatures3_SH_ENHANCEMENTS1 0x00100000
#define chipMinorFeatures3_SH_ENHANCEMENTS2 0x00200000
-#define chipMinorFeatures3_UNK22 0x00400000
+#define chipMinorFeatures3_PE_ENHANCEMENTS1 0x00400000
#define chipMinorFeatures3_2D_FC_SOURCE 0x00800000
-#define chipMinorFeatures3_UNK24 0x01000000
-#define chipMinorFeatures3_UNK25 0x02000000
+#define chipMinorFeatures3_BUG_FIXES_14 0x01000000
+#define chipMinorFeatures3_POWER_OPTIMIZATIONS_0 0x02000000
#define chipMinorFeatures3_NEW_HZ 0x04000000
-#define chipMinorFeatures3_UNK27 0x08000000
-#define chipMinorFeatures3_UNK28 0x10000000
+#define chipMinorFeatures3_PE_DITHER_FIX 0x08000000
+#define chipMinorFeatures3_DE_ENHANCEMENTS3 0x10000000
#define chipMinorFeatures3_SH_ENHANCEMENTS3 0x20000000
-#define chipMinorFeatures3_UNK30 0x40000000
-#define chipMinorFeatures3_UNK31 0x80000000
-#define chipMinorFeatures4_UNK0 0x00000001
+#define chipMinorFeatures3_SH_ENHANCEMENTS4 0x40000000
+#define chipMinorFeatures3_TX_ENHANCEMENTS2 0x80000000
+#define chipMinorFeatures4_FE_ENHANCEMENTS1 0x00000001
#define chipMinorFeatures4_PE_ENHANCEMENTS2 0x00000002
#define chipMinorFeatures4_FRUSTUM_CLIP_FIX 0x00000004
-#define chipMinorFeatures4_UNK3 0x00000008
-#define chipMinorFeatures4_UNK4 0x00000010
+#define chipMinorFeatures4_DE_NO_GAMMA 0x00000008
+#define chipMinorFeatures4_PA_ENHANCEMENTS_2 0x00000010
#define chipMinorFeatures4_2D_GAMMA 0x00000020
#define chipMinorFeatures4_SINGLE_BUFFER 0x00000040
-#define chipMinorFeatures4_UNK7 0x00000080
-#define chipMinorFeatures4_UNK8 0x00000100
-#define chipMinorFeatures4_UNK9 0x00000200
-#define chipMinorFeatures4_UNK10 0x00000400
+#define chipMinorFeatures4_HI_ENHANCEMENTS_1 0x00000080
+#define chipMinorFeatures4_TX_ENHANCEMENTS_3 0x00000100
+#define chipMinorFeatures4_SH_ENHANCEMENTS_5 0x00000200
+#define chipMinorFeatures4_FE_ENHANCEMENTS_2 0x00000400
#define chipMinorFeatures4_TX_LERP_PRECISION_FIX 0x00000800
#define chipMinorFeatures4_2D_COLOR_SPACE_CONVERSION 0x00001000
#define chipMinorFeatures4_TEXTURE_ASTC 0x00002000
-#define chipMinorFeatures4_UNK14 0x00004000
-#define chipMinorFeatures4_UNK15 0x00008000
+#define chipMinorFeatures4_PE_ENHANCEMENTS_4 0x00004000
+#define chipMinorFeatures4_MC_ENHANCEMENTS_1 0x00008000
#define chipMinorFeatures4_HALTI2 0x00010000
-#define chipMinorFeatures4_UNK17 0x00020000
+#define chipMinorFeatures4_2D_MIRROR_EXTENSION 0x00020000
#define chipMinorFeatures4_SMALL_MSAA 0x00040000
-#define chipMinorFeatures4_UNK19 0x00080000
+#define chipMinorFeatures4_BUG_FIXES_17 0x00080000
#define chipMinorFeatures4_NEW_RA 0x00100000
#define chipMinorFeatures4_2D_OPF_YUV_OUTPUT 0x00200000
#define chipMinorFeatures4_2D_MULTI_SOURCE_BLT_EX2 0x00400000
@@ -280,41 +279,207 @@ DEALINGS IN THE SOFTWARE.
#define chipMinorFeatures4_BUG_FIXES18 0x02000000
#define chipMinorFeatures4_2D_COMPRESSION 0x04000000
#define chipMinorFeatures4_PROBE 0x08000000
-#define chipMinorFeatures4_UNK28 0x10000000
+#define chipMinorFeatures4_MEDIUM_PRECISION 0x10000000
#define chipMinorFeatures4_2D_SUPER_TILE_VERSION 0x20000000
-#define chipMinorFeatures4_UNK30 0x40000000
-#define chipMinorFeatures4_UNK31 0x80000000
-#define chipMinorFeatures5_UNK0 0x00000001
-#define chipMinorFeatures5_UNK1 0x00000002
-#define chipMinorFeatures5_UNK2 0x00000004
-#define chipMinorFeatures5_UNK3 0x00000008
+#define chipMinorFeatures4_BUG_FIXES19 0x40000000
+#define chipMinorFeatures4_SH_ENHANCEMENTS6 0x80000000
+#define chipMinorFeatures5_SH_ENHANCEMENTS7 0x00000001
+#define chipMinorFeatures5_BUG_FIXES20 0x00000002
+#define chipMinorFeatures5_DE_ADDRESS_40 0x00000004
+#define chipMinorFeatures5_MINI_MMU_FIX 0x00000008
#define chipMinorFeatures5_EEZ 0x00000010
-#define chipMinorFeatures5_UNK5 0x00000020
-#define chipMinorFeatures5_UNK6 0x00000040
-#define chipMinorFeatures5_UNK7 0x00000080
-#define chipMinorFeatures5_UNK8 0x00000100
+#define chipMinorFeatures5_BUG_FIXES21 0x00000020
+#define chipMinorFeatures5_EXTRA_VG_CAPS 0x00000040
+#define chipMinorFeatures5_MULTI_SRC_V15 0x00000080
+#define chipMinorFeatures5_BUG_FIXES22 0x00000100
#define chipMinorFeatures5_HALTI3 0x00000200
-#define chipMinorFeatures5_UNK10 0x00000400
+#define chipMinorFeatures5_TESSELATION_SHADERS 0x00000400
#define chipMinorFeatures5_2D_ONE_PASS_FILTER_TAP 0x00000800
-#define chipMinorFeatures5_UNK12 0x00001000
+#define chipMinorFeatures5_MULTI_SRC_V2_STR_QUAD 0x00001000
#define chipMinorFeatures5_SEPARATE_SRC_DST 0x00002000
#define chipMinorFeatures5_HALTI4 0x00004000
-#define chipMinorFeatures5_UNK15 0x00008000
+#define chipMinorFeatures5_RA_WRITE_DEPTH 0x00008000
#define chipMinorFeatures5_ANDROID_ONLY 0x00010000
#define chipMinorFeatures5_HAS_PRODUCTID 0x00020000
-#define chipMinorFeatures5_UNK18 0x00040000
-#define chipMinorFeatures5_UNK19 0x00080000
+#define chipMinorFeatures5_TX_SUPPORT_DEC 0x00040000
+#define chipMinorFeatures5_S8_MSAA_COMPRESSION 0x00080000
#define chipMinorFeatures5_PE_DITHER_FIX2 0x00100000
-#define chipMinorFeatures5_UNK21 0x00200000
-#define chipMinorFeatures5_UNK22 0x00400000
-#define chipMinorFeatures5_UNK23 0x00800000
-#define chipMinorFeatures5_UNK24 0x01000000
-#define chipMinorFeatures5_UNK25 0x02000000
-#define chipMinorFeatures5_UNK26 0x04000000
+#define chipMinorFeatures5_L2_CACHE_REMOVE 0x00200000
+#define chipMinorFeatures5_FE_ALLOW_RND_VTX_CNT 0x00400000
+#define chipMinorFeatures5_CUBE_MAP_FL28 0x00800000
+#define chipMinorFeatures5_TX_6BIT_FRAC 0x01000000
+#define chipMinorFeatures5_FE_ALLOW_STALL_PREFETCH_ENG 0x02000000
+#define chipMinorFeatures5_THIRD_PARTY_COMPRESSION 0x04000000
#define chipMinorFeatures5_RS_DEPTHSTENCIL_NATIVE_SUPPORT 0x08000000
#define chipMinorFeatures5_V2_MSAA_COMP_FIX 0x10000000
-#define chipMinorFeatures5_UNK29 0x20000000
-#define chipMinorFeatures5_UNK30 0x40000000
-#define chipMinorFeatures5_UNK31 0x80000000
+#define chipMinorFeatures5_HALTI5 0x20000000
+#define chipMinorFeatures5_EVIS 0x40000000
+#define chipMinorFeatures5_BLT_ENGINE 0x80000000
+#define chipMinorFeatures6_BUG_FIXES_23 0x00000001
+#define chipMinorFeatures6_BUG_FIXES_24 0x00000002
+#define chipMinorFeatures6_DEC 0x00000004
+#define chipMinorFeatures6_VS_TILE_NV12 0x00000008
+#define chipMinorFeatures6_VS_TILE_NV12_10BIT 0x00000010
+#define chipMinorFeatures6_RENDER_TARGET_8 0x00000020
+#define chipMinorFeatures6_TEX_LOD_FLOW_CORR 0x00000040
+#define chipMinorFeatures6_FACE_LOD 0x00000080
+#define chipMinorFeatures6_MULTI_CORE_SEMAPHORE_STALL_V2 0x00000100
+#define chipMinorFeatures6_VMSAA 0x00000200
+#define chipMinorFeatures6_CHIP_ENABLE_LINK 0x00000400
+#define chipMinorFeatures6_MULTI_SRC_BLT_1_5_ENHANCEMENT 0x00000800
+#define chipMinorFeatures6_MULTI_SRC_BLT_BILINEAR_FILTER 0x00001000
+#define chipMinorFeatures6_RA_HZEZ_CLOCK_CONTROL 0x00002000
+#define chipMinorFeatures6_CACHE128B256BPERLINE 0x00004000
+#define chipMinorFeatures6_V4_COMPRESSION 0x00008000
+#define chipMinorFeatures6_PE2D_MAJOR_SUPER_TILE 0x00010000
+#define chipMinorFeatures6_PE_32BPC_COLORMASK_FIX 0x00020000
+#define chipMinorFeatures6_ALPHA_BLENDING_OPT 0x00040000
+#define chipMinorFeatures6_NEW_GPIPE 0x00080000
+#define chipMinorFeatures6_PIPELINE_32_ATTRIBUTES 0x00100000
+#define chipMinorFeatures6_MSAA_SHADING 0x00200000
+#define chipMinorFeatures6_NO_ANISTRO_FILTER 0x00400000
+#define chipMinorFeatures6_NO_ASTC 0x00800000
+#define chipMinorFeatures6_NO_DXT 0x01000000
+#define chipMinorFeatures6_HWTFB 0x02000000
+#define chipMinorFeatures6_RA_DEPTH_WRITE_MSAA1X_FIX 0x04000000
+#define chipMinorFeatures6_EZHZ_CLOCKGATE_FIX 0x08000000
+#define chipMinorFeatures6_SH_SNAP2PAGE_FIX 0x10000000
+#define chipMinorFeatures6_SH_HALFDEPENDENCY_FIX 0x20000000
+#define chipMinorFeatures6_USC_MCFILL_FIX 0x40000000
+#define chipMinorFeatures6_TPG_TCPERF_FIX 0x80000000
+#define chipMinorFeatures7_USC_MDFIFO_OVERFLOW_FIX 0x00000001
+#define chipMinorFeatures7_SH_TEXLD_BARRIER_IN_CS_FIX 0x00000002
+#define chipMinorFeatures7_RS_NEW_BASEADDR 0x00000004
+#define chipMinorFeatures7_PE_8BPP_DUALPIPE_FIX 0x00000008
+#define chipMinorFeatures7_SH_ADVANCED_INSTR 0x00000010
+#define chipMinorFeatures7_SH_FLAT_INTERPOLATION_DUAL16_FIX 0x00000020
+#define chipMinorFeatures7_USC_CONTINUOUS_FLUS_FIX 0x00000040
+#define chipMinorFeatures7_SH_SUPPORT_V4 0x00000080
+#define chipMinorFeatures7_SH_SUPPORT_ALPHA_KILL 0x00000100
+#define chipMinorFeatures7_PE_NO_ALPHA_TEST 0x00000200
+#define chipMinorFeatures7_TX_LOD_NEAREST_SELECT 0x00000400
+#define chipMinorFeatures7_SH_FIX_LDEXP 0x00000800
+#define chipMinorFeatures7_SUPPORT_MOVAI 0x00001000
+#define chipMinorFeatures7_SH_SNAP2PAGE_MAXPAGES_FIX 0x00002000
+#define chipMinorFeatures7_PE_RGBA16I_FIX 0x00004000
+#define chipMinorFeatures7_BLT_8bpp_256TILE_FC_FIX 0x00008000
+#define chipMinorFeatures7_PE_64BIT_FENCE_FIX 0x00010000
+#define chipMinorFeatures7_USC_FULL_CACHE_FIX 0x00020000
+#define chipMinorFeatures7_TX_YUV_ASSEMBLER_10BIT 0x00040000
+#define chipMinorFeatures7_FE_32BIT_INDEX_FIX 0x00080000
+#define chipMinorFeatures7_BLT_64BPP_MASKED_CLEAR_FIX 0x00100000
+#define chipMinorFeatures7_BIT_SECURITY 0x00200000
+#define chipMinorFeatures7_BIT_ROBUSTNESS 0x00400000
+#define chipMinorFeatures7_USC_ATOMIC_FIX 0x00800000
+#define chipMinorFeatures7_SH_PSO_MSAA1x_FIX 0x01000000
+#define chipMinorFeatures7_BIT_USC_VX_PERF_FIX 0x02000000
+#define chipMinorFeatures7_EVIS_NO_ABSDIFF 0x04000000
+#define chipMinorFeatures7_EVIS_NO_BITREPLACE 0x08000000
+#define chipMinorFeatures7_EVIS_NO_BOXFILTER 0x10000000
+#define chipMinorFeatures7_EVIS_NO_CORDIAC 0x20000000
+#define chipMinorFeatures7_EVIS_NO_DP32 0x40000000
+#define chipMinorFeatures7_EVIS_NO_FILTER 0x80000000
+#define chipMinorFeatures8_EVIS_NO_IADD 0x00000001
+#define chipMinorFeatures8_EVIS_NO_SELECTADD 0x00000002
+#define chipMinorFeatures8_EVIS_LERP_7OUTPUT 0x00000004
+#define chipMinorFeatures8_EVIS_ACCSQ_8OUTPUT 0x00000008
+#define chipMinorFeatures8_USC_GOS_ADDR_FIX 0x00000010
+#define chipMinorFeatures8_TX_8BIT_UVFRAC 0x00000020
+#define chipMinorFeatures8_TX_DESC_CACHE_CLOCKGATE_FIX 0x00000040
+#define chipMinorFeatures8_RSBLT_MSAA_DECOMPRESSION 0x00000080
+#define chipMinorFeatures8_TX_INTEGER_COORDINATE 0x00000100
+#define chipMinorFeatures8_DRAWID 0x00000200
+#define chipMinorFeatures8_PSIO_SAMPLEMASK_IN_R0ZW_FIX 0x00000400
+#define chipMinorFeatures8_TX_INTEGER_COORDINATE_V2 0x00000800
+#define chipMinorFeatures8_MULTI_CORE_BLOCK_SET_CONFIG 0x00001000
+#define chipMinorFeatures8_VG_RESOLVE_ENGINE 0x00002000
+#define chipMinorFeatures8_VG_PE_COLOR_KEY 0x00004000
+#define chipMinorFeatures8_VG_IM_INDEX_FORMAT 0x00008000
+#define chipMinorFeatures8_SNAPPAGE_CMD 0x00010000
+#define chipMinorFeatures8_SH_NO_INDEX_CONST_ON_A0 0x00020000
+#define chipMinorFeatures8_SH_NO_ONECONST_LIMIT 0x00040000
+#define chipMinorFeatures8_SH_IMG_LDST_ON_TEMP 0x00080000
+#define chipMinorFeatures8_COMPUTE_ONLY 0x00100000
+#define chipMinorFeatures8_SH_IMG_LDST_CLAMP 0x00200000
+#define chipMinorFeatures8_SH_ICACHE_ALLOC_COUNT_FIX 0x00400000
+#define chipMinorFeatures8_SH_ICACHE_PREFETCH 0x00800000
+#define chipMinorFeatures8_PE2D_SEPARATE_CACHE 0x01000000
+#define chipMinorFeatures8_VG_AYUV_INPUT_OUTPUT 0x02000000
+#define chipMinorFeatures8_VG_DOUBLE_IMAGE 0x04000000
+#define chipMinorFeatures8_VG_RECTANGLE_STRIPE_MODE 0x08000000
+#define chipMinorFeatures8_VG_MMU 0x10000000
+#define chipMinorFeatures8_VG_IM_FILTER 0x20000000
+#define chipMinorFeatures8_VG_IM_YUV_PACKET 0x40000000
+#define chipMinorFeatures8_VG_IM_YUV_PLANAR 0x80000000
+#define chipMinorFeatures9_VG_PE_YUV_PACKET 0x00000001
+#define chipMinorFeatures9_VG_COLOR_PRECISION_8_BIT 0x00000002
+#define chipMinorFeatures9_PE_MSAA_OQ_FIX 0x00000004
+#define chipMinorFeatures9_PSIO_MSAA_CL_FIX 0x00000008
+#define chipMinorFeatures9_USC_DEFER_FILL_FIX 0x00000010
+#define chipMinorFeatures9_SH_CLOCK_GATE_FIX 0x00000020
+#define chipMinorFeatures9_FE_NEED_DUMMYDRAW 0x00000040
+#define chipMinorFeatures9_PE2D_LINEAR_YUV420_OUTPUT 0x00000080
+#define chipMinorFeatures9_PE2D_LINEAR_YUV420_10BIT 0x00000100
+#define chipMinorFeatures9_MULTI_CLUSTER 0x00000200
+#define chipMinorFeatures9_VG_TS_CULLING 0x00000400
+#define chipMinorFeatures9_VG_FP25 0x00000800
+#define chipMinorFeatures9_SH_MULTI_WG_PACK 0x00001000
+#define chipMinorFeatures9_SH_DUAL16_SAMPLEMASK_ZW 0x00002000
+#define chipMinorFeatures9_TPG_TRIVIAL_MODE_FIX 0x00004000
+#define chipMinorFeatures9_TX_ASTC_MULTISLICE_FIX 0x00008000
+#define chipMinorFeatures9_FE_ROBUST_FIX 0x00010000
+#define chipMinorFeatures9_SH_GPIPE_ACCESS_FULLTEMPS 0x00020000
+#define chipMinorFeatures9_PSIO_INTERLOCK 0x00040000
+#define chipMinorFeatures9_PA_WIDELINE_FIX 0x00080000
+#define chipMinorFeatures9_WIDELINE_HELPER_FIX 0x00100000
+#define chipMinorFeatures9_G2D_3RD_PARTY_COMPRESSION_1_1 0x00200000
+#define chipMinorFeatures9_TX_FLUSH_L1CACHE 0x00400000
+#define chipMinorFeatures9_PE_DITHER_FIX2 0x00800000
+#define chipMinorFeatures9_G2D_DEC400 0x01000000
+#define chipMinorFeatures9_SH_TEXLD_U_FIX 0x02000000
+#define chipMinorFeatures9_MC_FCCACHE_BYTEMASK 0x04000000
+#define chipMinorFeatures9_SH_MULTI_WG_PACK_FIX 0x08000000
+#define chipMinorFeatures9_DC_OVERLAY_SCALING 0x10000000
+#define chipMinorFeatures9_DC_SOURCE_ROTATION 0x20000000
+#define chipMinorFeatures9_DC_TILED 0x40000000
+#define chipMinorFeatures9_DC_YUV_L1 0x80000000
+#define chipMinorFeatures10_DC_D30_OUTPUT 0x00000001
+#define chipMinorFeatures10_DC_MMU 0x00000002
+#define chipMinorFeatures10_DC_COMPRESSION 0x00000004
+#define chipMinorFeatures10_DC_QOS 0x00000008
+#define chipMinorFeatures10_PE_ADVANCE_BLEND_PART0 0x00000010
+#define chipMinorFeatures10_FE_PATCHLIST_FETCH_FIX 0x00000020
+#define chipMinorFeatures10_RA_CG_FIX 0x00000040
+#define chipMinorFeatures10_EVIS_VX2 0x00000080
+#define chipMinorFeatures10_NN_FLOAT 0x00000100
+#define chipMinorFeatures10_DEC400 0x00000200
+#define chipMinorFeatures10_LS_SUPPORT_PERCOMP_DEPENDENCY 0x00000400
+#define chipMinorFeatures10_TP_ENGINE 0x00000800
+#define chipMinorFeatures10_MULTI_CORE_BLOCK_SET_CONFIG2 0x00001000
+#define chipMinorFeatures10_PE_VMSAA_COVERAGE_CACHE_FIX 0x00002000
+#define chipMinorFeatures10_SECURITY_AHB 0x00004000
+#define chipMinorFeatures10_MULTICORE_SEMAPHORESTALL_V3 0x00008000
+#define chipMinorFeatures10_SMALLBATCH 0x00010000
+#define chipMinorFeatures10_SH_CMPLX 0x00020000
+#define chipMinorFeatures10_SH_IDIV0_SWZL_EHS 0x00040000
+#define chipMinorFeatures10_TX_LERP_LESS_BIT 0x00080000
+#define chipMinorFeatures10_SH_GM_ENDIAN 0x00100000
+#define chipMinorFeatures10_SH_GM_USC_UNALLOC 0x00200000
+#define chipMinorFeatures10_SH_END_OF_BB 0x00400000
+#define chipMinorFeatures10_VIP_V7 0x00800000
+#define chipMinorFeatures10_TX_BORDER_CLAMP_FIX 0x01000000
+#define chipMinorFeatures10_SH_IMG_LD_LASTPIXEL_FIX 0x02000000
+#define chipMinorFeatures10_ASYNC_BLT 0x04000000
+#define chipMinorFeatures10_ASYNC_FE_FENCE_FIX 0x08000000
+#define chipMinorFeatures10_PSCS_THROTTLE 0x10000000
+#define chipMinorFeatures10_SEPARATE_LS 0x20000000
+#define chipMinorFeatures10_MCFE 0x40000000
+#define chipMinorFeatures10_WIDELINE_TRIANGLE_EMU 0x80000000
+#define chipMinorFeatures11_VG_RESOLUTION_8K 0x00000001
+#define chipMinorFeatures11_FENCE_32BIT 0x00000002
+#define chipMinorFeatures11_FENCE_64BIT 0x00000004
+#define chipMinorFeatures11_NN_INTERLEVE8 0x00000008
+#define chipMinorFeatures11_TP_REORDER 0x00000010
+#define chipMinorFeatures11_PE_DEPTH_ONLY_OQFIX 0x00000020
#endif /* COMMON_XML */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 9e7098e..bfc6d4a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -100,6 +100,8 @@ static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
{
u32 flush = 0;
+ lockdep_assert_held(&gpu->lock);
+
/*
* This assumes that if we're switching to 2D, we're switching
* away from 3D, and vice versa. Hence, if we're switching to
@@ -164,7 +166,9 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+
+ lockdep_assert_held(&gpu->lock);
/* initialize buffer */
buffer->user_size = 0;
@@ -178,7 +182,9 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+
+ lockdep_assert_held(&gpu->lock);
buffer->user_size = 0;
@@ -209,12 +215,32 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe
return buffer->user_size / 8;
}
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+
+ lockdep_assert_held(&gpu->lock);
+
+ buffer->user_size = 0;
+
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
+ VIVS_MMUv2_PTA_CONFIG_INDEX(0));
+
+ CMD_END(buffer);
+
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ return buffer->user_size / 8;
+}
+
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 link_target, flush = 0;
+ lockdep_assert_held(&gpu->lock);
+
if (gpu->exec_state == ETNA_PIPE_2D)
flush = VIVS_GL_FLUSH_CACHE_PE2D;
else if (gpu->exec_state == ETNA_PIPE_3D)
@@ -253,10 +279,12 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 dwords, target;
+ lockdep_assert_held(&gpu->lock);
+
/*
* We need at most 3 dwords in the return target:
* 1 event + 1 end + 1 wait + 1 link.
@@ -287,13 +315,16 @@ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
}
/* Append a command buffer to the ring buffer. */
-void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
- struct etnaviv_cmdbuf *cmdbuf)
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
+ bool switch_context = gpu->exec_state != exec_state;
+
+ lockdep_assert_held(&gpu->lock);
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
@@ -306,7 +337,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
* need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
- if (gpu->mmu->need_flush || gpu->switch_context) {
+ if (gpu->mmu->need_flush || switch_context) {
u32 target, extra_dwords;
/* link command */
@@ -321,7 +352,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
}
/* pipe switch commands */
- if (gpu->switch_context)
+ if (switch_context)
extra_dwords += 4;
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
@@ -349,10 +380,9 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
gpu->mmu->need_flush = false;
}
- if (gpu->switch_context) {
- etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
- gpu->exec_state = cmdbuf->exec_state;
- gpu->switch_context = false;
+ if (switch_context) {
+ etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
+ gpu->exec_state = exec_state;
}
/* And the link to the submitted buffer */
@@ -421,4 +451,6 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+
+ gpu->lastctx = cmdbuf->ctx;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
index 6e3bbcf..68e6d37 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -78,6 +78,7 @@ static const struct {
ST(0x17c0, 8),
ST(0x17e0, 8),
ST(0x2400, 14 * 16),
+ ST(0x3824, 1),
ST(0x10800, 32 * 16),
ST(0x14600, 16),
ST(0x14800, 8 * 8),
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 66ac795..3746827 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -86,26 +86,11 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
kfree(suballoc);
}
-struct etnaviv_cmdbuf *
-etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos, size_t nr_pmrs)
+int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_cmdbuf *cmdbuf, u32 size)
{
- struct etnaviv_cmdbuf *cmdbuf;
- struct etnaviv_perfmon_request *pmrs;
- size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
- sizeof(*cmdbuf));
int granule_offs, order, ret;
- cmdbuf = kzalloc(sz, GFP_KERNEL);
- if (!cmdbuf)
- return NULL;
-
- sz = sizeof(*pmrs) * nr_pmrs;
- pmrs = kzalloc(sz, GFP_KERNEL);
- if (!pmrs)
- goto out_free_cmdbuf;
-
- cmdbuf->pmrs = pmrs;
cmdbuf->suballoc = suballoc;
cmdbuf->size = size;
@@ -123,7 +108,7 @@ retry:
if (!ret) {
dev_err(suballoc->gpu->dev,
"Timeout waiting for cmdbuf space\n");
- return NULL;
+ return -ETIMEDOUT;
}
goto retry;
}
@@ -131,11 +116,7 @@ retry:
cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
- return cmdbuf;
-
-out_free_cmdbuf:
- kfree(cmdbuf);
- return NULL;
+ return 0;
}
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
@@ -151,8 +132,6 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
suballoc->free_space = 1;
mutex_unlock(&suballoc->lock);
wake_up_all(&suballoc->free_event);
- kfree(cmdbuf->pmrs);
- kfree(cmdbuf);
}
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index b6348b9..ddc3f7e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -33,27 +33,15 @@ struct etnaviv_cmdbuf {
void *vaddr;
u32 size;
u32 user_size;
- /* fence after which this buffer is to be disposed */
- struct dma_fence *fence;
- /* target exec state */
- u32 exec_state;
- /* per GPU in-flight list */
- struct list_head node;
- /* perfmon requests */
- unsigned int nr_pmrs;
- struct etnaviv_perfmon_request *pmrs;
- /* BOs attached to this command buffer */
- unsigned int nr_bos;
- struct etnaviv_vram_mapping *bo_map[0];
};
struct etnaviv_cmdbuf_suballoc *
etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu);
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
-struct etnaviv_cmdbuf *
-etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos, size_t nr_pmrs);
+
+int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_cmdbuf *cmdbuf, u32 size);
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
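
With etnaviv_cmdbuf_new() gone, command buffers are embedded in their owning structure (gpu->buffer, submit->cmdbuf) and initialised in place. A rough sketch of the new calling convention, assuming the gpu->cmdbuf_suballoc pointer used elsewhere in the driver; the demo_ functions are illustrative only:

        static int demo_gpu_init_ring(struct etnaviv_gpu *gpu)
        {
                /* previously: gpu->buffer = etnaviv_cmdbuf_new(...), returning NULL on error */
                return etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer, PAGE_SIZE);
        }

        static void demo_gpu_free_ring(struct etnaviv_gpu *gpu)
        {
                /* no kfree() any more: the cmdbuf storage lives inside *gpu */
                etnaviv_cmdbuf_free(&gpu->buffer);
        }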
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 491eddf..ab50090 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -101,12 +101,25 @@ static void load_gpu(struct drm_device *dev)
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
+ struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_file_private *ctx;
+ int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
+ struct etnaviv_gpu *gpu = priv->gpu[i];
+
+ if (gpu) {
+ drm_sched_entity_init(&gpu->sched,
+ &ctx->sched_entity[i],
+ &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
+ 32, NULL);
+ }
+ }
+
file->driver_priv = ctx;
return 0;
@@ -126,6 +139,9 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
if (gpu->lastctx == ctx)
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
+
+ drm_sched_entity_fini(&gpu->sched,
+ &ctx->sched_entity[i]);
}
}
@@ -172,7 +188,7 @@ static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
- struct etnaviv_cmdbuf *buf = gpu->buffer;
+ struct etnaviv_cmdbuf *buf = &gpu->buffer;
u32 size = buf->size;
u32 *ptr = buf->vaddr;
u32 i;
@@ -459,9 +475,6 @@ static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
struct drm_etnaviv_pm_domain *args = data;
struct etnaviv_gpu *gpu;
- /* reject as long as the feature isn't stable */
- return -EINVAL;
-
if (args->pipe >= ETNA_MAX_PIPES)
return -EINVAL;
@@ -479,9 +492,6 @@ static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
struct drm_etnaviv_pm_signal *args = data;
struct etnaviv_gpu *gpu;
- /* reject as long as the feature isn't stable */
- return -EINVAL;
-
if (args->pipe >= ETNA_MAX_PIPES)
return -EINVAL;
@@ -556,7 +566,7 @@ static struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
- .minor = 1,
+ .minor = 2,
};
/*
@@ -580,12 +590,6 @@ static int etnaviv_bind(struct device *dev)
}
drm->dev_private = priv;
- priv->wq = alloc_ordered_workqueue("etnaviv", 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto out_wq;
- }
-
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
@@ -607,9 +611,6 @@ static int etnaviv_bind(struct device *dev)
out_register:
component_unbind_all(dev, drm);
out_bind:
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-out_wq:
kfree(priv);
out_unref:
drm_dev_unref(drm);
@@ -624,9 +625,6 @@ static void etnaviv_unbind(struct device *dev)
drm_dev_unregister(drm);
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-
component_unbind_all(dev, drm);
drm->dev_private = NULL;
@@ -655,25 +653,21 @@ static int compare_str(struct device *dev, void *data)
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
struct component_match *match = NULL;
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (node) {
+ if (!dev->platform_data) {
struct device_node *core_node;
- int i;
- for (i = 0; ; i++) {
- core_node = of_parse_phandle(node, "cores", i);
- if (!core_node)
- break;
+ for_each_compatible_node(core_node, NULL, "vivante,gc") {
+ if (!of_device_is_available(core_node))
+ continue;
drm_of_component_match_add(&pdev->dev, &match,
compare_of, core_node);
- of_node_put(core_node);
}
- } else if (dev->platform_data) {
+ } else {
char **names = dev->platform_data;
unsigned i;
@@ -691,25 +685,18 @@ static int etnaviv_pdev_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id dt_match[] = {
- { .compatible = "fsl,imx-gpu-subsystem" },
- { .compatible = "marvell,dove-gpu-subsystem" },
- {}
-};
-MODULE_DEVICE_TABLE(of, dt_match);
-
static struct platform_driver etnaviv_platform_driver = {
.probe = etnaviv_pdev_probe,
.remove = etnaviv_pdev_remove,
.driver = {
.name = "etnaviv",
- .of_match_table = dt_match,
},
};
static int __init etnaviv_init(void)
{
int ret;
+ struct device_node *np;
etnaviv_validate_init();
@@ -721,6 +708,19 @@ static int __init etnaviv_init(void)
if (ret != 0)
platform_driver_unregister(&etnaviv_gpu_driver);
+ /*
+ * If the DT contains at least one available GPU device, instantiate
+ * the DRM platform device.
+ */
+ for_each_compatible_node(np, NULL, "vivante,gc") {
+ if (!of_device_is_available(np))
+ continue;
+
+ platform_device_register_simple("etnaviv", -1, NULL, 0);
+ of_node_put(np);
+ break;
+ }
+
return ret;
}
module_init(etnaviv_init);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index d249acb..ddb17ee5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -34,6 +34,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
+#include <drm/gpu_scheduler.h>
struct etnaviv_cmdbuf;
struct etnaviv_gpu;
@@ -42,11 +43,11 @@ struct etnaviv_gem_object;
struct etnaviv_gem_submit;
struct etnaviv_file_private {
- /* currently we don't do anything useful with this.. but when
- * per-context address spaces are supported we'd keep track of
+ /*
+ * When per-context address spaces are supported we'd keep track of
* the context's page-tables here.
*/
- int dummy;
+ struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
};
struct etnaviv_drm_private {
@@ -56,18 +57,8 @@ struct etnaviv_drm_private {
/* list of GEM objects: */
struct mutex gem_lock;
struct list_head gem_list;
-
- struct workqueue_struct *wq;
};
-static inline void etnaviv_queue_work(struct drm_device *dev,
- struct work_struct *w)
-{
- struct etnaviv_drm_private *priv = dev->dev_private;
-
- queue_work(priv->wq, w);
-}
-
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -95,10 +86,11 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
-void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
- struct etnaviv_cmdbuf *cmdbuf);
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ unsigned int event, struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
u32 *stream, unsigned int size,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 2d955d7..48aef6c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -20,9 +20,13 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
+#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"
+static bool etnaviv_dump_core = true;
+module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
+
struct core_dump_iterator {
void *start;
struct etnaviv_dump_object_header *hdr;
@@ -120,11 +124,17 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
struct core_dump_iterator iter;
struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
- struct etnaviv_cmdbuf *cmd;
+ struct etnaviv_gem_submit *submit;
+ struct drm_sched_job *s_job;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ /* Only catch the first event, or when manually re-armed */
+ if (!etnaviv_dump_core)
+ return;
+ etnaviv_dump_core = false;
+
mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
/* We always dump registers, mmu, ring and end marker */
@@ -132,13 +142,16 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
n_bomap_pages = 0;
file_size = ARRAY_SIZE(etnaviv_dump_registers) *
sizeof(struct etnaviv_dump_registers) +
- mmu_size + gpu->buffer->size;
+ mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- list_for_each_entry(cmd, &gpu->active_cmd_list, node) {
- file_size += cmd->size;
+ spin_lock(&gpu->sched.job_list_lock);
+ list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
+ submit = to_etnaviv_submit(s_job);
+ file_size += submit->cmdbuf.size;
n_obj++;
}
+ spin_unlock(&gpu->sched.job_list_lock);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -176,13 +189,18 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
etnaviv_core_dump_registers(&iter, gpu);
etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
- etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
- gpu->buffer->size,
- etnaviv_cmdbuf_get_va(gpu->buffer));
-
- list_for_each_entry(cmd, &gpu->active_cmd_list, node)
- etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
- cmd->size, etnaviv_cmdbuf_get_va(cmd));
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
+ gpu->buffer.size,
+ etnaviv_cmdbuf_get_va(&gpu->buffer));
+
+ spin_lock(&gpu->sched.job_list_lock);
+ list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
+ submit = to_etnaviv_submit(s_job);
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+ submit->cmdbuf.vaddr, submit->cmdbuf.size,
+ etnaviv_cmdbuf_get_va(&submit->cmdbuf));
+ }
+ spin_unlock(&gpu->sched.job_list_lock);
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index daee3f1..fcc969f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -24,6 +24,9 @@
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
+static struct lock_class_key etnaviv_shm_lock_class;
+static struct lock_class_key etnaviv_userptr_lock_class;
+
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
struct drm_device *dev = etnaviv_obj->base.dev;
@@ -583,7 +586,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
kfree(etnaviv_obj);
}
-int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
+void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
@@ -591,8 +594,6 @@ int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
mutex_lock(&priv->gem_lock);
list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
mutex_unlock(&priv->gem_lock);
-
- return 0;
}
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
@@ -640,8 +641,9 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
return 0;
}
-static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
- u32 size, u32 flags)
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle)
{
struct drm_gem_object *obj = NULL;
int ret;
@@ -653,6 +655,8 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
if (ret)
goto fail;
+ lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
+
ret = drm_gem_object_init(dev, obj, size);
if (ret == 0) {
struct address_space *mapping;
@@ -660,7 +664,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
/*
* Our buffers are kept pinned, so allocating them
* from the MOVABLE zone is a really bad idea, and
- * conflicts with CMA. See coments above new_inode()
+ * conflicts with CMA. See comments above new_inode()
* why this is required _and_ expected if you're
* going to pin these pages.
*/
@@ -672,33 +676,12 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
if (ret)
goto fail;
- return obj;
-
-fail:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
-}
-
-/* convenience method to construct a GEM buffer object, and userspace handle */
-int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- u32 size, u32 flags, u32 *handle)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = __etnaviv_gem_new(dev, size, flags);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- ret = etnaviv_gem_obj_add(dev, obj);
- if (ret < 0) {
- drm_gem_object_put_unlocked(obj);
- return ret;
- }
+ etnaviv_gem_obj_add(dev, obj);
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
+fail:
drm_gem_object_put_unlocked(obj);
return ret;
@@ -722,139 +705,41 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
return 0;
}
-struct get_pages_work {
- struct work_struct work;
- struct mm_struct *mm;
- struct task_struct *task;
- struct etnaviv_gem_object *etnaviv_obj;
-};
-
-static struct page **etnaviv_gem_userptr_do_get_pages(
- struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
-{
- int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- struct page **pvec;
- uintptr_t ptr;
- unsigned int flags = 0;
-
- pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!pvec)
- return ERR_PTR(-ENOMEM);
-
- if (!etnaviv_obj->userptr.ro)
- flags |= FOLL_WRITE;
-
- pinned = 0;
- ptr = etnaviv_obj->userptr.ptr;
-
- down_read(&mm->mmap_sem);
- while (pinned < npages) {
- ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- flags, pvec + pinned, NULL, NULL);
- if (ret < 0)
- break;
-
- ptr += ret * PAGE_SIZE;
- pinned += ret;
- }
- up_read(&mm->mmap_sem);
-
- if (ret < 0) {
- release_pages(pvec, pinned);
- kvfree(pvec);
- return ERR_PTR(ret);
- }
-
- return pvec;
-}
-
-static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
-{
- struct get_pages_work *work = container_of(_work, typeof(*work), work);
- struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
- struct page **pvec;
-
- pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
-
- mutex_lock(&etnaviv_obj->lock);
- if (IS_ERR(pvec)) {
- etnaviv_obj->userptr.work = ERR_CAST(pvec);
- } else {
- etnaviv_obj->userptr.work = NULL;
- etnaviv_obj->pages = pvec;
- }
-
- mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_put_unlocked(&etnaviv_obj->base);
-
- mmput(work->mm);
- put_task_struct(work->task);
- kfree(work);
-}
-
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
struct page **pvec = NULL;
- struct get_pages_work *work;
- struct mm_struct *mm;
- int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
-
- if (etnaviv_obj->userptr.work) {
- if (IS_ERR(etnaviv_obj->userptr.work)) {
- ret = PTR_ERR(etnaviv_obj->userptr.work);
- etnaviv_obj->userptr.work = NULL;
- } else {
- ret = -EAGAIN;
- }
- return ret;
- }
+ struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
+ int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- mm = get_task_mm(etnaviv_obj->userptr.task);
- pinned = 0;
- if (mm == current->mm) {
- pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!pvec) {
- mmput(mm);
- return -ENOMEM;
- }
-
- pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
- !etnaviv_obj->userptr.ro, pvec);
- if (pinned < 0) {
- kvfree(pvec);
- mmput(mm);
- return pinned;
- }
-
- if (pinned == npages) {
- etnaviv_obj->pages = pvec;
- mmput(mm);
- return 0;
- }
- }
+ might_lock_read(&current->mm->mmap_sem);
- release_pages(pvec, pinned);
- kvfree(pvec);
+ if (userptr->mm != current->mm)
+ return -EPERM;
- work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (!work) {
- mmput(mm);
+ pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!pvec)
return -ENOMEM;
- }
- get_task_struct(current);
- drm_gem_object_get(&etnaviv_obj->base);
+ do {
+ unsigned num_pages = npages - pinned;
+ uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
+ struct page **pages = pvec + pinned;
- work->mm = mm;
- work->task = current;
- work->etnaviv_obj = etnaviv_obj;
+ ret = get_user_pages_fast(ptr, num_pages,
+ !userptr->ro ? FOLL_WRITE : 0, pages);
+ if (ret < 0) {
+ release_pages(pvec, pinned);
+ kvfree(pvec);
+ return ret;
+ }
+
+ pinned += ret;
- etnaviv_obj->userptr.work = &work->work;
- INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
+ } while (pinned < npages);
- etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);
+ etnaviv_obj->pages = pvec;
- return -EAGAIN;
+ return 0;
}
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
@@ -870,7 +755,6 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
release_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages);
}
- put_task_struct(etnaviv_obj->userptr.task);
}
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
@@ -897,17 +781,16 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
if (ret)
return ret;
+ lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
+
etnaviv_obj->userptr.ptr = ptr;
- etnaviv_obj->userptr.task = current;
+ etnaviv_obj->userptr.mm = current->mm;
etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
- get_task_struct(current);
- ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
- if (ret)
- goto unreference;
+ etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
-unreference:
+
/* drop reference from allocate - handle holds it now */
drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ret;
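The two static lock_class_key objects added at the top of this file (and a third in etnaviv_gem_prime.c below) give each flavour of GEM object its own lockdep class. The userptr path can now take mmap_sem while holding the object lock (the might_lock_read() annotation above documents this), while shmem objects may be locked from the fault path with mmap_sem already held; with a single shared class lockdep would flag that as a deadlock. A generic sketch of the pattern, using a made-up struct my_obj rather than a driver type:

/* Sketch only: distinct lockdep classes for mutexes that share an init
 * site but obey different nesting rules against mmap_sem. */
static struct lock_class_key shmem_lock_class;
static struct lock_class_key userptr_lock_class;

struct my_obj {
	struct mutex lock;
	/* ... */
};

static void my_obj_init_lock(struct my_obj *obj, bool is_userptr)
{
	mutex_init(&obj->lock);
	lockdep_set_class(&obj->lock,
			  is_userptr ? &userptr_lock_class : &shmem_lock_class);
}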
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index e437fba..93e696f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -18,6 +18,7 @@
#define __ETNAVIV_GEM_H__
#include <linux/reservation.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
struct dma_fence;
@@ -26,8 +27,7 @@ struct etnaviv_gem_object;
struct etnaviv_gem_userptr {
uintptr_t ptr;
- struct task_struct *task;
- struct work_struct *work;
+ struct mm_struct *mm;
bool ro;
};
@@ -94,30 +94,41 @@ struct etnaviv_gem_submit_bo {
u32 flags;
struct etnaviv_gem_object *obj;
struct etnaviv_vram_mapping *mapping;
+ struct dma_fence *excl;
+ unsigned int nr_shared;
+ struct dma_fence **shared;
};
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
- * make it easier to unwind when things go wrong, etc). This only
- * lasts for the duration of the submit-ioctl.
+ * make it easier to unwind when things go wrong, etc).
*/
struct etnaviv_gem_submit {
- struct drm_device *dev;
+ struct drm_sched_job sched_job;
+ struct kref refcount;
struct etnaviv_gpu *gpu;
- struct ww_acquire_ctx ticket;
- struct dma_fence *fence;
+ struct dma_fence *out_fence, *in_fence;
+ int out_fence_id;
+ struct list_head node; /* GPU active submit list */
+ struct etnaviv_cmdbuf cmdbuf;
+ bool runtime_resumed;
+ u32 exec_state;
u32 flags;
+ unsigned int nr_pmrs;
+ struct etnaviv_perfmon_request *pmrs;
unsigned int nr_bos;
struct etnaviv_gem_submit_bo bos[0];
/* No new members here, the previous one is variable-length! */
};
+void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
+
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
struct timespec *timeout);
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
struct etnaviv_gem_object **res);
-int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
+void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
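The bos[] member at the end of struct etnaviv_gem_submit stays a variable-length array, which is why the comment insists that no new fields follow it: submit_create() in etnaviv_gem_submit.c below sizes the allocation from nr_bos via size_vstruct(). That helper is defined elsewhere (etnaviv_drv.h, outside this diff); a hedged sketch of what it amounts to:

/* Sketch: overflow-safe sizing of a struct with a trailing variable-length
 * array, as used by submit_create(). */
static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
{
	if (elem_size && nelem > (SIZE_MAX - base) / elem_size)
		return SIZE_MAX;	/* let the allocation fail cleanly */
	return base + nelem * elem_size;
}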
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index ae88472..5704305 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -19,6 +19,7 @@
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
+static struct lock_class_key etnaviv_prime_lock_class;
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
@@ -125,6 +126,8 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
if (ret < 0)
return ERR_PTR(ret);
+ lockdep_set_class(&etnaviv_obj->lock, &etnaviv_prime_lock_class);
+
npages = size / PAGE_SIZE;
etnaviv_obj->sgt = sgt;
@@ -139,9 +142,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
goto fail;
- ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
- if (ret)
- goto fail;
+ etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
return &etnaviv_obj->base;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index ff91154..46ecd3e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -22,6 +22,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
+#include "etnaviv_sched.h"
/*
* Cmdstream submission:
@@ -33,22 +34,25 @@
#define BO_PINNED 0x2000
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
- struct etnaviv_gpu *gpu, size_t nr)
+ struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
struct etnaviv_gem_submit *submit;
- size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
+ size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
- submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (submit) {
- submit->dev = dev;
- submit->gpu = gpu;
+ submit = kzalloc(sz, GFP_KERNEL);
+ if (!submit)
+ return NULL;
- /* initially, until copy_from_user() and bo lookup succeeds: */
- submit->nr_bos = 0;
- submit->fence = NULL;
-
- ww_acquire_init(&submit->ticket, &reservation_ww_class);
+ submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
+ GFP_KERNEL);
+ if (!submit->pmrs) {
+ kfree(submit);
+ return NULL;
}
+ submit->nr_pmrs = nr_pmrs;
+
+ submit->gpu = gpu;
+ kref_init(&submit->refcount);
return submit;
}
@@ -111,7 +115,8 @@ static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
}
}
-static int submit_lock_objects(struct etnaviv_gem_submit *submit)
+static int submit_lock_objects(struct etnaviv_gem_submit *submit,
+ struct ww_acquire_ctx *ticket)
{
int contended, slow_locked = -1, i, ret = 0;
@@ -126,7 +131,7 @@ retry:
if (!(submit->bos[i].flags & BO_LOCKED)) {
ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
- &submit->ticket);
+ ticket);
if (ret == -EALREADY)
DRM_ERROR("BO at index %u already on submit list\n",
i);
@@ -136,7 +141,7 @@ retry:
}
}
- ww_acquire_done(&submit->ticket);
+ ww_acquire_done(ticket);
return 0;
@@ -154,7 +159,7 @@ fail:
/* we lost out in a seqno race, lock and retry.. */
ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
- &submit->ticket);
+ ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
slow_locked = contended;
@@ -165,35 +170,53 @@ fail:
return ret;
}
-static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
+static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
- unsigned int context = submit->gpu->fence_context;
int i, ret = 0;
for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
- bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
- bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
+ struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
+ struct reservation_object *robj = bo->obj->resv;
+
+ if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
+ ret = reservation_object_reserve_shared(robj);
+ if (ret)
+ return ret;
+ }
+
+ if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
+ continue;
+
+ if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
+ ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+ &bo->nr_shared,
+ &bo->shared);
+ if (ret)
+ return ret;
+ } else {
+ bo->excl = reservation_object_get_excl_rcu(robj);
+ }
- ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
- explicit);
- if (ret)
- break;
}
return ret;
}
-static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
+static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{
int i;
for (i = 0; i < submit->nr_bos; i++) {
- if (submit->bos[i].flags & BO_PINNED)
- etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
- submit->bos[i].mapping = NULL;
- submit->bos[i].flags &= ~BO_PINNED;
+ if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
+ reservation_object_add_excl_fence(etnaviv_obj->resv,
+ submit->out_fence);
+ else
+ reservation_object_add_shared_fence(etnaviv_obj->resv,
+ submit->out_fence);
+
+ submit_unlock_object(submit, i);
}
}
@@ -211,6 +234,7 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
ret = PTR_ERR(mapping);
break;
}
+ atomic_inc(&etnaviv_obj->gpu_active);
submit->bos[i].flags |= BO_PINNED;
submit->bos[i].mapping = mapping;
@@ -285,13 +309,11 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
}
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
- struct etnaviv_cmdbuf *cmdbuf,
- const struct drm_etnaviv_gem_submit_pmr *pmrs,
- u32 nr_pms)
+ u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
{
u32 i;
- for (i = 0; i < nr_pms; i++) {
+ for (i = 0; i < submit->nr_pmrs; i++) {
const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
struct etnaviv_gem_submit_bo *bo;
int ret;
@@ -316,52 +338,83 @@ static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
return -EINVAL;
}
- if (etnaviv_pm_req_validate(r, cmdbuf->exec_state)) {
+ if (etnaviv_pm_req_validate(r, exec_state)) {
DRM_ERROR("perfmon request: domain or signal not valid");
return -EINVAL;
}
- cmdbuf->pmrs[i].flags = r->flags;
- cmdbuf->pmrs[i].domain = r->domain;
- cmdbuf->pmrs[i].signal = r->signal;
- cmdbuf->pmrs[i].sequence = r->sequence;
- cmdbuf->pmrs[i].offset = r->read_offset;
- cmdbuf->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
+ submit->pmrs[i].flags = r->flags;
+ submit->pmrs[i].domain = r->domain;
+ submit->pmrs[i].signal = r->signal;
+ submit->pmrs[i].sequence = r->sequence;
+ submit->pmrs[i].offset = r->read_offset;
+ submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
}
return 0;
}
-static void submit_cleanup(struct etnaviv_gem_submit *submit)
+static void submit_cleanup(struct kref *kref)
{
+ struct etnaviv_gem_submit *submit =
+ container_of(kref, struct etnaviv_gem_submit, refcount);
unsigned i;
+ if (submit->runtime_resumed)
+ pm_runtime_put_autosuspend(submit->gpu->dev);
+
+ if (submit->cmdbuf.suballoc)
+ etnaviv_cmdbuf_free(&submit->cmdbuf);
+
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ /* unpin all objects */
+ if (submit->bos[i].flags & BO_PINNED) {
+ etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
+ atomic_dec(&etnaviv_obj->gpu_active);
+ submit->bos[i].mapping = NULL;
+ submit->bos[i].flags &= ~BO_PINNED;
+ }
+
+ /* if the GPU submit failed, objects might still be locked */
submit_unlock_object(submit, i);
drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
- ww_acquire_fini(&submit->ticket);
- if (submit->fence)
- dma_fence_put(submit->fence);
+ wake_up_all(&submit->gpu->fence_event);
+
+ if (submit->in_fence)
+ dma_fence_put(submit->in_fence);
+ if (submit->out_fence) {
+ /* first remove from IDR, so fence can not be found anymore */
+ mutex_lock(&submit->gpu->fence_idr_lock);
+ idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
+ mutex_unlock(&submit->gpu->fence_idr_lock);
+ dma_fence_put(submit->out_fence);
+ }
+ kfree(submit->pmrs);
kfree(submit);
}
+void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
+{
+ kref_put(&submit->refcount, submit_cleanup);
+}
+
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct etnaviv_file_private *ctx = file->driver_priv;
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_submit *args = data;
struct drm_etnaviv_gem_submit_reloc *relocs;
struct drm_etnaviv_gem_submit_pmr *pmrs;
struct drm_etnaviv_gem_submit_bo *bos;
struct etnaviv_gem_submit *submit;
- struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_gpu *gpu;
- struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
+ struct ww_acquire_ctx ticket;
int out_fence_fd = -1;
void *stream;
int ret;
@@ -399,17 +452,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
- cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
- ALIGN(args->stream_size, 8) + 8,
- args->nr_bos, args->nr_pmrs);
- if (!bos || !relocs || !pmrs || !stream || !cmdbuf) {
+ if (!bos || !relocs || !pmrs || !stream) {
ret = -ENOMEM;
goto err_submit_cmds;
}
- cmdbuf->exec_state = args->exec_state;
- cmdbuf->ctx = file->driver_priv;
-
ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
args->nr_bos * sizeof(*bos));
if (ret) {
@@ -430,7 +477,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = -EFAULT;
goto err_submit_cmds;
}
- cmdbuf->nr_pmrs = args->nr_pmrs;
ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
args->stream_size);
@@ -447,19 +493,24 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
- submit = submit_create(dev, gpu, args->nr_bos);
+ ww_acquire_init(&ticket, &reservation_ww_class);
+
+ submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
if (!submit) {
ret = -ENOMEM;
- goto err_submit_cmds;
+ goto err_submit_ww_acquire;
}
- submit->flags = args->flags;
-
- ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
+ ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
+ ALIGN(args->stream_size, 8) + 8);
if (ret)
goto err_submit_objects;
- ret = submit_lock_objects(submit);
+ submit->cmdbuf.ctx = file->driver_priv;
+ submit->exec_state = args->exec_state;
+ submit->flags = args->flags;
+
+ ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
if (ret)
goto err_submit_objects;
@@ -470,48 +521,41 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
- in_fence = sync_file_get_fence(args->fence_fd);
- if (!in_fence) {
+ submit->in_fence = sync_file_get_fence(args->fence_fd);
+ if (!submit->in_fence) {
ret = -EINVAL;
goto err_submit_objects;
}
-
- /*
- * Wait if the fence is from a foreign context, or if the fence
- * array contains any fence from a foreign context.
- */
- if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
- ret = dma_fence_wait(in_fence, true);
- if (ret)
- goto err_submit_objects;
- }
}
- ret = submit_fence_sync(submit);
- if (ret)
- goto err_submit_objects;
-
ret = submit_pin_objects(submit);
if (ret)
- goto out;
+ goto err_submit_objects;
ret = submit_reloc(submit, stream, args->stream_size / 4,
relocs, args->nr_relocs);
if (ret)
- goto out;
+ goto err_submit_objects;
+
+ ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
+ if (ret)
+ goto err_submit_objects;
+
+ memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
- ret = submit_perfmon_validate(submit, cmdbuf, pmrs, args->nr_pmrs);
+ ret = submit_lock_objects(submit, &ticket);
if (ret)
- goto out;
+ goto err_submit_objects;
- memcpy(cmdbuf->vaddr, stream, args->stream_size);
- cmdbuf->user_size = ALIGN(args->stream_size, 8);
+ ret = submit_fence_sync(submit);
+ if (ret)
+ goto err_submit_objects;
- ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
+ ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
if (ret)
- goto out;
+ goto err_submit_objects;
- cmdbuf = NULL;
+ submit_attach_object_fences(submit);
if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
/*
@@ -520,39 +564,26 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
* fence to the sync file here, eliminating the ENOMEM
* possibility at this stage.
*/
- sync_file = sync_file_create(submit->fence);
+ sync_file = sync_file_create(submit->out_fence);
if (!sync_file) {
ret = -ENOMEM;
- goto out;
+ goto err_submit_objects;
}
fd_install(out_fence_fd, sync_file->file);
}
args->fence_fd = out_fence_fd;
- args->fence = submit->fence->seqno;
-
-out:
- submit_unpin_objects(submit);
-
- /*
- * If we're returning -EAGAIN, it may be due to the userptr code
- * wanting to run its workqueue outside of any locks. Flush our
- * workqueue to ensure that it is run in a timely manner.
- */
- if (ret == -EAGAIN)
- flush_workqueue(priv->wq);
+ args->fence = submit->out_fence_id;
err_submit_objects:
- if (in_fence)
- dma_fence_put(in_fence);
- submit_cleanup(submit);
+ etnaviv_submit_put(submit);
+
+err_submit_ww_acquire:
+ ww_acquire_fini(&ticket);
err_submit_cmds:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
- /* if we still own the cmdbuf */
- if (cmdbuf)
- etnaviv_cmdbuf_free(cmdbuf);
if (stream)
kvfree(stream);
if (bos)
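With the switch to the DRM scheduler, submit_fence_sync() above no longer blocks on foreign fences; it only reserves a shared-fence slot for readers and stores the fences a job depends on in each submit BO (bo->excl and bo->shared). Those stored references are then handed to the scheduler one at a time through its dependency callback, which lives in etnaviv_sched.c and is not part of this diff. An illustrative sketch of that hand-over, assuming the to_etnaviv_submit() helper sketched earlier:

/* Illustrative only: how a drm_sched dependency callback could drain the
 * fences recorded by submit_fence_sync(), transferring each reference to
 * the scheduler. */
static struct dma_fence *
example_sched_dependency(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

	/* explicit in-fence from the sync file, if any */
	if (submit->in_fence) {
		fence = submit->in_fence;
		submit->in_fence = NULL;
		if (!dma_fence_is_signaled(fence))
			return fence;
		dma_fence_put(fence);
	}

	/* implicit per-BO fences collected by submit_fence_sync() */
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];

		if (bo->excl) {
			fence = bo->excl;
			bo->excl = NULL;
			if (!dma_fence_is_signaled(fence))
				return fence;
			dma_fence_put(fence);
		}
		/* bo->shared[0..nr_shared-1] would be drained the same way */
	}

	return NULL;	/* nothing left to wait on, the job may run */
}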
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index e19cbe0..8a88799 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -26,19 +26,21 @@
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
+#include "etnaviv_sched.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET 0
+#endif
+
static const struct platform_device_id gpu_ids[] = {
{ .name = "etnaviv-gpu,2d" },
{ },
};
-static bool etnaviv_dump_core = true;
-module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
-
/*
* Driver functions:
*/
@@ -82,6 +84,30 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.minor_features5;
break;
+ case ETNAVIV_PARAM_GPU_FEATURES_7:
+ *value = gpu->identity.minor_features6;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_8:
+ *value = gpu->identity.minor_features7;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_9:
+ *value = gpu->identity.minor_features8;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_10:
+ *value = gpu->identity.minor_features9;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_11:
+ *value = gpu->identity.minor_features10;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_12:
+ *value = gpu->identity.minor_features11;
+ break;
+
case ETNAVIV_PARAM_GPU_STREAM_COUNT:
*value = gpu->identity.stream_count;
break;
@@ -348,6 +374,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
dev_info(gpu->dev, "model: GC%x, revision: %x\n",
gpu->identity.model, gpu->identity.revision);
+ /*
+ * If there is a match in the HWDB, we aren't interested in the
+ * remaining register values, as they might be wrong.
+ */
+ if (etnaviv_fill_identity_from_hwdb(gpu))
+ return;
+
gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
/* Disable fast clear on GC700. */
@@ -448,9 +481,14 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
- /* set soft reset. */
- control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
- gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+ if (gpu->sec_mode == ETNA_SEC_KERNEL) {
+ gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
+ VIVS_MMUv2_AHB_CONTROL_RESET);
+ } else {
+ /* set soft reset. */
+ control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+ }
/* wait for reset. */
usleep_range(10, 20);
@@ -561,6 +599,12 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
VIVS_FE_COMMAND_CONTROL_ENABLE |
VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+
+ if (gpu->sec_mode == ETNA_SEC_KERNEL) {
+ gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
+ VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
+ VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
+ }
}
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
@@ -634,6 +678,12 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
}
+ if (gpu->sec_mode == ETNA_SEC_KERNEL) {
+ u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
+ val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
+ gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
+ }
+
/* setup the pulse eater */
etnaviv_gpu_setup_pulse_eater(gpu);
@@ -644,7 +694,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_init(gpu);
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
- etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
+ etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
prefetch);
}
@@ -696,6 +746,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
}
+ /*
+ * On cores with security features supported, we claim control over the
+ * security states.
+ */
+ if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
+ (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
+ gpu->sec_mode = ETNA_SEC_KERNEL;
+
ret = etnaviv_hw_reset(gpu);
if (ret) {
dev_err(gpu->dev, "GPU reset failed\n");
@@ -717,15 +775,15 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
/* Create buffer: */
- gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0, 0);
- if (!gpu->buffer) {
- ret = -ENOMEM;
+ ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
+ PAGE_SIZE);
+ if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
goto destroy_iommu;
}
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
- etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
+ etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
ret = -EINVAL;
dev_err(gpu->dev,
"command buffer outside valid memory window\n");
@@ -751,8 +809,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
return 0;
free_buffer:
- etnaviv_cmdbuf_free(gpu->buffer);
- gpu->buffer = NULL;
+ etnaviv_cmdbuf_free(&gpu->buffer);
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
@@ -808,6 +865,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
verify_dma(gpu, &debug);
seq_puts(m, "\tfeatures\n");
+ seq_printf(m, "\t major_features: 0x%08x\n",
+ gpu->identity.features);
seq_printf(m, "\t minor_features0: 0x%08x\n",
gpu->identity.minor_features0);
seq_printf(m, "\t minor_features1: 0x%08x\n",
@@ -820,6 +879,18 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
gpu->identity.minor_features4);
seq_printf(m, "\t minor_features5: 0x%08x\n",
gpu->identity.minor_features5);
+ seq_printf(m, "\t minor_features6: 0x%08x\n",
+ gpu->identity.minor_features6);
+ seq_printf(m, "\t minor_features7: 0x%08x\n",
+ gpu->identity.minor_features7);
+ seq_printf(m, "\t minor_features8: 0x%08x\n",
+ gpu->identity.minor_features8);
+ seq_printf(m, "\t minor_features9: 0x%08x\n",
+ gpu->identity.minor_features9);
+ seq_printf(m, "\t minor_features10: 0x%08x\n",
+ gpu->identity.minor_features10);
+ seq_printf(m, "\t minor_features11: 0x%08x\n",
+ gpu->identity.minor_features11);
seq_puts(m, "\tspecs\n");
seq_printf(m, "\t stream_count: %d\n",
@@ -913,38 +984,24 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
}
#endif
-/*
- * Hangcheck detection for locked gpu:
- */
-static void recover_worker(struct work_struct *work)
+void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
- struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
- recover_work);
unsigned long flags;
unsigned int i = 0;
- dev_err(gpu->dev, "hangcheck recover!\n");
+ dev_err(gpu->dev, "recover hung GPU!\n");
if (pm_runtime_get_sync(gpu->dev) < 0)
return;
mutex_lock(&gpu->lock);
- /* Only catch the first event, or when manually re-armed */
- if (etnaviv_dump_core) {
- etnaviv_core_dump(gpu);
- etnaviv_dump_core = false;
- }
-
etnaviv_hw_reset(gpu);
/* complete all events, the GPU won't do it after the reset */
spin_lock_irqsave(&gpu->event_spinlock, flags);
- for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) {
- dma_fence_signal(gpu->event[i].fence);
- gpu->event[i].fence = NULL;
+ for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
complete(&gpu->event_free);
- }
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
gpu->completed_fence = gpu->active_fence;
@@ -956,56 +1013,6 @@ static void recover_worker(struct work_struct *work)
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
-
- /* Retire the buffer objects in a work */
- etnaviv_queue_work(gpu->drm, &gpu->retire_work);
-}
-
-static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
-{
- DBG("%s", dev_name(gpu->dev));
- mod_timer(&gpu->hangcheck_timer,
- round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
-}
-
-static void hangcheck_handler(struct timer_list *t)
-{
- struct etnaviv_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
- u32 fence = gpu->completed_fence;
- bool progress = false;
-
- if (fence != gpu->hangcheck_fence) {
- gpu->hangcheck_fence = fence;
- progress = true;
- }
-
- if (!progress) {
- u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
- int change = dma_addr - gpu->hangcheck_dma_addr;
-
- if (change < 0 || change > 16) {
- gpu->hangcheck_dma_addr = dma_addr;
- progress = true;
- }
- }
-
- if (!progress && fence_after(gpu->active_fence, fence)) {
- dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
- dev_err(gpu->dev, " completed fence: %u\n", fence);
- dev_err(gpu->dev, " active fence: %u\n",
- gpu->active_fence);
- etnaviv_queue_work(gpu->drm, &gpu->recover_work);
- }
-
- /* if still more pending work, reset the hangcheck timer: */
- if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
- hangcheck_timer_reset(gpu);
-}
-
-static void hangcheck_disable(struct etnaviv_gpu *gpu)
-{
- del_timer_sync(&gpu->hangcheck_timer);
- cancel_work_sync(&gpu->recover_work);
}
/* fence object management */
@@ -1081,54 +1088,6 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
return &f->base;
}
-int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive, bool explicit)
-{
- struct reservation_object *robj = etnaviv_obj->resv;
- struct reservation_object_list *fobj;
- struct dma_fence *fence;
- int i, ret;
-
- if (!exclusive) {
- ret = reservation_object_reserve_shared(robj);
- if (ret)
- return ret;
- }
-
- if (explicit)
- return 0;
-
- /*
- * If we have any shared fences, then the exclusive fence
- * should be ignored as it will already have been signalled.
- */
- fobj = reservation_object_get_list(robj);
- if (!fobj || fobj->shared_count == 0) {
- /* Wait on any existing exclusive fence which isn't our own */
- fence = reservation_object_get_excl(robj);
- if (fence && fence->context != context) {
- ret = dma_fence_wait(fence, true);
- if (ret)
- return ret;
- }
- }
-
- if (!exclusive || !fobj)
- return 0;
-
- for (i = 0; i < fobj->shared_count; i++) {
- fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(robj));
- if (fence->context != context) {
- ret = dma_fence_wait(fence, true);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
/*
* event management:
*/
@@ -1195,86 +1154,47 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
/*
* Cmdstream submission/retirement:
*/
-
-static void retire_worker(struct work_struct *work)
-{
- struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
- retire_work);
- u32 fence = gpu->completed_fence;
- struct etnaviv_cmdbuf *cmdbuf, *tmp;
- unsigned int i;
-
- mutex_lock(&gpu->lock);
- list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
- if (!dma_fence_is_signaled(cmdbuf->fence))
- break;
-
- list_del(&cmdbuf->node);
- dma_fence_put(cmdbuf->fence);
-
- for (i = 0; i < cmdbuf->nr_bos; i++) {
- struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
- struct etnaviv_gem_object *etnaviv_obj = mapping->object;
-
- atomic_dec(&etnaviv_obj->gpu_active);
- /* drop the refcount taken in etnaviv_gpu_submit */
- etnaviv_gem_mapping_unreference(mapping);
- }
-
- etnaviv_cmdbuf_free(cmdbuf);
- /*
- * We need to balance the runtime PM count caused by
- * each submission. Upon submission, we increment
- * the runtime PM counter, and allocate one event.
- * So here, we put the runtime PM count for each
- * completed event.
- */
- pm_runtime_put_autosuspend(gpu->dev);
- }
-
- gpu->retired_fence = fence;
-
- mutex_unlock(&gpu->lock);
-
- wake_up_all(&gpu->fence_event);
-}
-
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 fence, struct timespec *timeout)
+ u32 id, struct timespec *timeout)
{
+ struct dma_fence *fence;
int ret;
- if (fence_after(fence, gpu->next_fence)) {
- DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
- fence, gpu->next_fence);
- return -EINVAL;
- }
+ /*
+ * Look up the fence and take a reference. We might still find a fence
+ * whose refcount has already dropped to zero. dma_fence_get_rcu
+ * pretends we didn't find a fence in that case.
+ */
+ rcu_read_lock();
+ fence = idr_find(&gpu->fence_idr, id);
+ if (fence)
+ fence = dma_fence_get_rcu(fence);
+ rcu_read_unlock();
+
+ if (!fence)
+ return 0;
if (!timeout) {
/* No timeout was requested: just test for completion */
- ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
+ ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
} else {
unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
- ret = wait_event_interruptible_timeout(gpu->fence_event,
- fence_completed(gpu, fence),
- remaining);
- if (ret == 0) {
- DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
- fence, gpu->retired_fence,
- gpu->completed_fence);
+ ret = dma_fence_wait_timeout(fence, true, remaining);
+ if (ret == 0)
ret = -ETIMEDOUT;
- } else if (ret != -ERESTARTSYS) {
+ else if (ret != -ERESTARTSYS)
ret = 0;
- }
+
}
+ dma_fence_put(fence);
return ret;
}
/*
* Wait for an object to become inactive. This, on its own, is not race
- * free: the object is moved by the retire worker off the active list, and
+ * free: the object is moved by the scheduler off the active list, and
* then the iova is put. Moreover, the object could be re-submitted just
* after we notice that it's become inactive.
*
@@ -1295,41 +1215,25 @@ int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
ret = wait_event_interruptible_timeout(gpu->fence_event,
!is_active(etnaviv_obj),
remaining);
- if (ret > 0) {
- struct etnaviv_drm_private *priv = gpu->drm->dev_private;
-
- /* Synchronise with the retire worker */
- flush_workqueue(priv->wq);
+ if (ret > 0)
return 0;
- } else if (ret == -ERESTARTSYS) {
+ else if (ret == -ERESTARTSYS)
return -ERESTARTSYS;
- } else {
+ else
return -ETIMEDOUT;
- }
-}
-
-int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
-{
- return pm_runtime_get_sync(gpu->dev);
-}
-
-void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
-{
- pm_runtime_mark_last_busy(gpu->dev);
- pm_runtime_put_autosuspend(gpu->dev);
}
static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
struct etnaviv_event *event, unsigned int flags)
{
- const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
+ const struct etnaviv_gem_submit *submit = event->submit;
unsigned int i;
- for (i = 0; i < cmdbuf->nr_pmrs; i++) {
- const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
+ for (i = 0; i < submit->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
if (pmr->flags == flags)
- etnaviv_perfmon_process(gpu, pmr);
+ etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
}
}
@@ -1354,14 +1258,14 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
struct etnaviv_event *event)
{
- const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
+ const struct etnaviv_gem_submit *submit = event->submit;
unsigned int i;
u32 val;
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
- for (i = 0; i < cmdbuf->nr_pmrs; i++) {
- const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
+ for (i = 0; i < submit->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
*pmr->bo_vma = pmr->sequence;
}
@@ -1379,25 +1283,19 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
/* add bo's to gpu's ring, and kick gpu: */
-int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
+struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
{
- struct dma_fence *fence;
+ struct etnaviv_gpu *gpu = submit->gpu;
+ struct dma_fence *gpu_fence;
unsigned int i, nr_events = 1, event[3];
int ret;
- ret = etnaviv_gpu_pm_get_sync(gpu);
- if (ret < 0)
- return ret;
-
- /*
- * TODO
- *
- * - flush
- * - data endian
- * - prefetch
- *
- */
+ if (!submit->runtime_resumed) {
+ ret = pm_runtime_get_sync(gpu->dev);
+ if (ret < 0)
+ return NULL;
+ submit->runtime_resumed = true;
+ }
/*
* if there are performance monitor requests we need to have
@@ -1406,105 +1304,96 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
* - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
* and update the sequence number for userspace.
*/
- if (cmdbuf->nr_pmrs)
+ if (submit->nr_pmrs)
nr_events = 3;
ret = event_alloc(gpu, nr_events, event);
if (ret) {
DRM_ERROR("no free events\n");
- goto out_pm_put;
+ return NULL;
}
mutex_lock(&gpu->lock);
- fence = etnaviv_gpu_fence_alloc(gpu);
- if (!fence) {
+ gpu_fence = etnaviv_gpu_fence_alloc(gpu);
+ if (!gpu_fence) {
for (i = 0; i < nr_events; i++)
event_free(gpu, event[i]);
- ret = -ENOMEM;
goto out_unlock;
}
- gpu->event[event[0]].fence = fence;
- submit->fence = dma_fence_get(fence);
- gpu->active_fence = submit->fence->seqno;
-
- if (gpu->lastctx != cmdbuf->ctx) {
- gpu->mmu->need_flush = true;
- gpu->switch_context = true;
- gpu->lastctx = cmdbuf->ctx;
- }
+ gpu->active_fence = gpu_fence->seqno;
- if (cmdbuf->nr_pmrs) {
+ if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
- gpu->event[event[1]].cmdbuf = cmdbuf;
+ kref_get(&submit->refcount);
+ gpu->event[event[1]].submit = submit;
etnaviv_sync_point_queue(gpu, event[1]);
}
- etnaviv_buffer_queue(gpu, event[0], cmdbuf);
+ gpu->event[event[0]].fence = gpu_fence;
+ submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
+ etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
+ &submit->cmdbuf);
- if (cmdbuf->nr_pmrs) {
+ if (submit->nr_pmrs) {
gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
- gpu->event[event[2]].cmdbuf = cmdbuf;
+ kref_get(&submit->refcount);
+ gpu->event[event[2]].submit = submit;
etnaviv_sync_point_queue(gpu, event[2]);
}
- cmdbuf->fence = fence;
- list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
-
- /* We're committed to adding this command buffer, hold a PM reference */
- pm_runtime_get_noresume(gpu->dev);
-
- for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-
- /* Each cmdbuf takes a refcount on the mapping */
- etnaviv_gem_mapping_reference(submit->bos[i].mapping);
- cmdbuf->bo_map[i] = submit->bos[i].mapping;
- atomic_inc(&etnaviv_obj->gpu_active);
-
- if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- reservation_object_add_excl_fence(etnaviv_obj->resv,
- fence);
- else
- reservation_object_add_shared_fence(etnaviv_obj->resv,
- fence);
- }
- cmdbuf->nr_bos = submit->nr_bos;
- hangcheck_timer_reset(gpu);
- ret = 0;
-
out_unlock:
mutex_unlock(&gpu->lock);
-out_pm_put:
- etnaviv_gpu_pm_put(gpu);
-
- return ret;
+ return gpu_fence;
}
-static void etnaviv_process_sync_point(struct etnaviv_gpu *gpu,
- struct etnaviv_event *event)
+static void sync_point_worker(struct work_struct *work)
{
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+ sync_point_work);
+ struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
event->sync_point(gpu, event);
+ etnaviv_submit_put(event->submit);
+ event_free(gpu, gpu->sync_point_event);
+
+ /* restart FE last to avoid GPU and IRQ racing against this worker */
etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}
-static void sync_point_worker(struct work_struct *work)
+static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
- struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
- sync_point_work);
+ u32 status_reg, status;
+ int i;
- etnaviv_process_sync_point(gpu, &gpu->event[gpu->sync_point_event]);
- event_free(gpu, gpu->sync_point_event);
+ if (gpu->sec_mode == ETNA_SEC_NONE)
+ status_reg = VIVS_MMUv2_STATUS;
+ else
+ status_reg = VIVS_MMUv2_SEC_STATUS;
+
+ status = gpu_read(gpu, status_reg);
+ dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
+
+ for (i = 0; i < 4; i++) {
+ u32 address_reg;
+
+ if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
+ continue;
+
+ if (gpu->sec_mode == ETNA_SEC_NONE)
+ address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
+ else
+ address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
+
+ dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
+ gpu_read(gpu, address_reg));
+ }
}
-/*
- * Init/Cleanup:
- */
static irqreturn_t irq_handler(int irq, void *data)
{
struct etnaviv_gpu *gpu = data;
@@ -1525,17 +1414,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
- int i;
-
- dev_err_ratelimited(gpu->dev,
- "MMU fault status 0x%08x\n",
- gpu_read(gpu, VIVS_MMUv2_STATUS));
- for (i = 0; i < 4; i++) {
- dev_err_ratelimited(gpu->dev,
- "MMU %d fault addr 0x%08x\n",
- i, gpu_read(gpu,
- VIVS_MMUv2_EXCEPTION_ADDR(i)));
- }
+ dump_mmu_fault(gpu);
intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
}
@@ -1550,7 +1429,7 @@ static irqreturn_t irq_handler(int irq, void *data)
if (gpu->event[event].sync_point) {
gpu->sync_point_event = event;
- etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
+ queue_work(gpu->wq, &gpu->sync_point_work);
}
fence = gpu->event[event].fence;
@@ -1558,7 +1437,6 @@ static irqreturn_t irq_handler(int irq, void *data)
continue;
gpu->event[event].fence = NULL;
- dma_fence_signal(fence);
/*
* Events can be processed out of order. Eg,
@@ -1571,13 +1449,11 @@ static irqreturn_t irq_handler(int irq, void *data)
*/
if (fence_after(fence->seqno, gpu->completed_fence))
gpu->completed_fence = fence->seqno;
+ dma_fence_signal(fence);
event_free(gpu, event);
}
- /* Retire the buffer objects in a work */
- etnaviv_queue_work(gpu->drm, &gpu->retire_work);
-
ret = IRQ_HANDLED;
}
@@ -1588,6 +1464,12 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
int ret;
+ if (gpu->clk_reg) {
+ ret = clk_prepare_enable(gpu->clk_reg);
+ if (ret)
+ return ret;
+ }
+
if (gpu->clk_bus) {
ret = clk_prepare_enable(gpu->clk_bus);
if (ret)
@@ -1626,6 +1508,8 @@ static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
clk_disable_unprepare(gpu->clk_core);
if (gpu->clk_bus)
clk_disable_unprepare(gpu->clk_bus);
+ if (gpu->clk_reg)
+ clk_disable_unprepare(gpu->clk_reg);
return 0;
}
@@ -1653,9 +1537,11 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->buffer) {
+ if (gpu->buffer.suballoc) {
/* Replace the last WAIT with END */
+ mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
+ mutex_unlock(&gpu->lock);
/*
* We know that only the FE is busy here, this should
@@ -1680,7 +1566,7 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
- gpu->switch_context = true;
+ gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@@ -1738,41 +1624,58 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
int ret;
- if (IS_ENABLED(CONFIG_THERMAL)) {
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
(char *)dev_name(dev), gpu, &cooling_ops);
if (IS_ERR(gpu->cooling))
return PTR_ERR(gpu->cooling);
}
+ gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
+ if (!gpu->wq) {
+ ret = -ENOMEM;
+ goto out_thermal;
+ }
+
+ ret = etnaviv_sched_init(gpu);
+ if (ret)
+ goto out_workqueue;
+
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
#else
ret = etnaviv_gpu_clk_enable(gpu);
#endif
- if (ret < 0) {
- thermal_cooling_device_unregister(gpu->cooling);
- return ret;
- }
+ if (ret < 0)
+ goto out_sched;
+
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
+ idr_init(&gpu->fence_idr);
spin_lock_init(&gpu->fence_spinlock);
- INIT_LIST_HEAD(&gpu->active_cmd_list);
- INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
- INIT_WORK(&gpu->recover_work, recover_worker);
init_waitqueue_head(&gpu->fence_event);
- timer_setup(&gpu->hangcheck_timer, hangcheck_handler, TIMER_DEFERRABLE);
-
priv->gpu[priv->num_gpus++] = gpu;
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
return 0;
+
+out_sched:
+ etnaviv_sched_fini(gpu);
+
+out_workqueue:
+ destroy_workqueue(gpu->wq);
+
+out_thermal:
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+ thermal_cooling_device_unregister(gpu->cooling);
+
+ return ret;
}
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
@@ -1782,7 +1685,10 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
DBG("%s", dev_name(gpu->dev));
- hangcheck_disable(gpu);
+ flush_workqueue(gpu->wq);
+ destroy_workqueue(gpu->wq);
+
+ etnaviv_sched_fini(gpu);
#ifdef CONFIG_PM
pm_runtime_get_sync(gpu->dev);
@@ -1791,10 +1697,8 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_gpu_hw_suspend(gpu);
#endif
- if (gpu->buffer) {
- etnaviv_cmdbuf_free(gpu->buffer);
- gpu->buffer = NULL;
- }
+ if (gpu->buffer.suballoc)
+ etnaviv_cmdbuf_free(&gpu->buffer);
if (gpu->cmdbuf_suballoc) {
etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
@@ -1807,8 +1711,10 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
}
gpu->drm = NULL;
+ idr_destroy(&gpu->fence_idr);
- thermal_cooling_device_unregister(gpu->cooling);
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+ thermal_cooling_device_unregister(gpu->cooling);
gpu->cooling = NULL;
}
@@ -1823,6 +1729,7 @@ static const struct of_device_id etnaviv_gpu_match[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
@@ -1836,6 +1743,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
+ mutex_init(&gpu->fence_idr_lock);
/* Map registers: */
gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
@@ -1857,6 +1765,11 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
}
/* Get Clocks: */
+ gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ DBG("clk_reg: %p", gpu->clk_reg);
+ if (IS_ERR(gpu->clk_reg))
+ gpu->clk_reg = NULL;
+
gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
DBG("clk_bus: %p", gpu->clk_bus);
if (IS_ERR(gpu->clk_bus))
@@ -1931,7 +1844,7 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
return ret;
/* Re-initialise the basic hardware state */
- if (gpu->drm && gpu->buffer) {
+ if (gpu->drm && gpu->buffer.suballoc) {
ret = etnaviv_gpu_hw_resume(gpu);
if (ret) {
etnaviv_gpu_clk_disable(gpu);
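One piece of the new fence handling is not visible in this file: the wait path above looks fences up by id in gpu->fence_idr, and submit_cleanup() (etnaviv_gem_submit.c, earlier in this diff) removes them again, but the allocation of out_fence_id happens on the scheduler push path in etnaviv_sched.c, outside this diff. A sketch of what that allocation amounts to, under the same fence_idr_lock:

/* Illustrative sketch only: publishing the out-fence under a user-visible
 * id, matched by the idr_remove() in submit_cleanup(). */
static int example_publish_out_fence(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;

	mutex_lock(&gpu->fence_idr_lock);
	submit->out_fence_id = idr_alloc_cyclic(&gpu->fence_idr,
						submit->out_fence,
						1, INT_MAX, GFP_KERNEL);
	mutex_unlock(&gpu->fence_idr_lock);

	return submit->out_fence_id < 0 ? -ENOMEM : 0;
}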
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 4f10f14..3c30055 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
struct etnaviv_gem_submit;
@@ -37,21 +38,17 @@ struct etnaviv_chip_identity {
/* Supported minor feature fields. */
u32 minor_features0;
-
- /* Supported minor feature 1 fields. */
u32 minor_features1;
-
- /* Supported minor feature 2 fields. */
u32 minor_features2;
-
- /* Supported minor feature 3 fields. */
u32 minor_features3;
-
- /* Supported minor feature 4 fields. */
u32 minor_features4;
-
- /* Supported minor feature 5 fields. */
u32 minor_features5;
+ u32 minor_features6;
+ u32 minor_features7;
+ u32 minor_features8;
+ u32 minor_features9;
+ u32 minor_features10;
+ u32 minor_features11;
/* Number of streams supported. */
u32 stream_count;
@@ -87,9 +84,15 @@ struct etnaviv_chip_identity {
u8 varyings_count;
};
+enum etnaviv_sec_mode {
+ ETNA_SEC_NONE = 0,
+ ETNA_SEC_KERNEL,
+ ETNA_SEC_TZ
+};
+
struct etnaviv_event {
struct dma_fence *fence;
- struct etnaviv_cmdbuf *cmdbuf;
+ struct etnaviv_gem_submit *submit;
void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};
@@ -105,11 +108,13 @@ struct etnaviv_gpu {
struct device *dev;
struct mutex lock;
struct etnaviv_chip_identity identity;
+ enum etnaviv_sec_mode sec_mode;
struct etnaviv_file_private *lastctx;
- bool switch_context;
+ struct workqueue_struct *wq;
+ struct drm_gpu_scheduler sched;
/* 'ring'-buffer: */
- struct etnaviv_cmdbuf *buffer;
+ struct etnaviv_cmdbuf buffer;
int exec_state;
/* bus base address of memory */
@@ -121,23 +126,18 @@ struct etnaviv_gpu {
struct completion event_free;
spinlock_t event_spinlock;
- /* list of currently in-flight command buffers */
- struct list_head active_cmd_list;
-
u32 idle_mask;
/* Fencing support */
+ struct mutex fence_idr_lock;
+ struct idr fence_idr;
u32 next_fence;
u32 active_fence;
u32 completed_fence;
- u32 retired_fence;
wait_queue_head_t fence_event;
u64 fence_context;
spinlock_t fence_spinlock;
- /* worker for handling active-list retiring: */
- struct work_struct retire_work;
-
/* worker for handling 'sync' points: */
struct work_struct sync_point_work;
int sync_point_event;
@@ -150,16 +150,10 @@ struct etnaviv_gpu {
/* Power Control: */
struct clk *clk_bus;
+ struct clk *clk_reg;
struct clk *clk_core;
struct clk *clk_shader;
- /* Hang Detction: */
-#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
-#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
- struct timer_list hangcheck_timer;
- u32 hangcheck_fence;
- u32 hangcheck_dma_addr;
- struct work_struct recover_work;
unsigned int freq_scale;
unsigned long base_rate_core;
unsigned long base_rate_shader;
@@ -180,29 +174,22 @@ static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
return fence_after_eq(gpu->completed_fence, fence);
}
-static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
-{
- return fence_after_eq(gpu->retired_fence, fence);
-}
-
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
+bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif
-int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive, bool implicit);
-
+void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
u32 fence, struct timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
-int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
+struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
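fence_completed() above and the fence_after() checks in the IRQ handler rely on wraparound-safe comparison of 32-bit seqnos. The helpers sit earlier in this header, outside the hunks shown; they are typically the classic signed-difference idiom:

/* Typical wraparound-safe seqno comparisons (sketch of the helpers used by
 * fence_completed() above). */
static inline bool fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}

static inline bool fence_after_eq(u32 a, u32 b)
{
	return (s32)(a - b) >= 0;
}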
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
new file mode 100644
index 0000000..ea08bb3
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2018 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_gpu.h"
+
+static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
+ {
+ .model = 0x7000,
+ .revision = 0x6214,
+ .stream_count = 16,
+ .register_max = 64,
+ .thread_count = 1024,
+ .shader_core_count = 4,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 2,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287cad,
+ .minor_features0 = 0xc1799eff,
+ .minor_features1 = 0xfefbfad9,
+ .minor_features2 = 0xeb9d4fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xdb0dafc7,
+ .minor_features5 = 0xbb5ac333,
+ .minor_features6 = 0xfc8ee200,
+ .minor_features7 = 0x03fbfa6f,
+ .minor_features8 = 0x00ef0ef0,
+ .minor_features9 = 0x0edbf03c,
+ .minor_features10 = 0x90044250,
+ .minor_features11 = 0x00000024,
+ },
+};
+
+bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_chip_identity *ident = &gpu->identity;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
+ if (etnaviv_chip_identities[i].model == ident->model &&
+ etnaviv_chip_identities[i].revision == ident->revision) {
+ memcpy(ident, &etnaviv_chip_identities[i],
+ sizeof(*ident));
+ return true;
+ }
+ }
+
+ return false;
+}
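etnaviv_fill_identity_from_hwdb() is called from etnaviv_hw_identify() right after the model and revision registers have been read (see the etnaviv_gpu.c hunk above), so a table hit short-circuits the remaining feature-register probing. Supporting another core is then just one more initializer; a purely hypothetical example entry (placeholder numbers, not real feature bits):

	/* Hypothetical entry, values are placeholders only.  Fields omitted
	 * from the designated initializer are zero-filled. */
	{
		.model = 0x8000,
		.revision = 0x1234,
		.stream_count = 8,
		.register_max = 64,
		.features = 0x00000000,
		.minor_features0 = 0x00000000,
	},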
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 14e24ac..4b9b11c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -70,9 +70,8 @@ static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
return -ENOMEM;
}
- for (i = 0; i < PT_ENTRIES; i++)
- etnaviv_domain->pgtable_cpu[i] =
- etnaviv_domain->base.bad_page_dma;
+ memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
+ PT_ENTRIES);
return 0;
}
@@ -159,7 +158,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
-const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
+static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
.free = etnaviv_iommuv1_domain_free,
.map = etnaviv_iommuv1_map,
.unmap = etnaviv_iommuv1_unmap,
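The memset32() conversion in __etnaviv_iommu_init() above is behaviour-neutral: it fills PT_ENTRIES 32-bit page-table slots with the bad-page DMA address exactly as the removed loop did, just using the kernel's optimised fill helper. The open-coded equivalent, for reference:

/* Reference only: memset32(dst, val, count) behaves like this loop. */
static void fill_u32(u32 *dst, u32 val, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		dst[i] = val;
}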
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index fc60fc8..9752dbd 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -40,6 +40,9 @@
struct etnaviv_iommuv2_domain {
struct etnaviv_iommu_domain base;
+ /* P(age) T(able) A(rray) */
+ u64 *pta_cpu;
+ dma_addr_t pta_dma;
/* M(aster) TLB aka first level pagetable */
u32 *mtlb_cpu;
dma_addr_t mtlb_dma;
@@ -114,6 +117,15 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
+ etnaviv_domain->pta_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
+ SZ_4K,
+ &etnaviv_domain->pta_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->pta_cpu) {
+ ret = -ENOMEM;
+ goto fail_mem;
+ }
+
etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->mtlb_dma,
@@ -150,6 +162,11 @@ fail_mem:
etnaviv_domain->base.bad_page_cpu,
etnaviv_domain->base.bad_page_dma);
+ if (etnaviv_domain->pta_cpu)
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->pta_cpu,
+ etnaviv_domain->pta_dma);
+
if (etnaviv_domain->mtlb_cpu)
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
@@ -176,6 +193,10 @@ static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
etnaviv_domain->base.bad_page_dma);
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->pta_cpu,
+ etnaviv_domain->pta_dma);
+
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
@@ -216,7 +237,7 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
-void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(gpu->mmu->domain);
@@ -229,14 +250,67 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)etnaviv_domain->mtlb_dma,
(u32)etnaviv_domain->base.bad_page_dma);
- etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
+ etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
-const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
+static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_iommuv2_domain *etnaviv_domain =
+ to_etnaviv_domain(gpu->mmu->domain);
+ u16 prefetch;
+
+ /* If the MMU is already enabled the state is still there. */
+ if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
+ return;
+
+ gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
+ lower_32_bits(etnaviv_domain->pta_dma));
+ gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
+ upper_32_bits(etnaviv_domain->pta_dma));
+ gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
+
+ gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
+ lower_32_bits(etnaviv_domain->base.bad_page_dma));
+ gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
+ lower_32_bits(etnaviv_domain->base.bad_page_dma));
+ gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
+ VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
+ upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
+ VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
+ upper_32_bits(etnaviv_domain->base.bad_page_dma)));
+
+ etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
+ VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+
+ /* trigger a PTA load through the FE */
+ prefetch = etnaviv_buffer_config_pta(gpu);
+ etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
+ prefetch);
+ etnaviv_gpu_wait_idle(gpu, 100);
+
+ gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
+}
+
+void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+{
+ switch (gpu->sec_mode) {
+ case ETNA_SEC_NONE:
+ etnaviv_iommuv2_restore_nonsec(gpu);
+ break;
+ case ETNA_SEC_KERNEL:
+ etnaviv_iommuv2_restore_sec(gpu);
+ break;
+ default:
+ WARN(1, "unhandled GPU security mode\n");
+ break;
+ }
+}
+
+static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
.free = etnaviv_iommuv2_domain_free,
.map = etnaviv_iommuv2_map,
.unmap = etnaviv_iommuv2_unmap,
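A worked illustration of the address split used by the secure restore path above: the safe-page address is an invented example value, the macros are the ones this patch adds to state_hi.xml.h.

static u32 example_pack_safe_addr_high(dma_addr_t safe)
{
	/* e.g. safe = 0x123456000ULL: low word 0x23456000, packed high byte 0x01 */
	return VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(upper_32_bits(safe)) |
	       VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(upper_32_bits(safe));
}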
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 35074b9..49e0497 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -29,7 +29,7 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
size_t pgsize = SZ_4K;
if (!IS_ALIGNED(iova | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
iova, size, pgsize);
return;
}
@@ -54,7 +54,7 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
int ret = 0;
if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+ pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
iova, &paddr, size, pgsize);
return -EINVAL;
}
@@ -263,18 +263,16 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
mapping->iova = iova;
list_add_tail(&mapping->mmu_node, &mmu->mappings);
- mutex_unlock(&mmu->lock);
- return 0;
+ ret = 0;
+ goto unlock;
}
}
node = &mapping->vram_node;
ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
- if (ret < 0) {
- mutex_unlock(&mmu->lock);
- return ret;
- }
+ if (ret < 0)
+ goto unlock;
mmu->last_iova = node->start + etnaviv_obj->base.size;
mapping->iova = node->start;
@@ -283,12 +281,12 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
if (ret < 0) {
drm_mm_remove_node(node);
- mutex_unlock(&mmu->lock);
- return ret;
+ goto unlock;
}
list_add_tail(&mapping->mmu_node, &mmu->mappings);
mmu->need_flush = true;
+unlock:
mutex_unlock(&mmu->lock);
return ret;
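The rework above funnels every exit of etnaviv_iommu_map_gem() through a single unlock label; a minimal sketch of the pattern, with hypothetical helper names:

static int example_locked_op(struct mutex *lock)
{
	int ret;

	mutex_lock(lock);
	ret = example_first_step();	/* hypothetical */
	if (ret < 0)
		goto unlock;

	ret = example_second_step();	/* hypothetical */
unlock:
	mutex_unlock(lock);		/* dropped exactly once on every path */
	return ret;
}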
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 768f5aa..26dddfc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -479,9 +479,9 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
}
void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
- const struct etnaviv_perfmon_request *pmr)
+ const struct etnaviv_perfmon_request *pmr, u32 exec_state)
{
- const struct etnaviv_pm_domain_meta *meta = &doms_meta[gpu->exec_state];
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
const struct etnaviv_pm_domain *dom;
const struct etnaviv_pm_signal *sig;
u32 *bo = pmr->bo_vma;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
index 35dce19..c1653c6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
@@ -44,6 +44,6 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
u32 exec_state);
void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
- const struct etnaviv_perfmon_request *pmr);
+ const struct etnaviv_perfmon_request *pmr, u32 exec_state);
#endif /* __ETNAVIV_PERFMON_H__ */
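With the extra parameter above, a caller can sample counters against the pipe that was active for a given submit instead of whatever gpu->exec_state happens to hold at retire time. A hedged caller sketch (the submit field names are assumptions):

	/* in the retire/sync-point path, per perfmon request of the submit: */
	for (i = 0; i < submit->nr_pmrs; i++)
		etnaviv_perfmon_process(gpu, &submit->pmrs[i], submit->exec_state);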
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
new file mode 100644
index 0000000..6cf0775
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kthread.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_dump.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_sched.h"
+
+static int etnaviv_job_hang_limit;
+module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
+static int etnaviv_hw_jobs_limit = 4;
+module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);
+
+static struct dma_fence *
+etnaviv_sched_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *entity)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct dma_fence *fence;
+ int i;
+
+ if (unlikely(submit->in_fence)) {
+ fence = submit->in_fence;
+ submit->in_fence = NULL;
+
+ if (!dma_fence_is_signaled(fence))
+ return fence;
+
+ dma_fence_put(fence);
+ }
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
+ int j;
+
+ if (bo->excl) {
+ fence = bo->excl;
+ bo->excl = NULL;
+
+ if (!dma_fence_is_signaled(fence))
+ return fence;
+
+ dma_fence_put(fence);
+ }
+
+ for (j = 0; j < bo->nr_shared; j++) {
+ if (!bo->shared[j])
+ continue;
+
+ fence = bo->shared[j];
+ bo->shared[j] = NULL;
+
+ if (!dma_fence_is_signaled(fence))
+ return fence;
+
+ dma_fence_put(fence);
+ }
+ kfree(bo->shared);
+ bo->nr_shared = 0;
+ bo->shared = NULL;
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct dma_fence *fence = NULL;
+
+ if (likely(!sched_job->s_fence->finished.error))
+ fence = etnaviv_gpu_submit(submit);
+ else
+ dev_dbg(submit->gpu->dev, "skipping bad job\n");
+
+ return fence;
+}
+
+static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct etnaviv_gpu *gpu = submit->gpu;
+
+ /* block scheduler */
+ kthread_park(gpu->sched.thread);
+ drm_sched_hw_job_reset(&gpu->sched, sched_job);
+
+ /* get the GPU back into the init state */
+ etnaviv_core_dump(gpu);
+ etnaviv_gpu_recover_hang(gpu);
+
+ /* restart scheduler after GPU is usable again */
+ drm_sched_job_recovery(&gpu->sched);
+ kthread_unpark(gpu->sched.thread);
+}
+
+static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+
+ etnaviv_submit_put(submit);
+}
+
+static const struct drm_sched_backend_ops etnaviv_sched_ops = {
+ .dependency = etnaviv_sched_dependency,
+ .run_job = etnaviv_sched_run_job,
+ .timedout_job = etnaviv_sched_timedout_job,
+ .free_job = etnaviv_sched_free_job,
+};
+
+int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
+ struct etnaviv_gem_submit *submit)
+{
+ int ret;
+
+ ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
+ sched_entity, submit->cmdbuf.ctx);
+ if (ret)
+ return ret;
+
+ submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
+ mutex_lock(&submit->gpu->fence_idr_lock);
+ submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
+ submit->out_fence, 0,
+ INT_MAX, GFP_KERNEL);
+ mutex_unlock(&submit->gpu->fence_idr_lock);
+ if (submit->out_fence_id < 0)
+ return -ENOMEM;
+
+ /* the scheduler holds on to the job now */
+ kref_get(&submit->refcount);
+
+ drm_sched_entity_push_job(&submit->sched_job, sched_entity);
+
+ return 0;
+}
+
+int etnaviv_sched_init(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
+ etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
+ msecs_to_jiffies(500), dev_name(gpu->dev));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
+{
+ drm_sched_fini(&gpu->sched);
+}
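A hedged sketch of how the new entry point is meant to be driven from the submit ioctl path; the file-private entity array and the cleanup label are assumptions, not part of this file:

	/* instead of calling etnaviv_gpu_submit() directly: */
	ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
	if (ret)
		goto err_submit_objects;	/* assumed cleanup label */

	/* userspace can then wait on submit->out_fence_id via the fence_idr */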
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.h b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
new file mode 100644
index 0000000..097635f
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_SCHED_H__
+#define __ETNAVIV_SCHED_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct etnaviv_gpu;
+
+static inline
+struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct etnaviv_gem_submit, sched_job);
+}
+
+int etnaviv_sched_init(struct etnaviv_gpu *gpu);
+void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
+int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
+ struct etnaviv_gem_submit *submit);
+
+#endif /* __ETNAVIV_SCHED_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
index c27c148..421cb7c 100644
--- a/drivers/gpu/drm/etnaviv/state.xml.h
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_XML
#define STATE_XML
@@ -9,14 +8,40 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state.xml ( 18882 bytes, from 2015-03-25 11:42:32)
-- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
-- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21)
-- state_2d.xml ( 51549 bytes, from 2015-03-25 11:25:06)
-- state_3d.xml ( 54600 bytes, from 2015-03-25 11:25:19)
-- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01)
-
-Copyright (C) 2015
+- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
+- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
+- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
+- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
+- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+
+Copyright (C) 2012-2017 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
*/
@@ -24,9 +49,25 @@ Copyright (C) 2015
#define VARYING_COMPONENT_USE_USED 0x00000001
#define VARYING_COMPONENT_USE_POINTCOORD_X 0x00000002
#define VARYING_COMPONENT_USE_POINTCOORD_Y 0x00000003
+#define FE_DATA_TYPE_BYTE 0x00000000
+#define FE_DATA_TYPE_UNSIGNED_BYTE 0x00000001
+#define FE_DATA_TYPE_SHORT 0x00000002
+#define FE_DATA_TYPE_UNSIGNED_SHORT 0x00000003
+#define FE_DATA_TYPE_INT 0x00000004
+#define FE_DATA_TYPE_UNSIGNED_INT 0x00000005
+#define FE_DATA_TYPE_FLOAT 0x00000008
+#define FE_DATA_TYPE_HALF_FLOAT 0x00000009
+#define FE_DATA_TYPE_FIXED 0x0000000b
+#define FE_DATA_TYPE_INT_10_10_10_2 0x0000000c
+#define FE_DATA_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d
+#define FE_DATA_TYPE_BYTE_I 0x0000000e
+#define FE_DATA_TYPE_SHORT_I 0x0000000f
#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK 0x000000ff
#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT 0
#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK)
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__MASK 0x00ff0000
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__SHIFT 16
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__MASK)
#define VIVS_FE 0x00000000
#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0) (0x00000600 + 0x4*(i0))
@@ -34,17 +75,7 @@ Copyright (C) 2015
#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN 0x00000010
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK 0x0000000f
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT 0
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE 0x00000000
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE 0x00000001
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT 0x00000002
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT 0x00000003
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT 0x00000004
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT 0x00000005
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT 0x00000008
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT 0x00000009
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED 0x0000000b
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2 0x0000000c
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK)
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK 0x00000030
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT 4
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK)
@@ -76,6 +107,7 @@ Copyright (C) 2015
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR 0x00000000
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT 0x00000001
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT 0x00000002
+#define VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART 0x00000100
#define VIVS_FE_VERTEX_STREAM_BASE_ADDR 0x0000064c
@@ -151,6 +183,8 @@ Copyright (C) 2015
#define VIVS_FE_AUTO_FLUSH 0x00000670
+#define VIVS_FE_PRIMITIVE_RESTART_INDEX 0x00000674
+
#define VIVS_FE_UNK00678 0x00000678
#define VIVS_FE_UNK0067C 0x0000067c
@@ -163,17 +197,40 @@ Copyright (C) 2015
#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0) (0x000006a0 + 0x4*(i0))
-#define VIVS_FE_UNK00700(i0) (0x00000700 + 0x4*(i0))
-#define VIVS_FE_UNK00700__ESIZE 0x00000004
-#define VIVS_FE_UNK00700__LEN 0x00000010
+#define VIVS_FE_GENERIC_ATTRIB(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_FE_GENERIC_ATTRIB__ESIZE 0x00000004
+#define VIVS_FE_GENERIC_ATTRIB__LEN 0x00000010
+
+#define VIVS_FE_GENERIC_ATTRIB_UNK006C0(i0) (0x000006c0 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB_UNK00700(i0) (0x00000700 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB_UNK00740(i0) (0x00000740 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB_SCALE(i0) (0x00000780 + 0x4*(i0))
+
+#define VIVS_FE_HALTI5_UNK007C4 0x000007c4
+
+#define VIVS_FE_HALTI5_UNK007D0(i0) (0x000007d0 + 0x4*(i0))
+#define VIVS_FE_HALTI5_UNK007D0__ESIZE 0x00000004
+#define VIVS_FE_HALTI5_UNK007D0__LEN 0x00000002
+
+#define VIVS_FE_HALTI5_UNK007D8 0x000007d8
+
+#define VIVS_FE_DESC_START 0x000007dc
+
+#define VIVS_FE_DESC_END 0x000007e0
+
+#define VIVS_FE_DESC_AVAIL 0x000007e4
+#define VIVS_FE_DESC_AVAIL_COUNT__MASK 0x0000007f
+#define VIVS_FE_DESC_AVAIL_COUNT__SHIFT 0
+#define VIVS_FE_DESC_AVAIL_COUNT(x) (((x) << VIVS_FE_DESC_AVAIL_COUNT__SHIFT) & VIVS_FE_DESC_AVAIL_COUNT__MASK)
+
+#define VIVS_FE_FENCE_WAIT_DATA_LOW 0x000007e8
-#define VIVS_FE_UNK00740(i0) (0x00000740 + 0x4*(i0))
-#define VIVS_FE_UNK00740__ESIZE 0x00000004
-#define VIVS_FE_UNK00740__LEN 0x00000010
+#define VIVS_FE_FENCE_WAIT_DATA_HIGH 0x000007f4
-#define VIVS_FE_UNK00780(i0) (0x00000780 + 0x4*(i0))
-#define VIVS_FE_UNK00780__ESIZE 0x00000004
-#define VIVS_FE_UNK00780__LEN 0x00000010
+#define VIVS_FE_ROBUSTNESS_UNK007F8 0x000007f8
#define VIVS_GL 0x00000000
@@ -188,6 +245,7 @@ Copyright (C) 2015
#define VIVS_GL_EVENT_EVENT_ID(x) (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK)
#define VIVS_GL_EVENT_FROM_FE 0x00000020
#define VIVS_GL_EVENT_FROM_PE 0x00000040
+#define VIVS_GL_EVENT_FROM_BLT 0x00000080
#define VIVS_GL_EVENT_SOURCE__MASK 0x00001f00
#define VIVS_GL_EVENT_SOURCE__SHIFT 8
#define VIVS_GL_EVENT_SOURCE(x) (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK)
@@ -199,6 +257,9 @@ Copyright (C) 2015
#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK 0x00001f00
#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT 8
#define VIVS_GL_SEMAPHORE_TOKEN_TO(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK)
+#define VIVS_GL_SEMAPHORE_TOKEN_UNK28__MASK 0x30000000
+#define VIVS_GL_SEMAPHORE_TOKEN_UNK28__SHIFT 28
+#define VIVS_GL_SEMAPHORE_TOKEN_UNK28(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_UNK28__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_UNK28__MASK)
#define VIVS_GL_FLUSH_CACHE 0x0000380c
#define VIVS_GL_FLUSH_CACHE_DEPTH 0x00000001
@@ -208,6 +269,10 @@ Copyright (C) 2015
#define VIVS_GL_FLUSH_CACHE_TEXTUREVS 0x00000010
#define VIVS_GL_FLUSH_CACHE_SHADER_L1 0x00000020
#define VIVS_GL_FLUSH_CACHE_SHADER_L2 0x00000040
+#define VIVS_GL_FLUSH_CACHE_UNK10 0x00000400
+#define VIVS_GL_FLUSH_CACHE_UNK11 0x00000800
+#define VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK12 0x00001000
+#define VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK13 0x00002000
#define VIVS_GL_FLUSH_MMU 0x00003810
#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU 0x00000001
@@ -244,30 +309,8 @@ Copyright (C) 2015
#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x) (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK)
#define VIVS_GL_VARYING_NUM_COMPONENTS 0x00003820
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK 0x00000007
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT 0
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK 0x00000070
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT 4
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK 0x00000700
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT 8
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK 0x00007000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT 12
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK 0x00070000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT 16
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK 0x00700000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT 20
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK 0x07000000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT 24
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK 0x70000000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT 28
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK)
+
+#define VIVS_GL_OCCLUSION_QUERY_ADDR 0x00003824
#define VIVS_GL_VARYING_COMPONENT_USE(i0) (0x00003828 + 0x4*(i0))
#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE 0x00000004
@@ -321,6 +364,10 @@ Copyright (C) 2015
#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT 30
#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK)
+#define VIVS_GL_UNK0382C 0x0000382c
+
+#define VIVS_GL_OCCLUSION_QUERY_CONTROL 0x00003830
+
#define VIVS_GL_UNK03834 0x00003834
#define VIVS_GL_UNK03838 0x00003838
@@ -332,8 +379,58 @@ Copyright (C) 2015
#define VIVS_GL_CONTEXT_POINTER 0x00003850
+#define VIVS_GL_UNK03854 0x00003854
+
+#define VIVS_GL_BUG_FIXES 0x00003860
+
+#define VIVS_GL_FENCE_OUT_ADDRESS 0x00003868
+
+#define VIVS_GL_FENCE_OUT_DATA_LOW 0x0000386c
+
+#define VIVS_GL_HALTI5_UNK03884 0x00003884
+
+#define VIVS_GL_HALTI5_SH_SPECIALS 0x00003888
+#define VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__MASK 0x0000007f
+#define VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__SHIFT 0
+#define VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__MASK)
+#define VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__MASK 0x00007f00
+#define VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__SHIFT 8
+#define VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__MASK)
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK16__MASK 0x007f0000
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK16__SHIFT 16
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK16(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_UNK16__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_UNK16__MASK)
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK24__MASK 0xff000000
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK24__SHIFT 24
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK24(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_UNK24__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_UNK24__MASK)
+
+#define VIVS_GL_GS_UNK0388C 0x0000388c
+
+#define VIVS_GL_FENCE_OUT_DATA_HIGH 0x00003898
+
+#define VIVS_GL_SHADER_INDEX 0x0000389c
+
+#define VIVS_GL_GS_UNK038A0(i0) (0x000038a0 + 0x4*(i0))
+#define VIVS_GL_GS_UNK038A0__ESIZE 0x00000004
+#define VIVS_GL_GS_UNK038A0__LEN 0x00000008
+
+#define VIVS_GL_HALTI5_UNK038C0(i0) (0x000038c0 + 0x4*(i0))
+#define VIVS_GL_HALTI5_UNK038C0__ESIZE 0x00000004
+#define VIVS_GL_HALTI5_UNK038C0__LEN 0x00000010
+
+#define VIVS_GL_SECURITY_UNK3900 0x00003900
+
+#define VIVS_GL_SECURITY_UNK3904 0x00003904
+
#define VIVS_GL_UNK03A00 0x00003a00
+#define VIVS_GL_UNK03A04 0x00003a04
+
+#define VIVS_GL_UNK03A08 0x00003a08
+
+#define VIVS_GL_UNK03A0C 0x00003a0c
+
+#define VIVS_GL_UNK03A10 0x00003a10
+
#define VIVS_GL_STALL_TOKEN 0x00003c00
#define VIVS_GL_STALL_TOKEN_FROM__MASK 0x0000001f
#define VIVS_GL_STALL_TOKEN_FROM__SHIFT 0
@@ -344,6 +441,59 @@ Copyright (C) 2015
#define VIVS_GL_STALL_TOKEN_FLIP0 0x40000000
#define VIVS_GL_STALL_TOKEN_FLIP1 0x80000000
+#define VIVS_NFE 0x00000000
+
+#define VIVS_NFE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_NFE_VERTEX_STREAMS__ESIZE 0x00000004
+#define VIVS_NFE_VERTEX_STREAMS__LEN 0x00000010
+
+#define VIVS_NFE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00014600 + 0x4*(i0))
+
+#define VIVS_NFE_VERTEX_STREAMS_CONTROL(i0) (0x00014640 + 0x4*(i0))
+
+#define VIVS_NFE_VERTEX_STREAMS_UNK14680(i0) (0x00014680 + 0x4*(i0))
+
+#define VIVS_NFE_VERTEX_STREAMS_ROBUSTNESS_UNK146C0(i0) (0x000146c0 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_NFE_GENERIC_ATTRIB__ESIZE 0x00000004
+#define VIVS_NFE_GENERIC_ATTRIB__LEN 0x00000020
+
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0(i0) (0x00017800 + 0x4*(i0))
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__MASK 0x0000000f
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__SHIFT 0
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__MASK 0x00000030
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__SHIFT 4
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__MASK 0x00000700
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__SHIFT 8
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__MASK 0x00003000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__SHIFT 12
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE__MASK 0x0000c000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE__SHIFT 14
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE_OFF 0x00000000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE_ON 0x00008000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__MASK 0x00ff0000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__SHIFT 16
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__MASK)
+
+#define VIVS_NFE_GENERIC_ATTRIB_UNK17880(i0) (0x00017880 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_UNK17900(i0) (0x00017900 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_UNK17980(i0) (0x00017980 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_SCALE(i0) (0x00017a00 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1(i0) (0x00017a80 + 0x4*(i0))
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__MASK 0x000000ff
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__SHIFT 0
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_NONCONSECUTIVE 0x00000800
+
#define VIVS_DUMMY 0x00000000
#define VIVS_DUMMY_DUMMY 0x0003fffc
diff --git a/drivers/gpu/drm/etnaviv/state_3d.xml.h b/drivers/gpu/drm/etnaviv/state_3d.xml.h
index 73a97d3..ebbd4fc 100644
--- a/drivers/gpu/drm/etnaviv/state_3d.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_3d.xml.h
@@ -7,4 +7,9 @@
#define VIVS_TS_FLUSH_CACHE 0x00001650
#define VIVS_TS_FLUSH_CACHE_FLUSH 0x00000001
+#define VIVS_NTE_DESCRIPTOR_FLUSH 0x00014c44
+#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__MASK 0xf0000000
+#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__SHIFT 28
+#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28(x) (((x) << VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__SHIFT) & VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__MASK)
+
#endif /* STATE_3D_XML */
diff --git a/drivers/gpu/drm/etnaviv/state_blt.xml.h b/drivers/gpu/drm/etnaviv/state_blt.xml.h
new file mode 100644
index 0000000..daae559
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_blt.xml.h
@@ -0,0 +1,52 @@
+#ifndef STATE_BLT_XML
+#define STATE_BLT_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
+- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
+- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
+- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
+- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+
+Copyright (C) 2012-2017 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+/* This is a cut-down version of the state_blt.xml.h file */
+
+#define VIVS_BLT_ENABLE 0x000140b8
+#define VIVS_BLT_ENABLE_ENABLE 0x00000001
+
+#endif /* STATE_BLT_XML */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 60808da..41d8da2 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_HI_XML
#define STATE_HI_XML
@@ -9,10 +8,40 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state_hi.xml ( 25620 bytes, from 2016-08-19 22:07:37)
-- common.xml ( 20583 bytes, from 2016-06-07 05:22:38)
-
-Copyright (C) 2016
+- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
+- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
+- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
+- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
+- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+
+Copyright (C) 2012-2018 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
*/
@@ -192,6 +221,9 @@ Copyright (C) 2016
#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT 0
#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK)
+#define VIVS_HI_COMPRESSION_FLAGS 0x00000090
+#define VIVS_HI_COMPRESSION_FLAGS_DEC300 0x00000040
+
#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094
#define VIVS_HI_CHIP_SPECS_4 0x0000009c
@@ -203,6 +235,10 @@ Copyright (C) 2016
#define VIVS_HI_CHIP_PRODUCT_ID 0x000000a8
+#define VIVS_HI_BLT_INTR 0x000000d4
+
+#define VIVS_HI_AUXBIT 0x000000ec
+
#define VIVS_PM 0x00000000
#define VIVS_PM_POWER_CONTROLS 0x00000100
@@ -239,6 +275,17 @@ Copyright (C) 2016
#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_TX 0x00000080
#define VIVS_PM_PULSE_EATER 0x0000010c
+#define VIVS_PM_PULSE_EATER_DISABLE 0x00000001
+#define VIVS_PM_PULSE_EATER_DVFS_PERIOD__MASK 0x0000ff00
+#define VIVS_PM_PULSE_EATER_DVFS_PERIOD__SHIFT 8
+#define VIVS_PM_PULSE_EATER_DVFS_PERIOD(x) (((x) << VIVS_PM_PULSE_EATER_DVFS_PERIOD__SHIFT) & VIVS_PM_PULSE_EATER_DVFS_PERIOD__MASK)
+#define VIVS_PM_PULSE_EATER_UNK16 0x00010000
+#define VIVS_PM_PULSE_EATER_UNK17 0x00020000
+#define VIVS_PM_PULSE_EATER_INTERNAL_DFS 0x00040000
+#define VIVS_PM_PULSE_EATER_UNK19 0x00080000
+#define VIVS_PM_PULSE_EATER_UNK20 0x00100000
+#define VIVS_PM_PULSE_EATER_UNK22 0x00400000
+#define VIVS_PM_PULSE_EATER_UNK23 0x00800000
#define VIVS_MMUv2 0x00000000
@@ -280,6 +327,68 @@ Copyright (C) 2016
#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE 0x00000004
#define VIVS_MMUv2_EXCEPTION_ADDR__LEN 0x00000004
+#define VIVS_MMUv2_PROFILE_BLT_READ 0x000001a4
+
+#define VIVS_MMUv2_PTA_CONFIG 0x000001ac
+#define VIVS_MMUv2_PTA_CONFIG_INDEX__MASK 0x0000ffff
+#define VIVS_MMUv2_PTA_CONFIG_INDEX__SHIFT 0
+#define VIVS_MMUv2_PTA_CONFIG_INDEX(x) (((x) << VIVS_MMUv2_PTA_CONFIG_INDEX__SHIFT) & VIVS_MMUv2_PTA_CONFIG_INDEX__MASK)
+#define VIVS_MMUv2_PTA_CONFIG_UNK16 0x00010000
+
+#define VIVS_MMUv2_AXI_POLICY(i0) (0x000001c0 + 0x4*(i0))
+#define VIVS_MMUv2_AXI_POLICY__ESIZE 0x00000004
+#define VIVS_MMUv2_AXI_POLICY__LEN 0x00000008
+
+#define VIVS_MMUv2_SEC_EXCEPTION_ADDR 0x00000380
+
+#define VIVS_MMUv2_SEC_STATUS 0x00000384
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION0__MASK 0x00000003
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION0__SHIFT 0
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION0__MASK)
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION1__MASK 0x00000030
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION1__SHIFT 4
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION1__MASK)
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION2__MASK 0x00000300
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION2__SHIFT 8
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION2__MASK)
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION3__MASK 0x00003000
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION3__SHIFT 12
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION3__MASK)
+
+#define VIVS_MMUv2_SEC_CONTROL 0x00000388
+#define VIVS_MMUv2_SEC_CONTROL_ENABLE 0x00000001
+
+#define VIVS_MMUv2_PTA_ADDRESS_LOW 0x0000038c
+
+#define VIVS_MMUv2_PTA_ADDRESS_HIGH 0x00000390
+
+#define VIVS_MMUv2_PTA_CONTROL 0x00000394
+#define VIVS_MMUv2_PTA_CONTROL_ENABLE 0x00000001
+
+#define VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW 0x00000398
+
+#define VIVS_MMUv2_SEC_SAFE_ADDR_LOW 0x0000039c
+
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG 0x000003a0
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__MASK 0x000000ff
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__SHIFT 0
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(x) (((x) << VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__SHIFT) & VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__MASK)
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_UNK15 0x00008000
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__MASK 0x00ff0000
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__SHIFT 16
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(x) (((x) << VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__SHIFT) & VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__MASK)
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_UNK31 0x80000000
+
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL 0x000003a4
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__SHIFT 0
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__MASK)
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE 0x00010000
+
+#define VIVS_MMUv2_AHB_CONTROL 0x000003a8
+#define VIVS_MMUv2_AHB_CONTROL_RESET 0x00000001
+#define VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS 0x00000002
+
#define VIVS_MC 0x00000000
#define VIVS_MC_MMU_FE_PAGE_TABLE 0x00000400
@@ -340,13 +449,13 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_HI_READ 0x0000046c
#define VIVS_MC_PROFILE_CONFIG0 0x00000470
-#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x000000ff
#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0
#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f
-#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00
-#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x00ff0000
#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT 16
#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE 0x00000000
#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE 0x00010000
@@ -354,7 +463,7 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE 0x00030000
#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D 0x000b0000
#define VIVS_MC_PROFILE_CONFIG0_PE_RESET 0x000f0000
-#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0x0f000000
+#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0xff000000
#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT 24
#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES 0x04000000
#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER 0x07000000
@@ -368,7 +477,7 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG0_SH_RESET 0x0f000000
#define VIVS_MC_PROFILE_CONFIG1 0x00000474
-#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x000000ff
#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT 0
#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER 0x00000003
#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER 0x00000004
@@ -377,12 +486,12 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007
#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008
#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f
-#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000
#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100
#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00
-#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x00ff0000
#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16
#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT 0x00000000
#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT 0x00010000
@@ -392,7 +501,7 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000
#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000
#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000
-#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0x0f000000
+#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0xff000000
#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24
#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000
#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS 0x01000000
@@ -407,18 +516,21 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG1_TX_RESET 0x0f000000
#define VIVS_MC_PROFILE_CONFIG2 0x00000478
-#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x000000ff
#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT 0
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003
#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f
-#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200
#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_BLT__MASK 0xff000000
+#define VIVS_MC_PROFILE_CONFIG2_BLT__SHIFT 24
+#define VIVS_MC_PROFILE_CONFIG2_BLT_UNK0 0x00000000
#define VIVS_MC_PROFILE_CONFIG3 0x0000047c
@@ -432,7 +544,13 @@ Copyright (C) 2016
#define VIVS_MC_START_COMPOSITION 0x00000554
-#define VIVS_MC_128B_MERGE 0x00000558
+#define VIVS_MC_FLAGS 0x00000558
+#define VIVS_MC_FLAGS_128B_MERGE 0x00000001
+#define VIVS_MC_FLAGS_TPCV11_COMPRESSION 0x08000000
+
+#define VIVS_MC_L2_CACHE_CONFIG 0x0000055c
+
+#define VIVS_MC_PROFILE_L2_READ 0x00000564
#endif /* STATE_HI_XML */
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 5a7c9d8..735ce47 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -95,26 +95,21 @@ config DRM_EXYNOS_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
-config DRM_EXYNOS_IPP
- bool "Image Post Processor"
- help
- Choose this option if you want to use IPP feature for DRM.
-
config DRM_EXYNOS_FIMC
bool "FIMC"
- depends on DRM_EXYNOS_IPP && MFD_SYSCON
+ depends on BROKEN && MFD_SYSCON
help
Choose this option if you want to use Exynos FIMC for DRM.
config DRM_EXYNOS_ROTATOR
bool "Rotator"
- depends on DRM_EXYNOS_IPP
+ depends on BROKEN
help
Choose this option if you want to use Exynos Rotator for DRM.
config DRM_EXYNOS_GSC
bool "GScaler"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n
+ depends on BROKEN && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index bdf4212..a51c545 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -18,7 +18,6 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER) += exynos_mixer.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6be5b53..1c330f2 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -21,13 +21,12 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <video/exynos5433_decon.h>
-
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
+#include "regs-decon5433.h"
#define DSD_CFG_MUX 0x1004
#define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13)
@@ -744,11 +743,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "cannot find IO resource\n");
- return -ENXIO;
- }
-
ctx->addr = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->addr)) {
dev_err(dev, "ioremap failed\n");
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 615efcf..3931d5e 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -25,13 +25,13 @@
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
-#include <video/exynos7_decon.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_iommu.h"
+#include "regs-decon7.h"
/*
* DECON stands for Display and Enhancement controller.
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 39629e7..964831d 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -41,6 +41,7 @@ struct exynos_dp_device {
struct device *dev;
struct videomode vm;
+ struct analogix_dp_device *adp;
struct analogix_dp_plat_data plat_data;
};
@@ -157,13 +158,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
int ret;
- /*
- * Just like the probe function said, we don't need the
- * device drvrate anymore, we should leave the charge to
- * analogix dp driver, set the device drvdata to NULL.
- */
- dev_set_drvdata(dev, NULL);
-
dp->dev = dev;
dp->drm_dev = drm_dev;
@@ -190,13 +184,22 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
dp->plat_data.encoder = encoder;
- return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
+ dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
+ if (IS_ERR(dp->adp)) {
+ dp->encoder.funcs->destroy(&dp->encoder);
+ return PTR_ERR(dp->adp);
+ }
+
+ return 0;
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
void *data)
{
- return analogix_dp_unbind(dev, master, data);
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
+
+ analogix_dp_unbind(dp->adp);
+ dp->encoder.funcs->destroy(&dp->encoder);
}
static const struct component_ops exynos_dp_ops = {
@@ -241,6 +244,7 @@ static int exynos_dp_probe(struct platform_device *pdev)
/* The remote port can be either a panel or a bridge */
dp->plat_data.panel = panel;
+ dp->plat_data.skip_connector = !!bridge;
dp->ptn_bridge = bridge;
out:
@@ -257,12 +261,16 @@ static int exynos_dp_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int exynos_dp_suspend(struct device *dev)
{
- return analogix_dp_suspend(dev);
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
+
+ return analogix_dp_suspend(dp->adp);
}
static int exynos_dp_resume(struct device *dev)
{
- return analogix_dp_resume(dev);
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
+
+ return analogix_dp_resume(dp->adp);
}
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 27e423b..a518e9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <linux/component.h>
@@ -28,7 +29,6 @@
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
-#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
@@ -87,11 +87,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
file->driver_priv = NULL;
}
-static void exynos_drm_lastclose(struct drm_device *dev)
-{
- exynos_drm_fbdev_restore_mode(dev);
-}
-
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.fault = exynos_drm_gem_fault,
.open = drm_gem_vm_open,
@@ -113,14 +108,6 @@ static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
- DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
- DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
- DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
- DRM_AUTH | DRM_RENDER_ALLOW),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -138,7 +125,7 @@ static struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,
- .lastclose = exynos_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.postclose = exynos_drm_postclose,
.gem_free_object_unlocked = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
@@ -261,9 +248,6 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
}, {
DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
}, {
- DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP),
- DRM_VIRTUAL_DEVICE
- }, {
&exynos_drm_platform_driver,
DRM_VIRTUAL_DEVICE
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 589d465..df2262f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -188,7 +188,6 @@ struct exynos_drm_g2d_private {
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
- struct device *ipp_dev;
};
/*
@@ -291,6 +290,5 @@ extern struct platform_driver g2d_driver;
extern struct platform_driver fimc_driver;
extern struct platform_driver rotator_driver;
extern struct platform_driver gsc_driver;
-extern struct platform_driver ipp_driver;
extern struct platform_driver mic_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 8208df5..0faaf82 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -205,7 +205,7 @@ static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
- .output_poll_changed = exynos_drm_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = exynos_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index dfb66ec..132dd52 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -270,24 +270,6 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
private->fb_helper = NULL;
}
-void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
-
- if (!private || !private->fb_helper)
- return;
-
- drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
-}
-
-void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *fb_helper = private->fb_helper;
-
- drm_fb_helper_hotplug_event(fb_helper);
-}
-
void exynos_drm_fbdev_suspend(struct drm_device *dev)
{
struct exynos_drm_private *private = dev->dev_private;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index 645d1bb7f..b338472 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -19,8 +19,6 @@
int exynos_drm_fbdev_init(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
-void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
-void exynos_drm_output_poll_changed(struct drm_device *dev);
void exynos_drm_fbdev_suspend(struct drm_device *drm);
void exynos_drm_fbdev_resume(struct drm_device *drm);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 2b8bf2d..f68ef1b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -286,7 +286,6 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
if (!node) {
- dev_err(dev, "failed to allocate memory\n");
ret = -ENOMEM;
goto err;
}
@@ -926,7 +925,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
struct drm_device *drm_dev = g2d->subdrv.drm_dev;
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
- struct timeval now;
+ struct timespec64 now;
if (list_empty(&runqueue_node->event_list))
return;
@@ -934,9 +933,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
e = list_first_entry(&runqueue_node->event_list,
struct drm_exynos_pending_g2d_event, base.link);
- do_gettimeofday(&now);
+ ktime_get_ts64(&now);
e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
e->event.cmdlist_no = cmdlist_no;
drm_send_event(drm_dev, &e->base);
@@ -1358,10 +1357,9 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
return -EFAULT;
runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
- if (!runqueue_node) {
- dev_err(dev, "failed to allocate memory\n");
+ if (!runqueue_node)
return -ENOMEM;
- }
+
run_cmdlist = &runqueue_node->run_cmdlist;
event_list = &runqueue_node->event_list;
INIT_LIST_HEAD(run_cmdlist);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
deleted file mode 100644
index 3edda18..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ /dev/null
@@ -1,1806 +0,0 @@
-/*
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- * Authors:
- * Eunchul Kim <chulspro.kim@samsung.com>
- * Jinyoung Jeon <jy0.jeon@samsung.com>
- * Sangmin Lee <lsmin.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-#include "exynos_drm_ipp.h"
-#include "exynos_drm_iommu.h"
-
-/*
- * IPP stands for Image Post Processing and
- * supports image scaler/rotator and input/output DMA operations.
- * using FIMC, GSC, Rotator, so on.
- * IPP is integration device driver of same attribute h/w
- */
-
-/*
- * TODO
- * 1. expand command control id.
- * 2. integrate property and config.
- * 3. removed send_event id check routine.
- * 4. compare send_event id if needed.
- * 5. free subdrv_remove notifier callback list if needed.
- * 6. need to check subdrv_open about multi-open.
- * 7. need to power_on implement power and sysmmu ctrl.
- */
-
-#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
-#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
-
-/*
- * A structure of event.
- *
- * @base: base of event.
- * @event: ipp event.
- */
-struct drm_exynos_ipp_send_event {
- struct drm_pending_event base;
- struct drm_exynos_ipp_event event;
-};
-
-/*
- * A structure of memory node.
- *
- * @list: list head to memory queue information.
- * @ops_id: id of operations.
- * @prop_id: id of property.
- * @buf_id: id of buffer.
- * @buf_info: gem objects and dma address, size.
- * @filp: a pointer to drm_file.
- */
-struct drm_exynos_ipp_mem_node {
- struct list_head list;
- enum drm_exynos_ops_id ops_id;
- u32 prop_id;
- u32 buf_id;
- struct drm_exynos_ipp_buf_info buf_info;
-};
-
-/*
- * A structure of ipp context.
- *
- * @subdrv: prepare initialization using subdrv.
- * @ipp_lock: lock for synchronization of access to ipp_idr.
- * @prop_lock: lock for synchronization of access to prop_idr.
- * @ipp_idr: ipp driver idr.
- * @prop_idr: property idr.
- * @event_workq: event work queue.
- * @cmd_workq: command work queue.
- */
-struct ipp_context {
- struct exynos_drm_subdrv subdrv;
- struct mutex ipp_lock;
- struct mutex prop_lock;
- struct idr ipp_idr;
- struct idr prop_idr;
- struct workqueue_struct *event_workq;
- struct workqueue_struct *cmd_workq;
-};
-
-static LIST_HEAD(exynos_drm_ippdrv_list);
-static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
-static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
-
-int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
-{
- mutex_lock(&exynos_drm_ippdrv_lock);
- list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
- mutex_unlock(&exynos_drm_ippdrv_lock);
-
- return 0;
-}
-
-int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
-{
- mutex_lock(&exynos_drm_ippdrv_lock);
- list_del(&ippdrv->drv_list);
- mutex_unlock(&exynos_drm_ippdrv_lock);
-
- return 0;
-}
-
-static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
-{
- int ret;
-
- mutex_lock(lock);
- ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
- mutex_unlock(lock);
-
- return ret;
-}
-
-static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
-{
- mutex_lock(lock);
- idr_remove(id_idr, id);
- mutex_unlock(lock);
-}
-
-static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
-{
- void *obj;
-
- mutex_lock(lock);
- obj = idr_find(id_idr, id);
- mutex_unlock(lock);
-
- return obj;
-}
-
-static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_property *property)
-{
- if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
- !pm_runtime_suspended(ippdrv->dev)))
- return -EBUSY;
-
- if (ippdrv->check_property &&
- ippdrv->check_property(ippdrv->dev, property))
- return -EINVAL;
-
- return 0;
-}
-
-static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
- struct drm_exynos_ipp_property *property)
-{
- struct exynos_drm_ippdrv *ippdrv;
- u32 ipp_id = property->ipp_id;
- int ret;
-
- if (ipp_id) {
- ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
- if (!ippdrv) {
- DRM_DEBUG("ipp%d driver not found\n", ipp_id);
- return ERR_PTR(-ENODEV);
- }
-
- ret = ipp_check_driver(ippdrv, property);
- if (ret < 0) {
- DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
- return ERR_PTR(ret);
- }
-
- return ippdrv;
- } else {
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- ret = ipp_check_driver(ippdrv, property);
- if (ret == 0)
- return ippdrv;
- }
-
- DRM_DEBUG("cannot find driver suitable for given property.\n");
- }
-
- return ERR_PTR(-ENODEV);
-}
-
-static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
-{
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- int count = 0;
-
- DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
-
- /*
- * This case is search ipp driver by prop_id handle.
- * sometimes, ipp subsystem find driver by prop_id.
- * e.g PAUSE state, queue buf, command control.
- */
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv);
-
- mutex_lock(&ippdrv->cmd_lock);
- list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
- if (c_node->property.prop_id == prop_id) {
- mutex_unlock(&ippdrv->cmd_lock);
- return ippdrv;
- }
- }
- mutex_unlock(&ippdrv->cmd_lock);
- }
-
- return ERR_PTR(-ENODEV);
-}
-
-int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct device *dev = file_priv->ipp_dev;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_prop_list *prop_list = data;
- struct exynos_drm_ippdrv *ippdrv;
- int count = 0;
-
- if (!ctx) {
- DRM_ERROR("invalid context.\n");
- return -EINVAL;
- }
-
- if (!prop_list) {
- DRM_ERROR("invalid property parameter.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);
-
- if (!prop_list->ipp_id) {
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
- count++;
-
- /*
- * Supports ippdrv list count for user application.
- * First step user application getting ippdrv count.
- * and second step getting ippdrv capability using ipp_id.
- */
- prop_list->count = count;
- } else {
- /*
- * Getting ippdrv capability by ipp_id.
- * some device not supported wb, output interface.
- * so, user application detect correct ipp driver
- * using this ioctl.
- */
- ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
- prop_list->ipp_id);
- if (!ippdrv) {
- DRM_ERROR("not found ipp%d driver.\n",
- prop_list->ipp_id);
- return -ENODEV;
- }
-
- *prop_list = ippdrv->prop_list;
- }
-
- return 0;
-}
-
-static void ipp_print_property(struct drm_exynos_ipp_property *property,
- int idx)
-{
- struct drm_exynos_ipp_config *config = &property->config[idx];
- struct drm_exynos_pos *pos = &config->pos;
- struct drm_exynos_sz *sz = &config->sz;
-
- DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
- property->prop_id, idx ? "dst" : "src", config->fmt);
-
- DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
- pos->x, pos->y, pos->w, pos->h,
- sz->hsize, sz->vsize, config->flip, config->degree);
-}
-
-static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
-{
- struct drm_exynos_ipp_cmd_work *cmd_work;
-
- cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
- if (!cmd_work)
- return ERR_PTR(-ENOMEM);
-
- INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
-
- return cmd_work;
-}
-
-static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
-{
- struct drm_exynos_ipp_event_work *event_work;
-
- event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
- if (!event_work)
- return ERR_PTR(-ENOMEM);
-
- INIT_WORK(&event_work->work, ipp_sched_event);
-
- return event_work;
-}
-
-int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct device *dev = file_priv->ipp_dev;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_property *property = data;
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- u32 prop_id;
- int ret, i;
-
- if (!ctx) {
- DRM_ERROR("invalid context.\n");
- return -EINVAL;
- }
-
- if (!property) {
- DRM_ERROR("invalid property parameter.\n");
- return -EINVAL;
- }
-
- prop_id = property->prop_id;
-
- /*
- * This is log print for user application property.
- * user application set various property.
- */
- for_each_ipp_ops(i)
- ipp_print_property(property, i);
-
- /*
- * In case prop_id is not zero try to set existing property.
- */
- if (prop_id) {
- c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);
-
- if (!c_node || c_node->filp != file) {
- DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
- return -EINVAL;
- }
-
- if (c_node->state != IPP_STATE_STOP) {
- DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
- return -EINVAL;
- }
-
- c_node->property = *property;
-
- return 0;
- }
-
- /* find ipp driver using ipp id */
- ippdrv = ipp_find_driver(ctx, property);
- if (IS_ERR(ippdrv)) {
- DRM_ERROR("failed to get ipp driver.\n");
- return -EINVAL;
- }
-
- /* allocate command node */
- c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
- if (!c_node)
- return -ENOMEM;
-
- ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
- if (ret < 0) {
- DRM_ERROR("failed to create id.\n");
- goto err_clear;
- }
- property->prop_id = ret;
-
- DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n",
- property->prop_id, property->cmd, ippdrv);
-
- /* stored property information and ippdrv in private data */
- c_node->property = *property;
- c_node->state = IPP_STATE_IDLE;
- c_node->filp = file;
-
- c_node->start_work = ipp_create_cmd_work();
- if (IS_ERR(c_node->start_work)) {
- DRM_ERROR("failed to create start work.\n");
- ret = PTR_ERR(c_node->start_work);
- goto err_remove_id;
- }
-
- c_node->stop_work = ipp_create_cmd_work();
- if (IS_ERR(c_node->stop_work)) {
- DRM_ERROR("failed to create stop work.\n");
- ret = PTR_ERR(c_node->stop_work);
- goto err_free_start;
- }
-
- c_node->event_work = ipp_create_event_work();
- if (IS_ERR(c_node->event_work)) {
- DRM_ERROR("failed to create event work.\n");
- ret = PTR_ERR(c_node->event_work);
- goto err_free_stop;
- }
-
- mutex_init(&c_node->lock);
- mutex_init(&c_node->mem_lock);
- mutex_init(&c_node->event_lock);
-
- init_completion(&c_node->start_complete);
- init_completion(&c_node->stop_complete);
-
- for_each_ipp_ops(i)
- INIT_LIST_HEAD(&c_node->mem_list[i]);
-
- INIT_LIST_HEAD(&c_node->event_list);
- mutex_lock(&ippdrv->cmd_lock);
- list_add_tail(&c_node->list, &ippdrv->cmd_list);
- mutex_unlock(&ippdrv->cmd_lock);
-
- /* make dedicated state without m2m */
- if (!ipp_is_m2m_cmd(property->cmd))
- ippdrv->dedicated = true;
-
- return 0;
-
-err_free_stop:
- kfree(c_node->stop_work);
-err_free_start:
- kfree(c_node->start_work);
-err_remove_id:
- ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
-err_clear:
- kfree(c_node);
- return ret;
-}
-
-static int ipp_validate_mem_node(struct drm_device *drm_dev,
- struct drm_exynos_ipp_mem_node *m_node,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- struct drm_exynos_ipp_config *ipp_cfg;
- unsigned int num_plane;
- unsigned long size, buf_size = 0, plane_size, img_size = 0;
- unsigned int bpp, width, height;
- int i;
-
- ipp_cfg = &c_node->property.config[m_node->ops_id];
- num_plane = drm_format_num_planes(ipp_cfg->fmt);
-
- /**
- * This is a rather simplified validation of a memory node.
- * It basically verifies provided gem object handles
- * and the buffer sizes with respect to current configuration.
- * This is not the best that can be done
- * but it seems more than enough
- */
- for (i = 0; i < num_plane; ++i) {
- width = ipp_cfg->sz.hsize;
- height = ipp_cfg->sz.vsize;
- bpp = drm_format_plane_cpp(ipp_cfg->fmt, i);
-
- /*
- * The result of drm_format_plane_cpp() for chroma planes must
- * be used with drm_format_xxxx_chroma_subsampling() for
- * correct result.
- */
- if (i > 0) {
- width /= drm_format_horz_chroma_subsampling(
- ipp_cfg->fmt);
- height /= drm_format_vert_chroma_subsampling(
- ipp_cfg->fmt);
- }
- plane_size = width * height * bpp;
- img_size += plane_size;
-
- if (m_node->buf_info.handles[i]) {
- size = exynos_drm_gem_get_size(drm_dev,
- m_node->buf_info.handles[i],
- c_node->filp);
- if (plane_size > size) {
- DRM_ERROR(
- "buffer %d is smaller than required\n",
- i);
- return -EINVAL;
- }
-
- buf_size += size;
- }
- }
-
- if (buf_size < img_size) {
- DRM_ERROR("size of buffers(%lu) is smaller than image(%lu)\n",
- buf_size, img_size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ipp_put_mem_node(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_mem_node *m_node)
-{
- int i;
-
- DRM_DEBUG_KMS("node[%pK]\n", m_node);
-
- if (!m_node) {
- DRM_ERROR("invalid dequeue node.\n");
- return -EFAULT;
- }
-
- DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
-
- /* put gem buffer */
- for_each_ipp_planar(i) {
- unsigned long handle = m_node->buf_info.handles[i];
- if (handle)
- exynos_drm_gem_put_dma_addr(drm_dev, handle,
- c_node->filp);
- }
-
- list_del(&m_node->list);
- kfree(m_node);
-
- return 0;
-}
-
-static struct drm_exynos_ipp_mem_node
- *ipp_get_mem_node(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_mem_node *m_node;
- struct drm_exynos_ipp_buf_info *buf_info;
- int i;
-
- m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
- if (!m_node)
- return ERR_PTR(-ENOMEM);
-
- buf_info = &m_node->buf_info;
-
- /* operations, buffer id */
- m_node->ops_id = qbuf->ops_id;
- m_node->prop_id = qbuf->prop_id;
- m_node->buf_id = qbuf->buf_id;
- INIT_LIST_HEAD(&m_node->list);
-
- DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id);
- DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
-
- for_each_ipp_planar(i) {
- DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
-
- /* get dma address by handle */
- if (qbuf->handle[i]) {
- dma_addr_t *addr;
-
- addr = exynos_drm_gem_get_dma_addr(drm_dev,
- qbuf->handle[i], c_node->filp);
- if (IS_ERR(addr)) {
- DRM_ERROR("failed to get addr.\n");
- ipp_put_mem_node(drm_dev, c_node, m_node);
- return ERR_PTR(-EFAULT);
- }
-
- buf_info->handles[i] = qbuf->handle[i];
- buf_info->base[i] = *addr;
- DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
- &buf_info->base[i], buf_info->handles[i]);
- }
- }
-
- mutex_lock(&c_node->mem_lock);
- if (ipp_validate_mem_node(drm_dev, m_node, c_node)) {
- ipp_put_mem_node(drm_dev, c_node, m_node);
- mutex_unlock(&c_node->mem_lock);
- return ERR_PTR(-EFAULT);
- }
- list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
- mutex_unlock(&c_node->mem_lock);
-
- return m_node;
-}
-
-static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node, int ops)
-{
- struct drm_exynos_ipp_mem_node *m_node, *tm_node;
- struct list_head *head = &c_node->mem_list[ops];
-
- mutex_lock(&c_node->mem_lock);
-
- list_for_each_entry_safe(m_node, tm_node, head, list) {
- int ret;
-
- ret = ipp_put_mem_node(drm_dev, c_node, m_node);
- if (ret)
- DRM_ERROR("failed to put m_node.\n");
- }
-
- mutex_unlock(&c_node->mem_lock);
-}
-
-static int ipp_get_event(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_send_event *e;
- int ret;
-
- DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
-
- /* make event */
- e->event.base.type = DRM_EXYNOS_IPP_EVENT;
- e->event.base.length = sizeof(e->event);
- e->event.user_data = qbuf->user_data;
- e->event.prop_id = qbuf->prop_id;
- e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
-
- ret = drm_event_reserve_init(drm_dev, c_node->filp, &e->base, &e->event.base);
- if (ret) {
- kfree(e);
- return ret;
- }
-
- mutex_lock(&c_node->event_lock);
- list_add_tail(&e->base.link, &c_node->event_list);
- mutex_unlock(&c_node->event_lock);
-
- return 0;
-}
-
-static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_send_event *e, *te;
- int count = 0;
-
- mutex_lock(&c_node->event_lock);
- list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
- DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e);
-
- /*
- * qbuf == NULL condition means all event deletion.
- * stop operations want to delete all event list.
- * another case delete only same buf id.
- */
- if (!qbuf) {
- /* delete list */
- list_del(&e->base.link);
- kfree(e);
- }
-
- /* compare buffer id */
- if (qbuf && (qbuf->buf_id ==
- e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
- /* delete list */
- list_del(&e->base.link);
- kfree(e);
- goto out_unlock;
- }
- }
-
-out_unlock:
- mutex_unlock(&c_node->event_lock);
- return;
-}
-
-static void ipp_clean_cmd_node(struct ipp_context *ctx,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- int i;
-
- /* cancel works */
- cancel_work_sync(&c_node->start_work->work);
- cancel_work_sync(&c_node->stop_work->work);
- cancel_work_sync(&c_node->event_work->work);
-
- /* put event */
- ipp_put_event(c_node, NULL);
-
- for_each_ipp_ops(i)
- ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);
-
- /* delete list */
- list_del(&c_node->list);
-
- ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
- c_node->property.prop_id);
-
- /* destroy mutex */
- mutex_destroy(&c_node->lock);
- mutex_destroy(&c_node->mem_lock);
- mutex_destroy(&c_node->event_lock);
-
- /* free command node */
- kfree(c_node->start_work);
- kfree(c_node->stop_work);
- kfree(c_node->event_work);
- kfree(c_node);
-}
-
-static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
-{
- switch (c_node->property.cmd) {
- case IPP_CMD_WB:
- return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
- case IPP_CMD_OUTPUT:
- return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
- case IPP_CMD_M2M:
- default:
- return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
- !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
- }
-}
-
-static struct drm_exynos_ipp_mem_node
- *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_mem_node *m_node;
- struct list_head *head;
- int count = 0;
-
- DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
-
- /* source/destination memory list */
- head = &c_node->mem_list[qbuf->ops_id];
-
- /* find memory node from memory list */
- list_for_each_entry(m_node, head, list) {
- DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node);
-
- /* compare buffer id */
- if (m_node->buf_id == qbuf->buf_id)
- return m_node;
- }
-
- return NULL;
-}
-
-static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_mem_node *m_node)
-{
- struct exynos_drm_ipp_ops *ops = NULL;
- int ret = 0;
-
- DRM_DEBUG_KMS("node[%pK]\n", m_node);
-
- if (!m_node) {
- DRM_ERROR("invalid queue node.\n");
- return -EFAULT;
- }
-
- DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
-
- /* get operations callback */
- ops = ippdrv->ops[m_node->ops_id];
- if (!ops) {
- DRM_ERROR("not support ops.\n");
- return -EFAULT;
- }
-
- /* set address and enable irq */
- if (ops->set_addr) {
- ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
- m_node->buf_id, IPP_BUF_ENQUEUE);
- if (ret) {
- DRM_ERROR("failed to set addr.\n");
- return ret;
- }
- }
-
- return ret;
-}
-
-static void ipp_handle_cmd_work(struct device *dev,
- struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_work *cmd_work,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- struct ipp_context *ctx = get_ipp_context(dev);
-
- cmd_work->ippdrv = ippdrv;
- cmd_work->c_node = c_node;
- queue_work(ctx->cmd_workq, &cmd_work->work);
-}
-
-static int ipp_queue_buf_with_run(struct device *dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_mem_node *m_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_property *property;
- struct exynos_drm_ipp_ops *ops;
- int ret;
-
- ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
- if (IS_ERR(ippdrv)) {
- DRM_ERROR("failed to get ipp driver.\n");
- return -EFAULT;
- }
-
- ops = ippdrv->ops[qbuf->ops_id];
- if (!ops) {
- DRM_ERROR("failed to get ops.\n");
- return -EFAULT;
- }
-
- property = &c_node->property;
-
- if (c_node->state != IPP_STATE_START) {
- DRM_DEBUG_KMS("bypass for invalid state.\n");
- return 0;
- }
-
- mutex_lock(&c_node->mem_lock);
- if (!ipp_check_mem_list(c_node)) {
- mutex_unlock(&c_node->mem_lock);
- DRM_DEBUG_KMS("empty memory.\n");
- return 0;
- }
-
- /*
- * If set destination buffer and enabled clock,
- * then m2m operations need start operations at queue_buf
- */
- if (ipp_is_m2m_cmd(property->cmd)) {
- struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
-
- cmd_work->ctrl = IPP_CTRL_PLAY;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
- } else {
- ret = ipp_set_mem_node(ippdrv, c_node, m_node);
- if (ret) {
- mutex_unlock(&c_node->mem_lock);
- DRM_ERROR("failed to set m node.\n");
- return ret;
- }
- }
- mutex_unlock(&c_node->mem_lock);
-
- return 0;
-}
-
-static void ipp_clean_queue_buf(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_mem_node *m_node, *tm_node;
-
- /* delete list */
- mutex_lock(&c_node->mem_lock);
- list_for_each_entry_safe(m_node, tm_node,
- &c_node->mem_list[qbuf->ops_id], list) {
- if (m_node->buf_id == qbuf->buf_id &&
- m_node->ops_id == qbuf->ops_id)
- ipp_put_mem_node(drm_dev, c_node, m_node);
- }
- mutex_unlock(&c_node->mem_lock);
-}
-
-int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct device *dev = file_priv->ipp_dev;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_queue_buf *qbuf = data;
- struct drm_exynos_ipp_cmd_node *c_node;
- struct drm_exynos_ipp_mem_node *m_node;
- int ret;
-
- if (!qbuf) {
- DRM_ERROR("invalid buf parameter.\n");
- return -EINVAL;
- }
-
- if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
- DRM_ERROR("invalid ops parameter.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
- qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
- qbuf->buf_id, qbuf->buf_type);
-
- /* find command node */
- c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
- qbuf->prop_id);
- if (!c_node || c_node->filp != file) {
- DRM_ERROR("failed to get command node.\n");
- return -ENODEV;
- }
-
- /* buffer control */
- switch (qbuf->buf_type) {
- case IPP_BUF_ENQUEUE:
- /* get memory node */
- m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
- if (IS_ERR(m_node)) {
- DRM_ERROR("failed to get m_node.\n");
- return PTR_ERR(m_node);
- }
-
- /*
- * first step get event for destination buffer.
- * and second step when M2M case run with destination buffer
- * if needed.
- */
- if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
- /* get event for destination buffer */
- ret = ipp_get_event(drm_dev, c_node, qbuf);
- if (ret) {
- DRM_ERROR("failed to get event.\n");
- goto err_clean_node;
- }
-
- /*
- * M2M case run play control for streaming feature.
- * other case set address and waiting.
- */
- ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
- if (ret) {
- DRM_ERROR("failed to run command.\n");
- goto err_clean_node;
- }
- }
- break;
- case IPP_BUF_DEQUEUE:
- mutex_lock(&c_node->lock);
-
- /* put event for destination buffer */
- if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
- ipp_put_event(c_node, qbuf);
-
- ipp_clean_queue_buf(drm_dev, c_node, qbuf);
-
- mutex_unlock(&c_node->lock);
- break;
- default:
- DRM_ERROR("invalid buffer control.\n");
- return -EINVAL;
- }
-
- return 0;
-
-err_clean_node:
- DRM_ERROR("clean memory nodes.\n");
-
- ipp_clean_queue_buf(drm_dev, c_node, qbuf);
- return ret;
-}
-
-static bool exynos_drm_ipp_check_valid(struct device *dev,
- enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
-{
- if (ctrl != IPP_CTRL_PLAY) {
- if (pm_runtime_suspended(dev)) {
- DRM_ERROR("pm:runtime_suspended.\n");
- goto err_status;
- }
- }
-
- switch (ctrl) {
- case IPP_CTRL_PLAY:
- if (state != IPP_STATE_IDLE)
- goto err_status;
- break;
- case IPP_CTRL_STOP:
- if (state == IPP_STATE_STOP)
- goto err_status;
- break;
- case IPP_CTRL_PAUSE:
- if (state != IPP_STATE_START)
- goto err_status;
- break;
- case IPP_CTRL_RESUME:
- if (state != IPP_STATE_STOP)
- goto err_status;
- break;
- default:
- DRM_ERROR("invalid state.\n");
- goto err_status;
- }
-
- return true;
-
-err_status:
- DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
- return false;
-}
-
-int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ippdrv *ippdrv = NULL;
- struct device *dev = file_priv->ipp_dev;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
- struct drm_exynos_ipp_cmd_work *cmd_work;
- struct drm_exynos_ipp_cmd_node *c_node;
-
- if (!ctx) {
- DRM_ERROR("invalid context.\n");
- return -EINVAL;
- }
-
- if (!cmd_ctrl) {
- DRM_ERROR("invalid control parameter.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
- cmd_ctrl->ctrl, cmd_ctrl->prop_id);
-
- ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
- if (IS_ERR(ippdrv)) {
- DRM_ERROR("failed to get ipp driver.\n");
- return PTR_ERR(ippdrv);
- }
-
- c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
- cmd_ctrl->prop_id);
- if (!c_node || c_node->filp != file) {
- DRM_ERROR("invalid command node list.\n");
- return -ENODEV;
- }
-
- if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
- c_node->state)) {
- DRM_ERROR("invalid state.\n");
- return -EINVAL;
- }
-
- switch (cmd_ctrl->ctrl) {
- case IPP_CTRL_PLAY:
- if (pm_runtime_suspended(ippdrv->dev))
- pm_runtime_get_sync(ippdrv->dev);
-
- c_node->state = IPP_STATE_START;
-
- cmd_work = c_node->start_work;
- cmd_work->ctrl = cmd_ctrl->ctrl;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
- break;
- case IPP_CTRL_STOP:
- cmd_work = c_node->stop_work;
- cmd_work->ctrl = cmd_ctrl->ctrl;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
-
- if (!wait_for_completion_timeout(&c_node->stop_complete,
- msecs_to_jiffies(300))) {
- DRM_ERROR("timeout stop:prop_id[%d]\n",
- c_node->property.prop_id);
- }
-
- c_node->state = IPP_STATE_STOP;
- ippdrv->dedicated = false;
- mutex_lock(&ippdrv->cmd_lock);
- ipp_clean_cmd_node(ctx, c_node);
-
- if (list_empty(&ippdrv->cmd_list))
- pm_runtime_put_sync(ippdrv->dev);
- mutex_unlock(&ippdrv->cmd_lock);
- break;
- case IPP_CTRL_PAUSE:
- cmd_work = c_node->stop_work;
- cmd_work->ctrl = cmd_ctrl->ctrl;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
-
- if (!wait_for_completion_timeout(&c_node->stop_complete,
- msecs_to_jiffies(200))) {
- DRM_ERROR("timeout stop:prop_id[%d]\n",
- c_node->property.prop_id);
- }
-
- c_node->state = IPP_STATE_STOP;
- break;
- case IPP_CTRL_RESUME:
- c_node->state = IPP_STATE_START;
- cmd_work = c_node->start_work;
- cmd_work->ctrl = cmd_ctrl->ctrl;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
- break;
- default:
- DRM_ERROR("could not support this state currently.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
- cmd_ctrl->ctrl, cmd_ctrl->prop_id);
-
- return 0;
-}
-
-int exynos_drm_ippnb_register(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(
- &exynos_drm_ippnb_list, nb);
-}
-
-int exynos_drm_ippnb_unregister(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(
- &exynos_drm_ippnb_list, nb);
-}
-
-int exynos_drm_ippnb_send_event(unsigned long val, void *v)
-{
- return blocking_notifier_call_chain(
- &exynos_drm_ippnb_list, val, v);
-}
-
-static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_property *property)
-{
- struct exynos_drm_ipp_ops *ops = NULL;
- bool swap = false;
- int ret, i;
-
- if (!property) {
- DRM_ERROR("invalid property parameter.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
-
- /* reset h/w block */
- if (ippdrv->reset &&
- ippdrv->reset(ippdrv->dev)) {
- return -EINVAL;
- }
-
- /* set source,destination operations */
- for_each_ipp_ops(i) {
- struct drm_exynos_ipp_config *config =
- &property->config[i];
-
- ops = ippdrv->ops[i];
- if (!ops || !config) {
- DRM_ERROR("not support ops and config.\n");
- return -EINVAL;
- }
-
- /* set format */
- if (ops->set_fmt) {
- ret = ops->set_fmt(ippdrv->dev, config->fmt);
- if (ret)
- return ret;
- }
-
- /* set transform for rotation, flip */
- if (ops->set_transf) {
- ret = ops->set_transf(ippdrv->dev, config->degree,
- config->flip, &swap);
- if (ret)
- return ret;
- }
-
- /* set size */
- if (ops->set_size) {
- ret = ops->set_size(ippdrv->dev, swap, &config->pos,
- &config->sz);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- struct drm_exynos_ipp_mem_node *m_node;
- struct drm_exynos_ipp_property *property = &c_node->property;
- struct list_head *head;
- int ret, i;
-
- DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
-
- /* store command info in ippdrv */
- ippdrv->c_node = c_node;
-
- mutex_lock(&c_node->mem_lock);
- if (!ipp_check_mem_list(c_node)) {
- DRM_DEBUG_KMS("empty memory.\n");
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- /* set current property in ippdrv */
- ret = ipp_set_property(ippdrv, property);
- if (ret) {
- DRM_ERROR("failed to set property.\n");
- ippdrv->c_node = NULL;
- goto err_unlock;
- }
-
- /* check command */
- switch (property->cmd) {
- case IPP_CMD_M2M:
- for_each_ipp_ops(i) {
- /* source/destination memory list */
- head = &c_node->mem_list[i];
-
- m_node = list_first_entry(head,
- struct drm_exynos_ipp_mem_node, list);
-
- DRM_DEBUG_KMS("m_node[%pK]\n", m_node);
-
- ret = ipp_set_mem_node(ippdrv, c_node, m_node);
- if (ret) {
- DRM_ERROR("failed to set m node.\n");
- goto err_unlock;
- }
- }
- break;
- case IPP_CMD_WB:
- /* destination memory list */
- head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
-
- list_for_each_entry(m_node, head, list) {
- ret = ipp_set_mem_node(ippdrv, c_node, m_node);
- if (ret) {
- DRM_ERROR("failed to set m node.\n");
- goto err_unlock;
- }
- }
- break;
- case IPP_CMD_OUTPUT:
- /* source memory list */
- head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
-
- list_for_each_entry(m_node, head, list) {
- ret = ipp_set_mem_node(ippdrv, c_node, m_node);
- if (ret) {
- DRM_ERROR("failed to set m node.\n");
- goto err_unlock;
- }
- }
- break;
- default:
- DRM_ERROR("invalid operations.\n");
- ret = -EINVAL;
- goto err_unlock;
- }
- mutex_unlock(&c_node->mem_lock);
-
- DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
-
- /* start operations */
- if (ippdrv->start) {
- ret = ippdrv->start(ippdrv->dev, property->cmd);
- if (ret) {
- DRM_ERROR("failed to start ops.\n");
- ippdrv->c_node = NULL;
- return ret;
- }
- }
-
- return 0;
-
-err_unlock:
- mutex_unlock(&c_node->mem_lock);
- ippdrv->c_node = NULL;
- return ret;
-}
-
-static int ipp_stop_property(struct drm_device *drm_dev,
- struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- struct drm_exynos_ipp_property *property = &c_node->property;
- int i;
-
- DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
-
- /* stop operations */
- if (ippdrv->stop)
- ippdrv->stop(ippdrv->dev, property->cmd);
-
- /* check command */
- switch (property->cmd) {
- case IPP_CMD_M2M:
- for_each_ipp_ops(i)
- ipp_clean_mem_nodes(drm_dev, c_node, i);
- break;
- case IPP_CMD_WB:
- ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
- break;
- case IPP_CMD_OUTPUT:
- ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
- break;
- default:
- DRM_ERROR("invalid operations.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-void ipp_sched_cmd(struct work_struct *work)
-{
- struct drm_exynos_ipp_cmd_work *cmd_work =
- container_of(work, struct drm_exynos_ipp_cmd_work, work);
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- struct drm_exynos_ipp_property *property;
- int ret;
-
- ippdrv = cmd_work->ippdrv;
- if (!ippdrv) {
- DRM_ERROR("invalid ippdrv list.\n");
- return;
- }
-
- c_node = cmd_work->c_node;
- if (!c_node) {
- DRM_ERROR("invalid command node list.\n");
- return;
- }
-
- mutex_lock(&c_node->lock);
-
- property = &c_node->property;
-
- switch (cmd_work->ctrl) {
- case IPP_CTRL_PLAY:
- case IPP_CTRL_RESUME:
- ret = ipp_start_property(ippdrv, c_node);
- if (ret) {
- DRM_ERROR("failed to start property:prop_id[%d]\n",
- c_node->property.prop_id);
- goto err_unlock;
- }
-
- /*
- * M2M case supports wait_completion of transfer.
- * because M2M case supports single unit operation
- * with multiple queue.
- * M2M need to wait completion of data transfer.
- */
- if (ipp_is_m2m_cmd(property->cmd)) {
- if (!wait_for_completion_timeout
- (&c_node->start_complete, msecs_to_jiffies(200))) {
- DRM_ERROR("timeout event:prop_id[%d]\n",
- c_node->property.prop_id);
- goto err_unlock;
- }
- }
- break;
- case IPP_CTRL_STOP:
- case IPP_CTRL_PAUSE:
- ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
- c_node);
- if (ret) {
- DRM_ERROR("failed to stop property.\n");
- goto err_unlock;
- }
-
- complete(&c_node->stop_complete);
- break;
- default:
- DRM_ERROR("unknown control type\n");
- break;
- }
-
- DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
-
-err_unlock:
- mutex_unlock(&c_node->lock);
-}
-
-static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
-{
- struct drm_device *drm_dev = ippdrv->drm_dev;
- struct drm_exynos_ipp_property *property = &c_node->property;
- struct drm_exynos_ipp_mem_node *m_node;
- struct drm_exynos_ipp_queue_buf qbuf;
- struct drm_exynos_ipp_send_event *e;
- struct list_head *head;
- struct timeval now;
- u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
- int ret, i;
-
- for_each_ipp_ops(i)
- DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);
-
- if (!drm_dev) {
- DRM_ERROR("failed to get drm_dev.\n");
- return -EINVAL;
- }
-
- if (!property) {
- DRM_ERROR("failed to get property.\n");
- return -EINVAL;
- }
-
- mutex_lock(&c_node->event_lock);
- if (list_empty(&c_node->event_list)) {
- DRM_DEBUG_KMS("event list is empty.\n");
- ret = 0;
- goto err_event_unlock;
- }
-
- mutex_lock(&c_node->mem_lock);
- if (!ipp_check_mem_list(c_node)) {
- DRM_DEBUG_KMS("empty memory.\n");
- ret = 0;
- goto err_mem_unlock;
- }
-
- /* check command */
- switch (property->cmd) {
- case IPP_CMD_M2M:
- for_each_ipp_ops(i) {
- /* source/destination memory list */
- head = &c_node->mem_list[i];
-
- m_node = list_first_entry(head,
- struct drm_exynos_ipp_mem_node, list);
-
- tbuf_id[i] = m_node->buf_id;
- DRM_DEBUG_KMS("%s buf_id[%d]\n",
- i ? "dst" : "src", tbuf_id[i]);
-
- ret = ipp_put_mem_node(drm_dev, c_node, m_node);
- if (ret)
- DRM_ERROR("failed to put m_node.\n");
- }
- break;
- case IPP_CMD_WB:
- /* clear buf for finding */
- memset(&qbuf, 0x0, sizeof(qbuf));
- qbuf.ops_id = EXYNOS_DRM_OPS_DST;
- qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
-
- /* get memory node entry */
- m_node = ipp_find_mem_node(c_node, &qbuf);
- if (!m_node) {
- DRM_ERROR("empty memory node.\n");
- ret = -ENOMEM;
- goto err_mem_unlock;
- }
-
- tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
-
- ret = ipp_put_mem_node(drm_dev, c_node, m_node);
- if (ret)
- DRM_ERROR("failed to put m_node.\n");
- break;
- case IPP_CMD_OUTPUT:
- /* source memory list */
- head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
-
- m_node = list_first_entry(head,
- struct drm_exynos_ipp_mem_node, list);
-
- tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
-
- ret = ipp_put_mem_node(drm_dev, c_node, m_node);
- if (ret)
- DRM_ERROR("failed to put m_node.\n");
- break;
- default:
- DRM_ERROR("invalid operations.\n");
- ret = -EINVAL;
- goto err_mem_unlock;
- }
- mutex_unlock(&c_node->mem_lock);
-
- if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
- DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
- tbuf_id[1], buf_id[1], property->prop_id);
-
- /*
- * command node have event list of destination buffer
- * If destination buffer enqueue to mem list,
- * then we make event and link to event list tail.
- * so, we get first event for first enqueued buffer.
- */
- e = list_first_entry(&c_node->event_list,
- struct drm_exynos_ipp_send_event, base.link);
-
- do_gettimeofday(&now);
- DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- e->event.prop_id = property->prop_id;
-
- /* set buffer id about source destination */
- for_each_ipp_ops(i)
- e->event.buf_id[i] = tbuf_id[i];
-
- drm_send_event(drm_dev, &e->base);
- mutex_unlock(&c_node->event_lock);
-
- DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
- property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
-
- return 0;
-
-err_mem_unlock:
- mutex_unlock(&c_node->mem_lock);
-err_event_unlock:
- mutex_unlock(&c_node->event_lock);
- return ret;
-}
-
-void ipp_sched_event(struct work_struct *work)
-{
- struct drm_exynos_ipp_event_work *event_work =
- container_of(work, struct drm_exynos_ipp_event_work, work);
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- int ret;
-
- if (!event_work) {
- DRM_ERROR("failed to get event_work.\n");
- return;
- }
-
- DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);
-
- ippdrv = event_work->ippdrv;
- if (!ippdrv) {
- DRM_ERROR("failed to get ipp driver.\n");
- return;
- }
-
- c_node = ippdrv->c_node;
- if (!c_node) {
- DRM_ERROR("failed to get command node.\n");
- return;
- }
-
- /*
- * IPP supports command thread, event thread synchronization.
- * If IPP close immediately from user land, then IPP make
- * synchronization with command thread, so make complete event.
- * or going out operations.
- */
- if (c_node->state != IPP_STATE_START) {
- DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
- c_node->state, c_node->property.prop_id);
- goto err_completion;
- }
-
- ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
- if (ret) {
- DRM_ERROR("failed to send event.\n");
- goto err_completion;
- }
-
-err_completion:
- if (ipp_is_m2m_cmd(c_node->property.cmd))
- complete(&c_node->start_complete);
-}
-
-static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
- struct ipp_context *ctx = get_ipp_context(dev);
- struct exynos_drm_ippdrv *ippdrv;
- int ret, count = 0;
-
- /* get ipp driver entry */
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- ippdrv->drm_dev = drm_dev;
-
- ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
- if (ret < 0) {
- DRM_ERROR("failed to create id.\n");
- goto err;
- }
- ippdrv->prop_list.ipp_id = ret;
-
- DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n",
- count++, ippdrv, ret);
-
- /* store parent device for node */
- ippdrv->parent_dev = dev;
-
- /* store event work queue and handler */
- ippdrv->event_workq = ctx->event_workq;
- ippdrv->sched_event = ipp_sched_event;
- INIT_LIST_HEAD(&ippdrv->cmd_list);
- mutex_init(&ippdrv->cmd_lock);
-
- ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
- if (ret) {
- DRM_ERROR("failed to activate iommu\n");
- goto err;
- }
- }
-
- return 0;
-
-err:
- /* get ipp driver entry */
- list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
- drv_list) {
- drm_iommu_detach_device(drm_dev, ippdrv->dev);
-
- ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
- ippdrv->prop_list.ipp_id);
- }
-
- return ret;
-}
-
-static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- struct exynos_drm_ippdrv *ippdrv, *t;
- struct ipp_context *ctx = get_ipp_context(dev);
-
- /* get ipp driver entry */
- list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
- drm_iommu_detach_device(drm_dev, ippdrv->dev);
-
- ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
- ippdrv->prop_list.ipp_id);
-
- ippdrv->drm_dev = NULL;
- exynos_drm_ippdrv_unregister(ippdrv);
- }
-}
-
-static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
-
- file_priv->ipp_dev = dev;
-
- DRM_DEBUG_KMS("done priv[%pK]\n", dev);
-
- return 0;
-}
-
-static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
-{
- struct exynos_drm_ippdrv *ippdrv = NULL;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
- int count = 0;
-
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- mutex_lock(&ippdrv->cmd_lock);
- list_for_each_entry_safe(c_node, tc_node,
- &ippdrv->cmd_list, list) {
- DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n",
- count++, ippdrv);
-
- if (c_node->filp == file) {
- /*
- * userland goto unnormal state. process killed.
- * and close the file.
- * so, IPP didn't called stop cmd ctrl.
- * so, we are make stop operation in this state.
- */
- if (c_node->state == IPP_STATE_START) {
- ipp_stop_property(drm_dev, ippdrv,
- c_node);
- c_node->state = IPP_STATE_STOP;
- }
-
- ippdrv->dedicated = false;
- ipp_clean_cmd_node(ctx, c_node);
- if (list_empty(&ippdrv->cmd_list))
- pm_runtime_put_sync(ippdrv->dev);
- }
- }
- mutex_unlock(&ippdrv->cmd_lock);
- }
-
- return;
-}
-
-static int ipp_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ipp_context *ctx;
- struct exynos_drm_subdrv *subdrv;
- int ret;
-
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- mutex_init(&ctx->ipp_lock);
- mutex_init(&ctx->prop_lock);
-
- idr_init(&ctx->ipp_idr);
- idr_init(&ctx->prop_idr);
-
- /*
- * create single thread for ipp event
- * IPP supports event thread for IPP drivers.
- * IPP driver send event_work to this thread.
- * and IPP event thread send event to user process.
- */
- ctx->event_workq = create_singlethread_workqueue("ipp_event");
- if (!ctx->event_workq) {
- dev_err(dev, "failed to create event workqueue\n");
- return -EINVAL;
- }
-
- /*
- * create single thread for ipp command
- * IPP supports command thread for user process.
- * user process make command node using set property ioctl.
- * and make start_work and send this work to command thread.
- * and then this command thread start property.
- */
- ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
- if (!ctx->cmd_workq) {
- dev_err(dev, "failed to create cmd workqueue\n");
- ret = -EINVAL;
- goto err_event_workq;
- }
-
- /* set sub driver informations */
- subdrv = &ctx->subdrv;
- subdrv->dev = dev;
- subdrv->probe = ipp_subdrv_probe;
- subdrv->remove = ipp_subdrv_remove;
- subdrv->open = ipp_subdrv_open;
- subdrv->close = ipp_subdrv_close;
-
- platform_set_drvdata(pdev, ctx);
-
- ret = exynos_drm_subdrv_register(subdrv);
- if (ret < 0) {
- DRM_ERROR("failed to register drm ipp device.\n");
- goto err_cmd_workq;
- }
-
- dev_info(dev, "drm ipp registered successfully.\n");
-
- return 0;
-
-err_cmd_workq:
- destroy_workqueue(ctx->cmd_workq);
-err_event_workq:
- destroy_workqueue(ctx->event_workq);
- return ret;
-}
-
-static int ipp_remove(struct platform_device *pdev)
-{
- struct ipp_context *ctx = platform_get_drvdata(pdev);
-
- /* unregister sub driver */
- exynos_drm_subdrv_unregister(&ctx->subdrv);
-
- /* remove,destroy ipp idr */
- idr_destroy(&ctx->ipp_idr);
- idr_destroy(&ctx->prop_idr);
-
- mutex_destroy(&ctx->ipp_lock);
- mutex_destroy(&ctx->prop_lock);
-
- /* destroy command, event work queue */
- destroy_workqueue(ctx->cmd_workq);
- destroy_workqueue(ctx->event_workq);
-
- return 0;
-}
-
-struct platform_driver ipp_driver = {
- .probe = ipp_probe,
- .remove = ipp_remove,
- .driver = {
- .name = "exynos-drm-ipp",
- .owner = THIS_MODULE,
- },
-};
-
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
deleted file mode 100644
index 2a61547..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * Eunchul Kim <chulspro.kim@samsung.com>
- * Jinyoung Jeon <jy0.jeon@samsung.com>
- * Sangmin Lee <lsmin.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_IPP_H_
-#define _EXYNOS_DRM_IPP_H_
-
-#define for_each_ipp_ops(pos) \
- for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
-#define for_each_ipp_planar(pos) \
- for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
-
-#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
-#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
-#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
-
-/* definition of state */
-enum drm_exynos_ipp_state {
- IPP_STATE_IDLE,
- IPP_STATE_START,
- IPP_STATE_STOP,
-};
-
-/*
- * A structure of command work information.
- * @work: work structure.
- * @ippdrv: current work ippdrv.
- * @c_node: command node information.
- * @ctrl: command control.
- */
-struct drm_exynos_ipp_cmd_work {
- struct work_struct work;
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- enum drm_exynos_ipp_ctrl ctrl;
-};
-
-/*
- * A structure of command node.
- *
- * @list: list head to command queue information.
- * @event_list: list head of event.
- * @mem_list: list head to source,destination memory queue information.
- * @lock: lock for synchronization of access to ioctl.
- * @mem_lock: lock for synchronization of access to memory nodes.
- * @event_lock: lock for synchronization of access to scheduled event.
- * @start_complete: completion of start of command.
- * @stop_complete: completion of stop of command.
- * @property: property information.
- * @start_work: start command work structure.
- * @stop_work: stop command work structure.
- * @event_work: event work structure.
- * @state: state of command node.
- * @filp: associated file pointer.
- */
-struct drm_exynos_ipp_cmd_node {
- struct list_head list;
- struct list_head event_list;
- struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
- struct mutex lock;
- struct mutex mem_lock;
- struct mutex event_lock;
- struct completion start_complete;
- struct completion stop_complete;
- struct drm_exynos_ipp_property property;
- struct drm_exynos_ipp_cmd_work *start_work;
- struct drm_exynos_ipp_cmd_work *stop_work;
- struct drm_exynos_ipp_event_work *event_work;
- enum drm_exynos_ipp_state state;
- struct drm_file *filp;
-};
-
-/*
- * A structure of buffer information.
- *
- * @handles: Y, Cb, Cr each gem object handle.
- * @base: Y, Cb, Cr each planar address.
- */
-struct drm_exynos_ipp_buf_info {
- unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
- dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
-};
-
-/*
- * A structure of wb setting information.
- *
- * @enable: enable flag for wb.
- * @refresh: HZ of the refresh rate.
- */
-struct drm_exynos_ipp_set_wb {
- __u32 enable;
- __u32 refresh;
-};
-
-/*
- * A structure of event work information.
- *
- * @work: work structure.
- * @ippdrv: current work ippdrv.
- * @buf_id: id of src, dst buffer.
- */
-struct drm_exynos_ipp_event_work {
- struct work_struct work;
- struct exynos_drm_ippdrv *ippdrv;
- u32 buf_id[EXYNOS_DRM_OPS_MAX];
-};
-
-/*
- * A structure of source,destination operations.
- *
- * @set_fmt: set format of image.
- * @set_transf: set transform(rotations, flip).
- * @set_size: set size of region.
- * @set_addr: set address for dma.
- */
-struct exynos_drm_ipp_ops {
- int (*set_fmt)(struct device *dev, u32 fmt);
- int (*set_transf)(struct device *dev,
- enum drm_exynos_degree degree,
- enum drm_exynos_flip flip, bool *swap);
- int (*set_size)(struct device *dev, int swap,
- struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
- int (*set_addr)(struct device *dev,
- struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
- enum drm_exynos_ipp_buf_type buf_type);
-};
-
-/*
- * A structure of ipp driver.
- *
- * @drv_list: list head for registed sub driver information.
- * @parent_dev: parent device information.
- * @dev: platform device.
- * @drm_dev: drm device.
- * @dedicated: dedicated ipp device.
- * @ops: source, destination operations.
- * @event_workq: event work queue.
- * @c_node: current command information.
- * @cmd_list: list head for command information.
- * @cmd_lock: lock for synchronization of access to cmd_list.
- * @prop_list: property informations of current ipp driver.
- * @check_property: check property about format, size, buffer.
- * @reset: reset ipp block.
- * @start: ipp each device start.
- * @stop: ipp each device stop.
- * @sched_event: work schedule handler.
- */
-struct exynos_drm_ippdrv {
- struct list_head drv_list;
- struct device *parent_dev;
- struct device *dev;
- struct drm_device *drm_dev;
- bool dedicated;
- struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
- struct workqueue_struct *event_workq;
- struct drm_exynos_ipp_cmd_node *c_node;
- struct list_head cmd_list;
- struct mutex cmd_lock;
- struct drm_exynos_ipp_prop_list prop_list;
-
- int (*check_property)(struct device *dev,
- struct drm_exynos_ipp_property *property);
- int (*reset)(struct device *dev);
- int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
- void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
- void (*sched_event)(struct work_struct *work);
-};
-
-#ifdef CONFIG_DRM_EXYNOS_IPP
-extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
-extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
-extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
- struct drm_file *file);
-extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
- struct drm_file *file);
-extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
- struct drm_file *file);
-extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
- struct drm_file *file);
-extern int exynos_drm_ippnb_register(struct notifier_block *nb);
-extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
-extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
-extern void ipp_sched_cmd(struct work_struct *work);
-extern void ipp_sched_event(struct work_struct *work);
-
-#else
-static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
-{
- return -ENODEV;
-}
-
-static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
-{
- return -ENODEV;
-}
-
-static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
-{
- return -ENOTTY;
-}
-
-static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
-{
- return -ENOTTY;
-}
-
-static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file)
-{
- return -ENOTTY;
-}
-
-static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file)
-{
- return -ENOTTY;
-}
-
-static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
-static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
-static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
-{
- return -ENOTTY;
-}
-#endif
-
-#endif /* _EXYNOS_DRM_IPP_H_ */
-
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
deleted file mode 100644
index 71a0b4c..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * YoungJun Cho <yj44.cho@samsung.com>
- * Eunchul Kim <chulspro.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_ROTATOR_H_
-#define _EXYNOS_DRM_ROTATOR_H_
-
-/* TODO */
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 82d1b7e..abd84cb 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -829,7 +829,8 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
DRM_INFO("%s: invalid AVI infoframe (%d)\n", __func__, ret);
}
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi, m);
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi,
+ &hdata->connector, m);
if (!ret)
ret = hdmi_vendor_infoframe_pack(&frm.vendor.hdmi, buf,
sizeof(buf));
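For context on the hunk above: the vendor-infoframe helper now takes the connector so it can consult sink capabilities parsed from the EDID when deciding how to fill the HDMI vendor infoframe. A hedged sketch of the resulting call sequence (pack_vendor_infoframe_sketch() and its buffer handling are illustrative, not the driver's exact code):

#include <drm/drm_edid.h>
#include <linux/hdmi.h>

/* Sketch: build and pack an HDMI vendor infoframe for a connector/mode pair. */
static ssize_t pack_vendor_infoframe_sketch(struct drm_connector *connector,
					    const struct drm_display_mode *mode,
					    void *buf, size_t len)
{
	struct hdmi_vendor_infoframe frm;
	int ret;

	ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm, connector, mode);
	if (ret)
		return ret;	/* e.g. the mode needs no vendor infoframe */

	return hdmi_vendor_infoframe_pack(&frm, buf, len);
}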
@@ -1067,10 +1068,13 @@ static void hdmi_audio_config(struct hdmi_context *hdata)
/* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
| HDMI_I2S_SEL_LRCK(6));
- hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
- | HDMI_I2S_SEL_SDATA2(4));
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3)
+ | HDMI_I2S_SEL_SDATA0(4));
+
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
| HDMI_I2S_SEL_SDATA2(2));
+
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
/* I2S_CON_1 & 2 */
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index dc5d794..257299e 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -179,18 +179,6 @@ static const u8 filter_cr_horiz_tap4[] = {
70, 59, 48, 37, 27, 19, 11, 5,
};
-static inline bool is_alpha_format(unsigned int pixel_format)
-{
- switch (pixel_format) {
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ARGB4444:
- return true;
- default:
- return false;
- }
-}
-
static inline u32 vp_reg_read(struct mixer_context *ctx, u32 reg_id)
{
return readl(ctx->vp_regs + reg_id);
@@ -625,7 +613,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
mixer_reg_write(ctx, MXR_GRAPHIC_BASE(win), dma_addr);
mixer_cfg_layer(ctx, win, priority, true);
- mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->format->format));
+ mixer_cfg_gfx_blend(ctx, win, fb->format->has_alpha);
/* layer update mandatory for mixer 16.0.33.0 */
if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
diff --git a/drivers/gpu/drm/exynos/regs-decon5433.h b/drivers/gpu/drm/exynos/regs-decon5433.h
new file mode 100644
index 0000000..19ad9e4
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-decon5433.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co.Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_DECON5433_H
+#define EXYNOS_REGS_DECON5433_H
+
+/* Exynos543X DECON */
+#define DECON_VIDCON0 0x0000
+#define DECON_VIDOUTCON0 0x0010
+#define DECON_WINCONx(n) (0x0020 + ((n) * 4))
+#define DECON_VIDOSDxH(n) (0x0080 + ((n) * 4))
+#define DECON_SHADOWCON 0x00A0
+#define DECON_VIDOSDxA(n) (0x00B0 + ((n) * 0x20))
+#define DECON_VIDOSDxB(n) (0x00B4 + ((n) * 0x20))
+#define DECON_VIDOSDxC(n) (0x00B8 + ((n) * 0x20))
+#define DECON_VIDOSDxD(n) (0x00BC + ((n) * 0x20))
+#define DECON_VIDOSDxE(n) (0x00C0 + ((n) * 0x20))
+#define DECON_VIDW0xADD0B0(n) (0x0150 + ((n) * 0x10))
+#define DECON_VIDW0xADD0B1(n) (0x0154 + ((n) * 0x10))
+#define DECON_VIDW0xADD0B2(n) (0x0158 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B0(n) (0x01A0 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B1(n) (0x01A4 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B2(n) (0x01A8 + ((n) * 0x10))
+#define DECON_VIDW0xADD2(n) (0x0200 + ((n) * 4))
+#define DECON_LOCALxSIZE(n) (0x0214 + ((n) * 4))
+#define DECON_VIDINTCON0 0x0220
+#define DECON_VIDINTCON1 0x0224
+#define DECON_WxKEYCON0(n) (0x0230 + ((n - 1) * 8))
+#define DECON_WxKEYCON1(n) (0x0234 + ((n - 1) * 8))
+#define DECON_WxKEYALPHA(n) (0x0250 + ((n - 1) * 4))
+#define DECON_WINxMAP(n) (0x0270 + ((n) * 4))
+#define DECON_QOSLUT07_00 0x02C0
+#define DECON_QOSLUT15_08 0x02C4
+#define DECON_QOSCTRL 0x02C8
+#define DECON_BLENDERQx(n) (0x0300 + ((n - 1) * 4))
+#define DECON_BLENDCON 0x0310
+#define DECON_OPE_VIDW0xADD0(n) (0x0400 + ((n) * 4))
+#define DECON_OPE_VIDW0xADD1(n) (0x0414 + ((n) * 4))
+#define DECON_FRAMEFIFO_REG7 0x051C
+#define DECON_FRAMEFIFO_REG8 0x0520
+#define DECON_FRAMEFIFO_STATUS 0x0524
+#define DECON_CMU 0x1404
+#define DECON_UPDATE 0x1410
+#define DECON_CRFMID 0x1414
+#define DECON_UPDATE_SCHEME 0x1438
+#define DECON_VIDCON1 0x2000
+#define DECON_VIDCON2 0x2004
+#define DECON_VIDCON3 0x2008
+#define DECON_VIDCON4 0x200C
+#define DECON_VIDTCON2 0x2028
+#define DECON_FRAME_SIZE 0x2038
+#define DECON_LINECNT_OP_THRESHOLD 0x203C
+#define DECON_TRIGCON 0x2040
+#define DECON_TRIGSKIP 0x2050
+#define DECON_CRCRDATA 0x20B0
+#define DECON_CRCCTRL 0x20B4
+
+/* Exynos5430 DECON */
+#define DECON_VIDTCON0 0x2020
+#define DECON_VIDTCON1 0x2024
+
+/* Exynos5433 DECON */
+#define DECON_VIDTCON00 0x2010
+#define DECON_VIDTCON01 0x2014
+#define DECON_VIDTCON10 0x2018
+#define DECON_VIDTCON11 0x201C
+
+/* Exynos543X DECON Internal */
+#define DECON_W013DSTREOCON 0x0320
+#define DECON_W233DSTREOCON 0x0324
+#define DECON_FRAMEFIFO_REG0 0x0500
+#define DECON_ENHANCER_CTRL 0x2100
+
+/* Exynos543X DECON TV */
+#define DECON_VCLKCON0 0x0014
+#define DECON_VIDINTCON2 0x0228
+#define DECON_VIDINTCON3 0x022C
+
+/* VIDCON0 */
+#define VIDCON0_SWRESET (1 << 28)
+#define VIDCON0_CLKVALUP (1 << 14)
+#define VIDCON0_VLCKFREE (1 << 5)
+#define VIDCON0_STOP_STATUS (1 << 2)
+#define VIDCON0_ENVID (1 << 1)
+#define VIDCON0_ENVID_F (1 << 0)
+
+/* VIDOUTCON0 */
+#define VIDOUT_INTERLACE_FIELD_F (1 << 29)
+#define VIDOUT_INTERLACE_EN_F (1 << 28)
+#define VIDOUT_LCD_ON (1 << 24)
+#define VIDOUT_IF_F_MASK (0x3 << 20)
+#define VIDOUT_RGB_IF (0x0 << 20)
+#define VIDOUT_COMMAND_IF (0x2 << 20)
+
+/* WINCONx */
+#define WINCONx_HAWSWP_F (1 << 16)
+#define WINCONx_WSWP_F (1 << 15)
+#define WINCONx_BURSTLEN_MASK (0x3 << 10)
+#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
+#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
+#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
+#define WINCONx_BLD_PIX_F (1 << 6)
+#define WINCONx_BPPMODE_MASK (0xf << 2)
+#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
+#define WINCONx_BPPMODE_16BPP_A1555 (0x6 << 2)
+#define WINCONx_BPPMODE_16BPP_I1555 (0x7 << 2)
+#define WINCONx_BPPMODE_24BPP_888 (0xb << 2)
+#define WINCONx_BPPMODE_24BPP_A1887 (0xc << 2)
+#define WINCONx_BPPMODE_25BPP_A1888 (0xd << 2)
+#define WINCONx_BPPMODE_32BPP_A8888 (0xd << 2)
+#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
+#define WINCONx_ALPHA_SEL_F (1 << 1)
+#define WINCONx_ENWIN_F (1 << 0)
+
+/* SHADOWCON */
+#define SHADOWCON_PROTECT_MASK GENMASK(14, 10)
+#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
+
+/* VIDOSDxD */
+#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
+#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
+#define VIDOSD_Wx_ALPHA_B_F(n) (((n) & 0xff) << 0)
+
+/* VIDINTCON0 */
+#define VIDINTCON0_FRAMEDONE (1 << 17)
+#define VIDINTCON0_FRAMESEL_BP (0 << 15)
+#define VIDINTCON0_FRAMESEL_VS (1 << 15)
+#define VIDINTCON0_FRAMESEL_AC (2 << 15)
+#define VIDINTCON0_FRAMESEL_FP (3 << 15)
+#define VIDINTCON0_INTFRMEN (1 << 12)
+#define VIDINTCON0_INTEN (1 << 0)
+
+/* VIDINTCON1 */
+#define VIDINTCON1_INTFRMDONEPEND (1 << 2)
+#define VIDINTCON1_INTFRMPEND (1 << 1)
+#define VIDINTCON1_INTFIFOPEND (1 << 0)
+
+/* DECON_CMU */
+#define CMU_CLKGAGE_MODE_SFR_F (1 << 1)
+#define CMU_CLKGAGE_MODE_MEM_F (1 << 0)
+
+/* DECON_UPDATE */
+#define STANDALONE_UPDATE_F (1 << 0)
+
+/* DECON_VIDCON1 */
+#define VIDCON1_LINECNT_MASK (0x0fff << 16)
+#define VIDCON1_I80_ACTIVE (1 << 15)
+#define VIDCON1_VSTATUS_MASK (0x3 << 13)
+#define VIDCON1_VSTATUS_VS (0 << 13)
+#define VIDCON1_VSTATUS_BP (1 << 13)
+#define VIDCON1_VSTATUS_AC (2 << 13)
+#define VIDCON1_VSTATUS_FP (3 << 13)
+#define VIDCON1_VCLK_MASK (0x3 << 9)
+#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
+#define VIDCON1_VCLK_HOLD (0x0 << 9)
+#define VIDCON1_VCLK_RUN (0x1 << 9)
+
+
+/* DECON_VIDTCON00 */
+#define VIDTCON00_VBPD_F(x) (((x) & 0xfff) << 16)
+#define VIDTCON00_VFPD_F(x) ((x) & 0xfff)
+
+/* DECON_VIDTCON01 */
+#define VIDTCON01_VSPW_F(x) (((x) & 0xfff) << 16)
+
+/* DECON_VIDTCON10 */
+#define VIDTCON10_HBPD_F(x) (((x) & 0xfff) << 16)
+#define VIDTCON10_HFPD_F(x) ((x) & 0xfff)
+
+/* DECON_VIDTCON11 */
+#define VIDTCON11_HSPW_F(x) (((x) & 0xfff) << 16)
+
+/* DECON_VIDTCON2 */
+#define VIDTCON2_LINEVAL(x) (((x) & 0xfff) << 16)
+#define VIDTCON2_HOZVAL(x) ((x) & 0xfff)
+
+/* TRIGCON */
+#define TRIGCON_TRIGEN_PER_F (1 << 31)
+#define TRIGCON_TRIGEN_F (1 << 30)
+#define TRIGCON_TE_AUTO_MASK (1 << 29)
+#define TRIGCON_WB_SWTRIGCMD (1 << 28)
+#define TRIGCON_SWTRIGCMD_W4BUF (1 << 26)
+#define TRIGCON_TRIGMODE_W4BUF (1 << 25)
+#define TRIGCON_SWTRIGCMD_W3BUF (1 << 21)
+#define TRIGCON_TRIGMODE_W3BUF (1 << 20)
+#define TRIGCON_SWTRIGCMD_W2BUF (1 << 16)
+#define TRIGCON_TRIGMODE_W2BUF (1 << 15)
+#define TRIGCON_SWTRIGCMD_W1BUF (1 << 11)
+#define TRIGCON_TRIGMODE_W1BUF (1 << 10)
+#define TRIGCON_SWTRIGCMD_W0BUF (1 << 6)
+#define TRIGCON_TRIGMODE_W0BUF (1 << 5)
+#define TRIGCON_HWTRIGMASK (1 << 4)
+#define TRIGCON_HWTRIGEN (1 << 3)
+#define TRIGCON_HWTRIG_INV (1 << 2)
+#define TRIGCON_SWTRIGCMD (1 << 1)
+#define TRIGCON_SWTRIGEN (1 << 0)
+
+/* DECON_CRCCTRL */
+#define CRCCTRL_CRCCLKEN (0x1 << 2)
+#define CRCCTRL_CRCSTART_F (0x1 << 1)
+#define CRCCTRL_CRCEN (0x1 << 0)
+#define CRCCTRL_MASK (0x7)
+
+#endif /* EXYNOS_REGS_DECON5433_H */
diff --git a/drivers/gpu/drm/exynos/regs-decon7.h b/drivers/gpu/drm/exynos/regs-decon7.h
new file mode 100644
index 0000000..5df7765
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-decon7.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Ajay Kumar <ajaykumar.rs@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef EXYNOS_REGS_DECON7_H
+#define EXYNOS_REGS_DECON7_H
+
+/* VIDCON0 */
+#define VIDCON0 0x00
+
+#define VIDCON0_SWRESET (1 << 28)
+#define VIDCON0_DECON_STOP_STATUS (1 << 2)
+#define VIDCON0_ENVID (1 << 1)
+#define VIDCON0_ENVID_F (1 << 0)
+
+/* VIDOUTCON0 */
+#define VIDOUTCON0 0x4
+
+#define VIDOUTCON0_DUAL_MASK (0x3 << 24)
+#define VIDOUTCON0_DUAL_ON (0x3 << 24)
+#define VIDOUTCON0_DISP_IF_1_ON (0x2 << 24)
+#define VIDOUTCON0_DISP_IF_0_ON (0x1 << 24)
+#define VIDOUTCON0_DUAL_OFF (0x0 << 24)
+#define VIDOUTCON0_IF_SHIFT 23
+#define VIDOUTCON0_IF_MASK (0x1 << 23)
+#define VIDOUTCON0_RGBIF (0x0 << 23)
+#define VIDOUTCON0_I80IF (0x1 << 23)
+
+/* VIDCON3 */
+#define VIDCON3 0x8
+
+/* VIDCON4 */
+#define VIDCON4 0xC
+#define VIDCON4_FIFOCNT_START_EN (1 << 0)
+
+/* VCLKCON0 */
+#define VCLKCON0 0x10
+#define VCLKCON0_CLKVALUP (1 << 8)
+#define VCLKCON0_VCLKFREE (1 << 0)
+
+/* VCLKCON */
+#define VCLKCON1 0x14
+#define VCLKCON1_CLKVAL_NUM_VCLK(val) (((val) & 0xff) << 0)
+#define VCLKCON2 0x18
+
+/* SHADOWCON */
+#define SHADOWCON 0x30
+
+#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win)))
+
+/* WINCONx */
+#define WINCON(_win) (0x50 + ((_win) * 4))
+
+#define WINCONx_BUFSTATUS (0x3 << 30)
+#define WINCONx_BUFSEL_MASK (0x3 << 28)
+#define WINCONx_BUFSEL_SHIFT 28
+#define WINCONx_TRIPLE_BUF_MODE (0x1 << 18)
+#define WINCONx_DOUBLE_BUF_MODE (0x0 << 18)
+#define WINCONx_BURSTLEN_16WORD (0x0 << 11)
+#define WINCONx_BURSTLEN_8WORD (0x1 << 11)
+#define WINCONx_BURSTLEN_MASK (0x1 << 11)
+#define WINCONx_BURSTLEN_SHIFT 11
+#define WINCONx_BLD_PLANE (0 << 8)
+#define WINCONx_BLD_PIX (1 << 8)
+#define WINCONx_ALPHA_MUL (1 << 7)
+
+#define WINCONx_BPPMODE_MASK (0xf << 2)
+#define WINCONx_BPPMODE_SHIFT 2
+#define WINCONx_BPPMODE_16BPP_565 (0x8 << 2)
+#define WINCONx_BPPMODE_24BPP_BGRx (0x7 << 2)
+#define WINCONx_BPPMODE_24BPP_RGBx (0x6 << 2)
+#define WINCONx_BPPMODE_24BPP_xBGR (0x5 << 2)
+#define WINCONx_BPPMODE_24BPP_xRGB (0x4 << 2)
+#define WINCONx_BPPMODE_32BPP_BGRA (0x3 << 2)
+#define WINCONx_BPPMODE_32BPP_RGBA (0x2 << 2)
+#define WINCONx_BPPMODE_32BPP_ABGR (0x1 << 2)
+#define WINCONx_BPPMODE_32BPP_ARGB (0x0 << 2)
+#define WINCONx_ALPHA_SEL (1 << 1)
+#define WINCONx_ENWIN (1 << 0)
+
+#define WINCON1_ALPHA_MUL_F (1 << 7)
+#define WINCON2_ALPHA_MUL_F (1 << 7)
+#define WINCON3_ALPHA_MUL_F (1 << 7)
+#define WINCON4_ALPHA_MUL_F (1 << 7)
+
+/* VIDOSDxH: The height for the OSD image (READ ONLY) */
+#define VIDOSD_H(_x) (0x80 + ((_x) * 4))
+
+/* Frame buffer start addresses: VIDWxxADD0n */
+#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10))
+#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10))
+#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10))
+
+#define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8))
+#define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8))
+#define VIDW_OFFSET_X(_win) (0x0170 + ((_win) * 8))
+#define VIDW_OFFSET_Y(_win) (0x0174 + ((_win) * 8))
+#define VIDW_BLKOFFSET(_win) (0x01B0 + ((_win) * 4))
+#define VIDW_BLKSIZE(_win) (0x0200 + ((_win) * 4))
+
+/* Interrupt controls register */
+#define VIDINTCON2 0x228
+
+#define VIDINTCON1_INTEXTRA1_EN (1 << 1)
+#define VIDINTCON1_INTEXTRA0_EN (1 << 0)
+
+/* Interrupt controls and status register */
+#define VIDINTCON3 0x22C
+
+#define VIDINTCON1_INTEXTRA1_PEND (1 << 1)
+#define VIDINTCON1_INTEXTRA0_PEND (1 << 0)
+
+/* VIDOSDxA ~ VIDOSDxE */
+#define VIDOSD_BASE 0x230
+
+#define OSD_STRIDE 0x20
+
+#define VIDOSD_A(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x00)
+#define VIDOSD_B(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x04)
+#define VIDOSD_C(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x08)
+#define VIDOSD_D(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x0C)
+#define VIDOSD_E(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x10)
+
+#define VIDOSDxA_TOPLEFT_X_MASK (0x1fff << 13)
+#define VIDOSDxA_TOPLEFT_X_SHIFT 13
+#define VIDOSDxA_TOPLEFT_X_LIMIT 0x1fff
+#define VIDOSDxA_TOPLEFT_X(_x) (((_x) & 0x1fff) << 13)
+
+#define VIDOSDxA_TOPLEFT_Y_MASK (0x1fff << 0)
+#define VIDOSDxA_TOPLEFT_Y_SHIFT 0
+#define VIDOSDxA_TOPLEFT_Y_LIMIT 0x1fff
+#define VIDOSDxA_TOPLEFT_Y(_x) (((_x) & 0x1fff) << 0)
+
+#define VIDOSDxB_BOTRIGHT_X_MASK (0x1fff << 13)
+#define VIDOSDxB_BOTRIGHT_X_SHIFT 13
+#define VIDOSDxB_BOTRIGHT_X_LIMIT 0x1fff
+#define VIDOSDxB_BOTRIGHT_X(_x) (((_x) & 0x1fff) << 13)
+
+#define VIDOSDxB_BOTRIGHT_Y_MASK (0x1fff << 0)
+#define VIDOSDxB_BOTRIGHT_Y_SHIFT 0
+#define VIDOSDxB_BOTRIGHT_Y_LIMIT 0x1fff
+#define VIDOSDxB_BOTRIGHT_Y(_x) (((_x) & 0x1fff) << 0)
+
+#define VIDOSDxC_ALPHA0_R_F(_x) (((_x) & 0xFF) << 16)
+#define VIDOSDxC_ALPHA0_G_F(_x) (((_x) & 0xFF) << 8)
+#define VIDOSDxC_ALPHA0_B_F(_x) (((_x) & 0xFF) << 0)
+
+#define VIDOSDxD_ALPHA1_R_F(_x) (((_x) & 0xFF) << 16)
+#define VIDOSDxD_ALPHA1_G_F(_x) (((_x) & 0xFF) << 8)
+#define VIDOSDxD_ALPHA1_B_F(_x) (((_x) & 0xFF) << 0)
+
+/* Window MAP (Color map) */
+#define WINxMAP(_win) (0x340 + ((_win) * 4))
+
+#define WINxMAP_MAP (1 << 24)
+#define WINxMAP_MAP_COLOUR_MASK (0xffffff << 0)
+#define WINxMAP_MAP_COLOUR_SHIFT 0
+#define WINxMAP_MAP_COLOUR_LIMIT 0xffffff
+#define WINxMAP_MAP_COLOUR(_x) ((_x) << 0)
+
+/* Window colour-key control registers */
+#define WKEYCON 0x370
+
+#define WKEYCON0 0x00
+#define WKEYCON1 0x04
+#define WxKEYCON0_KEYBL_EN (1 << 26)
+#define WxKEYCON0_KEYEN_F (1 << 25)
+#define WxKEYCON0_DIRCON (1 << 24)
+#define WxKEYCON0_COMPKEY_MASK (0xffffff << 0)
+#define WxKEYCON0_COMPKEY_SHIFT 0
+#define WxKEYCON0_COMPKEY_LIMIT 0xffffff
+#define WxKEYCON0_COMPKEY(_x) ((_x) << 0)
+#define WxKEYCON1_COLVAL_MASK (0xffffff << 0)
+#define WxKEYCON1_COLVAL_SHIFT 0
+#define WxKEYCON1_COLVAL_LIMIT 0xffffff
+#define WxKEYCON1_COLVAL(_x) ((_x) << 0)
+
+/* color key control register for hardware window 1 ~ 4. */
+#define WKEYCON0_BASE(x) ((WKEYCON + WKEYCON0) + ((x - 1) * 8))
+/* color key value register for hardware window 1 ~ 4. */
+#define WKEYCON1_BASE(x) ((WKEYCON + WKEYCON1) + ((x - 1) * 8))
+
+/* Window KEY Alpha value */
+#define WxKEYALPHA(_win) (0x3A0 + (((_win) - 1) * 0x4))
+
+#define Wx_KEYALPHA_R_F_SHIFT 16
+#define Wx_KEYALPHA_G_F_SHIFT 8
+#define Wx_KEYALPHA_B_F_SHIFT 0
+
+/* Blending equation */
+#define BLENDE(_win) (0x03C0 + ((_win) * 4))
+#define BLENDE_COEF_ZERO 0x0
+#define BLENDE_COEF_ONE 0x1
+#define BLENDE_COEF_ALPHA_A 0x2
+#define BLENDE_COEF_ONE_MINUS_ALPHA_A 0x3
+#define BLENDE_COEF_ALPHA_B 0x4
+#define BLENDE_COEF_ONE_MINUS_ALPHA_B 0x5
+#define BLENDE_COEF_ALPHA0 0x6
+#define BLENDE_COEF_A 0xA
+#define BLENDE_COEF_ONE_MINUS_A 0xB
+#define BLENDE_COEF_B 0xC
+#define BLENDE_COEF_ONE_MINUS_B 0xD
+#define BLENDE_Q_FUNC(_v) ((_v) << 18)
+#define BLENDE_P_FUNC(_v) ((_v) << 12)
+#define BLENDE_B_FUNC(_v) ((_v) << 6)
+#define BLENDE_A_FUNC(_v) ((_v) << 0)
+
+/* Blending equation control */
+#define BLENDCON 0x3D8
+#define BLENDCON_NEW_MASK (1 << 0)
+#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
+#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
+
+/* Interrupt control register */
+#define VIDINTCON0 0x500
+
+#define VIDINTCON0_WAKEUP_MASK (0x3f << 26)
+#define VIDINTCON0_INTEXTRAEN (1 << 21)
+
+#define VIDINTCON0_FRAMESEL0_SHIFT 15
+#define VIDINTCON0_FRAMESEL0_MASK (0x3 << 15)
+#define VIDINTCON0_FRAMESEL0_BACKPORCH (0x0 << 15)
+#define VIDINTCON0_FRAMESEL0_VSYNC (0x1 << 15)
+#define VIDINTCON0_FRAMESEL0_ACTIVE (0x2 << 15)
+#define VIDINTCON0_FRAMESEL0_FRONTPORCH (0x3 << 15)
+
+#define VIDINTCON0_INT_FRAME (1 << 11)
+
+#define VIDINTCON0_FIFOLEVEL_MASK (0x7 << 3)
+#define VIDINTCON0_FIFOLEVEL_SHIFT 3
+#define VIDINTCON0_FIFOLEVEL_EMPTY (0x0 << 3)
+#define VIDINTCON0_FIFOLEVEL_TO25PC (0x1 << 3)
+#define VIDINTCON0_FIFOLEVEL_TO50PC (0x2 << 3)
+#define VIDINTCON0_FIFOLEVEL_FULL (0x4 << 3)
+
+#define VIDINTCON0_FIFOSEL_MAIN_EN (1 << 1)
+#define VIDINTCON0_INT_FIFO (1 << 1)
+
+#define VIDINTCON0_INT_ENABLE (1 << 0)
+
+/* Interrupt controls and status register */
+#define VIDINTCON1 0x504
+
+#define VIDINTCON1_INT_EXTRA (1 << 3)
+#define VIDINTCON1_INT_I80 (1 << 2)
+#define VIDINTCON1_INT_FRAME (1 << 1)
+#define VIDINTCON1_INT_FIFO (1 << 0)
+
+/* VIDCON1 */
+#define VIDCON1(_x) (0x0600 + ((_x) * 0x50))
+#define VIDCON1_LINECNT_GET(_v) (((_v) >> 17) & 0x1fff)
+#define VIDCON1_VCLK_MASK (0x3 << 9)
+#define VIDCON1_VCLK_HOLD (0x0 << 9)
+#define VIDCON1_VCLK_RUN (0x1 << 9)
+#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
+#define VIDCON1_RGB_ORDER_O_MASK (0x7 << 4)
+#define VIDCON1_RGB_ORDER_O_RGB (0x0 << 4)
+#define VIDCON1_RGB_ORDER_O_GBR (0x1 << 4)
+#define VIDCON1_RGB_ORDER_O_BRG (0x2 << 4)
+#define VIDCON1_RGB_ORDER_O_BGR (0x4 << 4)
+#define VIDCON1_RGB_ORDER_O_RBG (0x5 << 4)
+#define VIDCON1_RGB_ORDER_O_GRB (0x6 << 4)
+
+/* VIDTCON0 */
+#define VIDTCON0 0x610
+
+#define VIDTCON0_VBPD_MASK (0xffff << 16)
+#define VIDTCON0_VBPD_SHIFT 16
+#define VIDTCON0_VBPD_LIMIT 0xffff
+#define VIDTCON0_VBPD(_x) ((_x) << 16)
+
+#define VIDTCON0_VFPD_MASK (0xffff << 0)
+#define VIDTCON0_VFPD_SHIFT 0
+#define VIDTCON0_VFPD_LIMIT 0xffff
+#define VIDTCON0_VFPD(_x) ((_x) << 0)
+
+/* VIDTCON1 */
+#define VIDTCON1 0x614
+
+#define VIDTCON1_VSPW_MASK (0xffff << 16)
+#define VIDTCON1_VSPW_SHIFT 16
+#define VIDTCON1_VSPW_LIMIT 0xffff
+#define VIDTCON1_VSPW(_x) ((_x) << 16)
+
+/* VIDTCON2 */
+#define VIDTCON2 0x618
+
+#define VIDTCON2_HBPD_MASK (0xffff << 16)
+#define VIDTCON2_HBPD_SHIFT 16
+#define VIDTCON2_HBPD_LIMIT 0xffff
+#define VIDTCON2_HBPD(_x) ((_x) << 16)
+
+#define VIDTCON2_HFPD_MASK (0xffff << 0)
+#define VIDTCON2_HFPD_SHIFT 0
+#define VIDTCON2_HFPD_LIMIT 0xffff
+#define VIDTCON2_HFPD(_x) ((_x) << 0)
+
+/* VIDTCON3 */
+#define VIDTCON3 0x61C
+
+#define VIDTCON3_HSPW_MASK (0xffff << 16)
+#define VIDTCON3_HSPW_SHIFT 16
+#define VIDTCON3_HSPW_LIMIT 0xffff
+#define VIDTCON3_HSPW(_x) ((_x) << 16)
+
+/* VIDTCON4 */
+#define VIDTCON4 0x620
+
+#define VIDTCON4_LINEVAL_MASK (0xfff << 16)
+#define VIDTCON4_LINEVAL_SHIFT 16
+#define VIDTCON4_LINEVAL_LIMIT 0xfff
+#define VIDTCON4_LINEVAL(_x) (((_x) & 0xfff) << 16)
+
+#define VIDTCON4_HOZVAL_MASK (0xfff << 0)
+#define VIDTCON4_HOZVAL_SHIFT 0
+#define VIDTCON4_HOZVAL_LIMIT 0xfff
+#define VIDTCON4_HOZVAL(_x) (((_x) & 0xfff) << 0)
+
+/* LINECNT OP THRESHOLD */
+#define LINECNT_OP_THRESHOLD 0x630
+
+/* CRCCTRL */
+#define CRCCTRL 0x6C8
+#define CRCCTRL_CRCCLKEN (0x1 << 2)
+#define CRCCTRL_CRCSTART_F (0x1 << 1)
+#define CRCCTRL_CRCEN (0x1 << 0)
+
+/* DECON_CMU */
+#define DECON_CMU 0x704
+
+#define DECON_CMU_ALL_CLKGATE_ENABLE 0x3
+#define DECON_CMU_SE_CLKGATE_ENABLE (0x1 << 2)
+#define DECON_CMU_SFR_CLKGATE_ENABLE (0x1 << 1)
+#define DECON_CMU_MEM_CLKGATE_ENABLE (0x1 << 0)
+
+/* DECON_UPDATE */
+#define DECON_UPDATE 0x710
+
+#define DECON_UPDATE_SLAVE_SYNC (1 << 4)
+#define DECON_UPDATE_STANDALONE_F (1 << 0)
+
+#endif /* EXYNOS_REGS_DECON7_H */
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index 3049613..d7cbe53 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -569,7 +569,7 @@
#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0))
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
/* Real input DMA size register */
#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 04be0f7e..4420c20 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -464,7 +464,7 @@
/* I2S_PIN_SEL_1 */
#define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4)
-#define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7)
+#define HDMI_I2S_SEL_SDATA0(x) ((x) & 0x7)
/* I2S_PIN_SEL_2 */
#define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index faf17b8..8023232 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -27,6 +27,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
@@ -188,26 +189,17 @@ static struct drm_driver fsl_dcu_drm_driver = {
static int fsl_dcu_drm_pm_suspend(struct device *dev)
{
struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
+ int ret;
if (!fsl_dev)
return 0;
disable_irq(fsl_dev->irq);
- drm_kms_helper_poll_disable(fsl_dev->drm);
-
- console_lock();
- drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
- console_unlock();
-
- fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
- if (IS_ERR(fsl_dev->state)) {
- console_lock();
- drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
- console_unlock();
- drm_kms_helper_poll_enable(fsl_dev->drm);
+ ret = drm_mode_config_helper_suspend(fsl_dev->drm);
+ if (ret) {
enable_irq(fsl_dev->irq);
- return PTR_ERR(fsl_dev->state);
+ return ret;
}
clk_disable_unprepare(fsl_dev->clk);
@@ -233,13 +225,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
enable_irq(fsl_dev->irq);
- drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
-
- console_lock();
- drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
- console_unlock();
- drm_kms_helper_poll_enable(fsl_dev->drm);
+ drm_mode_config_helper_resume(fsl_dev->drm);
return 0;
}
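The two hunks above fold the hand-rolled poll-disable / fbdev-suspend / atomic-suspend sequence into drm_mode_config_helper_suspend() and drm_mode_config_helper_resume(), which keep the saved atomic state inside struct drm_mode_config, so the driver no longer needs its own state pointer. A minimal sketch of the resulting PM-hook shape, assuming an already registered struct drm_device:

#include <drm/drm_modeset_helper.h>

static int example_suspend(struct drm_device *drm)
{
	/* Disables polling, suspends fbdev and saves the atomic state internally. */
	return drm_mode_config_helper_suspend(drm);
}

static int example_resume(struct drm_device *drm)
{
	/* Restores the saved atomic state, then re-enables fbdev and polling. */
	return drm_mode_config_helper_resume(drm);
}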
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index da9bfd4..93bfb98 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -196,7 +196,6 @@ struct fsl_dcu_drm_device {
struct drm_encoder encoder;
struct fsl_dcu_drm_connector connector;
const struct fsl_dcu_soc_data *soc;
- struct drm_atomic_state *state;
};
int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev);
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 8745971..3a3bf75 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -185,21 +185,22 @@ static int cdv_backlight_init(struct drm_device *dev)
* for this and the MID devices.
*/
-static inline u32 CDV_MSG_READ32(uint port, uint offset)
+static inline u32 CDV_MSG_READ32(int domain, uint port, uint offset)
{
int mcr = (0x10<<24) | (port << 16) | (offset << 8);
uint32_t ret_val = 0;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_read_config_dword(pci_root, 0xD4, &ret_val);
pci_dev_put(pci_root);
return ret_val;
}
-static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
+ u32 value)
{
int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD4, value);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
@@ -216,11 +217,12 @@ static void cdv_init_pm(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 pwr_cnt;
+ int domain = pci_domain_nr(dev->pdev->bus);
int i;
- dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
PSB_APMBA) & 0xFFFF;
- dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ dev_priv->ospm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
PSB_OSPMBA) & 0xFFFF;
/* Power status */
@@ -251,7 +253,7 @@ static void cdv_errata(struct drm_device *dev)
* Bonus Launch to work around the issue, by degrading
* performance.
*/
- CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+ CDV_MSG_WRITE32(pci_domain_nr(dev->pdev->bus), 3, 0x30, 0x08027108);
}
/**
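pci_get_bus_and_slot() implicitly assumes PCI domain 0 and is being removed from the tree; its replacement, pci_get_domain_bus_and_slot(), takes the domain explicitly, which is why every call site here first derives it from the GPU's own bus. A small sketch of the lookup pattern these gma500 hunks repeat (the helper name is illustrative):

#include <linux/pci.h>

/* Find the host bridge at 00.0 in the same PCI domain as the GPU. */
static struct pci_dev *example_get_pci_root(struct pci_dev *gpu)
{
	int domain = pci_domain_nr(gpu->bus);

	return pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
}

The returned device holds a reference; callers drop it with pci_dev_put() once the config-space access is done, exactly as the hunks above do.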
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2570c7f..cb0a2ae 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -576,13 +576,6 @@ static void psb_fbdev_fini(struct drm_device *dev)
dev_priv->fbdev = NULL;
}
-static void psbfb_output_poll_changed(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
- drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
-}
-
/**
 * psb_user_framebuffer_create_handle - add handle to a framebuffer
* @fb: framebuffer
@@ -623,7 +616,7 @@ static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
- .output_poll_changed = psbfb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
};
static void psb_setup_outputs(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 4a295f9..a7fb6de 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -19,7 +19,9 @@
void gma_get_core_freq(struct drm_device *dev)
{
uint32_t clock;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct pci_dev *pci_root =
+ pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ 0, 0);
struct drm_psb_private *dev_priv = dev->dev_private;
/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 1fa1633..7171b74 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -32,7 +32,9 @@
static void mid_get_fuse_settings(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct pci_dev *pci_root =
+ pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ 0, 0);
uint32_t fuse_value = 0;
uint32_t fuse_value_tmp = 0;
@@ -104,7 +106,9 @@ static void mid_get_fuse_settings(struct drm_device *dev)
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
uint32_t platform_rev_id = 0;
- struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+ int domain = pci_domain_nr(dev_priv->dev->pdev->bus);
+ struct pci_dev *pci_gfx_root =
+ pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
if (pci_gfx_root == NULL) {
WARN_ON(1);
@@ -281,7 +285,9 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
u32 addr;
u8 __iomem *vbt_virtual;
struct mid_vbt_header vbt_header;
- struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+ struct pci_dev *pci_gfx_root =
+ pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+ 0, PCI_DEVFN(2, 0));
int ret = -1;
/* Get the address of the platform config vbt */
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 0eaf11c..ccb161c 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -395,7 +395,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
atomic_set(&pd->driver->needs_tlbflush, 1);
}
- kunmap_atomic(pt->v);
+ kunmap_atomic(v);
spin_unlock(&pd->driver->lock);
psb_mmu_free_pt(pt);
return;
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 37a3be7..ac32ab5 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -107,19 +107,6 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static const struct drm_ioctl_desc psb_ioctls[] = {
};
-static void psb_driver_lastclose(struct drm_device *dev)
-{
- int ret;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_fbdev *fbdev = dev_priv->fbdev;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->psb_fb_helper);
- if (ret)
- DRM_DEBUG("failed to restore crtc mode\n");
-
- return;
-}
-
static int psb_do_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -261,7 +248,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
goto out_err;
if (IS_MRST(dev)) {
- dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
+ int domain = pci_domain_nr(dev->pdev->bus);
+
+ dev_priv->aux_pdev =
+ pci_get_domain_bus_and_slot(domain, 0,
+ PCI_DEVFN(3, 0));
if (dev_priv->aux_pdev) {
resource_start = pci_resource_start(dev_priv->aux_pdev,
@@ -281,7 +272,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
}
dev_priv->gmbus_reg = dev_priv->aux_reg;
- dev_priv->lpc_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(31, 0));
+ dev_priv->lpc_pdev =
+ pci_get_domain_bus_and_slot(domain, 0,
+ PCI_DEVFN(31, 0));
if (dev_priv->lpc_pdev) {
pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
&dev_priv->lpc_gpio_base);
@@ -479,7 +472,7 @@ static struct drm_driver driver = {
DRIVER_MODESET | DRIVER_GEM,
.load = psb_driver_load,
.unload = psb_driver_unload,
- .lastclose = psb_driver_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
.irq_preinstall = psb_irq_preinstall,
@@ -527,4 +520,4 @@ module_exit(psb_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 821497d..e8300f5 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -36,7 +36,6 @@
#include "mmu.h"
#define DRIVER_AUTHOR "Alan Cox <alan@linux.intel.com> and others"
-#define DRIVER_LICENSE "GPL"
#define DRIVER_NAME "gma500"
#define DRIVER_DESC "DRM driver for the Intel GMA500, GMA600, GMA3600, GMA3650"
@@ -781,38 +780,40 @@ extern const struct psb_ops cdv_chip_ops;
extern int drm_idle_check_interval;
/* Utilities */
-static inline u32 MRST_MSG_READ32(uint port, uint offset)
+static inline u32 MRST_MSG_READ32(int domain, uint port, uint offset)
{
int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
uint32_t ret_val = 0;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_read_config_dword(pci_root, 0xD4, &ret_val);
pci_dev_put(pci_root);
return ret_val;
}
-static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
+static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
+ u32 value)
{
int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD4, value);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
}
-static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
+static inline u32 MDFLD_MSG_READ32(int domain, uint port, uint offset)
{
int mcr = (0x10<<24) | (port << 16) | (offset << 8);
uint32_t ret_val = 0;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_read_config_dword(pci_root, 0xD4, &ret_val);
pci_dev_put(pci_root);
return ret_val;
}
-static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
+static inline void MDFLD_MSG_WRITE32(int domain, uint port, uint offset,
+ u32 value)
{
int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD4, value);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 3518167..4871025 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -200,10 +200,8 @@ static struct ttm_backend_func hibmc_tt_backend_func = {
.destroy = &hibmc_ttm_backend_destroy,
};
-static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size,
- u32 page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_buffer_object *bo,
+ u32 page_flags)
{
struct ttm_tt *tt;
int ret;
@@ -214,7 +212,7 @@ static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
return NULL;
}
tt->func = &hibmc_tt_backend_func;
- ret = ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page);
+ ret = ttm_tt_init(tt, bo, page_flags);
if (ret) {
DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
kfree(tt);
@@ -223,20 +221,8 @@ static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
return tt;
}
-static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
-{
- return ttm_pool_populate(ttm);
-}
-
-static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate(ttm);
-}
-
struct ttm_bo_driver hibmc_bo_driver = {
.ttm_tt_create = hibmc_ttm_tt_create,
- .ttm_tt_populate = hibmc_ttm_tt_populate,
- .ttm_tt_unpopulate = hibmc_ttm_tt_unpopulate,
.init_mem_type = hibmc_bo_init_mem_type,
.evict_flags = hibmc_bo_evict_flags,
.move = NULL,
@@ -330,7 +316,7 @@ int hibmc_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&hibmc->bdev, &hibmcbo->bo, size,
ttm_bo_type_device, &hibmcbo->placement,
- align >> PAGE_SHIFT, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, acc_size,
NULL, NULL, hibmc_bo_ttm_destroy);
if (ret) {
hibmc_bo_unref(&hibmcbo);
@@ -344,6 +330,7 @@ int hibmc_bo_create(struct drm_device *dev, int size, int align,
int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
@@ -356,7 +343,7 @@ int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
hibmc_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
@@ -368,6 +355,7 @@ int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
int hibmc_bo_unpin(struct hibmc_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
@@ -380,7 +368,7 @@ int hibmc_bo_unpin(struct hibmc_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret) {
DRM_ERROR("validate failed for unpin: %d\n", ret);
return ret;
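ttm_bo_validate() no longer takes separate interruptible/no_wait_gpu booleans; callers pass a struct ttm_operation_ctx instead, which is what the { false, false } initialisers in the pin/unpin hunks provide. A sketch with the fields named explicitly (the function name is illustrative):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Validate a buffer object against a placement with an explicit context. */
static int example_validate(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,	/* do not let signals abort the operation */
		.no_wait_gpu = false,	/* wait for the GPU when necessary */
	};

	return ttm_bo_validate(bo, placement, &ctx);
}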
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index ecaa587..c52d7a3 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -326,8 +326,7 @@ sil164_encoder_destroy(struct drm_encoder *encoder)
{
struct sil164_priv *priv = to_sil164_priv(encoder);
- if (priv->duallink_slave)
- i2c_unregister_device(priv->duallink_slave);
+ i2c_unregister_device(priv->duallink_slave);
kfree(priv);
drm_i2c_encoder_destroy(encoder);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 1278152..9e67a7b 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1100,7 +1100,6 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
n = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
kfree(edid);
@@ -1601,8 +1600,7 @@ fail:
/* if encoder_init fails, the encoder slave is never registered,
* so cleanup here:
*/
- if (priv->cec)
- i2c_unregister_device(priv->cec);
+ i2c_unregister_device(priv->cec);
return -ENXIO;
}
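Both encoder teardown hunks rely on i2c_unregister_device() accepting a NULL client as a no-op (the same convention as kfree()), which makes the removed if guards redundant. The resulting cleanup shape, sketched with an optional dummy client:

#include <linux/i2c.h>

/* Unregister an optional dummy I2C client; NULL is tolerated, no guard needed. */
static void example_cleanup(struct i2c_client *optional_client)
{
	i2c_unregister_device(optional_client);
}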
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index aed7d20..108d21f 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -18,6 +18,7 @@ config DRM_I915_WERROR
config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
+ select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
select DRM_DP_AUX_CHARDEV
@@ -49,6 +50,20 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_TRACE_GEM
+ bool "Insert extra ftrace output from the GEM internals"
+ depends on DRM_I915_DEBUG_GEM
+ select TRACING
+ default n
+ help
+ Enable additional and verbose debugging output that will spam
+ ordinary tests, but may be vital for post-mortem debugging when
+ used with /proc/sys/kernel/ftrace_dump_on_oops
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
bool "Enable additional driver debugging for fence objects"
depends on DRM_I915
@@ -90,6 +105,20 @@ config DRM_I915_SELFTEST
If in doubt, say "N".
+config DRM_I915_SELFTEST_BROKEN
+ bool "Enable broken and dangerous selftests"
+ depends on DRM_I915_SELFTEST
+ depends on BROKEN
+ default n
+ help
+ This option enables the execution of selftests that are "dangerous"
+ and may trigger unintended HW side-effects as they break strict
+ rules given in the HW specification. For science.
+
+ Recommended for masochistic driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_LOW_LEVEL_TRACEPOINTS
bool "Enable low level request tracing events"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2acf3b3..4eee91a 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,7 +3,27 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+# Add a set of useful warning flags and enable -Werror for CI to prevent
+# trivial mistakes from creeping in. We have to do this piecemeal as we reject
+# any patch that isn't warning clean, so when turning on -Wall -Wextra (or W=1)
+# we need to filter out dubious warnings. Still, it is in our interest
+# to keep running locally with W=1 C=1 until we are completely clean.
+#
+# Note the danger in using -Wall -Wextra is that when CI updates gcc we
+# will most likely get a sudden build breakage... Hopefully we will fix
+# new warnings before CI updates!
+subdir-ccflags-y := -Wall -Wextra
+subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
+subdir-ccflags-y += $(call cc-disable-warning, type-limits)
+subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
+subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
+subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
+
+# Fine grained warnings disable
+CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
+CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
+
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
@@ -27,6 +47,7 @@ i915-y := i915_drv.o \
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
+i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# GEM code
i915-y += i915_cmd_parser.o \
@@ -42,13 +63,14 @@ i915-y += i915_cmd_parser.o \
i915_gem.o \
i915_gem_object.o \
i915_gem_render_state.o \
- i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_timeline.o \
i915_gem_userptr.o \
i915_gemfs.o \
+ i915_query.o \
+ i915_request.o \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
@@ -63,11 +85,13 @@ i915-y += i915_cmd_parser.o \
i915-y += intel_uc.o \
intel_uc_fw.o \
intel_guc.o \
+ intel_guc_ads.o \
intel_guc_ct.o \
- intel_guc_log.o \
intel_guc_fw.o \
+ intel_guc_log.o \
+ intel_guc_submission.o \
intel_huc.o \
- i915_guc_submission.o
+ intel_huc_fw.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
@@ -88,6 +112,7 @@ i915-y += intel_audio.o \
intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
+ intel_hdcp.o \
intel_hotplug.o \
intel_modes.o \
intel_overlay.o \
@@ -144,7 +169,9 @@ i915-y += i915_perf.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
i915_oa_glk.o \
- i915_oa_cflgt2.o
+ i915_oa_cflgt2.o \
+ i915_oa_cflgt3.o \
+ i915_oa_cnl.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 4950b82..c73aff1 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -59,28 +59,28 @@
* This must not be set while VR01_DVO_BYPASS_ENABLE is set.
*/
# define VR01_LCD_ENABLE (1 << 2)
-/** Enables the DVO repeater. */
+/* Enables the DVO repeater. */
# define VR01_DVO_BYPASS_ENABLE (1 << 1)
-/** Enables the DVO clock */
+/* Enables the DVO clock */
# define VR01_DVO_ENABLE (1 << 0)
-/** Enable dithering for 18bpp panels. Not documented. */
+/* Enable dithering for 18bpp panels. Not documented. */
# define VR01_DITHER_ENABLE (1 << 4)
/*
* LCD Interface Format
*/
#define VR10 0x10
-/** Enables LVDS output instead of CMOS */
+/* Enables LVDS output instead of CMOS */
# define VR10_LVDS_ENABLE (1 << 4)
-/** Enables 18-bit LVDS output. */
+/* Enables 18-bit LVDS output. */
# define VR10_INTERFACE_1X18 (0 << 2)
-/** Enables 24-bit LVDS or CMOS output */
+/* Enables 24-bit LVDS or CMOS output */
# define VR10_INTERFACE_1X24 (1 << 2)
-/** Enables 2x18-bit LVDS or CMOS output. */
+/* Enables 2x18-bit LVDS or CMOS output. */
# define VR10_INTERFACE_2X18 (2 << 2)
-/** Enables 2x24-bit LVDS output */
+/* Enables 2x24-bit LVDS output */
# define VR10_INTERFACE_2X24 (3 << 2)
-/** Mask that defines the depth of the pipeline */
+/* Mask that defines the depth of the pipeline */
# define VR10_INTERFACE_DEPTH_MASK (3 << 2)
/*
@@ -97,7 +97,7 @@
* Panel power down status
*/
#define VR30 0x30
-/** Read only bit indicating that the panel is not in a safe poweroff state. */
+/* Read only bit indicating that the panel is not in a safe poweroff state. */
# define VR30_PANEL_ON (1 << 15)
#define VR40 0x40
@@ -183,7 +183,7 @@ struct ivch_priv {
static void ivch_dump_regs(struct intel_dvo_device *dvo);
-/**
+/*
* Reads a register on the ivch.
*
* Each of the 256 registers are 16 bits long.
@@ -230,7 +230,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
return false;
}
-/** Writes a 16-bit register on the ivch */
+/* Writes a 16-bit register on the ivch */
static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
@@ -258,7 +258,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
return false;
}
-/** Probes the given bus and slave address for an ivch */
+/* Probes the given bus and slave address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
@@ -338,7 +338,7 @@ static void ivch_reset(struct intel_dvo_device *dvo)
ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
}
-/** Sets the power state of the panel connected to the ivch */
+/* Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 2641ba5..b016dc7 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -2,7 +2,8 @@
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
- execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
+ execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
+ fb_decoder.o dmabuf.o page_track.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4ce2e6b..c62346f 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -119,16 +119,6 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
- if (map) {
- vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
- MEMREMAP_WC);
- if (!vgpu->gm.aperture_va)
- return -ENOMEM;
- } else {
- memunmap(vgpu->gm.aperture_va);
- vgpu->gm.aperture_va = NULL;
- }
-
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -141,11 +131,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
aperture_pa >> PAGE_SHIFT,
aperture_sz >> PAGE_SHIFT,
map);
- if (ret) {
- memunmap(vgpu->gm.aperture_va);
- vgpu->gm.aperture_va = NULL;
+ if (ret)
return ret;
- }
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
@@ -335,7 +322,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
case INTEL_GVT_PCI_OPREGION:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
- ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+ ret = intel_vgpu_opregion_base_write_handler(vgpu,
+ *(u32 *)p_data);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 49af946..db6b94d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -471,6 +471,7 @@ struct parser_exec_state {
* used when ret from 2nd level batch buffer
*/
int saved_buf_addr_type;
+ bool is_ctx_wa;
struct cmd_info *info;
@@ -709,18 +710,13 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
print_opcode(cmd_val(s, 0), s->ring_id);
- /* print the whole page to trace */
- pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
- s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
- cmd_val(s, 2), cmd_val(s, 3));
-
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
while (cnt < 1024) {
- pr_err("ip_va=%p: ", s->ip_va);
+ gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
for (i = 0; i < 8; i++)
- pr_err("%08x ", cmd_val(s, i));
- pr_err("\n");
+ gvt_dbg_cmd("%08x ", cmd_val(s, i));
+ gvt_dbg_cmd("\n");
s->ip_va += 8 * sizeof(u32);
cnt += 8;
@@ -825,11 +821,26 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
offset, data);
- return -EINVAL;
+ return -EPERM;
}
return 0;
}
+static inline bool is_mocs_mmio(unsigned int offset)
+{
+ return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
+ ((offset >= 0xb020) && (offset <= 0xb0a0));
+}
+
+static int mocs_cmd_reg_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index)
+{
+ if (!is_mocs_mmio(offset))
+ return -EINVAL;
+ vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
+ return 0;
+}
+
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
@@ -839,7 +850,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
cmd, offset);
- return -EINVAL;
+ return -EFAULT;
}
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
@@ -853,10 +864,14 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return 0;
}
- if (is_force_nonpriv_mmio(offset) &&
- force_nonpriv_reg_handler(s, offset, index))
+ if (is_mocs_mmio(offset) &&
+ mocs_cmd_reg_handler(s, offset, index))
return -EINVAL;
+ if (is_force_nonpriv_mmio(offset) &&
+ force_nonpriv_reg_handler(s, offset, index))
+ return -EPERM;
+
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -894,11 +909,14 @@ static int cmd_handler_lri(struct parser_exec_state *s)
i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
- ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ ret |= (cmd_reg_inhibit(s, i)) ?
+ -EBADRQC : 0;
}
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+ if (ret)
+ break;
}
return ret;
}
@@ -912,11 +930,15 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
ret |= ((cmd_reg_inhibit(s, i) ||
(cmd_reg_inhibit(s, i + 1)))) ?
- -EINVAL : 0;
+ -EBADRQC : 0;
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+ if (ret)
+ break;
ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+ if (ret)
+ break;
}
return ret;
}
@@ -934,15 +956,19 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
for (i = 1; i < cmd_len;) {
if (IS_BROADWELL(gvt->dev_priv))
- ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+ ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+ if (ret)
+ break;
if (cmd_val(s, 0) & (1 << 22)) {
gma = cmd_gma(s, i + 1);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, i + 2)) << 32;
ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ if (ret)
+ break;
}
i += gmadr_dw_number(s) + 1;
}
@@ -958,11 +984,15 @@ static int cmd_handler_srm(struct parser_exec_state *s)
for (i = 1; i < cmd_len;) {
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+ if (ret)
+ break;
if (cmd_val(s, 0) & (1 << 22)) {
gma = cmd_gma(s, i + 1);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, i + 2)) << 32;
ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+ if (ret)
+ break;
}
i += gmadr_dw_number(s) + 1;
}
@@ -1116,7 +1146,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
v = (dword0 & GENMASK(21, 19)) >> 19;
if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
- return -EINVAL;
+ return -EBADRQC;
info->pipe = gen8_plane_code[v].pipe;
info->plane = gen8_plane_code[v].plane;
@@ -1136,7 +1166,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->surf_reg = SPRSURF(info->pipe);
} else {
WARN_ON(1);
- return -EINVAL;
+ return -EBADRQC;
}
return 0;
}
@@ -1185,7 +1215,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
default:
gvt_vgpu_err("unknown plane code %d\n", plane);
- return -EINVAL;
+ return -EBADRQC;
}
info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
@@ -1210,13 +1240,13 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
return 0;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
- tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+ stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+ tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
} else {
- stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+ stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
GENMASK(15, 6)) >> 6;
- tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+ tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
}
if (stride != info->stride_val)
@@ -1235,21 +1265,21 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
struct intel_vgpu *vgpu = s->vgpu;
- set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
- set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
info->tile_val << 10);
} else {
- set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
info->stride_val << 6);
- set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
info->tile_val << 10);
}
- vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, info->event);
return 0;
}
@@ -1348,10 +1378,13 @@ static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
unsigned long addr;
unsigned long gma_high, gma_low;
- int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ struct intel_vgpu *vgpu = s->vgpu;
+ int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
- if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+ if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
+ gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
return INTEL_GVT_INVALID_ADDR;
+ }
gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
if (gmadr_bytes == 4) {
@@ -1374,16 +1407,16 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
if (op_size > max_surface_size) {
gvt_vgpu_err("command address audit fail name %s\n",
s->info->name);
- return -EINVAL;
+ return -EFAULT;
}
if (index_mode) {
- if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
- ret = -EINVAL;
+ if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+ ret = -EFAULT;
goto err;
}
} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
- ret = -EINVAL;
+ ret = -EFAULT;
goto err;
}
@@ -1439,7 +1472,7 @@ static inline int unexpected_cmd(struct parser_exec_state *s)
gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
- return -EINVAL;
+ return -EBADRQC;
}
static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
@@ -1545,10 +1578,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
return -EFAULT;
}
- offset = gma & (GTT_PAGE_SIZE - 1);
+ offset = gma & (I915_GTT_PAGE_SIZE - 1);
- copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
- GTT_PAGE_SIZE - offset : end_gma - gma;
+ copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
+ I915_GTT_PAGE_SIZE - offset : end_gma - gma;
intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
@@ -1576,110 +1609,118 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
-static int find_bb_size(struct parser_exec_state *s)
+static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
{
unsigned long gma = 0;
struct cmd_info *info;
- int bb_size = 0;
uint32_t cmd_len = 0;
- bool met_bb_end = false;
+ bool bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
+ *bb_size = 0;
+
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
- cmd = cmd_val(s, 0);
+ if (gma == INTEL_GVT_INVALID_ADDR)
+ return -EFAULT;
+ cmd = cmd_val(s, 0);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
- return -EINVAL;
+ return -EBADRQC;
}
do {
- copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
- gma, gma + 4, &cmd);
+ if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+ gma, gma + 4, &cmd) < 0)
+ return -EFAULT;
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
- return -EINVAL;
+ return -EBADRQC;
}
if (info->opcode == OP_MI_BATCH_BUFFER_END) {
- met_bb_end = true;
+ bb_end = true;
} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
- if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+ if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
/* chained batch buffer */
- met_bb_end = true;
- }
+ bb_end = true;
}
cmd_len = get_cmd_length(info, cmd) << 2;
- bb_size += cmd_len;
+ *bb_size += cmd_len;
gma += cmd_len;
+ } while (!bb_end);
- } while (!met_bb_end);
-
- return bb_size;
+ return 0;
}
static int perform_bb_shadow(struct parser_exec_state *s)
{
- struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
+ struct intel_vgpu_shadow_bb *bb;
unsigned long gma = 0;
- int bb_size;
- void *dst = NULL;
+ unsigned long bb_size;
int ret = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
+ if (gma == INTEL_GVT_INVALID_ADDR)
+ return -EFAULT;
- /* get the size of the batch buffer */
- bb_size = find_bb_size(s);
- if (bb_size < 0)
- return -EINVAL;
+ ret = find_bb_size(s, &bb_size);
+ if (ret)
+ return ret;
- /* allocate shadow batch buffer */
- entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
- if (entry_obj == NULL)
+ bb = kzalloc(sizeof(*bb), GFP_KERNEL);
+ if (!bb)
return -ENOMEM;
- entry_obj->obj =
- i915_gem_object_create(s->vgpu->gvt->dev_priv,
- roundup(bb_size, PAGE_SIZE));
- if (IS_ERR(entry_obj->obj)) {
- ret = PTR_ERR(entry_obj->obj);
- goto free_entry;
+ bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
+ roundup(bb_size, PAGE_SIZE));
+ if (IS_ERR(bb->obj)) {
+ ret = PTR_ERR(bb->obj);
+ goto err_free_bb;
}
- entry_obj->len = bb_size;
- INIT_LIST_HEAD(&entry_obj->list);
- dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- goto put_obj;
- }
+ ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+ if (ret)
+ goto err_free_obj;
- ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
- if (ret) {
- gvt_vgpu_err("failed to set shadow batch to CPU\n");
- goto unmap_src;
+ bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
+ if (IS_ERR(bb->va)) {
+ ret = PTR_ERR(bb->va);
+ goto err_finish_shmem_access;
}
- entry_obj->va = dst;
- entry_obj->bb_start_cmd_va = s->ip_va;
+ if (bb->clflush & CLFLUSH_BEFORE) {
+ drm_clflush_virt_range(bb->va, bb->obj->base.size);
+ bb->clflush &= ~CLFLUSH_BEFORE;
+ }
- /* copy batch buffer to shadow batch buffer*/
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size,
- dst);
+ bb->va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
- goto unmap_src;
+ ret = -EFAULT;
+ goto err_unmap;
}
- list_add(&entry_obj->list, &s->workload->shadow_bb);
+ INIT_LIST_HEAD(&bb->list);
+ list_add(&bb->list, &s->workload->shadow_bb);
+
+ bb->accessing = true;
+ bb->bb_start_cmd_va = s->ip_va;
+
+ if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
+ bb->bb_offset = s->ip_va - s->rb_va;
+ else
+ bb->bb_offset = 0;
+
/*
* ip_va saves the virtual address of the shadow batch buffer, while
* ip_gma saves the graphics address of the original batch buffer.
@@ -1688,17 +1729,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
* buffer's gma in pair. After all, we don't want to pin the shadow
* buffer here (too early).
*/
- s->ip_va = dst;
+ s->ip_va = bb->va;
s->ip_gma = gma;
-
return 0;
-
-unmap_src:
- i915_gem_object_unpin_map(entry_obj->obj);
-put_obj:
- i915_gem_object_put(entry_obj->obj);
-free_entry:
- kfree(entry_obj);
+err_unmap:
+ i915_gem_object_unpin_map(bb->obj);
+err_finish_shmem_access:
+ i915_gem_obj_finish_shmem_access(bb->obj);
+err_free_obj:
+ i915_gem_object_put(bb->obj);
+err_free_bb:
+ kfree(bb);
return ret;
}
@@ -1710,13 +1751,13 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
- return -EINVAL;
+ return -EFAULT;
}
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
- return -EINVAL;
+ return -EFAULT;
}
s->saved_buf_addr_type = s->buf_addr_type;
@@ -1740,7 +1781,6 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (ret < 0)
return ret;
}
-
return ret;
}
@@ -2430,7 +2470,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
- return -EINVAL;
+ return -EBADRQC;
}
s->info = info;
@@ -2465,6 +2505,10 @@ static inline bool gma_out_of_range(unsigned long gma,
return (gma > gma_tail) && (gma < gma_head);
}
+/* Keep a consistent return type: e.g. EBADRQC for an unknown
+ * command, EFAULT for an invalid address, EPERM for a non-privileged
+ * access. These codes later serve as input to the VM health status.
+ */
static int command_scan(struct parser_exec_state *s,
unsigned long rb_head, unsigned long rb_tail,
unsigned long rb_start, unsigned long rb_len)
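To illustrate the convention described in the comment above, a minimal sketch (not part of this patch; the helper name is hypothetical) of how a caller could fold the distinct scan errors into a guest-health decision:

/* Hypothetical helper, for illustration only: classify a scan failure. */
static bool scan_error_is_guest_fault(int err)
{
	/* -EBADRQC: unknown command, -EFAULT: invalid address, -EPERM: nonpriv access */
	return err == -EBADRQC || err == -EFAULT || err == -EPERM;
}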
@@ -2487,7 +2531,7 @@ static int command_scan(struct parser_exec_state *s,
s->ip_gma, rb_start,
gma_bottom);
parser_exec_state_dump(s);
- return -EINVAL;
+ return -EFAULT;
}
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
gvt_vgpu_err("ip_gma %lx out of range."
@@ -2516,7 +2560,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
int ret = 0;
/* ring base is page aligned */
- if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+ if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
return -EINVAL;
gma_head = workload->rb_start + workload->rb_head;
@@ -2533,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.ring_tail = gma_tail;
s.rb_va = workload->shadow_ring_buffer_va;
s.workload = workload;
+ s.is_ctx_wa = false;
if ((bypass_scan_mask & (1 << workload->ring_id)) ||
gma_head == gma_tail)
@@ -2565,7 +2610,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx);
/* ring base is page aligned */
- if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+ if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
+ I915_GTT_PAGE_SIZE)))
return -EINVAL;
ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
@@ -2585,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.ring_tail = gma_tail;
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
s.workload = workload;
+ s.is_ctx_wa = true;
if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
ret = -EINVAL;
@@ -2604,6 +2651,7 @@ out:
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
int ring_id = workload->ring_id;
@@ -2619,19 +2667,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
- void *va = vgpu->reserve_ring_buffer_va[ring_id];
+ if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+ void *p;
+
/* realloc the new ring buffer if needed */
- vgpu->reserve_ring_buffer_va[ring_id] =
- krealloc(va, workload->rb_len, GFP_KERNEL);
- if (!vgpu->reserve_ring_buffer_va[ring_id]) {
- gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+ p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
+ GFP_KERNEL);
+ if (!p) {
+ gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer[ring_id] = p;
+ s->ring_scan_buffer_size[ring_id] = workload->rb_len;
}
- shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index b0cff4d..c602712 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -25,41 +25,41 @@
#define __GVT_DEBUG_H__
#define gvt_err(fmt, args...) \
- DRM_ERROR("gvt: "fmt, ##args)
+ pr_err("gvt: "fmt, ##args)
#define gvt_vgpu_err(fmt, args...) \
do { \
if (IS_ERR_OR_NULL(vgpu)) \
- DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
+ pr_err("gvt: "fmt, ##args); \
else \
- DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+ pr_err("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
} while (0)
#define gvt_dbg_core(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
+ pr_debug("gvt: core: "fmt, ##args)
#define gvt_dbg_irq(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
+ pr_debug("gvt: irq: "fmt, ##args)
#define gvt_dbg_mm(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
+ pr_debug("gvt: mm: "fmt, ##args)
#define gvt_dbg_mmio(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
+ pr_debug("gvt: mmio: "fmt, ##args)
#define gvt_dbg_dpy(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
+ pr_debug("gvt: dpy: "fmt, ##args)
#define gvt_dbg_el(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
+ pr_debug("gvt: el: "fmt, ##args)
#define gvt_dbg_sched(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
+ pr_debug("gvt: sched: "fmt, ##args)
#define gvt_dbg_render(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
+ pr_debug("gvt: render: "fmt, ##args)
#define gvt_dbg_cmd(fmt, args...) \
- DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
+ pr_debug("gvt: cmd: "fmt, ##args)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
new file mode 100644
index 0000000..32a66df
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/debugfs.h>
+#include <linux/list_sort.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+struct mmio_diff_param {
+ struct intel_vgpu *vgpu;
+ int total;
+ int diff;
+ struct list_head diff_mmio_list;
+};
+
+struct diff_mmio {
+ struct list_head node;
+ u32 offset;
+ u32 preg;
+ u32 vreg;
+};
+
+/* Compare two diff_mmio items. */
+static int mmio_offset_compare(void *priv,
+ struct list_head *a, struct list_head *b)
+{
+ struct diff_mmio *ma;
+ struct diff_mmio *mb;
+
+ ma = container_of(a, struct diff_mmio, node);
+ mb = container_of(b, struct diff_mmio, node);
+ if (ma->offset < mb->offset)
+ return -1;
+ else if (ma->offset > mb->offset)
+ return 1;
+ return 0;
+}
+
+static inline int mmio_diff_handler(struct intel_gvt *gvt,
+ u32 offset, void *data)
+{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct mmio_diff_param *param = data;
+ struct diff_mmio *node;
+ u32 preg, vreg;
+
+ preg = I915_READ_NOTRACE(_MMIO(offset));
+ vreg = vgpu_vreg(param->vgpu, offset);
+
+ if (preg != vreg) {
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->offset = offset;
+ node->preg = preg;
+ node->vreg = vreg;
+ list_add(&node->node, &param->diff_mmio_list);
+ param->diff++;
+ }
+ param->total++;
+ return 0;
+}
+
+/* Show all the differing values of the tracked MMIOs. */
+static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
+{
+ struct intel_vgpu *vgpu = s->private;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct mmio_diff_param param = {
+ .vgpu = vgpu,
+ .total = 0,
+ .diff = 0,
+ };
+ struct diff_mmio *node, *next;
+
+ INIT_LIST_HEAD(&param.diff_mmio_list);
+
+ mutex_lock(&gvt->lock);
+ spin_lock_bh(&gvt->scheduler.mmio_context_lock);
+
+ mmio_hw_access_pre(gvt->dev_priv);
+	/* Collect all the differing MMIOs into the list. */
+ intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
+ mmio_hw_access_post(gvt->dev_priv);
+
+ spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
+ mutex_unlock(&gvt->lock);
+
+	/* Sort in ascending order by MMIO offset. */
+ list_sort(NULL, &param.diff_mmio_list, mmio_offset_compare);
+
+ seq_printf(s, "%-8s %-8s %-8s %-8s\n", "Offset", "HW", "vGPU", "Diff");
+ list_for_each_entry_safe(node, next, &param.diff_mmio_list, node) {
+ u32 diff = node->preg ^ node->vreg;
+
+ seq_printf(s, "%08x %08x %08x %*pbl\n",
+ node->offset, node->preg, node->vreg,
+ 32, &diff);
+ list_del(&node->node);
+ kfree(node);
+ }
+ seq_printf(s, "Total: %d, Diff: %d\n", param.total, param.diff);
+ return 0;
+}
+
+static int vgpu_mmio_diff_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vgpu_mmio_diff_show, inode->i_private);
+}
+
+static const struct file_operations vgpu_mmio_diff_fops = {
+ .open = vgpu_mmio_diff_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
+{
+ struct dentry *ent;
+ char name[10] = "";
+
+ sprintf(name, "vgpu%d", vgpu->id);
+ vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
+ if (!vgpu->debugfs)
+ return -ENOMEM;
+
+ ent = debugfs_create_bool("active", 0444, vgpu->debugfs,
+ &vgpu->active);
+ if (!ent)
+ return -ENOMEM;
+
+ ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs,
+ vgpu, &vgpu_mmio_diff_fops);
+ if (!ent)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * intel_gvt_debugfs_remove_vgpu - remove debugfs entries of a vGPU
+ * @vgpu: a vGPU
+ */
+void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
+{
+ debugfs_remove_recursive(vgpu->debugfs);
+ vgpu->debugfs = NULL;
+}
+
+/**
+ * intel_gvt_debugfs_init - register gvt debugfs root entry
+ * @gvt: GVT device
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_gvt_debugfs_init(struct intel_gvt *gvt)
+{
+ struct drm_minor *minor = gvt->dev_priv->drm.primary;
+ struct dentry *ent;
+
+ gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
+ if (!gvt->debugfs_root) {
+ gvt_err("Cannot create debugfs dir\n");
+ return -ENOMEM;
+ }
+
+ ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
+ &gvt->mmio.num_tracked_mmio);
+ if (!ent)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * intel_gvt_debugfs_clean - remove debugfs entries
+ * @gvt: GVT device
+ */
+void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
+{
+ debugfs_remove_recursive(gvt->debugfs_root);
+ gvt->debugfs_root = NULL;
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 309f3fa..dd96ffc 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -59,7 +59,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -67,14 +67,14 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
return 1;
}
-static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -169,105 +169,105 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+ vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+ vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
- vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+ vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
SKL_FUSE_DOWNLOAD_STATUS |
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
- vgpu_vreg(vgpu, LCPLL1_CTL) |=
+ vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
LCPLL_PLL_ENABLE |
LCPLL_PLL_LOCK;
- vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+ vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
- vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &=
~PORT_CLK_SEL_MASK;
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |=
PORT_CLK_SEL_LCPLL_810;
}
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &=
~PORT_CLK_SEL_MASK;
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |=
PORT_CLK_SEL_LCPLL_810;
}
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
- vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &=
~PORT_CLK_SEL_MASK;
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |=
PORT_CLK_SEL_LCPLL_810;
}
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
- vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
if (IS_BROADWELL(dev_priv))
- vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_PORT_DP_A_HOTPLUG;
else
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
/* Clear host CRT status, so the guest cannot detect this host CRT. */
if (IS_BROADWELL(dev_priv))
- vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+ vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
- vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+ vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -369,12 +369,12 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
if (!pipe_is_enabled(vgpu, pipe))
continue;
- vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
if (pipe_is_enabled(vgpu, pipe)) {
- vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index d73de22..b46b868 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -179,4 +179,6 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
new file mode 100644
index 0000000..b555eb2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright 2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Xiaoguang Chen
+ * Tina Zhang <tina.zhang@intel.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <linux/vfio.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
+
+static int vgpu_gem_get_pages(
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int i, ret;
+ gen8_pte_t __iomem *gtt_entries;
+ struct intel_vgpu_fb_info *fb_info;
+
+ fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
+ if (WARN_ON(!fb_info))
+ return -ENODEV;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (unlikely(!st))
+ return -ENOMEM;
+
+ ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+ if (ret) {
+ kfree(st);
+ return ret;
+ }
+ gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+ (fb_info->start >> PAGE_SHIFT);
+ for_each_sg(st->sgl, sg, fb_info->size, i) {
+ sg->offset = 0;
+ sg->length = PAGE_SIZE;
+ sg_dma_address(sg) =
+ GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+ sg_dma_len(sg) = PAGE_SIZE;
+ }
+
+ __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+
+ return 0;
+}
+
+static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static void dmabuf_gem_object_free(struct kref *kref)
+{
+ struct intel_vgpu_dmabuf_obj *obj =
+ container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
+ struct intel_vgpu *vgpu = obj->vgpu;
+ struct list_head *pos;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+
+ if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos,
+ struct intel_vgpu_dmabuf_obj, list);
+ if (dmabuf_obj == obj) {
+ intel_gvt_hypervisor_put_vfio_device(vgpu);
+ idr_remove(&vgpu->object_idr,
+ dmabuf_obj->dmabuf_id);
+ kfree(dmabuf_obj->info);
+ kfree(dmabuf_obj);
+ list_del(pos);
+ break;
+ }
+ }
+ } else {
+ /* Free the orphan dmabuf_objs here */
+ kfree(obj->info);
+ kfree(obj);
+ }
+}
+
+
+static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
+{
+ kref_get(&obj->kref);
+}
+
+static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
+{
+ kref_put(&obj->kref, dmabuf_gem_object_free);
+}
+
+static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
+{
+
+ struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
+ struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+ struct intel_vgpu *vgpu = obj->vgpu;
+
+ if (vgpu) {
+ mutex_lock(&vgpu->dmabuf_lock);
+ gem_obj->base.dma_buf = NULL;
+ dmabuf_obj_put(obj);
+ mutex_unlock(&vgpu->dmabuf_lock);
+ } else {
+ /* vgpu is NULL, as it has been removed already */
+ gem_obj->base.dma_buf = NULL;
+ dmabuf_obj_put(obj);
+ }
+}
+
+static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
+ .flags = I915_GEM_OBJECT_IS_PROXY,
+ .get_pages = vgpu_gem_get_pages,
+ .put_pages = vgpu_gem_put_pages,
+ .release = vgpu_gem_release,
+};
+
+static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
+ struct intel_vgpu_fb_info *info)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(dev_priv);
+ if (obj == NULL)
+ return NULL;
+
+ drm_gem_private_object_init(dev, &obj->base,
+ info->size << PAGE_SHIFT);
+ i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = 0;
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ unsigned int tiling_mode = 0;
+ unsigned int stride = 0;
+
+ switch (info->drm_format_mod << 10) {
+ case PLANE_CTL_TILED_LINEAR:
+ tiling_mode = I915_TILING_NONE;
+ break;
+ case PLANE_CTL_TILED_X:
+ tiling_mode = I915_TILING_X;
+ stride = info->stride;
+ break;
+ case PLANE_CTL_TILED_Y:
+ tiling_mode = I915_TILING_Y;
+ stride = info->stride;
+ break;
+ default:
+ gvt_dbg_core("not supported tiling mode\n");
+ }
+ obj->tiling_and_stride = tiling_mode | stride;
+ } else {
+ obj->tiling_and_stride = info->drm_format_mod ?
+ I915_TILING_X : 0;
+ }
+
+ return obj;
+}
+
+static int vgpu_get_plane_info(struct drm_device *dev,
+ struct intel_vgpu *vgpu,
+ struct intel_vgpu_fb_info *info,
+ int plane_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_vgpu_primary_plane_format p;
+ struct intel_vgpu_cursor_plane_format c;
+ int ret;
+
+ if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
+ ret = intel_vgpu_decode_primary_plane(vgpu, &p);
+ if (ret)
+ return ret;
+ info->start = p.base;
+ info->start_gpa = p.base_gpa;
+ info->width = p.width;
+ info->height = p.height;
+ info->stride = p.stride;
+ info->drm_format = p.drm_format;
+ info->drm_format_mod = p.tiled;
+ info->size = (((p.stride * p.height * p.bpp) / 8) +
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
+ ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
+ if (ret)
+ return ret;
+ info->start = c.base;
+ info->start_gpa = c.base_gpa;
+ info->width = c.width;
+ info->height = c.height;
+ info->stride = c.width * (c.bpp / 8);
+ info->drm_format = c.drm_format;
+ info->drm_format_mod = 0;
+ info->x_pos = c.x_pos;
+ info->y_pos = c.y_pos;
+
+ /* An invalid cursor hotspot value is delivered to the host
+ * until we find a way to get the cursor hotspot info from the
+ * guest OS.
+ */
+ info->x_hot = UINT_MAX;
+ info->y_hot = UINT_MAX;
+ info->size = (((info->stride * c.height * c.bpp) / 8)
+ + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ } else {
+ gvt_vgpu_err("invalid plane id:%d\n", plane_id);
+ return -EINVAL;
+ }
+
+ if (info->size == 0) {
+ gvt_vgpu_err("fb size is zero\n");
+ return -EINVAL;
+ }
+
+ if (info->start & (PAGE_SIZE - 1)) {
+ gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
+ return -EFAULT;
+ }
+ if (((info->start >> PAGE_SHIFT) + info->size) >
+ ggtt_total_entries(&dev_priv->ggtt)) {
+ gvt_vgpu_err("Invalid GTT offset or size\n");
+ return -EFAULT;
+ }
+
+ if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
+ gvt_vgpu_err("invalid gma addr\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static struct intel_vgpu_dmabuf_obj *
+pick_dmabuf_by_info(struct intel_vgpu *vgpu,
+ struct intel_vgpu_fb_info *latest_info)
+{
+ struct list_head *pos;
+ struct intel_vgpu_fb_info *fb_info;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
+ struct intel_vgpu_dmabuf_obj *ret = NULL;
+
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ if ((dmabuf_obj == NULL) ||
+ (dmabuf_obj->info == NULL))
+ continue;
+
+ fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
+ if ((fb_info->start == latest_info->start) &&
+ (fb_info->start_gpa == latest_info->start_gpa) &&
+ (fb_info->size == latest_info->size) &&
+ (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
+ (fb_info->drm_format == latest_info->drm_format) &&
+ (fb_info->width == latest_info->width) &&
+ (fb_info->height == latest_info->height)) {
+ ret = dmabuf_obj;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static struct intel_vgpu_dmabuf_obj *
+pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
+{
+ struct list_head *pos;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
+ struct intel_vgpu_dmabuf_obj *ret = NULL;
+
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ if (!dmabuf_obj)
+ continue;
+
+ if (dmabuf_obj->dmabuf_id == id) {
+ ret = dmabuf_obj;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
+ struct intel_vgpu_fb_info *fb_info)
+{
+ gvt_dmabuf->drm_format = fb_info->drm_format;
+ gvt_dmabuf->width = fb_info->width;
+ gvt_dmabuf->height = fb_info->height;
+ gvt_dmabuf->stride = fb_info->stride;
+ gvt_dmabuf->size = fb_info->size;
+ gvt_dmabuf->x_pos = fb_info->x_pos;
+ gvt_dmabuf->y_pos = fb_info->y_pos;
+ gvt_dmabuf->x_hot = fb_info->x_hot;
+ gvt_dmabuf->y_hot = fb_info->y_hot;
+}
+
+int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
+{
+ struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct vfio_device_gfx_plane_info *gfx_plane_info = args;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+ struct intel_vgpu_fb_info fb_info;
+ int ret = 0;
+
+ if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
+ VFIO_GFX_PLANE_TYPE_PROBE))
+ return ret;
+ else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
+ (!gfx_plane_info->flags))
+ return -EINVAL;
+
+ ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
+ gfx_plane_info->drm_plane_type);
+ if (ret != 0)
+ goto out;
+
+ mutex_lock(&vgpu->dmabuf_lock);
+ /* If exists, pick up the exposed dmabuf_obj */
+ dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
+ if (dmabuf_obj) {
+ update_fb_info(gfx_plane_info, &fb_info);
+ gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;
+
+ /* This buffer may be released between query_plane ioctl and
+ * get_dmabuf ioctl. Add the refcount to make sure it won't
+ * be released between the two ioctls.
+ */
+ if (!dmabuf_obj->initref) {
+ dmabuf_obj->initref = true;
+ dmabuf_obj_get(dmabuf_obj);
+ }
+ ret = 0;
+ gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
+ vgpu->id, kref_read(&dmabuf_obj->kref),
+ gfx_plane_info->dmabuf_id);
+ mutex_unlock(&vgpu->dmabuf_lock);
+ goto out;
+ }
+
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+	/* Need to allocate a new one */
+ dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
+ if (unlikely(!dmabuf_obj)) {
+ gvt_vgpu_err("alloc dmabuf_obj failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
+ GFP_KERNEL);
+ if (unlikely(!dmabuf_obj->info)) {
+ gvt_vgpu_err("allocate intel vgpu fb info failed\n");
+ ret = -ENOMEM;
+ goto out_free_dmabuf;
+ }
+ memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));
+
+ ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;
+
+ dmabuf_obj->vgpu = vgpu;
+
+ ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
+ if (ret < 0)
+ goto out_free_info;
+ gfx_plane_info->dmabuf_id = ret;
+ dmabuf_obj->dmabuf_id = ret;
+
+ dmabuf_obj->initref = true;
+
+ kref_init(&dmabuf_obj->kref);
+
+ mutex_lock(&vgpu->dmabuf_lock);
+ if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
+ gvt_vgpu_err("get vfio device failed\n");
+ mutex_unlock(&vgpu->dmabuf_lock);
+ goto out_free_info;
+ }
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ update_fb_info(gfx_plane_info, &fb_info);
+
+ INIT_LIST_HEAD(&dmabuf_obj->list);
+ mutex_lock(&vgpu->dmabuf_lock);
+ list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
+ __func__, kref_read(&dmabuf_obj->kref), ret);
+
+ return 0;
+
+out_free_info:
+ kfree(dmabuf_obj->info);
+out_free_dmabuf:
+ kfree(dmabuf_obj);
+out:
+ /* ENODEV means plane isn't ready, which might be a normal case. */
+ return (ret == -ENODEV) ? 0 : ret;
+}
+
+/* To associate an exposed dmabuf with the dmabuf_obj */
+int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
+{
+ struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ int dmabuf_fd;
+ int ret = 0;
+
+ mutex_lock(&vgpu->dmabuf_lock);
+
+ dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
+ if (dmabuf_obj == NULL) {
+ gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ obj = vgpu_create_gem(dev, dmabuf_obj->info);
+ if (obj == NULL) {
+ gvt_vgpu_err("create gvt gem obj failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ obj->gvt_info = dmabuf_obj->info;
+
+ dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+ if (IS_ERR(dmabuf)) {
+ gvt_vgpu_err("export dma-buf failed\n");
+ ret = PTR_ERR(dmabuf);
+ goto out_free_gem;
+ }
+
+ i915_gem_object_put(obj);
+
+ ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
+ if (ret < 0) {
+ gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
+ goto out_free_dmabuf;
+ }
+ dmabuf_fd = ret;
+
+ dmabuf_obj_get(dmabuf_obj);
+
+ if (dmabuf_obj->initref) {
+ dmabuf_obj->initref = false;
+ dmabuf_obj_put(dmabuf_obj);
+ }
+
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
+ " file count: %ld, GEM ref: %d\n",
+ vgpu->id, dmabuf_obj->dmabuf_id,
+ kref_read(&dmabuf_obj->kref),
+ dmabuf_fd,
+ file_count(dmabuf->file),
+ kref_read(&obj->base.refcount));
+
+ return dmabuf_fd;
+
+out_free_dmabuf:
+ dma_buf_put(dmabuf);
+out_free_gem:
+ i915_gem_object_put(obj);
+out:
+ mutex_unlock(&vgpu->dmabuf_lock);
+ return ret;
+}
+
+void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+
+ mutex_lock(&vgpu->dmabuf_lock);
+ list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ dmabuf_obj->vgpu = NULL;
+
+ idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
+ intel_gvt_hypervisor_put_vfio_device(vgpu);
+ list_del(pos);
+
+ /* dmabuf_obj might be freed in dmabuf_obj_put */
+ if (dmabuf_obj->initref) {
+ dmabuf_obj->initref = false;
+ dmabuf_obj_put(dmabuf_obj);
+ }
+
+ }
+ mutex_unlock(&vgpu->dmabuf_lock);
+}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.h b/drivers/gpu/drm/i915/gvt/dmabuf.h
new file mode 100644
index 0000000..5f8f03f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Xiaoguang Chen
+ * Tina Zhang <tina.zhang@intel.com>
+ */
+
+#ifndef _GVT_DMABUF_H_
+#define _GVT_DMABUF_H_
+#include <linux/vfio.h>
+
+struct intel_vgpu_fb_info {
+ __u64 start;
+ __u64 start_gpa;
+ __u64 drm_format_mod;
+ __u32 drm_format; /* drm format of plane */
+ __u32 width; /* width of plane */
+ __u32 height; /* height of plane */
+ __u32 stride; /* stride of plane */
+ __u32 size; /* size of plane in bytes, align on page */
+ __u32 x_pos; /* horizontal position of cursor plane */
+ __u32 y_pos; /* vertical position of cursor plane */
+ __u32 x_hot; /* horizontal position of cursor hotspot */
+ __u32 y_hot; /* vertical position of cursor hotspot */
+ struct intel_vgpu_dmabuf_obj *obj;
+};
+
+/**
+ * struct intel_vgpu_dmabuf_obj- Intel vGPU device buffer object
+ */
+struct intel_vgpu_dmabuf_obj {
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_fb_info *info;
+ __u32 dmabuf_id;
+ struct kref kref;
+ bool initref;
+ struct list_head list;
+};
+
+int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
+int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id);
+void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu);
+
+#endif
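For reference, a minimal userspace sketch (illustrative only, not part of this patch) of how a VFIO client could consume this interface, assuming the VFIO_DEVICE_QUERY_GFX_PLANE and VFIO_DEVICE_GET_GFX_DMABUF ioctls and struct vfio_device_gfx_plane_info from <linux/vfio.h>; device_fd is assumed to be an already-open VFIO device fd for the vGPU:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int export_primary_plane(int device_fd)
{
	struct vfio_device_gfx_plane_info plane = {
		.argsz = sizeof(plane),
		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
		.drm_plane_type = 1,	/* DRM_PLANE_TYPE_PRIMARY */
	};
	int dmabuf_fd;

	/* Ask GVT-g to decode the vGPU's current primary plane. */
	if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane))
		return -1;

	/* Turn the returned dmabuf_id into a dma-buf file descriptor. */
	dmabuf_fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);
	if (dmabuf_fd < 0)
		return -1;

	printf("plane %ux%u, drm format 0x%x, exported as fd %d\n",
	       plane.width, plane.height, plane.drm_format, dmabuf_fd);
	return dmabuf_fd;
}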
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 42cd09ec..f613376 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -95,9 +95,9 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
static void reset_gmbus_controller(struct intel_vgpu *vgpu)
{
- vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
if (!vgpu->display.i2c_edid.edid_available)
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
}
@@ -123,16 +123,16 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
vgpu->display.i2c_edid.state = I2C_GMBUS;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
!intel_vgpu_port_is_dp(vgpu, port)) {
vgpu->display.i2c_edid.port = port;
vgpu->display.i2c_edid.edid_available = true;
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
} else
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
return 0;
}
@@ -159,8 +159,8 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* 2) HW_RDY bit asserted
*/
if (wvalue & GMBUS_SW_CLR_INT) {
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
}
/* For virtualization, we suppose that HW is always ready,
@@ -208,7 +208,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* visible in gmbus interface)
*/
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
}
break;
case NIDX_NS_W:
@@ -220,7 +220,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* START (-->INDEX) -->DATA
*/
i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break;
default:
gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
@@ -256,7 +256,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
u32 reg_data = 0;
/* Data can only be received if the previous settings are correct */
- if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+ if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
if (byte_left <= 0) {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 940cdaa..70494e3 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,8 +46,6 @@
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
-
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -135,6 +133,8 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+ unsigned long hwsp_gpa;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_PTR);
@@ -160,6 +160,20 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
ctx_status_ptr.write_ptr = write_pointer;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
+ /* Update the CSB and CSB write pointer in HWSP */
+ hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ vgpu->hws_pga[ring_id]);
+ if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
+ write_pointer * 8,
+ status, 8);
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ hwsp_gpa +
+ intel_hws_csb_write_index(dev_priv) * 4,
+ &write_pointer, 4);
+ }
+
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
vgpu->id, write_pointer, offset, status->ldw, status->udw);
@@ -358,218 +372,47 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
return 0;
}
-static void free_workload(struct intel_vgpu_workload *workload)
-{
- intel_vgpu_unpin_mm(workload->shadow_mm);
- intel_gvt_mm_unreference(workload->shadow_mm);
- kmem_cache_free(workload->vgpu->workloads, workload);
-}
-
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
-static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
-{
- const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
- struct intel_shadow_bb_entry *entry_obj;
-
- /* pin the gem object to ggtt */
- list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
- struct i915_vma *vma;
-
- vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
- if (IS_ERR(vma)) {
- return PTR_ERR(vma);
- }
-
- /* FIXME: we are not tracking our pinned VMA leaving it
- * up to the core to fix up the stray pin_count upon
- * free.
- */
-
- /* update the relocate gma with shadow batch buffer*/
- entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
- if (gmadr_bytes == 8)
- entry_obj->bb_start_cmd_va[2] = 0;
- }
- return 0;
-}
-
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
- struct intel_vgpu_workload *workload = container_of(wa_ctx,
- struct intel_vgpu_workload,
- wa_ctx);
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
- struct drm_i915_gem_object *ctx_obj =
- shadow_ctx->engine[ring_id].state->obj;
- struct execlist_ring_context *shadow_ring_context;
- struct page *page;
-
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
-
- shadow_ring_context->bb_per_ctx_ptr.val =
- (shadow_ring_context->bb_per_ctx_ptr.val &
- (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
- shadow_ring_context->rcs_indirect_ctx.val =
- (shadow_ring_context->rcs_indirect_ctx.val &
- (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
- kunmap_atomic(shadow_ring_context);
- return 0;
-}
-
-static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
- struct i915_vma *vma;
- unsigned char *per_ctx_va =
- (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
- wa_ctx->indirect_ctx.size;
-
- if (wa_ctx->indirect_ctx.size == 0)
- return 0;
-
- vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
- 0, CACHELINE_BYTES, 0);
- if (IS_ERR(vma)) {
- return PTR_ERR(vma);
- }
-
- /* FIXME: we are not tracking our pinned VMA leaving it
- * up to the core to fix up the stray pin_count upon
- * free.
- */
-
- wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
-
- wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
- memset(per_ctx_va, 0, CACHELINE_BYTES);
-
- update_wa_ctx_2_shadow_ctx(wa_ctx);
- return 0;
-}
-
-static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
-{
- /* release all the shadow batch buffer */
- if (!list_empty(&workload->shadow_bb)) {
- struct intel_shadow_bb_entry *entry_obj =
- list_first_entry(&workload->shadow_bb,
- struct intel_shadow_bb_entry,
- list);
- struct intel_shadow_bb_entry *temp;
-
- list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
- list) {
- i915_gem_object_unpin_map(entry_obj->obj);
- i915_gem_object_put(entry_obj->obj);
- list_del(&entry_obj->list);
- kfree(entry_obj);
- }
- }
-}
-
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
int ring_id = workload->ring_id;
int ret;
- ret = intel_vgpu_pin_mm(workload->shadow_mm);
- if (ret) {
- gvt_vgpu_err("fail to vgpu pin mm\n");
- goto out;
- }
-
- ret = intel_vgpu_sync_oos_pages(workload->vgpu);
- if (ret) {
- gvt_vgpu_err("fail to vgpu sync oos pages\n");
- goto err_unpin_mm;
- }
-
- ret = intel_vgpu_flush_post_shadow(workload->vgpu);
- if (ret) {
- gvt_vgpu_err("fail to flush post shadow\n");
- goto err_unpin_mm;
- }
-
- ret = intel_gvt_generate_request(workload);
- if (ret) {
- gvt_vgpu_err("fail to generate request\n");
- goto err_unpin_mm;
- }
-
- ret = prepare_shadow_batch_buffer(workload);
- if (ret) {
- gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
- goto err_unpin_mm;
- }
-
- ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
- if (ret) {
- gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
- goto err_shadow_batch;
- }
-
if (!workload->emulate_schedule_in)
return 0;
- ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+ ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+ ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
- if (!ret)
- goto out;
- else
+ ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+ if (ret) {
gvt_vgpu_err("fail to emulate execlist schedule in\n");
-
- release_shadow_wa_ctx(&workload->wa_ctx);
-err_shadow_batch:
- release_shadow_batch_buffer(workload);
-err_unpin_mm:
- intel_vgpu_unpin_mm(workload->shadow_mm);
-out:
- return ret;
+ return ret;
+ }
+ return 0;
}
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
int ring_id = workload->ring_id;
- struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct intel_vgpu_workload *next_workload;
struct list_head *next = workload_q_head(vgpu, ring_id)->next;
bool lite_restore = false;
- int ret;
+ int ret = 0;
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- if (!workload->status) {
- release_shadow_batch_buffer(workload);
- release_shadow_wa_ctx(&workload->wa_ctx);
- }
-
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
- /* if workload->status is not successful means HW GPU
- * has occurred GPU hang or something wrong with i915/GVT,
- * and GVT won't inject context switch interrupt to guest.
- * So this error is a vGPU hang actually to the guest.
- * According to this we should emunlate a vGPU hang. If
- * there are pending workloads which are already submitted
- * from guest, we should clean them up like HW GPU does.
- *
- * if it is in middle of engine resetting, the pending
- * workloads won't be submitted to HW GPU and will be
- * cleaned up during the resetting process later, so doing
- * the workload clean up here doesn't have any impact.
- **/
- clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
goto out;
- }
if (!list_empty(workload_q_head(vgpu, ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
@@ -584,213 +427,60 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
if (lite_restore) {
gvt_dbg_el("next context == current - no schedule-out\n");
- free_workload(workload);
- return 0;
+ goto out;
}
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
- if (ret)
- goto err;
out:
- free_workload(workload);
- return 0;
-err:
- free_workload(workload);
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+ intel_vgpu_destroy_workload(workload);
return ret;
}
-#define RING_CTX_OFF(x) \
- offsetof(struct execlist_ring_context, x)
-
-static void read_guest_pdps(struct intel_vgpu *vgpu,
- u64 ring_context_gpa, u32 pdp[8])
-{
- u64 gpa;
- int i;
-
- gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
-
- for (i = 0; i < 8; i++)
- intel_gvt_hypervisor_read_gpa(vgpu,
- gpa + i * 8, &pdp[7 - i], 4);
-}
-
-static int prepare_mm(struct intel_vgpu_workload *workload)
-{
- struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
- struct intel_vgpu_mm *mm;
- struct intel_vgpu *vgpu = workload->vgpu;
- int page_table_level;
- u32 pdp[8];
-
- if (desc->addressing_mode == 1) { /* legacy 32-bit */
- page_table_level = 3;
- } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
- page_table_level = 4;
- } else {
- gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
- return -EINVAL;
- }
-
- read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
-
- mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
- if (mm) {
- intel_gvt_mm_reference(mm);
- } else {
-
- mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
- pdp, page_table_level, 0);
- if (IS_ERR(mm)) {
- gvt_vgpu_err("fail to create mm object.\n");
- return PTR_ERR(mm);
- }
- }
- workload->shadow_mm = mm;
- return 0;
-}
-
-#define get_last_workload(q) \
- (list_empty(q) ? NULL : container_of(q->prev, \
- struct intel_vgpu_workload, list))
-
static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc,
bool emulate_schedule_in)
{
- struct list_head *q = workload_q_head(vgpu, ring_id);
- struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload = NULL;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u64 ring_context_gpa;
- u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
- int ret;
-
- ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
- if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
- return -EINVAL;
- }
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(ring_header.val), &head, 4);
+ workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+ if (IS_ERR(workload))
+ return PTR_ERR(workload);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(ring_tail.val), &tail, 4);
-
- head &= RB_HEAD_OFF_MASK;
- tail &= RB_TAIL_OFF_MASK;
-
- if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
- gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
- gvt_dbg_el("ctx head %x real head %lx\n", head,
- last_workload->rb_tail);
- /*
- * cannot use guest context head pointer here,
- * as it might not be updated at this time
- */
- head = last_workload->rb_tail;
- }
-
- gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
-
- workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
- if (!workload)
- return -ENOMEM;
-
- /* record some ring buffer register values for scan and shadow */
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(rb_start.val), &start, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
-
- INIT_LIST_HEAD(&workload->list);
- INIT_LIST_HEAD(&workload->shadow_bb);
-
- init_waitqueue_head(&workload->shadow_ctx_status_wq);
- atomic_set(&workload->shadow_ctx_active, 0);
-
- workload->vgpu = vgpu;
- workload->ring_id = ring_id;
- workload->ctx_desc = *desc;
- workload->ring_context_gpa = ring_context_gpa;
- workload->rb_head = head;
- workload->rb_tail = tail;
- workload->rb_start = start;
- workload->rb_ctl = ctl;
workload->prepare = prepare_execlist_workload;
workload->complete = complete_execlist_workload;
- workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
- workload->shadowed = false;
-
- if (ring_id == RCS) {
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
- RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
-
- workload->wa_ctx.indirect_ctx.guest_gma =
- indirect_ctx & INDIRECT_CTX_ADDR_MASK;
- workload->wa_ctx.indirect_ctx.size =
- (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
- CACHELINE_BYTES;
- workload->wa_ctx.per_ctx.guest_gma =
- per_ctx & PER_CTX_ADDR_MASK;
- workload->wa_ctx.per_ctx.valid = per_ctx & 1;
- }
if (emulate_schedule_in)
- workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
-
- gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
- workload, ring_id, head, tail, start, ctl);
+ workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
emulate_schedule_in);
- ret = prepare_mm(workload);
- if (ret) {
- kmem_cache_free(vgpu->workloads, workload);
- return ret;
- }
-
- /* Only scan and shadow the first workload in the queue
- * as there is only one pre-allocated buf-obj for shadow.
- */
- if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_gvt_scan_and_shadow_workload(workload);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
- }
-
- queue_workload(workload);
+ intel_vgpu_queue_workload(workload);
return 0;
}
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
- struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
- struct execlist_ctx_descriptor_format desc[2];
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct execlist_ctx_descriptor_format *desc[2];
int i, ret;
- desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
- desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+ desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+ desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
- if (!desc[0].valid) {
+ if (!desc[0]->valid) {
gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
goto inv_desc;
}
for (i = 0; i < ARRAY_SIZE(desc); i++) {
- if (!desc[i].valid)
+ if (!desc[i]->valid)
continue;
- if (!desc[i].privilege_access) {
+ if (!desc[i]->privilege_access) {
gvt_vgpu_err("unexpected GGTT elsp submission\n");
goto inv_desc;
}
@@ -798,9 +488,9 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
/* submit workload */
for (i = 0; i < ARRAY_SIZE(desc); i++) {
- if (!desc[i].valid)
+ if (!desc[i]->valid)
continue;
- ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
+ ret = submit_context(vgpu, ring_id, desc[i], i == 0);
if (ret) {
gvt_vgpu_err("failed to submit desc %d\n", i);
return ret;
@@ -811,13 +501,14 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
inv_desc:
gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
- desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
+ desc[0]->udw, desc[0]->ldw, desc[1]->udw, desc[1]->ldw);
return -EINVAL;
}
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
- struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
@@ -830,98 +521,47 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_PTR);
-
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
+ unsigned int tmp;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
- struct intel_vgpu_workload *pos, *n;
- unsigned int tmp;
+ struct intel_vgpu_submission *s = &vgpu->submission;
- /* free the unsubmited workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
- list_for_each_entry_safe(pos, n,
- &vgpu->workload_q_head[engine->id], list) {
- list_del_init(&pos->list);
- free_workload(pos);
- }
-
- clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
- }
-}
-
-void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
-{
- enum intel_engine_id i;
- struct intel_engine_cs *engine;
-
- clean_workloads(vgpu, ALL_ENGINES);
- kmem_cache_destroy(vgpu->workloads);
-
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- kfree(vgpu->reserve_ring_buffer_va[i]);
- vgpu->reserve_ring_buffer_va[i] = NULL;
- vgpu->reserve_ring_buffer_size[i] = 0;
- }
-
-}
-
-#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8)
-int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
-{
- enum intel_engine_id i;
- struct intel_engine_cs *engine;
-
- /* each ring has a virtual execlist engine */
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- init_vgpu_execlist(vgpu, i);
- INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+ kfree(s->ring_scan_buffer[engine->id]);
+ s->ring_scan_buffer[engine->id] = NULL;
+ s->ring_scan_buffer_size[engine->id] = 0;
}
-
- vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
- sizeof(struct intel_vgpu_workload), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vgpu->workloads)
- return -ENOMEM;
-
- /* each ring has a shadow ring buffer until vgpu destroyed */
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- vgpu->reserve_ring_buffer_va[i] =
- kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
- if (!vgpu->reserve_ring_buffer_va[i]) {
- gvt_vgpu_err("fail to alloc reserve ring buffer\n");
- goto out;
- }
- vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
- }
- return 0;
-out:
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- if (vgpu->reserve_ring_buffer_size[i]) {
- kfree(vgpu->reserve_ring_buffer_va[i]);
- vgpu->reserve_ring_buffer_va[i] = NULL;
- vgpu->reserve_ring_buffer_size[i] = 0;
- }
- }
- return -ENOMEM;
}
-void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+static void reset_execlist(struct intel_vgpu *vgpu,
unsigned long engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
unsigned int tmp;
- clean_workloads(vgpu, engine_mask);
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
+
+static int init_execlist(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
+{
+ reset_execlist(vgpu, engine_mask);
+ return 0;
+}
+
+const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = {
+ .name = "execlist",
+ .init = init_execlist,
+ .reset = reset_execlist,
+ .clean = clean_execlist,
+};
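
For reference only (not part of the patch): a minimal sketch of how a caller might drive the ops table registered above. It uses only the .init/.reset/.clean signatures visible in this patch; the helper name and the submission->ops assignment are hypothetical.

static int example_select_execlist_mode(struct intel_vgpu *vgpu)
{
	const struct intel_vgpu_submission_ops *ops =
		&intel_vgpu_execlist_submission_ops;
	int ret;

	/* init_execlist() simply resets the per-engine virtual execlist state */
	ret = ops->init(vgpu, ALL_ENGINES);
	if (ret)
		return ret;

	vgpu->submission.ops = ops;	/* hypothetical field, for illustration only */
	return 0;
}
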
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 7eced40..427e40e 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -37,10 +37,6 @@
struct execlist_ctx_descriptor_format {
union {
- u32 udw;
- u32 context_id;
- };
- union {
u32 ldw;
struct {
u32 valid : 1;
@@ -54,6 +50,10 @@ struct execlist_ctx_descriptor_format {
u32 lrca : 20;
};
};
+ union {
+ u32 udw;
+ u32 context_id;
+ };
};
struct execlist_status_format {
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
new file mode 100644
index 0000000..6b50fe7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Bing Niu <bing.niu@intel.com>
+ * Xu Han <xu.han@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ * Yang Liu <yang2.liu@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#include <uapi/drm/drm_fourcc.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define PRIMARY_FORMAT_NUM 16
+struct pixel_format {
+ int drm_format; /* Pixel format in DRM definition */
+ int bpp; /* Bits per pixel, 0 indicates invalid */
+ char *desc; /* The description */
+};
+
+static struct pixel_format bdw_pixel_formats[] = {
+ {DRM_FORMAT_C8, 8, "8-bit Indexed"},
+ {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
+ {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
+ {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
+
+ {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
+ {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
+
+ /* non-supported formats have bpp defaulted to 0 */
+ {0, 0, NULL},
+};
+
+static struct pixel_format skl_pixel_formats[] = {
+ {DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"},
+ {DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"},
+ {DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"},
+ {DRM_FORMAT_VYUY, 16, "16-bit packed VYUY (8:8:8:8 MSB-Y2:U:Y1:V)"},
+
+ {DRM_FORMAT_C8, 8, "8-bit Indexed"},
+ {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
+ {DRM_FORMAT_ABGR8888, 32, "32-bit RGBA (8:8:8:8 MSB-A:B:G:R)"},
+ {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
+
+ {DRM_FORMAT_ARGB8888, 32, "32-bit BGRA (8:8:8:8 MSB-A:R:G:B)"},
+ {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
+ {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
+ {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
+
+ /* non-supported formats have bpp defaulted to 0 */
+ {0, 0, NULL},
+};
+
+static int bdw_format_to_drm(int format)
+{
+ int bdw_pixel_formats_index = 6;
+
+ switch (format) {
+ case DISPPLANE_8BPP:
+ bdw_pixel_formats_index = 0;
+ break;
+ case DISPPLANE_BGRX565:
+ bdw_pixel_formats_index = 1;
+ break;
+ case DISPPLANE_BGRX888:
+ bdw_pixel_formats_index = 2;
+ break;
+ case DISPPLANE_RGBX101010:
+ bdw_pixel_formats_index = 3;
+ break;
+ case DISPPLANE_BGRX101010:
+ bdw_pixel_formats_index = 4;
+ break;
+ case DISPPLANE_RGBX888:
+ bdw_pixel_formats_index = 5;
+ break;
+
+ default:
+ break;
+ }
+
+ return bdw_pixel_formats_index;
+}
+
+static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
+ int yuv_order)
+{
+ int skl_pixel_formats_index = 12;
+
+ switch (format) {
+ case PLANE_CTL_FORMAT_INDEXED:
+ skl_pixel_formats_index = 4;
+ break;
+ case PLANE_CTL_FORMAT_RGB_565:
+ skl_pixel_formats_index = 5;
+ break;
+ case PLANE_CTL_FORMAT_XRGB_8888:
+ if (rgb_order)
+ skl_pixel_formats_index = alpha ? 6 : 7;
+ else
+ skl_pixel_formats_index = alpha ? 8 : 9;
+ break;
+ case PLANE_CTL_FORMAT_XRGB_2101010:
+ skl_pixel_formats_index = rgb_order ? 10 : 11;
+ break;
+ case PLANE_CTL_FORMAT_YUV422:
+ skl_pixel_formats_index = yuv_order >> 16;
+ if (skl_pixel_formats_index > 3)
+ return -EINVAL;
+ break;
+
+ default:
+ break;
+ }
+
+ return skl_pixel_formats_index;
+}
+
+static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
+ u32 tiled, int stride_mask, int bpp)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
+ u32 stride = stride_reg;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ switch (tiled) {
+ case PLANE_CTL_TILED_LINEAR:
+ stride = stride_reg * 64;
+ break;
+ case PLANE_CTL_TILED_X:
+ stride = stride_reg * 512;
+ break;
+ case PLANE_CTL_TILED_Y:
+ stride = stride_reg * 128;
+ break;
+ case PLANE_CTL_TILED_YF:
+ if (bpp == 8)
+ stride = stride_reg * 64;
+ else if (bpp == 16 || bpp == 32 || bpp == 64)
+ stride = stride_reg * 128;
+ else
+ gvt_dbg_core("skl: unsupported bpp:%d\n", bpp);
+ break;
+ default:
+ gvt_dbg_core("skl: unsupported tile format:%x\n",
+ tiled);
+ }
+ }
+
+ return stride;
+}
+
+static int get_active_pipe(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (pipe_is_enabled(vgpu, i))
+ break;
+
+ return i;
+}
+
+/**
+ * intel_vgpu_decode_primary_plane - Decode primary plane
+ * @vgpu: input vgpu
+ * @plane: primary plane to save decoded info
+ * This function is called to decode the primary plane
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_primary_plane_format *plane)
+{
+ u32 val, fmt;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ int pipe;
+
+ pipe = get_active_pipe(vgpu);
+ if (pipe >= I915_MAX_PIPES)
+ return -ENODEV;
+
+ val = vgpu_vreg_t(vgpu, DSPCNTR(pipe));
+ plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
+ if (!plane->enabled)
+ return -ENODEV;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
+ _PLANE_CTL_TILED_SHIFT;
+ fmt = skl_format_to_drm(
+ val & PLANE_CTL_FORMAT_MASK,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK,
+ val & PLANE_CTL_YUV422_ORDER_MASK);
+
+ if (fmt >= ARRAY_SIZE(skl_pixel_formats)) {
+ gvt_vgpu_err("Out-of-bounds pixel format index\n");
+ return -EINVAL;
+ }
+
+ plane->bpp = skl_pixel_formats[fmt].bpp;
+ plane->drm_format = skl_pixel_formats[fmt].drm_format;
+ } else {
+ plane->tiled = !!(val & DISPPLANE_TILED);
+ fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
+ plane->bpp = bdw_pixel_formats[fmt].bpp;
+ plane->drm_format = bdw_pixel_formats[fmt].drm_format;
+ }
+
+ if (!plane->bpp) {
+ gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
+ return -EINVAL;
+ }
+
+ plane->hw_format = fmt;
+
+ plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+ if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+ (_PRI_PLANE_STRIDE_MASK >> 6) :
+ _PRI_PLANE_STRIDE_MASK, plane->bpp);
+
+ plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
+ _PIPE_H_SRCSZ_SHIFT;
+ plane->width += 1;
+ plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) &
+ _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
+ plane->height += 1; /* raw height is one minus the real value */
+
+ val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe));
+ plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
+ _PRI_PLANE_X_OFF_SHIFT;
+ plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
+ _PRI_PLANE_Y_OFF_SHIFT;
+
+ return 0;
+}
+
+#define CURSOR_FORMAT_NUM (1 << 6)
+struct cursor_mode_format {
+ int drm_format; /* Pixel format in DRM definition */
+ u8 bpp; /* Bits per pixel; 0 indicates invalid */
+ u32 width; /* In pixel */
+ u32 height; /* In lines */
+ char *desc; /* The description */
+};
+
+static struct cursor_mode_format cursor_pixel_formats[] = {
+ {DRM_FORMAT_ARGB8888, 32, 128, 128, "128x128 32bpp ARGB"},
+ {DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"},
+ {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+ {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+
+ /* non-supported formats have bpp defaulted to 0 */
+ {0, 0, 0, 0, NULL},
+};
+
+static int cursor_mode_to_drm(int mode)
+{
+ int cursor_pixel_formats_index = 4;
+
+ switch (mode) {
+ case CURSOR_MODE_128_ARGB_AX:
+ cursor_pixel_formats_index = 0;
+ break;
+ case CURSOR_MODE_256_ARGB_AX:
+ cursor_pixel_formats_index = 1;
+ break;
+ case CURSOR_MODE_64_ARGB_AX:
+ cursor_pixel_formats_index = 2;
+ break;
+ case CURSOR_MODE_64_32B_AX:
+ cursor_pixel_formats_index = 3;
+ break;
+
+ default:
+ break;
+ }
+
+ return cursor_pixel_formats_index;
+}
+
+/**
+ * intel_vgpu_decode_cursor_plane - Decode cursor plane
+ * @vgpu: input vgpu
+ * @plane: cursor plane to save decoded info
+ * This function is called to decode the cursor plane
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_cursor_plane_format *plane)
+{
+ u32 val, mode, index;
+ u32 alpha_plane, alpha_force;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ int pipe;
+
+ pipe = get_active_pipe(vgpu);
+ if (pipe >= I915_MAX_PIPES)
+ return -ENODEV;
+
+ val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
+ mode = val & CURSOR_MODE;
+ plane->enabled = (mode != CURSOR_MODE_DISABLE);
+ if (!plane->enabled)
+ return -ENODEV;
+
+ index = cursor_mode_to_drm(mode);
+
+ if (!cursor_pixel_formats[index].bpp) {
+ gvt_vgpu_err("Non-supported cursor mode (0x%x)\n", mode);
+ return -EINVAL;
+ }
+ plane->mode = mode;
+ plane->bpp = cursor_pixel_formats[index].bpp;
+ plane->drm_format = cursor_pixel_formats[index].drm_format;
+ plane->width = cursor_pixel_formats[index].width;
+ plane->height = cursor_pixel_formats[index].height;
+
+ alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >>
+ _CURSOR_ALPHA_PLANE_SHIFT;
+ alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >>
+ _CURSOR_ALPHA_FORCE_SHIFT;
+ if (alpha_plane || alpha_force)
+ gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
+ alpha_plane, alpha_force);
+
+ plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+ if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ val = vgpu_vreg_t(vgpu, CURPOS(pipe));
+ plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
+ plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
+ plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
+ plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+
+ return 0;
+}
+
+#define SPRITE_FORMAT_NUM (1 << 3)
+
+static struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = {
+ [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"},
+ [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"},
+ [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"},
+ [0x4] = {DRM_FORMAT_AYUV, 32,
+ "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"},
+};
+
+/**
+ * intel_vgpu_decode_sprite_plane - Decode sprite plane
+ * @vgpu: input vgpu
+ * @plane: sprite plane to save decoded info
+ * This function is called to decode the sprite plane
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_sprite_plane_format *plane)
+{
+ u32 val, fmt;
+ u32 color_order, yuv_order;
+ int drm_format;
+ int pipe;
+
+ pipe = get_active_pipe(vgpu);
+ if (pipe >= I915_MAX_PIPES)
+ return -ENODEV;
+
+ val = vgpu_vreg_t(vgpu, SPRCTL(pipe));
+ plane->enabled = !!(val & SPRITE_ENABLE);
+ if (!plane->enabled)
+ return -ENODEV;
+
+ plane->tiled = !!(val & SPRITE_TILED);
+ color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
+ yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >>
+ _SPRITE_YUV_ORDER_SHIFT;
+
+ fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT;
+ if (!sprite_pixel_formats[fmt].bpp) {
+ gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
+ return -EINVAL;
+ }
+ plane->hw_format = fmt;
+ plane->bpp = sprite_pixel_formats[fmt].bpp;
+ drm_format = sprite_pixel_formats[fmt].drm_format;
+
+ /* RGB values in an RGBxxx buffer may be ordered RGB or BGR,
+ * depending on the state of the color_order field
+ */
+ if (!color_order) {
+ if (drm_format == DRM_FORMAT_XRGB2101010)
+ drm_format = DRM_FORMAT_XBGR2101010;
+ else if (drm_format == DRM_FORMAT_XRGB8888)
+ drm_format = DRM_FORMAT_XBGR8888;
+ }
+
+ if (drm_format == DRM_FORMAT_YUV422) {
+ switch (yuv_order) {
+ case 0:
+ drm_format = DRM_FORMAT_YUYV;
+ break;
+ case 1:
+ drm_format = DRM_FORMAT_UYVY;
+ break;
+ case 2:
+ drm_format = DRM_FORMAT_YVYU;
+ break;
+ case 3:
+ drm_format = DRM_FORMAT_VYUY;
+ break;
+ default:
+ /* yuv_order has only 2 bits */
+ break;
+ }
+ }
+
+ plane->drm_format = drm_format;
+
+ plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+ if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) &
+ _SPRITE_STRIDE_MASK;
+
+ val = vgpu_vreg_t(vgpu, SPRSIZE(pipe));
+ plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
+ _SPRITE_SIZE_HEIGHT_SHIFT;
+ plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
+ _SPRITE_SIZE_WIDTH_SHIFT;
+ plane->height += 1; /* raw height is one minus the real value */
+ plane->width += 1; /* raw width is one minus the real value */
+
+ val = vgpu_vreg_t(vgpu, SPRPOS(pipe));
+ plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
+ plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
+
+ val = vgpu_vreg_t(vgpu, SPROFFSET(pipe));
+ plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
+ _SPRITE_OFFSET_START_X_SHIFT;
+ plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
+ _SPRITE_OFFSET_START_Y_SHIFT;
+
+ return 0;
+}
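
A small standalone sketch of the SKL/KBL stride rule that intel_vgpu_get_stride() implements above: the stride register holds a count of tiles (or 64-byte units for linear surfaces), so the byte stride is that count times the tile row width. The numbers in the comment are a worked example, not values taken from the patch.

/* Worked example (illustrative): a 1920-pixel-wide XRGB8888 X-tiled plane
 * needs 1920 * 4 = 7680 bytes per row; X tiles are 512 bytes wide, so the
 * stride register holds 7680 / 512 = 15 and the decoder recovers 15 * 512.
 */
static inline unsigned int example_skl_stride_bytes(unsigned int stride_reg,
						    unsigned int tile_row_bytes)
{
	return stride_reg * tile_row_bytes;	/* e.g. 15 * 512 = 7680 */
}
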
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
new file mode 100644
index 0000000..cb055f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Bing Niu <bing.niu@intel.com>
+ * Xu Han <xu.han@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ * Yang Liu <yang2.liu@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#ifndef _GVT_FB_DECODER_H_
+#define _GVT_FB_DECODER_H_
+
+#define _PLANE_CTL_FORMAT_SHIFT 24
+#define _PLANE_CTL_TILED_SHIFT 10
+#define _PIPE_V_SRCSZ_SHIFT 0
+#define _PIPE_V_SRCSZ_MASK (0xfff << _PIPE_V_SRCSZ_SHIFT)
+#define _PIPE_H_SRCSZ_SHIFT 16
+#define _PIPE_H_SRCSZ_MASK (0x1fff << _PIPE_H_SRCSZ_SHIFT)
+
+#define _PRI_PLANE_FMT_SHIFT 26
+#define _PRI_PLANE_STRIDE_MASK (0x3ff << 6)
+#define _PRI_PLANE_X_OFF_SHIFT 0
+#define _PRI_PLANE_X_OFF_MASK (0x1fff << _PRI_PLANE_X_OFF_SHIFT)
+#define _PRI_PLANE_Y_OFF_SHIFT 16
+#define _PRI_PLANE_Y_OFF_MASK (0xfff << _PRI_PLANE_Y_OFF_SHIFT)
+
+#define _CURSOR_MODE 0x3f
+#define _CURSOR_ALPHA_FORCE_SHIFT 8
+#define _CURSOR_ALPHA_FORCE_MASK (0x3 << _CURSOR_ALPHA_FORCE_SHIFT)
+#define _CURSOR_ALPHA_PLANE_SHIFT 10
+#define _CURSOR_ALPHA_PLANE_MASK (0x3 << _CURSOR_ALPHA_PLANE_SHIFT)
+#define _CURSOR_POS_X_SHIFT 0
+#define _CURSOR_POS_X_MASK (0x1fff << _CURSOR_POS_X_SHIFT)
+#define _CURSOR_SIGN_X_SHIFT 15
+#define _CURSOR_SIGN_X_MASK (1 << _CURSOR_SIGN_X_SHIFT)
+#define _CURSOR_POS_Y_SHIFT 16
+#define _CURSOR_POS_Y_MASK (0xfff << _CURSOR_POS_Y_SHIFT)
+#define _CURSOR_SIGN_Y_SHIFT 31
+#define _CURSOR_SIGN_Y_MASK (1 << _CURSOR_SIGN_Y_SHIFT)
+
+#define _SPRITE_FMT_SHIFT 25
+#define _SPRITE_COLOR_ORDER_SHIFT 20
+#define _SPRITE_YUV_ORDER_SHIFT 16
+#define _SPRITE_STRIDE_SHIFT 6
+#define _SPRITE_STRIDE_MASK (0x1ff << _SPRITE_STRIDE_SHIFT)
+#define _SPRITE_SIZE_WIDTH_SHIFT 0
+#define _SPRITE_SIZE_HEIGHT_SHIFT 16
+#define _SPRITE_SIZE_WIDTH_MASK (0x1fff << _SPRITE_SIZE_WIDTH_SHIFT)
+#define _SPRITE_SIZE_HEIGHT_MASK (0xfff << _SPRITE_SIZE_HEIGHT_SHIFT)
+#define _SPRITE_POS_X_SHIFT 0
+#define _SPRITE_POS_Y_SHIFT 16
+#define _SPRITE_POS_X_MASK (0x1fff << _SPRITE_POS_X_SHIFT)
+#define _SPRITE_POS_Y_MASK (0xfff << _SPRITE_POS_Y_SHIFT)
+#define _SPRITE_OFFSET_START_X_SHIFT 0
+#define _SPRITE_OFFSET_START_Y_SHIFT 16
+#define _SPRITE_OFFSET_START_X_MASK (0x1fff << _SPRITE_OFFSET_START_X_SHIFT)
+#define _SPRITE_OFFSET_START_Y_MASK (0xfff << _SPRITE_OFFSET_START_Y_SHIFT)
+
+enum GVT_FB_EVENT {
+ FB_MODE_SET_START = 1,
+ FB_MODE_SET_END,
+ FB_DISPLAY_FLIP,
+};
+
+enum DDI_PORT {
+ DDI_PORT_NONE = 0,
+ DDI_PORT_B = 1,
+ DDI_PORT_C = 2,
+ DDI_PORT_D = 3,
+ DDI_PORT_E = 4
+};
+
+struct intel_gvt;
+
+/* color space conversion and gamma correction are not included */
+struct intel_vgpu_primary_plane_format {
+ u8 enabled; /* plane is enabled */
+ u8 tiled; /* X-tiled */
+ u8 bpp; /* bits per pixel */
+ u32 hw_format; /* format field in the PRI_CTL register */
+ u32 drm_format; /* format in DRM definition */
+ u32 base; /* framebuffer base in graphics memory */
+ u64 base_gpa;
+ u32 x_offset; /* in pixels */
+ u32 y_offset; /* in lines */
+ u32 width; /* in pixels */
+ u32 height; /* in lines */
+ u32 stride; /* in bytes */
+};
+
+struct intel_vgpu_sprite_plane_format {
+ u8 enabled; /* plane is enabled */
+ u8 tiled; /* X-tiled */
+ u8 bpp; /* bits per pixel */
+ u32 hw_format; /* format field in the SPR_CTL register */
+ u32 drm_format; /* format in DRM definition */
+ u32 base; /* sprite base in graphics memory */
+ u64 base_gpa;
+ u32 x_pos; /* in pixels */
+ u32 y_pos; /* in lines */
+ u32 x_offset; /* in pixels */
+ u32 y_offset; /* in lines */
+ u32 width; /* in pixels */
+ u32 height; /* in lines */
+ u32 stride; /* in bytes */
+};
+
+struct intel_vgpu_cursor_plane_format {
+ u8 enabled;
+ u8 mode; /* cursor mode select */
+ u8 bpp; /* bits per pixel */
+ u32 drm_format; /* format in DRM definition */
+ u32 base; /* cursor base in graphics memory */
+ u64 base_gpa;
+ u32 x_pos; /* in pixels */
+ u32 y_pos; /* in lines */
+ u8 x_sign; /* X Position Sign */
+ u8 y_sign; /* Y Position Sign */
+ u32 width; /* in pixels */
+ u32 height; /* in lines */
+ u32 x_hot; /* in pixels */
+ u32 y_hot; /* in pixels */
+};
+
+struct intel_vgpu_pipe_format {
+ struct intel_vgpu_primary_plane_format primary;
+ struct intel_vgpu_sprite_plane_format sprite;
+ struct intel_vgpu_cursor_plane_format cursor;
+ enum DDI_PORT ddi_port; /* the DDI port that pipe is connected to */
+};
+
+struct intel_vgpu_fb_format {
+ struct intel_vgpu_pipe_format pipes[I915_MAX_PIPES];
+};
+
+int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_primary_plane_format *plane);
+int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_cursor_plane_format *plane);
+int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_sprite_plane_format *plane);
+
+#endif
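
As a usage illustration (not part of the patch), the three decoders declared above can fill an intel_vgpu_pipe_format for the active pipe. The wrapper name is hypothetical, and -ENODEV is treated as "plane disabled" rather than as an error, matching the decoders' behaviour.

static void example_decode_active_pipe(struct intel_vgpu *vgpu,
				       struct intel_vgpu_pipe_format *pipe)
{
	/* each decoder returns -ENODEV when the plane (or the pipe) is disabled */
	if (intel_vgpu_decode_primary_plane(vgpu, &pipe->primary))
		pipe->primary.enabled = 0;
	if (intel_vgpu_decode_sprite_plane(vgpu, &pipe->sprite))
		pipe->sprite.enabled = 0;
	if (intel_vgpu_decode_cursor_plane(vgpu, &pipe->cursor))
		pipe->cursor.enabled = 0;
}
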
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a26c170..a73e1d4 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -66,20 +66,23 @@ static struct bin_attribute firmware_attr = {
.mmap = NULL,
};
-static int expose_firmware_sysfs(struct intel_gvt *gvt)
+static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
+
+ *(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset));
+ return 0;
+}
+
+static int expose_firmware_sysfs(struct intel_gvt *gvt)
+{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
- struct intel_gvt_mmio_info *e;
- struct gvt_mmio_block *block = gvt->mmio.mmio_block;
- int num = gvt->mmio.num_mmio_block;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
- int i, j;
- int ret;
+ int i, ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
@@ -104,15 +107,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
p = firmware + h->mmio_offset;
- hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
- *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
-
- for (i = 0; i < num; i++, block++) {
- for (j = 0; j < block->size; j += 4)
- *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
- I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
- block->offset) + j));
- }
+ /* Take a snapshot of hw mmio registers. */
+ intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
memcpy(gvt->firmware.mmio, p, info->mmio_size);
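
The rewrite above turns the open-coded register walk into a visitor: intel_gvt_for_each_tracked_mmio() calls a handler of the form (gvt, offset, data) for every tracked MMIO offset, as mmio_snapshot_handler does. As a hedged illustration, any walker with that shape can reuse the iterator; the counting handler below is hypothetical.

static int mmio_count_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
	/* count every tracked register the iterator visits */
	(*(unsigned int *)data)++;
	return 0;
}

/* usage sketch:
 *	unsigned int nr_regs = 0;
 *	intel_gvt_for_each_tracked_mmio(gvt, mmio_count_handler, &nr_regs);
 */
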
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 64d67ff..d292812 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -38,6 +38,12 @@
#include "i915_pvinfo.h"
#include "trace.h"
+#if defined(VERBOSE_DEBUG)
+#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
+#else
+#define gvt_vdbg_mm(fmt, args...)
+#endif
+
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
@@ -94,12 +100,12 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
u64 h_addr;
int ret;
- ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+ ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
&h_addr);
if (ret)
return ret;
- *h_index = h_addr >> GTT_PAGE_SHIFT;
+ *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
return 0;
}
@@ -109,12 +115,12 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
u64 g_addr;
int ret;
- ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+ ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
&g_addr);
if (ret)
return ret;
- *g_index = g_addr >> GTT_PAGE_SHIFT;
+ *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
return 0;
}
@@ -156,13 +162,15 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
struct gtt_type_table_entry {
int entry_type;
+ int pt_type;
int next_pt_type;
int pse_entry_type;
};
-#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
+#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
[type] = { \
.entry_type = e_type, \
+ .pt_type = cpt_type, \
.next_pt_type = npt_type, \
.pse_entry_type = pse_type, \
}
@@ -170,55 +178,68 @@ struct gtt_type_table_entry {
static struct gtt_type_table_entry gtt_type_table[] = {
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+ GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_PML4_ENTRY,
+ GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+ GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+ GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
+ GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
+ GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
GTT_TYPE_GGTT_PTE,
GTT_TYPE_INVALID,
+ GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
};
@@ -227,6 +248,11 @@ static inline int get_next_pt_type(int type)
return gtt_type_table[type].next_pt_type;
}
+static inline int get_pt_type(int type)
+{
+ return gtt_type_table[type].pt_type;
+}
+
static inline int get_entry_type(int type)
{
return gtt_type_table[type].entry_type;
@@ -244,7 +270,7 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
return readq(addr);
}
-static void gtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
mmio_hw_access_pre(dev_priv);
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
@@ -311,20 +337,20 @@ static inline int gtt_set_entry64(void *pt,
#define GTT_HAW 46
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
+#define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
+#define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
+#define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
unsigned long pfn;
if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
- pfn = (e->val64 & ADDR_1G_MASK) >> 12;
+ pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
- pfn = (e->val64 & ADDR_2M_MASK) >> 12;
+ pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
else
- pfn = (e->val64 & ADDR_4K_MASK) >> 12;
+ pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
return pfn;
}
@@ -332,16 +358,16 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
e->val64 &= ~ADDR_1G_MASK;
- pfn &= (ADDR_1G_MASK >> 12);
+ pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
e->val64 &= ~ADDR_2M_MASK;
- pfn &= (ADDR_2M_MASK >> 12);
+ pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
} else {
e->val64 &= ~ADDR_4K_MASK;
- pfn &= (ADDR_4K_MASK >> 12);
+ pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
}
- e->val64 |= (pfn << 12);
+ e->val64 |= (pfn << PAGE_SHIFT);
}
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
@@ -351,7 +377,7 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
return false;
e->type = get_entry_type(e->type);
- if (!(e->val64 & (1 << 7)))
+ if (!(e->val64 & _PAGE_PSE))
return false;
e->type = get_pse_type(e->type);
@@ -369,12 +395,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
return (e->val64 != 0);
else
- return (e->val64 & (1 << 0));
+ return (e->val64 & _PAGE_PRESENT);
}
static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
- e->val64 &= ~(1 << 0);
+ e->val64 &= ~_PAGE_PRESENT;
+}
+
+static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
+{
+ e->val64 |= _PAGE_PRESENT;
}
/*
@@ -382,7 +413,7 @@ static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
*/
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
- unsigned long x = (gma >> GTT_PAGE_SHIFT);
+ unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
trace_gma_index(__func__, gma, x);
return x;
@@ -406,6 +437,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
.get_entry = gtt_get_entry64,
.set_entry = gtt_set_entry64,
.clear_present = gtt_entry_clear_present,
+ .set_present = gtt_entry_set_present,
.test_present = gen8_gtt_test_present,
.test_pse = gen8_gtt_test_pse,
.get_pfn = gen8_gtt_get_pfn,
@@ -421,58 +453,91 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
-static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
- struct intel_gvt_gtt_entry *m)
+/*
+ * MM helpers.
+ */
+static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index,
+ bool guest)
{
- struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- unsigned long gfn, mfn;
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
- *m = *p;
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
- if (!ops->test_present(p))
- return 0;
+ entry->type = mm->ppgtt_mm.root_entry_type;
+ pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
+ mm->ppgtt_mm.shadow_pdps,
+ entry, index, false, 0, mm->vgpu);
- gfn = ops->get_pfn(p);
+ pte_ops->test_pse(entry);
+}
- mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
- if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
- return -ENXIO;
- }
+static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ _ppgtt_get_root_entry(mm, entry, index, true);
+}
- ops->set_pfn(m, mfn);
- return 0;
+static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ _ppgtt_get_root_entry(mm, entry, index, false);
}
-/*
- * MM helpers.
- */
-int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
- void *page_table, struct intel_gvt_gtt_entry *e,
- unsigned long index)
+static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index,
+ bool guest)
{
- struct intel_gvt *gvt = mm->vgpu->gvt;
- struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
- int ret;
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
- e->type = mm->page_table_entry_type;
+ pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
+ mm->ppgtt_mm.shadow_pdps,
+ entry, index, false, 0, mm->vgpu);
+}
- ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
- if (ret)
- return ret;
+static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ _ppgtt_set_root_entry(mm, entry, index, true);
+}
- ops->test_pse(e);
- return 0;
+static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ _ppgtt_set_root_entry(mm, entry, index, false);
}
-int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
- void *page_table, struct intel_gvt_gtt_entry *e,
- unsigned long index)
+static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
{
- struct intel_gvt *gvt = mm->vgpu->gvt;
- struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+ entry->type = GTT_TYPE_GGTT_PTE;
+ pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
+ false, 0, mm->vgpu);
+}
+
+static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+ pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
+ false, 0, mm->vgpu);
+}
- return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
+static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+ pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}
/*
@@ -494,12 +559,15 @@ static inline int ppgtt_spt_get_entry(
return -EINVAL;
ret = ops->get_entry(page_table, e, index, guest,
- spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
if (ret)
return ret;
ops->test_pse(e);
+
+ gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
+ type, e->type, index, e->val64);
return 0;
}
@@ -515,18 +583,21 @@ static inline int ppgtt_spt_set_entry(
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
return -EINVAL;
+ gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
+ type, e->type, index, e->val64);
+
return ops->set_entry(page_table, e, index, guest,
- spt->guest_page.gfn << GTT_PAGE_SHIFT,
+ spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
}
#define ppgtt_get_guest_entry(spt, e, index) \
ppgtt_spt_get_entry(spt, NULL, \
- spt->guest_page_type, e, index, true)
+ spt->guest_page.type, e, index, true)
#define ppgtt_set_guest_entry(spt, e, index) \
ppgtt_spt_set_entry(spt, NULL, \
- spt->guest_page_type, e, index, true)
+ spt->guest_page.type, e, index, true)
#define ppgtt_get_shadow_entry(spt, e, index) \
ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
@@ -536,140 +607,6 @@ static inline int ppgtt_spt_set_entry(
ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
spt->shadow_page.type, e, index, false)
-/**
- * intel_vgpu_init_guest_page - init a guest page data structure
- * @vgpu: a vGPU
- * @p: a guest page data structure
- * @gfn: guest memory page frame number
- * @handler: function will be called when target guest memory page has
- * been modified.
- *
- * This function is called when user wants to track a guest memory page.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p,
- unsigned long gfn,
- int (*handler)(void *, u64, void *, int),
- void *data)
-{
- INIT_HLIST_NODE(&p->node);
-
- p->writeprotection = false;
- p->gfn = gfn;
- p->handler = handler;
- p->data = data;
- p->oos_page = NULL;
- p->write_cnt = 0;
-
- hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
- return 0;
-}
-
-static int detach_oos_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_oos_page *oos_page);
-
-/**
- * intel_vgpu_clean_guest_page - release the resource owned by guest page data
- * structure
- * @vgpu: a vGPU
- * @p: a tracked guest page
- *
- * This function is called when user tries to stop tracking a guest memory
- * page.
- */
-void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p)
-{
- if (!hlist_unhashed(&p->node))
- hash_del(&p->node);
-
- if (p->oos_page)
- detach_oos_page(vgpu, p->oos_page);
-
- if (p->writeprotection)
- intel_gvt_hypervisor_unset_wp_page(vgpu, p);
-}
-
-/**
- * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
- * @vgpu: a vGPU
- * @gfn: guest memory page frame number
- *
- * This function is called when emulation logic wants to know if a trapped GFN
- * is a tracked guest page.
- *
- * Returns:
- * Pointer to guest page data structure, NULL if failed.
- */
-struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- struct intel_vgpu_guest_page *p;
-
- hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
- p, node, gfn) {
- if (p->gfn == gfn)
- return p;
- }
- return NULL;
-}
-
-static inline int init_shadow_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_shadow_page *p, int type)
-{
- struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- dma_addr_t daddr;
-
- daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(kdev, daddr)) {
- gvt_vgpu_err("fail to map dma addr\n");
- return -EINVAL;
- }
-
- p->vaddr = page_address(p->page);
- p->type = type;
-
- INIT_HLIST_NODE(&p->node);
-
- p->mfn = daddr >> GTT_PAGE_SHIFT;
- hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
- return 0;
-}
-
-static inline void clean_shadow_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_shadow_page *p)
-{
- struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-
- dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
- PCI_DMA_BIDIRECTIONAL);
-
- if (!hlist_unhashed(&p->node))
- hash_del(&p->node);
-}
-
-static inline struct intel_vgpu_shadow_page *find_shadow_page(
- struct intel_vgpu *vgpu, unsigned long mfn)
-{
- struct intel_vgpu_shadow_page *p;
-
- hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
- p, node, mfn) {
- if (p->mfn == mfn)
- return p;
- }
- return NULL;
-}
-
-#define guest_page_to_ppgtt_spt(ptr) \
- container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
-
-#define shadow_page_to_ppgtt_spt(ptr) \
- container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
-
static void *alloc_spt(gfp_t gfp_mask)
{
struct intel_vgpu_ppgtt_spt *spt;
@@ -692,61 +629,96 @@ static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
kfree(spt);
}
-static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static int detach_oos_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_oos_page *oos_page);
+
+static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
- trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
+ struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
- clean_shadow_page(spt->vgpu, &spt->shadow_page);
- intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
- list_del_init(&spt->post_shadow_list);
+ trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
+ dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
+ PCI_DMA_BIDIRECTIONAL);
+
+ radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
+
+ if (spt->guest_page.oos_page)
+ detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+
+ intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+
+ list_del_init(&spt->post_shadow_list);
free_spt(spt);
}
-static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
+static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
- struct hlist_node *n;
- struct intel_vgpu_shadow_page *sp;
- int i;
+ struct intel_vgpu_ppgtt_spt *spt;
+ struct radix_tree_iter iter;
+ void **slot;
- hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
- ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
+ radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
+ spt = radix_tree_deref_slot(slot);
+ ppgtt_free_spt(spt);
+ }
}
-static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+static int ppgtt_handle_guest_write_page_table_bytes(
+ struct intel_vgpu_ppgtt_spt *spt,
u64 pa, void *p_data, int bytes);
-static int ppgtt_write_protection_handler(void *gp, u64 pa,
- void *p_data, int bytes)
+static int ppgtt_write_protection_handler(
+ struct intel_vgpu_page_track *page_track,
+ u64 gpa, void *data, int bytes)
{
- struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
+ struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
+
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
- if (!gpt->writeprotection)
- return -EINVAL;
-
- ret = ppgtt_handle_guest_write_page_table_bytes(gp,
- pa, p_data, bytes);
+ ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
if (ret)
return ret;
return ret;
}
-static int reclaim_one_mm(struct intel_gvt *gvt);
+/* Find a spt by guest gfn. */
+static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_page_track *track;
+
+ track = intel_vgpu_find_page_track(vgpu, gfn);
+ if (track && track->handler == ppgtt_write_protection_handler)
+ return track->priv_data;
+
+ return NULL;
+}
+
+/* Find the spt by shadow page mfn. */
+static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
+ struct intel_vgpu *vgpu, unsigned long mfn)
+{
+ return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
+}
-static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
+static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
+
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
+ struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
+ dma_addr_t daddr;
int ret;
retry:
spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
if (!spt) {
- if (reclaim_one_mm(vgpu->gvt))
+ if (reclaim_one_ppgtt_mm(vgpu->gvt))
goto retry;
gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
@@ -754,51 +726,55 @@ retry:
}
spt->vgpu = vgpu;
- spt->guest_page_type = type;
atomic_set(&spt->refcount, 1);
INIT_LIST_HEAD(&spt->post_shadow_list);
/*
- * TODO: guest page type may be different with shadow page type,
- * when we support PSE page in future.
+ * Init shadow_page.
*/
- ret = init_shadow_page(vgpu, &spt->shadow_page, type);
- if (ret) {
- gvt_vgpu_err("fail to initialize shadow page for spt\n");
- goto err;
+ spt->shadow_page.type = type;
+ daddr = dma_map_page(kdev, spt->shadow_page.page,
+ 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev, daddr)) {
+ gvt_vgpu_err("fail to map dma addr\n");
+ ret = -EINVAL;
+ goto err_free_spt;
}
+ spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
+ spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
- ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
- gfn, ppgtt_write_protection_handler, NULL);
- if (ret) {
- gvt_vgpu_err("fail to initialize guest page for spt\n");
- goto err;
- }
+ /*
+ * Init guest_page.
+ */
+ spt->guest_page.type = type;
+ spt->guest_page.gfn = gfn;
- trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
- return spt;
-err:
- ppgtt_free_shadow_page(spt);
- return ERR_PTR(ret);
-}
+ ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
+ ppgtt_write_protection_handler, spt);
+ if (ret)
+ goto err_unmap_dma;
-static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
- struct intel_vgpu *vgpu, unsigned long mfn)
-{
- struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
+ ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
+ if (ret)
+ goto err_unreg_page_track;
- if (p)
- return shadow_page_to_ppgtt_spt(p);
+ trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+ return spt;
- gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
- return NULL;
+err_unreg_page_track:
+ intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
+err_unmap_dma:
+ dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+err_free_spt:
+ free_spt(spt);
+ return ERR_PTR(ret);
}
#define pt_entry_size_shift(spt) \
((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
#define pt_entries(spt) \
- (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+ (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
@@ -810,7 +786,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
if (!ppgtt_get_shadow_entry(spt, e, i) && \
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
-static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
int v = atomic_read(&spt->refcount);
@@ -819,17 +795,16 @@ static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
atomic_inc(&spt->refcount);
}
-static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
-static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
+static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *e)
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
intel_gvt_gtt_type_t cur_pt_type;
- if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
- return -EINVAL;
+ GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -838,16 +813,33 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
return 0;
}
- s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
if (!s) {
gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
ops->get_pfn(e));
return -ENXIO;
}
- return ppgtt_invalidate_shadow_page(s);
+ return ppgtt_invalidate_spt(s);
+}
+
+static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
+ struct intel_gvt_gtt_entry *entry)
+{
+ struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long pfn;
+ int type;
+
+ pfn = ops->get_pfn(entry);
+ type = spt->shadow_page.type;
+
+ if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+ return;
+
+ intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
-static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_entry e;
@@ -863,23 +855,33 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
if (atomic_dec_return(&spt->refcount) > 0)
return 0;
- if (gtt_type_is_pte_pt(spt->shadow_page.type))
- goto release;
-
for_each_present_shadow_entry(spt, &e, index) {
- if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
- gvt_vgpu_err("GVT doesn't support pse bit for now\n");
- return -EINVAL;
+ switch (e.type) {
+ case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+ gvt_vdbg_mm("invalidate 4K entry\n");
+ ppgtt_invalidate_pte(spt, &e);
+ break;
+ case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+ WARN(1, "GVT doesn't support 2M/1GB page\n");
+ continue;
+ case GTT_TYPE_PPGTT_PML4_ENTRY:
+ case GTT_TYPE_PPGTT_PDP_ENTRY:
+ case GTT_TYPE_PPGTT_PDE_ENTRY:
+ gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n");
+ ret = ppgtt_invalidate_spt_by_shadow_entry(
+ spt->vgpu, &e);
+ if (ret)
+ goto fail;
+ break;
+ default:
+ GEM_BUG_ON(1);
}
- ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
- spt->vgpu, &e);
- if (ret)
- goto fail;
}
-release:
+
trace_spt_change(spt->vgpu->id, "release", spt,
- spt->guest_page.gfn, spt->shadow_page.type);
- ppgtt_free_shadow_page(spt);
+ spt->guest_page.gfn, spt->shadow_page.type);
+ ppgtt_free_spt(spt);
return 0;
fail:
gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
@@ -887,49 +889,44 @@ fail:
return ret;
}
-static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
+static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
-static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
+static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- struct intel_vgpu_ppgtt_spt *s = NULL;
- struct intel_vgpu_guest_page *g;
+ struct intel_vgpu_ppgtt_spt *spt = NULL;
int ret;
- if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
- ret = -EINVAL;
- goto fail;
- }
+ GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
- g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
- if (g) {
- s = guest_page_to_ppgtt_spt(g);
- ppgtt_get_shadow_page(s);
- } else {
+ spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
+ if (spt)
+ ppgtt_get_spt(spt);
+ else {
int type = get_next_pt_type(we->type);
- s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
- if (IS_ERR(s)) {
- ret = PTR_ERR(s);
+ spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+ if (IS_ERR(spt)) {
+ ret = PTR_ERR(spt);
goto fail;
}
- ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
+ ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
if (ret)
goto fail;
- ret = ppgtt_populate_shadow_page(s);
+ ret = ppgtt_populate_spt(spt);
if (ret)
goto fail;
- trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
- s->shadow_page.type);
+ trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
+ spt->shadow_page.type);
}
- return s;
+ return spt;
fail:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
- s, we->val64, we->type);
+ spt, we->val64, we->type);
return ERR_PTR(ret);
}
@@ -944,42 +941,78 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}
-static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
+static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
+ struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+ struct intel_gvt_gtt_entry *ge)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry se = *ge;
+ unsigned long gfn;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (!pte_ops->test_present(ge))
+ return 0;
+
+ gfn = pte_ops->get_pfn(ge);
+
+ switch (ge->type) {
+ case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+ gvt_vdbg_mm("shadow 4K gtt entry\n");
+ break;
+ case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+ case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+ gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+ return -EINVAL;
+ default:
+ GEM_BUG_ON(1);
+ }
+
+ /* direct shadow */
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+ if (ret)
+ return -ENXIO;
+
+ pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
+ ppgtt_set_shadow_entry(spt, &se, index);
+ return 0;
+}
+
+static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
struct intel_gvt_gtt_entry se, ge;
- unsigned long i;
+ unsigned long gfn, i;
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
- spt->guest_page.gfn, spt->shadow_page.type);
+ spt->guest_page.gfn, spt->shadow_page.type);
- if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
- for_each_present_guest_entry(spt, &ge, i) {
- ret = gtt_entry_p2m(vgpu, &ge, &se);
- if (ret)
+ for_each_present_guest_entry(spt, &ge, i) {
+ if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
+ s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
goto fail;
+ }
+ ppgtt_get_shadow_entry(spt, &se, i);
+ ppgtt_generate_shadow_entry(&se, s, &ge);
ppgtt_set_shadow_entry(spt, &se, i);
+ } else {
+ gfn = ops->get_pfn(&ge);
+ if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+ ops->set_pfn(&se, gvt->gtt.scratch_mfn);
+ ppgtt_set_shadow_entry(spt, &se, i);
+ continue;
+ }
+
+ ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
+ if (ret)
+ goto fail;
}
- return 0;
- }
-
- for_each_present_guest_entry(spt, &ge, i) {
- if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
- gvt_vgpu_err("GVT doesn't support pse bit now\n");
- ret = -EINVAL;
- goto fail;
- }
-
- s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
- if (IS_ERR(s)) {
- ret = PTR_ERR(s);
- goto fail;
- }
- ppgtt_get_shadow_entry(spt, &se, i);
- ppgtt_generate_shadow_entry(&se, s, &ge);
- ppgtt_set_shadow_entry(spt, &se, i);
}
return 0;
fail:
@@ -988,36 +1021,40 @@ fail:
return ret;
}
-static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
+static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
struct intel_gvt_gtt_entry *se, unsigned long index)
{
- struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
- struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int ret;
- trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
- index);
+ trace_spt_guest_change(spt->vgpu->id, "remove", spt,
+ spt->shadow_page.type, se->val64, index);
+
+ gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
+ se->type, index, se->val64);
if (!ops->test_present(se))
return 0;
- if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+ if (ops->get_pfn(se) ==
+ vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
return 0;
if (gtt_type_is_pt(get_next_pt_type(se->type))) {
struct intel_vgpu_ppgtt_spt *s =
- ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
+ intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
if (!s) {
gvt_vgpu_err("fail to find guest page\n");
ret = -ENXIO;
goto fail;
}
- ret = ppgtt_invalidate_shadow_page(s);
+ ret = ppgtt_invalidate_spt(s);
if (ret)
goto fail;
- }
+ } else
+ ppgtt_invalidate_pte(spt, se);
+
return 0;
fail:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
@@ -1025,21 +1062,22 @@ fail:
return ret;
}
-static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
+static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
struct intel_gvt_gtt_entry *we, unsigned long index)
{
- struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
- struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_entry m;
struct intel_vgpu_ppgtt_spt *s;
int ret;
- trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
- we->val64, index);
+ trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
+ we->val64, index);
+
+ gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
+ we->type, index, we->val64);
if (gtt_type_is_pt(get_next_pt_type(we->type))) {
- s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
+ s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto fail;
@@ -1048,10 +1086,9 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
ppgtt_generate_shadow_entry(&m, s, we);
ppgtt_set_shadow_entry(spt, &m, index);
} else {
- ret = gtt_entry_p2m(vgpu, we, &m);
+ ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
if (ret)
goto fail;
- ppgtt_set_shadow_entry(spt, &m, index);
}
return 0;
fail:
@@ -1066,41 +1103,39 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
- struct intel_vgpu_ppgtt_spt *spt =
- guest_page_to_ppgtt_spt(oos_page->guest_page);
- struct intel_gvt_gtt_entry old, new, m;
+ struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
+ struct intel_gvt_gtt_entry old, new;
int index;
int ret;
trace_oos_change(vgpu->id, "sync", oos_page->id,
- oos_page->guest_page, spt->guest_page_type);
+ spt, spt->guest_page.type);
- old.type = new.type = get_entry_type(spt->guest_page_type);
+ old.type = new.type = get_entry_type(spt->guest_page.type);
old.val64 = new.val64 = 0;
- for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
- index++) {
+ for (index = 0; index < (I915_GTT_PAGE_SIZE >>
+ info->gtt_entry_size_shift); index++) {
ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
ops->get_entry(NULL, &new, index, true,
- oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
+ spt->guest_page.gfn << PAGE_SHIFT, vgpu);
if (old.val64 == new.val64
&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
continue;
trace_oos_sync(vgpu->id, oos_page->id,
- oos_page->guest_page, spt->guest_page_type,
+ spt, spt->guest_page.type,
new.val64, index);
- ret = gtt_entry_p2m(vgpu, &new, &m);
+ ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
if (ret)
return ret;
ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
- ppgtt_set_shadow_entry(spt, &m, index);
}
- oos_page->guest_page->write_cnt = 0;
+ spt->guest_page.write_cnt = 0;
list_del_init(&spt->post_shadow_list);
return 0;
}
@@ -1109,15 +1144,14 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
struct intel_vgpu_oos_page *oos_page)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_vgpu_ppgtt_spt *spt =
- guest_page_to_ppgtt_spt(oos_page->guest_page);
+ struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
trace_oos_change(vgpu->id, "detach", oos_page->id,
- oos_page->guest_page, spt->guest_page_type);
+ spt, spt->guest_page.type);
- oos_page->guest_page->write_cnt = 0;
- oos_page->guest_page->oos_page = NULL;
- oos_page->guest_page = NULL;
+ spt->guest_page.write_cnt = 0;
+ spt->guest_page.oos_page = NULL;
+ oos_page->spt = NULL;
list_del_init(&oos_page->vm_list);
list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
@@ -1125,50 +1159,49 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
return 0;
}
-static int attach_oos_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_oos_page *oos_page,
- struct intel_vgpu_guest_page *gpt)
+static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
+ struct intel_vgpu_ppgtt_spt *spt)
{
- struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt *gvt = spt->vgpu->gvt;
int ret;
- ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
- oos_page->mem, GTT_PAGE_SIZE);
+ ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
+ spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
+ oos_page->mem, I915_GTT_PAGE_SIZE);
if (ret)
return ret;
- oos_page->guest_page = gpt;
- gpt->oos_page = oos_page;
+ oos_page->spt = spt;
+ spt->guest_page.oos_page = oos_page;
list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
- trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
- gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
+ spt, spt->guest_page.type);
return 0;
}
-static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *gpt)
+static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
+ struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
int ret;
- ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
+ ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
if (ret)
return ret;
- trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
- gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
+ spt, spt->guest_page.type);
- list_del_init(&gpt->oos_page->vm_list);
- return sync_oos_page(vgpu, gpt->oos_page);
+ list_del_init(&oos_page->vm_list);
+ return sync_oos_page(spt->vgpu, oos_page);
}
-static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *gpt)
+static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
- struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt *gvt = spt->vgpu->gvt;
struct intel_gvt_gtt *gtt = &gvt->gtt;
- struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+ struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
int ret;
WARN(oos_page, "shadow PPGTT page has already has a oos page\n");
@@ -1176,31 +1209,30 @@ static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
if (list_empty(&gtt->oos_page_free_list_head)) {
oos_page = container_of(gtt->oos_page_use_list_head.next,
struct intel_vgpu_oos_page, list);
- ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ ret = ppgtt_set_guest_page_sync(oos_page->spt);
if (ret)
return ret;
- ret = detach_oos_page(vgpu, oos_page);
+ ret = detach_oos_page(spt->vgpu, oos_page);
if (ret)
return ret;
} else
oos_page = container_of(gtt->oos_page_free_list_head.next,
struct intel_vgpu_oos_page, list);
- return attach_oos_page(vgpu, oos_page, gpt);
+ return attach_oos_page(oos_page, spt);
}
-static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *gpt)
+static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
- struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
+ struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n"))
return -EINVAL;
- trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
- gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
+ trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
+ spt, spt->guest_page.type);
- list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
- return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
+ list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
+ return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
}
/**
@@ -1225,7 +1257,7 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
oos_page = container_of(pos,
struct intel_vgpu_oos_page, vm_list);
- ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
+ ret = ppgtt_set_guest_page_sync(oos_page->spt);
if (ret)
return ret;
}
@@ -1236,17 +1268,15 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
* The heart of PPGTT shadow page table.
*/
static int ppgtt_handle_guest_write_page_table(
- struct intel_vgpu_guest_page *gpt,
+ struct intel_vgpu_ppgtt_spt *spt,
struct intel_gvt_gtt_entry *we, unsigned long index)
{
- struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
int type = spt->shadow_page.type;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- struct intel_gvt_gtt_entry se;
-
- int ret;
+ struct intel_gvt_gtt_entry old_se;
int new_present;
+ int ret;
new_present = ops->test_present(we);
@@ -1255,21 +1285,21 @@ static int ppgtt_handle_guest_write_page_table(
* guarantee the ppgtt table is validated during the window between
* adding and removal.
*/
- ppgtt_get_shadow_entry(spt, &se, index);
+ ppgtt_get_shadow_entry(spt, &old_se, index);
if (new_present) {
- ret = ppgtt_handle_guest_entry_add(gpt, we, index);
+ ret = ppgtt_handle_guest_entry_add(spt, we, index);
if (ret)
goto fail;
}
- ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
+ ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
if (ret)
goto fail;
if (!new_present) {
- ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
- ppgtt_set_shadow_entry(spt, &se, index);
+ ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &old_se, index);
}
return 0;
@@ -1279,12 +1309,13 @@ fail:
return ret;
}
-static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
+
+
+static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
return enable_out_of_sync
- && gtt_type_is_pte_pt(
- guest_page_to_ppgtt_spt(gpt)->guest_page_type)
- && gpt->write_cnt >= 2;
+ && gtt_type_is_pte_pt(spt->guest_page.type)
+ && spt->guest_page.write_cnt >= 2;
}
static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
@@ -1324,8 +1355,8 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
GTT_ENTRY_NUM_IN_ONE_PAGE) {
ppgtt_get_guest_entry(spt, &ge, index);
- ret = ppgtt_handle_guest_write_page_table(
- &spt->guest_page, &ge, index);
+ ret = ppgtt_handle_guest_write_page_table(spt,
+ &ge, index);
if (ret)
return ret;
clear_bit(index, spt->post_shadow_bitmap);
@@ -1335,11 +1366,10 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
return 0;
}
-static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
+static int ppgtt_handle_guest_write_page_table_bytes(
+ struct intel_vgpu_ppgtt_spt *spt,
u64 pa, void *p_data, int bytes)
{
- struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
- struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
@@ -1354,7 +1384,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
ops->test_pse(&we);
if (bytes == info->gtt_entry_size) {
- ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
+ ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
if (ret)
return ret;
} else {
@@ -1362,7 +1392,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
int type = spt->shadow_page.type;
ppgtt_get_shadow_entry(spt, &se, index);
- ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
+ ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
if (ret)
return ret;
ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
@@ -1374,128 +1404,54 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
if (!enable_out_of_sync)
return 0;
- gpt->write_cnt++;
+ spt->guest_page.write_cnt++;
- if (gpt->oos_page)
- ops->set_entry(gpt->oos_page->mem, &we, index,
+ if (spt->guest_page.oos_page)
+ ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
false, 0, vgpu);
- if (can_do_out_of_sync(gpt)) {
- if (!gpt->oos_page)
- ppgtt_allocate_oos_page(vgpu, gpt);
+ if (can_do_out_of_sync(spt)) {
+ if (!spt->guest_page.oos_page)
+ ppgtt_allocate_oos_page(spt);
- ret = ppgtt_set_guest_page_oos(vgpu, gpt);
+ ret = ppgtt_set_guest_page_oos(spt);
if (ret < 0)
return ret;
}
return 0;
}
-/*
- * mm page table allocation policy for bdw+
- * - for ggtt, only virtual page table will be allocated.
- * - for ppgtt, dedicated virtual/shadow page table will be allocated.
- */
-static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
-{
- struct intel_vgpu *vgpu = mm->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_device_info *info = &gvt->device_info;
- void *mem;
-
- if (mm->type == INTEL_GVT_MM_PPGTT) {
- mm->page_table_entry_cnt = 4;
- mm->page_table_entry_size = mm->page_table_entry_cnt *
- info->gtt_entry_size;
- mem = kzalloc(mm->has_shadow_page_table ?
- mm->page_table_entry_size * 2
- : mm->page_table_entry_size, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
- mm->virtual_page_table = mem;
- if (!mm->has_shadow_page_table)
- return 0;
- mm->shadow_page_table = mem + mm->page_table_entry_size;
- } else if (mm->type == INTEL_GVT_MM_GGTT) {
- mm->page_table_entry_cnt =
- (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
- mm->page_table_entry_size = mm->page_table_entry_cnt *
- info->gtt_entry_size;
- mem = vzalloc(mm->page_table_entry_size);
- if (!mem)
- return -ENOMEM;
- mm->virtual_page_table = mem;
- }
- return 0;
-}
-
-static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
-{
- if (mm->type == INTEL_GVT_MM_PPGTT) {
- kfree(mm->virtual_page_table);
- } else if (mm->type == INTEL_GVT_MM_GGTT) {
- if (mm->virtual_page_table)
- vfree(mm->virtual_page_table);
- }
- mm->virtual_page_table = mm->shadow_page_table = NULL;
-}
-
-static void invalidate_mm(struct intel_vgpu_mm *mm)
+static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt *gtt = &gvt->gtt;
struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
struct intel_gvt_gtt_entry se;
- int i;
+ int index;
- if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
+ if (!mm->ppgtt_mm.shadowed)
return;
- for (i = 0; i < mm->page_table_entry_cnt; i++) {
- ppgtt_get_shadow_root_entry(mm, &se, i);
+ for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
+ ppgtt_get_shadow_root_entry(mm, &se, index);
+
if (!ops->test_present(&se))
continue;
- ppgtt_invalidate_shadow_page_by_shadow_entry(
- vgpu, &se);
+
+ ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
se.val64 = 0;
- ppgtt_set_shadow_root_entry(mm, &se, i);
+ ppgtt_set_shadow_root_entry(mm, &se, index);
- trace_gpt_change(vgpu->id, "destroy root pointer",
- NULL, se.type, se.val64, i);
+ trace_spt_guest_change(vgpu->id, "destroy root pointer",
+ NULL, se.type, se.val64, index);
}
- mm->shadowed = false;
-}
-
-/**
- * intel_vgpu_destroy_mm - destroy a mm object
- * @mm: a kref object
- *
- * This function is used to destroy a mm object for vGPU
- *
- */
-void intel_vgpu_destroy_mm(struct kref *mm_ref)
-{
- struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
- struct intel_vgpu *vgpu = mm->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
- struct intel_gvt_gtt *gtt = &gvt->gtt;
-
- if (!mm->initialized)
- goto out;
- list_del(&mm->list);
- list_del(&mm->lru_list);
-
- if (mm->has_shadow_page_table)
- invalidate_mm(mm);
-
- gtt->mm_free_page_table(mm);
-out:
- kfree(mm);
+ mm->ppgtt_mm.shadowed = false;
}
-static int shadow_mm(struct intel_vgpu_mm *mm)
+
+static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
struct intel_vgpu *vgpu = mm->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
@@ -1503,119 +1459,155 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
struct intel_vgpu_ppgtt_spt *spt;
struct intel_gvt_gtt_entry ge, se;
- int i;
- int ret;
+ int index, ret;
- if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
+ if (mm->ppgtt_mm.shadowed)
return 0;
- mm->shadowed = true;
+ mm->ppgtt_mm.shadowed = true;
+
+ for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
+ ppgtt_get_guest_root_entry(mm, &ge, index);
- for (i = 0; i < mm->page_table_entry_cnt; i++) {
- ppgtt_get_guest_root_entry(mm, &ge, i);
if (!ops->test_present(&ge))
continue;
- trace_gpt_change(vgpu->id, __func__, NULL,
- ge.type, ge.val64, i);
+ trace_spt_guest_change(vgpu->id, __func__, NULL,
+ ge.type, ge.val64, index);
- spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
+ spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
if (IS_ERR(spt)) {
gvt_vgpu_err("fail to populate guest root pointer\n");
ret = PTR_ERR(spt);
goto fail;
}
ppgtt_generate_shadow_entry(&se, spt, &ge);
- ppgtt_set_shadow_root_entry(mm, &se, i);
+ ppgtt_set_shadow_root_entry(mm, &se, index);
- trace_gpt_change(vgpu->id, "populate root pointer",
- NULL, se.type, se.val64, i);
+ trace_spt_guest_change(vgpu->id, "populate root pointer",
+ NULL, se.type, se.val64, index);
}
+
return 0;
fail:
- invalidate_mm(mm);
+ invalidate_ppgtt_mm(mm);
return ret;
}
+static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_mm *mm;
+
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm)
+ return NULL;
+
+ mm->vgpu = vgpu;
+ kref_init(&mm->ref);
+ atomic_set(&mm->pincount, 0);
+
+ return mm;
+}
+
+static void vgpu_free_mm(struct intel_vgpu_mm *mm)
+{
+ kfree(mm);
+}
+
/**
- * intel_vgpu_create_mm - create a mm object for a vGPU
+ * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
* @vgpu: a vGPU
- * @mm_type: mm object type, should be PPGTT or GGTT
- * @virtual_page_table: page table root pointers. Could be NULL if user wants
- * to populate shadow later.
- * @page_table_level: describe the page table level of the mm object
- * @pde_base_index: pde root pointer base in GGTT MMIO.
+ * @root_entry_type: ppgtt root entry type
+ * @pdps: guest pdps.
*
- * This function is used to create a mm object for a vGPU.
+ * This function is used to create a ppgtt mm object for a vGPU.
*
* Returns:
* Zero on success, negative error code in pointer if failed.
*/
-struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
- int mm_type, void *virtual_page_table, int page_table_level,
- u32 pde_base_index)
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
struct intel_gvt *gvt = vgpu->gvt;
- struct intel_gvt_gtt *gtt = &gvt->gtt;
struct intel_vgpu_mm *mm;
int ret;
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- ret = -ENOMEM;
- goto fail;
- }
+ mm = vgpu_alloc_mm(vgpu);
+ if (!mm)
+ return ERR_PTR(-ENOMEM);
- mm->type = mm_type;
+ mm->type = INTEL_GVT_MM_PPGTT;
- if (page_table_level == 1)
- mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
- else if (page_table_level == 3)
- mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
- else if (page_table_level == 4)
- mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
- else {
- WARN_ON(1);
- ret = -EINVAL;
- goto fail;
- }
+ GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
+ root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
+ mm->ppgtt_mm.root_entry_type = root_entry_type;
- mm->page_table_level = page_table_level;
- mm->pde_base_index = pde_base_index;
+ INIT_LIST_HEAD(&mm->ppgtt_mm.list);
+ INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
- mm->vgpu = vgpu;
- mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
-
- kref_init(&mm->ref);
- atomic_set(&mm->pincount, 0);
- INIT_LIST_HEAD(&mm->list);
- INIT_LIST_HEAD(&mm->lru_list);
- list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);
+ if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
+ mm->ppgtt_mm.guest_pdps[0] = pdps[0];
+ else
+ memcpy(mm->ppgtt_mm.guest_pdps, pdps,
+ sizeof(mm->ppgtt_mm.guest_pdps));
- ret = gtt->mm_alloc_page_table(mm);
+ ret = shadow_ppgtt_mm(mm);
if (ret) {
- gvt_vgpu_err("fail to allocate page table for mm\n");
- goto fail;
+ gvt_vgpu_err("failed to shadow ppgtt mm\n");
+ vgpu_free_mm(mm);
+ return ERR_PTR(ret);
}
- mm->initialized = true;
+ list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+ list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+ return mm;
+}
+
+static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_mm *mm;
+ unsigned long nr_entries;
- if (virtual_page_table)
- memcpy(mm->virtual_page_table, virtual_page_table,
- mm->page_table_entry_size);
+ mm = vgpu_alloc_mm(vgpu);
+ if (!mm)
+ return ERR_PTR(-ENOMEM);
- if (mm->has_shadow_page_table) {
- ret = shadow_mm(mm);
- if (ret)
- goto fail;
- list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+ mm->type = INTEL_GVT_MM_GGTT;
+
+ nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
+ mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
+ vgpu->gvt->device_info.gtt_entry_size);
+ if (!mm->ggtt_mm.virtual_ggtt) {
+ vgpu_free_mm(mm);
+ return ERR_PTR(-ENOMEM);
}
+
return mm;
-fail:
- gvt_vgpu_err("fail to create mm\n");
- if (mm)
- intel_gvt_mm_unreference(mm);
- return ERR_PTR(ret);
+}
+
+/**
+ * _intel_vgpu_mm_release - destroy a mm object
+ * @mm_ref: a kref object
+ *
+ * This function is used to destroy a mm object for vGPU
+ *
+ */
+void _intel_vgpu_mm_release(struct kref *mm_ref)
+{
+ struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
+
+ if (GEM_WARN_ON(atomic_read(&mm->pincount)))
+ gvt_err("vgpu mm pin count bug detected\n");
+
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ list_del(&mm->ppgtt_mm.list);
+ list_del(&mm->ppgtt_mm.lru_list);
+ invalidate_ppgtt_mm(mm);
+ } else {
+ vfree(mm->ggtt_mm.virtual_ggtt);
+ }
+
+ vgpu_free_mm(mm);
}
/**
@@ -1626,9 +1618,6 @@ fail:
*/
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
- if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
- return;
-
atomic_dec(&mm->pincount);
}
@@ -1647,36 +1636,34 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
int ret;
- if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
- return 0;
+ atomic_inc(&mm->pincount);
- if (!mm->shadowed) {
- ret = shadow_mm(mm);
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ ret = shadow_ppgtt_mm(mm);
if (ret)
return ret;
+
+ list_move_tail(&mm->ppgtt_mm.lru_list,
+ &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
+
}
- atomic_inc(&mm->pincount);
- list_del_init(&mm->lru_list);
- list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
return 0;
}
-static int reclaim_one_mm(struct intel_gvt *gvt)
+static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
struct intel_vgpu_mm *mm;
struct list_head *pos, *n;
- list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
- mm = container_of(pos, struct intel_vgpu_mm, lru_list);
+ list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
- if (mm->type != INTEL_GVT_MM_PPGTT)
- continue;
if (atomic_read(&mm->pincount))
continue;
- list_del_init(&mm->lru_list);
- invalidate_mm(mm);
+ list_del_init(&mm->ppgtt_mm.lru_list);
+ invalidate_ppgtt_mm(mm);
return 1;
}
return 0;
@@ -1692,10 +1679,7 @@ static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
- if (WARN_ON(!mm->has_shadow_page_table))
- return -EINVAL;
-
- s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
+ s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
if (!s)
return -ENXIO;
@@ -1726,85 +1710,72 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
unsigned long gpa = INTEL_GVT_INVALID_ADDR;
unsigned long gma_index[4];
struct intel_gvt_gtt_entry e;
- int i, index;
+ int i, levels = 0;
int ret;
- if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
- return INTEL_GVT_INVALID_ADDR;
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
+ mm->type != INTEL_GVT_MM_PPGTT);
if (mm->type == INTEL_GVT_MM_GGTT) {
if (!vgpu_gmadr_is_valid(vgpu, gma))
goto err;
- ret = ggtt_get_guest_entry(mm, &e,
- gma_ops->gma_to_ggtt_pte_index(gma));
- if (ret)
- goto err;
- gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
- + (gma & ~GTT_PAGE_MASK);
+ ggtt_get_guest_entry(mm, &e,
+ gma_ops->gma_to_ggtt_pte_index(gma));
- trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
- return gpa;
- }
+ gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+ + (gma & ~I915_GTT_PAGE_MASK);
- switch (mm->page_table_level) {
- case 4:
- ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
- if (ret)
- goto err;
- gma_index[0] = gma_ops->gma_to_pml4_index(gma);
- gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
- gma_index[2] = gma_ops->gma_to_pde_index(gma);
- gma_index[3] = gma_ops->gma_to_pte_index(gma);
- index = 4;
- break;
- case 3:
- ret = ppgtt_get_shadow_root_entry(mm, &e,
- gma_ops->gma_to_l3_pdp_index(gma));
- if (ret)
- goto err;
- gma_index[0] = gma_ops->gma_to_pde_index(gma);
- gma_index[1] = gma_ops->gma_to_pte_index(gma);
- index = 2;
- break;
- case 2:
- ret = ppgtt_get_shadow_root_entry(mm, &e,
- gma_ops->gma_to_pde_index(gma));
- if (ret)
- goto err;
- gma_index[0] = gma_ops->gma_to_pte_index(gma);
- index = 1;
- break;
- default:
- WARN_ON(1);
- goto err;
- }
+ trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
+ } else {
+ switch (mm->ppgtt_mm.root_entry_type) {
+ case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
+ ppgtt_get_shadow_root_entry(mm, &e, 0);
+
+ gma_index[0] = gma_ops->gma_to_pml4_index(gma);
+ gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
+ gma_index[2] = gma_ops->gma_to_pde_index(gma);
+ gma_index[3] = gma_ops->gma_to_pte_index(gma);
+ levels = 4;
+ break;
+ case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
+ ppgtt_get_shadow_root_entry(mm, &e,
+ gma_ops->gma_to_l3_pdp_index(gma));
+
+ gma_index[0] = gma_ops->gma_to_pde_index(gma);
+ gma_index[1] = gma_ops->gma_to_pte_index(gma);
+ levels = 2;
+ break;
+ default:
+ GEM_BUG_ON(1);
+ }
- /* walk into the shadow page table and get gpa from guest entry */
- for (i = 0; i < index; i++) {
- ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
- (i == index - 1));
- if (ret)
- goto err;
+ /* walk the shadow page table and get gpa from guest entry */
+ for (i = 0; i < levels; i++) {
+ ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
+ (i == levels - 1));
+ if (ret)
+ goto err;
- if (!pte_ops->test_present(&e)) {
- gvt_dbg_core("GMA 0x%lx is not present\n", gma);
- goto err;
+ if (!pte_ops->test_present(&e)) {
+ gvt_dbg_core("GMA 0x%lx is not present\n", gma);
+ goto err;
+ }
}
- }
- gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
- + (gma & ~GTT_PAGE_MASK);
+ gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
+ (gma & ~I915_GTT_PAGE_MASK);
+ trace_gma_translate(vgpu->id, "ppgtt", 0,
+ mm->ppgtt_mm.root_entry_type, gma, gpa);
+ }
- trace_gma_translate(vgpu->id, "ppgtt", 0,
- mm->page_table_level, gma, gpa);
return gpa;
err:
gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
return INTEL_GVT_INVALID_ADDR;
}
-static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes)
{
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
@@ -1833,7 +1804,7 @@ static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
* Returns:
* Zero on success, error code if failed.
*/
-int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
+int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
@@ -1843,11 +1814,11 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
return -EINVAL;
off -= info->gtt_start_offset;
- ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
+ ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
return ret;
}
-static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
+static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -1855,14 +1826,15 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
- unsigned long gma;
+ unsigned long gma, gfn;
struct intel_gvt_gtt_entry e, m;
+ dma_addr_t dma_addr;
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
- gma = g_gtt_index << GTT_PAGE_SHIFT;
+ gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
/* the VM may configure the whole GM space when ballooning is used */
if (!vgpu_gmadr_is_valid(vgpu, gma))
@@ -1872,30 +1844,42 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
+ m = e;
if (ops->test_present(&e)) {
- ret = gtt_entry_p2m(vgpu, &e, &m);
+ gfn = ops->get_pfn(&e);
+
+ /* one PTE update may be issued in multiple writes and the
+ * first write may not construct a valid gfn
+ */
+ if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ goto out;
+ }
+
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
+ &dma_addr);
if (ret) {
- gvt_vgpu_err("fail to translate guest gtt entry\n");
+ gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
* update the entry in this situation p2m will fail
* settting the shadow entry to point to a scratch page
*/
- ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
- }
- } else {
- m = e;
- ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
- }
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ } else
+ ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
+ } else
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
- gtt_invalidate(gvt->dev_priv);
+out:
+ ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
+ ggtt_invalidate(gvt->dev_priv);
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
return 0;
}
/*
- * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
+ * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
* @vgpu: a vGPU
* @off: register offset
* @p_data: data from guest write
@@ -1906,8 +1890,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
* Returns:
* Zero on success, error code if failed.
*/
-int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
- void *p_data, unsigned int bytes)
+int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int off, void *p_data, unsigned int bytes)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
int ret;
@@ -1916,7 +1900,7 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return -EINVAL;
off -= info->gtt_start_offset;
- ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
+ ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
return ret;
}
@@ -1925,7 +1909,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- int page_entry_num = GTT_PAGE_SIZE >>
+ int page_entry_num = I915_GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
@@ -1949,7 +1933,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
return -ENOMEM;
}
gtt->scratch_pt[type].page_mfn =
- (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+ (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
vgpu->id, type, gtt->scratch_pt[type].page_mfn);
@@ -1992,7 +1976,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL) {
daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
- GTT_PAGE_SHIFT);
+ I915_GTT_PAGE_SHIFT);
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
__free_page(vgpu->gtt.scratch_pt[i].page);
vgpu->gtt.scratch_pt[i].page = NULL;
@@ -2033,45 +2017,49 @@ err:
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
- struct intel_vgpu_mm *ggtt_mm;
- hash_init(gtt->guest_page_hash_table);
- hash_init(gtt->shadow_page_hash_table);
+ INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
- INIT_LIST_HEAD(&gtt->mm_list_head);
+ INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
INIT_LIST_HEAD(&gtt->oos_page_list_head);
INIT_LIST_HEAD(&gtt->post_shadow_list_head);
- intel_vgpu_reset_ggtt(vgpu);
-
- ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
- NULL, 1, 0);
- if (IS_ERR(ggtt_mm)) {
+ gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
+ if (IS_ERR(gtt->ggtt_mm)) {
gvt_vgpu_err("fail to create mm for ggtt.\n");
- return PTR_ERR(ggtt_mm);
+ return PTR_ERR(gtt->ggtt_mm);
}
- gtt->ggtt_mm = ggtt_mm;
+ intel_vgpu_reset_ggtt(vgpu);
return create_scratch_page_tree(vgpu);
}
-static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
struct list_head *pos, *n;
struct intel_vgpu_mm *mm;
- list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
- mm = container_of(pos, struct intel_vgpu_mm, list);
- if (mm->type == type) {
- vgpu->gvt->gtt.mm_free_page_table(mm);
- list_del(&mm->list);
- list_del(&mm->lru_list);
- kfree(mm);
- }
+ list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
+ intel_vgpu_destroy_mm(mm);
+ }
+
+ if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
+ gvt_err("vgpu ppgtt mm is not fully destroyed\n");
+
+ if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
+ gvt_err("Why we still has spt not freed?\n");
+ ppgtt_free_all_spt(vgpu);
}
}
+static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
+{
+ intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
+ vgpu->gtt.ggtt_mm = NULL;
+}
+
/**
* intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization
* @vgpu: a vGPU
@@ -2084,11 +2072,9 @@ static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
*/
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
- ppgtt_free_all_shadow_page(vgpu);
+ intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+ intel_vgpu_destroy_ggtt_mm(vgpu);
release_scratch_page_tree(vgpu);
-
- intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
- intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}
static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2150,99 +2136,78 @@ fail:
* pointer to mm object on success, NULL if failed.
*/
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level, void *root_entry)
+ u64 pdps[])
{
- struct list_head *pos;
struct intel_vgpu_mm *mm;
- u64 *src, *dst;
-
- list_for_each(pos, &vgpu->gtt.mm_list_head) {
- mm = container_of(pos, struct intel_vgpu_mm, list);
- if (mm->type != INTEL_GVT_MM_PPGTT)
- continue;
-
- if (mm->page_table_level != page_table_level)
- continue;
+ struct list_head *pos;
- src = root_entry;
- dst = mm->virtual_page_table;
+ list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
- if (page_table_level == 3) {
- if (src[0] == dst[0]
- && src[1] == dst[1]
- && src[2] == dst[2]
- && src[3] == dst[3])
+ switch (mm->ppgtt_mm.root_entry_type) {
+ case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
+ if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
return mm;
- } else {
- if (src[0] == dst[0])
+ break;
+ case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
+ if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
+ sizeof(mm->ppgtt_mm.guest_pdps)))
return mm;
+ break;
+ default:
+ GEM_BUG_ON(1);
}
}
return NULL;
}
/**
- * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
- * g2v notification
+ * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
* @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
+ * @root_entry_type: ppgtt root entry type
+ * @pdps: guest pdps
*
- * This function is used to create a PPGTT mm object from a guest to GVT-g
- * notification.
+ * This function is used to find or create a PPGTT mm object from a guest.
*
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level)
+struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
- u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
- if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
- return -EINVAL;
-
- mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
if (mm) {
- intel_gvt_mm_reference(mm);
+ intel_vgpu_mm_get(mm);
} else {
- mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
- pdp, page_table_level, 0);
- if (IS_ERR(mm)) {
+ mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
+ if (IS_ERR(mm))
gvt_vgpu_err("fail to create mm\n");
- return PTR_ERR(mm);
- }
}
- return 0;
+ return mm;
}
/**
- * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
- * g2v notification
+ * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
* @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
+ * @pdps: guest pdps
*
- * This function is used to create a PPGTT mm object from a guest to GVT-g
- * notification.
+ * This function is used to find a PPGTT mm object from a guest and destroy it.
*
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level)
+int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
- u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
- if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
- return -EINVAL;
-
- mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
if (!mm) {
gvt_vgpu_err("fail to find ppgtt instance.\n");
return -EINVAL;
}
- intel_gvt_mm_unreference(mm);
+ intel_vgpu_mm_put(mm);
return 0;
}
@@ -2269,8 +2234,6 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
|| IS_KABYLAKE(gvt->dev_priv)) {
gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
- gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
- gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
} else {
return -ENODEV;
}
@@ -2288,19 +2251,20 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
__free_page(virt_to_page(page));
return -ENOMEM;
}
- gvt->gtt.scratch_ggtt_page = virt_to_page(page);
- gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+
+ gvt->gtt.scratch_page = virt_to_page(page);
+ gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
- __free_page(gvt->gtt.scratch_ggtt_page);
+ __free_page(gvt->gtt.scratch_page);
return ret;
}
}
- INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
+ INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
return 0;
}
@@ -2315,18 +2279,40 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
struct device *dev = &gvt->dev_priv->drm.pdev->dev;
- dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
- GTT_PAGE_SHIFT);
+ dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
+ I915_GTT_PAGE_SHIFT);
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
- __free_page(gvt->gtt.scratch_ggtt_page);
+ __free_page(gvt->gtt.scratch_page);
if (enable_out_of_sync)
clean_spt_oos(gvt);
}
/**
+ * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
+ * @vgpu: a vGPU
+ *
+ * This function is called to invalidate all PPGTT instances of a vGPU.
+ *
+ */
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ list_del_init(&mm->ppgtt_mm.lru_list);
+ if (mm->ppgtt_mm.shadowed)
+ invalidate_ppgtt_mm(mm);
+ }
+ }
+}
+
+/**
* intel_vgpu_reset_ggtt - reset the GGTT entry
* @vgpu: a vGPU
*
@@ -2338,28 +2324,25 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
u32 index;
- u32 offset;
u32 num_entries;
- struct intel_gvt_gtt_entry e;
- memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
- e.type = GTT_TYPE_GGTT_PTE;
- ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
- e.val64 |= _PAGE_PRESENT;
+ pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
+ pte_ops->set_present(&entry);
index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
- for (offset = 0; offset < num_entries; offset++)
- ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+ while (num_entries--)
+ ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
- for (offset = 0; offset < num_entries; offset++)
- ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+ while (num_entries--)
+ ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
- gtt_invalidate(dev_priv);
+ ggtt_invalidate(dev_priv);
}
/**
@@ -2372,22 +2355,10 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
*/
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
- int i;
-
- ppgtt_free_all_shadow_page(vgpu);
-
/* Shadow pages are only created when there is no page
* table tracking data, so remove page tracking data after
* removing the shadow pages.
*/
- intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
-
+ intel_vgpu_destroy_all_ppgtt_mm(vgpu);
intel_vgpu_reset_ggtt(vgpu);
-
- /* clear scratch page for security */
- for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
- if (vgpu->gtt.scratch_pt[i].page != NULL)
- memset(page_address(vgpu->gtt.scratch_pt[i].page),
- 0, PAGE_SIZE);
- }
}
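For reference, a minimal sketch (not part of the patch) of how a caller such as the g2v notification handler is expected to use the reworked PPGTT mm API after this change. Only intel_vgpu_get_ppgtt_mm()/intel_vgpu_put_ppgtt_mm() come from the patch; the wrapper names and error handling here are illustrative assumptions.

static int example_g2v_create_ppgtt(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	/* Finds an existing PPGTT mm by guest pdps or creates and shadows
	 * a new one; a reference is held on return. */
	mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	return 0;
}

static int example_g2v_destroy_ppgtt(struct intel_vgpu *vgpu, u64 pdps[])
{
	/* Drops the reference taken above; on the last put the mm is
	 * invalidated and freed by _intel_vgpu_mm_release(). */
	return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
}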
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 30a4c8d..a8b369c 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -34,13 +34,11 @@
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
-#define GTT_PAGE_SHIFT 12
-#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
-#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
+#define I915_GTT_PAGE_SHIFT 12
+#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
struct intel_vgpu_mm;
-#define INTEL_GVT_GTT_HASH_BITS 8
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {
@@ -63,6 +61,7 @@ struct intel_gvt_gtt_pte_ops {
struct intel_vgpu *vgpu);
bool (*test_present)(struct intel_gvt_gtt_entry *e);
void (*clear_present)(struct intel_gvt_gtt_entry *e);
+ void (*set_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
@@ -84,15 +83,10 @@ struct intel_gvt_gtt {
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
- struct list_head mm_lru_list_head;
+ struct list_head ppgtt_mm_lru_list_head;
- struct page *scratch_ggtt_page;
- unsigned long scratch_ggtt_mfn;
-};
-
-enum {
- INTEL_GVT_MM_GGTT = 0,
- INTEL_GVT_MM_PPGTT,
+ struct page *scratch_page;
+ unsigned long scratch_mfn;
};
typedef enum {
@@ -125,66 +119,60 @@ typedef enum {
GTT_TYPE_MAX,
} intel_gvt_gtt_type_t;
-struct intel_vgpu_mm {
- int type;
- bool initialized;
- bool shadowed;
+enum intel_gvt_mm_type {
+ INTEL_GVT_MM_GGTT,
+ INTEL_GVT_MM_PPGTT,
+};
- int page_table_entry_type;
- u32 page_table_entry_size;
- u32 page_table_entry_cnt;
- void *virtual_page_table;
- void *shadow_page_table;
+#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
- int page_table_level;
- bool has_shadow_page_table;
- u32 pde_base_index;
+struct intel_vgpu_mm {
+ enum intel_gvt_mm_type type;
+ struct intel_vgpu *vgpu;
- struct list_head list;
struct kref ref;
atomic_t pincount;
- struct list_head lru_list;
- struct intel_vgpu *vgpu;
-};
-
-extern int intel_vgpu_mm_get_entry(
- struct intel_vgpu_mm *mm,
- void *page_table, struct intel_gvt_gtt_entry *e,
- unsigned long index);
-extern int intel_vgpu_mm_set_entry(
- struct intel_vgpu_mm *mm,
- void *page_table, struct intel_gvt_gtt_entry *e,
- unsigned long index);
-
-#define ggtt_get_guest_entry(mm, e, index) \
- intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
-
-#define ggtt_set_guest_entry(mm, e, index) \
- intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
-
-#define ggtt_get_shadow_entry(mm, e, index) \
- intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
-
-#define ggtt_set_shadow_entry(mm, e, index) \
- intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+ union {
+ struct {
+ intel_gvt_gtt_type_t root_entry_type;
+ /*
+ * The 4 PDPs in ring context. For 48bit addressing,
+ * only PDP0 is valid and points to PML4. For 32bit
+ * addressing, all 4 are used as true PDPs.
+ */
+ u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
+ u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
+ bool shadowed;
+
+ struct list_head list;
+ struct list_head lru_list;
+ } ppgtt_mm;
+ struct {
+ void *virtual_ggtt;
+ } ggtt_mm;
+ };
+};
-#define ppgtt_get_guest_root_entry(mm, e, index) \
- intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
-#define ppgtt_set_guest_root_entry(mm, e, index) \
- intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
+{
+ kref_get(&mm->ref);
+}
-#define ppgtt_get_shadow_root_entry(mm, e, index) \
- intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+void _intel_vgpu_mm_release(struct kref *mm_ref);
-#define ppgtt_set_shadow_root_entry(mm, e, index) \
- intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
+{
+ kref_put(&mm->ref, _intel_vgpu_mm_release);
+}
-extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
- int mm_type, void *virtual_page_table, int page_table_level,
- u32 pde_base_index);
-extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
+{
+ intel_vgpu_mm_put(mm);
+}
struct intel_vgpu_guest_page;
@@ -193,23 +181,20 @@ struct intel_vgpu_scratch_pt {
unsigned long page_mfn;
};
-
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
- struct list_head mm_list_head;
- DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
- DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
- atomic_t n_write_protected_guest_page;
+ struct list_head ppgtt_mm_list_head;
+ struct radix_tree_root spt_tree;
struct list_head oos_page_list_head;
struct list_head post_shadow_list_head;
struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
-
};
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
@@ -218,78 +203,43 @@ extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level, void *root_entry);
-struct intel_vgpu_oos_page;
-
-struct intel_vgpu_shadow_page {
- void *vaddr;
- struct page *page;
- int type;
- struct hlist_node node;
- unsigned long mfn;
-};
-
-struct intel_vgpu_guest_page {
- struct hlist_node node;
- bool writeprotection;
- unsigned long gfn;
- int (*handler)(void *, u64, void *, int);
- void *data;
- unsigned long write_cnt;
- struct intel_vgpu_oos_page *oos_page;
-};
-
struct intel_vgpu_oos_page {
- struct intel_vgpu_guest_page *guest_page;
+ struct intel_vgpu_ppgtt_spt *spt;
struct list_head list;
struct list_head vm_list;
int id;
- unsigned char mem[GTT_PAGE_SIZE];
+ unsigned char mem[I915_GTT_PAGE_SIZE];
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
+/* Represents a vgpu shadow page table. */
struct intel_vgpu_ppgtt_spt {
- struct intel_vgpu_shadow_page shadow_page;
- struct intel_vgpu_guest_page guest_page;
- int guest_page_type;
atomic_t refcount;
struct intel_vgpu *vgpu;
- DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
- struct list_head post_shadow_list;
-};
-
-int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *guest_page,
- unsigned long gfn,
- int (*handler)(void *gp, u64, void *, int),
- void *data);
-void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *guest_page);
+ struct {
+ intel_gvt_gtt_type_t type;
+ void *vaddr;
+ struct page *page;
+ unsigned long mfn;
+ } shadow_page;
-int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *guest_page);
+ struct {
+ intel_gvt_gtt_type_t type;
+ unsigned long gfn;
+ unsigned long write_cnt;
+ struct intel_vgpu_oos_page *oos_page;
+ } guest_page;
-void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *guest_page);
-
-struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
- struct intel_vgpu *vgpu, unsigned long gfn);
+ DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
+ struct list_head post_shadow_list;
+};
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
-static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
-{
- kref_get(&mm->ref);
-}
-
-static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
-{
- kref_put(&mm->ref, intel_vgpu_destroy_mm);
-}
-
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
@@ -298,18 +248,17 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
unsigned long gma);
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level, void *root_entry);
+ u64 pdps[]);
-int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level);
+struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
+ intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
-int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level);
+int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
-int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
-int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
+int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
#endif /* _GVT_GTT_H_ */
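As a side note, a hypothetical helper (not in the patch) showing how the old page_table_level values 3 and 4 map onto the new root_entry_type parameter; it only restates the mapping that the removed intel_vgpu_create_mm() used to perform internally.

static inline intel_gvt_gtt_type_t example_root_entry_type(int page_table_level)
{
	/* A 4-level PPGTT has a single PML4 root entry; a 3-level PPGTT
	 * exposes four PDP root entries in the ring context. */
	return page_table_level == 4 ? GTT_TYPE_PPGTT_ROOT_L4_ENTRY :
				       GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
}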
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index aaa347f..61bd14f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -36,6 +36,8 @@
#include "i915_drv.h"
#include "gvt.h"
+#include <linux/vfio.h>
+#include <linux/mdev.h>
struct intel_gvt_host intel_gvt_host;
@@ -44,6 +46,129 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
+static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
+ const char *name)
+{
+ int i;
+ struct intel_vgpu_type *t;
+ const char *driver_name = dev_driver_string(
+ &gvt->dev_priv->drm.pdev->dev);
+
+ for (i = 0; i < gvt->num_types; i++) {
+ t = &gvt->types[i];
+ if (!strncmp(t->name, name + strlen(driver_name) + 1,
+ sizeof(t->name)))
+ return t;
+ }
+
+ return NULL;
+}
+
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ num = 0;
+ else
+ num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\nresolution: %s\n"
+ "weight: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence, vgpu_edid_str(type->resolution),
+ type->weight);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *gvt_type_attrs[] = {
+ &mdev_type_attr_available_instances.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
+ NULL,
+};
+
+static struct attribute_group *gvt_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
+ struct attribute_group ***intel_vgpu_type_groups)
+{
+ *type_attrs = gvt_type_attrs;
+ *intel_vgpu_type_groups = gvt_vgpu_type_groups;
+ return true;
+}
+
+static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (WARN_ON(!group))
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = gvt_type_attrs;
+ gvt_vgpu_type_groups[i] = group;
+ }
+
+ return true;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = gvt_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return false;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = gvt_vgpu_type_groups[i];
+ gvt_vgpu_type_groups[i] = NULL;
+ kfree(group);
+ }
+}
+
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -54,6 +179,11 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
+ .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
+ .get_gvt_attrs = intel_get_gvt_attrs,
+ .vgpu_query_plane = intel_vgpu_query_plane,
+ .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
+ .write_protect_handler = intel_vgpu_page_track_handler,
};
/**
@@ -191,17 +321,18 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!gvt))
return;
+ intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
- intel_gvt_clean_opregion(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
idr_destroy(&gvt->vgpu_idr);
@@ -256,6 +387,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_idr;
+ intel_gvt_init_engine_mmio_context(gvt);
+
ret = intel_gvt_load_firmware(gvt);
if (ret)
goto out_clean_mmio_info;
@@ -268,13 +401,9 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_irq;
- ret = intel_gvt_init_opregion(gvt);
- if (ret)
- goto out_clean_gtt;
-
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
- goto out_clean_opregion;
+ goto out_clean_gtt;
ret = intel_gvt_init_sched_policy(gvt);
if (ret)
@@ -292,6 +421,12 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_thread;
+ ret = intel_gvt_init_vgpu_type_groups(gvt);
+ if (!ret) {
+ gvt_err("failed to init vgpu type groups\n");
+ goto out_clean_types;
+ }
+
ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
&intel_gvt_ops);
if (ret) {
@@ -307,6 +442,10 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
}
gvt->idle_vgpu = vgpu;
+ ret = intel_gvt_debugfs_init(gvt);
+ if (ret)
+ gvt_err("debugfs registeration failed, go on.\n");
+
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
return 0;
@@ -321,8 +460,6 @@ out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
-out_clean_opregion:
- intel_gvt_clean_opregion(gvt);
out_clean_gtt:
intel_gvt_clean_gtt(gvt);
out_clean_irq:
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 9c2e7c0..efacd8a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -44,8 +44,11 @@
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
-#include "render.h"
+#include "mmio_context.h"
#include "cmd_parser.h"
+#include "fb_decoder.h"
+#include "dmabuf.h"
+#include "page_track.h"
#define GVT_MAX_VGPU 8
@@ -80,7 +83,6 @@ struct intel_gvt_device_info {
struct intel_vgpu_gm {
u64 aperture_sz;
u64 hidden_sz;
- void *aperture_va;
struct drm_mm_node low_gm_node;
struct drm_mm_node high_gm_node;
};
@@ -123,18 +125,16 @@ struct intel_vgpu_irq {
};
struct intel_vgpu_opregion {
+ bool mapped;
void *va;
u32 gfn[INTEL_GVT_OPREGION_PAGES];
- struct page *pages[INTEL_GVT_OPREGION_PAGES];
};
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
-#define INTEL_GVT_MAX_PORT 5
-
struct intel_vgpu_display {
struct intel_vgpu_i2c_edid i2c_edid;
- struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
+ struct intel_vgpu_port ports[I915_MAX_PORTS];
struct intel_vgpu_sbi sbi;
};
@@ -142,6 +142,33 @@ struct vgpu_sched_ctl {
int weight;
};
+enum {
+ INTEL_VGPU_EXECLIST_SUBMISSION = 1,
+ INTEL_VGPU_GUC_SUBMISSION,
+};
+
+struct intel_vgpu_submission_ops {
+ const char *name;
+ int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+ void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+ void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+};
+
+struct intel_vgpu_submission {
+ struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+ struct list_head workload_q_head[I915_NUM_ENGINES];
+ struct kmem_cache *workloads;
+ atomic_t running_workload_num;
+ struct i915_gem_context *shadow_ctx;
+ DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+ DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
+ void *ring_scan_buffer[I915_NUM_ENGINES];
+ int ring_scan_buffer_size[I915_NUM_ENGINES];
+ const struct intel_vgpu_submission_ops *ops;
+ int virtual_submission_interface;
+ bool active;
+};
+
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
@@ -161,16 +188,11 @@ struct intel_vgpu {
struct intel_vgpu_gtt gtt;
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
- struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
- struct list_head workload_q_head[I915_NUM_ENGINES];
- struct kmem_cache *workloads;
- atomic_t running_workload_num;
- /* 1/2K for each reserve ring buffer */
- void *reserve_ring_buffer_va[I915_NUM_ENGINES];
- int reserve_ring_buffer_size[I915_NUM_ENGINES];
- DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
- struct i915_gem_context *shadow_ctx;
- DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+ struct intel_vgpu_submission submission;
+ struct radix_tree_root page_track_tree;
+ u32 hws_pga[I915_NUM_ENGINES];
+
+ struct dentry *debugfs;
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -179,17 +201,37 @@ struct intel_vgpu {
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
- struct rb_root cache;
+
+ /*
+ * Two caches are used to avoid mapping duplicated pages (e.g.
+ * scratch pages). This helps to reduce DMA setup overhead.
+ */
+ struct rb_root gfn_cache;
+ struct rb_root dma_addr_cache;
+ unsigned long nr_cache_entries;
struct mutex cache_lock;
+
struct notifier_block iommu_notifier;
struct notifier_block group_notifier;
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
+ struct vfio_device *vfio_device;
} vdev;
#endif
+
+ struct list_head dmabuf_obj_list_head;
+ struct mutex dmabuf_lock;
+ struct idr object_idr;
+
+ struct completion vblank_done;
};
+/* validating GM health status */
+#define vgpu_is_vm_unhealthy(ret_val) \
+ (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
+
struct intel_gvt_gm {
unsigned long vgpu_allocated_low_gm_size;
unsigned long vgpu_allocated_high_gm_size;
@@ -231,7 +273,7 @@ struct intel_gvt_mmio {
unsigned int num_mmio_block;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
- unsigned int num_tracked_mmio;
+ unsigned long num_tracked_mmio;
};
struct intel_gvt_firmware {
@@ -240,11 +282,6 @@ struct intel_gvt_firmware {
bool firmware_loaded;
};
-struct intel_gvt_opregion {
- void *opregion_va;
- u32 opregion_pa;
-};
-
#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
char name[16];
@@ -268,7 +305,6 @@ struct intel_gvt {
struct intel_gvt_firmware firmware;
struct intel_gvt_irq irq;
struct intel_gvt_gtt gtt;
- struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
@@ -279,6 +315,13 @@ struct intel_gvt {
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
unsigned long service_request;
+
+ struct {
+ struct engine_mmio *mmio;
+ int ctx_mmio_count[I915_NUM_ENGINES];
+ } engine_mmio_list;
+
+ struct dentry *debugfs_root;
};
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
@@ -316,7 +359,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
@@ -378,23 +421,20 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
-/* Macros for easily accessing vGPU virtual/shadow register */
-#define vgpu_vreg(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg8(vgpu, reg) \
- (*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg16(vgpu, reg) \
- (*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg64(vgpu, reg) \
- (*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg8(vgpu, reg) \
- (*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg16(vgpu, reg) \
- (*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg64(vgpu, reg) \
- (*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+/* Macros for easily accessing vGPU virtual/shadow registers.
+   Explicitly separate the use of typed MMIO regs from raw offsets. */
+#define vgpu_vreg_t(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg(vgpu, offset) \
+ (*(u32 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_vreg64_t(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg64(vgpu, offset) \
+ (*(u64 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_sreg_t(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
+#define vgpu_sreg(vgpu, offset) \
+ (*(u32 *)(vgpu->mmio.sreg + (offset)))
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -484,16 +524,15 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
PCI_BASE_ADDRESS_MEM_MASK;
}
-void intel_gvt_clean_opregion(struct intel_gvt *gvt);
-int intel_gvt_init_opregion(struct intel_gvt *gvt);
-
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
struct intel_gvt_ops {
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
@@ -510,12 +549,21 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
+ struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
+ const char *name);
+ bool (*get_gvt_attrs)(struct attribute ***type_attrs,
+ struct attribute_group ***intel_vgpu_type_groups);
+ int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
+ int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
+ int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
+ unsigned int);
};
enum {
GVT_FAILSAFE_UNSUPPORTED_GUEST,
GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+ GVT_FAILSAFE_GUEST_ERR,
};
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
@@ -591,6 +639,12 @@ static inline bool intel_gvt_mmio_has_mode_mask(
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
+int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
+int intel_gvt_debugfs_init(struct intel_gvt *gvt);
+void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
+
#include "trace.h"
#include "mpt.h"
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1f840f6..8c5d5d0 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -166,7 +166,7 @@ int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
-static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
switch (reason) {
case GVT_FAILSAFE_UNSUPPORTED_GUEST:
@@ -174,6 +174,10 @@ static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
break;
case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
pr_err("Graphics resource is not enough for the guest\n");
+ break;
+ case GVT_FAILSAFE_GUEST_ERR:
+ pr_err("GVT Internal error for the guest\n");
+ break;
default:
break;
}
@@ -184,7 +188,9 @@ static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
unsigned int fence_num, void *p_data, unsigned int bytes)
{
- if (fence_num >= vgpu_fence_sz(vgpu)) {
+ unsigned int max_fence = vgpu_fence_sz(vgpu);
+
+ if (fence_num >= max_fence) {
/* When guest access oob fence regs without access
* pv_info first, we treat guest not supporting GVT,
@@ -197,7 +203,7 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
if (!vgpu->mmio.disable_warn_untrack) {
gvt_vgpu_err("found oob fence register access\n");
gvt_vgpu_err("total fence %d, access fence %d\n",
- vgpu_fence_sz(vgpu), fence_num);
+ max_fence, fence_num);
}
memset(p_data, 0, bytes);
return -EINVAL;
@@ -316,7 +322,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
/* sw will wait for the device to ack the reset request */
- vgpu_vreg(vgpu, offset) = 0;
+ vgpu_vreg(vgpu, offset) = 0;
return 0;
}
@@ -339,13 +345,13 @@ static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
- vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
- vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
- vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
- vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
} else
- vgpu_vreg(vgpu, PCH_PP_STATUS) &=
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
~(PP_ON | PP_SEQUENCE_POWER_DOWN
| PP_CYCLE_DELAY_ACTIVE);
return 0;
@@ -499,7 +505,7 @@ static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
} else {
vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
- vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
+ vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
}
return 0;
@@ -517,9 +523,9 @@ static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
- u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
+ u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
- u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
+ u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));
if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
(rx_ctl & FDI_RX_ENABLE) &&
@@ -560,12 +566,12 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
/* If imr bit has been masked */
- if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
+ if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
return 0;
- if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
+ if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
== fdi_tx_check_bits)
- && ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
+ && ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
== fdi_rx_check_bits))
return 1;
else
@@ -622,17 +628,17 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
if (ret < 0)
return ret;
if (ret)
- vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
+ vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
if (ret < 0)
return ret;
if (ret)
- vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
+ vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
if (offset == _FDI_RXA_CTL)
if (fdi_auto_training_started(vgpu))
- vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
+ vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
DP_TP_STATUS_AUTOTRAIN_DONE;
return 0;
}
@@ -653,7 +659,7 @@ static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
if (data == 0x2) {
status_reg = DP_TP_STATUS(index);
- vgpu_vreg(vgpu, status_reg) |= (1 << 25);
+ vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
}
return 0;
}
@@ -717,7 +723,7 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
};
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
@@ -738,7 +744,7 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
};
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
@@ -1060,9 +1066,9 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
- unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
sbi_offset);
@@ -1087,13 +1093,13 @@ static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = data;
- if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
- unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
write_virtual_sbi_register(vgpu, sbi_offset,
- vgpu_vreg(vgpu, SBI_DATA));
+ vgpu_vreg_t(vgpu, SBI_DATA));
}
return 0;
}
@@ -1135,21 +1141,21 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
- int ret = 0;
+ intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ struct intel_vgpu_mm *mm;
+ u64 *pdps;
+
+ pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
switch (notification) {
case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
- ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
- break;
- case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
- ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
- break;
+ root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
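+ /* fall through: the L3 create shares the L4 create path below */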
case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
- ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
- break;
+ mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
+ return PTR_ERR_OR_ZERO(mm);
+ case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
- ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
- break;
+ return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
case VGT_G2V_EXECLIST_CONTEXT_CREATE:
case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
case 1: /* Remove this in guest driver. */
@@ -1157,7 +1163,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
default:
gvt_vgpu_err("Invalid PV notification %d\n", notification);
}
- return ret;
+ return 0;
}
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
@@ -1339,7 +1345,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
{
u32 value = *(u32 *)p_data;
u32 cmd = value & 0xff;
- u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
+ u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
@@ -1378,6 +1384,34 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
+static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 value = *(u32 *)p_data;
+ int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+
+ if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
+ gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
+ offset, value);
+ return -EINVAL;
+ }
+ /*
+ * Need to emulate all the HWSP register writes to ensure the host can
+ * update the VM CSB status correctly. The registers listed here cover
+ * BDW, SKL and other platforms with the same HWSP registers.
+ */
+ if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+ gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
+ offset);
+ return -EINVAL;
+ }
+ vgpu->hws_pga[ring_id] = value;
+ gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
+ vgpu->id, value, offset);
+
+ return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+}
+
static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -1441,12 +1475,12 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 data = *(u32 *)p_data;
int ret = 0;
- if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
+ if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
return -EINVAL;
- execlist = &vgpu->execlist[ring_id];
+ execlist = &vgpu->submission.execlist[ring_id];
- execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
+ execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
@@ -1465,6 +1499,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 data = *(u32 *)p_data;
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
+ int ret;
write_vreg(vgpu, offset, p_data, bytes);
@@ -1486,8 +1521,16 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
(enable_execlist ? "enabling" : "disabling"),
ring_id);
- if (enable_execlist)
- intel_vgpu_start_schedule(vgpu);
+ if (!enable_execlist)
+ return 0;
+
+ ret = intel_vgpu_select_submission_ops(vgpu,
+ ENGINE_MASK(ring_id),
+ INTEL_VGPU_EXECLIST_SUBMISSION);
+ if (ret)
+ return ret;
+
+ intel_vgpu_start_schedule(vgpu);
}
return 0;
}
@@ -1519,7 +1562,7 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
default:
return -EINVAL;
}
- set_bit(id, (void *)vgpu->tlb_handle_pending);
+ set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
return 0;
}
@@ -1542,7 +1585,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
}
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
- ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
+ ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
f, s, am, rm, d, r, w); \
if (ret) \
return ret; \
@@ -1610,22 +1653,22 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-#define RING_REG(base) (base + 0x28)
+#define RING_REG(base) _MMIO((base) + 0x28)
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x134)
+#define RING_REG(base) _MMIO((base) + 0x134)
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x6c)
+#define RING_REG(base) _MMIO((base) + 0x6c)
MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
- MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
- MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -1635,7 +1678,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
-#define RING_REG(base) (base + 0x29c)
+#define RING_REG(base) _MMIO((base) + 0x29c)
MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
ring_mode_mmio_write);
#undef RING_REG
@@ -1654,37 +1697,37 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(0x602a0, D_ALL);
+ MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_D(_MMIO(0x602a0), D_ALL);
- MMIO_D(0x65050, D_ALL);
- MMIO_D(0x650b4, D_ALL);
+ MMIO_D(_MMIO(0x65050), D_ALL);
+ MMIO_D(_MMIO(0x650b4), D_ALL);
- MMIO_D(0xc4040, D_ALL);
+ MMIO_D(_MMIO(0xc4040), D_ALL);
MMIO_D(DERRMR, D_ALL);
MMIO_D(PIPEDSL(PIPE_A), D_ALL);
@@ -1724,14 +1767,18 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(CURBASE(PIPE_B), D_ALL);
MMIO_D(CURBASE(PIPE_C), D_ALL);
- MMIO_D(0x700ac, D_ALL);
- MMIO_D(0x710ac, D_ALL);
- MMIO_D(0x720ac, D_ALL);
+ MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
+ MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
+ MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
+
+ MMIO_D(_MMIO(0x700ac), D_ALL);
+ MMIO_D(_MMIO(0x710ac), D_ALL);
+ MMIO_D(_MMIO(0x720ac), D_ALL);
- MMIO_D(0x70090, D_ALL);
- MMIO_D(0x70094, D_ALL);
- MMIO_D(0x70098, D_ALL);
- MMIO_D(0x7009c, D_ALL);
+ MMIO_D(_MMIO(0x70090), D_ALL);
+ MMIO_D(_MMIO(0x70094), D_ALL);
+ MMIO_D(_MMIO(0x70098), D_ALL);
+ MMIO_D(_MMIO(0x7009c), D_ALL);
MMIO_D(DSPCNTR(PIPE_A), D_ALL);
MMIO_D(DSPADDR(PIPE_A), D_ALL);
@@ -1907,24 +1954,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
- MMIO_D(0x48268, D_ALL);
+ MMIO_D(_MMIO(0x48268), D_ALL);
MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
gmbus_mmio_write);
MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
- MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
- MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
@@ -1936,30 +1983,30 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
- MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
- MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
- MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
- MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
- MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
-
- MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
- MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
- MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
- MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
- MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
-
- MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
- MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
- MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
- MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);
+
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);
+
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
@@ -1977,38 +2024,38 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
- MMIO_D(_FDI_RXA_MISC, D_ALL);
- MMIO_D(_FDI_RXB_MISC, D_ALL);
- MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
- MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
- MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
- MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
+ MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);
MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
MMIO_D(PCH_PP_DIVISOR, D_ALL);
MMIO_D(PCH_PP_STATUS, D_ALL);
MMIO_D(PCH_LVDS, D_ALL);
- MMIO_D(_PCH_DPLL_A, D_ALL);
- MMIO_D(_PCH_DPLL_B, D_ALL);
- MMIO_D(_PCH_FPA0, D_ALL);
- MMIO_D(_PCH_FPA1, D_ALL);
- MMIO_D(_PCH_FPB0, D_ALL);
- MMIO_D(_PCH_FPB1, D_ALL);
+ MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
MMIO_D(PCH_DREF_CONTROL, D_ALL);
MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
MMIO_D(PCH_DPLL_SEL, D_ALL);
- MMIO_D(0x61208, D_ALL);
- MMIO_D(0x6120c, D_ALL);
+ MMIO_D(_MMIO(0x61208), D_ALL);
+ MMIO_D(_MMIO(0x6120c), D_ALL);
MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
- MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
PORTA_HOTPLUG_STATUS_MASK
@@ -2030,11 +2077,11 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SOUTH_CHICKEN1, D_ALL);
MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
- MMIO_D(_TRANSA_CHICKEN1, D_ALL);
- MMIO_D(_TRANSB_CHICKEN1, D_ALL);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
+ MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
- MMIO_D(_TRANSA_CHICKEN2, D_ALL);
- MMIO_D(_TRANSB_CHICKEN2, D_ALL);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
+ MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);
MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
MMIO_D(ILK_DPFC_CONTROL, D_ALL);
@@ -2100,24 +2147,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(0x60110, D_ALL);
- MMIO_D(0x61110, D_ALL);
- MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_D(_MMIO(0x60110), D_ALL);
+ MMIO_D(_MMIO(0x61110), D_ALL);
+ MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
MMIO_D(SPLL_CTL, D_ALL);
- MMIO_D(_WRPLL_CTL1, D_ALL);
- MMIO_D(_WRPLL_CTL2, D_ALL);
+ MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
+ MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
@@ -2128,15 +2175,15 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
- MMIO_D(0x46508, D_ALL);
+ MMIO_D(_MMIO(0x46508), D_ALL);
- MMIO_D(0x49080, D_ALL);
- MMIO_D(0x49180, D_ALL);
- MMIO_D(0x49280, D_ALL);
+ MMIO_D(_MMIO(0x49080), D_ALL);
+ MMIO_D(_MMIO(0x49180), D_ALL);
+ MMIO_D(_MMIO(0x49280), D_ALL);
- MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
@@ -2156,7 +2203,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
MMIO_D(PIXCLK_GATE, D_ALL);
- MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
+ MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
@@ -2177,24 +2224,25 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
- MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64eC0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
+ MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
- MMIO_D(_TRANSA_MSA_MISC, D_ALL);
- MMIO_D(_TRANSB_MSA_MISC, D_ALL);
- MMIO_D(_TRANSC_MSA_MISC, D_ALL);
- MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
+ MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
+ MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
+ MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
+ MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);
MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
MMIO_D(FORCEWAKE_ACK, D_ALL);
@@ -2260,101 +2308,101 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN6_UCGCTL1, D_ALL);
MMIO_D(GEN6_UCGCTL2, D_ALL);
- MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
- MMIO_D(0x13812c, D_ALL);
+ MMIO_D(_MMIO(0x13812c), D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
MMIO_D(HSW_EDRAM_CAP, D_ALL);
MMIO_D(HSW_IDICR, D_ALL);
MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
- MMIO_D(0x3c, D_ALL);
- MMIO_D(0x860, D_ALL);
+ MMIO_D(_MMIO(0x3c), D_ALL);
+ MMIO_D(_MMIO(0x860), D_ALL);
MMIO_D(ECOSKPD, D_ALL);
- MMIO_D(0x121d0, D_ALL);
+ MMIO_D(_MMIO(0x121d0), D_ALL);
MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
- MMIO_D(0x41d0, D_ALL);
+ MMIO_D(_MMIO(0x41d0), D_ALL);
MMIO_D(GAC_ECO_BITS, D_ALL);
- MMIO_D(0x6200, D_ALL);
- MMIO_D(0x6204, D_ALL);
- MMIO_D(0x6208, D_ALL);
- MMIO_D(0x7118, D_ALL);
- MMIO_D(0x7180, D_ALL);
- MMIO_D(0x7408, D_ALL);
- MMIO_D(0x7c00, D_ALL);
+ MMIO_D(_MMIO(0x6200), D_ALL);
+ MMIO_D(_MMIO(0x6204), D_ALL);
+ MMIO_D(_MMIO(0x6208), D_ALL);
+ MMIO_D(_MMIO(0x7118), D_ALL);
+ MMIO_D(_MMIO(0x7180), D_ALL);
+ MMIO_D(_MMIO(0x7408), D_ALL);
+ MMIO_D(_MMIO(0x7c00), D_ALL);
MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
- MMIO_D(0x911c, D_ALL);
- MMIO_D(0x9120, D_ALL);
+ MMIO_D(_MMIO(0x911c), D_ALL);
+ MMIO_D(_MMIO(0x9120), D_ALL);
MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GAB_CTL, D_ALL);
- MMIO_D(0x48800, D_ALL);
- MMIO_D(0xce044, D_ALL);
- MMIO_D(0xe6500, D_ALL);
- MMIO_D(0xe6504, D_ALL);
- MMIO_D(0xe6600, D_ALL);
- MMIO_D(0xe6604, D_ALL);
- MMIO_D(0xe6700, D_ALL);
- MMIO_D(0xe6704, D_ALL);
- MMIO_D(0xe6800, D_ALL);
- MMIO_D(0xe6804, D_ALL);
+ MMIO_D(_MMIO(0x48800), D_ALL);
+ MMIO_D(_MMIO(0xce044), D_ALL);
+ MMIO_D(_MMIO(0xe6500), D_ALL);
+ MMIO_D(_MMIO(0xe6504), D_ALL);
+ MMIO_D(_MMIO(0xe6600), D_ALL);
+ MMIO_D(_MMIO(0xe6604), D_ALL);
+ MMIO_D(_MMIO(0xe6700), D_ALL);
+ MMIO_D(_MMIO(0xe6704), D_ALL);
+ MMIO_D(_MMIO(0xe6800), D_ALL);
+ MMIO_D(_MMIO(0xe6804), D_ALL);
MMIO_D(PCH_GMBUS4, D_ALL);
MMIO_D(PCH_GMBUS5, D_ALL);
- MMIO_D(0x902c, D_ALL);
- MMIO_D(0xec008, D_ALL);
- MMIO_D(0xec00c, D_ALL);
- MMIO_D(0xec008 + 0x18, D_ALL);
- MMIO_D(0xec00c + 0x18, D_ALL);
- MMIO_D(0xec008 + 0x18 * 2, D_ALL);
- MMIO_D(0xec00c + 0x18 * 2, D_ALL);
- MMIO_D(0xec008 + 0x18 * 3, D_ALL);
- MMIO_D(0xec00c + 0x18 * 3, D_ALL);
- MMIO_D(0xec408, D_ALL);
- MMIO_D(0xec40c, D_ALL);
- MMIO_D(0xec408 + 0x18, D_ALL);
- MMIO_D(0xec40c + 0x18, D_ALL);
- MMIO_D(0xec408 + 0x18 * 2, D_ALL);
- MMIO_D(0xec40c + 0x18 * 2, D_ALL);
- MMIO_D(0xec408 + 0x18 * 3, D_ALL);
- MMIO_D(0xec40c + 0x18 * 3, D_ALL);
- MMIO_D(0xfc810, D_ALL);
- MMIO_D(0xfc81c, D_ALL);
- MMIO_D(0xfc828, D_ALL);
- MMIO_D(0xfc834, D_ALL);
- MMIO_D(0xfcc00, D_ALL);
- MMIO_D(0xfcc0c, D_ALL);
- MMIO_D(0xfcc18, D_ALL);
- MMIO_D(0xfcc24, D_ALL);
- MMIO_D(0xfd000, D_ALL);
- MMIO_D(0xfd00c, D_ALL);
- MMIO_D(0xfd018, D_ALL);
- MMIO_D(0xfd024, D_ALL);
- MMIO_D(0xfd034, D_ALL);
+ MMIO_D(_MMIO(0x902c), D_ALL);
+ MMIO_D(_MMIO(0xec008), D_ALL);
+ MMIO_D(_MMIO(0xec00c), D_ALL);
+ MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xec408), D_ALL);
+ MMIO_D(_MMIO(0xec40c), D_ALL);
+ MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xfc810), D_ALL);
+ MMIO_D(_MMIO(0xfc81c), D_ALL);
+ MMIO_D(_MMIO(0xfc828), D_ALL);
+ MMIO_D(_MMIO(0xfc834), D_ALL);
+ MMIO_D(_MMIO(0xfcc00), D_ALL);
+ MMIO_D(_MMIO(0xfcc0c), D_ALL);
+ MMIO_D(_MMIO(0xfcc18), D_ALL);
+ MMIO_D(_MMIO(0xfcc24), D_ALL);
+ MMIO_D(_MMIO(0xfd000), D_ALL);
+ MMIO_D(_MMIO(0xfd00c), D_ALL);
+ MMIO_D(_MMIO(0xfd018), D_ALL);
+ MMIO_D(_MMIO(0xfd024), D_ALL);
+ MMIO_D(_MMIO(0xfd034), D_ALL);
MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
- MMIO_D(0x2054, D_ALL);
- MMIO_D(0x12054, D_ALL);
- MMIO_D(0x22054, D_ALL);
- MMIO_D(0x1a054, D_ALL);
-
- MMIO_D(0x44070, D_ALL);
- MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
-
- MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
- MMIO_D(0x2b00, D_BDW_PLUS);
- MMIO_D(0x2360, D_BDW_PLUS);
- MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(_MMIO(0x2054), D_ALL);
+ MMIO_D(_MMIO(0x12054), D_ALL);
+ MMIO_D(_MMIO(0x22054), D_ALL);
+ MMIO_D(_MMIO(0x1a054), D_ALL);
+
+ MMIO_D(_MMIO(0x44070), D_ALL);
+ MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
+ MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
+ MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -2368,24 +2416,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
- MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
- MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2459,40 +2507,40 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
mmio_read_from_hw, NULL);
-#define RING_REG(base) (base + 0xd0)
+#define RING_REG(base) _MMIO((base) + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
#undef RING_REG
-#define RING_REG(base) (base + 0x230)
+#define RING_REG(base) _MMIO((base) + 0x230)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
#undef RING_REG
-#define RING_REG(base) (base + 0x234)
+#define RING_REG(base) _MMIO((base) + 0x234)
MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x244)
+#define RING_REG(base) _MMIO((base) + 0x244)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x370)
+#define RING_REG(base) _MMIO((base) + 0x370)
MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x3a0)
+#define RING_REG(base) _MMIO((base) + 0x3a0)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
- MMIO_D(0x1c1d0, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
- MMIO_D(0x1c054, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
@@ -2501,11 +2549,11 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
-#define RING_REG(base) (base + 0x270)
+#define RING_REG(base) _MMIO((base) + 0x270)
MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
- MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2514,10 +2562,11 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
- MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
+ MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
- MMIO_D(0x66c00, D_BDW_PLUS);
- MMIO_D(0x66c04, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
@@ -2525,54 +2574,54 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
- MMIO_D(0xfdc, D_BDW_PLUS);
+ MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0xb110, D_BDW);
+ MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(_MMIO(0xb110), D_BDW);
- MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+ MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
NULL, force_nonpriv_write);
- MMIO_D(0x44484, D_BDW_PLUS);
- MMIO_D(0x4448c, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
- MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
- MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0x110000, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x110000), D_BDW_PLUS);
- MMIO_D(0x48400, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x48400), D_BDW_PLUS);
- MMIO_D(0x6e570, D_BDW_PLUS);
- MMIO_D(0x65f10, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2588,11 +2637,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
/*
@@ -2603,26 +2652,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
skl_power_well_ctl_write);
- MMIO_D(0xa210, D_SKL_PLUS);
+ MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
- MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
- MMIO_D(0x45504, D_SKL_PLUS);
- MMIO_D(0x45520, D_SKL_PLUS);
- MMIO_D(0x46000, D_SKL_PLUS);
- MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_D(0x6C040, D_SKL | D_KBL);
- MMIO_D(0x6C048, D_SKL | D_KBL);
- MMIO_D(0x6C050, D_SKL | D_KBL);
- MMIO_D(0x6C044, D_SKL | D_KBL);
- MMIO_D(0x6C04C, D_SKL | D_KBL);
- MMIO_D(0x6C054, D_SKL | D_KBL);
- MMIO_D(0x6c058, D_SKL | D_KBL);
- MMIO_D(0x6c05c, D_SKL | D_KBL);
- MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL);
+ MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
+ MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
+ MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
+ MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2711,105 +2760,111 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(0x70380, D_SKL_PLUS);
- MMIO_D(0x71380, D_SKL_PLUS);
- MMIO_D(0x72380, D_SKL_PLUS);
- MMIO_D(0x7039c, D_SKL_PLUS);
+ MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
- MMIO_D(0x8f074, D_SKL | D_KBL);
- MMIO_D(0x8f004, D_SKL | D_KBL);
- MMIO_D(0x8f034, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
- MMIO_D(0xb11c, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
- MMIO_D(0x51000, D_SKL | D_KBL);
- MMIO_D(0x6c00c, D_SKL_PLUS);
+ MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
- MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_D(0xd08, D_SKL_PLUS);
- MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
+ MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
+ MMIO_D(RC6_LOCATION, D_SKL_PLUS);
+ MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* TRTT */
- MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
+ MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
- MMIO_D(0x45008, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
- MMIO_D(0x46430, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
- MMIO_D(0x46520, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
- MMIO_D(0xc403c, D_SKL | D_KBL);
- MMIO_D(0xb004, D_SKL_PLUS);
+ MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
- MMIO_D(0x65900, D_SKL_PLUS);
- MMIO_D(0x1082c0, D_SKL | D_KBL);
- MMIO_D(0x4068, D_SKL | D_KBL);
- MMIO_D(0x67054, D_SKL | D_KBL);
- MMIO_D(0x6e560, D_SKL | D_KBL);
- MMIO_D(0x6e554, D_SKL | D_KBL);
- MMIO_D(0x2b20, D_SKL | D_KBL);
- MMIO_D(0x65f00, D_SKL | D_KBL);
- MMIO_D(0x65f08, D_SKL | D_KBL);
- MMIO_D(0x320f0, D_SKL | D_KBL);
-
- MMIO_D(0x70034, D_SKL_PLUS);
- MMIO_D(0x71034, D_SKL_PLUS);
- MMIO_D(0x72034, D_SKL_PLUS);
-
- MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS);
-
- MMIO_D(0x44500, D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+
+ MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
+
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
+
+ MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(0x4ab8, D_KBL);
- MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
+ MMIO_D(_MMIO(0x4ab8), D_KBL);
+ MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
return 0;
}
@@ -2825,8 +2880,8 @@ static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
for (i = 0; i < num; i++, block++) {
if (!(device & block->device))
continue;
- if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
- offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
+ if (offset >= i915_mmio_reg_offset(block->offset) &&
+ offset < i915_mmio_reg_offset(block->offset) + block->size)
return block;
}
return NULL;
@@ -2906,14 +2961,46 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_block = mmio_blocks;
gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
- gvt_dbg_mmio("traced %u virtual mmio registers\n",
- gvt->mmio.num_tracked_mmio);
return 0;
err:
intel_gvt_clean_mmio_info(gvt);
return ret;
}
+/**
+ * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
+ * @gvt: a GVT device
+ * @handler: callback invoked for each tracked mmio offset
+ * @data: private data passed to the handler
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
+ int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
+ void *data)
+{
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ struct intel_gvt_mmio_info *e;
+ int i, j, ret;
+
+ hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+ ret = handler(gvt, e->offset, data);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+ for (j = 0; j < block->size; j += 4) {
+ ret = handler(gvt,
+ i915_mmio_reg_offset(block->offset) + j,
+ data);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
/**
* intel_vgpu_default_mmio_read - default MMIO read handler
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index df7f33a..f6dd9f7 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -44,17 +44,26 @@ struct intel_gvt_mpt {
void (*detach_vgpu)(unsigned long handle);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
unsigned long (*from_virt_to_mfn)(void *p);
- int (*set_wp_page)(unsigned long handle, u64 gfn);
- int (*unset_wp_page)(unsigned long handle, u64 gfn);
+ int (*enable_page_track)(unsigned long handle, u64 gfn);
+ int (*disable_page_track)(unsigned long handle, u64 gfn);
int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
unsigned long len);
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+
+ int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
+ dma_addr_t *dma_addr);
+ void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
+ int (*set_opregion)(void *vgpu);
+ int (*get_vfio_device)(void *vgpu);
+ void (*put_vfio_device)(void *vgpu);
+ bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 9606092..c16a492 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -41,6 +41,7 @@
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
+#include <linux/debugfs.h>
#include "i915_drv.h"
#include "gvt.h"
@@ -53,11 +54,23 @@ static const struct intel_gvt_ops *intel_gvt_ops;
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+
+struct vfio_region;
+struct intel_vgpu_regops {
+ size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+ void (*release)(struct intel_vgpu *vgpu,
+ struct vfio_region *region);
+};
+
struct vfio_region {
u32 type;
u32 subtype;
size_t size;
u32 flags;
+ const struct intel_vgpu_regops *ops;
+ void *data;
};
struct kvmgt_pgfn {
@@ -72,12 +85,16 @@ struct kvmgt_guest_info {
#define NR_BKT (1 << 18)
struct hlist_head ptable[NR_BKT];
#undef NR_BKT
+ struct dentry *debugfs_cache_entries;
};
struct gvt_dma {
- struct rb_node node;
+ struct intel_vgpu *vgpu;
+ struct rb_node gfn_node;
+ struct rb_node dma_addr_node;
gfn_t gfn;
- unsigned long iova;
+ dma_addr_t dma_addr;
+ struct kref ref;
};
static inline bool handle_valid(unsigned long handle)
@@ -89,277 +106,166 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
-static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
- unsigned long *iova)
+static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t *dma_addr)
{
- struct page *page;
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- dma_addr_t daddr;
+ struct page *page;
+ unsigned long pfn;
+ int ret;
- if (unlikely(!pfn_valid(pfn)))
- return -EFAULT;
+ /* Pin the page first. */
+ ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
+ if (ret != 1) {
+ gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
+ gfn, ret);
+ return -EINVAL;
+ }
+ /* Setup DMA mapping. */
page = pfn_to_page(pfn);
- daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, daddr))
+ *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, *dma_addr)) {
+ gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
+ vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
return -ENOMEM;
+ }
- *iova = (unsigned long)(daddr >> PAGE_SHIFT);
return 0;
}
-static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t dma_addr)
{
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
- dma_addr_t daddr;
+ int ret;
- daddr = (dma_addr_t)(iova << PAGE_SHIFT);
- dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
+ WARN_ON(ret != 1);
}
-static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
{
- struct rb_node *node = vgpu->vdev.cache.rb_node;
- struct gvt_dma *ret = NULL;
+ struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+ struct gvt_dma *itr;
while (node) {
- struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);
+ itr = rb_entry(node, struct gvt_dma, dma_addr_node);
- if (gfn < itr->gfn)
+ if (dma_addr < itr->dma_addr)
node = node->rb_left;
- else if (gfn > itr->gfn)
+ else if (dma_addr > itr->dma_addr)
node = node->rb_right;
- else {
- ret = itr;
- goto out;
- }
+ else
+ return itr;
}
-
-out:
- return ret;
+ return NULL;
}
-static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct gvt_dma *entry;
- unsigned long iova;
+ struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
+ struct gvt_dma *itr;
- mutex_lock(&vgpu->vdev.cache_lock);
-
- entry = __gvt_cache_find(vgpu, gfn);
- iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
+ while (node) {
+ itr = rb_entry(node, struct gvt_dma, gfn_node);
- mutex_unlock(&vgpu->vdev.cache_lock);
- return iova;
+ if (gfn < itr->gfn)
+ node = node->rb_left;
+ else if (gfn > itr->gfn)
+ node = node->rb_right;
+ else
+ return itr;
+ }
+ return NULL;
}
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
- unsigned long iova)
+static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+ dma_addr_t dma_addr)
{
struct gvt_dma *new, *itr;
- struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
+ struct rb_node **link, *parent = NULL;
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
- return;
+ return -ENOMEM;
+ new->vgpu = vgpu;
new->gfn = gfn;
- new->iova = iova;
+ new->dma_addr = dma_addr;
+ kref_init(&new->ref);
- mutex_lock(&vgpu->vdev.cache_lock);
+ /* gfn_cache maps gfn to struct gvt_dma. */
+ link = &vgpu->vdev.gfn_cache.rb_node;
while (*link) {
parent = *link;
- itr = rb_entry(parent, struct gvt_dma, node);
+ itr = rb_entry(parent, struct gvt_dma, gfn_node);
- if (gfn == itr->gfn)
- goto out;
- else if (gfn < itr->gfn)
+ if (gfn < itr->gfn)
link = &parent->rb_left;
else
link = &parent->rb_right;
}
+ rb_link_node(&new->gfn_node, parent, link);
+ rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
- rb_link_node(&new->node, parent, link);
- rb_insert_color(&new->node, &vgpu->vdev.cache);
- mutex_unlock(&vgpu->vdev.cache_lock);
- return;
+ /* dma_addr_cache maps dma addr to struct gvt_dma. */
+ parent = NULL;
+ link = &vgpu->vdev.dma_addr_cache.rb_node;
+ while (*link) {
+ parent = *link;
+ itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
-out:
- mutex_unlock(&vgpu->vdev.cache_lock);
- kfree(new);
+ if (dma_addr < itr->dma_addr)
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+ rb_link_node(&new->dma_addr_node, parent, link);
+ rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+
+ vgpu->vdev.nr_cache_entries++;
+ return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
struct gvt_dma *entry)
{
- rb_erase(&entry->node, &vgpu->vdev.cache);
+ rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
+ rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
kfree(entry);
-}
-
-static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-{
- struct device *dev = mdev_dev(vgpu->vdev.mdev);
- struct gvt_dma *this;
- unsigned long g1;
- int rc;
-
- mutex_lock(&vgpu->vdev.cache_lock);
- this = __gvt_cache_find(vgpu, gfn);
- if (!this) {
- mutex_unlock(&vgpu->vdev.cache_lock);
- return;
- }
-
- g1 = gfn;
- gvt_dma_unmap_iova(vgpu, this->iova);
- rc = vfio_unpin_pages(dev, &g1, 1);
- WARN_ON(rc != 1);
- __gvt_cache_remove_entry(vgpu, this);
- mutex_unlock(&vgpu->vdev.cache_lock);
-}
-
-static void gvt_cache_init(struct intel_vgpu *vgpu)
-{
- vgpu->vdev.cache = RB_ROOT;
- mutex_init(&vgpu->vdev.cache_lock);
+ vgpu->vdev.nr_cache_entries--;
}
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
- struct device *dev = mdev_dev(vgpu->vdev.mdev);
- unsigned long gfn;
for (;;) {
mutex_lock(&vgpu->vdev.cache_lock);
- node = rb_first(&vgpu->vdev.cache);
+ node = rb_first(&vgpu->vdev.gfn_cache);
if (!node) {
mutex_unlock(&vgpu->vdev.cache_lock);
break;
}
- dma = rb_entry(node, struct gvt_dma, node);
- gvt_dma_unmap_iova(vgpu, dma->iova);
- gfn = dma->gfn;
+ dma = rb_entry(node, struct gvt_dma, gfn_node);
+ gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
__gvt_cache_remove_entry(vgpu, dma);
mutex_unlock(&vgpu->vdev.cache_lock);
- vfio_unpin_pages(dev, &gfn, 1);
}
}
-static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
- const char *name)
-{
- int i;
- struct intel_vgpu_type *t;
- const char *driver_name = dev_driver_string(
- &gvt->dev_priv->drm.pdev->dev);
-
- for (i = 0; i < gvt->num_types; i++) {
- t = &gvt->types[i];
- if (!strncmp(t->name, name + strlen(driver_name) + 1,
- sizeof(t->name)))
- return t;
- }
-
- return NULL;
-}
-
-static ssize_t available_instances_show(struct kobject *kobj,
- struct device *dev, char *buf)
-{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- void *gvt = kdev_to_i915(dev)->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
- char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct kobject *kobj, struct device *dev,
- char *buf)
-{
- struct intel_vgpu_type *type;
- void *gvt = kdev_to_i915(dev)->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
- if (!type)
- return 0;
-
- return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\nresolution: %s\n"
- "weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-
-static struct attribute *type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- NULL,
-};
-
-static struct attribute_group *intel_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (WARN_ON(!group))
- goto unwind;
-
- group->name = type->name;
- group->attrs = type_attrs;
- intel_vgpu_type_groups[i] = group;
- }
-
- return true;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = intel_vgpu_type_groups[j];
- kfree(group);
- }
-
- return false;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+static void gvt_cache_init(struct intel_vgpu *vgpu)
{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = intel_vgpu_type_groups[i];
- kfree(group);
- }
+ vgpu->vdev.gfn_cache = RB_ROOT;
+ vgpu->vdev.dma_addr_cache = RB_ROOT;
+ vgpu->vdev.nr_cache_entries = 0;
+ mutex_init(&vgpu->vdev.cache_lock);
}
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
@@ -430,6 +336,108 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
}
}
+static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+ VFIO_PCI_NUM_REGIONS;
+ void *base = vgpu->vdev.region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos >= vgpu->vdev.region[i].size || iswrite) {
+ gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
+ return -EINVAL;
+ }
+ count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+ memcpy(buf, base + pos, count);
+
+ return count;
+}
+
+static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
+ struct vfio_region *region)
+{
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
+ .rw = intel_vgpu_reg_rw_opregion,
+ .release = intel_vgpu_reg_release_opregion,
+};
+
+static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
+ unsigned int type, unsigned int subtype,
+ const struct intel_vgpu_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_region *region;
+
+ region = krealloc(vgpu->vdev.region,
+ (vgpu->vdev.num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ vgpu->vdev.region = region;
+ vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
+ vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
+ vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
+ vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
+ vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
+ vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
+ vgpu->vdev.num_regions++;
+ return 0;
+}
+
+static int kvmgt_get_vfio_device(void *p_vgpu)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ vgpu->vdev.vfio_device = vfio_device_get_from_dev(
+ mdev_dev(vgpu->vdev.mdev));
+ if (!vgpu->vdev.vfio_device) {
+ gvt_vgpu_err("failed to get vfio device\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int kvmgt_set_opregion(void *p_vgpu)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ void *base;
+ int ret;
+
+ /* Each vgpu has its own opregion, although VFIO would create another
+ * one later. This one is used to expose the opregion to VFIO, while
+ * the one VFIO creates later is what the guest actually uses.
+ */
+ base = vgpu_opregion(vgpu)->va;
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ memunmap(base);
+ return -EINVAL;
+ }
+
+ ret = intel_vgpu_register_reg(vgpu,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
+ &intel_vgpu_regops_opregion, OPREGION_SIZE,
+ VFIO_REGION_INFO_FLAG_READ, base);
+
+ return ret;
+}
+
+static void kvmgt_put_vfio_device(void *vgpu)
+{
+ if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+ return;
+
+ vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+}
+
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = NULL;
@@ -441,7 +449,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
- type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type) {
gvt_vgpu_err("failed to find type %s to create\n",
kobject_name(kobj));
@@ -452,7 +460,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
- gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
+ gvt_err("failed to create intel vgpu: %d\n", ret);
goto out;
}
@@ -489,13 +497,22 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
- unsigned long gfn, end_gfn;
+ struct gvt_dma *entry;
+ unsigned long iov_pfn, end_iov_pfn;
+
+ iov_pfn = unmap->iova >> PAGE_SHIFT;
+ end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
- gfn = unmap->iova >> PAGE_SHIFT;
- end_gfn = gfn + unmap->size / PAGE_SIZE;
+ mutex_lock(&vgpu->vdev.cache_lock);
+ for (; iov_pfn < end_iov_pfn; iov_pfn++) {
+ entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
+ if (!entry)
+ continue;
- while (gfn < end_gfn)
- gvt_cache_remove(vgpu, gfn++);
+ gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+ __gvt_cache_remove_entry(vgpu, entry);
+ }
+ mutex_unlock(&vgpu->vdev.cache_lock);
}
return NOTIFY_OK;
@@ -651,6 +668,39 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
return ret;
}
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+{
+ return off >= vgpu_aperture_offset(vgpu) &&
+ off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
+}
+
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+ void *buf, unsigned long count, bool is_write)
+{
+ void *aperture_va;
+
+ if (!intel_vgpu_in_aperture(vgpu, off) ||
+ !intel_vgpu_in_aperture(vgpu, off + count)) {
+ gvt_vgpu_err("Invalid aperture offset %llu\n", off);
+ return -EINVAL;
+ }
+
+ aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+ ALIGN_DOWN(off, PAGE_SIZE),
+ count + offset_in_page(off));
+ if (!aperture_va)
+ return -EIO;
+
+ if (is_write)
+ memcpy(aperture_va + offset_in_page(off), buf, count);
+ else
+ memcpy(buf, aperture_va + offset_in_page(off), count);
+
+ io_mapping_unmap(aperture_va);
+
+ return 0;
+}
+
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
@@ -660,7 +710,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -679,8 +729,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
buf, count, is_write);
break;
case VFIO_PCI_BAR2_REGION_INDEX:
- ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
- buf, count, is_write);
+ ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
break;
case VFIO_PCI_BAR1_REGION_INDEX:
case VFIO_PCI_BAR3_REGION_INDEX:
@@ -688,13 +737,38 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_BAR5_REGION_INDEX:
case VFIO_PCI_VGA_REGION_INDEX:
case VFIO_PCI_ROM_REGION_INDEX:
+ break;
default:
- gvt_vgpu_err("unsupported region: %u\n", index);
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+ return -EINVAL;
+
+ index -= VFIO_PCI_NUM_REGIONS;
+ return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+ ppos, is_write);
}
return ret == 0 ? count : ret;
}
+static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ struct intel_gvt *gvt = vgpu->gvt;
+ int offset;
+
+ /* Only allow MMIO GGTT entry access */
+ if (index != PCI_BASE_ADDRESS_0)
+ return false;
+
+ offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
+ intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
+
+ return (offset >= gvt->device_info.gtt_start_offset &&
+ offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
+ true : false;
+}
+
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -704,7 +778,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
while (count) {
size_t filled;
- if (count >= 4 && !(*ppos % 4)) {
+ /* Only 8-byte GGTT entry reads are supported */
+ if (count >= 8 && !(*ppos % 8) &&
+ gtt_entry(mdev, ppos)) {
+ u64 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 8;
+ } else if (count >= 4 && !(*ppos % 4)) {
u32 val;
ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
@@ -764,7 +852,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
while (count) {
size_t filled;
- if (count >= 4 && !(*ppos % 4)) {
+ /* Only 8-byte GGTT entry writes are supported */
+ if (count >= 8 && !(*ppos % 8) &&
+ gtt_entry(mdev, ppos)) {
+ u64 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 8;
+ } else if (count >= 4 && !(*ppos % 4)) {
u32 val;
if (copy_from_user(&val, buf, sizeof(val)))
@@ -952,7 +1054,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
- info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_regions = VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1012,6 +1115,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (!sparse)
return -ENOMEM;
+ sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->header.version = 1;
sparse->nr_areas = nr_areas;
cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
sparse->areas[0].offset =
@@ -1022,18 +1127,24 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = 0;
-
info.flags = 0;
+
gvt_dbg_core("get region info bar:%d\n", info.index);
break;
case VFIO_PCI_ROM_REGION_INDEX:
case VFIO_PCI_VGA_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+ info.flags = 0;
+
gvt_dbg_core("get region info index:%d\n", info.index);
break;
default:
{
- struct vfio_region_info_cap_type cap_type;
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1 };
if (info.index >= VFIO_PCI_NUM_REGIONS +
vgpu->vdev.num_regions)
@@ -1050,8 +1161,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
cap_type.subtype = vgpu->vdev.region[i].subtype;
ret = vfio_info_add_capability(&caps,
- VFIO_REGION_INFO_CAP_TYPE,
- &cap_type);
+ &cap_type.header,
+ sizeof(cap_type));
if (ret)
return ret;
}
@@ -1061,8 +1172,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
switch (cap_type_id) {
case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
ret = vfio_info_add_capability(&caps,
- VFIO_REGION_INFO_CAP_SPARSE_MMAP,
- sparse);
+ &sparse->header, sizeof(*sparse) +
+ (sparse->nr_areas *
+ sizeof(*sparse->areas)));
kfree(sparse);
if (ret)
return ret;
@@ -1073,6 +1185,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
}
if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
if (info.argsz < sizeof(info) + caps.size) {
info.argsz = sizeof(info) + caps.size;
info.cap_offset = 0;
@@ -1159,6 +1272,33 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
} else if (cmd == VFIO_DEVICE_RESET) {
intel_gvt_ops->vgpu_reset(vgpu);
return 0;
+ } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
+ struct vfio_device_gfx_plane_info dmabuf;
+ int ret = 0;
+
+ minsz = offsetofend(struct vfio_device_gfx_plane_info,
+ dmabuf_id);
+ if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
+ return -EFAULT;
+ if (dmabuf.argsz < minsz)
+ return -EINVAL;
+
+ ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
+ if (ret != 0)
+ return ret;
+
+ return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
+ __u32 dmabuf_id;
+ __s32 dmabuf_fd;
+
+ if (get_user(dmabuf_id, (__u32 __user *)arg))
+ return -EFAULT;
+
+ dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
+ return dmabuf_fd;
+
}
return 0;
@@ -1188,7 +1328,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr,
struct intel_vgpu *vgpu = (struct intel_vgpu *)
mdev_get_drvdata(mdev);
return sprintf(buf, "%u\n",
- vgpu->shadow_ctx->hw_id);
+ vgpu->submission.shadow_ctx->hw_id);
}
return sprintf(buf, "\n");
}
@@ -1212,8 +1352,7 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
-static const struct mdev_parent_ops intel_vgpu_ops = {
- .supported_type_groups = intel_vgpu_type_groups,
+static struct mdev_parent_ops intel_vgpu_ops = {
.mdev_attr_groups = intel_vgpu_groups,
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
@@ -1229,21 +1368,24 @@ static const struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- if (!intel_gvt_init_vgpu_type_groups(gvt))
- return -EFAULT;
+ struct attribute **kvm_type_attrs;
+ struct attribute_group **kvm_vgpu_type_groups;
intel_gvt_ops = ops;
+ if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
+ &kvm_vgpu_type_groups))
+ return -EFAULT;
+ intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
return mdev_register_device(dev, &intel_vgpu_ops);
}
static void kvmgt_host_exit(struct device *dev, void *gvt)
{
- intel_gvt_cleanup_vgpu_type_groups(gvt);
mdev_unregister_device(dev);
}
-static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
+static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
@@ -1277,7 +1419,7 @@ out:
return 0;
}
-static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
+static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
@@ -1319,8 +1461,8 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
struct kvmgt_guest_info, track_node);
if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
- intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
- (void *)val, len);
+ intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+ (void *)val, len);
}
static void kvmgt_page_track_flush_slot(struct kvm *kvm,
@@ -1398,15 +1540,27 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
+ mutex_init(&vgpu->dmabuf_lock);
+ init_completion(&vgpu->vblank_done);
+
info->track_node.track_write = kvmgt_page_track_write;
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
+ info->debugfs_cache_entries = debugfs_create_ulong(
+ "kvmgt_nr_cache_entries",
+ 0444, vgpu->debugfs,
+ &vgpu->vdev.nr_cache_entries);
+ if (!info->debugfs_cache_entries)
+ gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
+
return 0;
}
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
+ debugfs_remove(info->debugfs_cache_entries);
+
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvm_put_kvm(info->kvm);
kvmgt_protect_table_destroy(info);
@@ -1446,39 +1600,84 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
- unsigned long iova, pfn;
struct kvmgt_guest_info *info;
- struct device *dev;
- struct intel_vgpu *vgpu;
- int rc;
+ kvm_pfn_t pfn;
if (!handle_valid(handle))
return INTEL_GVT_INVALID_ADDR;
info = (struct kvmgt_guest_info *)handle;
- vgpu = info->vgpu;
- iova = gvt_cache_find(info->vgpu, gfn);
- if (iova != INTEL_GVT_INVALID_ADDR)
- return iova;
-
- pfn = INTEL_GVT_INVALID_ADDR;
- dev = mdev_dev(info->vgpu->vdev.mdev);
- rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
- if (rc != 1) {
- gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
- gfn, rc);
- return INTEL_GVT_INVALID_ADDR;
- }
- /* transfer to host iova for GFX to use DMA */
- rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
- if (rc) {
- gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
- vfio_unpin_pages(dev, &gfn, 1);
+
+ pfn = gfn_to_pfn(info->kvm, gfn);
+ if (is_error_noslot_pfn(pfn))
return INTEL_GVT_INVALID_ADDR;
+
+ return pfn;
+}
+
+int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+ dma_addr_t *dma_addr)
+{
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct gvt_dma *entry;
+ int ret;
+
+ if (!handle_valid(handle))
+ return -EINVAL;
+
+ info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
+
+ mutex_lock(&info->vgpu->vdev.cache_lock);
+
+ entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+ if (!entry) {
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+ if (ret)
+ goto err_unlock;
+
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+ if (ret)
+ goto err_unmap;
+ } else {
+ kref_get(&entry->ref);
+ *dma_addr = entry->dma_addr;
}
- gvt_cache_add(info->vgpu, gfn, iova);
- return iova;
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
+ return 0;
+
+err_unmap:
+ gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+err_unlock:
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
+ return ret;
+}
+
+static void __gvt_dma_release(struct kref *ref)
+{
+ struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
+
+ gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+ __gvt_cache_remove_entry(entry->vgpu, entry);
+}
+
+void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+ struct kvmgt_guest_info *info;
+ struct gvt_dma *entry;
+
+ if (!handle_valid(handle))
+ return;
+
+ info = (struct kvmgt_guest_info *)handle;
+
+ mutex_lock(&info->vgpu->vdev.cache_lock);
+ entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ if (entry)
+ kref_put(&entry->ref, __gvt_dma_release);
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
}
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
@@ -1526,6 +1725,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
return PFN_DOWN(__pa(addr));
}
+static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+{
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+
+ if (!handle_valid(handle))
+ return false;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
+ return kvm_is_visible_gfn(kvm, gfn);
+
+}
+
struct intel_gvt_mpt kvmgt_mpt = {
.host_init = kvmgt_host_init,
.host_exit = kvmgt_host_exit,
@@ -1533,11 +1747,17 @@ struct intel_gvt_mpt kvmgt_mpt = {
.detach_vgpu = kvmgt_detach_vgpu,
.inject_msi = kvmgt_inject_msi,
.from_virt_to_mfn = kvmgt_virt_to_pfn,
- .set_wp_page = kvmgt_write_protect_add,
- .unset_wp_page = kvmgt_write_protect_remove,
+ .enable_page_track = kvmgt_page_track_add,
+ .disable_page_track = kvmgt_page_track_remove,
.read_gpa = kvmgt_read_gpa,
.write_gpa = kvmgt_write_gpa,
.gfn_to_mfn = kvmgt_gfn_to_pfn,
+ .dma_map_guest_page = kvmgt_dma_map_guest_page,
+ .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+ .set_opregion = kvmgt_set_opregion,
+ .get_vfio_device = kvmgt_get_vfio_device,
+ .put_vfio_device = kvmgt_put_vfio_device,
+ .is_valid_gfn = kvmgt_is_valid_gfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 1e1310f..11b71b3 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
-static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
-{
- u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
- u64 aperture_sz = vgpu_aperture_sz(vgpu);
-
- return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
-}
-
-static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
- void *pdata, unsigned int size, bool is_read)
-{
- u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
- u64 offset = gpa - aperture_gpa;
-
- if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
- gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
- offset, size);
- return -EINVAL;
- }
-
- if (!vgpu->gm.aperture_va) {
- gvt_vgpu_err("BAR is not enabled\n");
- return -ENXIO;
- }
-
- if (is_read)
- memcpy(pdata, vgpu->gm.aperture_va + offset, size);
- else
- memcpy(vgpu->gm.aperture_va + offset, pdata, size);
- return 0;
-}
-
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes, bool read)
{
@@ -108,34 +76,14 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
else
intel_vgpu_default_mmio_write(vgpu, offset, p_data,
bytes);
- } else if (reg_is_gtt(gvt, offset) &&
- vgpu->gtt.ggtt_mm->virtual_page_table) {
+ } else if (reg_is_gtt(gvt, offset)) {
offset -= gvt->device_info.gtt_start_offset;
- pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+ pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
if (read)
memcpy(p_data, pt, bytes);
else
memcpy(pt, p_data, bytes);
- } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
- struct intel_vgpu_guest_page *gp;
-
- /* Since we enter the failsafe mode early during guest boot,
- * guest may not have chance to set up its ppgtt table, so
- * there should not be any wp pages for guest. Keep the wp
- * related code here in case we need to handle it in furture.
- */
- gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
- if (gp) {
- /* remove write protection to prevent furture traps */
- intel_vgpu_clean_guest_page(vgpu, gp);
- if (read)
- intel_gvt_hypervisor_read_gpa(vgpu, pa,
- p_data, bytes);
- else
- intel_gvt_hypervisor_write_gpa(vgpu, pa,
- p_data, bytes);
- }
}
mutex_unlock(&gvt->lock);
}
@@ -157,37 +105,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
unsigned int offset = 0;
int ret = -EINVAL;
-
if (vgpu->failsafe) {
failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
return 0;
}
mutex_lock(&gvt->lock);
- if (vgpu_gpa_is_aperture(vgpu, pa)) {
- ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
- mutex_unlock(&gvt->lock);
- return ret;
- }
-
- if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
- struct intel_vgpu_guest_page *gp;
-
- gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
- if (gp) {
- ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
- p_data, bytes);
- if (ret) {
- gvt_vgpu_err("guest page read error %d, "
- "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
- ret, gp->gfn, pa, *(u32 *)p_data,
- bytes);
- }
- mutex_unlock(&gvt->lock);
- return ret;
- }
- }
-
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (WARN_ON(bytes > 8))
@@ -201,18 +124,16 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
- ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
+ ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
@@ -228,11 +149,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
intel_gvt_mmio_set_accessed(gvt, offset);
- mutex_unlock(&gvt->lock);
- return 0;
+ ret = 0;
+ goto out;
+
err:
gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
offset, bytes);
+out:
mutex_unlock(&gvt->lock);
return ret;
}
@@ -261,30 +184,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mutex_lock(&gvt->lock);
- if (vgpu_gpa_is_aperture(vgpu, pa)) {
- ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
- mutex_unlock(&gvt->lock);
- return ret;
- }
-
- if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
- struct intel_vgpu_guest_page *gp;
-
- gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
- if (gp) {
- ret = gp->handler(gp, pa, p_data, bytes);
- if (ret) {
- gvt_err("guest page write error %d, "
- "gfn 0x%lx, pa 0x%llx, "
- "var 0x%x, len %d\n",
- ret, gp->gfn, pa,
- *(u32 *)p_data, bytes);
- }
- mutex_unlock(&gvt->lock);
- return ret;
- }
- }
-
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (WARN_ON(bytes > 8))
@@ -298,18 +197,16 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
- ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
+ ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
p_data, bytes);
if (ret)
goto err;
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
@@ -317,11 +214,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
intel_gvt_mmio_set_accessed(gvt, offset);
- mutex_unlock(&gvt->lock);
- return 0;
+ ret = 0;
+ goto out;
err:
gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
bytes);
+out:
mutex_unlock(&gvt->lock);
return ret;
}
@@ -342,10 +240,10 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
- vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+ vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
/* set the bit 0:2(Core C-State ) to C0 */
- vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+ vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
vgpu->mmio.disable_warn_untrack = false;
} else {
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index dbc04ad..71b6208 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -72,12 +72,9 @@ bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
-
-#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
- typeof(reg) __reg = reg; \
- u32 *offset = (u32 *)&__reg; \
- *offset; \
-})
+int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
+ int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
+ void *data);
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
new file mode 100644
index 0000000..a5bac83
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "trace.h"
+
+/**
+ * Defined in Intel Open Source PRM.
+ * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
+ */
+#define TRVATTL3PTRDW(i) _MMIO(0x4de0 + (i)*4)
+#define TRNULLDETCT _MMIO(0x4de8)
+#define TRINVTILEDETCT _MMIO(0x4dec)
+#define TRVADR _MMIO(0x4df0)
+#define TRTTE _MMIO(0x4df4)
+#define RING_EXCC(base) _MMIO((base) + 0x28)
+#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
+#define VF_GUARDBAND _MMIO(0x83a4)
+
+#define GEN9_MOCS_SIZE 64
+
+/* Raw offset is appended to each line for convenience. */
+static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
+ {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+ {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+};
+
+static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
+ {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
+ {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
+ {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
+ {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
+ {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
+ {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
+ {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
+ {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
+ {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
+ {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
+ {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
+ {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
+ {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
+ {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
+ {RCS, TRVADR, 0, false}, /* 0x4df0 */
+ {RCS, TRTTE, 0, false}, /* 0x4df4 */
+
+ {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+
+ {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
+ {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
+
+ {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
+ {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
+ {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
+
+ {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
+ {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+
+ {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
+ {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+ {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+ {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+};
+
+static struct {
+ bool initialized;
+ u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
+ u32 l3cc_table[GEN9_MOCS_SIZE / 2];
+} gen9_render_mocs;
+
+static void load_render_mocs(struct drm_i915_private *dev_priv)
+{
+ i915_reg_t offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int ring_id, i;
+
+ for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+ offset.reg = regs[ring_id];
+ for (i = 0; i < GEN9_MOCS_SIZE; i++) {
+ gen9_render_mocs.control_table[ring_id][i] =
+ I915_READ_FW(offset);
+ offset.reg += 4;
+ }
+ }
+
+ offset.reg = 0xb020;
+ for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
+ gen9_render_mocs.l3cc_table[i] =
+ I915_READ_FW(offset);
+ offset.reg += 4;
+ }
+ gen9_render_mocs.initialized = true;
+}
+
+static int
+restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
+ struct i915_request *req)
+{
+ u32 *cs;
+ int ret;
+ struct engine_mmio *mmio;
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ring_id = req->engine->id;
+ int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
+
+ if (count == 0)
+ return 0;
+
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ cs = intel_ring_begin(req, count * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(count);
+ for (mmio = gvt->engine_mmio_list.mmio;
+ i915_mmio_reg_valid(mmio->reg); mmio++) {
+ if (mmio->ring_id != ring_id ||
+ !mmio->in_context)
+ continue;
+
+ *cs++ = i915_mmio_reg_offset(mmio->reg);
+ *cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
+ (mmio->mask << 16);
+ gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
+ *(cs-2), *(cs-1), vgpu->id, ring_id);
+ }
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
+ struct i915_request *req)
+{
+ unsigned int index;
+ u32 *cs;
+
+ cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
+
+ for (index = 0; index < GEN9_MOCS_SIZE; index++) {
+ *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
+ *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
+ gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
+ *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+
+ }
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ return 0;
+}
+
+static int
+restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
+ struct i915_request *req)
+{
+ unsigned int index;
+ u32 *cs;
+
+ cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
+
+ for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
+ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
+ *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
+ gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
+ *(cs-2), *(cs-1), vgpu->id, req->engine->id);
+
+ }
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ return 0;
+}
+
+/*
+ * Use the LRI command to initialize the mmio that lives in the context state
+ * image of an inhibit context; this covers the tracked engine mmio,
+ * render_mocs and render_mocs_l3cc.
+ */
+int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
+ struct i915_request *req)
+{
+ int ret;
+ u32 *cs;
+
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ ret = restore_context_mmio_for_inhibit(vgpu, req);
+ if (ret)
+ goto out;
+
+ /* no MOCS register in context except render engine */
+ if (req->engine->id != RCS)
+ goto out;
+
+ ret = restore_render_mocs_control_for_inhibit(vgpu, req);
+ if (ret)
+ goto out;
+
+ ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
+ if (ret)
+ goto out;
+
+out:
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ return ret;
+}
+
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ enum forcewake_domains fw;
+ i915_reg_t reg;
+ u32 regs[] = {
+ [RCS] = 0x4260,
+ [VCS] = 0x4264,
+ [VCS2] = 0x4268,
+ [BCS] = 0x426c,
+ [VECS] = 0x4270,
+ };
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
+ return;
+
+ reg = _MMIO(regs[ring_id]);
+
+ /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+ * we need to put a forcewake when invalidating RCS TLB caches,
+ * otherwise device can go to RC6 state and interrupt invalidation
+ * process
+ */
+ fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ | FW_REG_WRITE);
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+ fw |= FORCEWAKE_RENDER;
+
+ intel_uncore_forcewake_get(dev_priv, fw);
+
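+ /* Writing 1 kicks off the TLB invalidation; hardware clears the
+ * register once the invalidation has completed.
+ */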
+ I915_WRITE_FW(reg, 0x1);
+
+ if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ else
+ vgpu_vreg_t(vgpu, reg) = 0;
+
+ intel_uncore_forcewake_put(dev_priv, fw);
+
+ gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+}
+
+static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
+ int ring_id)
+{
+ struct drm_i915_private *dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 old_v, new_v;
+
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+ return;
+
+ if (!pre && !gen9_render_mocs.initialized)
+ load_render_mocs(dev_priv);
+
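+ /* A NULL pre/next means the host owns the engine; its MOCS values
+ * come from the gen9_render_mocs snapshot.
+ */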
+ offset.reg = regs[ring_id];
+ for (i = 0; i < GEN9_MOCS_SIZE; i++) {
+ if (pre)
+ old_v = vgpu_vreg_t(pre, offset);
+ else
+ old_v = gen9_render_mocs.control_table[ring_id][i];
+ if (next)
+ new_v = vgpu_vreg_t(next, offset);
+ else
+ new_v = gen9_render_mocs.control_table[ring_id][i];
+
+ if (old_v != new_v)
+ I915_WRITE_FW(offset, new_v);
+
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
+ if (pre)
+ old_v = vgpu_vreg_t(pre, l3_offset);
+ else
+ old_v = gen9_render_mocs.l3cc_table[i];
+ if (next)
+ new_v = vgpu_vreg_t(next, l3_offset);
+ else
+ new_v = gen9_render_mocs.l3cc_table[i];
+
+ if (old_v != new_v)
+ I915_WRITE_FW(l3_offset, new_v);
+
+ l3_offset.reg += 4;
+ }
+ }
+}
+
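+/* Dword index of the CONTEXT_CONTROL register value in the LRC state image */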
+#define CTX_CONTEXT_CONTROL_VAL 0x03
+
+bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+{
+ u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+ u32 inhibit_mask =
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+
+ return inhibit_mask ==
+ (reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
+}
+
+/* Switch ring mmio values (context). */
+static void switch_mmio(struct intel_vgpu *pre,
+ struct intel_vgpu *next,
+ int ring_id)
+{
+ struct drm_i915_private *dev_priv;
+ struct intel_vgpu_submission *s;
+ struct engine_mmio *mmio;
+ u32 old_v, new_v;
+
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ switch_mocs(pre, next, ring_id);
+
+ for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
+ i915_mmio_reg_valid(mmio->reg); mmio++) {
+ if (mmio->ring_id != ring_id)
+ continue;
+ /*
+ * No need to save or restore the mmio that is in the context
+ * state image on Kabylake; it is initialized by the LRI command and
+ * saved or restored together with the context.
+ */
+ if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+ continue;
+
+ // save
+ if (pre) {
+ vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+ if (mmio->mask)
+ vgpu_vreg_t(pre, mmio->reg) &=
+ ~(mmio->mask << 16);
+ old_v = vgpu_vreg_t(pre, mmio->reg);
+ } else
+ old_v = mmio->value = I915_READ_FW(mmio->reg);
+
+ // restore
+ if (next) {
+ s = &next->submission;
+ /*
+ * No need to restore the mmio that is in the context state
+ * image if this is not an inhibit context; it will be restored
+ * by the context itself.
+ */
+ if (mmio->in_context &&
+ !is_inhibit_context(s->shadow_ctx, ring_id))
+ continue;
+
+ if (mmio->mask)
+ new_v = vgpu_vreg_t(next, mmio->reg) |
+ (mmio->mask << 16);
+ else
+ new_v = vgpu_vreg_t(next, mmio->reg);
+ } else {
+ if (mmio->in_context)
+ continue;
+ if (mmio->mask)
+ new_v = mmio->value | (mmio->mask << 16);
+ else
+ new_v = mmio->value;
+ }
+
+ I915_WRITE_FW(mmio->reg, new_v);
+
+ trace_render_mmio(pre ? pre->id : 0,
+ next ? next->id : 0,
+ "switch",
+ i915_mmio_reg_offset(mmio->reg),
+ old_v, new_v);
+ }
+
+ if (next)
+ handle_tlb_pending_event(next, ring_id);
+}
+
+/**
+ * intel_gvt_switch_mmio - switch the mmio context of a specific engine
+ * @pre: the last vGPU that owned the engine
+ * @next: the vGPU to switch to
+ * @ring_id: specify the engine
+ *
+ * If pre is null, the host owns the engine. If next is null, we are
+ * switching to a host workload.
+ */
+void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+ struct intel_vgpu *next, int ring_id)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (WARN_ON(!pre && !next))
+ return;
+
+ gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+ pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
+
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+
+ /**
+ * We are using the raw mmio access wrapper to improve the
+ * performance of batch mmio read/write, so we need to
+ * handle forcewake manually.
+ */
+ intel_runtime_pm_get(dev_priv);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ switch_mmio(pre, next, ring_id);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
+}
+
+/**
+ * intel_gvt_init_engine_mmio_context - Initialize the engine mmio list
+ * @gvt: GVT device
+ *
+ */
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
+{
+ struct engine_mmio *mmio;
+
+ if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
+ else
+ gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+
+ for (mmio = gvt->engine_mmio_list.mmio;
+ i915_mmio_reg_valid(mmio->reg); mmio++) {
+ if (mmio->in_context)
+ gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 91db1d3..0439eb8 100644
--- a/drivers/gpu/drm/i915/gvt/render.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -36,8 +36,22 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
+struct engine_mmio {
+ int ring_id;
+ i915_reg_t reg;
+ u32 mask;
+ bool in_context;
+ u32 value;
+};
+
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id);
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
+
+bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+
+int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
+ struct i915_request *req);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index f0e5487..32ffcd5 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -154,52 +154,31 @@ static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
}
/**
- * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
+ * intel_gvt_hypervisor_enable_page_track - track a guest page
* @vgpu: a vGPU
- * @p: intel_vgpu_guest_page
+ * @gfn: the gfn of guest
*
* Returns:
* Zero on success, negative error code if failed.
*/
-static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p)
+static inline int intel_gvt_hypervisor_enable_page_track(
+ struct intel_vgpu *vgpu, unsigned long gfn)
{
- int ret;
-
- if (p->writeprotection)
- return 0;
-
- ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
- if (ret)
- return ret;
- p->writeprotection = true;
- atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
- return 0;
+ return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
}
/**
- * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
- * guest page
+ * intel_gvt_hypervisor_disable_page_track - untrack a guest page
* @vgpu: a vGPU
- * @p: intel_vgpu_guest_page
+ * @gfn: the gfn of guest
*
* Returns:
* Zero on success, negative error code if failed.
*/
-static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
- struct intel_vgpu_guest_page *p)
+static inline int intel_gvt_hypervisor_disable_page_track(
+ struct intel_vgpu *vgpu, unsigned long gfn)
{
- int ret;
-
- if (!p->writeprotection)
- return 0;
-
- ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
- if (ret)
- return ret;
- p->writeprotection = false;
- atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
- return 0;
+ return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
}
/**
@@ -249,6 +228,34 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
}
/**
+ * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of the guest page
+ * @dma_addr: retrieve allocated dma addr
+ *
+ * Returns:
+ * 0 on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_dma_map_guest_page(
+ struct intel_vgpu *vgpu, unsigned long gfn,
+ dma_addr_t *dma_addr)
+{
+ return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+ dma_addr);
+}
+
+/**
+ * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
+ * @vgpu: a vGPU
+ * @dma_addr: the mapped dma addr
+ */
+static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
+ struct intel_vgpu *vgpu, dma_addr_t dma_addr)
+{
+ intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
+}
+
+/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU
* @gfn: guest PFN
@@ -292,4 +299,66 @@ static inline int intel_gvt_hypervisor_set_trap_area(
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
+/**
+ * intel_gvt_hypervisor_set_opregion - Set opregion for guest
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->set_opregion)
+ return 0;
+
+ return intel_gvt_host.mpt->set_opregion(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->get_vfio_device)
+ return 0;
+
+ return intel_gvt_host.mpt->get_vfio_device(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->put_vfio_device)
+ return;
+
+ intel_gvt_host.mpt->put_vfio_device(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_is_valid_gfn - check if a gfn is valid and visible
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * true if the gfn is valid, false if not.
+ */
+static inline bool intel_gvt_hypervisor_is_valid_gfn(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ if (!intel_gvt_host.mpt->is_valid_gfn)
+ return true;
+
+ return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
+}
+
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 3117991..fa75a2e 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -25,36 +25,237 @@
#include "i915_drv.h"
#include "gvt.h"
-static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
+/*
+ * Note: Only for GVT-g virtual VBT generation; other usages must
+ * not do this.
+ */
+#define _INTEL_BIOS_PRIVATE
+#include "intel_vbt_defs.h"
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_VBT (1<<3)
+
+/* device handle */
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_EFP1 0x04
+#define DEVICE_TYPE_EFP2 0x40
+#define DEVICE_TYPE_EFP3 0x20
+#define DEVICE_TYPE_EFP4 0x10
+
+#define DEV_SIZE 38
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u32 driver_model;
+ u32 pcon;
+ u8 dver[32];
+ u8 rsvd[124];
+} __packed;
+
+struct bdb_data_header {
+ u8 id;
+ u16 size; /* data size */
+} __packed;
+
+struct efp_child_device_config {
+ u16 handle;
+ u16 device_type;
+ u16 device_class;
+ u8 i2c_speed;
+ u8 dp_onboard_redriver; /* 158 */
+ u8 dp_ondock_redriver; /* 158 */
+ u8 hdmi_level_shifter_value:4; /* 169 */
+ u8 hdmi_max_data_rate:4; /* 204 */
+ u16 dtd_buf_ptr; /* 161 */
+ u8 edidless_efp:1; /* 161 */
+ u8 compression_enable:1; /* 198 */
+ u8 compression_method:1; /* 198 */
+ u8 ganged_edp:1; /* 202 */
+ u8 skip0:4;
+ u8 compression_structure_index:4; /* 198 */
+ u8 skip1:4;
+ u8 slave_port; /* 202 */
+ u8 skip2;
+ u8 dvo_port;
+ u8 i2c_pin; /* for add-in card */
+ u8 slave_addr; /* for add-in card */
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_config;
+ u8 efp_docked_port:1; /* 158 */
+ u8 lane_reversal:1; /* 184 */
+ u8 onboard_lspcon:1; /* 192 */
+ u8 iboost_enable:1; /* 196 */
+ u8 hpd_invert:1; /* BXT 196 */
+ u8 slip3:3;
+ u8 hdmi_compat:1;
+ u8 dp_compat:1;
+ u8 tmds_compat:1;
+ u8 skip4:5;
+ u8 aux_channel;
+ u8 dongle_detect;
+ u8 pipe_cap:2;
+ u8 sdvo_stall:1; /* 158 */
+ u8 hpd_status:2;
+ u8 integrated_encoder:1;
+ u8 skip5:2;
+ u8 dvo_wiring;
+ u8 mipi_bridge_type; /* 171 */
+ u16 device_class_ext;
+ u8 dvo_function;
+ u8 dp_usb_type_c:1; /* 195 */
+ u8 skip6:7;
+ u8 dp_usb_type_c_2x_gpio_index; /* 195 */
+ u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
+ u8 iboost_dp:4; /* 196 */
+ u8 iboost_hdmi:4; /* 196 */
+} __packed;
+
+struct vbt {
+ /* header->bdb_offset points to the bdb_header offset */
+ struct vbt_header header;
+ struct bdb_header bdb_header;
+
+ struct bdb_data_header general_features_header;
+ struct bdb_general_features general_features;
+
+ struct bdb_data_header general_definitions_header;
+ struct bdb_general_definitions general_definitions;
+
+ struct efp_child_device_config child0;
+ struct efp_child_device_config child1;
+ struct efp_child_device_config child2;
+ struct efp_child_device_config child3;
+
+ struct bdb_data_header driver_features_header;
+ struct bdb_driver_features driver_features;
+};
+
+static void virt_vbt_generation(struct vbt *v)
{
- u8 *buf;
- int i;
+ int num_child;
+
+ memset(v, 0, sizeof(struct vbt));
+
+ v->header.signature[0] = '$';
+ v->header.signature[1] = 'V';
+ v->header.signature[2] = 'B';
+ v->header.signature[3] = 'T';
+
+ /* there are features that depend on the version! */
+ v->header.version = 155;
+ v->header.header_size = sizeof(v->header);
+ v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+ v->header.bdb_offset = offsetof(struct vbt, bdb_header);
+
+ strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
+ v->bdb_header.version = 186; /* child_dev_size = 38 */
+ v->bdb_header.header_size = sizeof(v->bdb_header);
+
+ v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
+ - sizeof(struct bdb_header);
+
+ /* general features */
+ v->general_features_header.id = BDB_GENERAL_FEATURES;
+ v->general_features_header.size = sizeof(struct bdb_general_features);
+ v->general_features.int_crt_support = 0;
+ v->general_features.int_tv_support = 0;
+
+ /* child device */
+ num_child = 4; /* each port has one child */
+ v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
+ /* size will include child devices */
+ v->general_definitions_header.size =
+ sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
+ v->general_definitions.child_dev_size = DEV_SIZE;
+
+ /* portA */
+ v->child0.handle = DEVICE_TYPE_EFP1;
+ v->child0.device_type = DEVICE_TYPE_DP;
+ v->child0.dvo_port = DVO_PORT_DPA;
+ v->child0.aux_channel = DP_AUX_A;
+ v->child0.dp_compat = true;
+ v->child0.integrated_encoder = true;
+
+ /* portB */
+ v->child1.handle = DEVICE_TYPE_EFP2;
+ v->child1.device_type = DEVICE_TYPE_DP;
+ v->child1.dvo_port = DVO_PORT_DPB;
+ v->child1.aux_channel = DP_AUX_B;
+ v->child1.dp_compat = true;
+ v->child1.integrated_encoder = true;
+
+ /* portC */
+ v->child2.handle = DEVICE_TYPE_EFP3;
+ v->child2.device_type = DEVICE_TYPE_DP;
+ v->child2.dvo_port = DVO_PORT_DPC;
+ v->child2.aux_channel = DP_AUX_C;
+ v->child2.dp_compat = true;
+ v->child2.integrated_encoder = true;
+
+ /* portD */
+ v->child3.handle = DEVICE_TYPE_EFP4;
+ v->child3.device_type = DEVICE_TYPE_DP;
+ v->child3.dvo_port = DVO_PORT_DPD;
+ v->child3.aux_channel = DP_AUX_D;
+ v->child3.dp_compat = true;
+ v->child3.integrated_encoder = true;
+
+ /* driver features */
+ v->driver_features_header.id = BDB_DRIVER_FEATURES;
+ v->driver_features_header.size = sizeof(struct bdb_driver_features);
+ v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
+}
- if (WARN((vgpu_opregion(vgpu)->va),
- "vgpu%d: opregion has been initialized already.\n",
- vgpu->id))
- return -EINVAL;
+/**
+ * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
+{
+ u8 *buf;
+ struct opregion_header *header;
+ struct vbt v;
+ const char opregion_signature[16] = OPREGION_SIGNATURE;
+ gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
__GFP_ZERO,
get_order(INTEL_GVT_OPREGION_SIZE));
-
- if (!vgpu_opregion(vgpu)->va)
+ if (!vgpu_opregion(vgpu)->va) {
+ gvt_err("fail to get memory for vgpu virt opregion\n");
return -ENOMEM;
+ }
- memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
- INTEL_GVT_OPREGION_SIZE);
-
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
- vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+ /* emulated opregion with VBT mailbox only */
+ buf = (u8 *)vgpu_opregion(vgpu)->va;
+ header = (struct opregion_header *)buf;
+ memcpy(header->signature, opregion_signature,
+ sizeof(opregion_signature));
+ header->size = 0x8;
+ header->opregion_ver = 0x02000000;
+ header->mboxes = MBOX_VBT;
/* for unknown reason, the value in LID field is incorrect
* which block the windows guest, so workaround it by force
* setting it to "OPEN"
*/
- buf = (u8 *)vgpu_opregion(vgpu)->va;
buf[INTEL_GVT_OPREGION_CLID] = 0x3;
+ /* emulated vbt from virt vbt generation */
+ virt_vbt_generation(&v);
+ memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt));
+
return 0;
}
@@ -79,93 +280,80 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
return ret;
}
}
- return 0;
-}
-/**
- * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
- * @vgpu: a vGPU
- *
- */
-void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
-{
- gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
-
- if (!vgpu_opregion(vgpu)->va)
- return;
-
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
- map_vgpu_opregion(vgpu, false);
- free_pages((unsigned long)vgpu_opregion(vgpu)->va,
- get_order(INTEL_GVT_OPREGION_SIZE));
+ vgpu_opregion(vgpu)->mapped = map;
- vgpu_opregion(vgpu)->va = NULL;
- }
+ return 0;
}
/**
- * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
+ *
* @vgpu: a vGPU
* @gpa: guest physical address of opregion
*
* Returns:
* Zero on success, negative error code if failed.
*/
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
- int ret;
- gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+ int i, ret = 0;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
- gvt_dbg_core("emulate opregion from kernel\n");
+ gvt_dbg_core("emulate opregion from kernel\n");
- ret = init_vgpu_opregion(vgpu, gpa);
- if (ret)
- return ret;
+ switch (intel_gvt_host.hypervisor_type) {
+ case INTEL_GVT_HYPERVISOR_KVM:
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+ break;
+ case INTEL_GVT_HYPERVISOR_XEN:
+ /**
+ * A Windows guest on XenGT will write this register twice: once by
+ * the Xen hvmloader and once by the Windows graphics driver.
+ */
+ if (vgpu_opregion(vgpu)->mapped)
+ map_vgpu_opregion(vgpu, false);
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
ret = map_vgpu_opregion(vgpu, true);
- if (ret)
- return ret;
+ break;
+ default:
+ ret = -EINVAL;
+ gvt_vgpu_err("not supported hypervisor\n");
}
- return 0;
-}
-
-/**
- * intel_gvt_clean_opregion - clean host opergion related stuffs
- * @gvt: a GVT device
- *
- */
-void intel_gvt_clean_opregion(struct intel_gvt *gvt)
-{
- memunmap(gvt->opregion.opregion_va);
- gvt->opregion.opregion_va = NULL;
+ return ret;
}
/**
- * intel_gvt_init_opregion - initialize host opergion related stuffs
- * @gvt: a GVT device
+ * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
+ * @vgpu: a vGPU
*
- * Returns:
- * Zero on success, negative error code if failed.
*/
-int intel_gvt_init_opregion(struct intel_gvt *gvt)
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
- gvt_dbg_core("init host opregion\n");
+ gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
- pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
- &gvt->opregion.opregion_pa);
+ if (!vgpu_opregion(vgpu)->va)
+ return;
- gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
- INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
- if (!gvt->opregion.opregion_va) {
- gvt_err("fail to map host opregion\n");
- return -EFAULT;
+ if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
+ if (vgpu_opregion(vgpu)->mapped)
+ map_vgpu_opregion(vgpu, false);
+ } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
+ /* Guest opregion is released by VFIO */
}
- return 0;
+ free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+ get_order(INTEL_GVT_OPREGION_SIZE));
+
+ vgpu_opregion(vgpu)->va = NULL;
+
}
+
#define GVT_OPREGION_FUNC(scic) \
({ \
u32 __ret; \
@@ -281,11 +469,45 @@ static bool querying_capabilities(u32 scic)
*/
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
{
- u32 *scic, *parm;
+ u32 scic, parm;
u32 func, subfunc;
+ u64 scic_pa = 0, parm_pa = 0;
+ int ret;
+
+ switch (intel_gvt_host.hypervisor_type) {
+ case INTEL_GVT_HYPERVISOR_XEN:
+ scic = *((u32 *)vgpu_opregion(vgpu)->va +
+ INTEL_GVT_OPREGION_SCIC);
+ parm = *((u32 *)vgpu_opregion(vgpu)->va +
+ INTEL_GVT_OPREGION_PARM);
+ break;
+ case INTEL_GVT_HYPERVISOR_KVM:
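+ /* On KVM the opregion lives in guest memory, so SCIC/PARM are
+ * accessed through their guest physical addresses.
+ */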
+ scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+ INTEL_GVT_OPREGION_SCIC;
+ parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+ INTEL_GVT_OPREGION_PARM;
+
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
+ &scic, sizeof(scic));
+ if (ret) {
+ gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+ ret, scic_pa, sizeof(scic));
+ return ret;
+ }
- scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
- parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
+ &parm, sizeof(parm));
+ if (ret) {
+ gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+ ret, parm_pa, sizeof(parm));
+ return ret;
+ }
+
+ break;
+ default:
+ gvt_vgpu_err("not supported hypervisor\n");
+ return -EINVAL;
+ }
if (!(swsci & SWSCI_SCI_SELECT)) {
gvt_vgpu_err("requesting SMI service\n");
@@ -298,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
return 0;
}
- func = GVT_OPREGION_FUNC(*scic);
- subfunc = GVT_OPREGION_SUBFUNC(*scic);
- if (!querying_capabilities(*scic)) {
+ func = GVT_OPREGION_FUNC(scic);
+ subfunc = GVT_OPREGION_SUBFUNC(scic);
+ if (!querying_capabilities(scic)) {
gvt_vgpu_err("requesting runtime service: func \"%s\","
" subfunc \"%s\"\n",
opregion_func_name(func),
@@ -309,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
* emulate exit status of function call, '0' means
* "failure, generic, unsupported or unknown cause"
*/
- *scic &= ~OPREGION_SCIC_EXIT_MASK;
- return 0;
+ scic &= ~OPREGION_SCIC_EXIT_MASK;
+ goto out;
+ }
+
+ scic = 0;
+ parm = 0;
+
+out:
+ switch (intel_gvt_host.hypervisor_type) {
+ case INTEL_GVT_HYPERVISOR_XEN:
+ *((u32 *)vgpu_opregion(vgpu)->va +
+ INTEL_GVT_OPREGION_SCIC) = scic;
+ *((u32 *)vgpu_opregion(vgpu)->va +
+ INTEL_GVT_OPREGION_PARM) = parm;
+ break;
+ case INTEL_GVT_HYPERVISOR_KVM:
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
+ &scic, sizeof(scic));
+ if (ret) {
+ gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+ ret, scic_pa, sizeof(scic));
+ return ret;
+ }
+
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
+ &parm, sizeof(parm));
+ if (ret) {
+ gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+ ret, parm_pa, sizeof(parm));
+ return ret;
+ }
+
+ break;
+ default:
+ gvt_vgpu_err("not supported hypervisor\n");
+ return -EINVAL;
}
- *scic = 0;
- *parm = 0;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
new file mode 100644
index 0000000..53e2bd7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "i915_drv.h"
+#include "gvt.h"
+
+/**
+ * intel_vgpu_find_page_track - find the page track record of a guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * A pointer to struct intel_vgpu_page_track if found, otherwise NULL.
+ */
+struct intel_vgpu_page_track *intel_vgpu_find_page_track(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ return radix_tree_lookup(&vgpu->page_track_tree, gfn);
+}
+
+/**
+ * intel_vgpu_register_page_track - register a guest page to be tracked
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
+ gvt_page_track_handler_t handler, void *priv)
+{
+ struct intel_vgpu_page_track *track;
+ int ret;
+
+ track = intel_vgpu_find_page_track(vgpu, gfn);
+ if (track)
+ return -EEXIST;
+
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (!track)
+ return -ENOMEM;
+
+ track->handler = handler;
+ track->priv_data = priv;
+
+ ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
+ if (ret) {
+ kfree(track);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_vgpu_unregister_page_track - unregister the tracked guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ */
+void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
+ unsigned long gfn)
+{
+ struct intel_vgpu_page_track *track;
+
+ track = radix_tree_delete(&vgpu->page_track_tree, gfn);
+ if (track) {
+ if (track->tracked)
+ intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+ kfree(track);
+ }
+}
+
+/**
+ * intel_vgpu_enable_page_track - set write-protection on guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_page_track *track;
+ int ret;
+
+ track = intel_vgpu_find_page_track(vgpu, gfn);
+ if (!track)
+ return -ENXIO;
+
+ if (track->tracked)
+ return 0;
+
+ ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
+ if (ret)
+ return ret;
+ track->tracked = true;
+ return 0;
+}
+
+/**
+ * intel_vgpu_disable_page_track - cancel write-protection on guest page
+ * @vgpu: a vGPU
+ * @gfn: the gfn of guest page
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct intel_vgpu_page_track *track;
+ int ret;
+
+ track = intel_vgpu_find_page_track(vgpu, gfn);
+ if (!track)
+ return -ENXIO;
+
+ if (!track->tracked)
+ return 0;
+
+ ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+ if (ret)
+ return ret;
+ track->tracked = false;
+ return 0;
+}
+
+/**
+ * intel_vgpu_page_track_handler - called on a write to a write-protected page
+ * @vgpu: a vGPU
+ * @gpa: the gpa of this write
+ * @data: the data written
+ * @bytes: the length of this write
+ *
+ * Returns:
+ * zero on success, negative error code if failed.
+ */
+int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
+ void *data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_vgpu_page_track *page_track;
+ int ret = 0;
+
+ mutex_lock(&gvt->lock);
+
+ page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
+ if (!page_track) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (unlikely(vgpu->failsafe)) {
+ /* Remove write protection to prevent future traps. */
+ intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
+ } else {
+ ret = page_track->handler(page_track, gpa, data, bytes);
+ if (ret)
+ gvt_err("guest page write error, gpa %llx\n", gpa);
+ }
+
+out:
+ mutex_unlock(&gvt->lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gvt/page_track.h b/drivers/gpu/drm/i915/gvt/page_track.h
new file mode 100644
index 0000000..fa607a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/page_track.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _GVT_PAGE_TRACK_H_
+#define _GVT_PAGE_TRACK_H_
+
+struct intel_vgpu_page_track;
+
+typedef int (*gvt_page_track_handler_t)(
+ struct intel_vgpu_page_track *page_track,
+ u64 gpa, void *data, int bytes);
+
+/* Track record for a write-protected guest page. */
+struct intel_vgpu_page_track {
+ gvt_page_track_handler_t handler;
+ bool tracked;
+ void *priv_data;
+};
+
+struct intel_vgpu_page_track *intel_vgpu_find_page_track(
+ struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_register_page_track(struct intel_vgpu *vgpu,
+ unsigned long gfn, gvt_page_track_handler_t handler,
+ void *priv);
+void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
+ unsigned long gfn);
+
+int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
+int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
+
+int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
+ void *data, unsigned int bytes);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 7d01c77..d4f7ce6 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -51,6 +51,9 @@
#define INTEL_GVT_OPREGION_PAGES 2
#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
+#define INTEL_GVT_OPREGION_VBT_OFFSET 0x400
+#define INTEL_GVT_OPREGION_VBT_SIZE \
+ (INTEL_GVT_OPREGION_SIZE - INTEL_GVT_OPREGION_VBT_OFFSET)
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
@@ -71,6 +74,7 @@
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
-#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
+#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \
+ I915_GTT_PAGE_SIZE)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
deleted file mode 100644
index 6d066cf..0000000
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Eddie Dong <eddie.dong@intel.com>
- * Kevin Tian <kevin.tian@intel.com>
- *
- * Contributors:
- * Zhi Wang <zhi.a.wang@intel.com>
- * Changbin Du <changbin.du@intel.com>
- * Zhenyu Wang <zhenyuw@linux.intel.com>
- * Tina Zhang <tina.zhang@intel.com>
- * Bing Niu <bing.niu@intel.com>
- *
- */
-
-#include "i915_drv.h"
-#include "gvt.h"
-#include "trace.h"
-
-struct render_mmio {
- int ring_id;
- i915_reg_t reg;
- u32 mask;
- bool in_context;
- u32 value;
-};
-
-static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
- {RCS, _MMIO(0x229c), 0xffff, false},
- {RCS, _MMIO(0x2248), 0x0, false},
- {RCS, _MMIO(0x2098), 0x0, false},
- {RCS, _MMIO(0x20c0), 0xffff, true},
- {RCS, _MMIO(0x24d0), 0, false},
- {RCS, _MMIO(0x24d4), 0, false},
- {RCS, _MMIO(0x24d8), 0, false},
- {RCS, _MMIO(0x24dc), 0, false},
- {RCS, _MMIO(0x24e0), 0, false},
- {RCS, _MMIO(0x24e4), 0, false},
- {RCS, _MMIO(0x24e8), 0, false},
- {RCS, _MMIO(0x24ec), 0, false},
- {RCS, _MMIO(0x24f0), 0, false},
- {RCS, _MMIO(0x24f4), 0, false},
- {RCS, _MMIO(0x24f8), 0, false},
- {RCS, _MMIO(0x24fc), 0, false},
- {RCS, _MMIO(0x7004), 0xffff, true},
- {RCS, _MMIO(0x7008), 0xffff, true},
- {RCS, _MMIO(0x7000), 0xffff, true},
- {RCS, _MMIO(0x7010), 0xffff, true},
- {RCS, _MMIO(0x7300), 0xffff, true},
- {RCS, _MMIO(0x83a4), 0xffff, true},
-
- {BCS, _MMIO(0x2229c), 0xffff, false},
- {BCS, _MMIO(0x2209c), 0xffff, false},
- {BCS, _MMIO(0x220c0), 0xffff, false},
- {BCS, _MMIO(0x22098), 0x0, false},
- {BCS, _MMIO(0x22028), 0x0, false},
-};
-
-static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
- {RCS, _MMIO(0x229c), 0xffff, false},
- {RCS, _MMIO(0x2248), 0x0, false},
- {RCS, _MMIO(0x2098), 0x0, false},
- {RCS, _MMIO(0x20c0), 0xffff, true},
- {RCS, _MMIO(0x24d0), 0, false},
- {RCS, _MMIO(0x24d4), 0, false},
- {RCS, _MMIO(0x24d8), 0, false},
- {RCS, _MMIO(0x24dc), 0, false},
- {RCS, _MMIO(0x24e0), 0, false},
- {RCS, _MMIO(0x24e4), 0, false},
- {RCS, _MMIO(0x24e8), 0, false},
- {RCS, _MMIO(0x24ec), 0, false},
- {RCS, _MMIO(0x24f0), 0, false},
- {RCS, _MMIO(0x24f4), 0, false},
- {RCS, _MMIO(0x24f8), 0, false},
- {RCS, _MMIO(0x24fc), 0, false},
- {RCS, _MMIO(0x7004), 0xffff, true},
- {RCS, _MMIO(0x7008), 0xffff, true},
- {RCS, _MMIO(0x7000), 0xffff, true},
- {RCS, _MMIO(0x7010), 0xffff, true},
- {RCS, _MMIO(0x7300), 0xffff, true},
- {RCS, _MMIO(0x83a4), 0xffff, true},
-
- {RCS, _MMIO(0x40e0), 0, false},
- {RCS, _MMIO(0x40e4), 0, false},
- {RCS, _MMIO(0x2580), 0xffff, true},
- {RCS, _MMIO(0x7014), 0xffff, true},
- {RCS, _MMIO(0x20ec), 0xffff, false},
- {RCS, _MMIO(0xb118), 0, false},
- {RCS, _MMIO(0xe100), 0xffff, true},
- {RCS, _MMIO(0xe180), 0xffff, true},
- {RCS, _MMIO(0xe184), 0xffff, true},
- {RCS, _MMIO(0xe188), 0xffff, true},
- {RCS, _MMIO(0xe194), 0xffff, true},
- {RCS, _MMIO(0x4de0), 0, false},
- {RCS, _MMIO(0x4de4), 0, false},
- {RCS, _MMIO(0x4de8), 0, false},
- {RCS, _MMIO(0x4dec), 0, false},
- {RCS, _MMIO(0x4df0), 0, false},
- {RCS, _MMIO(0x4df4), 0, false},
-
- {BCS, _MMIO(0x2229c), 0xffff, false},
- {BCS, _MMIO(0x2209c), 0xffff, false},
- {BCS, _MMIO(0x220c0), 0xffff, false},
- {BCS, _MMIO(0x22098), 0x0, false},
- {BCS, _MMIO(0x22028), 0x0, false},
-
- {VCS2, _MMIO(0x1c028), 0xffff, false},
-
- {VECS, _MMIO(0x1a028), 0xffff, false},
-
- {RCS, _MMIO(0x7304), 0xffff, true},
- {RCS, _MMIO(0x2248), 0x0, false},
- {RCS, _MMIO(0x940c), 0x0, false},
- {RCS, _MMIO(0x4ab8), 0x0, false},
-
- {RCS, _MMIO(0x4ab0), 0x0, false},
- {RCS, _MMIO(0x20d4), 0x0, false},
-
- {RCS, _MMIO(0xb004), 0x0, false},
- {RCS, _MMIO(0x20a0), 0x0, false},
- {RCS, _MMIO(0x20e4), 0xffff, false},
-};
-
-static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
-static u32 gen9_render_mocs_L3[32];
-
-static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- enum forcewake_domains fw;
- i915_reg_t reg;
- u32 regs[] = {
- [RCS] = 0x4260,
- [VCS] = 0x4264,
- [VCS2] = 0x4268,
- [BCS] = 0x426c,
- [VECS] = 0x4270,
- };
-
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
- return;
-
- if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
- return;
-
- reg = _MMIO(regs[ring_id]);
-
- /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
- * we need to put a forcewake when invalidating RCS TLB caches,
- * otherwise device can go to RC6 state and interrupt invalidation
- * process
- */
- fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
- FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
- fw |= FORCEWAKE_RENDER;
-
- intel_uncore_forcewake_get(dev_priv, fw);
-
- I915_WRITE_FW(reg, 0x1);
-
- if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
- gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
- else
- vgpu_vreg(vgpu, regs[ring_id]) = 0;
-
- intel_uncore_forcewake_put(dev_priv, fw);
-
- gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
-}
-
-static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- i915_reg_t offset, l3_offset;
- u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
- };
- int i;
-
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
- return;
-
- offset.reg = regs[ring_id];
- for (i = 0; i < 64; i++) {
- gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
- I915_WRITE(offset, vgpu_vreg(vgpu, offset));
- offset.reg += 4;
- }
-
- if (ring_id == RCS) {
- l3_offset.reg = 0xb020;
- for (i = 0; i < 32; i++) {
- gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
- I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
- l3_offset.reg += 4;
- }
- }
-}
-
-static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- i915_reg_t offset, l3_offset;
- u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
- };
- int i;
-
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
- return;
-
- offset.reg = regs[ring_id];
- for (i = 0; i < 64; i++) {
- vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
- I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
- offset.reg += 4;
- }
-
- if (ring_id == RCS) {
- l3_offset.reg = 0xb020;
- for (i = 0; i < 32; i++) {
- vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
- I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
- l3_offset.reg += 4;
- }
- }
-}
-
-#define CTX_CONTEXT_CONTROL_VAL 0x03
-
-/* Switch ring mmio values (context) from host to a vgpu. */
-static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct render_mmio *mmio;
- u32 v;
- int i, array_size;
- u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
- u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
- u32 inhibit_mask =
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- i915_reg_t last_reg = _MMIO(0);
-
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
- mmio = gen9_render_mmio_list;
- array_size = ARRAY_SIZE(gen9_render_mmio_list);
- load_mocs(vgpu, ring_id);
- } else {
- mmio = gen8_render_mmio_list;
- array_size = ARRAY_SIZE(gen8_render_mmio_list);
- }
-
- for (i = 0; i < array_size; i++, mmio++) {
- if (mmio->ring_id != ring_id)
- continue;
-
- mmio->value = I915_READ_FW(mmio->reg);
-
- /*
- * if it is an inhibit context, load in_context mmio
- * into HW by mmio write. If it is not, skip this mmio
- * write.
- */
- if (mmio->in_context &&
- ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
- i915_modparams.enable_execlists)
- continue;
-
- if (mmio->mask)
- v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
- else
- v = vgpu_vreg(vgpu, mmio->reg);
-
- I915_WRITE_FW(mmio->reg, v);
- last_reg = mmio->reg;
-
- trace_render_mmio(vgpu->id, "load",
- i915_mmio_reg_offset(mmio->reg),
- mmio->value, v);
- }
-
- /* Make sure the swiched MMIOs has taken effect. */
- if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
- I915_READ_FW(last_reg);
-
- handle_tlb_pending_event(vgpu, ring_id);
-}
-
-/* Switch ring mmio values (context) from vgpu to host. */
-static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct render_mmio *mmio;
- i915_reg_t last_reg = _MMIO(0);
- u32 v;
- int i, array_size;
-
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- mmio = gen9_render_mmio_list;
- array_size = ARRAY_SIZE(gen9_render_mmio_list);
- restore_mocs(vgpu, ring_id);
- } else {
- mmio = gen8_render_mmio_list;
- array_size = ARRAY_SIZE(gen8_render_mmio_list);
- }
-
- for (i = 0; i < array_size; i++, mmio++) {
- if (mmio->ring_id != ring_id)
- continue;
-
- vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
-
- if (mmio->mask) {
- vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
- v = mmio->value | (mmio->mask << 16);
- } else
- v = mmio->value;
-
- if (mmio->in_context)
- continue;
-
- I915_WRITE_FW(mmio->reg, v);
- last_reg = mmio->reg;
-
- trace_render_mmio(vgpu->id, "restore",
- i915_mmio_reg_offset(mmio->reg),
- mmio->value, v);
- }
-
- /* Make sure the swiched MMIOs has taken effect. */
- if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
- I915_READ_FW(last_reg);
-}
-
-/**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
- * @pre: the last vGPU that own the engine
- * @next: the vGPU to switch to
- * @ring_id: specify the engine
- *
- * If pre is null indicates that host own the engine. If next is null
- * indicates that we are switching to host workload.
- */
-void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id)
-{
- struct drm_i915_private *dev_priv;
-
- if (WARN_ON(!pre && !next))
- return;
-
- gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
- pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
-
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-
- /**
- * We are using raw mmio access wrapper to improve the
- * performace for batch mmio read/write, so we need
- * handle forcewake mannually.
- */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-
- /**
- * TODO: Optimize for vGPU to vGPU switch by merging
- * switch_mmio_to_host() and switch_mmio_to_vgpu().
- */
- if (pre)
- switch_mmio_to_host(pre, ring_id);
-
- if (next)
- switch_mmio_to_vgpu(next, ring_id);
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 03532df..75b7bc7 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
struct vgpu_sched_data {
struct list_head lru_list;
struct intel_vgpu *vgpu;
+ bool active;
ktime_t sched_in_time;
ktime_t sched_out_time;
@@ -102,9 +103,8 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
list_for_each(pos, &sched_data->lru_runq_head) {
vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
- fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
- vgpu_data->sched_ctl.weight /
- total_weight;
+ fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
+ total_weight) * vgpu_data->sched_ctl.weight;
vgpu_data->allocated_ts = fair_timeslice;
vgpu_data->left_ts = vgpu_data->allocated_ts;
@@ -308,8 +308,15 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+
kfree(vgpu->sched_data);
vgpu->sched_data = NULL;
+
+ /* this vgpu id has been removed */
+ if (idr_is_empty(&gvt->vgpu_idr))
+ hrtimer_cancel(&sched_data->timer);
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -325,6 +332,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
if (!hrtimer_active(&sched_data->timer))
hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
sched_data->period), HRTIMER_MODE_ABS);
+ vgpu_data->active = true;
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -332,6 +340,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
list_del_init(&vgpu_data->lru_list);
+ vgpu_data->active = false;
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -367,9 +376,17 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
- gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+ if (!vgpu_data->active) {
+ gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+ vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+ }
+}
+
+void intel_gvt_kick_schedule(struct intel_gvt *gvt)
+{
+ intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -377,6 +394,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
int ring_id;
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+
+ if (!vgpu_data->active)
+ return;
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index ba00a5f..7b59e3e 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -57,4 +57,6 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
+void intel_gvt_kick_schedule(struct intel_gvt *gvt);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 69f8f0d..638abe8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,12 +52,83 @@ static void set_context_pdp_root_pointer(
pdp_pair[i].val = pdp[7 - i];
}
+static void update_shadow_pdps(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ if (WARN_ON(!workload->shadow_mm))
+ return;
+
+ if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
+ return;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap(page);
+ set_context_pdp_root_pointer(shadow_ring_context,
+ (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+ kunmap(page);
+}
+
+/*
+ * When populating the shadow ctx from the guest, we should not override
+ * OA-related registers, so that they are not overwritten by guest OA configs.
+ * This makes it possible to capture OA data from the host for both host and guests.
+ */
+static void sr_oa_regs(struct intel_vgpu_workload *workload,
+ u32 *reg_state, bool save)
+{
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
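+ /* Dword offsets of OACTXCONTROL and the first flex EU register in the
+ * context image, as discovered by the host i915 perf/OA code.
+ */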
+ int i = 0;
+ u32 flex_mmio[] = {
+ i915_mmio_reg_offset(EU_PERF_CNTL0),
+ i915_mmio_reg_offset(EU_PERF_CNTL1),
+ i915_mmio_reg_offset(EU_PERF_CNTL2),
+ i915_mmio_reg_offset(EU_PERF_CNTL3),
+ i915_mmio_reg_offset(EU_PERF_CNTL4),
+ i915_mmio_reg_offset(EU_PERF_CNTL5),
+ i915_mmio_reg_offset(EU_PERF_CNTL6),
+ };
+
+ if (!workload || !reg_state || workload->ring_id != RCS)
+ return;
+
+ if (save) {
+ workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
+
+ for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+ u32 state_offset = ctx_flexeu0 + i * 2;
+
+ workload->flex_mmio[i] = reg_state[state_offset + 1];
+ }
+ } else {
+ reg_state[ctx_oactxctrl] =
+ i915_mmio_reg_offset(GEN8_OACTXCONTROL);
+ reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
+
+ for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+ u32 state_offset = ctx_flexeu0 + i * 2;
+ u32 mmio = flex_mmio[i];
+
+ reg_state[state_offset] = mmio;
+ reg_state[state_offset + 1] = workload->flex_mmio[i];
+ }
+ }
+}
+
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -81,16 +152,16 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
- GTT_PAGE_SHIFT));
+ I915_GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("Invalid guest context descriptor\n");
- return -EINVAL;
+ return -EFAULT;
}
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
- GTT_PAGE_SIZE);
+ I915_GTT_PAGE_SIZE);
kunmap(page);
i++;
}
@@ -98,11 +169,18 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap(page);
+ sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+#define COPY_REG_MASKED(name) {\
+ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ + RING_CTX_OFF(name.val),\
+ &shadow_ring_context->name.val, 4);\
+ shadow_ring_context->name.val |= 0xffff << 16;\
+ }
- COPY_REG(ctx_ctrl);
+ COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
if (ring_id == RCS) {
@@ -111,22 +189,21 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(rcs_indirect_ctx_offset);
}
#undef COPY_REG
-
- set_context_pdp_root_pointer(shadow_ring_context,
- workload->shadow_mm->shadow_page_table);
+#undef COPY_REG_MASKED
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
- GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
kunmap(page);
return 0;
}
-static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+static inline bool is_gvt_request(struct i915_request *req)
{
return i915_gem_context_force_single_submission(req->ctx);
}
@@ -148,7 +225,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+ struct i915_request *req = data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -225,6 +302,11 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
void *shadow_ring_buffer_va;
u32 *cs;
+ struct i915_request *req = workload->req;
+
+ if (IS_KABYLAKE(req->i915) &&
+ is_inhibit_context(req->ctx, req->engine->id))
+ intel_vgpu_restore_inhibit_context(vgpu, req);
/* allocate shadow ring buffer */
cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
@@ -248,7 +330,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
return 0;
}
-void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
if (!wa_ctx->indirect_ctx.obj)
return;
@@ -267,11 +349,12 @@ void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
*/
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret;
@@ -284,7 +367,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
+ if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
shadow_context_descriptor_update(shadow_ctx,
dev_priv->engine[ring_id]);
@@ -327,17 +410,18 @@ err_scan:
return ret;
}
-int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
- struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ret;
- rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
@@ -346,7 +430,7 @@ int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
- workload->req = i915_gem_request_get(rq);
+ workload->req = i915_request_get(rq);
ret = copy_workload_to_ring_buffer(workload);
if (ret)
goto err_unpin;
@@ -358,11 +442,216 @@ err_unpin:
return ret;
}
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
+
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_gvt *gvt = workload->vgpu->gvt;
+ const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
+ struct intel_vgpu_shadow_bb *bb;
+ int ret;
+
+ list_for_each_entry(bb, &workload->shadow_bb, list) {
+ bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
+ if (IS_ERR(bb->vma)) {
+ ret = PTR_ERR(bb->vma);
+ goto err;
+ }
+
+ /* For a privileged batch buffer that is not a wa_ctx, the
+ * bb_start_cmd_va is only updated into ring_scan_buffer, not the
+ * real ring address allocated later in copy_workload_to_ring_buffer.
+ * Note that shadow_ring_buffer_va now points to the real ring
+ * buffer va in copy_workload_to_ring_buffer.
+ */
+
+ if (bb->bb_offset)
+ bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+ + bb->bb_offset;
+
+ /* relocate shadow batch buffer */
+ bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
+ if (gmadr_bytes == 8)
+ bb->bb_start_cmd_va[2] = 0;
+
+ /* No one is going to touch shadow bb from now on. */
+ if (bb->clflush & CLFLUSH_AFTER) {
+ drm_clflush_virt_range(bb->va, bb->obj->base.size);
+ bb->clflush &= ~CLFLUSH_AFTER;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
+ if (ret)
+ goto err;
+
+ i915_gem_obj_finish_shmem_access(bb->obj);
+ bb->accessing = false;
+
+ i915_vma_move_to_active(bb->vma, workload->req, 0);
+ }
+ return 0;
+err:
+ release_shadow_batch_buffer(workload);
+ return ret;
+}
+
+static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ int ring_id = workload->ring_id;
+ struct intel_vgpu_submission *s = &workload->vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap_atomic(page);
+
+ shadow_ring_context->bb_per_ctx_ptr.val =
+ (shadow_ring_context->bb_per_ctx_ptr.val &
+ (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
+ shadow_ring_context->rcs_indirect_ctx.val =
+ (shadow_ring_context->rcs_indirect_ctx.val &
+ (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
+
+ kunmap_atomic(shadow_ring_context);
+ return 0;
+}
+
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ struct i915_vma *vma;
+ unsigned char *per_ctx_va =
+ (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
+ wa_ctx->indirect_ctx.size;
+
+ if (wa_ctx->indirect_ctx.size == 0)
+ return 0;
+
+ vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+ 0, CACHELINE_BYTES, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ /* FIXME: we are not tracking our pinned VMA, leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+ wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
+
+ wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
+ memset(per_ctx_va, 0, CACHELINE_BYTES);
+
+ update_wa_ctx_2_shadow_ctx(wa_ctx);
+ return 0;
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_shadow_bb *bb, *pos;
+
+ if (list_empty(&workload->shadow_bb))
+ return;
+
+ bb = list_first_entry(&workload->shadow_bb,
+ struct intel_vgpu_shadow_bb, list);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
+ if (bb->obj) {
+ if (bb->accessing)
+ i915_gem_obj_finish_shmem_access(bb->obj);
+
+ if (bb->va && !IS_ERR(bb->va))
+ i915_gem_object_unpin_map(bb->obj);
+
+ if (bb->vma && !IS_ERR(bb->vma)) {
+ i915_vma_unpin(bb->vma);
+ i915_vma_close(bb->vma);
+ }
+ __i915_gem_object_release_unless_active(bb->obj);
+ }
+ list_del(&bb->list);
+ kfree(bb);
+ }
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+static int prepare_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ret = 0;
+
+ ret = intel_vgpu_pin_mm(workload->shadow_mm);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu pin mm\n");
+ return ret;
+ }
+
+ update_shadow_pdps(workload);
+
+ ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu sync oos pages\n");
+ goto err_unpin_mm;
+ }
+
+ ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to flush post shadow\n");
+ goto err_unpin_mm;
+ }
+
+ ret = intel_gvt_generate_request(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to generate request\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_batch_buffer(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+ goto err_shadow_batch;
+ }
+
+ if (workload->prepare) {
+ ret = workload->prepare(workload);
+ if (ret)
+ goto err_shadow_wa_ctx;
+ }
+
+ return 0;
+err_shadow_wa_ctx:
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+ release_shadow_batch_buffer(workload);
+err_unpin_mm:
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+ return ret;
+}
+
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
int ret = 0;
@@ -375,12 +664,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret)
goto out;
- if (workload->prepare) {
- ret = workload->prepare(workload);
- if (ret) {
- engine->context_unpin(engine, shadow_ctx);
- goto out;
- }
+ ret = prepare_workload(workload);
+ if (ret) {
+ engine->context_unpin(engine, shadow_ctx);
+ goto out;
}
out:
@@ -390,7 +677,7 @@ out:
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
- i915_add_request(workload->req);
+ i915_request_add(workload->req);
workload->dispatched = true;
}
@@ -448,7 +735,7 @@ static struct intel_vgpu_workload *pick_next_workload(
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
- atomic_inc(&workload->vgpu->running_workload_num);
+ atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
mutex_unlock(&gvt->lock);
return workload;
@@ -458,8 +745,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -483,7 +771,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
- GTT_PAGE_SHIFT));
+ I915_GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid guest context descriptor\n");
return;
@@ -492,7 +780,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
- GTT_PAGE_SIZE);
+ I915_GTT_PAGE_SIZE);
kunmap(page);
i++;
}
@@ -517,23 +805,41 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
- GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+ I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap(page);
}
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine;
+ struct intel_vgpu_workload *pos, *n;
+ unsigned int tmp;
+
+ /* free the unsubmitted workloads in the queues. */
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ list_for_each_entry_safe(pos, n,
+ &s->workload_q_head[engine->id], list) {
+ list_del_init(&pos->list);
+ intel_vgpu_destroy_workload(pos);
+ }
+ clear_bit(engine->id, s->shadow_ctx_desc_updated);
+ }
+}
+
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- struct intel_vgpu_workload *workload;
- struct intel_vgpu *vgpu;
+ struct intel_vgpu_workload *workload =
+ scheduler->current_workload[ring_id];
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
int event;
mutex_lock(&gvt->lock);
- workload = scheduler->current_workload[ring_id];
- vgpu = workload->vgpu;
-
/* For the workload w/ request, needs to wait for the context
* switch to make sure request is completed.
* For the workload w/o request, directly complete the workload.
@@ -558,7 +864,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- i915_gem_request_put(fetch_and_zero(&workload->req));
+ i915_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !(vgpu->resetting_eng &
ENGINE_MASK(ring_id))) {
@@ -570,7 +876,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
}
mutex_lock(&dev_priv->drm.struct_mutex);
/* unpin shadow ctx as the shadow_ctx update is done */
- engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+ engine->context_unpin(engine, s->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@@ -580,9 +886,32 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id] = NULL;
list_del_init(&workload->list);
+
+ if (!workload->status) {
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ }
+
+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ /* If workload->status is not successful, the HW GPU has hit
+ * a GPU hang or something has gone wrong with i915/GVT, and
+ * GVT won't inject a context switch interrupt to the guest.
+ * To the guest this error is effectively a vGPU hang, so we
+ * should emulate a vGPU hang. If there are pending workloads
+ * which were already submitted from the guest, we should clean
+ * them up like the HW GPU does.
+ *
+ * If we are in the middle of an engine reset, the pending
+ * workloads won't be submitted to the HW GPU and will be
+ * cleaned up later during the reset process, so doing the
+ * workload clean up here has no impact.
+ */
+ clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ }
+
workload->complete(workload);
- atomic_dec(&vgpu->running_workload_num);
+ atomic_dec(&s->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
if (gvt->scheduler.need_reschedule)
@@ -652,7 +981,7 @@ static int workload_thread(void *priv)
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
- i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -665,20 +994,23 @@ complete:
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
+ if (ret && (vgpu_is_vm_unhealthy(ret)))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
return 0;
}
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
+ struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- if (atomic_read(&vgpu->running_workload_num)) {
+ if (atomic_read(&s->running_workload_num)) {
gvt_dbg_sched("wait vgpu idle\n");
wait_event(scheduler->workload_complete_wq,
- !atomic_read(&vgpu->running_workload_num));
+ !atomic_read(&s->running_workload_num));
}
}
@@ -743,26 +1075,367 @@ err:
return ret;
}
-void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
+/**
+ * intel_vgpu_clean_submission - free submission-related resource for vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called when a vGPU is being destroyed.
+ *
+ */
+void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
- i915_gem_context_put(vgpu->shadow_ctx);
+ struct intel_vgpu_submission *s = &vgpu->submission;
+
+ intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+ i915_gem_context_put(s->shadow_ctx);
+ kmem_cache_destroy(s->workloads);
}
-int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
+
+/**
+ * intel_vgpu_reset_submission - reset submission-related resource for vGPU
+ * @vgpu: a vGPU
+ * @engine_mask: engines expected to be reset
+ *
+ * This function is called when a vGPU is being reset.
+ *
+ */
+void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+
+ if (!s->active)
+ return;
+
+ clean_workloads(vgpu, engine_mask);
+ s->ops->reset(vgpu, engine_mask);
+}
+
+/**
+ * intel_vgpu_setup_submission - setup submission-related resource for vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is called when a vGPU is being created.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
- atomic_set(&vgpu->running_workload_num, 0);
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+ int ret;
- vgpu->shadow_ctx = i915_gem_context_create_gvt(
+ s->shadow_ctx = i915_gem_context_create_gvt(
&vgpu->gvt->dev_priv->drm);
- if (IS_ERR(vgpu->shadow_ctx))
- return PTR_ERR(vgpu->shadow_ctx);
+ if (IS_ERR(s->shadow_ctx))
+ return PTR_ERR(s->shadow_ctx);
+
+ if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
+ s->shadow_ctx->priority = INT_MAX;
+
+ bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+
+ s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
+ sizeof(struct intel_vgpu_workload), 0,
+ SLAB_HWCACHE_ALIGN,
+ offsetof(struct intel_vgpu_workload, rb_tail),
+ sizeof_field(struct intel_vgpu_workload, rb_tail),
+ NULL);
- if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
- vgpu->shadow_ctx->priority = INT_MAX;
+ if (!s->workloads) {
+ ret = -ENOMEM;
+ goto out_shadow_ctx;
+ }
- vgpu->shadow_ctx->engine[RCS].initialised = true;
+ for_each_engine(engine, vgpu->gvt->dev_priv, i)
+ INIT_LIST_HEAD(&s->workload_q_head[i]);
- bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+ atomic_set(&s->running_workload_num, 0);
+ bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
return 0;
+
+out_shadow_ctx:
+ i915_gem_context_put(s->shadow_ctx);
+ return ret;
+}
+
+/**
+ * intel_vgpu_select_submission_ops - select virtual submission interface
+ * @vgpu: a vGPU
+ * @engine_mask: engines expected to use the submission interface
+ * @interface: expected vGPU virtual submission interface
+ *
+ * This function is called when guest configures submission interface.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+ unsigned long engine_mask,
+ unsigned int interface)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ const struct intel_vgpu_submission_ops *ops[] = {
+ [INTEL_VGPU_EXECLIST_SUBMISSION] =
+ &intel_vgpu_execlist_submission_ops,
+ };
+ int ret;
+
+ if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+ return -EINVAL;
+
+ if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+ return -EINVAL;
+
+ if (s->active)
+ s->ops->clean(vgpu, engine_mask);
+
+ if (interface == 0) {
+ s->ops = NULL;
+ s->virtual_submission_interface = 0;
+ s->active = false;
+ gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
+ return 0;
+ }
+
+ ret = ops[interface]->init(vgpu, engine_mask);
+ if (ret)
+ return ret;
+
+ s->ops = ops[interface];
+ s->virtual_submission_interface = interface;
+ s->active = true;
+
+ gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
+ vgpu->id, s->ops->name);
+
+ return 0;
+}
+
+/**
+ * intel_vgpu_destroy_workload - destroy a vGPU workload
+ * @workload: the vGPU workload to destroy
+ *
+ * This function is called when destroying a vGPU workload.
+ *
+ */
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu_submission *s = &workload->vgpu->submission;
+
+ if (workload->shadow_mm)
+ intel_vgpu_mm_put(workload->shadow_mm);
+
+ kmem_cache_free(s->workloads, workload);
+}
+
+static struct intel_vgpu_workload *
+alloc_workload(struct intel_vgpu *vgpu)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_vgpu_workload *workload;
+
+ workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
+ if (!workload)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&workload->list);
+ INIT_LIST_HEAD(&workload->shadow_bb);
+
+ init_waitqueue_head(&workload->shadow_ctx_status_wq);
+ atomic_set(&workload->shadow_ctx_active, 0);
+
+ workload->status = -EINPROGRESS;
+ workload->shadowed = false;
+ workload->vgpu = vgpu;
+
+ return workload;
+}
+
+#define RING_CTX_OFF(x) \
+ offsetof(struct execlist_ring_context, x)
+
+static void read_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+{
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_read_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static int prepare_mm(struct intel_vgpu_workload *workload)
+{
+ struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
+ struct intel_vgpu_mm *mm;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ intel_gvt_gtt_type_t root_entry_type;
+ u64 pdps[GVT_RING_CTX_NR_PDPS];
+
+ switch (desc->addressing_mode) {
+ case 1: /* legacy 32-bit */
+ root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+ break;
+ case 3: /* legacy 64-bit */
+ root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ break;
+ default:
+ gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
+ return -EINVAL;
+ }
+
+ read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
+
+ mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
+
+ workload->shadow_mm = mm;
+ return 0;
+}
+
+#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
+ ((a)->lrca == (b)->lrca))
+
+#define get_last_workload(q) \
+ (list_empty(q) ? NULL : container_of(q->prev, \
+ struct intel_vgpu_workload, list))
+/**
+ * intel_vgpu_create_workload - create a vGPU workload
+ * @vgpu: a vGPU
+ * @ring_id: ring ID of the workload
+ * @desc: a guest context descriptor
+ *
+ * This function is called when creating a vGPU workload.
+ *
+ * Returns:
+ * struct intel_vgpu_workload * on success, or a negative error code
+ * encoded in the pointer on failure.
+ *
+ */
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+ struct execlist_ctx_descriptor_format *desc)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_workload *workload = NULL;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u64 ring_context_gpa;
+ u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ int ret;
+
+ ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
+ if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
+ return ERR_PTR(-EINVAL);
+ }
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_header.val), &head, 4);
+
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ring_tail.val), &tail, 4);
+
+ head &= RB_HEAD_OFF_MASK;
+ tail &= RB_TAIL_OFF_MASK;
+
+ if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
+ gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
+ gvt_dbg_el("ctx head %x real head %lx\n", head,
+ last_workload->rb_tail);
+ /*
+ * cannot use guest context head pointer here,
+ * as it might not be updated at this time
+ */
+ head = last_workload->rb_tail;
+ }
+
+ gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+
+ /* record some ring buffer register values for scan and shadow */
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_start.val), &start, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+
+ workload = alloc_workload(vgpu);
+ if (IS_ERR(workload))
+ return workload;
+
+ workload->ring_id = ring_id;
+ workload->ctx_desc = *desc;
+ workload->ring_context_gpa = ring_context_gpa;
+ workload->rb_head = head;
+ workload->rb_tail = tail;
+ workload->rb_start = start;
+ workload->rb_ctl = ctl;
+
+ if (ring_id == RCS) {
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
+ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
+
+ workload->wa_ctx.indirect_ctx.guest_gma =
+ indirect_ctx & INDIRECT_CTX_ADDR_MASK;
+ workload->wa_ctx.indirect_ctx.size =
+ (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
+ CACHELINE_BYTES;
+ workload->wa_ctx.per_ctx.guest_gma =
+ per_ctx & PER_CTX_ADDR_MASK;
+ workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+ }
+
+ gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
+ workload, ring_id, head, tail, start, ctl);
+
+ ret = prepare_mm(workload);
+ if (ret) {
+ kmem_cache_free(s->workloads, workload);
+ return ERR_PTR(ret);
+ }
+
+ /* Only scan and shadow the first workload in the queue
+ * as there is only one pre-allocated buf-obj for shadow.
+ */
+ if (list_empty(workload_q_head(vgpu, ring_id))) {
+ intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ }
+
+ if (ret && (vgpu_is_vm_unhealthy(ret))) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ intel_vgpu_destroy_workload(workload);
+ return ERR_PTR(ret);
+ }
+
+ return workload;
+}
+
+/**
+ * intel_vgpu_queue_workload - Queue a vGPU workload
+ * @workload: the workload to queue
+ */
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
+{
+ list_add_tail(&workload->list,
+ workload_q_head(workload->vgpu, workload->ring_id));
+ intel_gvt_kick_schedule(workload->vgpu->gvt);
+ wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index b9f8722..486ed57 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -80,7 +80,7 @@ struct intel_shadow_wa_ctx {
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
int ring_id;
- struct drm_i915_gem_request *req;
+ struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
bool shadowed;
@@ -110,26 +110,27 @@ struct intel_vgpu_workload {
/* shadow batch buffer */
struct list_head shadow_bb;
struct intel_shadow_wa_ctx wa_ctx;
+
+ /* oa registers */
+ u32 oactxctrl;
+ u32 flex_mmio[7];
};
-/* Intel shadow batch buffer is a i915 gem object */
-struct intel_shadow_bb_entry {
+struct intel_vgpu_shadow_bb {
struct list_head list;
struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
void *va;
- unsigned long len;
u32 *bb_start_cmd_va;
+ unsigned int clflush;
+ bool accessing;
+ unsigned long bb_offset;
};
#define workload_q_head(vgpu, ring_id) \
- (&(vgpu->workload_q_head[ring_id]))
+ (&(vgpu->submission.workload_q_head[ring_id]))
-#define queue_workload(workload) do { \
- list_add_tail(&workload->list, \
- workload_q_head(workload->vgpu, workload->ring_id)); \
- wake_up(&workload->vgpu->gvt-> \
- scheduler.waitq[workload->ring_id]); \
-} while (0)
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
@@ -137,12 +138,24 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
-int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
+int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
+
+void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
+ unsigned long engine_mask);
+
+void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
+
+int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+ unsigned long engine_mask,
+ unsigned int interface);
-void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+extern const struct intel_vgpu_submission_ops
+intel_vgpu_execlist_submission_ops;
-void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+ struct execlist_ctx_descriptor_format *desc);
-int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 8c15038..82093f1 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -113,10 +113,10 @@ TRACE_EVENT(gma_index,
);
TRACE_EVENT(gma_translate,
- TP_PROTO(int id, char *type, int ring_id, int pt_level,
+ TP_PROTO(int id, char *type, int ring_id, int root_entry_type,
unsigned long gma, unsigned long gpa),
- TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+ TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
TP_STRUCT__entry(
__array(char, buf, MAX_BUF_LEN)
@@ -124,8 +124,8 @@ TRACE_EVENT(gma_translate,
TP_fast_assign(
snprintf(__entry->buf, MAX_BUF_LEN,
- "VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
- id, type, ring_id, pt_level, gma, gpa);
+ "VM%d %s ring %d root_entry_type %d gma 0x%lx -> gpa 0x%lx\n",
+ id, type, ring_id, root_entry_type, gma, gpa);
),
TP_printk("%s", __entry->buf)
@@ -168,7 +168,7 @@ TRACE_EVENT(spt_change,
TP_printk("%s", __entry->buf)
);
-TRACE_EVENT(gpt_change,
+TRACE_EVENT(spt_guest_change,
TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
unsigned long index),
@@ -330,13 +330,14 @@ TRACE_EVENT(inject_msi,
);
TRACE_EVENT(render_mmio,
- TP_PROTO(int id, char *action, unsigned int reg,
+ TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
unsigned int old_val, unsigned int new_val),
- TP_ARGS(id, action, reg, new_val, old_val),
+ TP_ARGS(old_id, new_id, action, reg, old_val, new_val),
TP_STRUCT__entry(
- __field(int, id)
+ __field(int, old_id)
+ __field(int, new_id)
__array(char, buf, GVT_TEMP_STR_LEN)
__field(unsigned int, reg)
__field(unsigned int, old_val)
@@ -344,15 +345,17 @@ TRACE_EVENT(render_mmio,
),
TP_fast_assign(
- __entry->id = id;
+ __entry->old_id = old_id;
+ __entry->new_id = new_id;
snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
__entry->reg = reg;
__entry->old_val = old_val;
__entry->new_val = new_val;
),
- TP_printk("VM%u %s reg %x, old %08x new %08x\n",
- __entry->id, __entry->buf, __entry->reg,
+ TP_printk("VM%u -> VM%u %s reg %x, old %08x new %08x\n",
+ __entry->old_id, __entry->new_id,
+ __entry->buf, __entry->reg,
__entry->old_val, __entry->new_val)
);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 02c61a1..2e0a02a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -38,22 +38,25 @@
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
- vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
- vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
- vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
- vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
- vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
- vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+ vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+ vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
+ vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
+ vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
vgpu_aperture_sz(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
vgpu_hidden_gmadr_base(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
vgpu_hidden_sz(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
@@ -226,13 +229,14 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
vgpu->active = false;
- if (atomic_read(&vgpu->running_workload_num)) {
+ if (atomic_read(&vgpu->submission.running_workload_num)) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
}
intel_vgpu_stop_schedule(vgpu);
+ intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&gvt->lock);
}
@@ -252,16 +256,19 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
WARN(vgpu->active, "vGPU is still active!\n");
+ intel_gvt_debugfs_remove_vgpu(vgpu);
idr_remove(&gvt->vgpu_idr, vgpu->id);
+ if (idr_is_empty(&gvt->vgpu_idr))
+ intel_gvt_clean_irq(gvt);
intel_vgpu_clean_sched_policy(vgpu);
- intel_vgpu_clean_gvt_context(vgpu);
- intel_vgpu_clean_execlist(vgpu);
+ intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
+ intel_vgpu_dmabuf_cleanup(vgpu);
vfree(vgpu);
intel_gvt_update_vgpu_types(gvt);
@@ -293,7 +300,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->gvt = gvt;
for (i = 0; i < I915_NUM_ENGINES; i++)
- INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+ INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)
@@ -346,8 +353,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
- bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
-
+ INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+ INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
+ idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary);
ret = intel_vgpu_init_mmio(vgpu);
@@ -368,32 +376,42 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_detach_hypervisor_vgpu;
- ret = intel_vgpu_init_display(vgpu, param->resolution);
+ ret = intel_vgpu_init_opregion(vgpu);
if (ret)
goto out_clean_gtt;
- ret = intel_vgpu_init_execlist(vgpu);
+ ret = intel_vgpu_init_display(vgpu, param->resolution);
if (ret)
- goto out_clean_display;
+ goto out_clean_opregion;
- ret = intel_vgpu_init_gvt_context(vgpu);
+ ret = intel_vgpu_setup_submission(vgpu);
if (ret)
- goto out_clean_execlist;
+ goto out_clean_display;
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)
- goto out_clean_shadow_ctx;
+ goto out_clean_submission;
+
+ ret = intel_gvt_debugfs_add_vgpu(vgpu);
+ if (ret)
+ goto out_clean_sched_policy;
+
+ ret = intel_gvt_hypervisor_set_opregion(vgpu);
+ if (ret)
+ goto out_clean_sched_policy;
mutex_unlock(&gvt->lock);
return vgpu;
-out_clean_shadow_ctx:
- intel_vgpu_clean_gvt_context(vgpu);
-out_clean_execlist:
- intel_vgpu_clean_execlist(vgpu);
+out_clean_sched_policy:
+ intel_vgpu_clean_sched_policy(vgpu);
+out_clean_submission:
+ intel_vgpu_clean_submission(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
+out_clean_opregion:
+ intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
@@ -500,11 +518,11 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
mutex_lock(&gvt->lock);
}
- intel_vgpu_reset_execlist(vgpu, resetting_eng);
-
+ intel_vgpu_reset_submission(vgpu, resetting_eng);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
-
+ intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+ intel_vgpu_invalidate_ppgtt(vgpu);
/*fence will not be reset during virtual reset */
if (dmlr) {
intel_vgpu_reset_gtt(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 8ba932b..95478db 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -26,6 +26,7 @@
*/
#include "i915_drv.h"
+#include "intel_ringbuffer.h"
/**
* DOC: batch buffer command parser
@@ -798,22 +799,15 @@ struct cmd_node {
*/
static inline u32 cmd_header_key(u32 x)
{
- u32 shift;
-
switch (x >> INSTR_CLIENT_SHIFT) {
default:
case INSTR_MI_CLIENT:
- shift = STD_MI_OPCODE_SHIFT;
- break;
+ return x >> STD_MI_OPCODE_SHIFT;
case INSTR_RC_CLIENT:
- shift = STD_3D_OPCODE_SHIFT;
- break;
+ return x >> STD_3D_OPCODE_SHIFT;
case INSTR_BC_CLIENT:
- shift = STD_2D_OPCODE_SHIFT;
- break;
+ return x >> STD_2D_OPCODE_SHIFT;
}
-
- return x >> shift;
}
static int init_hash_table(struct intel_engine_cs *engine,
@@ -947,7 +941,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
return;
}
- engine->needs_cmd_parser = true;
+ engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
}
/**
@@ -959,7 +953,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
*/
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
- if (!engine->needs_cmd_parser)
+ if (!intel_engine_needs_cmd_parser(engine))
return;
fini_hash_table(engine);
@@ -1038,7 +1032,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
const struct drm_i915_reg_table *table = engine->reg_tables;
int count = engine->reg_table_count;
- do {
+ for (; count > 0; ++table, --count) {
if (!table->master || is_master) {
const struct drm_i915_reg_descriptor *reg;
@@ -1046,7 +1040,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
if (reg != NULL)
return reg;
}
- } while (table++, --count);
+ }
return NULL;
}
@@ -1218,6 +1212,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
continue;
}
+ if (desc->bits[i].offset >= length) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+ *cmd, engine->name);
+ return false;
+ }
+
dword = cmd[desc->bits[i].offset] &
desc->bits[i].mask;
@@ -1357,7 +1357,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_engine(engine, dev_priv, id) {
- if (engine->needs_cmd_parser) {
+ if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index c65e381..89f7ff2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,47 +30,29 @@
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
-#include "i915_guc_submission.h"
+#include "intel_guc_submission.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
return to_i915(node->minor->dev);
}
-static __always_inline void seq_print_param(struct seq_file *m,
- const char *name,
- const char *type,
- const void *x)
-{
- if (!__builtin_strcmp(type, "bool"))
- seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "char *"))
- seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
- else
- BUILD_BUG();
-}
-
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ struct drm_printer p = drm_seq_file_printer(m);
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
-#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
+ intel_device_info_dump_flags(info, &p);
+ intel_device_info_dump_runtime(info, &p);
+ intel_driver_caps_print(&dev_priv->caps, &p);
kernel_param_lock(THIS_MODULE);
-#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
- I915_PARAMS_FOR_EACH(PRINT_PARAM);
-#undef PRINT_PARAM
+ i915_params_dump(&i915_modparams, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
@@ -111,8 +93,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
u64 size = 0;
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
+ for_each_ggtt_vma(vma, obj) {
+ if (drm_mm_node_allocated(&vma->node))
size += vma->node.size;
}
@@ -168,8 +150,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
get_global_flag(obj),
get_pin_mapped_flag(obj),
obj->base.size / 1024,
- obj->base.read_domains,
- obj->base.write_domain,
+ obj->read_domains,
+ obj->write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
obj->mm.dirty ? " dirty" : "",
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -522,8 +504,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
dpy_count, dpy_size);
- seq_printf(m, "%llu [%llu] gtt total\n",
- ggtt->base.total, ggtt->mappable_end);
+ seq_printf(m, "%llu [%pa] gtt total\n",
+ ggtt->base.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf)));
@@ -537,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
struct task_struct *task;
mutex_lock(&dev->struct_mutex);
@@ -554,7 +536,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
* Therefore, we need to protect this ->comm access using RCU.
*/
request = list_first_entry_or_null(&file_priv->mm.request_list,
- struct drm_i915_gem_request,
+ struct i915_request,
client_link);
rcu_read_lock();
task = pid_task(request && request->ctx->pid ?
@@ -664,38 +646,56 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_ring_seqno_info(struct seq_file *m,
- struct intel_engine_cs *engine)
+static void gen8_display_interrupt_info(struct seq_file *m)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct rb_node *rb;
-
- seq_printf(m, "Current sequence (%s): %x\n",
- engine->name, intel_engine_get_seqno(engine));
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ int pipe;
- spin_lock_irq(&b->rb_lock);
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = rb_entry(rb, typeof(*w), node);
+ for_each_pipe(dev_priv, pipe) {
+ enum intel_display_power_domain power_domain;
- seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
- engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
+ seq_printf(m, "Pipe %c power disabled\n",
+ pipe_name(pipe));
+ continue;
+ }
+ seq_printf(m, "Pipe %c IMR:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IMR(pipe)));
+ seq_printf(m, "Pipe %c IIR:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IIR(pipe)));
+ seq_printf(m, "Pipe %c IER:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(GEN8_DE_PIPE_IER(pipe)));
+
+ intel_display_power_put(dev_priv, power_domain);
}
- spin_unlock_irq(&b->rb_lock);
-}
-static int i915_gem_seqno_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IMR));
+ seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IIR));
+ seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
+ I915_READ(GEN8_DE_PORT_IER));
- for_each_engine(engine, dev_priv, id)
- i915_ring_seqno_info(m, engine);
+ seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IMR));
+ seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IIR));
+ seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
+ I915_READ(GEN8_DE_MISC_IER));
- return 0;
+ seq_printf(m, "PCU interrupt mask:\t%08x\n",
+ I915_READ(GEN8_PCU_IMR));
+ seq_printf(m, "PCU interrupt identity:\t%08x\n",
+ I915_READ(GEN8_PCU_IIR));
+ seq_printf(m, "PCU interrupt enable:\t%08x\n",
+ I915_READ(GEN8_PCU_IER));
}
-
static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -759,6 +759,27 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(GEN8_PCU_IIR));
seq_printf(m, "PCU interrupt enable:\t%08x\n",
I915_READ(GEN8_PCU_IER));
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ seq_printf(m, "Master Interrupt Control: %08x\n",
+ I915_READ(GEN11_GFX_MSTR_IRQ));
+
+ seq_printf(m, "Render/Copy Intr Enable: %08x\n",
+ I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
+ seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
+ I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
+ seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
+ I915_READ(GEN11_GUC_SG_INTR_ENABLE));
+ seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
+ I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
+ seq_printf(m, "Crypto Intr Enable:\t %08x\n",
+ I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
+ seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
+ I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
+
+ seq_printf(m, "Display Interrupt Control:\t%08x\n",
+ I915_READ(GEN11_DISPLAY_INT_CTL));
+
+ gen8_display_interrupt_info(m);
} else if (INTEL_GEN(dev_priv) >= 8) {
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -772,49 +793,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
i, I915_READ(GEN8_GT_IER(i)));
}
- for_each_pipe(dev_priv, pipe) {
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain)) {
- seq_printf(m, "Pipe %c power disabled\n",
- pipe_name(pipe));
- continue;
- }
- seq_printf(m, "Pipe %c IMR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IMR(pipe)));
- seq_printf(m, "Pipe %c IIR:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IIR(pipe)));
- seq_printf(m, "Pipe %c IER:\t%08x\n",
- pipe_name(pipe),
- I915_READ(GEN8_DE_PIPE_IER(pipe)));
-
- intel_display_power_put(dev_priv, power_domain);
- }
-
- seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IMR));
- seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IIR));
- seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_PORT_IER));
-
- seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IMR));
- seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IIR));
- seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
- I915_READ(GEN8_DE_MISC_IER));
-
- seq_printf(m, "PCU interrupt mask:\t%08x\n",
- I915_READ(GEN8_PCU_IMR));
- seq_printf(m, "PCU interrupt identity:\t%08x\n",
- I915_READ(GEN8_PCU_IIR));
- seq_printf(m, "PCU interrupt enable:\t%08x\n",
- I915_READ(GEN8_PCU_IER));
+ gen8_display_interrupt_info(m);
} else if (IS_VALLEYVIEW(dev_priv)) {
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
@@ -896,14 +875,35 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_engine(engine, dev_priv, id) {
- if (INTEL_GEN(dev_priv) >= 6) {
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ seq_printf(m, "RCS Intr Mask:\t %08x\n",
+ I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
+ seq_printf(m, "BCS Intr Mask:\t %08x\n",
+ I915_READ(GEN11_BCS_RSVD_INTR_MASK));
+ seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
+ I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
+ seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
+ I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
+ seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
+ I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
+ seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
+ I915_READ(GEN11_GUC_SG_INTR_MASK));
+ seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
+ I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
+ seq_printf(m, "Crypto Intr Mask:\t %08x\n",
+ I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
+ seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
+ I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
+
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, I915_READ_IMR(engine));
}
- i915_ring_seqno_info(m, engine);
}
+
intel_runtime_pm_put(dev_priv);
return 0;
@@ -1040,7 +1040,10 @@ i915_next_seqno_set(void *data, u64 val)
if (ret)
return ret;
+ intel_runtime_pm_get(dev_priv);
ret = i915_gem_set_global_seqno(dev, val);
+ intel_runtime_pm_put(dev_priv);
+
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1151,13 +1154,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- if (INTEL_GEN(dev_priv) >= 9)
- cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
- else
- cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
- cagf = intel_gpu_freq(dev_priv, cagf);
+ cagf = intel_gpu_freq(dev_priv,
+ intel_get_cagf(dev_priv, rpstat));
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -1514,19 +1512,6 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 gt_core_status, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
- unsigned forcewake_count;
- int count = 0;
-
- forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
- if (forcewake_count) {
- seq_puts(m, "RC information inaccurate because somebody "
- "holds a forcewake reference \n");
- } else {
- /* NB: we cannot use forcewake, else we read the wrong values */
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
- udelay(10);
- seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
- }
gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
@@ -1537,9 +1522,12 @@ static int gen6_drpc_info(struct seq_file *m)
gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
}
- mutex_lock(&dev_priv->pcu_lock);
- sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- mutex_unlock(&dev_priv->pcu_lock);
+ if (INTEL_GEN(dev_priv) <= 7) {
+ mutex_lock(&dev_priv->pcu_lock);
+ sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids);
+ mutex_unlock(&dev_priv->pcu_lock);
+ }
seq_printf(m, "RC1e Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1595,12 +1583,15 @@ static int gen6_drpc_info(struct seq_file *m)
print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
- seq_printf(m, "RC6 voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
- seq_printf(m, "RC6+ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
- seq_printf(m, "RC6++ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+ if (INTEL_GEN(dev_priv) <= 7) {
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+ }
+
return i915_forcewake_domains(m, NULL);
}
@@ -1639,20 +1630,23 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_fbc *fbc = &dev_priv->fbc;
- if (!HAS_FBC(dev_priv)) {
- seq_puts(m, "FBC unsupported on this chipset\n");
- return 0;
- }
+ if (!HAS_FBC(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->fbc.lock);
+ mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
seq_puts(m, "FBC enabled\n");
else
- seq_printf(m, "FBC disabled: %s\n",
- dev_priv->fbc.no_fbc_reason);
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
+ if (fbc->work.scheduled)
+ seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
+ fbc->work.scheduled_vblank,
+ drm_crtc_vblank_count(&fbc->crtc->base));
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
@@ -1672,7 +1666,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "Compressing: %s\n", yesno(mask));
}
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_unlock(&fbc->lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -1719,10 +1713,8 @@ static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (!HAS_IPS(dev_priv)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_IPS(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -1808,10 +1800,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
- if (!HAS_LLC(dev_priv)) {
- seq_puts(m, "unsupported on this chipset\n");
- return 0;
- }
+ if (!HAS_LLC(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -1974,7 +1964,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct intel_context *ce = &ctx->engine[engine->id];
seq_printf(m, "%s: ", engine->name);
- seq_putc(m, ce->initialised ? 'I' : 'i');
if (ce->state)
describe_obj(m, ce->state->obj);
if (ce->ring)
@@ -1990,75 +1979,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
return 0;
}
-static void i915_dump_lrc_obj(struct seq_file *m,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct i915_vma *vma = ctx->engine[engine->id].state;
- struct page *page;
- int j;
-
- seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
-
- if (!vma) {
- seq_puts(m, "\tFake context\n");
- return;
- }
-
- if (vma->flags & I915_VMA_GLOBAL_BIND)
- seq_printf(m, "\tBound in GGTT at 0x%08x\n",
- i915_ggtt_offset(vma));
-
- if (i915_gem_object_pin_pages(vma->obj)) {
- seq_puts(m, "\tFailed to get pages for context object\n\n");
- return;
- }
-
- page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
- if (page) {
- u32 *reg_state = kmap_atomic(page);
-
- for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
- seq_printf(m,
- "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- j * 4,
- reg_state[j], reg_state[j + 1],
- reg_state[j + 2], reg_state[j + 3]);
- }
- kunmap_atomic(reg_state);
- }
-
- i915_gem_object_unpin_pages(vma->obj);
- seq_putc(m, '\n');
-}
-
-static int i915_dump_lrc(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- enum intel_engine_id id;
- int ret;
-
- if (!i915_modparams.enable_execlists) {
- seq_printf(m, "Logical Ring Contexts are disabled\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- list_for_each_entry(ctx, &dev_priv->contexts.list, link)
- for_each_engine(engine, dev_priv, id)
- i915_dump_lrc_obj(m, ctx, engine);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *swizzle_string(unsigned swizzle)
{
switch (swizzle) {
@@ -2361,8 +2281,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_printer p;
- if (!HAS_HUC_UCODE(dev_priv))
- return 0;
+ if (!HAS_HUC(dev_priv))
+ return -ENODEV;
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
@@ -2380,8 +2300,8 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
struct drm_printer p;
u32 tmp, i;
- if (!HAS_GUC_UCODE(dev_priv))
- return 0;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
@@ -2434,7 +2354,7 @@ static void i915_guc_log_info(struct seq_file *m,
static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
- struct i915_guc_client *client)
+ struct intel_guc_client *client)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -2454,29 +2374,15 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tTotal: %llu\n", tot);
}
-static bool check_guc_submission(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
-
- if (!guc->execbuf_client) {
- seq_printf(m, "GuC submission %s\n",
- HAS_GUC_SCHED(dev_priv) ?
- "disabled" :
- "not supported");
- return false;
- }
-
- return true;
-}
-
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->guc;
- if (!check_guc_submission(m))
- return 0;
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return -ENODEV;
+
+ GEM_BUG_ON(!guc->execbuf_client);
seq_printf(m, "Doorbell map:\n");
seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
@@ -2484,6 +2390,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
i915_guc_client_info(m, dev_priv, guc->execbuf_client);
+ if (guc->preempt_client) {
+ seq_printf(m, "\nGuC preempt client @ %p:\n",
+ guc->preempt_client);
+ i915_guc_client_info(m, dev_priv, guc->preempt_client);
+ }
i915_guc_log_info(m, dev_priv);
@@ -2497,12 +2408,12 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
- struct i915_guc_client *client = guc->execbuf_client;
+ struct intel_guc_client *client = guc->execbuf_client;
unsigned int tmp;
int index;
- if (!check_guc_submission(m))
- return 0;
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
struct intel_engine_cs *engine;
@@ -2555,6 +2466,9 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
u32 *log;
int i = 0;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (dump_load_err)
obj = dev_priv->guc.load_err_log;
else if (dev_priv->guc.log.vma)
@@ -2586,6 +2500,9 @@ static int i915_guc_log_control_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (!dev_priv->guc.log.vma)
return -EINVAL;
@@ -2597,21 +2514,11 @@ static int i915_guc_log_control_get(void *data, u64 *val)
static int i915_guc_log_control_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
- int ret;
- if (!dev_priv->guc.log.vma)
- return -EINVAL;
-
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret)
- return ret;
-
- intel_runtime_pm_get(dev_priv);
- ret = i915_guc_log_control(dev_priv, val);
- intel_runtime_pm_put(dev_priv);
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- return ret;
+ return intel_guc_log_control(&dev_priv->guc, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
@@ -2648,17 +2555,19 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
u32 stat[3];
enum pipe pipe;
bool enabled = false;
+ bool sink_support;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
- if (!HAS_PSR(dev_priv)) {
- seq_puts(m, "PSR not supported\n");
+ sink_support = dev_priv->psr.sink_support;
+ seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
+ if (!sink_support)
return 0;
- }
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->psr.lock);
- seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
- seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
@@ -2716,9 +2625,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
if (dev_priv->psr.psr2_support) {
- u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
+ u32 psr2 = I915_READ(EDP_PSR2_STATUS);
- seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
+ seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
psr2, psr2_live_status(psr2));
}
mutex_unlock(&dev_priv->psr.lock);
@@ -2734,39 +2643,76 @@ static int i915_sink_crc(struct seq_file *m, void *data)
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp = NULL;
+ struct drm_modeset_acquire_ctx ctx;
int ret;
u8 crc[6];
- drm_modeset_lock_all(dev);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
drm_connector_list_iter_begin(dev, &conn_iter);
+
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_crtc *crtc;
+ struct drm_connector_state *state;
+ struct intel_crtc_state *crtc_state;
- if (!connector->base.state->best_encoder)
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
- crtc = connector->base.state->crtc;
- if (!crtc->state->active)
+retry:
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
+ if (ret)
+ goto err;
+
+ state = connector->base.state;
+ if (!state->best_encoder)
continue;
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
+ crtc = state->crtc;
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret)
+ goto err;
+
+ crtc_state = to_intel_crtc_state(crtc->state);
+ if (!crtc_state->base.active)
continue;
- intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
+ /*
+ * We need to wait for all crtc updates to complete, to make
+ * sure any pending modesets and plane updates are completed.
+ */
+ if (crtc_state->base.commit) {
+ ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
- ret = intel_dp_sink_crc(intel_dp, crc);
+ if (ret)
+ goto err;
+ }
+
+ intel_dp = enc_to_intel_dp(state->best_encoder);
+
+ ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
if (ret)
- goto out;
+ goto err;
seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
crc[0], crc[1], crc[2],
crc[3], crc[4], crc[5]);
goto out;
+
+err:
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+ goto out;
}
ret = -ENODEV;
out:
drm_connector_list_iter_end(&conn_iter);
- drm_modeset_unlock_all(dev);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
return ret;
}
@@ -2805,7 +2751,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
- seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
+ seq_printf(m, "GPU idle: %s (epoch %u)\n",
+ yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@@ -2854,10 +2801,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_csr *csr;
- if (!HAS_CSR(dev_priv)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_CSR(dev_priv))
+ return -ENODEV;
csr = &dev_priv->csr;
@@ -3049,7 +2994,7 @@ static void intel_connector_info(struct seq_file *m,
break;
case DRM_MODE_CONNECTOR_HDMIA:
if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
- intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
+ intel_encoder->type == INTEL_OUTPUT_DDI)
intel_hdmi_info(m, intel_connector);
break;
default:
@@ -3240,90 +3185,39 @@ static int i915_engine_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- seq_printf(m, "GT awake? %s\n",
- yesno(dev_priv->gt.awake));
+ seq_printf(m, "GT awake? %s (epoch %u)\n",
+ yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
+ seq_printf(m, "CS timestamp frequency: %u kHz\n",
+ dev_priv->info.cs_timestamp_frequency_khz);
p = drm_seq_file_printer(m);
for_each_engine(engine, dev_priv, id)
- intel_engine_dump(engine, &p);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
intel_runtime_pm_put(dev_priv);
return 0;
}
-static int i915_shrinker_info(struct seq_file *m, void *unused)
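+/* Dump the device's slice/subslice/EU (SSEU) topology to debugfs. */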
+static int i915_rcs_topology(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
- seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+ intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
return 0;
}
-static int i915_semaphore_status(struct seq_file *m, void *unused)
+static int i915_shrinker_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- int num_rings = INTEL_INFO(dev_priv)->num_rings;
- enum intel_engine_id id;
- int j, ret;
-
- if (!i915_modparams.semaphores) {
- seq_puts(m, "Semaphores are disabled\n");
- return 0;
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- if (IS_BROADWELL(dev_priv)) {
- struct page *page;
- uint64_t *seqno;
-
- page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
-
- seqno = (uint64_t *)kmap_atomic(page);
- for_each_engine(engine, dev_priv, id) {
- uint64_t offset;
-
- seq_printf(m, "%s\n", engine->name);
-
- seq_puts(m, " Last signal:");
- for (j = 0; j < num_rings; j++) {
- offset = id * I915_NUM_ENGINES + j;
- seq_printf(m, "0x%08llx (0x%02llx) ",
- seqno[offset], offset * 8);
- }
- seq_putc(m, '\n');
-
- seq_puts(m, " Last wait: ");
- for (j = 0; j < num_rings; j++) {
- offset = id + (j * I915_NUM_ENGINES);
- seq_printf(m, "0x%08llx (0x%02llx) ",
- seqno[offset], offset * 8);
- }
- seq_putc(m, '\n');
+ struct drm_i915_private *i915 = node_to_i915(m->private);
- }
- kunmap_atomic(seqno);
- } else {
- seq_puts(m, " Last signal:");
- for_each_engine(engine, dev_priv, id)
- for (j = 0; j < num_rings; j++)
- seq_printf(m, "0x%08x\n",
- I915_READ(engine->semaphore.mbox.signal[j]));
- seq_putc(m, '\n');
- }
+ seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
+ seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -3454,7 +3348,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
int plane;
if (INTEL_GEN(dev_priv) < 9)
- return 0;
+ return -ENODEV;
drm_modeset_lock_all(dev);
@@ -3521,7 +3415,10 @@ static void drrs_status_per_crtc(struct seq_file *m,
/* disable_drrs() will make drrs->dp NULL */
if (!drrs->dp) {
- seq_puts(m, "Idleness DRRS: Disabled");
+ seq_puts(m, "Idleness DRRS: Disabled\n");
+ if (dev_priv->psr.enabled)
+ seq_puts(m,
+ "\tAs PSR is enabled, DRRS is not enabled\n");
mutex_unlock(&drrs->mutex);
return;
}
@@ -3601,7 +3498,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
continue;
seq_printf(m, "MST Source Port %c\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -4090,7 +3987,8 @@ i915_wedged_set(void *data, u64 val)
engine->hangcheck.stalled = true;
}
- i915_handle_error(i915, val, "Manually setting wedged to %llu", val);
+ i915_handle_error(i915, val, "Manually set wedged engine mask = %llx",
+ val);
wait_on_bit(&i915->gpu_error.flags,
I915_RESET_HANDOFF,
@@ -4224,7 +4122,7 @@ i915_drop_caches_set(void *data, u64 val)
I915_WAIT_LOCKED);
if (val & DROP_RETIRE)
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
@@ -4243,10 +4141,8 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_IDLE)
drain_delayed_work(&dev_priv->gt.idle_work);
- if (val & DROP_FREED) {
- synchronize_rcu();
+ if (val & DROP_FREED)
i915_gem_drain_freed_objects(dev_priv);
- }
return ret;
}
@@ -4437,7 +4333,7 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
continue;
sseu->slice_mask = BIT(0);
- sseu->subslice_mask |= BIT(ss);
+ sseu->subslice_mask[0] |= BIT(ss);
eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
@@ -4448,20 +4344,70 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
}
}
-static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
- struct sseu_dev_info *sseu)
+static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
+ struct sseu_dev_info *sseu)
{
- int s_max = 3, ss_max = 4;
+ const struct intel_device_info *info = INTEL_INFO(dev_priv);
int s, ss;
- u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+ u32 s_reg[info->sseu.max_slices];
+ u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
- /* BXT has a single slice and at most 3 subslices. */
- if (IS_GEN9_LP(dev_priv)) {
- s_max = 1;
- ss_max = 3;
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ /*
+ * FIXME: Valid SS Mask respects the spec and reads
+ * only valid bits for those registers, excluding reserved,
+ * although this seems wrong because it would leave many
+ * subslices without ACK.
+ */
+ s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
+ GEN10_PGCTL_VALID_SS_MASK(s);
+ eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
+ eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
}
- for (s = 0; s < s_max; s++) {
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+ sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
+
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
+ unsigned int eu_cnt;
+
+ if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
+
+ eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
+ eu_mask[ss % 2]);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice,
+ eu_cnt);
+ }
+ }
+}
+
+static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
+ struct sseu_dev_info *sseu)
+{
+ const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ int s, ss;
+ u32 s_reg[info->sseu.max_slices];
+ u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
+
+ for (s = 0; s < info->sseu.max_slices; s++) {
s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
@@ -4476,18 +4422,18 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
GEN9_PGCTL_SSB_EU210_ACK |
GEN9_PGCTL_SSB_EU311_ACK;
- for (s = 0; s < s_max; s++) {
+ for (s = 0; s < info->sseu.max_slices; s++) {
if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
/* skip disabled slice */
continue;
sseu->slice_mask |= BIT(s);
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
- sseu->subslice_mask =
- INTEL_INFO(dev_priv)->sseu.subslice_mask;
+ if (IS_GEN9_BC(dev_priv))
+ sseu->subslice_mask[s] =
+ INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
- for (ss = 0; ss < ss_max; ss++) {
+ for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
if (IS_GEN9_LP(dev_priv)) {
@@ -4495,7 +4441,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
/* skip disabled subslice */
continue;
- sseu->subslice_mask |= BIT(ss);
+ sseu->subslice_mask[s] |= BIT(ss);
}
eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
@@ -4517,9 +4463,12 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
if (sseu->slice_mask) {
- sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
sseu->eu_per_subslice =
INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ sseu->subslice_mask[s] =
+ INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+ }
sseu->eu_total = sseu->eu_per_subslice *
sseu_subslice_total(sseu);
@@ -4538,6 +4487,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const char *type = is_available_info ? "Available" : "Enabled";
+ int s;
seq_printf(m, " %s Slice Mask: %04x\n", type,
sseu->slice_mask);
@@ -4545,10 +4495,10 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
hweight8(sseu->slice_mask));
seq_printf(m, " %s Subslice Total: %u\n", type,
sseu_subslice_total(sseu));
- seq_printf(m, " %s Subslice Mask: %04x\n", type,
- sseu->subslice_mask);
- seq_printf(m, " %s Subslice Per Slice: %u\n", type,
- hweight8(sseu->subslice_mask));
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ seq_printf(m, " %s Slice%i subslices: %u\n", type,
+ s, hweight8(sseu->subslice_mask[s]));
+ }
seq_printf(m, " %s EU Total: %u\n", type,
sseu->eu_total);
seq_printf(m, " %s EU Per Subslice: %u\n", type,
@@ -4582,6 +4532,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
seq_puts(m, "SSEU Device Status\n");
memset(&sseu, 0, sizeof(sseu));
+ sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
+ sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
+ sseu.max_eus_per_subslice =
+ INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
intel_runtime_pm_get(dev_priv);
@@ -4589,8 +4543,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
cherryview_sseu_device_status(dev_priv, &sseu);
} else if (IS_BROADWELL(dev_priv)) {
broadwell_sseu_device_status(dev_priv, &sseu);
- } else if (INTEL_GEN(dev_priv) >= 9) {
+ } else if (IS_GEN9(dev_priv)) {
gen9_sseu_device_status(dev_priv, &sseu);
+ } else if (INTEL_GEN(dev_priv) >= 10) {
+ gen10_sseu_device_status(dev_priv, &sseu);
}
intel_runtime_pm_put(dev_priv);
@@ -4707,12 +4663,51 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
.write = i915_hpd_storm_ctl_write
};
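+/*
+ * debugfs hook: a non-zero write manually enables eDP DRRS on every active
+ * CRTC that supports it; writing zero disables it again.
+ */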
+static int i915_drrs_ctl_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *intel_crtc;
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp;
+
+ if (INTEL_GEN(dev_priv) < 7)
+ return -ENODEV;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_crtc(dev, intel_crtc) {
+ if (!intel_crtc->base.state->active ||
+ !intel_crtc->config->has_drrs)
+ continue;
+
+ for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
+ val ? "en" : "dis", val);
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ if (val)
+ intel_edp_drrs_enable(intel_dp,
+ intel_crtc->config);
+ else
+ intel_edp_drrs_disable(intel_dp,
+ intel_crtc->config);
+ }
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
+
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_stolen", i915_gem_stolen_list_info },
- {"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
@@ -4736,7 +4731,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
- {"i915_dump_lrc", i915_dump_lrc, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -4749,8 +4743,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0},
+ {"i915_rcs_topology", i915_rcs_topology, 0},
{"i915_shrinker_info", i915_shrinker_info, 0},
- {"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
@@ -4787,7 +4781,8 @@ static const struct i915_debugfs_files {
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_guc_log_control", &i915_guc_log_control_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
- {"i915_ipc_status", &i915_ipc_status_fops}
+ {"i915_ipc_status", &i915_ipc_status_fops},
+ {"i915_drrs_ctl", &i915_drrs_ctl_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2cf10d1..07c07d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -48,12 +48,15 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_pmu.h"
+#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
static struct drm_driver driver;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_load_fail_count;
bool __i915_inject_load_failure(const char *func, int line)
@@ -69,6 +72,7 @@ bool __i915_inject_load_failure(const char *func, int line)
return false;
}
+#endif
#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
@@ -106,8 +110,12 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
return i915_modparams.inject_load_failure &&
i915_load_fail_count == i915_modparams.inject_load_failure;
+#else
+ return false;
+#endif
}
#define i915_load_error(dev_priv, fmt, ...) \
@@ -115,10 +123,90 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN5(dev_priv));
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+ WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+ /* PantherPoint is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
+ return PCH_KBP;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Ice Lake PCH\n");
+ WARN_ON(!IS_ICELAKE(dev_priv));
+ return PCH_ICP;
+ default:
+ return PCH_NONE;
+ }
+}
-static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
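+/*
+ * PCH device ids seen only in virtualized setups (P2X/P3X or the QEMU ISA
+ * bridge), where the real PCH has to be guessed from the platform instead.
+ */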
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
{
- enum intel_pch ret = PCH_NOP;
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static unsigned short
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+{
+ unsigned short id = 0;
/*
* In a virtualized passthrough environment we can be in a
@@ -127,28 +215,25 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN5(dev_priv)) {
- ret = PCH_IBX;
- DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
- } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
- ret = PCH_CPT;
- DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- ret = PCH_LPT;
- if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
- dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- else
- dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
- } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- ret = PCH_SPT;
- DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
- } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- ret = PCH_CNP;
- DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
- }
+ if (IS_GEN5(dev_priv))
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+ else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+
+ if (id)
+ DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
+ else
+ DRM_DEBUG_KMS("Assuming no PCH\n");
- return ret;
+ return id;
}
static void intel_detect_pch(struct drm_i915_private *dev_priv)
@@ -175,94 +260,31 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
* of only checking the first one.
*/
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ unsigned short id;
+ enum intel_pch pch_type;
- dev_priv->pch_id = id;
+ if (pch->vendor != PCI_VENDOR_ID_INTEL)
+ continue;
- if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_IBX;
- DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN5(dev_priv));
- } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_CPT;
- DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN6(dev_priv) &&
- !IS_IVYBRIDGE(dev_priv));
- } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
- /* PantherPoint is CPT compatible */
- dev_priv->pch_type = PCH_CPT;
- DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN6(dev_priv) &&
- !IS_IVYBRIDGE(dev_priv));
- } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_LPT;
- DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) &&
- !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) ||
- IS_BDW_ULT(dev_priv));
- } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_LPT;
- DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) &&
- !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) &&
- !IS_BDW_ULT(dev_priv));
- } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
- /* WildcatPoint is LPT compatible */
- dev_priv->pch_type = PCH_LPT;
- DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) &&
- !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) ||
- IS_BDW_ULT(dev_priv));
- } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
- /* WildcatPoint is LPT compatible */
- dev_priv->pch_type = PCH_LPT;
- DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) &&
- !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) &&
- !IS_BDW_ULT(dev_priv));
- } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_SPT;
- DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv));
- } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_SPT;
- DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv));
- } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_KBP;
- DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
- } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_CNP;
- DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
- } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_CNP;
- DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
- } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
- id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
- pch->subsystem_vendor ==
- PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
- pch->subsystem_device ==
- PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_type =
- intel_virt_detect_pch(dev_priv);
- } else
- continue;
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ pch_type = intel_pch_type(dev_priv, id);
+ if (pch_type != PCH_NONE) {
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ id = intel_virt_detect_pch(dev_priv);
+ if (id) {
+ pch_type = intel_pch_type(dev_priv, id);
+ if (WARN_ON(pch_type == PCH_NONE))
+ pch_type = PCH_NOP;
+ } else {
+ pch_type = PCH_NOP;
+ }
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
break;
}
}
@@ -272,8 +294,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
pci_dev_put(pch);
}
-static int i915_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int i915_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -321,7 +343,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = USES_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = i915_modparams.semaphores;
+ value = HAS_LEGACY_SEMAPHORES(dev_priv);
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@@ -367,16 +389,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = i915_gem_mmap_gtt_version();
break;
case I915_PARAM_HAS_SCHEDULER:
- value = 0;
- if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
- value |= I915_SCHEDULER_CAP_ENABLED;
- value |= I915_SCHEDULER_CAP_PRIORITY;
-
- if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
- i915_modparams.enable_execlists &&
- !i915_modparams.enable_guc_submission)
- value |= I915_SCHEDULER_CAP_PREEMPTION;
- }
+ value = dev_priv->caps.scheduler;
break;
case I915_PARAM_MMAP_VERSION:
@@ -407,16 +420,22 @@ static int i915_getparam(struct drm_device *dev, void *data,
*/
value = 1;
break;
+ case I915_PARAM_HAS_CONTEXT_ISOLATION:
+ value = intel_engines_has_context_isolation(dev_priv);
+ break;
case I915_PARAM_SLICE_MASK:
value = INTEL_INFO(dev_priv)->sseu.slice_mask;
if (!value)
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
- value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+ value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
if (!value)
return -ENODEV;
break;
+ case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
+ value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -613,10 +632,12 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
+ intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_misc(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
@@ -677,7 +698,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_uc;
- intel_modeset_gem_init(dev);
+ intel_setup_overlay(dev_priv);
if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
@@ -689,8 +710,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev_priv);
- drm_kms_helper_poll_init(dev);
-
return 0;
cleanup_gem:
@@ -724,7 +743,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = ggtt->mappable_base;
+ ap->ranges[0].base = ggtt->gmadr.start;
ap->ranges[0].size = ggtt->mappable_end;
primary =
@@ -790,7 +809,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
/*
* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
- * by the GPU. i915_gem_retire_requests() is called directly when we
+ * by the GPU. i915_retire_requests() is called directly when we
* need high-priority retirement, such as waiting for an explicit
* bo.
*
@@ -838,6 +857,11 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
* We don't keep the workarounds for pre-production hardware, so we expect our
* driver to fail on these machines in one way or another. A little warning on
* dmesg may help both the user and the bug triagers.
+ *
+ * Our policy for removing pre-production workarounds is to keep the
+ * current gen workarounds as a guide to the bring-up of the next gen
+ * (workarounds have a habit of persisting!). Anything older than that
+ * should be removed along with the complications they introduce.
*/
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
@@ -857,6 +881,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
/**
* i915_driver_init_early - setup state not requiring device access
* @dev_priv: device private
+ * @ent: the matching pci_device_id
*
* Initialize everything that is a "SW-only" state, that is state not
* requiring accessing the device or exposing the driver via kernel internal
@@ -882,17 +907,12 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
sizeof(device_info->platform_mask) * BITS_PER_BYTE);
- device_info->platform_mask = BIT(device_info->platform);
-
BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
- device_info->gen_mask = BIT(device_info->gen - 1);
-
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
- spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
@@ -923,12 +943,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_display_crc_init(dev_priv);
- intel_device_info_dump(dev_priv);
-
intel_detect_preproduction_hw(dev_priv);
- i915_perf_init(dev_priv);
-
return 0;
err_irq:
@@ -945,7 +961,6 @@ err_engines:
*/
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
- i915_perf_fini(dev_priv);
i915_gem_load_cleanup(dev_priv);
intel_irq_fini(dev_priv);
i915_workqueues_cleanup(dev_priv);
@@ -1048,10 +1063,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
- i915_modparams.enable_execlists =
- intel_sanitize_enable_execlists(dev_priv,
- i915_modparams.enable_execlists);
-
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
* user's requested state against the hardware/driver capabilities. We
@@ -1063,11 +1074,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
i915_modparams.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
- i915_modparams.semaphores =
- intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
- DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
- yesno(i915_modparams.semaphores));
-
intel_uc_sanitize_options(dev_priv);
intel_gvt_sanitize_options(dev_priv);
@@ -1088,10 +1094,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
- intel_device_info_runtime_init(dev_priv);
+ intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
intel_sanitize_options(dev_priv);
+ i915_perf_init(dev_priv);
+
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
return ret;
@@ -1197,6 +1205,8 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
+ i915_perf_fini(dev_priv);
+
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -1215,7 +1225,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- i915_gem_shrinker_init(dev_priv);
+ i915_gem_shrinker_register(dev_priv);
+ i915_pmu_register(dev_priv);
/*
* Notify a valid surface after modesetting,
@@ -1254,6 +1265,13 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* cannot run before the connectors are registered.
*/
intel_fbdev_initial_config_async(dev);
+
+ /*
+ * We need to coordinate the hotplugs with the asynchronous fbdev
+ * configuration, for which we use the fbdev->async_cookie.
+ */
+ if (INTEL_INFO(dev_priv)->num_pipes)
+ drm_kms_helper_poll_init(dev);
}
/**
@@ -1265,17 +1283,40 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_fbdev_unregister(dev_priv);
intel_audio_deinit(dev_priv);
+ /*
+ * After flushing the fbdev (incl. a late async config which will
+ * have delayed queuing of a hotplug event), flush the hotplug
+ * events.
+ */
+ drm_kms_helper_poll_fini(&dev_priv->drm);
+
intel_gpu_ips_teardown();
acpi_video_unregister();
intel_opregion_unregister(dev_priv);
i915_perf_unregister(dev_priv);
+ i915_pmu_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
i915_guc_log_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
- i915_gem_shrinker_cleanup(dev_priv);
+ i915_gem_shrinker_unregister(dev_priv);
+}
+
+static void i915_welcome_messages(struct drm_i915_private *dev_priv)
+{
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer("i915 device info:");
+
+ intel_device_info_dump(&dev_priv->info, &p);
+ intel_device_info_dump_runtime(&dev_priv->info, &p);
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ DRM_INFO("DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
}
/**
@@ -1363,13 +1404,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_init_ipc(dev_priv);
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- DRM_INFO("DRM_I915_DEBUG enabled\n");
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
-
intel_runtime_pm_put(dev_priv);
+ i915_welcome_messages(dev_priv);
+
return 0;
out_cleanup_hw:
@@ -1407,19 +1445,7 @@ void i915_driver_unload(struct drm_device *dev)
intel_modeset_cleanup(dev);
- /*
- * free the memory space allocated for the child device
- * config parsed from VBT
- */
- if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
- }
- kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
- dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
- kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
- dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+ intel_bios_cleanup(dev_priv);
vga_switcheroo_unregister_client(pdev);
vga_client_register(pdev, NULL, NULL, NULL);
@@ -1585,15 +1611,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
- bool fw_csr;
int ret;
disable_rpm_wakeref_asserts(dev_priv);
intel_display_set_init_power(dev_priv, false);
- fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
- suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
* deinit the power domains. This also means the CSR/DMC firmware will
@@ -1601,8 +1624,11 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* also enable deeper system power states that would be blocked if the
* firmware was inactive.
*/
- if (!fw_csr)
+ if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
+ dev_priv->csr.dmc_payload == NULL) {
intel_power_domains_suspend(dev_priv);
+ dev_priv->power_domains_suspended = true;
+ }
ret = 0;
if (IS_GEN9_LP(dev_priv))
@@ -1614,8 +1640,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
- if (!fw_csr)
+ if (dev_priv->power_domains_suspended) {
intel_power_domains_init_hw(dev_priv, true);
+ dev_priv->power_domains_suspended = false;
+ }
goto out;
}
@@ -1636,8 +1664,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
if (!(hibernation && INTEL_GEN(dev_priv) < 6))
pci_set_power_state(pdev, PCI_D3hot);
- dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
-
out:
enable_rpm_wakeref_asserts(dev_priv);
@@ -1682,8 +1708,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_csr_ucode_resume(dev_priv);
- i915_gem_resume(dev_priv);
-
i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
intel_opregion_setup(dev_priv);
@@ -1704,14 +1728,7 @@ static int i915_drm_resume(struct drm_device *dev)
drm_mode_config_reset(dev);
- mutex_lock(&dev->struct_mutex);
- if (i915_gem_init_hw(dev_priv)) {
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- i915_gem_set_wedged(dev_priv);
- }
- mutex_unlock(&dev->struct_mutex);
-
- intel_guc_resume(dev_priv);
+ i915_gem_resume(dev_priv);
intel_modeset_init_hw(dev);
intel_init_clock_gating(dev_priv);
@@ -1745,8 +1762,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
- intel_autoenable_gt_powersave(dev_priv);
-
enable_rpm_wakeref_asserts(dev_priv);
return 0;
@@ -1815,8 +1830,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(dev_priv);
if (IS_GEN9_LP(dev_priv)) {
- if (!dev_priv->suspended_to_idle)
- gen9_sanitize_dc_state(dev_priv);
+ gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_disable_pc8(dev_priv);
@@ -1824,16 +1838,17 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_sanitize(dev_priv);
- if (IS_GEN9_LP(dev_priv) ||
- !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+ if (dev_priv->power_domains_suspended)
intel_power_domains_init_hw(dev_priv, true);
+ else
+ intel_display_set_init_power(dev_priv, true);
i915_gem_sanitize(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
out:
- dev_priv->suspended_to_idle = false;
+ dev_priv->power_domains_suspended = false;
return ret;
}
@@ -1874,7 +1889,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gpu_error *error = &i915->gpu_error;
int ret;
+ int i;
+ might_sleep();
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -1892,22 +1909,29 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
disable_irq(i915->drm.irq);
ret = i915_gem_reset_prepare(i915);
if (ret) {
- DRM_ERROR("GPU recovery failed\n");
- intel_gpu_reset(i915, ALL_ENGINES);
- goto error;
+ dev_err(i915->drm.dev, "GPU recovery failed\n");
+ goto taint;
}
- ret = intel_gpu_reset(i915, ALL_ENGINES);
- if (ret) {
- if (ret != -ENODEV)
- DRM_ERROR("Failed to reset chip: %i\n", ret);
+ if (!intel_has_gpu_reset(i915)) {
+ if (i915_modparams.reset)
+ dev_err(i915->drm.dev, "GPU reset not supported\n");
else
DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}
- i915_gem_reset(i915);
- intel_overlay_reset(i915);
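+ /* Retry the full-chip reset a few times, pausing briefly between attempts. */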
+ for (i = 0; i < 3; i++) {
+ ret = intel_gpu_reset(i915, ALL_ENGINES);
+ if (ret == 0)
+ break;
+
+ msleep(100);
+ }
+ if (ret) {
+ dev_err(i915->drm.dev, "Failed to reset chip\n");
+ goto taint;
+ }
/* Ok, now get things going again... */
@@ -1917,10 +1941,14 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
*/
ret = i915_ggtt_enable_hw(i915);
if (ret) {
- DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret);
+ DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
+ ret);
goto error;
}
+ i915_gem_reset(i915);
+ intel_overlay_reset(i915);
+
/*
* Next we need to restore the context, but we don't use those
* yet either...
@@ -1931,7 +1959,8 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
*/
ret = i915_gem_init_hw(i915);
if (ret) {
- DRM_ERROR("Failed hw init on reset %d\n", ret);
+ DRM_ERROR("Failed to initialise HW following reset (%d)\n",
+ ret);
goto error;
}
@@ -1946,12 +1975,33 @@ wakeup:
wake_up_bit(&error->flags, I915_RESET_HANDOFF);
return;
+taint:
+ /*
+ * History tells us that if we cannot reset the GPU now, we
+ * never will. This then impacts everything that is run
+ * subsequently. On failing the reset, we mark the driver
+ * as wedged, preventing further execution on the GPU.
+ * We also want to go one step further and add a taint to the
+ * kernel so that any subsequent faults can be traced back to
+ * this failure. This is important for CI, where if the
+ * GPU/driver fails we would like to reboot and restart testing
+ * rather than continue on into oblivion. For everyone else,
+ * the system should still plod along, but they have been warned!
+ */
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
i915_gem_set_wedged(i915);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
+ intel_gpu_reset(i915, ALL_ENGINES);
goto finish;
}
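+/* Per-engine reset: invoke the GPU reset hardware with only this engine's mask bit. */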
+static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
+}
+
/**
* i915_reset_engine - reset GPU engine to recover from a hang
* @engine: engine to reset
@@ -1968,28 +2018,32 @@ error:
int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
{
struct i915_gpu_error *error = &engine->i915->gpu_error;
- struct drm_i915_gem_request *active_request;
+ struct i915_request *active_request;
int ret;
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+ active_request = i915_gem_reset_prepare_engine(engine);
+ if (IS_ERR_OR_NULL(active_request)) {
+ /* Either the previous reset failed, or we pardon the reset. */
+ ret = PTR_ERR(active_request);
+ goto out;
+ }
+
if (!(flags & I915_RESET_QUIET)) {
dev_notice(engine->i915->drm.dev,
"Resetting %s after gpu hang\n", engine->name);
}
error->reset_engine_count[engine->id]++;
- active_request = i915_gem_reset_prepare_engine(engine);
- if (IS_ERR(active_request)) {
- DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
- ret = PTR_ERR(active_request);
- goto out;
- }
-
- ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
+ if (!engine->i915->guc.execbuf_client)
+ ret = intel_gt_reset_engine(engine->i915, engine);
+ else
+ ret = intel_guc_reset_engine(&engine->i915->guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
+ DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
+ engine->i915->guc.execbuf_client ? "GuC " : "",
engine->name, ret);
goto out;
}
@@ -2504,7 +2558,7 @@ static int intel_runtime_suspend(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && intel_rc6_enabled())))
+ if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
@@ -2520,10 +2574,12 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_guc_suspend(dev_priv);
+ intel_uc_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_uncore_suspend(dev_priv);
+
ret = 0;
if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
@@ -2536,15 +2592,20 @@ static int intel_runtime_suspend(struct device *kdev)
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ intel_uncore_runtime_resume(dev_priv);
+
intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_uc_resume(dev_priv);
+
+ i915_gem_init_swizzling(dev_priv);
+ i915_gem_restore_fences(dev_priv);
+
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
- intel_uncore_suspend(dev_priv);
-
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
@@ -2605,8 +2666,6 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- intel_guc_resume(dev_priv);
-
if (IS_GEN9_LP(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
@@ -2621,6 +2680,10 @@ static int intel_runtime_resume(struct device *kdev)
intel_uncore_runtime_resume(dev_priv);
+ intel_runtime_pm_enable_interrupts(dev_priv);
+
+ intel_uc_resume(dev_priv);
+
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
@@ -2628,8 +2691,6 @@ static int intel_runtime_resume(struct device *kdev)
i915_gem_init_swizzling(dev_priv);
i915_gem_restore_fences(dev_priv);
- intel_runtime_pm_enable_interrupts(dev_priv);
-
/*
* On VLV/CHV display interrupts are part of the display
* power well, so hpd is reinitialized from there. For
@@ -2721,7 +2782,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -2733,8 +2794,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -2753,11 +2814,11 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
@@ -2770,6 +2831,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e143004..ce18b6c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -40,6 +40,7 @@
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
@@ -55,22 +56,24 @@
#include "i915_reg.h"
#include "i915_utils.h"
-#include "intel_uncore.h"
#include "intel_bios.h"
+#include "intel_device_info.h"
+#include "intel_display.h"
#include "intel_dpll_mgr.h"
-#include "intel_uc.h"
#include "intel_lrc.h"
+#include "intel_opregion.h"
#include "intel_ringbuffer.h"
+#include "intel_uncore.h"
+#include "intel_uc.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
-#include "i915_gem_render_state.h"
-#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+#include "i915_request.h"
#include "i915_vma.h"
#include "intel_gvt.h"
@@ -80,8 +83,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20171023"
-#define DRIVER_TIMESTAMP 1508748913
+#define DRIVER_DATE "20180308"
+#define DRIVER_TIMESTAMP 1520513379
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -101,9 +104,13 @@
#define I915_STATE_WARN_ON(x) \
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
__i915_inject_load_failure(__func__, __LINE__)
+#else
+#define i915_inject_load_failure() false
+#endif
typedef struct {
uint32_t val;
@@ -243,173 +250,6 @@ static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
return clamp_u64_to_fixed16(interm_sum);
}
-static inline const char *yesno(bool v)
-{
- return v ? "yes" : "no";
-}
-
-static inline const char *onoff(bool v)
-{
- return v ? "on" : "off";
-}
-
-static inline const char *enableddisabled(bool v)
-{
- return v ? "enabled" : "disabled";
-}
-
-enum pipe {
- INVALID_PIPE = -1,
- PIPE_A = 0,
- PIPE_B,
- PIPE_C,
- _PIPE_EDP,
- I915_MAX_PIPES = _PIPE_EDP
-};
-#define pipe_name(p) ((p) + 'A')
-
-enum transcoder {
- TRANSCODER_A = 0,
- TRANSCODER_B,
- TRANSCODER_C,
- TRANSCODER_EDP,
- TRANSCODER_DSI_A,
- TRANSCODER_DSI_C,
- I915_MAX_TRANSCODERS
-};
-
-static inline const char *transcoder_name(enum transcoder transcoder)
-{
- switch (transcoder) {
- case TRANSCODER_A:
- return "A";
- case TRANSCODER_B:
- return "B";
- case TRANSCODER_C:
- return "C";
- case TRANSCODER_EDP:
- return "EDP";
- case TRANSCODER_DSI_A:
- return "DSI A";
- case TRANSCODER_DSI_C:
- return "DSI C";
- default:
- return "<invalid>";
- }
-}
-
-static inline bool transcoder_is_dsi(enum transcoder transcoder)
-{
- return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
-}
-
-/*
- * Global legacy plane identifier. Valid only for primary/sprite
- * planes on pre-g4x, and only for primary planes on g4x+.
- */
-enum plane {
- PLANE_A,
- PLANE_B,
- PLANE_C,
-};
-#define plane_name(p) ((p) + 'A')
-
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
-
-/*
- * Per-pipe plane identifier.
- * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
- * number of planes per CRTC. Not all platforms really have this many planes,
- * which means some arrays of size I915_MAX_PLANES may have unused entries
- * between the topmost sprite plane and the cursor plane.
- *
- * This is expected to be passed to various register macros
- * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
- */
-enum plane_id {
- PLANE_PRIMARY,
- PLANE_SPRITE0,
- PLANE_SPRITE1,
- PLANE_SPRITE2,
- PLANE_CURSOR,
- I915_MAX_PLANES,
-};
-
-#define for_each_plane_id_on_crtc(__crtc, __p) \
- for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
- for_each_if ((__crtc)->plane_ids_mask & BIT(__p))
-
-enum port {
- PORT_NONE = -1,
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- I915_MAX_PORTS
-};
-#define port_name(p) ((p) + 'A')
-
-#define I915_NUM_PHYS_VLV 2
-
-enum dpio_channel {
- DPIO_CH0,
- DPIO_CH1
-};
-
-enum dpio_phy {
- DPIO_PHY0,
- DPIO_PHY1,
- DPIO_PHY2,
-};
-
-enum intel_display_power_domain {
- POWER_DOMAIN_PIPE_A,
- POWER_DOMAIN_PIPE_B,
- POWER_DOMAIN_PIPE_C,
- POWER_DOMAIN_PIPE_A_PANEL_FITTER,
- POWER_DOMAIN_PIPE_B_PANEL_FITTER,
- POWER_DOMAIN_PIPE_C_PANEL_FITTER,
- POWER_DOMAIN_TRANSCODER_A,
- POWER_DOMAIN_TRANSCODER_B,
- POWER_DOMAIN_TRANSCODER_C,
- POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_DSI_A,
- POWER_DOMAIN_TRANSCODER_DSI_C,
- POWER_DOMAIN_PORT_DDI_A_LANES,
- POWER_DOMAIN_PORT_DDI_B_LANES,
- POWER_DOMAIN_PORT_DDI_C_LANES,
- POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_A_IO,
- POWER_DOMAIN_PORT_DDI_B_IO,
- POWER_DOMAIN_PORT_DDI_C_IO,
- POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DSI,
- POWER_DOMAIN_PORT_CRT,
- POWER_DOMAIN_PORT_OTHER,
- POWER_DOMAIN_VGA,
- POWER_DOMAIN_AUDIO,
- POWER_DOMAIN_PLLS,
- POWER_DOMAIN_AUX_A,
- POWER_DOMAIN_AUX_B,
- POWER_DOMAIN_AUX_C,
- POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_GMBUS,
- POWER_DOMAIN_MODESET,
- POWER_DOMAIN_INIT,
-
- POWER_DOMAIN_NUM,
-};
-
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
-#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) \
- ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
-
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -471,122 +311,6 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
- for_each_if ((__mask) & (1 << (__p)))
-#define for_each_universal_plane(__dev_priv, __pipe, __p) \
- for ((__p) = 0; \
- (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
- (__p)++)
-#define for_each_sprite(__dev_priv, __p, __s) \
- for ((__s) = 0; \
- (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
- (__s)++)
-
-#define for_each_port_masked(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if ((__ports_mask) & (1 << (__port)))
-
-#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
-
-#define for_each_intel_plane(dev, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head)
-
-#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((plane_mask) & \
- (1 << drm_plane_index(&intel_plane->base)))
-
-#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
-
-#define for_each_intel_crtc(dev, intel_crtc) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head)
-
-#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head) \
- for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
-
-#define for_each_intel_encoder(dev, intel_encoder) \
- list_for_each_entry(intel_encoder, \
- &(dev)->mode_config.encoder_list, \
- base.head)
-
-#define for_each_intel_connector_iter(intel_connector, iter) \
- while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
-
-#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
- list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
- for_each_if ((intel_encoder)->base.crtc == (__crtc))
-
-#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
- list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
- for_each_if ((intel_connector)->base.encoder == (__encoder))
-
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if (BIT_ULL(domain) & (mask))
-
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
- (__power_well)++)
-
-#define for_each_power_well_rev(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
- (__power_well)--)
-
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well(__dev_priv, __power_well) \
- for_each_if ((__power_well)->domains & (__domain_mask))
-
-#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_rev(__dev_priv, __power_well) \
- for_each_if ((__power_well)->domains & (__domain_mask))
-
-#define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_total_plane && \
- ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
- (plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \
- (__i)++) \
- for_each_if (plane_state)
-
-#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_crtc && \
- ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
- (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
- (__i)++) \
- for_each_if (crtc)
-
-
-#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_total_plane && \
- ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
- (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
- (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
- (__i)++) \
- for_each_if (plane)
-
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
@@ -623,20 +347,6 @@ struct drm_i915_file_private {
atomic_t context_bans;
};
-/* Used by dp and fdi links */
-struct intel_link_m_n {
- uint32_t tu;
- uint32_t gmch_m;
- uint32_t gmch_n;
- uint32_t link_m;
- uint32_t link_n;
-};
-
-void intel_link_compute_m_n(int bpp, int nlanes,
- int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool reduce_m_n);
-
/* Interface history:
*
* 1.1: Original.
@@ -651,27 +361,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
-struct opregion_header;
-struct opregion_acpi;
-struct opregion_swsci;
-struct opregion_asle;
-
-struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- u32 swsci_gbda_sub_functions;
- u32 swsci_sbcb_sub_functions;
- struct opregion_asle *asle;
- void *rvda;
- void *vbt_firmware;
- const void *vbt;
- u32 vbt_size;
- u32 *lid_state;
- struct work_struct asle_work;
-};
-#define OPREGION_SIZE (8*1024)
-
struct intel_overlay;
struct intel_overlay_error_state;
@@ -699,7 +388,8 @@ struct drm_i915_display_funcs {
struct intel_cdclk_state *cdclk_state);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
- int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
+ int (*get_fifo_size)(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
struct intel_crtc *intel_crtc,
@@ -726,10 +416,12 @@ struct drm_i915_display_funcs {
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state);
void (*update_crtcs)(struct drm_atomic_state *state);
- void (*audio_codec_enable)(struct drm_connector *connector,
- struct intel_encoder *encoder,
- const struct drm_display_mode *adjusted_mode);
- void (*audio_codec_disable)(struct intel_encoder *encoder);
+ void (*audio_codec_enable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ void (*audio_codec_disable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
void (*fdi_link_train)(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
@@ -761,142 +453,13 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
-#define DEV_INFO_FOR_EACH_FLAG(func) \
- func(is_mobile); \
- func(is_lp); \
- func(is_alpha_support); \
- /* Keep has_* in alphabetical order */ \
- func(has_64bit_reloc); \
- func(has_aliasing_ppgtt); \
- func(has_csr); \
- func(has_ddi); \
- func(has_dp_mst); \
- func(has_reset_engine); \
- func(has_fbc); \
- func(has_fpga_dbg); \
- func(has_full_ppgtt); \
- func(has_full_48bit_ppgtt); \
- func(has_gmch_display); \
- func(has_guc); \
- func(has_guc_ct); \
- func(has_hotplug); \
- func(has_l3_dpf); \
- func(has_llc); \
- func(has_logical_ring_contexts); \
- func(has_logical_ring_preemption); \
- func(has_overlay); \
- func(has_pooled_eu); \
- func(has_psr); \
- func(has_rc6); \
- func(has_rc6p); \
- func(has_resource_streamer); \
- func(has_runtime_pm); \
- func(has_snoop); \
- func(unfenced_needs_alignment); \
- func(cursor_needs_physical); \
- func(hws_needs_physical); \
- func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_ipc);
-
-struct sseu_dev_info {
- u8 slice_mask;
- u8 subslice_mask;
- u8 eu_total;
- u8 eu_per_subslice;
- u8 min_eu_in_pool;
- /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
- u8 subslice_7eu[3];
- u8 has_slice_pg:1;
- u8 has_subslice_pg:1;
- u8 has_eu_pg:1;
-};
-
-static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
-{
- return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
-}
-
-/* Keep in gen based order, and chronological order within a gen */
-enum intel_platform {
- INTEL_PLATFORM_UNINITIALIZED = 0,
- INTEL_I830,
- INTEL_I845G,
- INTEL_I85X,
- INTEL_I865G,
- INTEL_I915G,
- INTEL_I915GM,
- INTEL_I945G,
- INTEL_I945GM,
- INTEL_G33,
- INTEL_PINEVIEW,
- INTEL_I965G,
- INTEL_I965GM,
- INTEL_G45,
- INTEL_GM45,
- INTEL_IRONLAKE,
- INTEL_SANDYBRIDGE,
- INTEL_IVYBRIDGE,
- INTEL_VALLEYVIEW,
- INTEL_HASWELL,
- INTEL_BROADWELL,
- INTEL_CHERRYVIEW,
- INTEL_SKYLAKE,
- INTEL_BROXTON,
- INTEL_KABYLAKE,
- INTEL_GEMINILAKE,
- INTEL_COFFEELAKE,
- INTEL_CANNONLAKE,
- INTEL_MAX_PLATFORMS
-};
-
-struct intel_device_info {
- u16 device_id;
- u16 gen_mask;
-
- u8 gen;
- u8 gt; /* GT number, 0 if undefined */
- u8 num_rings;
- u8 ring_mask; /* Rings supported by the HW */
-
- enum intel_platform platform;
- u32 platform_mask;
-
- u32 display_mmio_offset;
-
- u8 num_pipes;
- u8 num_sprites[I915_MAX_PIPES];
- u8 num_scalers[I915_MAX_PIPES];
-
- unsigned int page_sizes; /* page sizes supported by the HW */
-
-#define DEFINE_FLAG(name) u8 name:1
- DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
-#undef DEFINE_FLAG
- u16 ddb_size; /* in blocks */
-
- /* Register offsets for the various display pipes and transcoders */
- int pipe_offsets[I915_MAX_TRANSCODERS];
- int trans_offsets[I915_MAX_TRANSCODERS];
- int palette_offsets[I915_MAX_PIPES];
- int cursor_offsets[I915_MAX_PIPES];
-
- /* Slice/subslice/EU info */
- struct sseu_dev_info sseu;
-
- struct color_luts {
- u16 degamma_lut_size;
- u16 gamma_lut_size;
- } color;
-};
-
struct intel_display_error_state;
struct i915_gpu_state {
struct kref ref;
- struct timeval time;
- struct timeval boottime;
- struct timeval uptime;
+ ktime_t time;
+ ktime_t boottime;
+ ktime_t uptime;
struct drm_i915_private *i915;
@@ -909,8 +472,15 @@ struct i915_gpu_state {
u32 reset_count;
u32 suspend_count;
struct intel_device_info device_info;
+ struct intel_driver_caps driver_caps;
struct i915_params params;
+ struct i915_error_uc {
+ struct intel_uc_fw guc_fw;
+ struct intel_uc_fw huc_fw;
+ struct drm_i915_error_object *guc_log;
+ } uc;
+
/* Generic register state */
u32 eir;
u32 pgtbl_er;
@@ -933,12 +503,11 @@ struct i915_gpu_state {
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
- struct drm_i915_error_object *semaphore;
- struct drm_i915_error_object *guc_log;
struct drm_i915_error_engine {
int engine_id;
/* Software tracked state */
+ bool idle;
bool waiting;
int num_waiters;
unsigned long hangcheck_timestamp;
@@ -987,6 +556,7 @@ struct i915_gpu_state {
int ban_score;
int active;
int guilty;
+ bool bannable;
} context;
struct drm_i915_error_object {
@@ -1001,6 +571,7 @@ struct i915_gpu_state {
long user_bo_count;
struct drm_i915_error_object *wa_ctx;
+ struct drm_i915_error_object *default_state;
struct drm_i915_error_request {
long jiffies;
@@ -1096,6 +667,7 @@ struct intel_fbc {
*/
struct intel_fbc_state_cache {
struct i915_vma *vma;
+ unsigned long flags;
struct {
unsigned int mode_flags;
@@ -1134,10 +706,11 @@ struct intel_fbc {
*/
struct intel_fbc_reg_params {
struct i915_vma *vma;
+ unsigned long flags;
struct {
enum pipe pipe;
- enum plane plane;
+ enum i9xx_plane_id i9xx_plane;
unsigned int fence_y_offset;
} crtc;
@@ -1152,7 +725,7 @@ struct intel_fbc {
struct intel_fbc_work {
bool scheduled;
- u32 scheduled_vblank;
+ u64 scheduled_vblank;
struct work_struct work;
} work;
@@ -1189,7 +762,6 @@ struct i915_drrs {
struct i915_psr {
struct mutex lock;
bool sink_support;
- bool source_ok;
struct intel_dp *enabled;
bool active;
struct delayed_work work;
@@ -1218,6 +790,7 @@ enum intel_pch {
PCH_SPT, /* Sunrisepoint PCH */
PCH_KBP, /* Kaby Lake PCH */
PCH_CNP, /* Cannon Lake PCH */
+ PCH_ICP, /* Ice Lake PCH */
PCH_NOP,
};
@@ -1376,6 +949,8 @@ struct intel_rps {
struct intel_rc6 {
bool enabled;
+ u64 prev_hw_residency[4];
+ u64 cur_residency[4];
};
struct intel_llc_pstate {
@@ -1386,7 +961,6 @@ struct intel_gen6_power_mgmt {
struct intel_rps rps;
struct intel_rc6 rc6;
struct intel_llc_pstate llc_pstate;
- struct delayed_work autoenable_work;
};
/* defined intel_pm.c */
@@ -1523,15 +1097,17 @@ struct i915_gem_mm {
struct llist_head free_list;
struct work_struct free_work;
spinlock_t free_lock;
+ /**
+ * Count of objects pending destruction. Used to skip needlessly
+ * waiting on an RCU barrier if no objects are waiting to be freed.
+ */
+ atomic_t free_count;
/**
* Small stash of WC pages
*/
struct pagevec wc_stash;
- /** Usable portion of the GTT for GEM */
- dma_addr_t stolen_base; /* limited to low memory (32-bit) */
-
/**
* tmpfs instance used for shmem backed objects
*/
@@ -1580,6 +1156,8 @@ struct drm_i915_error_state_buf {
loff_t pos;
};
+#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
+
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
@@ -1653,7 +1231,7 @@ struct i915_gpu_error {
*
* #I915_WEDGED - If reset fails and we can no longer use the GPU,
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
- * i915_gem_request_alloc(), this bit is checked and the sequence
+ * i915_request_alloc(), this bit is checked and the sequence
* aborted (with -EIO reported to userspace) if set.
*/
unsigned long flags;
@@ -1692,12 +1270,15 @@ enum modeset_restore {
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
+#define DP_AUX_F 0x60
#define DDC_PIN_B 0x05
#define DDC_PIN_C 0x04
#define DDC_PIN_D 0x06
struct ddi_vbt_port_info {
+ int max_tmds_clock;
+
/*
* This is an index in the HDMI/DVI DDI buffer translation table.
* The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
@@ -1716,6 +1297,7 @@ struct ddi_vbt_port_info {
uint8_t dp_boost_level;
uint8_t hdmi_boost_level;
+ int dp_max_link_rate; /* 0 for not limited by VBT */
};
enum psr_lines_to_wait {
@@ -1784,6 +1366,7 @@ struct intel_vbt_data {
u32 size;
u8 *data;
const u8 *sequence[MIPI_SEQ_MAX];
+ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
} dsi;
int crt_ddc_pin;
@@ -1895,6 +1478,7 @@ struct skl_wm_params {
uint_fixed_16_16_t plane_blocks_per_line;
uint_fixed_16_16_t y_tile_minimum;
uint32_t linetime_us;
+ uint32_t dbuf_block_size;
};
/*
@@ -2227,7 +1811,8 @@ struct i915_oa_ops {
};
struct intel_cdclk_state {
- unsigned int cdclk, vco, ref;
+ unsigned int cdclk, vco, ref, bypass;
+ u8 voltage_level;
};
struct drm_i915_private {
@@ -2241,6 +1826,31 @@ struct drm_i915_private {
struct kmem_cache *priorities;
const struct intel_device_info info;
+ struct intel_driver_caps caps;
+
+ /**
+ * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
+ * end of the stolen region, which we can optionally use to create GEM
+ * objects backed by stolen memory. Note that stolen_usable_size tells us
+ * exactly how much of this we are actually allowed to use, given that
+ * some portion of it is in fact reserved for use by hardware functions.
+ */
+ struct resource dsm;
+ /**
+ * Reserved portion of Data Stolen Memory
+ */
+ struct resource dsm_reserved;
+
+ /*
+ * Stolen memory is segmented in hardware with different portions
+ * off-limits to certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of stolen memory is reserved for
+ * hardware functions and similarly removed from the accessible range.
+ */
+ resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
void __iomem *regs;
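
A minimal userspace sketch of how the new dsm/dsm_reserved resources and stolen_usable_size are expected to relate. The field names mirror the hunk above; the struct resource stand-in, the resource_size() helper and the example base/size values are made up purely for illustration.

#include <stdio.h>

/* Stand-ins for the kernel's struct resource / resource_size_t. */
typedef unsigned long long resource_size_t;
struct resource { resource_size_t start, end; };

static resource_size_t resource_size(const struct resource *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	/* Hypothetical 64 MiB stolen region with 8 MiB reserved at its top. */
	struct resource dsm          = { .start = 0x80000000ull, .end = 0x83ffffffull };
	struct resource dsm_reserved = { .start = 0x83800000ull, .end = 0x83ffffffull };

	/* Usable stolen = total stolen minus the reserved range. */
	resource_size_t stolen_usable_size =
		resource_size(&dsm) - resource_size(&dsm_reserved);

	printf("stolen total: %llu MiB, usable: %llu MiB\n",
	       resource_size(&dsm) >> 20, stolen_usable_size >> 20);
	return 0;
}
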
@@ -2281,7 +1891,8 @@ struct drm_i915_private {
struct i915_gem_context *kernel_context;
/* Context only to be used for injecting preemption commands */
struct i915_gem_context *preempt_context;
- struct i915_vma *semaphore;
+ struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+ [MAX_ENGINE_INSTANCE + 1];
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
@@ -2339,6 +1950,7 @@ struct drm_i915_private {
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
unsigned int hpll_freq;
+ unsigned int fdi_pll_freq;
unsigned int czclk_freq;
struct {
@@ -2418,6 +2030,8 @@ struct drm_i915_private {
unsigned int active_crtcs;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@@ -2489,6 +2103,7 @@ struct drm_i915_private {
*/
struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
} contexts;
u32 fdi_rx_config;
@@ -2504,7 +2119,7 @@ struct drm_i915_private {
u32 bxt_phy_grc;
u32 suspend_count;
- bool suspended_to_idle;
+ bool power_domains_suspended;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
@@ -2609,7 +2224,6 @@ struct drm_i915_private {
bool periodic;
int period_exponent;
- int timestamp_frequency;
struct i915_oa_config test_config;
@@ -2720,6 +2334,12 @@ struct drm_i915_private {
bool awake;
/**
+ * The number of times we have woken up.
+ */
+ unsigned int epoch;
+#define I915_EPOCH_INVALID 0
+
+ /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -2754,6 +2374,8 @@ struct drm_i915_private {
int irq;
} lpe_audio;
+ struct i915_pmu pmu;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -2809,18 +2431,17 @@ enum hdmi_force_audio {
*
* We have one bit per pipe and per scanout plane type.
*/
-#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
- (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
-#define INTEL_FRONTBUFFER_CURSOR(pipe) \
- (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
-#define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
- (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
+ BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
+ BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
+})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
- (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+ BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
- (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+ GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
+ INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
/*
* Optimised SGL iterator for GEM objects
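
A self-contained sketch of the bit layout produced by the reworked frontbuffer macros above. Only the 8-bits-per-pipe layout and the BIT()/GENMASK() arithmetic are taken from the hunk; the helper names and the example pipe/plane values are invented for illustration.

#include <stdio.h>

#define FRONTBUFFER_BITS_PER_PIPE 8	/* mirrors INTEL_FRONTBUFFER_BITS_PER_PIPE */

/* Userspace stand-ins for the kernel's BIT() and GENMASK() on a 32-bit mask. */
#define BIT32(n)	(1u << (n))
#define GENMASK32(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

static unsigned int frontbuffer_bit(int pipe, int plane_id)
{
	return BIT32(plane_id + FRONTBUFFER_BITS_PER_PIPE * pipe);
}

static unsigned int frontbuffer_overlay_bit(int pipe)
{
	return BIT32(FRONTBUFFER_BITS_PER_PIPE - 1 +
		     FRONTBUFFER_BITS_PER_PIPE * pipe);
}

static unsigned int frontbuffer_all_mask(int pipe)
{
	return GENMASK32(FRONTBUFFER_BITS_PER_PIPE * (pipe + 1) - 1,
			 FRONTBUFFER_BITS_PER_PIPE * pipe);
}

int main(void)
{
	/* Pipe B (index 1), plane_id 2: expect bit 10, overlay bit 15,
	 * and a per-pipe mask covering bits 8..15. */
	printf("plane bit    0x%08x\n", frontbuffer_bit(1, 2));
	printf("overlay bit  0x%08x\n", frontbuffer_overlay_bit(1));
	printf("pipe mask    0x%08x\n", frontbuffer_all_mask(1));
	return 0;
}
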
@@ -3000,6 +2621,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
+#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@@ -3049,6 +2671,10 @@ intel_info(const struct drm_i915_private *dev_priv)
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(dev_priv)->info.gt == 2)
+#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
+ (dev_priv)->info.gt == 3)
+#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
+ (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -3109,6 +2735,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
#define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9)))
+#define IS_GEN11(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(10)))
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
@@ -3120,6 +2747,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define BLT_RING ENGINE_MASK(BCS)
#define VEBOX_RING ENGINE_MASK(VECS)
#define BSD2_RING ENGINE_MASK(VCS2)
+#define BSD3_RING ENGINE_MASK(VCS3)
+#define BSD4_RING ENGINE_MASK(VCS4)
+#define VEBOX2_RING ENGINE_MASK(VECS2)
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
@@ -3130,6 +2760,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
+#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
+
#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
@@ -3140,6 +2772,13 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
+#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_elsq)
+#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
+ ((dev_priv)->info.has_logical_ring_preemption)
+
+#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
+
#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
@@ -3155,9 +2794,10 @@ intel_info(const struct drm_i915_private *dev_priv)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
-/* WaRsDisableCoarsePowerGating:skl,bxt */
+/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+ (IS_CANNONLAKE(dev_priv) || \
+ IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -3182,7 +2822,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
-#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
+#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
@@ -3191,8 +2831,10 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+
#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
+#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
@@ -3210,8 +2852,16 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
+
+/* For now, anything with a GuC also has a HuC */
+#define HAS_HUC(dev_priv) (HAS_GUC(dev_priv))
#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
+/* Having a GuC is not the same as using a GuC */
+#define USES_GUC(dev_priv) intel_uc_is_using_guc()
+#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission()
+#define USES_HUC(dev_priv) intel_uc_is_using_huc()
+
#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
@@ -3229,23 +2879,26 @@ intel_info(const struct drm_i915_private *dev_priv)
#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_CNP_LP(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
- (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
- (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE)
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
@@ -3288,8 +2941,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
-bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
-
/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3318,7 +2969,9 @@ extern int i915_reset_engine(struct intel_engine_cs *engine,
unsigned int flags);
extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
-extern int intel_guc_reset(struct drm_i915_private *dev_priv);
+extern int intel_reset_guc(struct drm_i915_private *dev_priv);
+extern int intel_guc_reset_engine(struct intel_guc *guc,
+ struct intel_engine_cs *engine);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -3336,8 +2989,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum port intel_hpd_pin_to_port(enum hpd_pin pin);
-enum hpd_pin intel_hpd_pin(enum port port);
+enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
+ enum hpd_pin pin);
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+ enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
@@ -3450,10 +3105,10 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -3497,6 +3152,9 @@ void i915_gem_free_object(struct drm_gem_object *obj);
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
+ if (!atomic_read(&i915->mm.free_count))
+ return;
+
/* A single pass should suffice to release all the freed objects (along
* most call paths) , but be a little more paranoid in that freeing
* the objects does take a little amount of time, during which the rcu
@@ -3678,7 +3336,7 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -3693,11 +3351,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-
static inline bool i915_reset_backoff(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -3729,7 +3385,7 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return READ_ONCE(error->reset_engine_count[engine->id]);
}
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv);
@@ -3738,7 +3394,7 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request);
+ struct i915_request *request);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3768,7 +3424,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- const struct i915_ggtt_view *view);
+ const struct i915_ggtt_view *view,
+ unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
@@ -3855,6 +3512,8 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
+
/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
@@ -3876,12 +3535,13 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size);
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- u32 stolen_offset,
- u32 gtt_offset,
- u32 size);
+ resource_size_t stolen_offset,
+ resource_size_t gtt_offset,
+ resource_size_t size);
/* i915_gem_internal.c */
struct drm_i915_gem_object *
@@ -3889,7 +3549,7 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
phys_addr_t size);
/* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags);
@@ -3898,9 +3558,9 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
-unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
-void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
+void i915_gem_shrinker_register(struct drm_i915_private *i915);
+void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
/* i915_gem_tiling.c */
@@ -4027,6 +3687,7 @@ extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
unsigned int pin);
+extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
@@ -4040,6 +3701,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
/* intel_bios.c */
void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
@@ -4052,41 +3714,6 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port);
-
-/* intel_opregion.c */
-#ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
-extern void intel_opregion_register(struct drm_i915_private *dev_priv);
-extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
-extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
-extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
- bool enable);
-extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
- pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
-#else
-static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
-static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
-static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
-static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
-{
-}
-static inline int
-intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
-{
- return 0;
-}
-static inline int
-intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
-{
- return 0;
-}
-static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
-{
- return -ENODEV;
-}
-#endif
-
/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
@@ -4103,14 +3730,9 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)&dev_priv->info;
}
-const char *intel_platform_name(enum intel_platform platform);
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_dump(struct drm_i915_private *dev_priv);
-
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
-extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
@@ -4140,7 +3762,12 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+ u32 val, int fast_timeout_us,
+ int slow_timeout_ms);
+#define sandybridge_pcode_write(dev_priv, mbox, val) \
+ sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
+
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
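
The same default-timeout wrapper pattern as sandybridge_pcode_write(), reduced to a standalone sketch. The function and parameter names here are placeholders; only the 500 us fast / 0 ms slow defaults come from the macro above.

#include <stdio.h>

/* Placeholder for the real mailbox write; only prints its arguments here. */
static int pcode_write_timeout(unsigned int mbox, unsigned int val,
			       int fast_timeout_us, int slow_timeout_ms)
{
	printf("mbox=%#x val=%#x fast=%d us slow=%d ms\n",
	       mbox, val, fast_timeout_us, slow_timeout_ms);
	return 0;
}

/* Callers that don't care about timeouts keep the old two-argument form,
 * picking up the 500 us fast / 0 ms slow defaults. */
#define pcode_write(mbox, val) pcode_write_timeout((mbox), (val), 500, 0)

int main(void)
{
	return pcode_write(0x11, 0x1);
}
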
@@ -4177,8 +3804,7 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
-uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
- uint8_t lane_count);
+uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
@@ -4187,24 +3813,39 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
bool reset);
-void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
-void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
-void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
void vlv_set_phy_signal_level(struct intel_encoder *encoder,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph);
-void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
-void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
-void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg);
+u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
+
+static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+ const i915_reg_t reg)
+{
+ return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
+}
+
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
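
A quick arithmetic check of the ns-to-us conversion used by the new intel_rc6_residency_us() wrapper above. DIV_ROUND_UP_ULL() is re-implemented locally so the sketch runs in userspace, and the sample residency value is arbitrary.

#include <stdio.h>

/* Userspace stand-in for the kernel's DIV_ROUND_UP_ULL(). */
static unsigned long long div_round_up_ull(unsigned long long n,
					    unsigned long long d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned long long residency_ns = 1234567;	/* arbitrary sample */

	/* Matches the wrapper: round up, so a non-zero residency in ns
	 * never reports as 0 us. Expect 1235 us here. */
	printf("%llu ns -> %llu us\n", residency_ns,
	       div_round_up_ull(residency_ns, 1000));
	return 0;
}
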
@@ -4372,9 +4013,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
static inline bool
-__i915_request_irq_complete(const struct drm_i915_gem_request *req)
+__i915_request_irq_complete(const struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 seqno;
/* Note that the engine may have wrapped around the seqno, and
@@ -4383,7 +4024,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
* this by kicking all the waiters before resetting the seqno
* in hardware, and also signal the fence.
*/
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
return true;
/* The request was dequeued before we were awoken. We check after
@@ -4392,14 +4033,14 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
* the request execution are sufficient to ensure that a check
* after reading the value from hw matches this request.
*/
- seqno = i915_gem_request_global_seqno(req);
+ seqno = i915_request_global_seqno(rq);
if (!seqno)
return false;
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
- if (__i915_gem_request_completed(req, seqno))
+ if (__i915_request_completed(rq, seqno))
return true;
/* Ensure our read of the seqno is coherent so that we
@@ -4448,7 +4089,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
wake_up_process(b->irq_wait->tsk);
spin_unlock_irq(&b->irq_lock);
- if (__i915_gem_request_completed(req, seqno))
+ if (__i915_request_completed(rq, seqno))
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cfba89..7b5a9d7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -240,8 +240,8 @@ err_phys:
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
if (cpu_write_needs_clflush(obj))
obj->cache_dirty = true;
}
@@ -257,7 +257,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
obj->mm.dirty = false;
if (needs_clflush &&
- (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_sg(pages);
@@ -353,7 +353,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
long timeout,
struct intel_rps_client *rps_client)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
@@ -366,10 +366,11 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
timeout);
rq = to_request(fence);
- if (i915_gem_request_completed(rq))
+ if (i915_request_completed(rq))
goto out;
- /* This client is about to stall waiting for the GPU. In many cases
+ /*
+ * This client is about to stall waiting for the GPU. In many cases
* this is undesirable and limits the throughput of the system, as
* many clients cannot continue processing user input/output whilst
* blocked. RPS autotuning may take tens of milliseconds to respond
@@ -384,18 +385,16 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
* forcing the clocks too high for the whole system, we only allow
* each client to waitboost once in a busy period.
*/
- if (rps_client) {
+ if (rps_client && !i915_request_started(rq)) {
if (INTEL_GEN(rq->i915) >= 6)
gen6_rps_boost(rq, rps_client);
- else
- rps_client = NULL;
}
- timeout = i915_wait_request(rq, flags, timeout);
+ timeout = i915_request_wait(rq, flags, timeout);
out:
- if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
- i915_gem_request_retire_upto(rq);
+ if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
+ i915_request_retire_upto(rq);
return timeout;
}
@@ -434,20 +433,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
dma_fence_put(shared[i]);
kfree(shared);
+ /*
+ * If both shared fences and an exclusive fence exist,
+ * then by construction the shared fences must be later
+ * than the exclusive fence. If we successfully wait for
+ * all the shared fences, we know that the exclusive fence
+ * must also be signaled. If all the shared fences are
+ * signaled, we can prune the array and recover the
+ * floating references on the fences/requests.
+ */
prune_fences = count && timeout >= 0;
} else {
excl = reservation_object_get_excl_rcu(resv);
}
- if (excl && timeout >= 0) {
+ if (excl && timeout >= 0)
timeout = i915_gem_object_wait_fence(excl, flags, timeout,
rps_client);
- prune_fences = timeout >= 0;
- }
dma_fence_put(excl);
- /* Oportunistically prune the fences iff we know they have *all* been
+ /*
+ * Opportunistically prune the fences iff we know they have *all* been
* signaled and that the reservation object has not been changed (i.e.
* no new fences have been added).
*/
@@ -464,7 +471,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
static void __fence_set_priority(struct dma_fence *fence, int prio)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct intel_engine_cs *engine;
if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
@@ -472,10 +479,11 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
rq = to_request(fence);
engine = rq->engine;
- if (!engine->schedule)
- return;
- engine->schedule(rq, prio);
+ rcu_read_lock();
+ if (engine->schedule)
+ engine->schedule(rq, prio);
+ rcu_read_unlock();
}
static void fence_set_priority(struct dma_fence *fence, int prio)
@@ -531,7 +539,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
* @obj: i915 gem object
* @flags: how to wait (under a lock, for all rendering or just for writes etc)
* @timeout: how long to wait
- * @rps: client (user process) to charge for any waitboosting
+ * @rps_client: client (user process) to charge for any waitboosting
*/
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
@@ -666,17 +674,13 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
-static void
-flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (!(obj->base.write_domain & flush_domains))
- return;
-
- /* No actual flushing is required for the GTT write domain. Writes
- * to it "immediately" go to main memory as far as we know, so there's
- * no chipset flush. It also doesn't land in render cache.
+ /*
+ * No actual flushing is required for the GTT write domain for reads
+ * from the GTT domain. Writes to it "immediately" go to main memory
+ * as far as we know, so there's no chipset flush. It also doesn't
+ * land in the GPU render cache.
*
* However, we do have to enforce the order so that all writes through
* the GTT land before any writes to the device, such as updates to
@@ -687,22 +691,43 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
* timing. This issue has only been observed when switching quickly
* between GTT writes and CPU reads from inside the kernel on recent hw,
* and it appears to only affect discrete GTT blocks (i.e. on LLC
- * system agents we cannot reproduce this behaviour).
+ * system agents we cannot reproduce this behaviour, until Cannonlake
+ * that was!).
*/
+
wmb();
- switch (obj->base.write_domain) {
+ intel_runtime_pm_get(dev_priv);
+ spin_lock_irq(&dev_priv->uncore.lock);
+
+ POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
+
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_runtime_pm_put(dev_priv);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+
+ if (!(obj->write_domain & flush_domains))
+ return;
+
+ switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
- if (!HAS_LLC(dev_priv)) {
- intel_runtime_pm_get(dev_priv);
- spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
- spin_unlock_irq(&dev_priv->uncore.lock);
- intel_runtime_pm_put(dev_priv);
- }
+ i915_gem_flush_ggtt_writes(dev_priv);
intel_fb_obj_flush(obj,
fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+
+ for_each_ggtt_vma(vma, obj) {
+ if (vma->iomap)
+ continue;
+
+ i915_vma_unset_ggtt_write(vma);
+ }
break;
case I915_GEM_DOMAIN_CPU:
@@ -715,7 +740,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
break;
}
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
static inline int
@@ -815,7 +840,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
* anyway again before the next pread happens.
*/
if (!obj->cache_dirty &&
- !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ !(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush = CLFLUSH_BEFORE;
out:
@@ -874,7 +899,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
* Same trick applies to invalidate partially written
* cachelines read before writing.
*/
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush |= CLFLUSH_BEFORE;
}
@@ -1099,7 +1124,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_base += offset & PAGE_MASK;
}
- if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+ if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
@@ -1307,7 +1332,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
- if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+ if (ggtt_write(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
@@ -1549,10 +1574,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj) {
if (i915_vma_is_active(vma))
continue;
@@ -1612,7 +1634,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (err)
goto out;
- /* Flush and acquire obj->pages so that we are coherent through
+ /*
+ * Proxy objects do not control access to the backing storage, ergo
+ * they cannot be used as a means to manipulate the cache domain
+ * tracking for that backing storage. The proxy object is always
+ * considered to be outside of any cache domain.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
* For example, if the obj->filp was moved to swap without us
@@ -1668,6 +1702,11 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
+ /*
+ * Proxy objects are barred from CPU access, so there is no
+ * need to ban sw_finish as it is a nop.
+ */
+
/* Pinned buffers may be scanout, so flush the cache */
i915_gem_object_flush_if_display(obj);
i915_gem_object_put(obj);
@@ -1718,7 +1757,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
*/
if (!obj->base.filp) {
i915_gem_object_put(obj);
- return -EINVAL;
+ return -ENXIO;
}
addr = vm_mmap(obj->base.filp, 0, args->size,
@@ -1936,9 +1975,9 @@ int i915_gem_fault(struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->mappable);
+ &ggtt->iomap);
if (ret)
goto err_fence;
@@ -1948,6 +1987,8 @@ int i915_gem_fault(struct vm_fault *vmf)
list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
GEM_BUG_ON(!obj->userfault_count);
+ i915_vma_set_ggtt_write(vma);
+
err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
@@ -2012,12 +2053,8 @@ static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj)
i915_vma_unset_userfault(vma);
- }
}
/**
@@ -2363,8 +2400,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
@@ -2567,7 +2604,7 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
err = obj->ops->get_pages(obj);
- GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
+ GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
return err;
}
@@ -2662,7 +2699,8 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int ret;
- GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+ if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ return ERR_PTR(-ENXIO);
ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
@@ -2794,24 +2832,23 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
-static bool ban_context(const struct i915_gem_context *ctx,
- unsigned int score)
-{
- return (i915_gem_context_is_bannable(ctx) &&
- score >= CONTEXT_SCORE_BAN_THRESHOLD);
-}
-
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
- unsigned int score;
bool banned;
atomic_inc(&ctx->guilty_count);
- score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
- banned = ban_context(ctx, score);
- DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
- ctx->name, score, yesno(banned));
+ banned = false;
+ if (i915_gem_context_is_bannable(ctx)) {
+ unsigned int score;
+
+ score = atomic_add_return(CONTEXT_SCORE_GUILTY,
+ &ctx->ban_score);
+ banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+
+ DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
+ ctx->name, score, yesno(banned));
+ }
if (!banned)
return;
@@ -2828,10 +2865,10 @@ static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
atomic_inc(&ctx->active_count);
}
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request, *active = NULL;
+ struct i915_request *request, *active = NULL;
unsigned long flags;
/* We are called by the error capture and reset at a random
@@ -2844,8 +2881,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
*/
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link) {
- if (__i915_gem_request_completed(request,
- request->global_seqno))
+ if (__i915_request_completed(request, request->global_seqno))
continue;
GEM_BUG_ON(request->engine != engine);
@@ -2878,10 +2914,10 @@ static bool engine_stalled(struct intel_engine_cs *engine)
* Ensure irq handler finishes, and not run again.
* Also return the active request so that we only search for it once.
*/
-struct drm_i915_gem_request *
+struct i915_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request = NULL;
+ struct i915_request *request = NULL;
/*
* During the reset sequence, we must prevent the engine from
@@ -2908,13 +2944,31 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
* Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request
- * to a second via its engine->irq_tasklet *just* as we are
+ * to a second via its execlists->tasklet *just* as we are
* calling engine->init_hw() and also writing the ELSP.
- * Turning off the engine->irq_tasklet until the reset is over
+ * Turning off the execlists->tasklet until the reset is over
* prevents the race.
+ *
+ * Note that this needs to be a single atomic operation on the
+ * tasklet (flush existing tasks, prevent new tasks) to prevent
+ * a race between reset and set-wedged. It is not, so for now we do the
+ * best we can and make sure we don't lock the machine up in the more
+ * common case of recursively being called from set-wedged from inside
+ * i915_reset.
+ */
+ if (!atomic_read(&engine->execlists.tasklet.count))
+ tasklet_kill(&engine->execlists.tasklet);
+ tasklet_disable(&engine->execlists.tasklet);
+
+ /*
+ * We're using a worker to queue preemption requests from the tasklet in
+ * GuC submission mode.
+ * Even though the tasklet was disabled, we may still have a worker queued.
+ * Let's make sure that all workers scheduled before disabling the
+ * tasklet are completed before continuing with the reset.
*/
- tasklet_kill(&engine->execlists.irq_tasklet);
- tasklet_disable(&engine->execlists.irq_tasklet);
+ if (engine->i915->guc.preempt_wq)
+ flush_workqueue(engine->i915->guc.preempt_wq);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@@ -2929,7 +2983,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
enum intel_engine_id id;
int err = 0;
@@ -2948,7 +3002,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
return err;
}
-static void skip_request(struct drm_i915_gem_request *request)
+static void skip_request(struct i915_request *request)
{
void *vaddr = request->ring->vaddr;
u32 head;
@@ -2967,7 +3021,7 @@ static void skip_request(struct drm_i915_gem_request *request)
dma_fence_set_error(&request->fence, -EIO);
}
-static void engine_skip_context(struct drm_i915_gem_request *request)
+static void engine_skip_context(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct i915_gem_context *hung_ctx = request->ctx;
@@ -2991,9 +3045,9 @@ static void engine_skip_context(struct drm_i915_gem_request *request)
}
/* Returns the request if it was guilty of the hang */
-static struct drm_i915_gem_request *
+static struct i915_request *
i915_gem_reset_request(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/* The guilty request will get skipped on a hung engine.
*
@@ -3047,9 +3101,14 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
}
void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
- engine->irq_posted = 0;
+ /*
+ * Make sure this write is visible before we re-enable the interrupt
+ * handlers on another CPU, as tasklet_enable() resolves to just
+ * a compiler barrier which is insufficient for our purpose here.
+ */
+ smp_store_mb(engine->irq_posted, 0);
if (request)
request = i915_gem_reset_request(engine, request);
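The smp_store_mb() above matters because, per Documentation/memory-barriers.txt, it is roughly a WRITE_ONCE() followed by a full barrier, whereas the plain store it replaces was only ordered by the compiler barrier implied by tasklet_enable(). Approximately:

	/* roughly what smp_store_mb(engine->irq_posted, 0) expands to */
	WRITE_ONCE(engine->irq_posted, 0);
	smp_mb();	/* publish the clear before the irq handler may run again */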
@@ -3070,7 +3129,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
struct i915_gem_context *ctx;
@@ -3079,6 +3138,25 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
engine->context_unpin(engine, ctx);
+
+ /*
+ * Ostensibly, we always want a context loaded for powersaving,
+ * so if the engine is idle after the reset, send a request
+ * to load our scratch kernel_context.
+ *
+ * More mysteriously, if we leave the engine idle after a reset,
+ * the next userspace batch may hang, with what appears to be
+ * an incoherent read by the CS (presumably stale TLB). An
+ * empty request appears sufficient to paper over the glitch.
+ */
+ if (intel_engine_is_idle(engine)) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine,
+ dev_priv->kernel_context);
+ if (!IS_ERR(rq))
+ __i915_request_add(rq, false);
+ }
}
i915_gem_restore_fences(dev_priv);
@@ -3093,7 +3171,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->execlists.irq_tasklet);
+ tasklet_enable(&engine->execlists.tasklet);
kthread_unpark(engine->breadcrumbs.signaler);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
@@ -3112,21 +3190,21 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
}
}
-static void nop_submit_request(struct drm_i915_gem_request *request)
+static void nop_submit_request(struct i915_request *request)
{
dma_fence_set_error(&request->fence, -EIO);
- i915_gem_request_submit(request);
+ i915_request_submit(request);
}
-static void nop_complete_submit_request(struct drm_i915_gem_request *request)
+static void nop_complete_submit_request(struct i915_request *request)
{
unsigned long flags;
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&request->engine->timeline->lock, flags);
- __i915_gem_request_submit(request);
+ __i915_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}
@@ -3136,13 +3214,28 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ for_each_engine(engine, i915, id)
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
+
+ set_bit(I915_WEDGED, &i915->gpu_error.flags);
+ smp_mb__after_atomic();
+
/*
* First, stop submission to hw, but do not yet complete requests by
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
+ i915_gem_reset_prepare_engine(engine);
+
engine->submit_request = nop_submit_request;
+ engine->schedule = NULL;
+ }
+ i915->caps.scheduler = 0;
/*
* Make sure no one is running the old callback before we proceed with
@@ -3172,7 +3265,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) {
unsigned long flags;
- /* Mark all pending requests as complete so that any concurrent
+ /*
+ * Mark all pending requests as complete so that any concurrent
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
*/
@@ -3180,9 +3274,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
intel_engine_init_global_seqno(engine,
intel_engine_last_submit(engine));
spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+ i915_gem_reset_finish_engine(engine);
}
- set_bit(I915_WEDGED, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
}
@@ -3206,7 +3301,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
*/
list_for_each_entry(tl, &i915->gt.timelines, link) {
for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
rq = i915_gem_active_peek(&tl->engine[i].last_request,
&i915->drm.struct_mutex);
@@ -3255,28 +3350,93 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (mutex_trylock(&dev->struct_mutex)) {
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
- /* Keep the retire handler running until we are finally idle.
+ /*
+ * Keep the retire handler running until we are finally idle.
* We do not need to do this test under locking as in the worst-case
* we queue the retire worker once too often.
*/
- if (READ_ONCE(dev_priv->gt.awake)) {
- i915_queue_hangcheck(dev_priv);
+ if (READ_ONCE(dev_priv->gt.awake))
queue_delayed_work(dev_priv->wq,
&dev_priv->gt.retire_work,
round_jiffies_up_relative(HZ));
+}
+
+static void shrink_caches(struct drm_i915_private *i915)
+{
+ /*
+ * kmem_cache_shrink() discards empty slabs and reorders partially
+ * filled slabs to prioritise allocating from the mostly full slabs,
+ * with the aim of reducing fragmentation.
+ */
+ kmem_cache_shrink(i915->priorities);
+ kmem_cache_shrink(i915->dependencies);
+ kmem_cache_shrink(i915->requests);
+ kmem_cache_shrink(i915->luts);
+ kmem_cache_shrink(i915->vmas);
+ kmem_cache_shrink(i915->objects);
+}
+
+struct sleep_rcu_work {
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
+ struct drm_i915_private *i915;
+ unsigned int epoch;
+};
+
+static inline bool
+same_epoch(struct drm_i915_private *i915, unsigned int epoch)
+{
+ /*
+ * There is a small chance that the epoch wrapped since we started
+ * sleeping. If we assume that epoch is at least a u32, then it will
+ * take at least 2^32 * 100ms for it to wrap, or about 13.6 years.
+ */
+ return epoch == READ_ONCE(i915->gt.epoch);
+}
+
+static void __sleep_work(struct work_struct *work)
+{
+ struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
+ struct drm_i915_private *i915 = s->i915;
+ unsigned int epoch = s->epoch;
+
+ kfree(s);
+ if (same_epoch(i915, epoch))
+ shrink_caches(i915);
+}
+
+static void __sleep_rcu(struct rcu_head *rcu)
+{
+ struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
+ struct drm_i915_private *i915 = s->i915;
+
+ if (same_epoch(i915, s->epoch)) {
+ INIT_WORK(&s->work, __sleep_work);
+ queue_work(i915->wq, &s->work);
+ } else {
+ kfree(s);
}
}
+static inline bool
+new_requests_since_last_retire(const struct drm_i915_private *i915)
+{
+ return (READ_ONCE(i915->gt.active_requests) ||
+ work_pending(&i915->gt.idle_work.work));
+}
+
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), gt.idle_work.work);
- struct drm_device *dev = &dev_priv->drm;
+ unsigned int epoch = I915_EPOCH_INVALID;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
@@ -3284,16 +3444,21 @@ i915_gem_idle_work_handler(struct work_struct *work)
/*
* Wait for last execlists context complete, but bail out in case a
- * new request is submitted.
+ * new request is submitted. As we don't trust the hardware, we
+ * continue on if the wait times out. This is necessary to allow
+ * the machine to suspend even if the hardware dies, and we will
+ * try to recover in resume (after depriving the hardware of power,
+ * it may be in a better mood).
*/
- wait_for(intel_engines_are_idle(dev_priv), 10);
- if (READ_ONCE(dev_priv->gt.active_requests))
- return;
+ __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
+ intel_engines_are_idle(dev_priv),
+ I915_IDLE_ENGINES_TIMEOUT * 1000,
+ 10, 500);
rearm_hangcheck =
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
/* Currently busy, come back later */
mod_delayed_work(dev_priv->wq,
&dev_priv->gt.idle_work,
@@ -3305,33 +3470,64 @@ i915_gem_idle_work_handler(struct work_struct *work)
* New request retired after this work handler started, extend active
* period until next instance of the work.
*/
- if (work_pending(work))
+ if (new_requests_since_last_retire(dev_priv))
goto out_unlock;
- if (dev_priv->gt.active_requests)
- goto out_unlock;
+ /*
+ * Be paranoid and flush a concurrent interrupt to make sure
+ * we don't reactivate any irq tasklets after parking.
+ *
+ * FIXME: Note that even though we have waited for execlists to be idle,
+ * there may still be an in-flight interrupt even though the CSB
+ * is now empty. synchronize_irq() makes sure that a residual interrupt
+ * is completed before we continue, but it doesn't prevent the HW from
+ * raising a spurious interrupt later. To complete the shield we should
+ * coordinate disabling the CS irq with flushing the interrupts.
+ */
+ synchronize_irq(dev_priv->drm.irq);
- if (wait_for(intel_engines_are_idle(dev_priv), 10))
- DRM_ERROR("Timeout waiting for engines to idle\n");
+ intel_engines_park(dev_priv);
+ i915_gem_timelines_park(dev_priv);
- intel_engines_mark_idle(dev_priv);
- i915_gem_timelines_mark_idle(dev_priv);
+ i915_pmu_gt_parked(dev_priv);
GEM_BUG_ON(!dev_priv->gt.awake);
dev_priv->gt.awake = false;
+ epoch = dev_priv->gt.epoch;
+ GEM_BUG_ON(epoch == I915_EPOCH_INVALID);
rearm_hangcheck = false;
if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_idle(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
+
intel_runtime_pm_put(dev_priv);
out_unlock:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
out_rearm:
if (rearm_hangcheck) {
GEM_BUG_ON(!dev_priv->gt.awake);
i915_queue_hangcheck(dev_priv);
}
+
+ /*
+ * When we are idle, it is an opportune time to reap our caches.
+ * However, we have many objects that utilise RCU and the ordered
+ * i915->wq that this work is executing on. To try and flush any
+ * pending frees now we are idle, we first wait for an RCU grace
+ * period, and then queue a task (that will run last on the wq) to
+ * shrink and re-optimize the caches.
+ */
+ if (same_epoch(dev_priv, epoch)) {
+ struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s) {
+ s->i915 = dev_priv;
+ s->epoch = epoch;
+ call_rcu(&s->rcu, __sleep_rcu);
+ }
+ }
}
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
@@ -3467,8 +3663,19 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
static int wait_for_engines(struct drm_i915_private *i915)
{
- if (wait_for(intel_engines_are_idle(i915), 50)) {
- DRM_ERROR("Failed to idle engines, declaring wedged!\n");
+ if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
+ dev_err(i915->drm.dev,
+ "Failed to idle engines, declaring wedged!\n");
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer(__func__);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ }
+
i915_gem_set_wedged(i915);
return -EIO;
}
@@ -3494,9 +3701,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
if (ret)
return ret;
}
-
- i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
+ i915_retire_requests(i915);
ret = wait_for_engines(i915);
} else {
@@ -3515,7 +3720,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
if (obj->cache_dirty)
i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
}
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
@@ -3552,7 +3757,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
+ if (obj->write_domain == I915_GEM_DOMAIN_WC)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
@@ -3573,17 +3778,17 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
* coherent writes from the GPU, by effectively invalidating the
* WC domain upon first access.
*/
- if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_WC;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_WC;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_WC;
- obj->base.write_domain = I915_GEM_DOMAIN_WC;
+ obj->read_domains = I915_GEM_DOMAIN_WC;
+ obj->write_domain = I915_GEM_DOMAIN_WC;
obj->mm.dirty = true;
}
@@ -3615,7 +3820,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ if (obj->write_domain == I915_GEM_DOMAIN_GTT)
return 0;
/* Flush and acquire obj->pages so that we are coherent through
@@ -3636,17 +3841,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* coherent writes from the GPU, by effectively invalidating the
* GTT domain upon first access.
*/
- if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
}
@@ -3695,7 +3900,8 @@ restart:
return -EBUSY;
}
- if (i915_gem_valid_gtt_space(vma, cache_level))
+ if (!i915_vma_is_closed(vma) &&
+ i915_gem_valid_gtt_space(vma, cache_level))
continue;
ret = i915_vma_unbind(vma);
@@ -3748,7 +3954,7 @@ restart:
* dropped the fence as all snoopable access is
* supposed to be linear.
*/
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ for_each_ggtt_vma(vma, obj) {
ret = i915_vma_put_fence(vma);
if (ret)
return ret;
@@ -3850,6 +4056,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
+ /*
+ * The caching mode of proxy object is handled by its generator, and
+ * not allowed to be changed by userspace.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
if (obj->cache_level == level)
goto out;
@@ -3880,7 +4095,8 @@ out:
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- const struct i915_ggtt_view *view)
+ const struct i915_ggtt_view *view,
+ unsigned int flags)
{
struct i915_vma *vma;
int ret;
@@ -3917,25 +4133,14 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* try to preserve the existing ABI).
*/
vma = ERR_PTR(-ENOSPC);
- if (!view || view->type == I915_GGTT_VIEW_NORMAL)
+ if ((flags & PIN_MAPPABLE) == 0 &&
+ (!view || view->type == I915_GGTT_VIEW_NORMAL))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
- PIN_MAPPABLE | PIN_NONBLOCK);
- if (IS_ERR(vma)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned int flags;
-
- /* Valleyview is definitely limited to scanning out the first
- * 512MiB. Lets presume this behaviour was inherited from the
- * g4x display engine and that all earlier gen are similarly
- * limited. Testing suggests that it is a little more
- * complicated than this. For example, Cherryview appears quite
- * happy to scanout from anywhere within its global aperture.
- */
- flags = 0;
- if (HAS_GMCH_DISPLAY(i915))
- flags = PIN_MAPPABLE;
+ flags |
+ PIN_MAPPABLE |
+ PIN_NONBLOCK);
+ if (IS_ERR(vma))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
- }
if (IS_ERR(vma))
goto err_unpin_global;
@@ -3948,7 +4153,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
@@ -4001,15 +4206,15 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
@@ -4036,7 +4241,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
- struct drm_i915_gem_request *request, *target = NULL;
+ struct i915_request *request, *target = NULL;
long ret;
/* ABI: return -EIO if already wedged */
@@ -4056,16 +4261,16 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
target = request;
}
if (target)
- i915_gem_request_get(target);
+ i915_request_get(target);
spin_unlock(&file_priv->mm.lock);
if (target == NULL)
return 0;
- ret = i915_wait_request(target,
+ ret = i915_request_wait(target,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(target);
+ i915_request_put(target);
return ret < 0 ? ret : 0;
}
@@ -4084,7 +4289,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
- if (!view && flags & PIN_MAPPABLE) {
+ if (flags & PIN_MAPPABLE &&
+ (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/* If the required space is larger than the available
* aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
@@ -4178,7 +4384,7 @@ static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence,
unsigned int (*flag)(unsigned int id))
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
/* We have to check the current hw status of the fence as the uABI
* guarantees forward progress. We could rely on the idle worker
@@ -4191,8 +4397,8 @@ __busy_set_if_active(const struct dma_fence *fence,
return 0;
/* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, struct drm_i915_gem_request, fence);
- if (i915_gem_request_completed(rq))
+ rq = container_of(fence, struct i915_request, fence);
+ if (i915_request_completed(rq))
return 0;
return flag(rq->engine->uabi_id);
@@ -4337,8 +4543,7 @@ out:
}
static void
-frontbuffer_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
+frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
{
struct drm_i915_gem_object *obj =
container_of(active, typeof(*obj), frontbuffer_write);
@@ -4445,8 +4650,8 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
i915_gem_object_init(obj, &i915_gem_object_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
if (HAS_LLC(dev_priv))
/* On some devices, we can have the GPU use the LLC (the CPU
@@ -4560,6 +4765,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
kfree(obj->bit_17);
i915_gem_object_free(obj);
+ GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+ atomic_dec(&i915->mm.free_count);
+
if (on)
cond_resched();
}
@@ -4590,7 +4798,8 @@ static void __i915_gem_free_work(struct work_struct *work)
container_of(work, struct drm_i915_private, mm.free_work);
struct llist_node *freed;
- /* All file-owned VMA should have been released by this point through
+ /*
+ * All file-owned VMA should have been released by this point through
* i915_gem_close_object(), or earlier by i915_gem_context_close().
* However, the object may also be bound into the global GTT (e.g.
* older GPUs without per-process support, or for direct access through
@@ -4617,13 +4826,18 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- /* We can't simply use call_rcu() from i915_gem_free_object()
- * as we need to block whilst unbinding, and the call_rcu
- * task may be called from softirq context. So we take a
- * detour through a worker.
+ /*
+ * Since we require blocking on struct_mutex to unbind the freed
+ * object from the GPU before releasing resources back to the
+ * system, we cannot do that directly from the RCU callback (which may
+ * be a softirq context), but must instead defer that work onto a
+ * kthread. We use the RCU callback rather than move the freed object
+ * directly onto the work queue so that we can mix between using the
+ * worker and performing frees directly from subsequent allocations for
+ * crude but effective memory throttling.
*/
if (llist_add(&obj->freed, &i915->mm.free_list))
- schedule_work(&i915->mm.free_work);
+ queue_work(i915->wq, &i915->mm.free_work);
}
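Flushing these deferred frees therefore has to wait for both the RCU grace period and the worker queued on i915->wq. A hypothetical drain helper (illustrative sketch only; the driver's real helper is not shown in this diff) could look like:

static inline void example_drain_freed_objects(struct drm_i915_private *i915)
{
	/* wait for every call_rcu(&obj->rcu, ...) callback to have run ... */
	rcu_barrier();
	/* ... and for the free worker it queued onto i915->wq to finish */
	flush_work(&i915->mm.free_work);
}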
void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -4636,11 +4850,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (discard_backing_storage(obj))
obj->mm.madv = I915_MADV_DONTNEED;
- /* Before we free the object, make sure any pure RCU-only
+ /*
+ * Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.
* i915_gem_busy_ioctl(). For the corresponding synchronized
* lookup see i915_gem_object_lookup_rcu().
*/
+ atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
@@ -4655,14 +4871,16 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
-static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
+static void assert_kernel_context_is_current(struct drm_i915_private *i915)
{
+ struct i915_gem_context *kernel_context = i915->kernel_context;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id)
- GEM_BUG_ON(engine->last_retired_context &&
- !i915_gem_context_is_kernel(engine->last_retired_context));
+ for_each_engine(engine, i915, id) {
+ GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
+ GEM_BUG_ON(engine->last_retired_context != kernel_context);
+ }
}
void i915_gem_sanitize(struct drm_i915_private *i915)
@@ -4681,10 +4899,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- if (INTEL_GEN(i915) >= 5) {
- int reset = intel_gpu_reset(i915, ALL_ENGINES);
- WARN_ON(reset && reset != -ENODEV);
- }
+ if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
+ WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
}
int i915_gem_suspend(struct drm_i915_private *dev_priv)
@@ -4721,7 +4937,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
i915_gem_contexts_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
- intel_guc_suspend(dev_priv);
+ intel_uc_suspend(dev_priv);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
@@ -4768,23 +4984,43 @@ err_unlock:
return ret;
}
-void i915_gem_resume(struct drm_i915_private *dev_priv)
+void i915_gem_resume(struct drm_i915_private *i915)
{
- struct drm_device *dev = &dev_priv->drm;
+ WARN_ON(i915->gt.awake);
- WARN_ON(dev_priv->gt.awake);
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev_priv);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_gtt_mappings(i915);
+ i915_gem_restore_fences(i915);
- /* As we didn't flush the kernel context before suspend, we cannot
+ /*
+ * As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- dev_priv->gt.resume(dev_priv);
+ i915->gt.resume(i915);
- mutex_unlock(&dev->struct_mutex);
+ if (i915_gem_init_hw(i915))
+ goto err_wedged;
+
+ intel_uc_resume(i915);
+
+ /* Always reload a context for powersaving. */
+ if (i915_gem_switch_to_kernel_context(i915))
+ goto err_wedged;
+
+out_unlock:
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return;
+
+err_wedged:
+ if (!i915_terminally_wedged(&i915->gpu_error)) {
+ DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ i915_gem_set_wedged(i915);
+ }
+ goto out_unlock;
}
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
@@ -4844,8 +5080,11 @@ static int __i915_gem_restart_engines(void *data)
for_each_engine(engine, i915, id) {
err = engine->init_hw(engine);
- if (err)
+ if (err) {
+ DRM_ERROR("Failed to restart %s (%d)\n",
+ engine->name, err);
return err;
+ }
}
return 0;
@@ -4897,44 +5136,138 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
- DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+ DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
goto out;
}
- /* Need to do basic initialisation of all rings first: */
- ret = __i915_gem_restart_engines(dev_priv);
- if (ret)
- goto out;
-
- intel_mocs_init_l3cc_table(dev_priv);
-
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(dev_priv);
- if (ret)
+ if (ret) {
+ DRM_ERROR("Enabling uc failed (%d)\n", ret);
goto out;
+ }
+
+ intel_mocs_init_l3cc_table(dev_priv);
+ /* Only when the HW is re-initialised, can we replay the requests */
+ ret = __i915_gem_restart_engines(dev_priv);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
-bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
+static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
- if (INTEL_INFO(dev_priv)->gen < 6)
- return false;
+ struct i915_gem_context *ctx;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
- /* TODO: make semaphores and Execlists play nicely together */
- if (i915_modparams.enable_execlists)
- return false;
+ /*
+ * As we reset the GPU during very early sanitisation, the current
+ * register state on the GPU should reflect its default values.
+ * We load a context onto the hw (with restore-inhibit), then switch
+ * over to a second context to save that default register state. We
+ * can then prime every new context with that state so they all start
+ * from the same default HW values.
+ */
- if (value >= 0)
- return value;
+ ctx = i915_gem_context_create_kernel(i915, 0);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- /* Enable semaphores on SNB when IO remapping is off */
- if (IS_GEN6(dev_priv) && intel_vtd_active())
- return false;
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
- return true;
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ctx;
+ }
+
+ err = 0;
+ if (engine->init_context)
+ err = engine->init_context(rq);
+
+ __i915_request_add(rq, true);
+ if (err)
+ goto err_active;
+ }
+
+ err = i915_gem_switch_to_kernel_context(i915);
+ if (err)
+ goto err_active;
+
+ err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ if (err)
+ goto err_active;
+
+ assert_kernel_context_is_current(i915);
+
+ for_each_engine(engine, i915, id) {
+ struct i915_vma *state;
+
+ state = ctx->engine[id].state;
+ if (!state)
+ continue;
+
+ /*
+ * As we will hold a reference to the logical state, it will
+ * not be torn down with the context, and importantly the
+ * object will hold onto its vma (making it possible for a
+ * stray GTT write to corrupt our defaults). Unmap the vma
+ * from the GTT to prevent such accidents and reclaim the
+ * space.
+ */
+ err = i915_vma_unbind(state);
+ if (err)
+ goto err_active;
+
+ err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+ if (err)
+ goto err_active;
+
+ engine->default_state = i915_gem_object_get(state->obj);
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+ unsigned int found = intel_engines_has_context_isolation(i915);
+
+ /*
+ * Make sure that classes with multiple engine instances all
+ * share the same basic configuration.
+ */
+ for_each_engine(engine, i915, id) {
+ unsigned int bit = BIT(engine->uabi_class);
+ unsigned int expected = engine->default_state ? bit : 0;
+
+ if ((found & bit) != expected) {
+ DRM_ERROR("mismatching default context state for class %d on engine %s\n",
+ engine->uabi_class, engine->name);
+ }
+ }
+ }
+
+out_ctx:
+ i915_gem_context_set_closed(ctx);
+ i915_gem_context_put(ctx);
+ return err;
+
+err_active:
+ /*
+ * If we have to abandon now, we expect the engines to be idle
+ * and ready to be torn-down. First try to flush any remaining
+ * request, ensure we are pointing at the kernel context and
+ * then remove it.
+ */
+ if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
+ goto out_ctx;
+
+ if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
+ goto out_ctx;
+
+ i915_gem_contexts_lost(i915);
+ goto out_ctx;
}
int i915_gem_init(struct drm_i915_private *dev_priv)
@@ -4952,18 +5285,22 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (!i915_modparams.enable_execlists) {
- dev_priv->gt.resume = intel_legacy_submission_resume;
- dev_priv->gt.cleanup_engine = intel_engine_cleanup;
- } else {
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->gt.resume = intel_lr_context_resume;
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+ } else {
+ dev_priv->gt.resume = intel_legacy_submission_resume;
+ dev_priv->gt.cleanup_engine = intel_engine_cleanup;
}
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
+ ret = intel_uc_init_misc(dev_priv);
+ if (ret)
+ return ret;
+
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
* used by the CS may be stale, despite us poking the TLB reset. If
@@ -4974,20 +5311,96 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = i915_gem_init_ggtt(dev_priv);
- if (ret)
- goto out_unlock;
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_unlock;
+ }
ret = i915_gem_contexts_init(dev_priv);
- if (ret)
- goto out_unlock;
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_ggtt;
+ }
ret = intel_engines_init(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_context;
+ }
+
+ intel_init_gt_powersave(dev_priv);
+
+ ret = intel_uc_init(dev_priv);
if (ret)
- goto out_unlock;
+ goto err_pm;
ret = i915_gem_init_hw(dev_priv);
+ if (ret)
+ goto err_uc_init;
+
+ /*
+ * Despite its name, intel_init_clock_gating applies display clock
+ * gating workarounds, GT mmio workarounds and the occasional GT power
+ * context workaround. Worse, sometimes it includes a context
+ * register workaround which we need to apply before we record the
+ * default HW state for all contexts.
+ *
+ * FIXME: break up the workarounds and apply them at the right time!
+ */
+ intel_init_clock_gating(dev_priv);
+
+ ret = __intel_engines_record_defaults(dev_priv);
+ if (ret)
+ goto err_init_hw;
+
+ if (i915_inject_load_failure()) {
+ ret = -ENODEV;
+ goto err_init_hw;
+ }
+
+ if (i915_inject_load_failure()) {
+ ret = -EIO;
+ goto err_init_hw;
+ }
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ return 0;
+
+ /*
+ * Unwinding is complicated by the fact that we want to handle -EIO to
+ * mean disable GPU submission but keep KMS alive. We want to mark the
+ * HW as irreversibly wedged, but keep enough state around that the
+ * driver doesn't explode during runtime.
+ */
+err_init_hw:
+ i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+ i915_gem_contexts_lost(dev_priv);
+ intel_uc_fini_hw(dev_priv);
+err_uc_init:
+ intel_uc_fini(dev_priv);
+err_pm:
+ if (ret != -EIO) {
+ intel_cleanup_gt_powersave(dev_priv);
+ i915_gem_cleanup_engines(dev_priv);
+ }
+err_context:
+ if (ret != -EIO)
+ i915_gem_contexts_fini(dev_priv);
+err_ggtt:
+err_unlock:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_uc_fini_misc(dev_priv);
+
+ if (ret != -EIO)
+ i915_gem_cleanup_userptr(dev_priv);
+
if (ret == -EIO) {
- /* Allow engine initialisation to fail by marking the GPU as
+ /*
+ * Allow engine initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
* for all other failures, such as an allocation failure, bail.
*/
@@ -4998,10 +5411,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
ret = 0;
}
-out_unlock:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
+ i915_gem_drain_freed_objects(dev_priv);
return ret;
}
@@ -5025,10 +5435,10 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
int i;
- if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
+ if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
dev_priv->num_fence_regs = 32;
- else if (INTEL_INFO(dev_priv)->gen >= 4 ||
+ else if (INTEL_GEN(dev_priv) >= 4 ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
dev_priv->num_fence_regs = 16;
@@ -5052,6 +5462,22 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
i915_gem_detect_bit_6_swizzle(dev_priv);
}
+static void i915_gem_init__mm(struct drm_i915_private *i915)
+{
+ spin_lock_init(&i915->mm.object_stat_lock);
+ spin_lock_init(&i915->mm.obj_lock);
+ spin_lock_init(&i915->mm.free_lock);
+
+ init_llist_head(&i915->mm.free_list);
+
+ INIT_LIST_HEAD(&i915->mm.unbound_list);
+ INIT_LIST_HEAD(&i915->mm.bound_list);
+ INIT_LIST_HEAD(&i915->mm.fence_list);
+ INIT_LIST_HEAD(&i915->mm.userfault_list);
+
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+}
+
int
i915_gem_load_init(struct drm_i915_private *dev_priv)
{
@@ -5069,7 +5495,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (!dev_priv->luts)
goto err_vmas;
- dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
+ dev_priv->requests = KMEM_CACHE(i915_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
@@ -5093,15 +5519,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (err)
goto err_priorities;
- INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
-
- spin_lock_init(&dev_priv->mm.obj_lock);
- spin_lock_init(&dev_priv->mm.free_lock);
- init_llist_head(&dev_priv->mm.free_list);
- INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
- INIT_LIST_HEAD(&dev_priv->mm.bound_list);
- INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
+ i915_gem_init__mm(dev_priv);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
@@ -5139,7 +5557,8 @@ err_out:
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+ GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
+ GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.object_count);
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -5209,7 +5628,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
@@ -5295,7 +5714,7 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
if (IS_ERR(obj))
return obj;
- GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
offset = 0;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index ee54597..f54c4ff 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,7 +28,14 @@
#include <linux/bug.h>
#ifdef CONFIG_DRM_I915_DEBUG_GEM
-#define GEM_BUG_ON(expr) BUG_ON(expr)
+#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
+ pr_err("%s:%d GEM_BUG_ON(%s)\n", \
+ __func__, __LINE__, __stringify(condition)); \
+ GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
+ __func__, __LINE__, __stringify(condition)); \
+ BUG(); \
+ } \
+ } while(0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
#define GEM_DEBUG_DECL(var) var
@@ -44,6 +51,12 @@
#define GEM_DEBUG_BUG_ON(expr)
#endif
-#define I915_NUM_ENGINES 5
+#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
+#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
+#else
+#define GEM_TRACE(...) do { } while (0)
+#endif
+
+#define I915_NUM_ENGINES 8
#endif /* __I915_GEM_H__ */
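A sketch of how the reworked debug macros behave with CONFIG_DRM_I915_DEBUG_GEM enabled (illustrative only; example_validate() is a made-up caller): the failing expression is now reported through pr_err() and trace_printk() before BUG() fires, and GEM_TRACE() feeds the same trace buffer when CONFIG_DRM_I915_TRACE_GEM is set.

static inline void example_validate(struct i915_vma *vma)
{
	GEM_TRACE("checking vma %p\n", vma);	/* trace_printk() when tracing is enabled */
	GEM_BUG_ON(vma->size == 0);	/* logs "%s:%d GEM_BUG_ON(vma->size == 0)", then BUG() */
}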
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index c93005c..d3cbe84 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -119,7 +119,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (!reservation_object_test_signaled_rcu(resv, true))
break;
- i915_gem_retire_requests(pool->engine->i915);
+ i915_retire_requests(pool->engine->i915);
GEM_BUG_ON(i915_gem_object_is_active(obj));
/*
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index f663cd9..f5c570d 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -167,7 +167,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
i915_sw_fence_await_reservation(&clflush->wait,
obj->resv, NULL,
true, I915_FENCE_TIMEOUT,
- GFP_KERNEL);
+ I915_FENCE_GFP);
reservation_object_lock(obj->resv, NULL);
reservation_object_add_excl_fence(obj->resv, &clflush->dma);
@@ -177,7 +177,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
} else if (obj->mm.pages) {
__i915_do_clflush(obj);
} else {
- GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
obj->cache_dirty = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f782cf2..f2cbea7 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -211,17 +211,23 @@ static void context_close(struct i915_gem_context *ctx)
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
int ret;
+ unsigned int max;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ max = GEN11_MAX_CONTEXT_HW_ID;
+ else
+ max = MAX_CONTEXT_HW_ID;
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
- 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ 0, max, GFP_KERNEL);
if (ret < 0) {
/* Contexts are only released when no longer active.
* Flush any pending retires to hopefully release some
* stale contexts and try again.
*/
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
- 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ 0, max, GFP_KERNEL);
if (ret < 0)
return ret;
}
@@ -316,7 +322,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
* present or not in use we still need a small bias as ring wraparound
* at offset 0 sometimes hangs. No idea why.
*/
- if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
+ if (USES_GUC(dev_priv))
ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
else
ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
@@ -338,11 +344,6 @@ static void __destroy_hw_context(struct i915_gem_context *ctx,
context_close(ctx);
}
-/**
- * The default context needs to exist per ring that uses contexts. It stores the
- * context state of the GPU for applications that don't utilize HW contexts, as
- * well as an idle case.
- */
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
@@ -409,7 +410,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
- if (!i915_modparams.enable_guc_submission)
+ if (!USES_GUC_SUBMISSION(to_i915(dev)))
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
@@ -418,8 +419,8 @@ out:
return ctx;
}
-static struct i915_gem_context *
-create_kernel_context(struct drm_i915_private *i915, int prio)
+struct i915_gem_context *
+i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
struct i915_gem_context *ctx;
@@ -449,35 +450,33 @@ destroy_kernel_context(struct i915_gem_context **ctxp)
i915_gem_context_free(ctx);
}
+static bool needs_preempt_context(struct drm_i915_private *i915)
+{
+ return HAS_LOGICAL_RING_PREEMPTION(i915);
+}
+
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- int err;
+ /* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
+ GEM_BUG_ON(dev_priv->preempt_context);
INIT_LIST_HEAD(&dev_priv->contexts.list);
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
init_llist_head(&dev_priv->contexts.free_list);
- if (intel_vgpu_active(dev_priv) &&
- HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- if (!i915_modparams.enable_execlists) {
- DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
- return -EINVAL;
- }
- }
-
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+ BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->contexts.hw_ida);
/* lowest priority; idle task */
- ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
+ ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n");
- err = PTR_ERR(ctx);
- goto err;
+ return PTR_ERR(ctx);
}
/*
* For easy recognisability, we want the kernel context to be 0 and then
@@ -487,23 +486,18 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
dev_priv->kernel_context = ctx;
/* highest priority; preempting task */
- ctx = create_kernel_context(dev_priv, INT_MAX);
- if (IS_ERR(ctx)) {
- DRM_ERROR("Failed to create default preempt context\n");
- err = PTR_ERR(ctx);
- goto err_kernel_context;
+ if (needs_preempt_context(dev_priv)) {
+ ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
+ if (!IS_ERR(ctx))
+ dev_priv->preempt_context = ctx;
+ else
+ DRM_ERROR("Failed to create preempt context; disabling preemption\n");
}
- dev_priv->preempt_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
dev_priv->engine[RCS]->context_size ? "logical" :
"fake");
return 0;
-
-err_kernel_context:
- destroy_kernel_context(&dev_priv->kernel_context);
-err:
- return err;
}
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -515,6 +509,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
engine->legacy_active_context = NULL;
+ engine->legacy_active_ppgtt = NULL;
if (!engine->last_retired_context)
continue;
@@ -522,35 +517,14 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
engine->context_unpin(engine, engine->last_retired_context);
engine->last_retired_context = NULL;
}
-
- /* Force the GPU state to be restored on enabling */
- if (!i915_modparams.enable_execlists) {
- struct i915_gem_context *ctx;
-
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- if (!i915_gem_context_is_default(ctx))
- continue;
-
- for_each_engine(engine, dev_priv, id)
- ctx->engine[engine->id].initialised = false;
-
- ctx->remap_slice = ALL_L3_SLICES(dev_priv);
- }
-
- for_each_engine(engine, dev_priv, id) {
- struct intel_context *kce =
- &dev_priv->kernel_context->engine[engine->id];
-
- kce->initialised = true;
- }
- }
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->drm.struct_mutex);
- destroy_kernel_context(&i915->preempt_context);
+ if (i915->preempt_context)
+ destroy_kernel_context(&i915->preempt_context);
destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
@@ -596,310 +570,7 @@ void i915_gem_context_close(struct drm_file *file)
idr_destroy(&file_priv->context_idr);
}
-static inline int
-mi_set_context(struct drm_i915_gem_request *req, u32 flags)
-{
- struct drm_i915_private *dev_priv = req->i915;
- struct intel_engine_cs *engine = req->engine;
- enum intel_engine_id id;
- const int num_rings =
- /* Use an extended w/a on gen7 if signalling from other rings */
- (i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
- INTEL_INFO(dev_priv)->num_rings - 1 :
- 0;
- int len;
- u32 *cs;
-
- flags |= MI_MM_SPACE_GTT;
- if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
- /* These flags are for resource streamer on HSW+ */
- flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
- else
- flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
- len = 4;
- if (INTEL_GEN(dev_priv) >= 7)
- len += 2 + (num_rings ? 4*num_rings + 6 : 0);
-
- cs = intel_ring_begin(req, len);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_GEN(dev_priv) >= 7) {
- *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- if (num_rings) {
- struct intel_engine_cs *signaller;
-
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
- for_each_engine(signaller, dev_priv, id) {
- if (signaller == engine)
- continue;
-
- *cs++ = i915_mmio_reg_offset(
- RING_PSMI_CTL(signaller->mmio_base));
- *cs++ = _MASKED_BIT_ENABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
- }
- }
- }
-
- *cs++ = MI_NOOP;
- *cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
- /*
- * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
- * WaMiSetContext_Hang:snb,ivb,vlv
- */
- *cs++ = MI_NOOP;
-
- if (INTEL_GEN(dev_priv) >= 7) {
- if (num_rings) {
- struct intel_engine_cs *signaller;
- i915_reg_t last_reg = {}; /* keep gcc quiet */
-
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
- for_each_engine(signaller, dev_priv, id) {
- if (signaller == engine)
- continue;
-
- last_reg = RING_PSMI_CTL(signaller->mmio_base);
- *cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = _MASKED_BIT_DISABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
- }
-
- /* Insert a delay before the next switch! */
- *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- *cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_ggtt_offset(engine->scratch);
- *cs++ = MI_NOOP;
- }
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- }
-
- intel_ring_advance(req, cs);
-
- return 0;
-}
-
-static int remap_l3(struct drm_i915_gem_request *req, int slice)
-{
- u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
- int i;
-
- if (!remap_info)
- return 0;
-
- cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Note: We do not worry about the concurrent register cacheline hang
- * here because no other code should access these registers other than
- * at initialization time.
- */
- *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
- for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
- *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
- *cs++ = remap_info[i];
- }
- *cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
-
- return 0;
-}
-
-static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *engine,
- struct i915_gem_context *to)
-{
- if (to->remap_slice)
- return false;
-
- if (!to->engine[RCS].initialised)
- return false;
-
- if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
- return false;
-
- return to == engine->legacy_active_context;
-}
-
-static bool
-needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
-{
- struct i915_gem_context *from = engine->legacy_active_context;
-
- if (!ppgtt)
- return false;
-
- /* Always load the ppgtt on first use */
- if (!from)
- return true;
-
- /* Same context without new entries, skip */
- if ((!from->ppgtt || from->ppgtt == ppgtt) &&
- !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
- return false;
-
- if (engine->id != RCS)
- return true;
-
- if (INTEL_GEN(engine->i915) < 8)
- return true;
-
- return false;
-}
-
-static bool
-needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
- struct i915_gem_context *to,
- u32 hw_flags)
-{
- if (!ppgtt)
- return false;
-
- if (!IS_GEN8(to->i915))
- return false;
-
- if (hw_flags & MI_RESTORE_INHIBIT)
- return true;
-
- return false;
-}
-
-static int do_rcs_switch(struct drm_i915_gem_request *req)
-{
- struct i915_gem_context *to = req->ctx;
- struct intel_engine_cs *engine = req->engine;
- struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- struct i915_gem_context *from = engine->legacy_active_context;
- u32 hw_flags;
- int ret, i;
-
- GEM_BUG_ON(engine->id != RCS);
-
- if (skip_rcs_switch(ppgtt, engine, to))
- return 0;
-
- if (needs_pd_load_pre(ppgtt, engine)) {
- /* Older GENs and non render rings still want the load first,
- * "PP_DCLV followed by PP_DIR_BASE register through Load
- * Register Immediate commands in Ring Buffer before submitting
- * a context."*/
- trace_switch_mm(engine, to);
- ret = ppgtt->switch_mm(ppgtt, req);
- if (ret)
- return ret;
- }
-
- if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
- /* NB: If we inhibit the restore, the context is not allowed to
- * die because future work may end up depending on valid address
- * space. This means we must enforce that a page table load
- * occur when this occurs. */
- hw_flags = MI_RESTORE_INHIBIT;
- else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
- hw_flags = MI_FORCE_RESTORE;
- else
- hw_flags = 0;
-
- if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
- ret = mi_set_context(req, hw_flags);
- if (ret)
- return ret;
-
- engine->legacy_active_context = to;
- }
-
- /* GEN8 does *not* require an explicit reload if the PDPs have been
- * setup, and we do not wish to move them.
- */
- if (needs_pd_load_post(ppgtt, to, hw_flags)) {
- trace_switch_mm(engine, to);
- ret = ppgtt->switch_mm(ppgtt, req);
- /* The hardware context switch is emitted, but we haven't
- * actually changed the state - so it's probably safe to bail
- * here. Still, let the user know something dangerous has
- * happened.
- */
- if (ret)
- return ret;
- }
-
- if (ppgtt)
- ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-
- for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(to->remap_slice & (1<<i)))
- continue;
-
- ret = remap_l3(req, i);
- if (ret)
- return ret;
-
- to->remap_slice &= ~(1<<i);
- }
-
- if (!to->engine[RCS].initialised) {
- if (engine->init_context) {
- ret = engine->init_context(req);
- if (ret)
- return ret;
- }
- to->engine[RCS].initialised = true;
- }
-
- return 0;
-}
-
-/**
- * i915_switch_context() - perform a GPU context switch.
- * @req: request for which we'll execute the context switch
- *
- * The context life cycle is simple. The context refcount is incremented and
- * decremented by 1 and create and destroy. If the context is in use by the GPU,
- * it will have a refcount > 1. This allows us to destroy the context abstract
- * object while letting the normal object tracking destroy the backing BO.
- *
- * This function should not be used in execlists mode. Instead the context is
- * switched by writing to the ELSP and requests keep a reference to their
- * context.
- */
-int i915_switch_context(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
-
- lockdep_assert_held(&req->i915->drm.struct_mutex);
- if (i915_modparams.enable_execlists)
- return 0;
-
- if (!req->ctx->engine[engine->id].state) {
- struct i915_gem_context *to = req->ctx;
- struct i915_hw_ppgtt *ppgtt =
- to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
-
- if (needs_pd_load_pre(ppgtt, engine)) {
- int ret;
-
- trace_switch_mm(engine, to);
- ret = ppgtt->switch_mm(ppgtt, req);
- if (ret)
- return ret;
-
- ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
- }
-
- engine->legacy_active_context = to;
- return 0;
- }
-
- return do_rcs_switch(req);
-}
-
-static bool engine_has_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
{
struct i915_gem_timeline *timeline;
@@ -915,8 +586,7 @@ static bool engine_has_kernel_context(struct intel_engine_cs *engine)
return false;
}
- return (!engine->last_retired_context ||
- i915_gem_context_is_kernel(engine->last_retired_context));
+ return intel_engine_has_kernel_context(engine);
}
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
@@ -927,37 +597,40 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct drm_i915_gem_request *req;
- int ret;
+ struct i915_request *rq;
- if (engine_has_kernel_context(engine))
+ if (engine_has_idle_kernel_context(engine))
continue;
- req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
struct intel_timeline *tl;
tl = &timeline->engine[engine->id];
prev = i915_gem_active_raw(&tl->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
- i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
- GFP_KERNEL);
+ I915_FENCE_GFP);
}
- ret = i915_switch_context(req);
- i915_add_request(req);
- if (ret)
- return ret;
+ /*
+ * Force a flush after the switch to ensure that all rendering
+ * and operations prior to switching to the kernel context hit
+ * memory. This should be guaranteed by the previous request,
+ * but an extra layer of paranoia before we declare the system
+ * idle (on suspend etc) is advisable!
+ */
+ __i915_request_add(rq, true);
}
return 0;
@@ -1133,11 +806,11 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_PRIORITY:
{
- int priority = args->value;
+ s64 priority = args->value;
if (args->size)
ret = -EINVAL;
- else if (!to_i915(dev)->engine[RCS]->schedule)
+ else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
ret = -ENODEV;
else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
priority < I915_CONTEXT_MIN_USER_PRIORITY)
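
The priority-parameter hunk above widens the user-supplied value to s64 before range checking and gates the feature on the scheduler capability bit rather than an engine vfunc. The sketch below is only an illustration of why keeping the comparison at 64-bit width matters: with a 32-bit intermediate, a large value could alias into range after truncation and slip past the bounds check. The bounds mirror I915_CONTEXT_MIN/MAX_USER_PRIORITY from the hunk; everything prefixed example_ is an assumption made for this sketch, not driver code.

#include <errno.h>
#include <stdint.h>

#define EXAMPLE_MAX_USER_PRIORITY  1023
#define EXAMPLE_MIN_USER_PRIORITY -1023

/* Validate a user-supplied 64-bit priority before narrowing it. */
static int example_set_priority(int64_t value, int *out)
{
	if (value > EXAMPLE_MAX_USER_PRIORITY ||
	    value < EXAMPLE_MIN_USER_PRIORITY)
		return -EINVAL;		/* reject before narrowing */

	*out = (int)value;		/* now provably fits in an int */
	return 0;
}
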
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 44688e2..7854262 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -29,6 +29,8 @@
#include <linux/list.h>
#include <linux/radix-tree.h>
+#include "i915_gem.h"
+
struct pid;
struct drm_device;
@@ -37,6 +39,7 @@ struct drm_file;
struct drm_i915_private;
struct drm_i915_file_private;
struct i915_hw_ppgtt;
+struct i915_request;
struct i915_vma;
struct intel_ring;
@@ -157,7 +160,6 @@ struct i915_gem_context {
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
- bool initialised;
} engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@@ -274,7 +276,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);
-int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
void i915_gem_context_release(struct kref *ctx_ref);
@@ -292,6 +294,9 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+struct i915_gem_context *
+i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
+
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 864439a..69a7aec 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -330,8 +330,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
* write-combined buffer or a delay through the chipset for GTT
* writes that do require us to treat GTT as a separate cache domain.)
*/
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = 0;
return &obj->base;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 8daa8a7..54814a1 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -46,7 +46,7 @@ static bool ggtt_is_idle(struct drm_i915_private *i915)
return false;
for_each_engine(engine, i915, id) {
- if (engine->last_retired_context != i915->kernel_context)
+ if (!intel_engine_has_kernel_context(engine))
return false;
}
@@ -73,6 +73,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
if (err)
return err;
+ GEM_BUG_ON(!ggtt_is_idle(i915));
return 0;
}
@@ -167,7 +168,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
* retiring.
*/
if (!(flags & PIN_NONBLOCK))
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
else
phases[1] = NULL;
@@ -216,6 +217,7 @@ search_again:
if (ret)
return ret;
+ cond_resched();
goto search_again;
}
@@ -291,7 +293,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* retiring.
*/
if (!(flags & PIN_NONBLOCK))
- i915_gem_retire_requests(vm->i915);
+ i915_retire_requests(vm->i915);
check_color = vm->mm.color_adjust;
if (check_color) {
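
The eviction changes above add a cond_resched() before looping back to search_again, so a long eviction retry loop yields the CPU between passes instead of spinning. A minimal sketch of that retry-then-yield pattern follows; the example_ helpers merely stand in for the drm_mm scan, ggtt_flush() and cond_resched() and are declared but not implemented here.

#include <errno.h>

int example_scan_for_space(void);   /* stand-in for the drm_mm scan */
int example_flush_actives(void);    /* stand-in for ggtt_flush() */
void example_yield(void);           /* stand-in for cond_resched() */

static int example_evict_something(void)
{
	int err;

search_again:
	err = example_scan_for_space();
	if (err != -ENOSPC)
		return err;	/* success, or a hard failure */

	err = example_flush_actives();	/* idle the GPU, retire requests */
	if (err)
		return err;

	example_yield();	/* voluntary preemption point */
	goto search_again;
}
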
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 435ed95..8c170db 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -200,7 +200,7 @@ struct i915_execbuffer {
struct i915_gem_context *ctx; /** context for building the request */
struct i915_address_space *vm; /** GTT and vma for the request */
- struct drm_i915_gem_request *request; /** our request to build */
+ struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
/** actual size of execobj[] as we may extend it for the cmdparser */
@@ -227,7 +227,7 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
u32 *rq_cmd;
unsigned int rq_size;
} reloc_cache;
@@ -271,7 +271,7 @@ static inline u64 gen8_noncanonical_addr(u64 address)
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
- return eb->engine->needs_cmd_parser && eb->batch_len;
+ return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
}
static int eb_create(struct i915_execbuffer *eb)
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
list_add_tail(&vma->exec_link, &eb->unbound);
if (drm_mm_node_allocated(&vma->node))
err = i915_vma_unbind(vma);
+ if (unlikely(err))
+ vma->exec_flags = NULL;
}
return err;
}
@@ -884,7 +886,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
i915_gem_object_unpin_map(cache->rq->batch->obj);
i915_gem_chipset_flush(cache->rq->i915);
- __i915_add_request(cache->rq, true);
+ __i915_request_add(cache->rq, true);
cache->rq = NULL;
}
@@ -1012,7 +1014,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset += page << PAGE_SHIFT;
}
- vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
+ vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
@@ -1068,12 +1070,12 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
{
struct reloc_cache *cache = &eb->reloc_cache;
struct drm_i915_gem_object *obj;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
- GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);
+ GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
if (IS_ERR(obj))
@@ -1101,21 +1103,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = i915_gem_request_alloc(eb->engine, eb->ctx);
+ rq = i915_request_alloc(eb->engine, eb->ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
- err = i915_gem_request_await_object(rq, vma->obj, true);
- if (err)
- goto err_request;
-
- err = eb->engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- goto err_request;
-
- err = i915_switch_context(rq);
+ err = i915_request_await_object(rq, vma->obj, true);
if (err)
goto err_request;
@@ -1147,7 +1141,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
return 0;
err_request:
- i915_add_request(rq);
+ i915_request_add(rq);
err_unpin:
i915_vma_unpin(batch);
err_unmap:
@@ -1733,7 +1727,7 @@ slow:
}
static void eb_export_fence(struct i915_vma *vma,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
unsigned int flags)
{
struct reservation_object *resv = vma->resv;
@@ -1745,9 +1739,9 @@ static void eb_export_fence(struct i915_vma *vma,
*/
reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &req->fence);
+ reservation_object_add_excl_fence(resv, &rq->fence);
else if (reservation_object_reserve_shared(resv) == 0)
- reservation_object_add_shared_fence(resv, &req->fence);
+ reservation_object_add_shared_fence(resv, &rq->fence);
reservation_object_unlock(resv);
}
@@ -1763,7 +1757,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
struct drm_i915_gem_object *obj = vma->obj;
if (flags & EXEC_OBJECT_CAPTURE) {
- struct i915_gem_capture_list *capture;
+ struct i915_capture_list *capture;
capture = kmalloc(sizeof(*capture), GFP_KERNEL);
if (unlikely(!capture))
@@ -1794,7 +1788,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (flags & EXEC_OBJECT_ASYNC)
continue;
- err = i915_gem_request_await_object
+ err = i915_request_await_object
(eb->request, obj, flags & EXEC_OBJECT_WRITE);
if (err)
return err;
@@ -1818,8 +1812,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
/* Unconditionally flush any chipset caches (for streaming writes). */
i915_gem_chipset_flush(eb->i915);
- /* Unconditionally invalidate GPU caches and TLBs. */
- return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
+ return 0;
}
static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
@@ -1847,13 +1840,13 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
}
void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
- const unsigned int idx = req->engine->id;
+ const unsigned int idx = rq->engine->id;
- lockdep_assert_held(&req->i915->drm.struct_mutex);
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
/*
@@ -1867,35 +1860,35 @@ void i915_vma_move_to_active(struct i915_vma *vma,
if (!i915_vma_is_active(vma))
obj->active_count++;
i915_vma_set_active(vma, idx);
- i915_gem_active_set(&vma->last_read[idx], req);
+ i915_gem_active_set(&vma->last_read[idx], rq);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
- obj->base.write_domain = 0;
+ obj->write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
- obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- i915_gem_active_set(&obj->frontbuffer_write, req);
+ i915_gem_active_set(&obj->frontbuffer_write, rq);
- obj->base.read_domains = 0;
+ obj->read_domains = 0;
}
- obj->base.read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->read_domains |= I915_GEM_GPU_DOMAINS;
if (flags & EXEC_OBJECT_NEEDS_FENCE)
- i915_gem_active_set(&vma->last_fence, req);
+ i915_gem_active_set(&vma->last_fence, rq);
}
-static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
+static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
u32 *cs;
int i;
- if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
+ if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
- cs = intel_ring_begin(req, 4 * 2 + 2);
+ cs = intel_ring_begin(rq, 4 * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1905,7 +1898,7 @@ static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
*cs++ = 0;
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1951,10 +1944,10 @@ out:
}
static void
-add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
+add_to_client(struct i915_request *rq, struct drm_file *file)
{
- req->file_priv = file->driver_priv;
- list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
+ rq->file_priv = file->driver_priv;
+ list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
}
static int eb_submit(struct i915_execbuffer *eb)
@@ -1965,10 +1958,6 @@ static int eb_submit(struct i915_execbuffer *eb)
if (err)
return err;
- err = i915_switch_context(eb->request);
- if (err)
- return err;
-
if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
err = i915_reset_gen7_sol_offsets(eb->request);
if (err)
@@ -1986,7 +1975,7 @@ static int eb_submit(struct i915_execbuffer *eb)
return 0;
}
-/**
+/*
* Find one BSD ring to dispatch the corresponding BSD command.
* The engine index is returned.
*/
@@ -2074,23 +2063,27 @@ static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file)
{
- const unsigned int nfences = args->num_cliprects;
+ const unsigned long nfences = args->num_cliprects;
struct drm_i915_gem_exec_fence __user *user;
struct drm_syncobj **fences;
- unsigned int n;
+ unsigned long n;
int err;
if (!(args->flags & I915_EXEC_FENCE_ARRAY))
return NULL;
- if (nfences > SIZE_MAX / sizeof(*fences))
+ /* Check multiplication overflow for access_ok() and kvmalloc_array() */
+ BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
+ if (nfences > min_t(unsigned long,
+ ULONG_MAX / sizeof(*user),
+ SIZE_MAX / sizeof(*fences)))
return ERR_PTR(-EINVAL);
user = u64_to_user_ptr(args->cliprects_ptr);
- if (!access_ok(VERIFY_READ, user, nfences * 2 * sizeof(u32)))
+ if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user)))
return ERR_PTR(-EFAULT);
- fences = kvmalloc_array(args->num_cliprects, sizeof(*fences),
+ fences = kvmalloc_array(nfences, sizeof(*fences),
__GFP_NOWARN | GFP_KERNEL);
if (!fences)
return ERR_PTR(-ENOMEM);
@@ -2158,7 +2151,7 @@ await_fence_array(struct i915_execbuffer *eb,
if (!fence)
return -EINVAL;
- err = i915_gem_request_await_dma_fence(eb->request, fence);
+ err = i915_request_await_dma_fence(eb->request, fence);
dma_fence_put(fence);
if (err < 0)
return err;
@@ -2372,14 +2365,14 @@ i915_gem_do_execbuffer(struct drm_device *dev,
GEM_BUG_ON(eb.reloc_cache.rq);
/* Allocate a request for this batch buffer nice and early. */
- eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
+ eb.request = i915_request_alloc(eb.engine, eb.ctx);
if (IS_ERR(eb.request)) {
err = PTR_ERR(eb.request);
goto err_batch_unpin;
}
if (in_fence) {
- err = i915_gem_request_await_dma_fence(eb.request, in_fence);
+ err = i915_request_await_dma_fence(eb.request, in_fence);
if (err < 0)
goto err_request;
}
@@ -2407,10 +2400,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
*/
eb.request->batch = eb.batch;
- trace_i915_gem_request_queue(eb.request, eb.batch_flags);
+ trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- __i915_add_request(eb.request, err == 0);
+ __i915_request_add(eb.request, err == 0);
add_to_client(eb.request, file);
if (fences)
@@ -2419,7 +2412,7 @@ err_request:
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
- args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
+ args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
args->rsvd2 |= (u64)out_fence_fd << 32;
out_fence_fd = -1;
} else {
@@ -2447,26 +2440,44 @@ err_in_fence:
return err;
}
+static size_t eb_element_size(void)
+{
+ return (sizeof(struct drm_i915_gem_exec_object2) +
+ sizeof(struct i915_vma *) +
+ sizeof(unsigned int));
+}
+
+static bool check_buffer_count(size_t count)
+{
+ const size_t sz = eb_element_size();
+
+ /*
+ * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
+ * array size (see eb_create()). Otherwise, we can accept an array as
+ * large as can be addressed (though use large arrays at your peril)!
+ */
+
+ return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
+}
+
/*
* Legacy execbuffer just creates an exec2 list from the original exec object
* list array and passes it to the real function.
*/
int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
- const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
- sizeof(struct i915_vma *) +
- sizeof(unsigned int));
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ const size_t count = args->buffer_count;
unsigned int i;
int err;
- if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
- DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+ if (!check_buffer_count(count)) {
+ DRM_DEBUG("execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2485,9 +2496,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
/* Copy in the exec list from userland */
- exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list),
+ exec_list = kvmalloc_array(count, sizeof(*exec_list),
__GFP_NOWARN | GFP_KERNEL);
- exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
+ exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
@@ -2498,7 +2509,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
}
err = copy_from_user(exec_list,
u64_to_user_ptr(args->buffers_ptr),
- sizeof(*exec_list) * args->buffer_count);
+ sizeof(*exec_list) * count);
if (err) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, err);
@@ -2545,19 +2556,17 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
}
int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
- const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
- sizeof(struct i915_vma *) +
- sizeof(unsigned int));
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list;
struct drm_syncobj **fences = NULL;
+ const size_t count = args->buffer_count;
int err;
- if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
- DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+ if (!check_buffer_count(count)) {
+ DRM_DEBUG("execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2565,17 +2574,17 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
/* Allocate an extra slot for use by the command parser */
- exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
+ exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
+ DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
+ count);
return -ENOMEM;
}
if (copy_from_user(exec2_list,
u64_to_user_ptr(args->buffers_ptr),
- sizeof(*exec2_list) * args->buffer_count)) {
- DRM_DEBUG("copy %d exec entries failed\n", args->buffer_count);
+ sizeof(*exec2_list) * count)) {
+ DRM_DEBUG("copy %zd exec entries failed\n", count);
kvfree(exec2_list);
return -EFAULT;
}
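
Several of the execbuffer hunks above are about overflow-safe sizing of user-controlled array allocations: check_buffer_count(), eb_element_size(), and the tightened nfences bound in get_fence_array(). Below is a standalone sketch of the buffer-count check in the same shape as the new helpers; the example_ types only stand in for the real execobj bookkeeping and are assumptions for this illustration.

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct example_exec_object { uint64_t handle; uint64_t offset; };
struct example_vma;	/* opaque stand-in for struct i915_vma */

static size_t example_element_size(void)
{
	/* one userspace entry plus the per-entry driver bookkeeping */
	return sizeof(struct example_exec_object) +
	       sizeof(struct example_vma *) +
	       sizeof(unsigned int);
}

static bool example_check_buffer_count(size_t count)
{
	const size_t sz = example_element_size();

	/*
	 * Reject zero, anything beyond the INT_MAX lookup-table limit,
	 * and any count for which (count + 1) * sz would overflow the
	 * size computation handed to the array allocator.
	 */
	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}
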
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 012250f..d548ac0 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -64,7 +64,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
int fence_pitch_shift;
u64 val;
- if (INTEL_INFO(fence->i915)->gen >= 6) {
+ if (INTEL_GEN(fence->i915) >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -230,10 +230,14 @@ static int fence_update(struct drm_i915_fence_reg *fence,
}
if (fence->vma) {
- ret = i915_gem_active_retire(&fence->vma->last_fence,
- &fence->vma->obj->base.dev->struct_mutex);
+ struct i915_vma *old = fence->vma;
+
+ ret = i915_gem_active_retire(&old->last_fence,
+ &old->obj->base.dev->struct_mutex);
if (ret)
return ret;
+
+ i915_vma_flush_writes(old);
}
if (fence->vma && fence->vma != vma) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2af65ec..21d72f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -178,7 +178,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
- if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (has_full_48bit_ppgtt)
return 3;
@@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
struct pagevec *pvec = &vm->free_pages;
+ struct pagevec stash;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
@@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
if (likely(pvec->nr))
return pvec->pages[--pvec->nr];
- /* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */
+ /*
+ * Otherwise batch allocate pages to amortize cost of set_pages_wc.
+ *
+ * We have to be careful as page allocation may trigger the shrinker
+ * (via direct reclaim) which will fill up the WC stash underneath us.
+ * So we add our WB pages into a temporary pvec on the stack and merge
+ * them into the WC stash after all the allocations are complete.
+ */
+ pagevec_init(&stash);
do {
struct page *page;
@@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
break;
- pvec->pages[pvec->nr++] = page;
- } while (pagevec_space(pvec));
+ stash.pages[stash.nr++] = page;
+ } while (stash.nr < pagevec_space(pvec));
- if (unlikely(!pvec->nr))
- return NULL;
+ if (stash.nr) {
+ int nr = min_t(int, stash.nr, pagevec_space(pvec));
+ struct page **pages = stash.pages + stash.nr - nr;
- set_pages_array_wc(pvec->pages, pvec->nr);
+ if (nr && !set_pages_array_wc(pages, nr)) {
+ memcpy(pvec->pages + pvec->nr,
+ pages, sizeof(pages[0]) * nr);
+ pvec->nr += nr;
+ stash.nr -= nr;
+ }
+
+ pagevec_release(&stash);
+ }
- return pvec->pages[--pvec->nr];
+ return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
}
static void vm_free_pages_release(struct i915_address_space *vm,
@@ -454,6 +472,14 @@ static void vm_free_pages_release(struct i915_address_space *vm,
static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
+ /*
+ * On !llc, we need to change the pages back to WB. We only do so
+ * in bulk, so we rarely need to change the page attributes here,
+ * but doing so requires a stop_machine() from deep inside arch/x86/mm.
+ * To make detection of the possible sleep more likely, use an
+ * unconditional might_sleep() for everybody.
+ */
+ might_sleep();
if (!pagevec_add(&vm->free_pages, page))
vm_free_pages_release(vm, false);
}
@@ -517,9 +543,7 @@ static void fill_page_dma_32(struct i915_address_space *vm,
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct page *page = NULL;
- dma_addr_t addr;
- int order;
+ unsigned long size;
/*
* In order to utilize 64K pages for an object with a size < 2M, we will
@@ -533,48 +557,47 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* TODO: we should really consider write-protecting the scratch-page and
* sharing between ppgtt
*/
+ size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_48bit(vm) &&
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
- order = get_order(I915_GTT_PAGE_SIZE_64K);
- page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
- if (page) {
- addr = dma_map_page(vm->dma, page, 0,
- I915_GTT_PAGE_SIZE_64K,
- PCI_DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(vm->dma, addr))) {
- __free_pages(page, order);
- page = NULL;
- }
-
- if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
- dma_unmap_page(vm->dma, addr,
- I915_GTT_PAGE_SIZE_64K,
- PCI_DMA_BIDIRECTIONAL);
- __free_pages(page, order);
- page = NULL;
- }
- }
+ size = I915_GTT_PAGE_SIZE_64K;
+ gfp |= __GFP_NOWARN;
}
+ gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
+
+ do {
+ int order = get_order(size);
+ struct page *page;
+ dma_addr_t addr;
- if (!page) {
- order = 0;
- page = alloc_page(gfp | __GFP_ZERO);
+ page = alloc_pages(gfp, order);
if (unlikely(!page))
- return -ENOMEM;
+ goto skip;
- addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+ addr = dma_map_page(vm->dma, page, 0, size,
PCI_DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(vm->dma, addr))) {
- __free_page(page);
- return -ENOMEM;
- }
- }
+ if (unlikely(dma_mapping_error(vm->dma, addr)))
+ goto free_page;
- vm->scratch_page.page = page;
- vm->scratch_page.daddr = addr;
- vm->scratch_page.order = order;
+ if (unlikely(!IS_ALIGNED(addr, size)))
+ goto unmap_page;
- return 0;
+ vm->scratch_page.page = page;
+ vm->scratch_page.daddr = addr;
+ vm->scratch_page.order = order;
+ return 0;
+
+unmap_page:
+ dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+ __free_pages(page, order);
+skip:
+ if (size == I915_GTT_PAGE_SIZE_4K)
+ return -ENOMEM;
+
+ size = I915_GTT_PAGE_SIZE_4K;
+ gfp &= ~__GFP_NOWARN;
+ } while (1);
}
static void cleanup_scratch_page(struct i915_address_space *vm)
@@ -650,27 +673,22 @@ static void free_pd(struct i915_address_space *vm,
static void gen8_initialize_pd(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
- unsigned int i;
-
fill_px(vm, pd,
gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
- for (i = 0; i < I915_PDES; i++)
- pd->page_table[i] = vm->scratch_pt;
+ memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
}
static int __pdp_init(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp)
{
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
- unsigned int i;
pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!pdp->page_directory))
return -ENOMEM;
- for (i = 0; i < pdpes; i++)
- pdp->page_directory[i] = vm->scratch_pd;
+ memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
return 0;
}
@@ -692,7 +710,7 @@ alloc_pdp(struct i915_address_space *vm)
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- WARN_ON(!use_4lvl(vm));
+ GEM_BUG_ON(!use_4lvl(vm));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
@@ -741,25 +759,22 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
static void gen8_initialize_pml4(struct i915_address_space *vm,
struct i915_pml4 *pml4)
{
- unsigned int i;
-
fill_px(vm, pml4,
gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
- for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
- pml4->pdps[i] = vm->scratch_pdp;
+ memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct drm_i915_gem_request *req,
+static int gen8_write_pdp(struct i915_request *rq,
unsigned entry,
dma_addr_t addr)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
BUG_ON(entry >= 4);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -769,20 +784,20 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
*cs++ = lower_32_bits(addr);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
int i, ret;
for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- ret = gen8_write_pdp(req, i, pd_daddr);
+ ret = gen8_write_pdp(rq, i, pd_daddr);
if (ret)
return ret;
}
@@ -791,9 +806,9 @@ static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
}
static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
+ return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
}
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
@@ -1333,15 +1348,18 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
int count = gen8_pte_count(start, length);
if (pt == vm->scratch_pt) {
+ pd->used_pdes++;
+
pt = alloc_pt(vm);
- if (IS_ERR(pt))
+ if (IS_ERR(pt)) {
+ pd->used_pdes--;
goto unwind;
+ }
if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
gen8_initialize_pt(vm, pt);
gen8_ppgtt_set_pde(vm, pd, pt, pde);
- pd->used_pdes++;
GEM_BUG_ON(pd->used_pdes > I915_PDES);
}
@@ -1365,13 +1383,16 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (pd == vm->scratch_pd) {
+ pdp->used_pdpes++;
+
pd = alloc_pd(vm);
- if (IS_ERR(pd))
+ if (IS_ERR(pd)) {
+ pdp->used_pdpes--;
goto unwind;
+ }
gen8_initialize_pd(vm, pd);
gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
- pdp->used_pdpes++;
GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
@@ -1711,13 +1732,13 @@ static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1727,19 +1748,19 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1749,16 +1770,16 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = req->i915;
+ struct intel_engine_cs *engine = rq->engine;
+ struct drm_i915_private *dev_priv = rq->i915;
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
@@ -2080,7 +2101,7 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
ppgtt->base.i915 = dev_priv;
ppgtt->base.dma = &dev_priv->drm.pdev->dev;
- if (INTEL_INFO(dev_priv)->gen < 8)
+ if (INTEL_GEN(dev_priv) < 8)
return gen6_ppgtt_init(ppgtt);
else
return gen8_ppgtt_init(ppgtt);
@@ -2154,7 +2175,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
- if (i915_modparams.enable_execlists)
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
return 0;
if (!USES_PPGTT(dev_priv))
@@ -2228,9 +2249,9 @@ void i915_ppgtt_release(struct kref *kref)
trace_i915_ppgtt_release(&ppgtt->base);
/* vmas should already be unbound and destroyed */
- WARN_ON(!list_empty(&ppgtt->base.active_list));
- WARN_ON(!list_empty(&ppgtt->base.inactive_list));
- WARN_ON(!list_empty(&ppgtt->base.unbound_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
ppgtt->base.cleanup(&ppgtt->base);
i915_address_space_fini(&ppgtt->base);
@@ -2248,35 +2269,73 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
-void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
+static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
-
- if (INTEL_INFO(dev_priv)->gen < 6)
- return;
+ u32 fault;
for_each_engine(engine, dev_priv, id) {
- u32 fault_reg;
- fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (fault_reg & RING_FAULT_VALID) {
+ fault = I915_READ(RING_FAULT_REG(engine));
+ if (fault & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
- fault_reg & PAGE_MASK,
- fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault_reg),
- RING_FAULT_FAULT_TYPE(fault_reg));
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
I915_WRITE(RING_FAULT_REG(engine),
- fault_reg & ~RING_FAULT_VALID);
+ fault & ~RING_FAULT_VALID);
}
}
- /* Engine specific init may not have been done till this point. */
- if (dev_priv->engine[RCS])
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+}
+
+static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
+{
+ u32 fault = I915_READ(GEN8_RING_FAULT_REG);
+
+ if (fault & RING_FAULT_VALID) {
+ u32 fault_data0, fault_data1;
+ u64 fault_addr;
+
+ fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+ fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr),
+ lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ I915_WRITE(GEN8_RING_FAULT_REG,
+ fault & ~RING_FAULT_VALID);
+ }
+
+ POSTING_READ(GEN8_RING_FAULT_REG);
+}
+
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
+{
+ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
+ if (INTEL_GEN(dev_priv) >= 8)
+ gen8_check_and_clear_faults(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_check_and_clear_faults(dev_priv);
+ else
+ return;
}
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
@@ -2300,9 +2359,10 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
do {
- if (dma_map_sg(&obj->base.dev->pdev->dev,
- pages->sgl, pages->nents,
- PCI_DMA_BIDIRECTIONAL))
+ if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_NO_WARN))
return 0;
/* If the DMA remap fails, one cause can be that we have
@@ -2754,10 +2814,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
i915->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+ GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
ggtt->base.bind_vma = aliasing_gtt_bind_vma;
- WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
+ GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
return 0;
@@ -2848,7 +2908,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
ggtt->base.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
- WARN_ON(!list_empty(&ggtt->base.active_list));
+ GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
WARN_ON(i915_vma_unbind(vma));
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -2877,7 +2937,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->mappable);
+ io_mapping_fini(&ggtt->iomap);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2914,50 +2974,6 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
return 0;
}
-static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
- return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
-}
-
-static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
- return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
-}
-
-static size_t chv_get_stolen_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GMS_MASK;
-
- /*
- * 0x0 to 0x10: 32MB increments starting at 0MB
- * 0x11 to 0x16: 4MB increments starting at 8MB
- * 0x17 to 0x1d: 4MB increments start at 36MB
- */
- if (gmch_ctrl < 0x11)
- return (size_t)gmch_ctrl << 25;
- else if (gmch_ctrl < 0x17)
- return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
- else
- return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
-}
-
-static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
-{
- gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
- gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
-
- if (gen9_gmch_ctl < 0xf0)
- return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
- else
- /* 4MB increments starting at 0xf0 for 4MB */
- return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
-}
-
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
@@ -3041,7 +3057,7 @@ const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value)
{
struct intel_ppat *ppat = &i915->ppat;
- struct intel_ppat_entry *entry;
+ struct intel_ppat_entry *entry = NULL;
unsigned int scanned, best_score;
int i;
@@ -3064,7 +3080,7 @@ intel_ppat_get(struct drm_i915_private *i915, u8 value)
}
if (scanned == ppat->max_entries) {
- if (!best_score)
+ if (!entry)
return ERR_PTR(-ENOSPC);
kref_get(&entry->ref);
@@ -3171,12 +3187,6 @@ static void cnl_setup_private_ppat(struct intel_ppat *ppat)
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
- /* XXX: spec is unclear if this is still needed for CNL+ */
- if (!USES_PPGTT(ppat->i915)) {
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
- return;
- }
-
__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
@@ -3303,8 +3313,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
@@ -3315,13 +3327,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (INTEL_GEN(dev_priv) >= 9) {
- ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
} else if (IS_CHERRYVIEW(dev_priv)) {
- ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
- ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
@@ -3361,14 +3370,16 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
u16 snb_gmch_ctl;
int err;
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
/* 64/512MB is the current min/max we actually know of, but this is just
* a coarse sanity check.
*/
if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
+ DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
return -ENXIO;
}
@@ -3379,8 +3390,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
-
size = gen6_get_total_gtt_size(snb_gmch_ctl);
ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
@@ -3417,6 +3426,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
+ phys_addr_t gmadr_base;
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3426,10 +3436,13 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
}
intel_gtt_get(&ggtt->base.total,
- &ggtt->stolen_size,
- &ggtt->mappable_base,
+ &gmadr_base,
&ggtt->mappable_end);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(gmadr_base,
+ ggtt->mappable_end);
+
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
ggtt->base.insert_page = i915_ggtt_insert_page;
ggtt->base.insert_entries = i915_ggtt_insert_entries;
@@ -3474,9 +3487,9 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* currently don't have any bits spare to pass in this upper
* restriction!
*/
- if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
+ if (USES_GUC(dev_priv)) {
ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
- ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
}
if ((ggtt->base.total - 1) >> 32) {
@@ -3484,21 +3497,21 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
" of address space! Found %lldM!\n",
ggtt->base.total >> 20);
ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
}
if (ggtt->mappable_end > ggtt->base.total) {
DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%llx, total=%llx\n",
- ggtt->mappable_end, ggtt->base.total);
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->base.total);
ggtt->mappable_end = ggtt->base.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_INFO("Memory usable by graphics device = %lluM\n",
- ggtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
if (intel_vtd_active())
DRM_INFO("VT-d active for gfx access\n");
@@ -3527,14 +3540,14 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
- if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
- dev_priv->ggtt.mappable_base,
+ if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
+ dev_priv->ggtt.gmadr.start,
dev_priv->ggtt.mappable_end)) {
ret = -EIO;
goto out_gtt_cleanup;
}
- ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
/*
* Initialise stolen early so that we may reserve preallocated
@@ -3564,6 +3577,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
i915->ggtt.invalidate = guc_ggtt_invalidate;
+
+ i915_ggtt_invalidate(i915);
}
void i915_ggtt_disable_guc(struct drm_i915_private *i915)
@@ -3572,6 +3587,8 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
i915->ggtt.invalidate = gen6_ggtt_invalidate;
+
+ i915_ggtt_invalidate(i915);
}
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
@@ -3591,10 +3608,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
bool ggtt_bound = false;
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->vm != &ggtt->base)
- continue;
-
+ for_each_ggtt_vma(vma, obj) {
if (!i915_vma_unbind(vma))
continue;
@@ -3708,9 +3722,6 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
rot_info->plane[i].stride, st, sg);
}
- DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
-
kvfree(page_addr_list);
return st;
@@ -3720,8 +3731,8 @@ err_sg_alloc:
err_st_alloc:
kvfree(page_addr_list);
- DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+ DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
return ERR_PTR(ret);
}
@@ -3790,6 +3801,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
switch (vma->ggtt_view.type) {
+ default:
+ GEM_BUG_ON(vma->ggtt_view.type);
+ /* fall through */
case I915_GGTT_VIEW_NORMAL:
vma->pages = vma->obj->mm.pages;
return 0;
@@ -3802,11 +3816,6 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
case I915_GGTT_VIEW_PARTIAL:
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
break;
-
- default:
- WARN_ONCE(1, "GGTT view %u not implemented!\n",
- vma->ggtt_view.type);
- return -EINVAL;
}
ret = 0;
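
The rewritten setup_scratch_page() above turns the 64K-versus-4K handling into a single retry loop: attempt the large scratch page first (quietly), and fall back to a 4K page if allocation, DMA mapping, or alignment fails. A compilable sketch of that shape is shown here; example_alloc_and_map() is an assumed stand-in for the alloc_pages()/dma_map_page()/alignment sequence, not a real kernel call.

#include <errno.h>

#define EXAMPLE_SZ_4K  (4UL << 10)
#define EXAMPLE_SZ_64K (64UL << 10)

/* Stand-in for alloc_pages() + dma_map_page() + the alignment check;
 * returns 0 on success, negative errno on failure. Assumed here. */
int example_alloc_and_map(unsigned long size);

static int example_setup_scratch(int want_64k)
{
	unsigned long size = want_64k ? EXAMPLE_SZ_64K : EXAMPLE_SZ_4K;

	do {
		if (example_alloc_and_map(size) == 0)
			return 0;		/* mapped at this size */

		if (size == EXAMPLE_SZ_4K)
			return -ENOMEM;		/* no smaller size to try */

		size = EXAMPLE_SZ_4K;		/* retry with the base page */
	} while (1);
}
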
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 93211a9..6efc017 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -39,7 +39,8 @@
#include <linux/pagevec.h>
#include "i915_gem_timeline.h"
-#include "i915_gem_request.h"
+
+#include "i915_request.h"
#include "i915_selftest.h"
#define I915_GTT_PAGE_SIZE_4K BIT(12)
@@ -368,23 +369,10 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
*/
struct i915_ggtt {
struct i915_address_space base;
- struct io_mapping mappable; /* Mapping to our CPU mappable region */
-
- phys_addr_t mappable_base; /* PA of our GMADR */
- u64 mappable_end; /* End offset that we can CPU map */
- /* Stolen memory is segmented in hardware with different portions
- * offlimits to certain functions.
- *
- * The drm_mm is initialised to the total accessible range, as found
- * from the PCI config. On Broadwell+, this is further restricted to
- * avoid the first page! The upper end of stolen memory is reserved for
- * hardware functions and similarly removed from the accessible range.
- */
- u32 stolen_size; /* Total size of stolen memory */
- u32 stolen_usable_size; /* Total size minus reserved ranges */
- u32 stolen_reserved_base;
- u32 stolen_reserved_size;
+ struct io_mapping iomap; /* Mapping to our CPU mappable region */
+ struct resource gmadr; /* GMADR resource */
+ resource_size_t mappable_end; /* End offset that we can CPU map */
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
@@ -411,7 +399,7 @@ struct i915_hw_ppgtt {
gen6_pte_t __iomem *pd_addr;
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req);
+ struct i915_request *rq);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index ee83ec8..0d0144b 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
@@ -95,7 +96,8 @@ create_st:
struct page *page;
do {
- page = alloc_pages(gfp | (order ? QUIET : 0), order);
+ page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
+ order);
if (page)
break;
if (!order--)
@@ -165,6 +167,10 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
};
/**
+ * i915_gem_object_create_internal: create an object with volatile pages
+ * @i915: the i915 device
+ * @size: the size in bytes of backing storage to allocate for the object
+ *
* Creates a new object that wraps some internal memory for private use.
* This object is not backed by swappable storage, and as such its contents
* are volatile and only valid whilst pinned. If the object is reaped by the
@@ -195,8 +201,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
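
The internal-object allocator above pairs QUIET (fail fast, no warning) for high-order attempts with the new MAYFAIL flag for the final order-0 attempt, stepping the order down until something succeeds. The pattern, sketched with an example_alloc() stand-in and arbitrary flag values in place of the real __GFP_* bits, looks roughly like this:

#include <stddef.h>

#define EXAMPLE_QUIET   0x1	/* ~ __GFP_NORETRY | __GFP_NOWARN */
#define EXAMPLE_MAYFAIL 0x2	/* ~ __GFP_RETRY_MAYFAIL | __GFP_NOWARN */

/* Stand-in for alloc_pages(gfp, order); assumed for this sketch. */
void *example_alloc(unsigned int flags, int order);

static void *example_alloc_backing(int max_order)
{
	int order = max_order;

	for (;;) {
		void *chunk = example_alloc(order ? EXAMPLE_QUIET
						  : EXAMPLE_MAYFAIL, order);
		if (chunk)
			return chunk;	/* got a contiguous chunk */
		if (!order--)
			return NULL;	/* even a single page failed */
	}
}
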
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 63ce38c..54f00b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -33,7 +33,7 @@
#include <drm/i915_drm.h>
-#include "i915_gem_request.h"
+#include "i915_request.h"
#include "i915_selftest.h"
struct drm_i915_gem_object;
@@ -53,8 +53,9 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+#define I915_GEM_OBJECT_IS_PROXY BIT(2)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -147,6 +148,21 @@ struct drm_i915_gem_object {
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
unsigned int cache_dirty:1;
+ /**
+ * @read_domains: Read memory domains.
+ *
+ * These monitor which caches contain read/write data related to the
+ * object. When transitioning from one set of domains to another,
+ * the driver is called to ensure that caches are suitably flushed and
+ * invalidated.
+ */
+ u16 read_domains;
+
+ /**
+ * @write_domain: Corresponding unique write memory domain.
+ */
+ u16 write_domain;
+
atomic_t frontbuffer_bits;
unsigned int frontbuffer_ggtt_origin; /* write once */
struct i915_gem_active frontbuffer_write;
@@ -260,6 +276,8 @@ struct drm_i915_gem_object {
} userptr;
unsigned long scratch;
+
+ void *gvt_info;
};
/** for phys allocated objects */
@@ -362,6 +380,12 @@ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
+}
+
+static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
return obj->active_count;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3703dc9..1036e86 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -26,10 +26,12 @@
*/
#include "i915_drv.h"
+#include "i915_gem_render_state.h"
#include "intel_renderstate.h"
struct intel_render_state {
const struct intel_renderstate_rodata *rodata;
+ struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 batch_offset;
u32 batch_size;
@@ -40,6 +42,9 @@ struct intel_render_state {
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
+ if (engine->id != RCS)
+ return NULL;
+
switch (INTEL_GEN(engine->i915)) {
case 6:
return &gen6_null_state;
@@ -74,17 +79,16 @@ static int render_state_setup(struct intel_render_state *so,
struct drm_i915_private *i915)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
- struct drm_i915_gem_object *obj = so->vma->obj;
unsigned int i = 0, reloc_index = 0;
unsigned int needs_clflush;
u32 *d;
int ret;
- ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ ret = i915_gem_obj_prepare_shmem_write(so->obj, &needs_clflush);
if (ret)
return ret;
- d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
+ d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -112,7 +116,7 @@ static int render_state_setup(struct intel_render_state *so,
goto err;
}
- so->batch_offset = so->vma->node.start;
+ so->batch_offset = i915_ggtt_offset(so->vma);
so->batch_size = rodata->batch_items * sizeof(u32);
while (i % CACHELINE_DWORDS)
@@ -160,9 +164,9 @@ static int render_state_setup(struct intel_render_state *so,
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
out:
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_obj_finish_shmem_access(so->obj);
return ret;
err:
@@ -173,112 +177,57 @@ err:
#undef OUT_BATCH
-int i915_gem_render_state_init(struct intel_engine_cs *engine)
+int i915_gem_render_state_emit(struct i915_request *rq)
{
- struct intel_render_state *so;
- const struct intel_renderstate_rodata *rodata;
- struct drm_i915_gem_object *obj;
- int ret;
+ struct intel_engine_cs *engine = rq->engine;
+ struct intel_render_state so = {}; /* keep the compiler happy */
+ int err;
- if (engine->id != RCS)
+ so.rodata = render_state_get_rodata(engine);
+ if (!so.rodata)
return 0;
- rodata = render_state_get_rodata(engine);
- if (!rodata)
- return 0;
-
- if (rodata->batch_items * 4 > PAGE_SIZE)
+ if (so.rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
- so = kmalloc(sizeof(*so), GFP_KERNEL);
- if (!so)
- return -ENOMEM;
-
- obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- ret = PTR_ERR(obj);
- goto err_free;
- }
+ so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(so.obj))
+ return PTR_ERR(so.obj);
- so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
- if (IS_ERR(so->vma)) {
- ret = PTR_ERR(so->vma);
+ so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(so.vma)) {
+ err = PTR_ERR(so.vma);
goto err_obj;
}
- so->rodata = rodata;
- engine->render_state = so;
- return 0;
-
-err_obj:
- i915_gem_object_put(obj);
-err_free:
- kfree(so);
- return ret;
-}
-
-int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
-{
- struct intel_render_state *so;
- int ret;
-
- lockdep_assert_held(&req->i915->drm.struct_mutex);
-
- so = req->engine->render_state;
- if (!so)
- return 0;
-
- /* Recreate the page after shrinking */
- if (!i915_gem_object_has_pages(so->vma->obj))
- so->batch_offset = -1;
-
- ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- return ret;
+ err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto err_vma;
- if (so->vma->node.start != so->batch_offset) {
- ret = render_state_setup(so, req->i915);
- if (ret)
- goto err_unpin;
- }
-
- ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
- if (ret)
+ err = render_state_setup(&so, rq->i915);
+ if (err)
goto err_unpin;
- ret = req->engine->emit_bb_start(req,
- so->batch_offset, so->batch_size,
- I915_DISPATCH_SECURE);
- if (ret)
+ err = engine->emit_bb_start(rq,
+ so.batch_offset, so.batch_size,
+ I915_DISPATCH_SECURE);
+ if (err)
goto err_unpin;
- if (so->aux_size > 8) {
- ret = req->engine->emit_bb_start(req,
- so->aux_offset, so->aux_size,
- I915_DISPATCH_SECURE);
- if (ret)
+ if (so.aux_size > 8) {
+ err = engine->emit_bb_start(rq,
+ so.aux_offset, so.aux_size,
+ I915_DISPATCH_SECURE);
+ if (err)
goto err_unpin;
}
- i915_vma_move_to_active(so->vma, req, 0);
+ i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
- i915_vma_unpin(so->vma);
- return ret;
-}
-
-void i915_gem_render_state_fini(struct intel_engine_cs *engine)
-{
- struct intel_render_state *so;
- struct drm_i915_gem_object *obj;
-
- so = fetch_and_zero(&engine->render_state);
- if (!so)
- return;
-
- obj = so->vma->obj;
-
- i915_vma_close(so->vma);
- __i915_gem_object_release_unless_active(obj);
-
- kfree(so);
+ i915_vma_unpin(so.vma);
+err_vma:
+ i915_vma_close(so.vma);
+err_obj:
+ __i915_gem_object_release_unless_active(so.obj);
+ return err;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 8748184..112cda8 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -24,10 +24,8 @@
#ifndef _I915_GEM_RENDER_STATE_H_
#define _I915_GEM_RENDER_STATE_H_
-struct drm_i915_gem_request;
+struct i915_request;
-int i915_gem_render_state_init(struct intel_engine_cs *engine);
-int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
-void i915_gem_render_state_fini(struct intel_engine_cs *engine);
+int i915_gem_render_state_emit(struct i915_request *rq);
#endif /* _I915_GEM_RENDER_STATE_H_ */
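The rewritten emit path above drops the per-engine init/emit/fini bookkeeping: everything lives on the stack of i915_gem_render_state_emit() and is torn down through goto labels in reverse order of acquisition, with the success path falling through the same labels. A rough userspace analogue of that unwind shape (plain C with hypothetical resources, not i915 code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int one_shot(void)
{
        char *batch, *aux;
        int err = 0;

        batch = malloc(4096);           /* like i915_gem_object_create_internal() */
        if (!batch)
                return -ENOMEM;

        aux = malloc(4096);             /* like i915_vma_instance() + i915_vma_pin() */
        if (!aux) {
                err = -ENOMEM;
                goto err_batch;
        }

        /* ... emit work here; on failure set err and fall through ... */

        free(aux);                      /* err_unpin / err_vma equivalent */
err_batch:
        free(batch);                    /* err_obj equivalent */
        return err;
}

int main(void)
{
        printf("one_shot() = %d\n", one_shot());
        return 0;
}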
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 3770e33..5757fb7 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -35,9 +35,9 @@
#include "i915_drv.h"
#include "i915_trace.h"
-static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
{
- switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
+ switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
return true;
@@ -47,7 +47,7 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
preempt_disable();
do {
cpu_relax();
- if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+ if (mutex_trylock(&i915->drm.struct_mutex)) {
*unlock = true;
break;
}
@@ -63,12 +63,12 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
BUG();
}
-static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
+static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
{
if (!unlock)
return;
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
}
static bool swap_available(void)
@@ -118,7 +118,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
/**
* i915_gem_shrink - Shrink buffer object caches
- * @dev_priv: i915 device
+ * @i915: i915 device
* @target: amount of memory to make available, in pages
* @nr_scanned: optional output for number of pages scanned (incremental)
* @flags: control flags for selecting cache types
@@ -142,7 +142,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
* The number of pages of backing storage actually released.
*/
unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
+i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags)
@@ -151,15 +151,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
struct list_head *list;
unsigned int bit;
} phases[] = {
- { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
- { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+ { &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
+ { &i915->mm.bound_list, I915_SHRINK_BOUND },
{ NULL, 0 },
}, *phase;
unsigned long count = 0;
unsigned long scanned = 0;
bool unlock;
- if (!shrinker_lock(dev_priv, &unlock))
+ if (!shrinker_lock(i915, &unlock))
return 0;
/*
@@ -172,10 +172,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* we will free as much as we can and hope to get a second chance.
*/
if (flags & I915_SHRINK_ACTIVE)
- i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+ i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
- trace_i915_gem_shrink(dev_priv, target, flags);
- i915_gem_retire_requests(dev_priv);
+ trace_i915_gem_shrink(i915, target, flags);
+ i915_retire_requests(i915);
/*
* Unbinding of objects will require HW access; Let us not wake the
@@ -183,7 +183,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* we will force the wake during oom-notifier.
*/
if ((flags & I915_SHRINK_BOUND) &&
- !intel_runtime_pm_get_if_in_use(dev_priv))
+ !intel_runtime_pm_get_if_in_use(i915))
flags &= ~I915_SHRINK_BOUND;
/*
@@ -221,7 +221,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed.
*/
- spin_lock(&dev_priv->mm.obj_lock);
+ spin_lock(&i915->mm.obj_lock);
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
@@ -244,7 +244,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!can_release_pages(obj))
continue;
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock(&i915->mm.obj_lock);
if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */
@@ -258,18 +258,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
}
scanned += obj->base.size >> PAGE_SHIFT;
- spin_lock(&dev_priv->mm.obj_lock);
+ spin_lock(&i915->mm.obj_lock);
}
list_splice_tail(&still_in_list, phase->list);
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock(&i915->mm.obj_lock);
}
if (flags & I915_SHRINK_BOUND)
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(i915);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(i915);
- shrinker_unlock(dev_priv, unlock);
+ shrinker_unlock(i915, unlock);
if (nr_scanned)
*nr_scanned += scanned;
@@ -278,7 +278,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
/**
* i915_gem_shrink_all - Shrink buffer object caches completely
- * @dev_priv: i915 device
+ * @i915: i915 device
*
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
* caches completely. It also first waits for and retires all outstanding
@@ -290,16 +290,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* Returns:
* The number of pages of backing storage actually released.
*/
-unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
unsigned long freed;
- intel_runtime_pm_get(dev_priv);
- freed = i915_gem_shrink(dev_priv, -1UL, NULL,
+ intel_runtime_pm_get(i915);
+ freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(i915);
return freed;
}
@@ -347,53 +347,53 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
unsigned long freed;
bool unlock;
sc->nr_scanned = 0;
- if (!shrinker_lock(dev_priv, &unlock))
+ if (!shrinker_lock(i915, &unlock))
return SHRINK_STOP;
- freed = i915_gem_shrink(dev_priv,
+ freed = i915_gem_shrink(i915,
sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_PURGEABLE);
- if (freed < sc->nr_to_scan)
- freed += i915_gem_shrink(dev_priv,
+ if (sc->nr_scanned < sc->nr_to_scan)
+ freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- if (freed < sc->nr_to_scan && current_is_kswapd()) {
- intel_runtime_pm_get(dev_priv);
- freed += i915_gem_shrink(dev_priv,
+ if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
+ intel_runtime_pm_get(i915);
+ freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(i915);
}
- shrinker_unlock(dev_priv, unlock);
+ shrinker_unlock(i915, unlock);
return sc->nr_scanned ? freed : SHRINK_STOP;
}
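The scan path above now decides whether to escalate based on how many pages were scanned rather than how many were freed, so objects that were visited but could not be released are not re-walked by the next, more aggressive pass. A toy userspace model of that decision (shrink_pass() and its numbers are invented for illustration):

#include <stdio.h>

struct shrink_control {
        unsigned long nr_to_scan;
        unsigned long nr_scanned;
};

/* Pretend pass: visits up to 64 pages per call, can only free half of them. */
static unsigned long shrink_pass(struct shrink_control *sc, unsigned long todo)
{
        unsigned long scanned = todo < 64 ? todo : 64;

        sc->nr_scanned += scanned;
        return scanned / 2;                     /* pages actually freed */
}

int main(void)
{
        struct shrink_control sc = { .nr_to_scan = 128, .nr_scanned = 0 };
        unsigned long freed;

        freed = shrink_pass(&sc, sc.nr_to_scan);
        if (sc.nr_scanned < sc.nr_to_scan)      /* escalate on a scan shortfall */
                freed += shrink_pass(&sc, sc.nr_to_scan - sc.nr_scanned);
        /*
         * Testing "freed < nr_to_scan" instead would re-walk pages that were
         * already scanned but simply could not be released.
         */
        printf("scanned %lu, freed %lu\n", sc.nr_scanned, freed);
        return 0;
}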
static bool
-shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
+shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
int timeout_ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do {
- if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
- shrinker_lock(dev_priv, unlock))
+ if (i915_gem_wait_for_idle(i915, 0) == 0 &&
+ shrinker_lock(i915, unlock))
break;
schedule_timeout_killable(1);
@@ -412,32 +412,32 @@ shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
- freed_pages = i915_gem_shrink_all(dev_priv);
+ freed_pages = i915_gem_shrink_all(i915);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- spin_lock(&dev_priv->mm.obj_lock);
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
+ spin_lock(&i915->mm.obj_lock);
+ list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
- list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
+ list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
bound += obj->base.size >> PAGE_SHIFT;
}
- spin_unlock(&dev_priv->mm.obj_lock);
+ spin_unlock(&i915->mm.obj_lock);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu pages freed, "
@@ -455,74 +455,74 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
bool unlock;
int ret;
- if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
+ if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+ ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
if (ret)
goto out;
- intel_runtime_pm_get(dev_priv);
- freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL,
+ intel_runtime_pm_get(i915);
+ freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE |
I915_SHRINK_VMAPS);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(i915);
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
- &dev_priv->ggtt.base.inactive_list, vm_link) {
+ &i915->ggtt.base.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count;
}
out:
- shrinker_unlock(dev_priv, unlock);
+ shrinker_unlock(i915, unlock);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
/**
- * i915_gem_shrinker_init - Initialize i915 shrinker
- * @dev_priv: i915 device
+ * i915_gem_shrinker_register - Register the i915 shrinker
+ * @i915: i915 device
*
* This function registers and sets up the i915 shrinker and OOM handler.
*/
-void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
+void i915_gem_shrinker_register(struct drm_i915_private *i915)
{
- dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
- dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
- dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
- dev_priv->mm.shrinker.batch = 4096;
- WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
+ i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+ i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
+ i915->mm.shrinker.seeks = DEFAULT_SEEKS;
+ i915->mm.shrinker.batch = 4096;
+ WARN_ON(register_shrinker(&i915->mm.shrinker));
- dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+ i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+ WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));
- dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
- WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
+ i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
/**
- * i915_gem_shrinker_cleanup - Clean up i915 shrinker
- * @dev_priv: i915 device
+ * i915_gem_shrinker_unregister - Unregisters the i915 shrinker
+ * @i915: i915 device
*
* This function unregisters the i915 shrinker and OOM handler.
*/
-void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
+void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
{
- WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
- WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
- unregister_shrinker(&dev_priv->mm.shrinker);
+ WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
+ WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
+ unregister_shrinker(&i915->mm.shrinker);
}
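The register/unregister pair above follows the stock kernel shrinker pattern. A minimal, hedged sketch of that shape, matching the single-argument register_shrinker() used in this tree (the my_* names are placeholders, not i915 symbols):

#include <linux/module.h>
#include <linux/shrinker.h>

static unsigned long my_count(struct shrinker *shrinker,
                              struct shrink_control *sc)
{
        return 0;       /* report the number of freeable objects; 0 skips us */
}

static unsigned long my_scan(struct shrinker *shrinker,
                             struct shrink_control *sc)
{
        return SHRINK_STOP;     /* or the number of pages actually reclaimed */
}

static struct shrinker my_shrinker = {
        .count_objects  = my_count,
        .scan_objects   = my_scan,
        .seeks          = DEFAULT_SEEKS,
        .batch          = 4096,
};

static int __init my_init(void)
{
        return register_shrinker(&my_shrinker);
}

static void __exit my_exit(void)
{
        unregister_shrinker(&my_shrinker);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");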
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 03e7abc..62aa679 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -30,9 +30,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define KB(x) ((x) * 1024)
-#define MB(x) (KB(x) * 1024)
-
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
@@ -79,129 +76,26 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
-static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
+static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
+ struct resource *dsm)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
- dma_addr_t base;
-
- /* Almost universally we can find the Graphics Base of Stolen Memory
- * at register BSM (0x5c) in the igfx configuration space. On a few
- * (desktop) machines this is also mirrored in the bridge device at
- * different locations, or in the MCHBAR.
- *
- * On 865 we just check the TOUD register.
- *
- * On 830/845/85x the stolen memory base isn't available in any
- * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
- *
- */
- base = 0;
- if (INTEL_GEN(dev_priv) >= 3) {
- u32 bsm;
-
- pci_read_config_dword(pdev, INTEL_BSM, &bsm);
-
- base = bsm & INTEL_BSM_MASK;
- } else if (IS_I865G(dev_priv)) {
- u32 tseg_size = 0;
- u16 toud = 0;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I845_ESMRAMC, &tmp);
-
- if (tmp & TSEG_ENABLE) {
- switch (tmp & I845_TSEG_SIZE_MASK) {
- case I845_TSEG_SIZE_512K:
- tseg_size = KB(512);
- break;
- case I845_TSEG_SIZE_1M:
- tseg_size = MB(1);
- break;
- }
- }
-
- pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
- I865_TOUD, &toud);
-
- base = (toud << 16) + tseg_size;
- } else if (IS_I85X(dev_priv)) {
- u32 tseg_size = 0;
- u32 tom;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I85X_ESMRAMC, &tmp);
-
- if (tmp & TSEG_ENABLE)
- tseg_size = MB(1);
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
- I85X_DRB3, &tmp);
- tom = tmp * MB(32);
-
- base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I845G(dev_priv)) {
- u32 tseg_size = 0;
- u32 tom;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I845_ESMRAMC, &tmp);
-
- if (tmp & TSEG_ENABLE) {
- switch (tmp & I845_TSEG_SIZE_MASK) {
- case I845_TSEG_SIZE_512K:
- tseg_size = KB(512);
- break;
- case I845_TSEG_SIZE_1M:
- tseg_size = MB(1);
- break;
- }
- }
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I830_DRB3, &tmp);
- tom = tmp * MB(32);
-
- base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I830(dev_priv)) {
- u32 tseg_size = 0;
- u32 tom;
- u8 tmp;
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I830_ESMRAMC, &tmp);
+ if (dsm->start == 0 || dsm->end <= dsm->start)
+ return -EINVAL;
- if (tmp & TSEG_ENABLE) {
- if (tmp & I830_TSEG_SIZE_1M)
- tseg_size = MB(1);
- else
- tseg_size = KB(512);
- }
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I830_DRB3, &tmp);
- tom = tmp * MB(32);
-
- base = tom - tseg_size - ggtt->stolen_size;
- }
-
- if (base == 0 || add_overflows(base, ggtt->stolen_size))
- return 0;
+ /*
+ * TODO: We have yet to encounter the case where the GTT wasn't at the
+ * end of stolen. With that assumption we could simplify this.
+ */
- /* make sure we don't clobber the GTT if it's within stolen memory */
+ /* Make sure we don't clobber the GTT if it's within stolen memory */
if (INTEL_GEN(dev_priv) <= 4 &&
!IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
- struct {
- dma_addr_t start, end;
- } stolen[2] = {
- { .start = base, .end = base + ggtt->stolen_size, },
- { .start = base, .end = base + ggtt->stolen_size, },
- };
- u64 ggtt_start, ggtt_end;
+ struct resource stolen[2] = {*dsm, *dsm};
+ struct resource ggtt_res;
+ resource_size_t ggtt_start;
ggtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev_priv))
@@ -209,70 +103,64 @@ static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
ggtt_start &= PGTBL_ADDRESS_LO_MASK;
- ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;
-
- if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
- stolen[0].end = ggtt_start;
- if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
- stolen[1].start = ggtt_end;
-
- /* pick the larger of the two chunks */
- if (stolen[0].end - stolen[0].start >
- stolen[1].end - stolen[1].start) {
- base = stolen[0].start;
- ggtt->stolen_size = stolen[0].end - stolen[0].start;
- } else {
- base = stolen[1].start;
- ggtt->stolen_size = stolen[1].end - stolen[1].start;
- }
+
+ ggtt_res =
+ (struct resource) DEFINE_RES_MEM(ggtt_start,
+ ggtt_total_entries(ggtt) * 4);
+
+ if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
+ stolen[0].end = ggtt_res.start;
+ if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
+ stolen[1].start = ggtt_res.end;
+
+ /* Pick the larger of the two chunks */
+ if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
+ *dsm = stolen[0];
+ else
+ *dsm = stolen[1];
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
- dma_addr_t end = base + ggtt->stolen_size - 1;
-
- DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
- (unsigned long long)ggtt_start,
- (unsigned long long)ggtt_end - 1);
- DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
- &base, &end);
+ DRM_DEBUG_KMS("GTT within stolen memory at %pR\n", &ggtt_res);
+ DRM_DEBUG_KMS("Stolen memory adjusted to %pR\n", dsm);
}
}
-
- /* Verify that nothing else uses this physical address. Stolen
+ /*
+ * Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
+ r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
+ resource_size(dsm),
"Graphics Stolen Memory");
if (r == NULL) {
/*
* One more attempt but this time requesting region from
- * base + 1, as we have seen that this resolves the region
+ * start + 1, as we have seen that this resolves the region
* conflict with the PCI Bus.
* This is a BIOS w/a: Some BIOS wrap stolen in the root
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
* There's also BIOS with off-by-one on the other end.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
- ggtt->stolen_size - 2,
+ r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
+ resource_size(dsm) - 2,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
if (r == NULL && !IS_GEN3(dev_priv)) {
- dma_addr_t end = base + ggtt->stolen_size;
+ DRM_ERROR("conflict detected with stolen region: %pR\n",
+ dsm);
- DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
- &base, &end);
- base = 0;
+ return -EBUSY;
}
}
- return base;
+ return 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -286,13 +174,24 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
}
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
- dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ resource_size_t stolen_top = dev_priv->dsm.end + 1;
+
+ if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
+ /*
+ * Whether ILK really reuses the ELK register for this is unclear.
+ * Let's see if we catch anyone with this supposedly enabled on ILK.
+ */
+ WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
@@ -309,10 +208,16 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
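Each of the *_get_stolen_reserved() helpers above now bails out with base = size = 0 when the enable bit is clear, before interpreting any address or size fields. A toy userspace decode of that pattern (the mask values and size decode are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define RESERVED_ENABLE         (1u << 0)       /* invented bit layout */
#define RESERVED_ADDR_MASK      0xfff00000u

static void get_reserved(uint32_t reg_val, uint64_t *base, uint64_t *size)
{
        if (!(reg_val & RESERVED_ENABLE)) {
                *base = 0;
                *size = 0;                      /* disabled: nothing to decode */
                return;
        }

        *base = reg_val & RESERVED_ADDR_MASK;
        *size = 1024 * 1024;                    /* size-field decode elided */
}

int main(void)
{
        uint64_t base, size;

        get_reserved(0x12300000u, &base, &size);        /* enable bit clear */
        printf("base=%#llx size=%llu\n",
               (unsigned long long)base, (unsigned long long)size);
        return 0;
}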
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
@@ -335,10 +240,16 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
@@ -355,10 +266,16 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
@@ -381,13 +298,18 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- dma_addr_t stolen_top;
+ resource_size_t stolen_top;
- stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
+ *base = 0;
+ *size = 0;
+ return;
+ }
+
+ stolen_top = dev_priv->dsm.end + 1;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
@@ -403,10 +325,9 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- dma_addr_t reserved_base, stolen_top;
- u32 reserved_total, reserved_size;
- u32 stolen_usable_start;
+ resource_size_t reserved_base, stolen_top;
+ resource_size_t reserved_total, reserved_size;
+ resource_size_t stolen_usable_start;
mutex_init(&dev_priv->mm.stolen_lock);
@@ -420,30 +341,32 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
return 0;
}
- if (ggtt->stolen_size == 0)
+ if (resource_size(&intel_graphics_stolen_res) == 0)
return 0;
- dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
- if (dev_priv->mm.stolen_base == 0)
+ dev_priv->dsm = intel_graphics_stolen_res;
+
+ if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
return 0;
- stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ GEM_BUG_ON(dev_priv->dsm.start == 0);
+ GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
+
+ stolen_top = dev_priv->dsm.end + 1;
reserved_base = 0;
reserved_size = 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 2:
case 3:
break;
case 4:
- if (IS_G4X(dev_priv))
- g4x_get_stolen_reserved(dev_priv,
- &reserved_base, &reserved_size);
- break;
+ if (!IS_G4X(dev_priv))
+ break;
+ /* fall through */
case 5:
- /* Assume the gen6 maximum for the older platforms. */
- reserved_size = 1024 * 1024;
- reserved_base = stolen_top - reserved_size;
+ g4x_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
case 6:
gen6_get_stolen_reserved(dev_priv,
@@ -470,50 +393,47 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_base = stolen_top;
}
- if (reserved_base < dev_priv->mm.stolen_base ||
- reserved_base + reserved_size > stolen_top) {
- dma_addr_t reserved_top = reserved_base + reserved_size;
- DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
- &reserved_base, &reserved_top,
- &dev_priv->mm.stolen_base, &stolen_top);
+ dev_priv->dsm_reserved =
+ (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);
+
+ if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
+ DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
+ &dev_priv->dsm_reserved, &dev_priv->dsm);
return 0;
}
- ggtt->stolen_reserved_base = reserved_base;
- ggtt->stolen_reserved_size = reserved_size;
-
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
- DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
- ggtt->stolen_size >> 10,
- (ggtt->stolen_size - reserved_total) >> 10);
+ DRM_DEBUG_KMS("Memory reserved for graphics device: %lluK, usable: %lluK\n",
+ (u64)resource_size(&dev_priv->dsm) >> 10,
+ ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
stolen_usable_start = 0;
/* WaSkipStolenMemoryFirstPage:bdw+ */
if (INTEL_GEN(dev_priv) >= 8)
stolen_usable_start = 4096;
- ggtt->stolen_usable_size =
- ggtt->stolen_size - reserved_total - stolen_usable_start;
+ dev_priv->stolen_usable_size =
+ resource_size(&dev_priv->dsm) - reserved_total - stolen_usable_start;
/* Basic memrange allocator for stolen space. */
drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
- ggtt->stolen_usable_size);
+ dev_priv->stolen_usable_size);
return 0;
}
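The stolen-memory code above now tracks the DSM and its reserved portion as struct resource ranges, so range construction, sizing, and containment checks come from <linux/ioport.h> instead of open-coded start/end arithmetic. A small kernel-style sketch of that idiom (the helper is illustrative, not driver code):

#include <linux/ioport.h>

static bool stolen_reserved_fits(struct resource *dsm,
                                 resource_size_t base, resource_size_t size)
{
        struct resource reserved = DEFINE_RES_MEM(base, size);

        /*
         * resource_contains() replaces the old open-coded start/end
         * comparisons, and %pR can print either range in one go.
         */
        return resource_contains(dsm, &reserved);
}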
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
- u32 offset, u32 size)
+ resource_size_t offset, resource_size_t size)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -533,7 +453,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+ sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
sg_dma_len(sg) = size;
return st;
@@ -596,7 +516,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->stolen = stolen;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
@@ -611,7 +531,8 @@ cleanup:
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
{
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
@@ -644,9 +565,9 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- u32 stolen_offset,
- u32 gtt_offset,
- u32 size)
+ resource_size_t stolen_offset,
+ resource_size_t gtt_offset,
+ resource_size_t size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
@@ -659,8 +580,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
- stolen_offset, gtt_offset, size);
+ DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
+ &stolen_offset, &gtt_offset, &size);
/* KISS and expect everything to be page-aligned */
if (WARN_ON(size == 0) ||
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1294cf6..d9dc9df 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -205,10 +205,7 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
if (tiling_mode == I915_TILING_NONE)
return 0;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj) {
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
@@ -285,10 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
}
mutex_unlock(&obj->mm.lock);
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj) {
vma->fence_size =
i915_gem_fence_size(i915, vma->size, tiling, stride);
vma->fence_alignment =
@@ -345,6 +339,15 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
+ /*
+ * The tiling mode of proxy objects is handled by their generator, and
+ * not allowed to be changed by userspace.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ err = -ENXIO;
+ goto err;
+ }
+
if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
err = -EINVAL;
goto err;
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index c597ce2..e9fd876 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -33,11 +33,8 @@ static void __intel_timeline_init(struct intel_timeline *tl,
{
tl->fence_context = context;
tl->common = parent;
-#ifdef CONFIG_DEBUG_SPINLOCK
- __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
-#else
spin_lock_init(&tl->lock);
-#endif
+ lockdep_set_class_and_name(&tl->lock, lockclass, lockname);
init_request_active(&tl->last_request, NULL);
INIT_LIST_HEAD(&tl->requests);
i915_syncmap_init(&tl->sync);
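The timeline init above replaces the CONFIG_DEBUG_SPINLOCK special case with a plain spin_lock_init() followed by lockdep_set_class_and_name(), which compiles away when lockdep is disabled. A minimal sketch of that idiom (my_timeline is a placeholder type, not driver code):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct my_timeline {
        spinlock_t lock;
};

static void my_timeline_init(struct my_timeline *tl,
                             struct lock_class_key *lockclass,
                             const char *lockname)
{
        spin_lock_init(&tl->lock);
        /* No-op unless lockdep is enabled; no raw-lock poking required. */
        lockdep_set_class_and_name(&tl->lock, lockclass, lockname);
}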
@@ -107,8 +104,8 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
}
/**
- * i915_gem_timelines_mark_idle -- called when the driver idles
- * @i915 - the drm_i915_private device
+ * i915_gem_timelines_park - called when the driver idles
+ * @i915: the drm_i915_private device
*
* When the driver is completely idle, we know that all of our sync points
* have been signaled and our tracking is then entirely redundant. Any request
@@ -116,7 +113,7 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
* the fence is signaled and therefore we will not even look them up in the
* sync point map.
*/
-void i915_gem_timelines_mark_idle(struct drm_i915_private *i915)
+void i915_gem_timelines_park(struct drm_i915_private *i915)
{
struct i915_gem_timeline *timeline;
int i;
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index bfb5eb9..33e01bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -27,9 +27,9 @@
#include <linux/list.h>
-#include "i915_utils.h"
-#include "i915_gem_request.h"
+#include "i915_request.h"
#include "i915_syncmap.h"
+#include "i915_utils.h"
struct i915_gem_timeline;
@@ -93,7 +93,7 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *tl,
const char *name);
int i915_gem_timeline_init__global(struct drm_i915_private *i915);
-void i915_gem_timelines_mark_idle(struct drm_i915_private *i915);
+void i915_gem_timelines_park(struct drm_i915_private *i915);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 382a77a..d596a83 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -721,7 +721,7 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.release = i915_gem_userptr_release,
};
-/**
+/*
* Creates a new mm object that wraps some normal memory from the process
* context - user memory.
*
@@ -757,7 +757,9 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
* dma-buf instead.
*/
int
-i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+i915_gem_userptr_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
@@ -796,8 +798,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
drm_gem_private_object_init(dev, &obj->base, args->user_size);
i915_gem_object_init(obj, &i915_gem_userptr_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 653fb69..f89ac7a8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -30,18 +30,29 @@
#include <generated/utsrelease.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
-static const char *engine_str(int engine)
+static inline const struct intel_engine_cs *
+engine_lookup(const struct drm_i915_private *i915, unsigned int id)
{
- switch (engine) {
- case RCS: return "render";
- case VCS: return "bsd";
- case BCS: return "blt";
- case VECS: return "vebox";
- case VCS2: return "bsd2";
- default: return "";
- }
+ if (id >= I915_NUM_ENGINES)
+ return NULL;
+
+ return i915->engine[id];
+}
+
+static inline const char *
+__engine_name(const struct intel_engine_cs *engine)
+{
+ return engine ? engine->name : "";
+}
+
+static const char *
+engine_name(const struct drm_i915_private *i915, unsigned int id)
+{
+ return __engine_name(engine_lookup(i915, id));
}
static const char *tiling_flag(int tiling)
@@ -175,6 +186,21 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
+static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
+{
+ i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
+}
+
+static inline struct drm_printer
+i915_error_printer(struct drm_i915_error_state_buf *e)
+{
+ struct drm_printer p = {
+ .printfn = __i915_printfn_error,
+ .arg = e,
+ };
+ return p;
+}
+
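i915_error_printer() above adapts the error-state buffer to struct drm_printer, so shared dump helpers can write into it through the generic printfn/arg pair. A hedged sketch of wiring a custom printer the same way (my_printfn/my_printer are illustrative names, not driver functions):

#include <linux/printk.h>
#include <drm/drm_print.h>

static void my_printfn(struct drm_printer *p, struct va_format *vaf)
{
        /* p->arg is whatever the constructor below stashed there. */
        pr_info("%s: %pV", (const char *)p->arg, vaf);
}

static struct drm_printer my_printer(const char *prefix)
{
        struct drm_printer p = {
                .printfn = my_printfn,
                .arg = (void *)prefix,
        };

        return p;
}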
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
struct compress {
@@ -328,7 +354,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err_puts(m, purgeable_flag(err->purgeable));
err_puts(m, err->userptr ? " userptr" : "");
err_puts(m, err->engine != -1 ? " " : "");
- err_puts(m, engine_str(err->engine));
+ err_puts(m, engine_name(m->i915, err->engine));
err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
if (err->name)
@@ -370,6 +396,11 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
ee->instdone.row[slice][subslice]);
}
+static const char *bannable(const struct drm_i915_error_context *ctx)
+{
+ return ctx->bannable ? "" : " (unbannable)";
+}
+
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
const struct drm_i915_error_request *erq)
@@ -388,9 +419,10 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
- err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n",
+ err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
- ctx->priority, ctx->ban_score, ctx->guilty, ctx->active);
+ ctx->priority, ctx->ban_score, bannable(ctx),
+ ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -398,7 +430,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
{
int n;
- err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
+ err_printf(m, "%s command stream:\n",
+ engine_name(m->i915, ee->engine_id));
+ err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
@@ -545,36 +579,22 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
- const struct intel_device_info *info)
-{
-#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
-}
-
-static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
- const char *name,
- const char *type,
- const void *x)
-{
- if (!__builtin_strcmp(type, "bool"))
- err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- err_printf(m, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "char *"))
- err_printf(m, "i915.%s=%s\n", name, *(const char **)x);
- else
- BUILD_BUG();
+ const struct intel_device_info *info,
+ const struct intel_driver_caps *caps)
+{
+ struct drm_printer p = i915_error_printer(m);
+
+ intel_device_info_dump_flags(info, &p);
+ intel_driver_caps_print(caps, &p);
+ intel_device_info_dump_topology(&info->sseu, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
- const struct i915_params *p)
+ const struct i915_params *params)
{
-#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x);
- I915_PARAMS_FOR_EACH(PRINT);
-#undef PRINT
+ struct drm_printer p = i915_error_printer(m);
+
+ i915_params_dump(params, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
@@ -589,11 +609,27 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
}
+static void err_print_uc(struct drm_i915_error_state_buf *m,
+ const struct i915_error_uc *error_uc)
+{
+ struct drm_printer p = i915_error_printer(m);
+ const struct i915_gpu_state *error =
+ container_of(error_uc, typeof(*error), uc);
+
+ if (!error->device_info.has_guc)
+ return;
+
+ intel_uc_fw_dump(&error_uc->guc_fw, &p);
+ intel_uc_fw_dump(&error_uc->huc_fw, &p);
+ print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
+}
+
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_gpu_state *error)
{
struct drm_i915_private *dev_priv = m->i915;
struct drm_i915_error_object *obj;
+ struct timespec64 ts;
int i, j;
if (!error) {
@@ -604,21 +640,25 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
- err_printf(m, "Time: %ld s %ld us\n",
- error->time.tv_sec, error->time.tv_usec);
- err_printf(m, "Boottime: %ld s %ld us\n",
- error->boottime.tv_sec, error->boottime.tv_usec);
- err_printf(m, "Uptime: %ld s %ld us\n",
- error->uptime.tv_sec, error->uptime.tv_usec);
+ ts = ktime_to_timespec64(error->time);
+ err_printf(m, "Time: %lld s %ld us\n",
+ (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
+ ts = ktime_to_timespec64(error->boottime);
+ err_printf(m, "Boottime: %lld s %ld us\n",
+ (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
+ ts = ktime_to_timespec64(error->uptime);
+ err_printf(m, "Uptime: %lld s %ld us\n",
+ (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
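The capture path now stores ktime_t values and converts to timespec64 only at print time, avoiding the 32-bit struct timeval. A small sketch of that conversion (show_stamp is a placeholder helper):

#include <linux/ktime.h>
#include <linux/printk.h>
#include <linux/time64.h>

static void show_stamp(const char *what, ktime_t t)
{
        struct timespec64 ts = ktime_to_timespec64(t);

        pr_info("%s: %lld s %ld us\n",
                what, (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
}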
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
if (error->engine[i].hangcheck_stalled &&
error->engine[i].context.pid) {
- err_printf(m, "Active process (on ring %s): %s [%d], score %d\n",
- engine_str(i),
+ err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
+ engine_name(m->i915, i),
error->engine[i].context.comm,
error->engine[i].context.pid,
- error->engine[i].context.ban_score);
+ error->engine[i].context.ban_score,
+ bannable(&error->engine[i].context));
}
}
err_printf(m, "Reset count: %u\n", error->reset_count);
@@ -706,12 +746,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (obj) {
err_puts(m, dev_priv->engine[i]->name);
if (ee->context.pid)
- err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d)",
+ err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
ee->context.comm,
ee->context.pid,
ee->context.handle,
ee->context.hw_id,
- ee->context.ban_score);
+ ee->context.ban_score,
+ bannable(&ee->context));
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@@ -759,11 +800,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
print_error_obj(m, dev_priv->engine[i],
"WA batchbuffer", ee->wa_batchbuffer);
- }
- print_error_obj(m, NULL, "Semaphores", error->semaphore);
-
- print_error_obj(m, NULL, "GuC log buffer", error->guc_log);
+ print_error_obj(m, dev_priv->engine[i],
+ "NULL context", ee->default_state);
+ }
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -771,8 +811,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (error->display)
intel_display_print_error_state(m, error->display);
- err_print_capabilities(m, &error->device_info);
+ err_print_capabilities(m, &error->device_info, &error->driver_caps);
err_print_params(m, &error->params);
+ err_print_uc(m, &error->uc);
if (m->bytes == 0 && m->err)
return m->err;
@@ -831,6 +872,22 @@ static __always_inline void free_param(const char *type, void *x)
kfree(*(void **)x);
}
+static void cleanup_params(struct i915_gpu_state *error)
+{
+#define FREE(T, x, ...) free_param(#T, &error->params.x);
+ I915_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
+
+static void cleanup_uc_state(struct i915_gpu_state *error)
+{
+ struct i915_error_uc *error_uc = &error->uc;
+
+ kfree(error_uc->guc_fw.path);
+ kfree(error_uc->huc_fw.path);
+ i915_error_object_free(error_uc->guc_log);
+}
+
void __i915_gpu_state_free(struct kref *error_ref)
{
struct i915_gpu_state *error =
@@ -856,9 +913,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(ee->waiters);
}
- i915_error_object_free(error->semaphore);
- i915_error_object_free(error->guc_log);
-
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
kfree(error->pinned_bo);
@@ -866,9 +920,8 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(error->overlay);
kfree(error->display);
-#define FREE(T, x, ...) free_param(#T, &error->params.x);
- I915_PARAMS_FOR_EACH(FREE);
-#undef FREE
+ cleanup_params(error);
+ cleanup_uc_state(error);
kfree(error);
}
@@ -912,7 +965,7 @@ i915_error_object_create(struct drm_i915_private *i915,
ggtt->base.insert_page(&ggtt->base, dma, slot,
I915_CACHE_NONE, 0);
- s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+ s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
io_mapping_unmap_atomic(s);
@@ -939,7 +992,7 @@ out:
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = __i915_gem_active_peek(active);
return request ? request->global_seqno : 0;
@@ -948,7 +1001,7 @@ __active_get_seqno(struct i915_gem_active *active)
static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = __i915_gem_active_peek(active);
return request ? request->engine->id : -1;
@@ -969,8 +1022,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->engine = __active_get_engine_id(&obj->frontbuffer_write);
err->gtt_offset = vma->node.start;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
+ err->read_domains = obj->read_domains;
+ err->write_domain = obj->write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj);
err->dirty = obj->mm.dirty;
@@ -1032,9 +1085,9 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
return error_code;
}
-static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void gem_record_fences(struct i915_gpu_state *error)
{
+ struct drm_i915_private *dev_priv = error->i915;
int i;
if (INTEL_GEN(dev_priv) >= 6) {
@@ -1050,55 +1103,6 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
error->nfence = i;
}
-static inline u32
-gen8_engine_sync_index(struct intel_engine_cs *engine,
- struct intel_engine_cs *other)
-{
- int idx;
-
- /*
- * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
- * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
- * bcs -> 0 = vecs, 1 = vcs2. 2 = rcs, 3 = vcs;
- * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
- * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
- */
-
- idx = (other - engine) - 1;
- if (idx < 0)
- idx += I915_NUM_ENGINES;
-
- return idx;
-}
-
-static void gen8_record_semaphore_state(struct i915_gpu_state *error,
- struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *to;
- enum intel_engine_id id;
-
- if (!error->semaphore)
- return;
-
- for_each_engine(to, dev_priv, id) {
- int idx;
- u16 signal_offset;
- u32 *tmp;
-
- if (engine == to)
- continue;
-
- signal_offset =
- (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
- tmp = error->semaphore->pages[0];
- idx = gen8_engine_sync_index(engine, to);
-
- ee->semaphore_mboxes[idx] = tmp[signal_offset];
- }
-}
-
static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
@@ -1172,11 +1176,12 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (INTEL_GEN(dev_priv) >= 6) {
ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
- ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (INTEL_GEN(dev_priv) >= 8)
- gen8_record_semaphore_state(error, engine, ee);
- else
+ if (INTEL_GEN(dev_priv) >= 8) {
+ ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
+ } else {
gen6_record_semaphore_state(engine, ee);
+ ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
+ }
}
if (INTEL_GEN(dev_priv) >= 4) {
@@ -1239,6 +1244,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->hws = I915_READ(mmio);
}
+ ee->idle = intel_engine_is_idle(engine);
ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action;
ee->hangcheck_stalled = engine->hangcheck.stalled;
@@ -1267,7 +1273,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
}
}
-static void record_request(struct drm_i915_gem_request *request,
+static void record_request(struct i915_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
@@ -1284,10 +1290,10 @@ static void record_request(struct drm_i915_gem_request *request,
}
static void engine_record_requests(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *first,
+ struct i915_request *first,
struct drm_i915_error_engine *ee)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int count;
count = 0;
@@ -1337,7 +1343,7 @@ static void error_record_engine_execlists(struct intel_engine_cs *engine,
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
+ struct i915_request *rq = port_request(&execlists->port[n]);
if (!rq)
break;
@@ -1367,14 +1373,15 @@ static void record_context(struct drm_i915_error_context *e,
e->hw_id = ctx->hw_id;
e->priority = ctx->priority;
e->ban_score = atomic_read(&ctx->ban_score);
+ e->bannable = i915_gem_context_is_bannable(ctx);
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
}
-static void request_record_user_bo(struct drm_i915_gem_request *request,
+static void request_record_user_bo(struct i915_request *request,
struct drm_i915_error_engine *ee)
{
- struct i915_gem_capture_list *c;
+ struct i915_capture_list *c;
struct drm_i915_error_object **bo;
long count;
@@ -1400,19 +1407,34 @@ static void request_record_user_bo(struct drm_i915_gem_request *request,
ee->user_bo_count = count;
}
-static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static struct drm_i915_error_object *
+capture_object(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *obj)
+{
+ if (obj && i915_gem_object_has_pages(obj)) {
+ struct i915_vma fake = {
+ .node = { .start = U64_MAX, .size = obj->base.size },
+ .size = obj->base.size,
+ .pages = obj->mm.pages,
+ .obj = obj,
+ };
+
+ return i915_error_object_create(dev_priv, &fake);
+ } else {
+ return NULL;
+ }
+}
+
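capture_object() above fakes a minimal on-stack i915_vma so i915_error_object_create() can snapshot an object that has no real binding; node.start = U64_MAX marks it as unbound. A userspace analogue of that adapter-on-the-stack trick (view/capture are invented names, not i915 code):

#include <stdint.h>
#include <stdio.h>

struct view {
        uint64_t start;                 /* UINT64_MAX == "not bound anywhere" */
        uint64_t size;
        const void *pages;
};

static void capture(const struct view *v)
{
        printf("capturing %llu bytes (bound: %s)\n",
               (unsigned long long)v->size,
               v->start == UINT64_MAX ? "no" : "yes");
}

int main(void)
{
        static const char payload[4096];
        struct view fake = {
                .start = UINT64_MAX,    /* like node.start = U64_MAX above */
                .size  = sizeof(payload),
                .pages = payload,
        };

        capture(&fake);
        return 0;
}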
+static void gem_record_rings(struct i915_gpu_state *error)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *i915 = error->i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
int i;
- error->semaphore =
- i915_error_object_create(dev_priv, dev_priv->semaphore);
-
for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = dev_priv->engine[i];
+ struct intel_engine_cs *engine = i915->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
ee->engine_id = -1;
@@ -1439,17 +1461,16 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
* by userspace.
*/
ee->batchbuffer =
- i915_error_object_create(dev_priv,
- request->batch);
+ i915_error_object_create(i915, request->batch);
- if (HAS_BROKEN_CS_TLB(dev_priv))
+ if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
- i915_error_object_create(dev_priv,
+ i915_error_object_create(i915,
engine->scratch);
request_record_user_bo(request, ee);
ee->ctx =
- i915_error_object_create(dev_priv,
+ i915_error_object_create(i915,
request->ctx->engine[i].state);
error->simulated |=
@@ -1463,24 +1484,24 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
ee->ringbuffer =
- i915_error_object_create(dev_priv, ring->vma);
+ i915_error_object_create(i915, ring->vma);
engine_record_requests(engine, request, ee);
}
ee->hws_page =
- i915_error_object_create(dev_priv,
+ i915_error_object_create(i915,
engine->status_page.vma);
- ee->wa_ctx =
- i915_error_object_create(dev_priv, engine->wa_ctx.vma);
+ ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma);
+
+ ee->default_state = capture_object(i915, engine->default_state);
}
}
-static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error,
- struct i915_address_space *vm,
- int idx)
+static void gem_capture_vm(struct i915_gpu_state *error,
+ struct i915_address_space *vm,
+ int idx)
{
struct drm_i915_error_buffer *active_bo;
struct i915_vma *vma;
@@ -1503,8 +1524,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
error->active_bo_count[idx] = count;
}
-static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_active_buffers(struct i915_gpu_state *error)
{
int cnt = 0, i, j;
@@ -1524,14 +1544,13 @@ static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
for (j = 0; j < i && !found; j++)
found = error->engine[j].vm == ee->vm;
if (!found)
- i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
+ gem_capture_vm(error, ee->vm, cnt++);
}
}
-static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_pinned_buffers(struct i915_gpu_state *error)
{
- struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct i915_address_space *vm = &error->i915->ggtt.base;
struct drm_i915_error_buffer *bo;
struct i915_vma *vma;
int count_inactive, count_active;
@@ -1559,21 +1578,31 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo = bo;
}
-static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_uc_state(struct i915_gpu_state *error)
{
- /* Capturing log buf contents won't be useful if logging was disabled */
- if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0))
+ struct drm_i915_private *i915 = error->i915;
+ struct i915_error_uc *error_uc = &error->uc;
+
+ /* Capturing uC state won't be useful if there is no GuC */
+ if (!error->device_info.has_guc)
return;
- error->guc_log = i915_error_object_create(dev_priv,
- dev_priv->guc.log.vma);
+ error_uc->guc_fw = i915->guc.fw;
+ error_uc->huc_fw = i915->huc.fw;
+
+ /* Non-default firmware paths will be specified by the modparam.
+ * As modparams are generally accessible from userspace, make
+ * explicit copies of the firmware paths.
+ */
+ error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
+ error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
+ error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
}
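The uC capture above duplicates the firmware path strings because they come from modparams that userspace can change; the copies are taken with GFP_ATOMIC since error capture may not be allowed to sleep, and they are freed later by cleanup_uc_state(). A one-line sketch of that snapshotting (snapshot_fw_path is a placeholder):

#include <linux/slab.h>
#include <linux/string.h>

static const char *snapshot_fw_path(const char *modparam_path)
{
        /* GFP_ATOMIC: error capture may run where sleeping is not allowed. */
        return kstrdup(modparam_path, GFP_ATOMIC);
}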
/* Capture all registers which don't fit into another category. */
-static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_reg_state(struct i915_gpu_state *error)
{
+ struct drm_i915_private *dev_priv = error->i915;
int i;
/* General organization
@@ -1670,23 +1699,25 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
engine_mask ? "reset" : "continue");
}
-static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error)
+static void capture_gen_state(struct i915_gpu_state *error)
{
- error->awake = dev_priv->gt.awake;
- error->wakelock = atomic_read(&dev_priv->runtime_pm.wakeref_count);
- error->suspended = dev_priv->runtime_pm.suspended;
+ struct drm_i915_private *i915 = error->i915;
+
+ error->awake = i915->gt.awake;
+ error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
+ error->suspended = i915->runtime_pm.suspended;
error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
error->iommu = intel_iommu_gfx_mapped;
#endif
- error->reset_count = i915_reset_count(&dev_priv->gpu_error);
- error->suspend_count = dev_priv->suspend_count;
+ error->reset_count = i915_reset_count(&i915->gpu_error);
+ error->suspend_count = i915->suspend_count;
memcpy(&error->device_info,
- INTEL_INFO(dev_priv),
+ INTEL_INFO(i915),
sizeof(error->device_info));
+ error->driver_caps = i915->caps;
}
static __always_inline void dup_param(const char *type, void *x)
@@ -1695,28 +1726,31 @@ static __always_inline void dup_param(const char *type, void *x)
*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}
-static int capture(void *data)
+static void capture_params(struct i915_gpu_state *error)
{
- struct i915_gpu_state *error = data;
-
- do_gettimeofday(&error->time);
- error->boottime = ktime_to_timeval(ktime_get_boottime());
- error->uptime =
- ktime_to_timeval(ktime_sub(ktime_get(),
- error->i915->gt.last_init_time));
-
error->params = i915_modparams;
#define DUP(T, x, ...) dup_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
+}
+
+static int capture(void *data)
+{
+ struct i915_gpu_state *error = data;
+
+ error->time = ktime_get_real();
+ error->boottime = ktime_get_boottime();
+ error->uptime = ktime_sub(ktime_get(),
+ error->i915->gt.last_init_time);
- i915_capture_gen_state(error->i915, error);
- i915_capture_reg_state(error->i915, error);
- i915_gem_record_fences(error->i915, error);
- i915_gem_record_rings(error->i915, error);
- i915_capture_active_buffers(error->i915, error);
- i915_capture_pinned_buffers(error->i915, error);
- i915_gem_capture_guc_log_buffer(error->i915, error);
+ capture_params(error);
+ capture_gen_state(error);
+ capture_uc_state(error);
+ capture_reg_state(error);
+ gem_record_fences(error);
+ gem_record_rings(error);
+ capture_active_buffers(error);
+ capture_pinned_buffers(error);
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);
@@ -1745,14 +1779,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
/**
* i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
+ * @i915: i915 device
+ * @engine_mask: the mask of engines triggering the hang
+ * @error_msg: a message to insert into the error capture header
*
* Should be called when an error is detected (either a hang or an error
* interrupt) to capture error state from the time of the error. Fills
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_i915_private *dev_priv,
+void i915_capture_error_state(struct drm_i915_private *i915,
u32 engine_mask,
const char *error_msg)
{
@@ -1763,25 +1799,25 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
if (!i915_modparams.error_capture)
return;
- if (READ_ONCE(dev_priv->gpu_error.first_error))
+ if (READ_ONCE(i915->gpu_error.first_error))
return;
- error = i915_capture_gpu_state(dev_priv);
+ error = i915_capture_gpu_state(i915);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
- i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
+ i915_error_capture_msg(i915, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
if (!error->simulated) {
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- if (!dev_priv->gpu_error.first_error) {
- dev_priv->gpu_error.first_error = error;
+ spin_lock_irqsave(&i915->gpu_error.lock, flags);
+ if (!i915->gpu_error.first_error) {
+ i915->gpu_error.first_error = error;
error = NULL;
}
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ spin_unlock_irqrestore(&i915->gpu_error.lock, flags);
}
if (error) {
@@ -1796,7 +1832,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- dev_priv->drm.primary->index);
+ i915->drm.primary->index);
warned = true;
}
}
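i915_capture_error_state() above publishes at most one record: capture is skipped when first_error is already set, and the fresh record is installed under gpu_error.lock only if the slot is still empty. A condensed sketch of that publish-once step, using the names from the hunk; the cleanup call is an assumed helper, not part of this diff:

	spin_lock_irqsave(&i915->gpu_error.lock, flags);
	if (!i915->gpu_error.first_error) {
		i915->gpu_error.first_error = error;	/* debugfs now owns it */
		error = NULL;
	}
	spin_unlock_irqrestore(&i915->gpu_error.lock, flags);

	if (error)
		i915_gpu_state_put(error);	/* assumed cleanup helper for the loser */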
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 97f3a56..0e5c580 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -1,11 +1,6 @@
-/**
- * \file i915_ioc32.c
- *
+/*
* 32-bit ioctl compatibility routines for the i915 DRM.
*
- * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
- *
- *
* Copyright (C) Paul Mackerras 2005
* Copyright (C) Alan Hourihane 2005
* All Rights Reserved.
@@ -28,6 +23,8 @@
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
+ *
+ * Author: Alan Hourihane <alanh@fairlite.demon.co.uk>
*/
#include <linux/compat.h>
@@ -55,10 +52,10 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+ __put_user(req32.param, &request->param) ||
+ __put_user((void __user *)(unsigned long)req32.value,
+ &request->value))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
@@ -70,13 +67,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
};
/**
+ * i915_compat_ioctl - handle the mistakes of the past
+ * @filp: the file pointer
+ * @cmd: the ioctl command (and encoded flags)
+ * @arg: the ioctl argument (from userspace)
+ *
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
- *
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
*/
long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
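compat_i915_getparam() above follows the usual 32-bit shim shape: read the 32-bit layout from userspace, allocate a native-sized request on the compat user stack, widen the pointer field, and forward to drm_ioctl(). A hedged sketch of that shape; the 32-bit struct layout shown here is assumed for illustration:

	/* Illustrative compat shim; the 32-bit layout is an assumption. */
	struct i915_getparam32 {
		s32 param;
		u32 value;	/* 32-bit user pointer, widened below */
	};

	static int compat_getparam(struct file *file, unsigned int cmd, unsigned long arg)
	{
		struct i915_getparam32 req32;
		drm_i915_getparam_t __user *request;

		if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
			return -EFAULT;

		request = compat_alloc_user_space(sizeof(*request));
		if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
		    __put_user(req32.param, &request->param) ||
		    __put_user((void __user *)(unsigned long)req32.value,
			       &request->value))
			return -EFAULT;

		return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
	}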
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f820584..633c187 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -415,6 +415,9 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
if (READ_ONCE(rps->interrupts_enabled))
return;
+ if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
+ return;
+
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON_ONCE(rps->pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
@@ -431,6 +434,9 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
if (!READ_ONCE(rps->interrupts_enabled))
return;
+ if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
+ return;
+
spin_lock_irq(&dev_priv->irq_lock);
rps->interrupts_enabled = false;
@@ -452,6 +458,8 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
+ assert_rpm_wakelock_held(dev_priv);
+
spin_lock_irq(&dev_priv->irq_lock);
gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -459,6 +467,8 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
+ assert_rpm_wakelock_held(dev_priv);
+
spin_lock_irq(&dev_priv->irq_lock);
if (!dev_priv->guc.interrupts_enabled) {
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
@@ -471,6 +481,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
+ assert_rpm_wakelock_held(dev_priv);
+
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->guc.interrupts_enabled = false;
@@ -1065,9 +1077,12 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *rq = NULL;
+ struct i915_request *rq = NULL;
struct intel_wait *wait;
+ if (!engine->breadcrumbs.irq_armed)
+ return;
+
atomic_inc(&engine->irq_count);
set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
@@ -1089,25 +1104,27 @@ static void notify_ring(struct intel_engine_cs *engine)
*/
if (i915_seqno_passed(intel_engine_get_seqno(engine),
wait->seqno)) {
- struct drm_i915_gem_request *waiter = wait->request;
+ struct i915_request *waiter = wait->request;
wakeup = true;
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&waiter->fence.flags) &&
intel_wait_check_request(wait, waiter))
- rq = i915_gem_request_get(waiter);
+ rq = i915_request_get(waiter);
}
if (wakeup)
wake_up_process(wait->tsk);
} else {
- __intel_engine_disarm_breadcrumbs(engine);
+ if (engine->breadcrumbs.irq_armed)
+ __intel_engine_disarm_breadcrumbs(engine);
}
spin_unlock(&engine->breadcrumbs.irq_lock);
if (rq) {
dma_fence_signal(&rq->fence);
- i915_gem_request_put(rq);
+ GEM_BUG_ON(!i915_request_completed(rq));
+ i915_request_put(rq);
}
trace_intel_engine_notify(engine, wait);
@@ -1396,87 +1413,80 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
notify_ring(engine);
- tasklet |= i915_modparams.enable_guc_submission;
+ tasklet |= USES_GUC_SUBMISSION(engine->i915);
}
if (tasklet)
- tasklet_hi_schedule(&execlists->irq_tasklet);
+ tasklet_hi_schedule(&execlists->tasklet);
}
-static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
- u32 master_ctl,
- u32 gt_iir[4])
+static void gen8_gt_irq_ack(struct drm_i915_private *i915,
+ u32 master_ctl, u32 gt_iir[4])
{
- irqreturn_t ret = IRQ_NONE;
+ void __iomem * const regs = i915->regs;
+
+#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
+ GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS1_IRQ | \
+ GEN8_GT_VCS2_IRQ | \
+ GEN8_GT_VECS_IRQ | \
+ GEN8_GT_PM_IRQ | \
+ GEN8_GT_GUC_IRQ)
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
- if (gt_iir[0]) {
- I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
- ret = IRQ_HANDLED;
- } else
- DRM_ERROR("The master control interrupt lied (GT0)!\n");
+ gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
+ if (likely(gt_iir[0]))
+ raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
}
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
- gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
- if (gt_iir[1]) {
- I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
- ret = IRQ_HANDLED;
- } else
- DRM_ERROR("The master control interrupt lied (GT1)!\n");
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
- if (gt_iir[3]) {
- I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
- ret = IRQ_HANDLED;
- } else
- DRM_ERROR("The master control interrupt lied (GT3)!\n");
+ gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
+ if (likely(gt_iir[1]))
+ raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
- if (gt_iir[2] & (dev_priv->pm_rps_events |
- dev_priv->pm_guc_events)) {
- I915_WRITE_FW(GEN8_GT_IIR(2),
- gt_iir[2] & (dev_priv->pm_rps_events |
- dev_priv->pm_guc_events));
- ret = IRQ_HANDLED;
- } else
- DRM_ERROR("The master control interrupt lied (PM)!\n");
+ gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
+ if (likely(gt_iir[2] & (i915->pm_rps_events |
+ i915->pm_guc_events)))
+ raw_reg_write(regs, GEN8_GT_IIR(2),
+ gt_iir[2] & (i915->pm_rps_events |
+ i915->pm_guc_events));
}
- return ret;
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
+ if (likely(gt_iir[3]))
+ raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
+ }
}
-static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 gt_iir[4])
+static void gen8_gt_irq_handler(struct drm_i915_private *i915,
+ u32 master_ctl, u32 gt_iir[4])
{
- if (gt_iir[0]) {
- gen8_cs_irq_handler(dev_priv->engine[RCS],
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ gen8_cs_irq_handler(i915->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(dev_priv->engine[BCS],
+ gen8_cs_irq_handler(i915->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
- if (gt_iir[1]) {
- gen8_cs_irq_handler(dev_priv->engine[VCS],
+ if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
+ gen8_cs_irq_handler(i915->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(dev_priv->engine[VCS2],
+ gen8_cs_irq_handler(i915->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
- if (gt_iir[3])
- gen8_cs_irq_handler(dev_priv->engine[VECS],
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ gen8_cs_irq_handler(i915->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
+ }
- if (gt_iir[2] & dev_priv->pm_rps_events)
- gen6_rps_irq_handler(dev_priv, gt_iir[2]);
-
- if (gt_iir[2] & dev_priv->pm_guc_events)
- gen9_guc_irq_handler(dev_priv, gt_iir[2]);
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ gen6_rps_irq_handler(i915, gt_iir[2]);
+ gen9_guc_irq_handler(i915, gt_iir[2]);
+ }
}
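Splitting gen8_gt_irq_ack() from gen8_gt_irq_handler() separates acknowledging the GT sources (raw register accesses while the master interrupt is still disabled) from processing them, keyed on master_ctl rather than re-testing the cached IIR values. Condensed from the gen8_irq_handler() changes later in this patch:

	/* Two-phase pattern: ack and latch first, process afterwards. */
	u32 gt_iir[4];

	gen8_gt_irq_ack(i915, master_ctl, gt_iir);	/* phase 1: ack + latch IIRs */
	/* ... display/other sources handled in between ... */
	gen8_gt_irq_handler(i915, master_ctl, gt_iir);	/* phase 2: fan out to engines */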
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
@@ -1564,10 +1574,11 @@ static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
*
* Note that the caller is expected to zero out the masks initially.
*/
-static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
- u32 hotplug_trigger, u32 dig_hotplug_reg,
- const u32 hpd[HPD_NUM_PINS],
- bool long_pulse_detect(enum port port, u32 val))
+static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
+ u32 *pin_mask, u32 *long_mask,
+ u32 hotplug_trigger, u32 dig_hotplug_reg,
+ const u32 hpd[HPD_NUM_PINS],
+ bool long_pulse_detect(enum port port, u32 val))
{
enum port port;
int i;
@@ -1578,7 +1589,7 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
*pin_mask |= BIT(i);
- port = intel_hpd_pin_to_port(i);
+ port = intel_hpd_pin_to_port(dev_priv, i);
if (port == PORT_NONE)
continue;
@@ -1966,8 +1977,9 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
if (hotplug_trigger) {
- intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
- hotplug_trigger, hpd_status_g4x,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, hotplug_trigger,
+ hpd_status_g4x,
i9xx_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1979,8 +1991,9 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
if (hotplug_trigger) {
- intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
- hotplug_trigger, hpd_status_i915,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, hotplug_trigger,
+ hpd_status_i915,
i9xx_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
@@ -2088,9 +2101,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
do {
u32 master_ctl, iir;
- u32 gt_iir[4] = {};
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
+ u32 gt_iir[4];
u32 ier = 0;
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
@@ -2143,7 +2156,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
- gen8_gt_irq_handler(dev_priv, gt_iir);
+ gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2181,7 +2194,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!hotplug_trigger)
return;
- intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd,
pch_port_hotplug_long_detect);
@@ -2323,8 +2336,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
- dig_hotplug_reg, hpd_spt,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug_trigger, dig_hotplug_reg, hpd_spt,
spt_port_hotplug_long_detect);
}
@@ -2334,8 +2347,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
- intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
- dig_hotplug_reg, hpd_spt,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ hotplug2_trigger, dig_hotplug_reg, hpd_spt,
spt_port_hotplug2_long_detect);
}
@@ -2355,7 +2368,7 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
- intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd,
ilk_port_hotplug_long_detect);
@@ -2532,7 +2545,7 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
dig_hotplug_reg, hpd,
bxt_port_hotplug_long_detect);
@@ -2575,6 +2588,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
+ if (IS_CNL_WITH_PORT_F(dev_priv))
+ tmp_mask |= CNL_AUX_CHANNEL_F;
+
if (iir & tmp_mask) {
dp_aux_irq_handler(dev_priv);
found = true;
@@ -2675,11 +2691,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(arg);
u32 master_ctl;
- u32 gt_iir[4] = {};
- irqreturn_t ret;
+ u32 gt_iir[4];
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -2691,20 +2705,21 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
- /* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
-
/* Find, clear, then process each source of interrupt */
- ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
- gen8_gt_irq_handler(dev_priv, gt_iir);
- ret |= gen8_de_irq_handler(dev_priv, master_ctl);
+ gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+
+ /* IRQs are synced during runtime_suspend, we don't require a wakeref */
+ if (master_ctl & ~GEN8_GT_IRQS) {
+ disable_rpm_wakeref_asserts(dev_priv);
+ gen8_de_irq_handler(dev_priv, master_ctl);
+ enable_rpm_wakeref_asserts(dev_priv);
+ }
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ_FW(GEN8_MASTER_IRQ);
- enable_rpm_wakeref_asserts(dev_priv);
+ gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
- return ret;
+ return IRQ_HANDLED;
}
struct wedge_me {
@@ -2747,6 +2762,156 @@ static void __fini_wedge(struct wedge_me *w)
(W)->i915; \
__fini_wedge((W)))
+static __always_inline void
+gen11_cs_irq_handler(struct intel_engine_cs * const engine, const u32 iir)
+{
+ gen8_cs_irq_handler(engine, iir, 0);
+}
+
+static void
+gen11_gt_engine_irq_handler(struct drm_i915_private * const i915,
+ const unsigned int bank,
+ const unsigned int engine_n,
+ const u16 iir)
+{
+ struct intel_engine_cs ** const engine = i915->engine;
+
+ switch (bank) {
+ case 0:
+ switch (engine_n) {
+
+ case GEN11_RCS0:
+ return gen11_cs_irq_handler(engine[RCS], iir);
+
+ case GEN11_BCS:
+ return gen11_cs_irq_handler(engine[BCS], iir);
+ }
+ case 1:
+ switch (engine_n) {
+
+ case GEN11_VCS(0):
+ return gen11_cs_irq_handler(engine[_VCS(0)], iir);
+ case GEN11_VCS(1):
+ return gen11_cs_irq_handler(engine[_VCS(1)], iir);
+ case GEN11_VCS(2):
+ return gen11_cs_irq_handler(engine[_VCS(2)], iir);
+ case GEN11_VCS(3):
+ return gen11_cs_irq_handler(engine[_VCS(3)], iir);
+
+ case GEN11_VECS(0):
+ return gen11_cs_irq_handler(engine[_VECS(0)], iir);
+ case GEN11_VECS(1):
+ return gen11_cs_irq_handler(engine[_VECS(1)], iir);
+ }
+ }
+}
+
+static u32
+gen11_gt_engine_intr(struct drm_i915_private * const i915,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = i915->regs;
+ u32 timeout_ts;
+ u32 ident;
+
+ raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
+
+ /*
+ * NB: Specs do not specify how long to spin wait,
+ * so we do ~100us as an educated guess.
+ */
+ timeout_ts = (local_clock() >> 10) + 100;
+ do {
+ ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
+ } while (!(ident & GEN11_INTR_DATA_VALID) &&
+ !time_after32(local_clock() >> 10, timeout_ts));
+
+ if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
+ DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+ bank, bit, ident);
+ return 0;
+ }
+
+ raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
+ GEN11_INTR_DATA_VALID);
+
+ return ident & GEN11_INTR_ENGINE_MASK;
+}
+
+static void
+gen11_gt_irq_handler(struct drm_i915_private * const i915,
+ const u32 master_ctl)
+{
+ void __iomem * const regs = i915->regs;
+ unsigned int bank;
+
+ for (bank = 0; bank < 2; bank++) {
+ unsigned long intr_dw;
+ unsigned int bit;
+
+ if (!(master_ctl & GEN11_GT_DW_IRQ(bank)))
+ continue;
+
+ intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+
+ if (unlikely(!intr_dw)) {
+ DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
+ continue;
+ }
+
+ for_each_set_bit(bit, &intr_dw, 32) {
+ const u16 iir = gen11_gt_engine_intr(i915, bank, bit);
+
+ if (unlikely(!iir))
+ continue;
+
+ gen11_gt_engine_irq_handler(i915, bank, bit, iir);
+ }
+
+ /* The clear must come after the shared IIRs have been serviced for each engine */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+ }
+}
+
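gen11_gt_engine_intr() above bounds its wait on the identity register to roughly 100us: local_clock() returns nanoseconds, so shifting right by 10 is a cheap approximation of microseconds, and time_after32() handles wraparound. The idiom in isolation, annotated:

	/* ~100us bounded spin: local_clock() is in ns, >> 10 approximates us. */
	u32 timeout_ts = (local_clock() >> 10) + 100;
	u32 ident;

	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));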
+static irqreturn_t gen11_irq_handler(int irq, void *arg)
+{
+ struct drm_i915_private * const i915 = to_i915(arg);
+ void __iomem * const regs = i915->regs;
+ u32 master_ctl;
+
+ if (!intel_irqs_enabled(i915))
+ return IRQ_NONE;
+
+ master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
+ master_ctl &= ~GEN11_MASTER_IRQ;
+ if (!master_ctl)
+ return IRQ_NONE;
+
+ /* Disable interrupts. */
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+
+ /* Find, clear, then process each source of interrupt. */
+ gen11_gt_irq_handler(i915, master_ctl);
+
+ /* IRQs are synced during runtime_suspend, we don't require a wakeref */
+ if (master_ctl & GEN11_DISPLAY_IRQ) {
+ const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
+
+ disable_rpm_wakeref_asserts(i915);
+ /*
+ * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
+ * for the display related bits.
+ */
+ gen8_de_irq_handler(i915, disp_ctl);
+ enable_rpm_wakeref_asserts(i915);
+ }
+
+ /* Acknowledge and enable interrupts. */
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+
+ return IRQ_HANDLED;
+}
+
/**
* i915_reset_device - do process context error handling work
* @dev_priv: i915 device private
@@ -2952,6 +3117,12 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
ilk_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ /* Even though there is no DMC, frame counter can get stuck when
+ * PSR is active as no frames are generated.
+ */
+ if (HAS_PSR(dev_priv))
+ drm_vblank_restore(dev, pipe);
+
return 0;
}
@@ -2964,6 +3135,12 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ /* Even if there is no DMC, frame counter can get stuck when
+ * PSR is active as no frames are generated, so check only for PSR.
+ */
+ if (HAS_PSR(dev_priv))
+ drm_vblank_restore(dev, pipe);
+
return 0;
}
@@ -3064,7 +3241,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i9xx_pipestat_irq_reset(dev_priv);
GEN3_IRQ_RESET(VLV_);
- dev_priv->irq_mask = ~0;
+ dev_priv->irq_mask = ~0u;
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -3089,7 +3266,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- WARN_ON(dev_priv->irq_mask != ~0);
+ WARN_ON(dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
@@ -3160,6 +3337,42 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev_priv);
}
+static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
+{
+ /* Disable RCS, BCS, VCS and VECS class interrupts. */
+ I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
+ I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);
+
+ /* Restore irq masks on RCS, BCS, VCS and VECS engines. */
+ I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
+ I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
+ I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
+ I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
+ I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
+}
+
+static void gen11_irq_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
+ POSTING_READ(GEN11_GFX_MSTR_IRQ);
+
+ gen11_gt_irq_reset(dev_priv);
+
+ I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+
+ for_each_pipe(dev_priv, pipe)
+ if (intel_display_power_is_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+
+ GEN3_IRQ_RESET(GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(GEN8_PCU_);
+}
+
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
@@ -3607,6 +3820,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
+ if (IS_CNL_WITH_PORT_F(dev_priv))
+ de_port_masked |= CNL_AUX_CHANNEL_F;
+
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
GEN8_PIPE_FIFO_UNDERRUN;
@@ -3654,6 +3870,41 @@ static int gen8_irq_postinstall(struct drm_device *dev)
return 0;
}
+static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+
+ BUILD_BUG_ON(irqs & 0xffff0000);
+
+ /* Enable RCS, BCS, VCS and VECS class interrupts. */
+ I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
+ I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);
+
+ /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
+ I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
+ I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
+ I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
+ I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
+ I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
+
+ dev_priv->pm_imr = 0xffffffff; /* TODO */
+}
+
+static int gen11_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen11_gt_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(dev_priv);
+
+ I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
+
+ I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
+ POSTING_READ(GEN11_GFX_MSTR_IRQ);
+
+ return 0;
+}
+
static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4102,6 +4353,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ dev->driver->irq_handler = gen11_irq_handler;
+ dev->driver->irq_preinstall = gen11_irq_reset;
+ dev->driver->irq_postinstall = gen11_irq_postinstall;
+ dev->driver->irq_uninstall = gen11_irq_reset;
+ dev->driver->enable_vblank = gen8_enable_vblank;
+ dev->driver->disable_vblank = gen8_disable_vblank;
+ dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
} else if (INTEL_GEN(dev_priv) >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 49a0794..79f8ec7 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -96,6 +96,11 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
- if (static_cpu_has(X86_FEATURE_XMM4_1))
+ /*
+ * Some hypervisors (e.g. KVM) don't support emulation of VEX-prefixed
+ * instructions, so don't enable movntdqa in a hypervisor guest.
+ */
+ if (static_cpu_has(X86_FEATURE_XMM4_1) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_enable(&has_movntdqa);
}
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index abdf4d0..4abd2e8 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -85,9 +85,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"d6de6f55-e526-4f79-a6a6-d7315c09044e",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index b69b900..cb6f304 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -83,9 +83,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"5ee72f5c-092f-421e-8b70-225f7c3e9612",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
index 368c87d..8641ae3 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"74fb4902-d3d3-4237-9e90-cbdc68d0a446",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
new file mode 100644
index 0000000..792facd
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -0,0 +1,109 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_cflgt3.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2744), 0x00800000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2770), 0x00000004 },
+ { _MMIO(0x2774), 0x00000000 },
+ { _MMIO(0x2778), 0x00000003 },
+ { _MMIO(0x277c), 0x00000000 },
+ { _MMIO(0x2780), 0x00000007 },
+ { _MMIO(0x2784), 0x00000000 },
+ { _MMIO(0x2788), 0x00100002 },
+ { _MMIO(0x278c), 0x0000fff7 },
+ { _MMIO(0x2790), 0x00100002 },
+ { _MMIO(0x2794), 0x0000ffcf },
+ { _MMIO(0x2798), 0x00100082 },
+ { _MMIO(0x279c), 0x0000ffef },
+ { _MMIO(0x27a0), 0x001000c2 },
+ { _MMIO(0x27a4), 0x0000ffe7 },
+ { _MMIO(0x27a8), 0x00100001 },
+ { _MMIO(0x27ac), 0x0000ffe7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
+ { _MMIO(0x9888), 0x11810000 },
+ { _MMIO(0x9888), 0x07810013 },
+ { _MMIO(0x9888), 0x1f810000 },
+ { _MMIO(0x9888), 0x1d810000 },
+ { _MMIO(0x9888), 0x1b930040 },
+ { _MMIO(0x9888), 0x07e54000 },
+ { _MMIO(0x9888), 0x1f908000 },
+ { _MMIO(0x9888), 0x11900000 },
+ { _MMIO(0x9888), 0x37900000 },
+ { _MMIO(0x9888), 0x53900000 },
+ { _MMIO(0x9888), 0x45900000 },
+ { _MMIO(0x9888), 0x33900000 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
+{
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
+ "577e8e2c-3fa0-4875-8743-3538d585e3b0",
+ sizeof(dev_priv->perf.oa.test_config.uuid));
+ dev_priv->perf.oa.test_config.id = 1;
+
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
new file mode 100644
index 0000000..c13b5aa
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
@@ -0,0 +1,34 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_CFLGT3_H__
+#define __I915_OA_CFLGT3_H__
+
+extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index 322a3f9..556febb 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"4a534b07-cba3-414d-8d60-874830e883aa",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
new file mode 100644
index 0000000..ba9140c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -0,0 +1,121 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_cnl.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2770), 0x00000004 },
+ { _MMIO(0x2774), 0x0000ffff },
+ { _MMIO(0x2778), 0x00000003 },
+ { _MMIO(0x277c), 0x0000ffff },
+ { _MMIO(0x2780), 0x00000007 },
+ { _MMIO(0x2784), 0x0000ffff },
+ { _MMIO(0x2788), 0x00100002 },
+ { _MMIO(0x278c), 0x0000fff7 },
+ { _MMIO(0x2790), 0x00100002 },
+ { _MMIO(0x2794), 0x0000ffcf },
+ { _MMIO(0x2798), 0x00100082 },
+ { _MMIO(0x279c), 0x0000ffef },
+ { _MMIO(0x27a0), 0x001000c2 },
+ { _MMIO(0x27a4), 0x0000ffe7 },
+ { _MMIO(0x27a8), 0x00100001 },
+ { _MMIO(0x27ac), 0x0000ffe7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0xd04), 0x00000200 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x17060000 },
+ { _MMIO(0x9840), 0x00000000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x13034000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x07060066 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x05060000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x0f080040 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x07091000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x0f041000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x1d004000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x35000000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x49000000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x3d000000 },
+ { _MMIO(0x9884), 0x00000007 },
+ { _MMIO(0x9888), 0x31000000 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
+{
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
+ "db41edd4-d8e7-4730-ad11-b9a2d6833503",
+ sizeof(dev_priv->perf.oa.test_config.uuid));
+ dev_priv->perf.oa.test_config.id = 1;
+
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
new file mode 100644
index 0000000..fb918b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.h
@@ -0,0 +1,34 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_CNL_H__
+#define __I915_OA_CNL_H__
+
+extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 4ee527e..971db58 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -83,9 +83,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"dd3fd789-e783-4204-8cd0-b671bbccb0cf",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 56b0377..434a9b9 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -113,9 +113,9 @@ show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *b
void
i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"403d8832-1a27-4aa6-a64e-f5389ce7b212",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic;
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index b6e7cc7..2fa98a4 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"baa3c7e4-52b6-4b85-801e-465a94b746dd",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index 5576afd..f3cb667 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"f1792f32-6db2-4b50-b4b2-557128f1688d",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index 890d558..bf8b8cd 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -83,9 +83,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"1651949f-0ac0-4cb1-a06f-dafd74a407d1",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index 85e51ad..ae534c7 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"2b985803-d3c9-4629-8a4f-634bfecba0e8",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index bce031e..817fba2 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"882fa433-1f4a-4a67-a962-c741888fe5f5",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index b4faeb6..08108ce 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -22,6 +22,8 @@
* IN THE SOFTWARE.
*/
+#include <drm/drm_print.h>
+
#include "i915_params.h"
#include "i915_drv.h"
@@ -46,17 +48,6 @@ i915_param_named_unsafe(panel_ignore_lid, int, 0600,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
-i915_param_named_unsafe(semaphores, int, 0400,
- "Use semaphores for inter-ring sync "
- "(default: -1 (use per-chip defaults))");
-
-i915_param_named_unsafe(enable_rc6, int, 0400,
- "Enable power-saving render C-state 6. "
- "Different stages can be selected via bitmask values "
- "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
- "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
- "default: -1 (use per-chip default)");
-
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
@@ -99,10 +90,6 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-i915_param_named_unsafe(enable_execlists, int, 0400,
- "Override execlists usage. "
- "(-1=auto [default], 0=disabled, 1=enabled)");
-
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
@@ -162,16 +149,14 @@ i915_param_named_unsafe(edp_vswing, int, 0400,
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
-i915_param_named_unsafe(enable_guc_loading, int, 0400,
- "Enable GuC firmware loading "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
-
-i915_param_named_unsafe(enable_guc_submission, int, 0400,
- "Enable GuC submission "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
+i915_param_named_unsafe(enable_guc, int, 0400,
+ "Enable GuC load for GuC submission and/or HuC load. "
+ "Required functionality can be selected using bitmask values. "
+ "(-1=auto, 0=disable [default], 1=GuC submission, 2=HuC load)");
i915_param_named(guc_log_level, int, 0400,
- "GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
+ "GuC firmware logging level. Requires GuC to be loaded. "
+ "(-1=auto [default], 0=disable, 1..4=enable with verbosity min..max)");
i915_param_named_unsafe(guc_firmware_path, charp, 0400,
"GuC firmware path to use instead of the default one");
@@ -182,11 +167,44 @@ i915_param_named_unsafe(huc_firmware_path, charp, 0400,
i915_param_named_unsafe(enable_dp_mst, bool, 0600,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
i915_param_named_unsafe(inject_load_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
+#endif
i915_param_named(enable_dpcd_backlight, bool, 0600,
"Enable support for DPCD backlight control (default:false)");
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+
+static __always_inline void _print_param(struct drm_printer *p,
+ const char *name,
+ const char *type,
+ const void *x)
+{
+ if (!__builtin_strcmp(type, "bool"))
+ drm_printf(p, "i915.%s=%s\n", name, yesno(*(const bool *)x));
+ else if (!__builtin_strcmp(type, "int"))
+ drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
+ else if (!__builtin_strcmp(type, "unsigned int"))
+ drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "char *"))
+ drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
+ else
+ BUILD_BUG();
+}
+
+/**
+ * i915_params_dump - dump i915 modparams
+ * @params: i915 modparams
+ * @p: the &drm_printer
+ *
+ * Pretty printer for i915 modparams.
+ */
+void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
+{
+#define PRINT(T, x, ...) _print_param(p, #x, #T, &params->x);
+ I915_PARAMS_FOR_EACH(PRINT);
+#undef PRINT
+}
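i915_params_dump() walks I915_PARAMS_FOR_EACH and formats each parameter through _print_param(), which dispatches on the stringified type at compile time. A hedged usage sketch, assuming a device-backed drm_printer at driver load (the call site is not part of this diff):

	/* Assumed call site: dump the effective modparams during load. */
	struct drm_printer p = drm_info_printer(i915->drm.dev);

	i915_params_dump(&i915_modparams, &p);
	/* Emits lines such as "i915.enable_guc=0" via drm_printf(). */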
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index c729226..430f5f9 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -25,28 +25,30 @@
#ifndef _I915_PARAMS_H_
#define _I915_PARAMS_H_
+#include <linux/bitops.h>
#include <linux/cache.h> /* for __read_mostly */
+struct drm_printer;
+
+#define ENABLE_GUC_SUBMISSION BIT(0)
+#define ENABLE_GUC_LOAD_HUC BIT(1)
+
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
param(int, panel_ignore_lid, 1) \
- param(int, semaphores, -1) \
param(int, lvds_channel_mode, 0) \
param(int, panel_use_ssc, -1) \
param(int, vbt_sdvo_panel_type, -1) \
- param(int, enable_rc6, -1) \
param(int, enable_dc, -1) \
param(int, enable_fbc, -1) \
param(int, enable_ppgtt, -1) \
- param(int, enable_execlists, -1) \
param(int, enable_psr, -1) \
param(int, disable_power_well, -1) \
param(int, enable_ips, 1) \
param(int, invert_brightness, 0) \
- param(int, enable_guc_loading, 0) \
- param(int, enable_guc_submission, 0) \
- param(int, guc_log_level, -1) \
+ param(int, enable_guc, 0) \
+ param(int, guc_log_level, 0) \
param(char *, guc_firmware_path, NULL) \
param(char *, huc_firmware_path, NULL) \
param(int, mmio_debug, 0) \
@@ -77,5 +79,7 @@ struct i915_params {
extern struct i915_params i915_modparams __read_mostly;
+void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+
#endif
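enable_guc is now a bitmask: ENABLE_GUC_SUBMISSION (BIT(0)) requests GuC submission and ENABLE_GUC_LOAD_HUC (BIT(1)) requests HuC loading, replacing the removed enable_guc_loading/enable_guc_submission pair. A sketch of how callers might test the bits; the helper names are illustrative, not from this diff:

	/* Illustrative helpers; names are not taken from this patch. */
	static inline bool want_guc_submission(void)
	{
		return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
	}

	static inline bool want_huc_load(void)
	{
		return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
	}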
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6458c30..062e91b 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -29,6 +29,9 @@
#include "i915_drv.h"
#include "i915_selftest.h"
+#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
+#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
+
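PLATFORM() and GEN() set both the scalar identifier and a one-hot mask, so platform and generation checks can be single bit tests against precomputed masks rather than integer comparisons. An illustrative sketch of the consuming side; the real helpers live in i915_drv.h and these macro names are assumptions:

	/* Illustrative consumers of platform_mask/gen_mask. */
	#define MY_IS_PLATFORM(info, p)		((info)->platform_mask & BIT(p))
	#define MY_IS_GEN_RANGE(info, s, e)	\
		((info)->gen_mask & GENMASK((e) - 1, (s) - 1))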
#define GEN_DEFAULT_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
@@ -63,7 +66,8 @@
.page_sizes = I915_GTT_PAGE_SIZE_4K
#define GEN2_FEATURES \
- .gen = 2, .num_pipes = 1, \
+ GEN(2), \
+ .num_pipes = 1, \
.has_overlay = 1, .overlay_needs_physical = 1, \
.has_gmch_display = 1, \
.hws_needs_physical = 1, \
@@ -74,33 +78,35 @@
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i830_info __initconst = {
+static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
- .platform = INTEL_I830,
+ PLATFORM(INTEL_I830),
.is_mobile = 1, .cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
-static const struct intel_device_info intel_i845g_info __initconst = {
+static const struct intel_device_info intel_i845g_info = {
GEN2_FEATURES,
- .platform = INTEL_I845G,
+ PLATFORM(INTEL_I845G),
};
-static const struct intel_device_info intel_i85x_info __initconst = {
+static const struct intel_device_info intel_i85x_info = {
GEN2_FEATURES,
- .platform = INTEL_I85X, .is_mobile = 1,
+ PLATFORM(INTEL_I85X),
+ .is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
.cursor_needs_physical = 1,
.has_fbc = 1,
};
-static const struct intel_device_info intel_i865g_info __initconst = {
+static const struct intel_device_info intel_i865g_info = {
GEN2_FEATURES,
- .platform = INTEL_I865G,
+ PLATFORM(INTEL_I865G),
};
#define GEN3_FEATURES \
- .gen = 3, .num_pipes = 2, \
+ GEN(3), \
+ .num_pipes = 2, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
@@ -108,17 +114,18 @@ static const struct intel_device_info intel_i865g_info __initconst = {
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i915g_info __initconst = {
+static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
- .platform = INTEL_I915G, .cursor_needs_physical = 1,
+ PLATFORM(INTEL_I915G),
+ .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i915gm_info __initconst = {
+static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
- .platform = INTEL_I915GM,
+ PLATFORM(INTEL_I915GM),
.is_mobile = 1,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -128,18 +135,19 @@ static const struct intel_device_info intel_i915gm_info __initconst = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945g_info __initconst = {
+static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
- .platform = INTEL_I945G,
+ PLATFORM(INTEL_I945G),
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945gm_info __initconst = {
+static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
- .platform = INTEL_I945GM, .is_mobile = 1,
+ PLATFORM(INTEL_I945GM),
+ .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
@@ -148,22 +156,24 @@ static const struct intel_device_info intel_i945gm_info __initconst = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_g33_info __initconst = {
+static const struct intel_device_info intel_g33_info = {
GEN3_FEATURES,
- .platform = INTEL_G33,
+ PLATFORM(INTEL_G33),
.has_hotplug = 1,
.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_info __initconst = {
+static const struct intel_device_info intel_pineview_info = {
GEN3_FEATURES,
- .platform = INTEL_PINEVIEW, .is_mobile = 1,
+ PLATFORM(INTEL_PINEVIEW),
+ .is_mobile = 1,
.has_hotplug = 1,
.has_overlay = 1,
};
#define GEN4_FEATURES \
- .gen = 4, .num_pipes = 2, \
+ GEN(4), \
+ .num_pipes = 2, \
.has_hotplug = 1, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
@@ -172,17 +182,17 @@ static const struct intel_device_info intel_pineview_info __initconst = {
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i965g_info __initconst = {
+static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
- .platform = INTEL_I965G,
+ PLATFORM(INTEL_I965G),
.has_overlay = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
-static const struct intel_device_info intel_i965gm_info __initconst = {
+static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
- .platform = INTEL_I965GM,
+ PLATFORM(INTEL_I965GM),
.is_mobile = 1, .has_fbc = 1,
.has_overlay = 1,
.supports_tv = 1,
@@ -190,42 +200,46 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
.has_snoop = false,
};
-static const struct intel_device_info intel_g45_info __initconst = {
+static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
- .platform = INTEL_G45,
+ PLATFORM(INTEL_G45),
.ring_mask = RENDER_RING | BSD_RING,
};
-static const struct intel_device_info intel_gm45_info __initconst = {
+static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
- .platform = INTEL_GM45,
+ PLATFORM(INTEL_GM45),
.is_mobile = 1, .has_fbc = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
#define GEN5_FEATURES \
- .gen = 5, .num_pipes = 2, \
+ GEN(5), \
+ .num_pipes = 2, \
.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
+ /* ilk does support rc6, but we do not implement [power] contexts */ \
+ .has_rc6 = 0, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_ironlake_d_info __initconst = {
+static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
- .platform = INTEL_IRONLAKE,
+ PLATFORM(INTEL_IRONLAKE),
};
-static const struct intel_device_info intel_ironlake_m_info __initconst = {
+static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
- .platform = INTEL_IRONLAKE,
+ PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1, .has_fbc = 1,
};
#define GEN6_FEATURES \
- .gen = 6, .num_pipes = 2, \
+ GEN(6), \
+ .num_pipes = 2, \
.has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
@@ -239,36 +253,37 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = {
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
- .platform = INTEL_SANDYBRIDGE
+ PLATFORM(INTEL_SANDYBRIDGE)
-static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = {
+static const struct intel_device_info intel_sandybridge_d_gt1_info = {
SNB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = {
+static const struct intel_device_info intel_sandybridge_d_gt2_info = {
SNB_D_PLATFORM,
.gt = 2,
};
#define SNB_M_PLATFORM \
GEN6_FEATURES, \
- .platform = INTEL_SANDYBRIDGE, \
+ PLATFORM(INTEL_SANDYBRIDGE), \
.is_mobile = 1
-static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = {
+static const struct intel_device_info intel_sandybridge_m_gt1_info = {
SNB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = {
+static const struct intel_device_info intel_sandybridge_m_gt2_info = {
SNB_M_PLATFORM,
.gt = 2,
};
#define GEN7_FEATURES \
- .gen = 7, .num_pipes = 3, \
+ GEN(7), \
+ .num_pipes = 3, \
.has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
@@ -283,46 +298,46 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst =
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
- .platform = INTEL_IVYBRIDGE, \
+ PLATFORM(INTEL_IVYBRIDGE), \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = {
+static const struct intel_device_info intel_ivybridge_d_gt1_info = {
IVB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
+static const struct intel_device_info intel_ivybridge_d_gt2_info = {
IVB_D_PLATFORM,
.gt = 2,
};
#define IVB_M_PLATFORM \
GEN7_FEATURES, \
- .platform = INTEL_IVYBRIDGE, \
+ PLATFORM(INTEL_IVYBRIDGE), \
.is_mobile = 1, \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = {
+static const struct intel_device_info intel_ivybridge_m_gt1_info = {
IVB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = {
+static const struct intel_device_info intel_ivybridge_m_gt2_info = {
IVB_M_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_ivybridge_q_info __initconst = {
+static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
- .platform = INTEL_IVYBRIDGE,
+ PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
.num_pipes = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
-static const struct intel_device_info intel_valleyview_info __initconst = {
- .platform = INTEL_VALLEYVIEW,
- .gen = 7,
+static const struct intel_device_info intel_valleyview_info = {
+ PLATFORM(INTEL_VALLEYVIEW),
+ GEN(7),
.is_lp = 1,
.num_pipes = 2,
.has_psr = 1,
@@ -353,26 +368,27 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
#define HSW_PLATFORM \
G75_FEATURES, \
- .platform = INTEL_HASWELL, \
+ PLATFORM(INTEL_HASWELL), \
.has_l3_dpf = 1
-static const struct intel_device_info intel_haswell_gt1_info __initconst = {
+static const struct intel_device_info intel_haswell_gt1_info = {
HSW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_haswell_gt2_info __initconst = {
+static const struct intel_device_info intel_haswell_gt2_info = {
HSW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_haswell_gt3_info __initconst = {
+static const struct intel_device_info intel_haswell_gt3_info = {
HSW_PLATFORM,
.gt = 3,
};
#define GEN8_FEATURES \
G75_FEATURES, \
+ GEN(8), \
BDW_COLORS, \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
@@ -383,20 +399,19 @@ static const struct intel_device_info intel_haswell_gt3_info __initconst = {
#define BDW_PLATFORM \
GEN8_FEATURES, \
- .gen = 8, \
- .platform = INTEL_BROADWELL
+ PLATFORM(INTEL_BROADWELL)
-static const struct intel_device_info intel_broadwell_gt1_info __initconst = {
+static const struct intel_device_info intel_broadwell_gt1_info = {
BDW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_broadwell_gt2_info __initconst = {
+static const struct intel_device_info intel_broadwell_gt2_info = {
BDW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
+static const struct intel_device_info intel_broadwell_rsvd_info = {
BDW_PLATFORM,
.gt = 3,
/* According to the device ID those devices are GT3, they were
@@ -404,18 +419,19 @@ static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
*/
};
-static const struct intel_device_info intel_broadwell_gt3_info __initconst = {
+static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
-static const struct intel_device_info intel_cherryview_info __initconst = {
- .gen = 8, .num_pipes = 3,
+static const struct intel_device_info intel_cherryview_info = {
+ PLATFORM(INTEL_CHERRYVIEW),
+ GEN(8),
+ .num_pipes = 3,
.has_hotplug = 1,
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .platform = INTEL_CHERRYVIEW,
.has_64bit_reloc = 1,
.has_psr = 1,
.has_runtime_pm = 1,
@@ -441,6 +457,7 @@ static const struct intel_device_info intel_cherryview_info __initconst = {
#define GEN9_FEATURES \
GEN8_FEATURES, \
+ GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
.has_csr = 1, \
@@ -450,15 +467,14 @@ static const struct intel_device_info intel_cherryview_info __initconst = {
#define SKL_PLATFORM \
GEN9_FEATURES, \
- .gen = 9, \
- .platform = INTEL_SKYLAKE
+ PLATFORM(INTEL_SKYLAKE)
-static const struct intel_device_info intel_skylake_gt1_info __initconst = {
+static const struct intel_device_info intel_skylake_gt1_info = {
SKL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_skylake_gt2_info __initconst = {
+static const struct intel_device_info intel_skylake_gt2_info = {
SKL_PLATFORM,
.gt = 2,
};
@@ -468,18 +484,18 @@ static const struct intel_device_info intel_skylake_gt2_info __initconst = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
-static const struct intel_device_info intel_skylake_gt3_info __initconst = {
+static const struct intel_device_info intel_skylake_gt3_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 3,
};
-static const struct intel_device_info intel_skylake_gt4_info __initconst = {
+static const struct intel_device_info intel_skylake_gt4_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 4,
};
#define GEN9_LP_FEATURES \
- .gen = 9, \
+ GEN(9), \
.is_lp = 1, \
.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
@@ -509,35 +525,34 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
IVB_CURSOR_OFFSETS, \
BDW_COLORS
-static const struct intel_device_info intel_broxton_info __initconst = {
+static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
- .platform = INTEL_BROXTON,
+ PLATFORM(INTEL_BROXTON),
.ddb_size = 512,
};
-static const struct intel_device_info intel_geminilake_info __initconst = {
+static const struct intel_device_info intel_geminilake_info = {
GEN9_LP_FEATURES,
- .platform = INTEL_GEMINILAKE,
+ PLATFORM(INTEL_GEMINILAKE),
.ddb_size = 1024,
GLK_COLORS,
};
#define KBL_PLATFORM \
GEN9_FEATURES, \
- .gen = 9, \
- .platform = INTEL_KABYLAKE
+ PLATFORM(INTEL_KABYLAKE)
-static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
+static const struct intel_device_info intel_kabylake_gt1_info = {
KBL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_kabylake_gt2_info __initconst = {
+static const struct intel_device_info intel_kabylake_gt2_info = {
KBL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
+static const struct intel_device_info intel_kabylake_gt3_info = {
KBL_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -545,20 +560,19 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
#define CFL_PLATFORM \
GEN9_FEATURES, \
- .gen = 9, \
- .platform = INTEL_COFFEELAKE
+ PLATFORM(INTEL_COFFEELAKE)
-static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
+static const struct intel_device_info intel_coffeelake_gt1_info = {
CFL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_coffeelake_gt2_info __initconst = {
+static const struct intel_device_info intel_coffeelake_gt2_info = {
CFL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
+static const struct intel_device_info intel_coffeelake_gt3_info = {
CFL_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -566,17 +580,33 @@ static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
#define GEN10_FEATURES \
GEN9_FEATURES, \
+ GEN(10), \
.ddb_size = 1024, \
GLK_COLORS
-static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
+static const struct intel_device_info intel_cannonlake_info = {
GEN10_FEATURES,
- .is_alpha_support = 1,
- .platform = INTEL_CANNONLAKE,
- .gen = 10,
+ PLATFORM(INTEL_CANNONLAKE),
.gt = 2,
};
+#define GEN11_FEATURES \
+ GEN10_FEATURES, \
+ GEN(11), \
+ .ddb_size = 2048, \
+ .has_csr = 0, \
+ .has_logical_ring_elsq = 1
+
+static const struct intel_device_info intel_icelake_11_info = {
+ GEN11_FEATURES,
+ PLATFORM(INTEL_ICELAKE),
+ .is_alpha_support = 1,
+ .has_resource_streamer = 0,
+};
+
+#undef GEN
+#undef PLATFORM
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -631,9 +661,11 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
- INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
- INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
+ INTEL_CNL_IDS(&intel_cannonlake_info),
+ INTEL_ICL_11_IDS(&intel_icelake_11_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index d453756..abaca6e 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -207,6 +207,8 @@
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"
+#include "i915_oa_cflgt3.h"
+#include "i915_oa_cnl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -242,7 +244,7 @@
* The two separate pointers let us decouple read()s from tail pointer aging.
*
* The tail pointers are checked and updated at a limited rate within a hrtimer
- * callback (the same callback that is used for delivering POLLIN events)
+ * callback (the same callback that is used for delivering EPOLLIN events)
*
* Initially the tails are marked invalid with %INVALID_TAIL_PTR which
* indicates that an updated tail pointer is needed.
@@ -1214,9 +1216,9 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915_modparams.enable_execlists)
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
- else {
+ } else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct intel_ring *ring;
int ret;
@@ -1260,7 +1262,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915_modparams.enable_execlists) {
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
} else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
@@ -1301,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
*/
mutex_lock(&dev_priv->drm.struct_mutex);
dev_priv->perf.oa.exclusive_stream = NULL;
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
free_oa_buffer(dev_priv);
@@ -1628,10 +1629,10 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
* Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
* is only used by the kernel context.
*/
-static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
+static int gen8_emit_oa_config(struct i915_request *rq,
const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *dev_priv = req->i915;
+ struct drm_i915_private *dev_priv = rq->i915;
/* The MMIO offsets for Flex EU registers aren't contiguous */
u32 flex_mmio[] = {
i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -1645,7 +1646,7 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
u32 *cs;
int i;
- cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
+ cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1683,7 +1684,7 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1693,41 +1694,40 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
{
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct i915_gem_timeline *timeline;
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- i915_gem_retire_requests(dev_priv);
+ i915_retire_requests(dev_priv);
- req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- ret = gen8_emit_oa_config(req, oa_config);
+ ret = gen8_emit_oa_config(rq, oa_config);
if (ret) {
- i915_add_request(req);
+ i915_request_add(rq);
return ret;
}
/* Queue this switch after all other activity */
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
struct intel_timeline *tl;
tl = &timeline->engine[engine->id];
prev = i915_gem_active_raw(&tl->last_request,
&dev_priv->drm.struct_mutex);
if (prev)
- i915_sw_fence_await_sw_fence_gfp(&req->submit,
+ i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
GFP_KERNEL);
}
- ret = i915_switch_context(req);
- i915_add_request(req);
+ i915_request_add(rq);
- return ret;
+ return 0;
}
/*
@@ -1755,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
* Note: it's only the RCS/Render context that has any OA state.
*/
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config,
- bool interruptible)
+ const struct i915_oa_config *oa_config)
{
struct i915_gem_context *ctx;
int ret;
unsigned int wait_flags = I915_WAIT_LOCKED;
- if (interruptible) {
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
-
- wait_flags |= I915_WAIT_INTERRUPTIBLE;
- } else {
- mutex_lock(&dev_priv->drm.struct_mutex);
- }
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Switch away from any user context. */
ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
@@ -1818,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
}
out:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
return ret;
}
@@ -1851,7 +1840,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN9(dev_priv)) {
+ if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
I915_WRITE(GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -1862,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
+ ret = gen8_configure_all_contexts(dev_priv, oa_config);
if (ret)
return ret;
@@ -1877,13 +1866,23 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
{
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL, false);
+ gen8_configure_all_contexts(dev_priv, NULL);
I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
~GT_NOA_ENABLE));
}
+static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
+{
+ /* Reset all contexts' slices/subslices configurations. */
+ gen8_configure_all_contexts(dev_priv, NULL);
+
+ /* Make sure we disable noa to save power. */
+ I915_WRITE(RPM_CONFIG1,
+ I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
+}
+
static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
/*
@@ -2127,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (ret)
goto err_oa_buf_alloc;
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ goto err_lock;
+
ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
stream->oa_config);
if (ret)
@@ -2134,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->ops = &i915_oa_stream_ops;
- /* Lock device for exclusive_stream access late because
- * enable_metric_set() might lock as well on gen8+.
- */
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- goto err_lock;
-
dev_priv->perf.oa.exclusive_stream = stream;
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
-err_lock:
+err_enable:
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
-err_enable:
+err_lock:
free_oa_buffer(dev_priv);
err_oa_buf_alloc:
@@ -2281,13 +2278,13 @@ static ssize_t i915_perf_read(struct file *file,
mutex_unlock(&dev_priv->perf.lock);
}
- /* We allow the poll checking to sometimes report false positive POLLIN
+ /* We allow the poll checking to sometimes report false positive EPOLLIN
* events where we might actually report EAGAIN on read() if there's
* not really any data available. In this situation though we don't
- * want to enter a busy loop between poll() reporting a POLLIN event
+ * want to enter a busy loop between poll() reporting an EPOLLIN event
* and read() returning -EAGAIN. Clearing the oa.pollin state here
* effectively ensures we back off until the next hrtimer callback
- * before reporting another POLLIN event.
+ * before reporting another EPOLLIN event.
*/
if (ret >= 0 || ret == -EAGAIN) {
/* Maybe make ->pollin per-stream state if we support multiple
@@ -2347,7 +2344,7 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
* samples to read.
*/
if (dev_priv->perf.oa.pollin)
- events |= POLLIN;
+ events |= EPOLLIN;
return events;
}
@@ -2679,8 +2676,8 @@ err:
static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{
- return div_u64(1000000000ULL * (2ULL << exponent),
- dev_priv->perf.oa.timestamp_frequency);
+ return div64_u64(1000000000ULL * (2ULL << exponent),
+ 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
}
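
With the timestamp frequency now taken from the device info (in kHz), the OA report period for a given exponent is 10^9 * 2^(exponent + 1) / (1000 * cs_timestamp_frequency_khz) nanoseconds. A quick worked example, assuming a hypothetical 12000 kHz command-streamer timestamp clock:

/* Worked example; the frequency is an assumption, not read from hardware. */
u64 freq_khz = 12000;                         /* 12 MHz CS timestamp clock */
int exponent = 5;                             /* 2ULL << 5 = 64 ticks per report */
u64 period_ns = div64_u64(1000000000ULL * (2ULL << exponent),
			  1000ULL * freq_khz);      /* = 5333 ns, ~5.3 us */
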
/**
@@ -2934,6 +2931,10 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
} else if (IS_COFFEELAKE(dev_priv)) {
if (IS_CFL_GT2(dev_priv))
i915_perf_load_test_config_cflgt2(dev_priv);
+ if (IS_CFL_GT3(dev_priv))
+ i915_perf_load_test_config_cflgt3(dev_priv);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ i915_perf_load_test_config_cnl(dev_priv);
}
if (dev_priv->perf.oa.test_config.id == 0)
@@ -2991,7 +2992,7 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
int i;
for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
- if (flex_eu_regs[i].reg == addr)
+ if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
return true;
}
return false;
@@ -2999,31 +3000,47 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{
- return (addr >= OASTARTTRIG1.reg && addr <= OASTARTTRIG8.reg) ||
- (addr >= OAREPORTTRIG1.reg && addr <= OAREPORTTRIG8.reg) ||
- (addr >= OACEC0_0.reg && addr <= OACEC7_1.reg);
+ return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
+ addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
+ (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
+ addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
+ (addr >= i915_mmio_reg_offset(OACEC0_0) &&
+ addr <= i915_mmio_reg_offset(OACEC7_1));
}
static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
- return addr == HALF_SLICE_CHICKEN2.reg ||
- (addr >= MICRO_BP0_0.reg && addr <= NOA_WRITE.reg) ||
- (addr >= OA_PERFCNT1_LO.reg && addr <= OA_PERFCNT2_HI.reg) ||
- (addr >= OA_PERFMATRIX_LO.reg && addr <= OA_PERFMATRIX_HI.reg);
+ return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
+ (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
+ addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
+ (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
+ addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
+ (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
+ addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
}
static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
return gen7_is_valid_mux_addr(dev_priv, addr) ||
- addr == WAIT_FOR_RC6_EXIT.reg ||
- (addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg);
+ addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
+ (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
+ addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
+}
+
+static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+{
+ return gen8_is_valid_mux_addr(dev_priv, addr) ||
+ (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
+ addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
}
static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
return gen7_is_valid_mux_addr(dev_priv, addr) ||
(addr >= 0x25100 && addr <= 0x2FF90) ||
- addr == 0x9ec0;
+ (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
+ addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
+ addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
}
static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
@@ -3038,14 +3055,14 @@ static uint32_t mask_reg_value(u32 reg, u32 val)
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this.
*/
- if (HALF_SLICE_CHICKEN2.reg == reg)
+ if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
/* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function
* indicated by its name and a bunch of selection fields used by OA
* configs.
*/
- if (WAIT_FOR_RC6_EXIT.reg == reg)
+ if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
@@ -3392,8 +3409,6 @@ static struct ctl_table dev_root[] = {
*/
void i915_perf_init(struct drm_i915_private *dev_priv)
{
- dev_priv->perf.oa.timestamp_frequency = 0;
-
if (IS_HASWELL(dev_priv)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
@@ -3409,70 +3424,68 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.oa_hw_tail_read =
gen7_oa_hw_tail_read;
- dev_priv->perf.oa.timestamp_frequency = 12500000;
-
dev_priv->perf.oa.oa_formats = hsw_oa_formats;
- } else if (i915_modparams.enable_execlists) {
+ } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
/* Note: that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
- gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
- gen8_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
- gen8_is_valid_flex_addr;
+ dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
dev_priv->perf.oa.ops.read = gen8_oa_read;
dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
+ if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
+ dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ gen7_is_valid_b_counter_addr;
+ dev_priv->perf.oa.ops.is_valid_mux_reg =
+ gen8_is_valid_mux_addr;
+ dev_priv->perf.oa.ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
- if (IS_GEN8(dev_priv)) {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
-
- dev_priv->perf.oa.timestamp_frequency = 12500000;
-
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->perf.oa.ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
- } else if (IS_GEN9(dev_priv)) {
+
+ dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
+
+ if (IS_GEN8(dev_priv)) {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
+
+ dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
+ } else {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+
+ dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ }
+ } else if (IS_GEN10(dev_priv)) {
+ dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ gen7_is_valid_b_counter_addr;
+ dev_priv->perf.oa.ops.is_valid_mux_reg =
+ gen10_is_valid_mux_addr;
+ dev_priv->perf.oa.ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
+
+ dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
+
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
-
- switch (dev_priv->info.platform) {
- case INTEL_BROXTON:
- case INTEL_GEMINILAKE:
- dev_priv->perf.oa.timestamp_frequency = 19200000;
- break;
- case INTEL_SKYLAKE:
- case INTEL_KABYLAKE:
- case INTEL_COFFEELAKE:
- dev_priv->perf.oa.timestamp_frequency = 12000000;
- break;
- default:
- /* Leave timestamp_frequency to 0 so we can
- * detect unsupported platforms.
- */
- break;
- }
}
}
- if (dev_priv->perf.oa.timestamp_frequency) {
+ if (dev_priv->perf.oa.ops.enable_metric_set) {
hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
@@ -3482,8 +3495,8 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->perf.lock);
spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
- oa_sample_rate_hard_limit =
- dev_priv->perf.oa.timestamp_frequency / 2;
+ oa_sample_rate_hard_limit = 1000 *
+ (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
mutex_init(&dev_priv->perf.metrics_lock);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
new file mode 100644
index 0000000..d8feb90
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -0,0 +1,1072 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/perf_event.h>
+#include <linux/pm_runtime.h>
+
+#include "i915_drv.h"
+#include "i915_pmu.h"
+#include "intel_ringbuffer.h"
+
+/* Frequency for the sampling timer for events which need it. */
+#define FREQUENCY 200
+#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
+
+#define ENGINE_SAMPLE_MASK \
+ (BIT(I915_SAMPLE_BUSY) | \
+ BIT(I915_SAMPLE_WAIT) | \
+ BIT(I915_SAMPLE_SEMA))
+
+#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
+
+static cpumask_t i915_pmu_cpumask;
+
+static u8 engine_config_sample(u64 config)
+{
+ return config & I915_PMU_SAMPLE_MASK;
+}
+
+static u8 engine_event_sample(struct perf_event *event)
+{
+ return engine_config_sample(event->attr.config);
+}
+
+static u8 engine_event_class(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
+}
+
+static u8 engine_event_instance(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
+}
+
+static bool is_engine_config(u64 config)
+{
+ return config < __I915_PMU_OTHER(0);
+}
+
+static unsigned int config_enabled_bit(u64 config)
+{
+ if (is_engine_config(config))
+ return engine_config_sample(config);
+ else
+ return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
+}
+
+static u64 config_enabled_mask(u64 config)
+{
+ return BIT_ULL(config_enabled_bit(config));
+}
+
+static bool is_engine_event(struct perf_event *event)
+{
+ return is_engine_config(event->attr.config);
+}
+
+static unsigned int event_enabled_bit(struct perf_event *event)
+{
+ return config_enabled_bit(event->attr.config);
+}
+
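
These helpers rely on the uapi encoding in which an engine event's config carries the sample type in the low I915_PMU_SAMPLE_BITS, the engine instance above it, and the engine class at I915_PMU_CLASS_SHIFT, while anything from __I915_PMU_OTHER(0) upwards is a global event. A sketch of building and taking apart such a config; engine_class and engine_instance are placeholder values:

/* Sketch of the assumed engine-event config layout. */
u64 config = ((u64)engine_class << I915_PMU_CLASS_SHIFT) |
	     ((u64)engine_instance << I915_PMU_SAMPLE_BITS) |
	     I915_SAMPLE_BUSY;

u8 sample   = config & I915_PMU_SAMPLE_MASK;            /* I915_SAMPLE_BUSY */
u8 instance = (config >> I915_PMU_SAMPLE_BITS) & 0xff;  /* engine_instance */
u8 class    = (config >> I915_PMU_CLASS_SHIFT) & 0xff;  /* engine_class */
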
+static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
+{
+ u64 enable;
+
+ /*
+ * Only some counters need the sampling timer.
+ *
+ * We start with a bitmask of all currently enabled events.
+ */
+ enable = i915->pmu.enable;
+
+ /*
+ * Mask out all the ones which do not need the timer, or in
+ * other words keep all the ones that could need the timer.
+ */
+ enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
+ ENGINE_SAMPLE_MASK;
+
+ /*
+ * When the GPU is idle per-engine counters do not need to be
+ * running so clear those bits out.
+ */
+ if (!gpu_active)
+ enable &= ~ENGINE_SAMPLE_MASK;
+ /*
+ * Also, if software busyness tracking is available we do not need
+ * the timer for the I915_SAMPLE_BUSY counter.
+ *
+ * Use RCS as proxy for all engines.
+ */
+ else if (intel_engine_supports_stats(i915->engine[RCS]))
+ enable &= ~BIT(I915_SAMPLE_BUSY);
+
+ /*
+ * If some bits remain it means we need the sampling timer running.
+ */
+ return enable;
+}
+
+void i915_pmu_gt_parked(struct drm_i915_private *i915)
+{
+ if (!i915->pmu.base.event_init)
+ return;
+
+ spin_lock_irq(&i915->pmu.lock);
+ /*
+ * Signal sampling timer to stop if only engine events are enabled and
+ * GPU went idle.
+ */
+ i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
+ spin_unlock_irq(&i915->pmu.lock);
+}
+
+static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
+{
+ if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
+ i915->pmu.timer_enabled = true;
+ hrtimer_start_range_ns(&i915->pmu.timer,
+ ns_to_ktime(PERIOD), 0,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+void i915_pmu_gt_unparked(struct drm_i915_private *i915)
+{
+ if (!i915->pmu.base.event_init)
+ return;
+
+ spin_lock_irq(&i915->pmu.lock);
+ /*
+ * Re-enable sampling timer when GPU goes active.
+ */
+ __i915_pmu_maybe_start_timer(i915);
+ spin_unlock_irq(&i915->pmu.lock);
+}
+
+static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
+{
+ if (!fw)
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+ return true;
+}
+
+static void
+update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+{
+ sample->cur += mul_u32_u32(val, unit);
+}
+
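
The sampling timer fires roughly every PERIOD nanoseconds (FREQUENCY caps it at 200 Hz), and each tick adds unit * val to the running counter. For the boolean engine samples below, unit is PERIOD, so the counter approximates time spent in the sampled state in nanoseconds, as this illustration assumes:

/* Illustration: two timer ticks for a BUSY sample.
 * PERIOD is max(10000, NSEC_PER_SEC / 200) = 5,000,000 ns.
 */
struct i915_pmu_sample busy = { .cur = 0 };
update_sample(&busy, PERIOD, 1);   /* engine busy: busy.cur += PERIOD */
update_sample(&busy, PERIOD, 0);   /* engine idle: no change */
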
+static void engines_sample(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool fw = false;
+
+ if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+ return;
+
+ if (!dev_priv->gt.awake)
+ return;
+
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return;
+
+ for_each_engine(engine, dev_priv, id) {
+ u32 current_seqno = intel_engine_get_seqno(engine);
+ u32 last_seqno = intel_engine_last_submit(engine);
+ u32 val;
+
+ val = !i915_seqno_passed(current_seqno, last_seqno);
+
+ update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+ PERIOD, val);
+
+ if (val && (engine->pmu.enable &
+ (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
+ fw = grab_forcewake(dev_priv, fw);
+
+ val = I915_READ_FW(RING_CTL(engine->mmio_base));
+ } else {
+ val = 0;
+ }
+
+ update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+ PERIOD, !!(val & RING_WAIT));
+
+ update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+ PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+ }
+
+ if (fw)
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static void frequency_sample(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->pmu.enable &
+ config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ u32 val;
+
+ val = dev_priv->gt_pm.rps.cur_freq;
+ if (dev_priv->gt.awake &&
+ intel_runtime_pm_get_if_in_use(dev_priv)) {
+ val = intel_get_cagf(dev_priv,
+ I915_READ_NOTRACE(GEN6_RPSTAT1));
+ intel_runtime_pm_put(dev_priv);
+ }
+
+ update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+ 1, intel_gpu_freq(dev_priv, val));
+ }
+
+ if (dev_priv->pmu.enable &
+ config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
+ intel_gpu_freq(dev_priv,
+ dev_priv->gt_pm.rps.cur_freq));
+ }
+}
+
+static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
+{
+ struct drm_i915_private *i915 =
+ container_of(hrtimer, struct drm_i915_private, pmu.timer);
+
+ if (!READ_ONCE(i915->pmu.timer_enabled))
+ return HRTIMER_NORESTART;
+
+ engines_sample(i915);
+ frequency_sample(i915);
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
+ return HRTIMER_RESTART;
+}
+
+static u64 count_interrupts(struct drm_i915_private *i915)
+{
+ /* open-coded kstat_irqs() */
+ struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
+ u64 sum = 0;
+ int cpu;
+
+ if (!desc || !desc->kstat_irqs)
+ return 0;
+
+ for_each_possible_cpu(cpu)
+ sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+
+ return sum;
+}
+
+static void engine_event_destroy(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+ if (WARN_ON_ONCE(!engine))
+ return;
+
+ if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
+ intel_engine_supports_stats(engine))
+ intel_disable_engine_stats(engine);
+}
+
+static void i915_pmu_event_destroy(struct perf_event *event)
+{
+ WARN_ON(event->parent);
+
+ if (is_engine_event(event))
+ engine_event_destroy(event);
+}
+
+static int
+engine_event_status(struct intel_engine_cs *engine,
+ enum drm_i915_pmu_engine_sample sample)
+{
+ switch (sample) {
+ case I915_SAMPLE_BUSY:
+ case I915_SAMPLE_WAIT:
+ break;
+ case I915_SAMPLE_SEMA:
+ if (INTEL_GEN(engine->i915) < 6)
+ return -ENODEV;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int
+config_status(struct drm_i915_private *i915, u64 config)
+{
+ switch (config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ /* Requires a mutex for sampling! */
+ return -ENODEV;
+ /* Fall-through. */
+ case I915_PMU_REQUESTED_FREQUENCY:
+ if (INTEL_GEN(i915) < 6)
+ return -ENODEV;
+ break;
+ case I915_PMU_INTERRUPTS:
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ if (!HAS_RC6(i915))
+ return -ENODEV;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int engine_event_init(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ struct intel_engine_cs *engine;
+ u8 sample;
+ int ret;
+
+ engine = intel_engine_lookup_user(i915, engine_event_class(event),
+ engine_event_instance(event));
+ if (!engine)
+ return -ENODEV;
+
+ sample = engine_event_sample(event);
+ ret = engine_event_status(engine, sample);
+ if (ret)
+ return ret;
+
+ if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
+ ret = intel_enable_engine_stats(engine);
+
+ return ret;
+}
+
+static int i915_pmu_event_init(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ int ret;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* unsupported modes and filters */
+ if (event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* only allow running on one cpu at a time */
+ if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
+ return -EINVAL;
+
+ if (is_engine_event(event))
+ ret = engine_event_init(event);
+ else
+ ret = config_status(i915, event->attr.config);
+ if (ret)
+ return ret;
+
+ if (!event->parent)
+ event->destroy = i915_pmu_event_destroy;
+
+ return 0;
+}
+
+static u64 __get_rc6(struct drm_i915_private *i915)
+{
+ u64 val;
+
+ val = intel_rc6_residency_ns(i915,
+ IS_VALLEYVIEW(i915) ?
+ VLV_GT_RENDER_RC6 :
+ GEN6_GT_GFX_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+
+ return val;
+}
+
+static u64 get_rc6(struct drm_i915_private *i915)
+{
+#if IS_ENABLED(CONFIG_PM)
+ unsigned long flags;
+ u64 val;
+
+ if (intel_runtime_pm_get_if_in_use(i915)) {
+ val = __get_rc6(i915);
+ intel_runtime_pm_put(i915);
+
+ /*
+ * If we are coming back from being runtime suspended we must
+ * be careful not to report a larger value than returned
+ * previously.
+ */
+
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+ i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ }
+
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ } else {
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct device *kdev = &pdev->dev;
+
+ /*
+ * We are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
+ */
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock(&kdev->power.lock);
+
+ if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ i915->pmu.suspended_jiffies_last =
+ kdev->power.suspended_jiffies;
+
+ val = kdev->power.suspended_jiffies -
+ i915->pmu.suspended_jiffies_last;
+ val += jiffies - kdev->power.accounting_timestamp;
+
+ spin_unlock(&kdev->power.lock);
+
+ val = jiffies_to_nsecs(val);
+ val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ }
+
+ return val;
+#else
+ return __get_rc6(i915);
+#endif
+}
+
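
While runtime suspended the RC6 registers cannot be read, so the counter is approximated as the last real reading plus the time spent suspended, and a later real reading below that estimate is ignored in favour of the estimate so the reported value never goes backwards. Roughly, under the field names used above:

/* Approximation used while runtime suspended (illustrative only):
 *
 *   estimate = last_real_rc6
 *            + jiffies_to_nsecs((suspended_jiffies - suspended_jiffies_last)
 *                               + (jiffies - accounting_timestamp));
 *
 * On the next real read, a value below the estimate is discarded and the
 * estimate is reported instead.
 */
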
+static u64 __i915_pmu_event_read(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ u64 val = 0;
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+
+ if (WARN_ON_ONCE(!engine)) {
+ /* Do nothing */
+ } else if (sample == I915_SAMPLE_BUSY &&
+ intel_engine_supports_stats(engine)) {
+ val = ktime_to_ns(intel_engine_get_busy_time(engine));
+ } else {
+ val = engine->pmu.sample[sample].cur;
+ }
+ } else {
+ switch (event->attr.config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val =
+ div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
+ FREQUENCY);
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val =
+ div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
+ FREQUENCY);
+ break;
+ case I915_PMU_INTERRUPTS:
+ val = count_interrupts(i915);
+ break;
+ case I915_PMU_RC6_RESIDENCY:
+ val = get_rc6(i915);
+ break;
+ }
+ }
+
+ return val;
+}
+
+static void i915_pmu_event_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new;
+
+again:
+ prev = local64_read(&hwc->prev_count);
+ new = __i915_pmu_event_read(event);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
+ goto again;
+
+ local64_add(new - prev, &event->count);
+}
+
+static void i915_pmu_enable(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ unsigned int bit = event_enabled_bit(event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ /*
+ * Update the bitmask of enabled events and increment
+ * the event reference counter.
+ */
+ GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+ GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
+ i915->pmu.enable |= BIT_ULL(bit);
+ i915->pmu.enable_count[bit]++;
+
+ /*
+ * Start the sampling timer if needed and not already enabled.
+ */
+ __i915_pmu_maybe_start_timer(i915);
+
+ /*
+ * For per-engine events the bitmask and reference counting
+ * is stored per engine.
+ */
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+ GEM_BUG_ON(!engine);
+ engine->pmu.enable |= BIT(sample);
+
+ GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+ engine->pmu.enable_count[sample]++;
+ }
+
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+
+ /*
+ * Store the current counter value so we can report the correct delta
+ * for all listeners, even when the event was already enabled and has
+ * an existing non-zero value.
+ */
+ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+}
+
+static void i915_pmu_disable(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ unsigned int bit = event_enabled_bit(event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+ GEM_BUG_ON(!engine);
+ GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--engine->pmu.enable_count[sample] == 0)
+ engine->pmu.enable &= ~BIT(sample);
+ }
+
+ GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+ GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--i915->pmu.enable_count[bit] == 0) {
+ i915->pmu.enable &= ~BIT_ULL(bit);
+ i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
+ }
+
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+}
+
+static void i915_pmu_event_start(struct perf_event *event, int flags)
+{
+ i915_pmu_enable(event);
+ event->hw.state = 0;
+}
+
+static void i915_pmu_event_stop(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_UPDATE)
+ i915_pmu_event_read(event);
+ i915_pmu_disable(event);
+ event->hw.state = PERF_HES_STOPPED;
+}
+
+static int i915_pmu_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ i915_pmu_event_start(event, flags);
+
+ return 0;
+}
+
+static void i915_pmu_event_del(struct perf_event *event, int flags)
+{
+ i915_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int i915_pmu_event_event_idx(struct perf_event *event)
+{
+ return 0;
+}
+
+struct i915_str_attribute {
+ struct device_attribute attr;
+ const char *str;
+};
+
+static ssize_t i915_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i915_str_attribute *eattr;
+
+ eattr = container_of(attr, struct i915_str_attribute, attr);
+ return sprintf(buf, "%s\n", eattr->str);
+}
+
+#define I915_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct i915_str_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
+ .str = _config, } \
+ })[0].attr.attr)
+
+static struct attribute *i915_pmu_format_attrs[] = {
+ I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = i915_pmu_format_attrs,
+};
+
+struct i915_ext_attribute {
+ struct device_attribute attr;
+ unsigned long val;
+};
+
+static ssize_t i915_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i915_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct i915_ext_attribute, attr);
+ return sprintf(buf, "config=0x%lx\n", eattr->val);
+}
+
+static struct attribute_group i915_pmu_events_attr_group = {
+ .name = "events",
+ /* Patch in attrs at runtime. */
+};
+
+static ssize_t
+i915_pmu_get_attr_cpumask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
+}
+
+static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);
+
+static struct attribute *i915_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_cpumask_attr_group = {
+ .attrs = i915_cpumask_attrs,
+};
+
+static const struct attribute_group *i915_pmu_attr_groups[] = {
+ &i915_pmu_format_attr_group,
+ &i915_pmu_events_attr_group,
+ &i915_pmu_cpumask_attr_group,
+ NULL
+};
+
+#define __event(__config, __name, __unit) \
+{ \
+ .config = (__config), \
+ .name = (__name), \
+ .unit = (__unit), \
+}
+
+#define __engine_event(__sample, __name) \
+{ \
+ .sample = (__sample), \
+ .name = (__name), \
+}
+
+static struct i915_ext_attribute *
+add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
+{
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = i915_pmu_event_show;
+ attr->val = config;
+
+ return ++attr;
+}
+
+static struct perf_pmu_events_attr *
+add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
+ const char *str)
+{
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = perf_event_sysfs_show;
+ attr->event_str = str;
+
+ return ++attr;
+}
+
+static struct attribute **
+create_event_attributes(struct drm_i915_private *i915)
+{
+ static const struct {
+ u64 config;
+ const char *name;
+ const char *unit;
+ } events[] = {
+ __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
+ __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
+ __event(I915_PMU_INTERRUPTS, "interrupts", NULL),
+ __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
+ };
+ static const struct {
+ enum drm_i915_pmu_engine_sample sample;
+ char *name;
+ } engine_events[] = {
+ __engine_event(I915_SAMPLE_BUSY, "busy"),
+ __engine_event(I915_SAMPLE_SEMA, "sema"),
+ __engine_event(I915_SAMPLE_WAIT, "wait"),
+ };
+ unsigned int count = 0;
+ struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
+ struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
+ struct attribute **attr = NULL, **attr_iter;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int i;
+
+ /* Count how many counters we will be exposing. */
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ if (!config_status(i915, events[i].config))
+ count++;
+ }
+
+ for_each_engine(engine, i915, id) {
+ for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
+ if (!engine_event_status(engine,
+ engine_events[i].sample))
+ count++;
+ }
+ }
+
+ /* Allocate attribute objects and table. */
+ i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
+ if (!i915_attr)
+ goto err_alloc;
+
+ pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
+ if (!pmu_attr)
+ goto err_alloc;
+
+ /* Max one pointer of each attribute type plus a termination entry. */
+ attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ goto err_alloc;
+
+ i915_iter = i915_attr;
+ pmu_iter = pmu_attr;
+ attr_iter = attr;
+
+ /* Initialize supported non-engine counters. */
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ char *str;
+
+ if (config_status(i915, events[i].config))
+ continue;
+
+ str = kstrdup(events[i].name, GFP_KERNEL);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &i915_iter->attr.attr;
+ i915_iter = add_i915_attr(i915_iter, str, events[i].config);
+
+ if (events[i].unit) {
+ str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &pmu_iter->attr.attr;
+ pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
+ }
+ }
+
+ /* Initialize supported engine counters. */
+ for_each_engine(engine, i915, id) {
+ for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
+ char *str;
+
+ if (engine_event_status(engine,
+ engine_events[i].sample))
+ continue;
+
+ str = kasprintf(GFP_KERNEL, "%s-%s",
+ engine->name, engine_events[i].name);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &i915_iter->attr.attr;
+ i915_iter =
+ add_i915_attr(i915_iter, str,
+ __I915_PMU_ENGINE(engine->uabi_class,
+ engine->instance,
+ engine_events[i].sample));
+
+ str = kasprintf(GFP_KERNEL, "%s-%s.unit",
+ engine->name, engine_events[i].name);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &pmu_iter->attr.attr;
+ pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
+ }
+ }
+
+ i915->pmu.i915_attr = i915_attr;
+ i915->pmu.pmu_attr = pmu_attr;
+
+ return attr;
+
+err:
+ for (attr_iter = attr; *attr_iter; attr_iter++)
+ kfree((*attr_iter)->name);
+
+err_alloc:
+ kfree(attr);
+ kfree(i915_attr);
+ kfree(pmu_attr);
+
+ return NULL;
+}
+
+static void free_event_attributes(struct drm_i915_private *i915)
+{
+ struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
+
+ for (; *attr_iter; attr_iter++)
+ kfree((*attr_iter)->name);
+
+ kfree(i915_pmu_events_attr_group.attrs);
+ kfree(i915->pmu.i915_attr);
+ kfree(i915->pmu.pmu_attr);
+
+ i915_pmu_events_attr_group.attrs = NULL;
+ i915->pmu.i915_attr = NULL;
+ i915->pmu.pmu_attr = NULL;
+}
+
+static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+
+ GEM_BUG_ON(!pmu->base.event_init);
+
+ /* Select the first online CPU as a designated reader. */
+ if (!cpumask_weight(&i915_pmu_cpumask))
+ cpumask_set_cpu(cpu, &i915_pmu_cpumask);
+
+ return 0;
+}
+
+static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ unsigned int target;
+
+ GEM_BUG_ON(!pmu->base.event_init);
+
+ if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
+ target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
+ /* Migrate events if there is a valid target */
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &i915_pmu_cpumask);
+ perf_pmu_migrate_context(&pmu->base, cpu, target);
+ }
+ }
+
+ return 0;
+}
+
+static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
+
+static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
+{
+ enum cpuhp_state slot;
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/x86/intel/i915:online",
+ i915_pmu_cpu_online,
+ i915_pmu_cpu_offline);
+ if (ret < 0)
+ return ret;
+
+ slot = ret;
+ ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
+ if (ret) {
+ cpuhp_remove_multi_state(slot);
+ return ret;
+ }
+
+ cpuhp_slot = slot;
+ return 0;
+}
+
+static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
+{
+ WARN_ON(cpuhp_slot == CPUHP_INVALID);
+ WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
+ cpuhp_remove_multi_state(cpuhp_slot);
+}
+
+void i915_pmu_register(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (INTEL_GEN(i915) <= 2) {
+ DRM_INFO("PMU not supported for this GPU.");
+ return;
+ }
+
+ i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
+ if (!i915_pmu_events_attr_group.attrs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i915->pmu.base.attr_groups = i915_pmu_attr_groups;
+ i915->pmu.base.task_ctx_nr = perf_invalid_context;
+ i915->pmu.base.event_init = i915_pmu_event_init;
+ i915->pmu.base.add = i915_pmu_event_add;
+ i915->pmu.base.del = i915_pmu_event_del;
+ i915->pmu.base.start = i915_pmu_event_start;
+ i915->pmu.base.stop = i915_pmu_event_stop;
+ i915->pmu.base.read = i915_pmu_event_read;
+ i915->pmu.base.event_idx = i915_pmu_event_event_idx;
+
+ spin_lock_init(&i915->pmu.lock);
+ hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ i915->pmu.timer.function = i915_sample;
+
+ ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
+ if (ret)
+ goto err;
+
+ ret = i915_pmu_register_cpuhp_state(i915);
+ if (ret)
+ goto err_unreg;
+
+ return;
+
+err_unreg:
+ perf_pmu_unregister(&i915->pmu.base);
+err:
+ i915->pmu.base.event_init = NULL;
+ free_event_attributes(i915);
+ DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
+}
+
+void i915_pmu_unregister(struct drm_i915_private *i915)
+{
+ if (!i915->pmu.base.event_init)
+ return;
+
+ WARN_ON(i915->pmu.enable);
+
+ hrtimer_cancel(&i915->pmu.timer);
+
+ i915_pmu_unregister_cpuhp_state(i915);
+
+ perf_pmu_unregister(&i915->pmu.base);
+ i915->pmu.base.event_init = NULL;
+ free_event_attributes(i915);
+}
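
The attributes built by create_event_attributes() surface through the standard perf sysfs layout once the PMU is registered. As a minimal illustration (not part of the patch), the exported counter names can be listed from userspace; this assumes the events attribute group is published under the conventional "events" directory, as for other perf PMUs:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/bus/event_source/devices/i915/events";
	struct dirent *de;
	DIR *dir = opendir(path);

	if (!dir) {
		perror(path);
		return 1;
	}

	while ((de = readdir(dir)) != NULL) {
		/* Skip ".", ".." and the "<event>.unit" companion attributes. */
		if (de->d_name[0] == '.' || strstr(de->d_name, ".unit"))
			continue;
		printf("i915/%s/\n", de->d_name);
	}

	closedir(dir);
	return 0;
}

Each engine counter is named "<engine>-<event>" by the kasprintf() calls above, with a matching "<engine>-<event>.unit" attribute carrying the unit string ("ns" for the engine samplers), so perf-style tools can scale and label the values.
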
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
new file mode 100644
index 0000000..aa1b1a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef __I915_PMU_H__
+#define __I915_PMU_H__
+
+enum {
+ __I915_SAMPLE_FREQ_ACT = 0,
+ __I915_SAMPLE_FREQ_REQ,
+ __I915_SAMPLE_RC6,
+ __I915_SAMPLE_RC6_ESTIMATED,
+ __I915_NUM_PMU_SAMPLERS
+};
+
+/**
+ * How many different events we track in the global PMU mask.
+ *
+ * It is also used to know the needed number of event reference counters.
+ */
+#define I915_PMU_MASK_BITS \
+ ((1 << I915_PMU_SAMPLE_BITS) + \
+ (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+
+struct i915_pmu_sample {
+ u64 cur;
+};
+
+struct i915_pmu {
+ /**
+ * @node: List node for CPU hotplug handling.
+ */
+ struct hlist_node node;
+ /**
+ * @base: PMU base.
+ */
+ struct pmu base;
+ /**
+ * @lock: Lock protecting enable mask and ref count handling.
+ */
+ spinlock_t lock;
+ /**
+ * @timer: Timer for internal i915 PMU sampling.
+ */
+ struct hrtimer timer;
+ /**
+ * @enable: Bitmask of all currently enabled events.
+ *
+ * Bits are derived from uAPI event numbers such that the low 16 bits
+ * correspond to the engine event _sample_ _type_ (I915_SAMPLE_QUEUED is
+ * bit 0), and higher bits correspond to other events (for instance
+ * I915_PMU_ACTUAL_FREQUENCY is bit 16, etc.).
+ *
+ * In other words, low 16 bits are not per engine but per engine
+ * sampler type, while the upper bits are directly mapped to other
+ * event types.
+ */
+ u64 enable;
+ /**
+ * @enable_count: Reference counts for the enabled events.
+ *
+ * Array indices are mapped in the same way as bits in the @enable field
+ * and they are used to control sampling on/off when multiple clients
+ * are using the PMU API.
+ */
+ unsigned int enable_count[I915_PMU_MASK_BITS];
+ /**
+ * @timer_enabled: Should the internal sampling timer be running.
+ */
+ bool timer_enabled;
+ /**
+ * @sample: Current and previous (raw) counters for sampling events.
+ *
+ * These counters are updated from the i915 PMU sampling timer.
+ *
+ * Only global counters are held here, while the per-engine ones are in
+ * struct intel_engine_cs.
+ */
+ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+ /**
+ * @suspended_jiffies_last: Cached suspend time from PM core.
+ */
+ unsigned long suspended_jiffies_last;
+ /**
+ * @i915_attr: Memory block holding device attributes.
+ */
+ void *i915_attr;
+ /**
+ * @pmu_attr: Memory block holding device attributes.
+ */
+ void *pmu_attr;
+};
+
+#ifdef CONFIG_PERF_EVENTS
+void i915_pmu_register(struct drm_i915_private *i915);
+void i915_pmu_unregister(struct drm_i915_private *i915);
+void i915_pmu_gt_parked(struct drm_i915_private *i915);
+void i915_pmu_gt_unparked(struct drm_i915_private *i915);
+#else
+static inline void i915_pmu_register(struct drm_i915_private *i915) {}
+static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_parked(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_unparked(struct drm_i915_private *i915) {}
+#endif
+
+#endif
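
With CONFIG_PERF_EVENTS enabled, the counters registered above are consumed through the regular perf_event_open(2) interface. A minimal userspace sketch (not part of the patch) follows; it assumes the I915_PMU_ACTUAL_FREQUENCY config value from the uAPI additions elsewhere in this series, and reads the dynamic PMU type id from sysfs as for any other dynamically registered PMU:

/*
 * Sketch only: read one i915 PMU counter from userspace. The config value
 * I915_PMU_ACTUAL_FREQUENCY is assumed to come from the uAPI header added
 * elsewhere in this series; the type id is whatever perf assigned at
 * registration time.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <drm/i915_drm.h>

static int i915_perf_open(uint64_t config)
{
	struct perf_event_attr attr;
	int type = -1;
	FILE *f;

	f = fopen("/sys/bus/event_source/devices/i915/type", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);
	if (type < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = config;

	/* i915 events are system-wide: pid = -1, cpu from the PMU cpumask. */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}

int main(void)
{
	uint64_t value;
	int fd = i915_perf_open(I915_PMU_ACTUAL_FREQUENCY);

	if (fd < 0 || read(fd, &value, sizeof(value)) != sizeof(value))
		return 1;

	printf("actual frequency counter: %llu\n", (unsigned long long)value);
	return 0;
}
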
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
new file mode 100644
index 0000000..3ace929
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -0,0 +1,125 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_query.h"
+#include <uapi/drm/i915_drm.h>
+
+static int query_topology_info(struct drm_i915_private *dev_priv,
+ struct drm_i915_query_item *query_item)
+{
+ const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ struct drm_i915_query_topology_info topo;
+ u32 slice_length, subslice_length, eu_length, total_length;
+
+ if (query_item->flags != 0)
+ return -EINVAL;
+
+ if (sseu->max_slices == 0)
+ return -ENODEV;
+
+ BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
+
+ slice_length = sizeof(sseu->slice_mask);
+ subslice_length = sseu->max_slices *
+ DIV_ROUND_UP(sseu->max_subslices,
+ sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+ eu_length = sseu->max_slices * sseu->max_subslices *
+ DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
+
+ total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
+
+ if (query_item->length == 0)
+ return total_length;
+
+ if (query_item->length < total_length)
+ return -EINVAL;
+
+ if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr),
+ sizeof(topo)))
+ return -EFAULT;
+
+ if (topo.flags != 0)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr),
+ total_length))
+ return -EFAULT;
+
+ memset(&topo, 0, sizeof(topo));
+ topo.max_slices = sseu->max_slices;
+ topo.max_subslices = sseu->max_subslices;
+ topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
+
+ topo.subslice_offset = slice_length;
+ topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
+ topo.eu_offset = slice_length + subslice_length;
+ topo.eu_stride =
+ DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
+
+ if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+ &topo, sizeof(topo)))
+ return -EFAULT;
+
+ if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
+ &sseu->slice_mask, slice_length))
+ return -EFAULT;
+
+ if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ sizeof(topo) + slice_length),
+ sseu->subslice_mask, subslice_length))
+ return -EFAULT;
+
+ if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ sizeof(topo) +
+ slice_length + subslice_length),
+ sseu->eu_mask, eu_length))
+ return -EFAULT;
+
+ return total_length;
+}
+
+static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
+ struct drm_i915_query_item *query_item) = {
+ query_topology_info,
+};
+
+int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_query *args = data;
+ struct drm_i915_query_item __user *user_item_ptr =
+ u64_to_user_ptr(args->items_ptr);
+ u32 i;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ for (i = 0; i < args->num_items; i++, user_item_ptr++) {
+ struct drm_i915_query_item item;
+ u64 func_idx;
+ int ret;
+
+ if (copy_from_user(&item, user_item_ptr, sizeof(item)))
+ return -EFAULT;
+
+ if (item.query_id == 0)
+ return -EINVAL;
+
+ func_idx = item.query_id - 1;
+
+ if (func_idx < ARRAY_SIZE(i915_query_funcs))
+ ret = i915_query_funcs[func_idx](dev_priv, &item);
+ else
+ ret = -EINVAL;
+
+ /* Only write the length back to userspace if they differ. */
+ if (ret != item.length && put_user(ret, &user_item_ptr->length))
+ return -EFAULT;
+ }
+
+ return 0;
+}
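
query_topology_info() implements the usual two-pass size negotiation: an item with length == 0 reports the number of bytes required, a second call with a large-enough buffer fills it in, and i915_query_ioctl() writes the resulting length (or a negative error) back into the item. A userspace sketch of that flow (not part of the patch), assuming the DRM_IOCTL_I915_QUERY number and the DRM_I915_QUERY_TOPOLOGY_INFO query id from the uAPI side of this series, and the usual /dev/dri/card0 primary node:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static struct drm_i915_query_topology_info *query_topology(int drm_fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_topology_info *topo;

	/* Pass 1: length == 0, the kernel writes the required size back. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return NULL;

	topo = calloc(1, item.length);
	if (!topo)
		return NULL;

	/* Pass 2: same item, now pointing at a large-enough buffer. */
	item.data_ptr = (uintptr_t)topo;
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0) {
		free(topo);
		return NULL;
	}

	return topo;
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_i915_query_topology_info *topo;

	if (fd < 0)
		return 1;

	topo = query_topology(fd);
	if (!topo)
		return 1;

	printf("max_slices=%u max_subslices=%u max_eus_per_subslice=%u\n",
	       topo->max_slices, topo->max_subslices, topo->max_eus_per_subslice);

	free(topo);
	return 0;
}
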
diff --git a/drivers/gpu/drm/i915/i915_query.h b/drivers/gpu/drm/i915/i915_query.h
new file mode 100644
index 0000000..31dcef1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_query.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_QUERY_H_
+#define _I915_QUERY_H_
+
+struct drm_device;
+struct drm_file;
+
+int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+#endif
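
The offsets and strides filled in by query_topology_info() describe where the slice, subslice and EU masks sit in the blob that follows the fixed-size header. A sketch of how a consumer might test individual bits (not part of the patch; the helper names are illustrative and the struct comes from the uAPI header added in this series):

/*
 * Sketch only: interpret the topology blob laid out by query_topology_info().
 * The masks start right after the header, at the offsets/strides the kernel
 * filled in; the slice mask itself starts at offset 0 of the blob.
 */
#include <stdint.h>
#include <drm/i915_drm.h>

static inline const uint8_t *
topo_data(const struct drm_i915_query_topology_info *topo)
{
	/* The kernel copies the masks to data_ptr + sizeof(topo). */
	return (const uint8_t *)topo + sizeof(*topo);
}

static inline int
slice_available(const struct drm_i915_query_topology_info *topo, int s)
{
	return (topo_data(topo)[s / 8] >> (s % 8)) & 1;
}

static inline int
subslice_available(const struct drm_i915_query_topology_info *topo,
		   int s, int ss)
{
	const uint8_t *mask = topo_data(topo) + topo->subslice_offset +
			      s * topo->subslice_stride;

	return (mask[ss / 8] >> (ss % 8)) & 1;
}

static inline int
eu_available(const struct drm_i915_query_topology_info *topo,
	     int s, int ss, int eu)
{
	const uint8_t *mask = topo_data(topo) + topo->eu_offset +
			      (s * topo->max_subslices + ss) * topo->eu_stride;

	return (mask[eu / 8] >> (eu % 8)) & 1;
}
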
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7923dfd..e6a8c0e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -178,6 +178,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define BCS_HW 2
#define VECS_HW 3
#define VCS2_HW 4
+#define VCS3_HW 6
+#define VCS4_HW 7
+#define VECS2_HW 12
/* Engine class */
@@ -186,6 +189,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VIDEO_ENHANCEMENT_CLASS 2
#define COPY_ENGINE_CLASS 3
#define OTHER_CLASS 4
+#define MAX_ENGINE_CLASS 4
+
+#define MAX_ENGINE_INSTANCE 3
/* PCI config space */
@@ -355,9 +361,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
-#define GEN8_CONFIG0 _MMIO(0xD00)
-#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
-
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
@@ -382,6 +385,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_STOLEN_RESERVED_2M (1 << 7)
#define GEN8_STOLEN_RESERVED_4M (2 << 7)
#define GEN8_STOLEN_RESERVED_8M (3 << 7)
+#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
/* VGA stuff */
@@ -1109,16 +1113,50 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OA_PERFCNT1_HI _MMIO(0x91BC)
#define OA_PERFCNT2_LO _MMIO(0x91C0)
#define OA_PERFCNT2_HI _MMIO(0x91C4)
+#define OA_PERFCNT3_LO _MMIO(0x91C8)
+#define OA_PERFCNT3_HI _MMIO(0x91CC)
+#define OA_PERFCNT4_LO _MMIO(0x91D8)
+#define OA_PERFCNT4_HI _MMIO(0x91DC)
#define OA_PERFMATRIX_LO _MMIO(0x91C8)
#define OA_PERFMATRIX_HI _MMIO(0x91CC)
/* RPM unit config (Gen8+) */
#define RPM_CONFIG0 _MMIO(0x0D00)
-#define RPM_CONFIG1 _MMIO(0x0D04)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
+#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
+#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
-/* RPC unit config (Gen8+) */
-#define RPM_CONFIG _MMIO(0x0D08)
+#define RPM_CONFIG1 _MMIO(0x0D04)
+#define GEN10_GT_NOA_ENABLE (1 << 9)
+
+/* GPM unit config (Gen9+) */
+#define CTC_MODE _MMIO(0xA26C)
+#define CTC_SOURCE_PARAMETER_MASK 1
+#define CTC_SOURCE_CRYSTAL_CLOCK 0
+#define CTC_SOURCE_DIVIDE_LOGIC 1
+#define CTC_SHIFT_PARAMETER_SHIFT 1
+#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
+
+/* RCP unit config (Gen8+) */
+#define RCP_CONFIG _MMIO(0x0D08)
+
+/* NOA (HSW) */
+#define HSW_MBVID2_NOA0 _MMIO(0x9E80)
+#define HSW_MBVID2_NOA1 _MMIO(0x9E84)
+#define HSW_MBVID2_NOA2 _MMIO(0x9E88)
+#define HSW_MBVID2_NOA3 _MMIO(0x9E8C)
+#define HSW_MBVID2_NOA4 _MMIO(0x9E90)
+#define HSW_MBVID2_NOA5 _MMIO(0x9E94)
+#define HSW_MBVID2_NOA6 _MMIO(0x9E98)
+#define HSW_MBVID2_NOA7 _MMIO(0x9E9C)
+#define HSW_MBVID2_NOA8 _MMIO(0x9EA0)
+#define HSW_MBVID2_NOA9 _MMIO(0x9EA4)
+
+#define HSW_MBVID2_MISR0 _MMIO(0x9EC0)
/* NOA (Gen8+) */
#define NOA_CONFIG(i) _MMIO(0x0D0C + (i) * 4)
@@ -1269,6 +1307,7 @@ enum i915_power_well_id {
SKL_DISP_PW_DDI_B,
SKL_DISP_PW_DDI_C,
SKL_DISP_PW_DDI_D,
+ CNL_DISP_PW_DDI_F = 6,
GLK_DISP_PW_AUX_A = 8,
GLK_DISP_PW_AUX_B,
@@ -1277,6 +1316,7 @@ enum i915_power_well_id {
CNL_DISP_PW_AUX_B = GLK_DISP_PW_AUX_B,
CNL_DISP_PW_AUX_C = GLK_DISP_PW_AUX_C,
CNL_DISP_PW_AUX_D,
+ CNL_DISP_PW_AUX_F,
SKL_DISP_PW_1 = 14,
SKL_DISP_PW_2,
@@ -1869,6 +1909,11 @@ enum i915_power_well_id {
#define CL_POWER_DOWN_ENABLE (1 << 4)
#define SUS_CLOCK_CONFIG (3 << 0)
+#define _ICL_PORT_CL_DW5_A 0x162014
+#define _ICL_PORT_CL_DW5_B 0x6C014
+#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
+ _ICL_PORT_CL_DW5_B)
+
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@@ -1928,7 +1973,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW2_LN0_B 0x162648
#define _CNL_PORT_TX_DW2_LN0_C 0x162C48
#define _CNL_PORT_TX_DW2_LN0_D 0x162E48
-#define _CNL_PORT_TX_DW2_LN0_F 0x162A48
+#define _CNL_PORT_TX_DW2_LN0_F 0x162848
#define CNL_PORT_TX_DW2_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW2_GRP_AE, \
_CNL_PORT_TX_DW2_GRP_B, \
@@ -1992,7 +2037,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW5_LN0_AE 0x162454
#define _CNL_PORT_TX_DW5_LN0_B 0x162654
#define _CNL_PORT_TX_DW5_LN0_C 0x162C54
-#define _CNL_PORT_TX_DW5_LN0_D 0x162ED4
+#define _CNL_PORT_TX_DW5_LN0_D 0x162E54
#define _CNL_PORT_TX_DW5_LN0_F 0x162854
#define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW5_GRP_AE, \
@@ -2023,7 +2068,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW7_LN0_AE 0x16245C
#define _CNL_PORT_TX_DW7_LN0_B 0x16265C
#define _CNL_PORT_TX_DW7_LN0_C 0x162C5C
-#define _CNL_PORT_TX_DW7_LN0_D 0x162EDC
+#define _CNL_PORT_TX_DW7_LN0_D 0x162E5C
#define _CNL_PORT_TX_DW7_LN0_F 0x16285C
#define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW7_GRP_AE, \
@@ -2067,6 +2112,28 @@ enum i915_power_well_id {
#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
+#define _ICL_PORT_COMP_DW0_A 0x162100
+#define _ICL_PORT_COMP_DW0_B 0x6C100
+#define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
+ _ICL_PORT_COMP_DW0_B)
+#define _ICL_PORT_COMP_DW1_A 0x162104
+#define _ICL_PORT_COMP_DW1_B 0x6C104
+#define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
+ _ICL_PORT_COMP_DW1_B)
+#define _ICL_PORT_COMP_DW3_A 0x16210C
+#define _ICL_PORT_COMP_DW3_B 0x6C10C
+#define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
+ _ICL_PORT_COMP_DW3_B)
+#define _ICL_PORT_COMP_DW9_A 0x162124
+#define _ICL_PORT_COMP_DW9_B 0x6C124
+#define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
+ _ICL_PORT_COMP_DW9_B)
+#define _ICL_PORT_COMP_DW10_A 0x162128
+#define _ICL_PORT_COMP_DW10_B 0x6C128
+#define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \
+ _ICL_PORT_COMP_DW10_A, \
+ _ICL_PORT_COMP_DW10_B)
+
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define _PORT_REF_DW3_BC 0x6C18C
@@ -2278,7 +2345,13 @@ enum i915_power_well_id {
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
#define GEN8_BSD2_RING_BASE 0x1c000
+#define GEN11_BSD_RING_BASE 0x1c0000
+#define GEN11_BSD2_RING_BASE 0x1c4000
+#define GEN11_BSD3_RING_BASE 0x1d0000
+#define GEN11_BSD4_RING_BASE 0x1d4000
#define VEBOX_RING_BASE 0x1a000
+#define GEN11_VEBOX_RING_BASE 0x1c8000
+#define GEN11_VEBOX2_RING_BASE 0x1d8000
#define BLT_RING_BASE 0x22000
#define RING_TAIL(base) _MMIO((base)+0x30)
#define RING_HEAD(base) _MMIO((base)+0x34)
@@ -2329,6 +2402,8 @@ enum i915_power_well_id {
#define ARB_MODE_SWIZZLE_BDW (1<<1)
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
+#define GEN8_RING_FAULT_REG _MMIO(0x4094)
+#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
#define RING_FAULT_GTTSEL_MASK (1<<11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
@@ -2452,6 +2527,8 @@ enum i915_power_well_id {
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
+#define FAULT_VA_HIGH_BITS (0xf << 0)
+#define FAULT_GTT_SEL (1 << 4)
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -2551,6 +2628,8 @@ enum i915_power_well_id {
#define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
#define GFX_FORWARD_VBLANK_COND (2<<5)
+#define GEN11_GFX_DISABLE_LEGACY_MODE (1<<3)
+
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
#define BXT_MIPI_BASE 0x60000
@@ -2609,6 +2688,31 @@ enum i915_power_well_id {
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
+#define MBUS_ABOX_CTL _MMIO(0x45038)
+#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
+#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
+#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
+#define MBUS_ABOX_B_CREDIT(x) ((x) << 16)
+#define MBUS_ABOX_BT_CREDIT_POOL2_MASK (0x1F << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL2(x) ((x) << 8)
+#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
+#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
+
+#define _PIPEA_MBUS_DBOX_CTL 0x7003C
+#define _PIPEB_MBUS_DBOX_CTL 0x7103C
+#define PIPE_MBUS_DBOX_CTL(pipe) _MMIO_PIPE(pipe, _PIPEA_MBUS_DBOX_CTL, \
+ _PIPEB_MBUS_DBOX_CTL)
+#define MBUS_DBOX_BW_CREDIT_MASK (3 << 14)
+#define MBUS_DBOX_BW_CREDIT(x) ((x) << 14)
+#define MBUS_DBOX_B_CREDIT_MASK (0x1F << 8)
+#define MBUS_DBOX_B_CREDIT(x) ((x) << 8)
+#define MBUS_DBOX_A_CREDIT_MASK (0xF << 0)
+#define MBUS_DBOX_A_CREDIT(x) ((x) << 0)
+
+#define MBUS_UBOX_CTL _MMIO(0x4503C)
+#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
+#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
+
/* Make render/texture TLB fetches lower priorty than associated data
* fetches. This is not turned on by default
*/
@@ -2712,6 +2816,13 @@ enum i915_power_well_id {
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
/* Fuse readout registers for GT */
+#define HSW_PAVP_FUSE1 _MMIO(0x911C)
+#define HSW_F1_EU_DIS_SHIFT 16
+#define HSW_F1_EU_DIS_MASK (0x3 << HSW_F1_EU_DIS_SHIFT)
+#define HSW_F1_EU_DIS_10EUS 0
+#define HSW_F1_EU_DIS_8EUS 1
+#define HSW_F1_EU_DIS_6EUS 2
+
#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
@@ -3006,6 +3117,7 @@ enum i915_power_well_id {
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
+#define GMBUS_AKSV_SELECT (1<<11)
#define GMBUS_RATE_100KHZ (0<<8)
#define GMBUS_RATE_50KHZ (1<<8)
#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
@@ -3024,7 +3136,12 @@ enum i915_power_well_id {
#define GMBUS_PIN_2_BXT 2
#define GMBUS_PIN_3_BXT 3
#define GMBUS_PIN_4_CNP 4
-#define GMBUS_NUM_PINS 7 /* including 0 */
+#define GMBUS_PIN_9_TC1_ICP 9
+#define GMBUS_PIN_10_TC2_ICP 10
+#define GMBUS_PIN_11_TC3_ICP 11
+#define GMBUS_PIN_12_TC4_ICP 12
+
+#define GMBUS_NUM_PINS 13 /* including 0 */
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
@@ -3241,6 +3358,7 @@ enum i915_power_well_id {
# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
+# define PNV_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 24) /* pnv */
# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
@@ -3395,6 +3513,7 @@ enum i915_power_well_id {
#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
+#define G4X_STOLEN_RESERVED_ENABLE (1 << 0)
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
@@ -3793,6 +3912,12 @@ enum {
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
+#define GEN11_SW_CTX_ID_SHIFT 37
+#define GEN11_SW_CTX_ID_WIDTH 11
+#define GEN11_ENGINE_CLASS_SHIFT 61
+#define GEN11_ENGINE_CLASS_WIDTH 3
+#define GEN11_ENGINE_INSTANCE_SHIFT 48
+#define GEN11_ENGINE_INSTANCE_WIDTH 6
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
@@ -3816,9 +3941,13 @@ enum {
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
+#define DARBF_GATING_DIS (1 << 27)
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
+#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
+#define BXT_GMBUS_GATING_DIS (1 << 14)
+
#define _CLKGATE_DIS_PSL_A 0x46520
#define _CLKGATE_DIS_PSL_B 0x46524
#define _CLKGATE_DIS_PSL_C 0x46528
@@ -3834,6 +3963,13 @@ enum {
*/
#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
+#define RCCUNIT_CLKGATE_DIS (1 << 7)
+
+#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
+#define GWUNIT_CLKGATE_DIS (1 << 16)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
+#define VFUNIT_CLKGATE_DIS (1 << 20)
/*
* Display engine regs
@@ -4017,7 +4153,7 @@ enum {
#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
-#define EDP_PSR_STATUS_CTL _MMIO(dev_priv->psr_mmio_base + 0x40)
+#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
#define EDP_PSR_STATUS_STATE_MASK (7<<29)
#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
@@ -4044,7 +4180,7 @@ enum {
#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
-#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60)
+#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60)
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
@@ -4067,7 +4203,7 @@ enum {
#define EDP_PSR2_IDLE_MASK 0xf
#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
-#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940)
+#define EDP_PSR2_STATUS _MMIO(0x6f940)
#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
@@ -5229,8 +5365,15 @@ enum {
#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
-#define DP_AUX_CH_CTL(port) _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
-#define DP_AUX_CH_DATA(port, i) _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510)
+#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514)
+#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518)
+#define _DPF_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6451c)
+#define _DPF_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64520)
+#define _DPF_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64524)
+
+#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
+#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
#define DP_AUX_CH_CTL_DONE (1 << 30)
@@ -6014,6 +6157,7 @@ enum {
#define _DVSACNTR 0x72180
#define DVS_ENABLE (1<<31)
#define DVS_GAMMA_ENABLE (1<<30)
+#define DVS_YUV_RANGE_CORRECTION_DISABLE (1<<27)
#define DVS_PIXFORMAT_MASK (3<<25)
#define DVS_FORMAT_YUV422 (0<<25)
#define DVS_FORMAT_RGBX101010 (1<<25)
@@ -6022,6 +6166,7 @@ enum {
#define DVS_PIPE_CSC_ENABLE (1<<24)
#define DVS_SOURCE_KEY (1<<22)
#define DVS_RGB_ORDER_XBGR (1<<20)
+#define DVS_YUV_FORMAT_BT709 (1<<18)
#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
#define DVS_YUV_ORDER_YUYV (0<<16)
#define DVS_YUV_ORDER_UYVY (1<<16)
@@ -6081,6 +6226,7 @@ enum {
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
#define SPRITE_GAMMA_ENABLE (1<<30)
+#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1<<28)
#define SPRITE_PIXFORMAT_MASK (7<<25)
#define SPRITE_FORMAT_YUV422 (0<<25)
#define SPRITE_FORMAT_RGBX101010 (1<<25)
@@ -6092,7 +6238,7 @@ enum {
#define SPRITE_SOURCE_KEY (1<<22)
#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
-#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
+#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
#define SPRITE_YUV_ORDER_YUYV (0<<16)
#define SPRITE_YUV_ORDER_UYVY (1<<16)
@@ -6168,6 +6314,7 @@ enum {
#define SP_FORMAT_RGBA8888 (0xf<<26)
#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
#define SP_SOURCE_KEY (1<<22)
+#define SP_YUV_FORMAT_BT709 (1<<18)
#define SP_YUV_BYTE_ORDER_MASK (3<<16)
#define SP_YUV_ORDER_YUYV (0<<16)
#define SP_YUV_ORDER_UYVY (1<<16)
@@ -6187,6 +6334,12 @@ enum {
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
#define SP_CONST_ALPHA_ENABLE (1<<31)
+#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
+#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */
+#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */
+#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
+#define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */
+#define SP_SH_COS(x) (x) /* u3.7 */
#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
@@ -6200,6 +6353,8 @@ enum {
#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
+#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
+#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
@@ -6216,6 +6371,8 @@ enum {
#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
+#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
/*
@@ -6260,7 +6417,13 @@ enum {
#define _PLANE_CTL_2_A 0x70280
#define _PLANE_CTL_3_A 0x70380
#define PLANE_CTL_ENABLE (1 << 31)
-#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
+#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-GLK */
+#define PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+/*
+ * ICL+ uses the same PLANE_CTL_FORMAT bits, but the field definition has
+ * expanded to include bit 23 as well. However, the shift-24 based values
+ * correctly map to the same formats in ICL, as long as bit 23 is set to 0.
+ */
#define PLANE_CTL_FORMAT_MASK (0xf << 24)
#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
@@ -6270,12 +6433,14 @@ enum {
#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
-#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
+#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
+#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
#define PLANE_CTL_ORDER_BGRX (0 << 20)
#define PLANE_CTL_ORDER_RGBX (1 << 20)
+#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
@@ -6283,13 +6448,14 @@ enum {
#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
-#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
+#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
#define PLANE_CTL_TILED_X ( 1 << 10)
#define PLANE_CTL_TILED_Y ( 4 << 10)
#define PLANE_CTL_TILED_YF ( 5 << 10)
-#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
+#define PLANE_CTL_FLIP_HORIZONTAL ( 1 << 8)
+#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
@@ -6327,8 +6493,18 @@ enum {
#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30)
+#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23)
+#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
+#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17)
+#define PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709 (2 << 17)
+#define PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020 (3 << 17)
+#define PLANE_COLOR_CSC_MODE_RGB709_TO_RGB2020 (4 << 17)
#define PLANE_COLOR_PLANE_GAMMA_DISABLE (1 << 13)
+#define PLANE_COLOR_ALPHA_MASK (0x3 << 4)
+#define PLANE_COLOR_ALPHA_DISABLE (0 << 4)
+#define PLANE_COLOR_ALPHA_SW_PREMULTIPLY (2 << 4)
+#define PLANE_COLOR_ALPHA_HW_PREMULTIPLY (3 << 4)
#define _PLANE_BUF_CFG_1_A 0x7027c
#define _PLANE_BUF_CFG_2_A 0x7037c
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
@@ -6879,6 +7055,7 @@ enum {
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define CNL_AUX_CHANNEL_F (1 << 28)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25)
@@ -6903,6 +7080,69 @@ enum {
#define GEN8_PCU_IIR _MMIO(0x444e8)
#define GEN8_PCU_IER _MMIO(0x444ec)
+#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
+#define GEN11_MASTER_IRQ (1 << 31)
+#define GEN11_PCU_IRQ (1 << 30)
+#define GEN11_DISPLAY_IRQ (1 << 16)
+#define GEN11_GT_DW_IRQ(x) (1 << (x))
+#define GEN11_GT_DW1_IRQ (1 << 1)
+#define GEN11_GT_DW0_IRQ (1 << 0)
+
+#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
+#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
+#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
+#define GEN11_DE_PCH_IRQ (1 << 23)
+#define GEN11_DE_MISC_IRQ (1 << 22)
+#define GEN11_DE_PORT_IRQ (1 << 20)
+#define GEN11_DE_PIPE_C (1 << 18)
+#define GEN11_DE_PIPE_B (1 << 17)
+#define GEN11_DE_PIPE_A (1 << 16)
+
+#define GEN11_GT_INTR_DW0 _MMIO(0x190018)
+#define GEN11_CSME (31)
+#define GEN11_GUNIT (28)
+#define GEN11_GUC (25)
+#define GEN11_WDPERF (20)
+#define GEN11_KCR (19)
+#define GEN11_GTPM (16)
+#define GEN11_BCS (15)
+#define GEN11_RCS0 (0)
+
+#define GEN11_GT_INTR_DW1 _MMIO(0x19001c)
+#define GEN11_VECS(x) (31 - (x))
+#define GEN11_VCS(x) (x)
+
+#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + (x * 4))
+
+#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060)
+#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064)
+#define GEN11_INTR_DATA_VALID (1 << 31)
+#define GEN11_INTR_ENGINE_MASK (0xffff)
+
+#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + (x * 4))
+
+#define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070)
+#define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074)
+
+#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + (x * 4))
+
+#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
+#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
+#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038)
+#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c)
+#define GEN11_CRYPTO_RSVD_INTR_ENABLE _MMIO(0x190040)
+#define GEN11_GUNIT_CSME_INTR_ENABLE _MMIO(0x190044)
+
+#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090)
+#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0)
+#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8)
+#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac)
+#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
+#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8)
+#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
+#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0)
+#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
+
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
@@ -6957,8 +7197,12 @@ enum {
#define CHICKEN_TRANS_A 0x420c0
#define CHICKEN_TRANS_B 0x420c4
#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
-#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
-#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
+#define DDI_TRAINING_OVERRIDE_ENABLE (1<<19)
+#define DDI_TRAINING_OVERRIDE_VALUE (1<<18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE (1<<16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
+#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE (1<<31)
@@ -6968,6 +7212,8 @@ enum {
#define DISP_DATA_PARTITION_5_6 (1<<6)
#define DISP_IPC_ENABLE (1<<3)
#define DBUF_CTL _MMIO(0x45008)
+#define DBUF_CTL_S1 _MMIO(0x45008)
+#define DBUF_CTL_S2 _MMIO(0x44FE8)
#define DBUF_POWER_REQUEST (1<<31)
#define DBUF_POWER_STATE (1<<30)
#define GEN7_MSG_CTL _MMIO(0x45010)
@@ -6977,8 +7223,9 @@ enum {
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
-#define MASK_WAKEMEM (1<<13)
+#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
+#define MASK_WAKEMEM (1 << 13)
+#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
@@ -6990,8 +7237,12 @@ enum {
#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
-#define SKL_DSSM _MMIO(0x51004)
-#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
+#define SKL_DSSM _MMIO(0x51004)
+#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29)
+#define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29)
#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
@@ -7297,6 +7548,8 @@ enum {
#define CNP_RAWCLK_DIV(div) ((div) << 16)
#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
#define CNP_RAWCLK_FRAC(frac) ((frac) << 26)
+#define ICP_RAWCLK_DEN(den) ((den) << 26)
+#define ICP_RAWCLK_NUM(num) ((num) << 11)
#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
@@ -7511,6 +7764,7 @@ enum {
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
@@ -7668,8 +7922,8 @@ enum {
#define _PCH_DPD_AUX_CH_DATA4 0xe4320
#define _PCH_DPD_AUX_CH_DATA5 0xe4324
-#define PCH_DP_AUX_CH_CTL(port) _MMIO_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL)
-#define PCH_DP_AUX_CH_DATA(port, i) _MMIO(_PORT((port) - PORT_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define PCH_DP_AUX_CH_CTL(aux_ch) _MMIO_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_CTL, _PCH_DPC_AUX_CH_CTL)
+#define PCH_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
/* CPT */
#define PORT_TRANS_A_SEL_CPT 0
@@ -7769,13 +8023,18 @@ enum {
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */
#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
+#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4)
+#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4)
#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
#define FORCEWAKE_BLITTER_GEN9 _MMIO(0xa188)
#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
+#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0x0D50 + (n) * 4)
+#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0x0D70 + (n) * 4)
#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044)
-#define FORCEWAKE_KERNEL 0x1
-#define FORCEWAKE_USER 0x2
+#define FORCEWAKE_KERNEL BIT(0)
+#define FORCEWAKE_USER BIT(1)
+#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
#define FORCEWAKE_MT_ACK _MMIO(0x130040)
#define ECOBUS _MMIO(0xa180)
#define FORCEWAKE_MT_ENABLE (1<<5)
@@ -7905,6 +8164,7 @@ enum {
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
+#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xA0A0)
#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xA0A8)
#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xA0AC)
#define GEN6_RC_SLEEP _MMIO(0xA0B0)
@@ -7991,6 +8251,7 @@ enum {
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
+#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
#define SKL_PCODE_CDCLK_CONTROL 0x7
#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
#define SKL_CDCLK_READY_FOR_CHANGE 0x1
@@ -8036,11 +8297,18 @@ enum {
#define CHV_EU311_PG_ENABLE (1<<1)
#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4)
+#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
+ ((slice) % 3) * 0x4)
#define GEN9_PGCTL_SLICE_ACK (1 << 0)
#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
+#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8)
+#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8)
+#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
+ ((slice) % 3) * 0x8)
#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
@@ -8092,6 +8360,7 @@ enum {
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
#define THROTTLE_12_5 (7<<2)
+#define DISABLE_EARLY_EOT (1<<1)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
@@ -8285,6 +8554,101 @@ enum skl_power_gate {
#define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
+#define _CNL_AUX_REG_IDX(pw) ((pw) - 9)
+#define _CNL_AUX_ANAOVRD1_B 0x162250
+#define _CNL_AUX_ANAOVRD1_C 0x162210
+#define _CNL_AUX_ANAOVRD1_D 0x1622D0
+#define _CNL_AUX_ANAOVRD1_F 0x162A90
+#define CNL_AUX_ANAOVRD1(pw) _MMIO(_PICK(_CNL_AUX_REG_IDX(pw), \
+ _CNL_AUX_ANAOVRD1_B, \
+ _CNL_AUX_ANAOVRD1_C, \
+ _CNL_AUX_ANAOVRD1_D, \
+ _CNL_AUX_ANAOVRD1_F))
+#define CNL_AUX_ANAOVRD1_ENABLE (1<<16)
+#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1<<23)
+
+/* HDCP Key Registers */
+#define HDCP_KEY_CONF _MMIO(0x66c00)
+#define HDCP_AKSV_SEND_TRIGGER BIT(31)
+#define HDCP_CLEAR_KEYS_TRIGGER BIT(30)
+#define HDCP_KEY_LOAD_TRIGGER BIT(8)
+#define HDCP_KEY_STATUS _MMIO(0x66c04)
+#define HDCP_FUSE_IN_PROGRESS BIT(7)
+#define HDCP_FUSE_ERROR BIT(6)
+#define HDCP_FUSE_DONE BIT(5)
+#define HDCP_KEY_LOAD_STATUS BIT(1)
+#define HDCP_KEY_LOAD_DONE BIT(0)
+#define HDCP_AKSV_LO _MMIO(0x66c10)
+#define HDCP_AKSV_HI _MMIO(0x66c14)
+
+/* HDCP Repeater Registers */
+#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_DDIB_REP_PRESENT BIT(30)
+#define HDCP_DDIA_REP_PRESENT BIT(29)
+#define HDCP_DDIC_REP_PRESENT BIT(28)
+#define HDCP_DDID_REP_PRESENT BIT(27)
+#define HDCP_DDIF_REP_PRESENT BIT(26)
+#define HDCP_DDIE_REP_PRESENT BIT(25)
+#define HDCP_DDIB_SHA1_M0 (1 << 20)
+#define HDCP_DDIA_SHA1_M0 (2 << 20)
+#define HDCP_DDIC_SHA1_M0 (3 << 20)
+#define HDCP_DDID_SHA1_M0 (4 << 20)
+#define HDCP_DDIF_SHA1_M0 (5 << 20)
+#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */
+#define HDCP_SHA1_BUSY BIT(16)
+#define HDCP_SHA1_READY BIT(17)
+#define HDCP_SHA1_COMPLETE BIT(18)
+#define HDCP_SHA1_V_MATCH BIT(19)
+#define HDCP_SHA1_TEXT_32 (1 << 1)
+#define HDCP_SHA1_COMPLETE_HASH (2 << 1)
+#define HDCP_SHA1_TEXT_24 (4 << 1)
+#define HDCP_SHA1_TEXT_16 (5 << 1)
+#define HDCP_SHA1_TEXT_8 (6 << 1)
+#define HDCP_SHA1_TEXT_0 (7 << 1)
+#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04)
+#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08)
+#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
+#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
+#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
+#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + h * 4))
+#define HDCP_SHA_TEXT _MMIO(0x66d18)
+
+/* HDCP Auth Registers */
+#define _PORTA_HDCP_AUTHENC 0x66800
+#define _PORTB_HDCP_AUTHENC 0x66500
+#define _PORTC_HDCP_AUTHENC 0x66600
+#define _PORTD_HDCP_AUTHENC 0x66700
+#define _PORTE_HDCP_AUTHENC 0x66A00
+#define _PORTF_HDCP_AUTHENC 0x66900
+#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \
+ _PORTA_HDCP_AUTHENC, \
+ _PORTB_HDCP_AUTHENC, \
+ _PORTC_HDCP_AUTHENC, \
+ _PORTD_HDCP_AUTHENC, \
+ _PORTE_HDCP_AUTHENC, \
+ _PORTF_HDCP_AUTHENC) + x)
+#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define HDCP_CONF_CAPTURE_AN BIT(0)
+#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
+#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define HDCP_STATUS_STREAM_A_ENC BIT(31)
+#define HDCP_STATUS_STREAM_B_ENC BIT(30)
+#define HDCP_STATUS_STREAM_C_ENC BIT(29)
+#define HDCP_STATUS_STREAM_D_ENC BIT(28)
+#define HDCP_STATUS_AUTH BIT(21)
+#define HDCP_STATUS_ENC BIT(20)
+#define HDCP_STATUS_RI_MATCH BIT(19)
+#define HDCP_STATUS_R0_READY BIT(18)
+#define HDCP_STATUS_AN_READY BIT(17)
+#define HDCP_STATUS_CIPHER BIT(16)
+#define HDCP_STATUS_FRAME_CNT(x) ((x >> 8) & 0xff)
+
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -8316,6 +8680,7 @@ enum skl_power_gate {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_HDCP_SIGNALLING (1<<9)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
@@ -8514,20 +8879,21 @@ enum skl_power_gate {
/* CDCLK_CTL */
#define CDCLK_CTL _MMIO(0x46000)
-#define CDCLK_FREQ_SEL_MASK (3<<26)
-#define CDCLK_FREQ_450_432 (0<<26)
-#define CDCLK_FREQ_540 (1<<26)
-#define CDCLK_FREQ_337_308 (2<<26)
-#define CDCLK_FREQ_675_617 (3<<26)
-#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
-#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
-#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
-#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
-#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
+#define CDCLK_FREQ_SEL_MASK (3 << 26)
+#define CDCLK_FREQ_450_432 (0 << 26)
+#define CDCLK_FREQ_540 (1 << 26)
+#define CDCLK_FREQ_337_308 (2 << 26)
+#define CDCLK_FREQ_675_617 (3 << 26)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3 << 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 (0 << 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1 << 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 (2 << 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 (3 << 22)
+#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
-#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
+#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
/* LCPLL_CTL */
@@ -8598,10 +8964,12 @@ enum skl_power_gate {
* CNL Clocks
*/
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
-#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port)+10))
-#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << ((port)*2))
-#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port)*2)
-#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << ((port)*2))
+#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
+ (port)+10))
+#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
+ (port)*2)
+#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
+#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
/* CNL PLL */
#define DPLL0_ENABLE 0x46010
@@ -8697,6 +9065,7 @@ enum skl_power_gate {
#define SFUSE_STRAP_RAW_FREQUENCY (1<<8)
#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
#define SFUSE_STRAP_CRT_DISABLED (1<<6)
+#define SFUSE_STRAP_DDIF_DETECTED (1<<3)
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
@@ -8838,6 +9207,12 @@ enum skl_power_gate {
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
+#define GEN9_TIMESTAMP_OVERRIDE _MMIO(0x44074)
+#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT 0
+#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK 0x3ff
+#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12
+#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12)
+
#define _PIPE_FRMTMSTMP_A 0x70048
#define PIPE_FRMTMSTMP(pipe) \
_MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
@@ -9427,4 +9802,10 @@ enum skl_power_gate {
#define MMCD_PCLA (1 << 31)
#define MMCD_HOTSPOT_EN (1 << 27)
+#define _ICL_PHY_MISC_A 0x64C00
+#define _ICL_PHY_MISC_B 0x64C04
+#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \
+ _ICL_PHY_MISC_B)
+#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_request.c
index d140fcf..282f576 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -37,7 +37,8 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
- /* The timeline struct (as part of the ppgtt underneath a context)
+ /*
+ * The timeline struct (as part of the ppgtt underneath a context)
* may be freed when the request is no longer in use by the GPU.
* We could extend the life of a context to beyond that of all
* fences, possibly keeping the hw resource around indefinitely,
@@ -53,7 +54,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
static bool i915_fence_signaled(struct dma_fence *fence)
{
- return i915_gem_request_completed(to_request(fence));
+ return i915_request_completed(to_request(fence));
}
static bool i915_fence_enable_signaling(struct dma_fence *fence)
@@ -69,22 +70,23 @@ static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout)
{
- return i915_wait_request(to_request(fence), interruptible, timeout);
+ return i915_request_wait(to_request(fence), interruptible, timeout);
}
static void i915_fence_release(struct dma_fence *fence)
{
- struct drm_i915_gem_request *req = to_request(fence);
+ struct i915_request *rq = to_request(fence);
- /* The request is put onto a RCU freelist (i.e. the address
+ /*
+ * The request is put onto a RCU freelist (i.e. the address
* is immediately reused), mark the fences as being freed now.
* Otherwise the debugobjects for the fences are only marked as
* freed when the slab cache itself is freed, and so we would get
* caught trying to reuse dead objects.
*/
- i915_sw_fence_fini(&req->submit);
+ i915_sw_fence_fini(&rq->submit);
- kmem_cache_free(req->i915->requests, req);
+ kmem_cache_free(rq->i915->requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
@@ -97,7 +99,7 @@ const struct dma_fence_ops i915_fence_ops = {
};
static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+i915_request_remove_from_client(struct i915_request *request)
{
struct drm_i915_file_private *file_priv;
@@ -161,12 +163,16 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
GEM_BUG_ON(!list_empty(&pt->link));
- /* Everyone we depended upon (the fences we wait to be signaled)
+ /*
+ * Everyone we depended upon (the fences we wait to be signaled)
* should retire before us and remove themselves from our list.
* However, retirement is run independently on each timeline and
* so we may be called out-of-order.
*/
list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
+ GEM_BUG_ON(!i915_priotree_signaled(dep->signaler));
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(i915, dep);
@@ -174,6 +180,9 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
/* Remove ourselves from everyone who depends upon us */
list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
+ GEM_BUG_ON(dep->signaler != pt);
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
list_del(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(i915, dep);
@@ -208,9 +217,9 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
struct intel_timeline *tl = engine->timeline;
if (!i915_seqno_passed(seqno, tl->seqno)) {
- /* spin until threads are complete */
- while (intel_breadcrumbs_busy(engine))
- cond_resched();
+ /* Flush any waiters before we reuse the seqno */
+ intel_engine_disarm_breadcrumbs(engine);
+ GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
}
/* Check we are idle before we fiddle with hw state! */
@@ -231,17 +240,15 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
- /* HWS page needs to be set less than what we
- * will inject to ring
- */
- return reset_all_global_seqno(dev_priv, seqno - 1);
+ /* HWS page needs to be set less than what we will inject to ring */
+ return reset_all_global_seqno(i915, seqno - 1);
}
static void mark_busy(struct drm_i915_private *i915)
@@ -252,12 +259,33 @@ static void mark_busy(struct drm_i915_private *i915)
GEM_BUG_ON(!i915->gt.active_requests);
intel_runtime_pm_get_noresume(i915);
+
+ /*
+ * It seems that the DMC likes to transition between the DC states a lot
+ * when there are no connected displays (no active power domains) during
+ * command submission.
+ *
+ * This activity has negative impact on the performance of the chip with
+ * huge latencies observed in the interrupt handler and elsewhere.
+ *
+ * Work around it by grabbing a GT IRQ power domain whilst there is any
+ * GT activity, preventing any DC state transitions.
+ */
+ intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+
i915->gt.awake = true;
+ if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
+ i915->gt.epoch = 1;
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
if (INTEL_GEN(i915) >= 6)
gen6_rps_busy(i915);
+ i915_pmu_gt_unparked(i915);
+
+ intel_engines_unpark(i915);
+
+ i915_queue_hangcheck(i915);
queue_delayed_work(i915->wq,
&i915->gt.retire_work,
@@ -303,16 +331,17 @@ static void unreserve_engine(struct intel_engine_cs *engine)
}
void i915_gem_retire_noop(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/* Space left intentionally blank */
}
-static void advance_ring(struct drm_i915_gem_request *request)
+static void advance_ring(struct i915_request *request)
{
unsigned int tail;
- /* We know the GPU must have read the request to have
+ /*
+ * We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of tail of the request to update the last known position
* of the GPU head.
@@ -321,7 +350,8 @@ static void advance_ring(struct drm_i915_gem_request *request)
* completion order.
*/
if (list_is_last(&request->ring_link, &request->ring->request_list)) {
- /* We may race here with execlists resubmitting this request
+ /*
+ * We may race here with execlists resubmitting this request
* as we retire it. The resubmission will move the ring->tail
* forwards (to request->wa_tail). We either read the
* current value that was written to hw, or the value that
@@ -337,30 +367,30 @@ static void advance_ring(struct drm_i915_gem_request *request)
request->ring->head = tail;
}
-static void free_capture_list(struct drm_i915_gem_request *request)
+static void free_capture_list(struct i915_request *request)
{
- struct i915_gem_capture_list *capture;
+ struct i915_capture_list *capture;
capture = request->capture_list;
while (capture) {
- struct i915_gem_capture_list *next = capture->next;
+ struct i915_capture_list *next = capture->next;
kfree(capture);
capture = next;
}
}
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+static void i915_request_retire(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct i915_gem_active *active, *next;
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
- GEM_BUG_ON(!i915_gem_request_completed(request));
+ GEM_BUG_ON(!i915_request_completed(request));
GEM_BUG_ON(!request->i915->gt.active_requests);
- trace_i915_gem_request_retire(request);
+ trace_i915_request_retire(request);
spin_lock_irq(&engine->timeline->lock);
list_del_init(&request->link);
@@ -371,7 +401,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
free_capture_list(request);
- /* Walk through the active list, calling retire on each. This allows
+ /*
+ * Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
* when their *last* active request is completed (updating state
* tracking lists for eviction, active references for GEM, etc).
@@ -381,7 +412,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
* the node after the callback).
*/
list_for_each_entry_safe(active, next, &request->active_list, link) {
- /* In microbenchmarks or focusing upon time inside the kernel,
+ /*
+ * In microbenchmarks or focusing upon time inside the kernel,
* we may spend an inordinate amount of time simply handling
* the retirement of requests and processing their callbacks.
* Of which, this loop itself is particularly hot due to the
@@ -398,15 +430,16 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
active->retire(active, request);
}
- i915_gem_request_remove_from_client(request);
+ i915_request_remove_from_client(request);
/* Retirement decays the ban score as it is a sign of ctx progress */
atomic_dec_if_positive(&request->ctx->ban_score);
- /* The backing object for the context is done after switching to the
+ /*
+ * The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. However, since we
- * cannot take the required locks at i915_gem_request_submit() we
+ * cannot take the required locks at i915_request_submit() we
* defer the unpinning of the active context to now, retirement of
* the subsequent request.
*/
@@ -415,32 +448,37 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
engine->last_retired_context = request->ctx;
spin_lock_irq(&request->lock);
- if (request->waitboost)
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
+ dma_fence_signal_locked(&request->fence);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+ intel_engine_cancel_signaling(request);
+ if (request->waitboost) {
+ GEM_BUG_ON(!atomic_read(&request->i915->gt_pm.rps.num_waiters));
atomic_dec(&request->i915->gt_pm.rps.num_waiters);
- dma_fence_signal_locked(&request->fence);
+ }
spin_unlock_irq(&request->lock);
i915_priotree_fini(request->i915, &request->priotree);
- i915_gem_request_put(request);
+ i915_request_put(request);
}
-void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
+void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_gem_request *tmp;
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_request *tmp;
- lockdep_assert_held(&req->i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_gem_request_completed(req));
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_request_completed(rq));
- if (list_empty(&req->link))
+ if (list_empty(&rq->link))
return;
do {
tmp = list_first_entry(&engine->timeline->requests,
typeof(*tmp), link);
- i915_gem_request_retire(tmp);
- } while (tmp != req);
+ i915_request_retire(tmp);
+ } while (tmp != rq);
}
static u32 timeline_get_seqno(struct intel_timeline *tl)
@@ -448,7 +486,7 @@ static u32 timeline_get_seqno(struct intel_timeline *tl)
return ++tl->seqno;
}
-void __i915_gem_request_submit(struct drm_i915_gem_request *request)
+void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
@@ -457,11 +495,10 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline->lock);
- trace_i915_gem_request_execute(request);
-
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
+ GEM_BUG_ON(request->global_seqno);
seqno = timeline_get_seqno(timeline);
GEM_BUG_ON(!seqno);
@@ -481,10 +518,12 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
+ trace_i915_request_execute(request);
+
wake_up_all(&request->execute);
}
-void i915_gem_request_submit(struct drm_i915_gem_request *request)
+void i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
@@ -492,12 +531,12 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- __i915_gem_request_submit(request);
+ __i915_request_submit(request);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
@@ -505,10 +544,14 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline->lock);
- /* Only unwind in reverse order, required so that the per-context list
+ /*
+ * Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
+ GEM_BUG_ON(!request->global_seqno);
GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
+ GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
+ request->global_seqno));
engine->timeline->seqno--;
/* We may be recursing from the signal callback of another i915 fence */
@@ -526,15 +569,16 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
list_move(&request->link, &timeline->requests);
spin_unlock(&timeline->lock);
- /* We don't need to wake_up any waiters on request->execute, they
+ /*
+ * We don't need to wake_up any waiters on request->execute, they
* will get woken by any other event or us re-adding this request
- * to the engine timeline (__i915_gem_request_submit()). The waiters
+ * to the engine timeline (__i915_request_submit()). The waiters
* should be quite adept at finding that the request now has a new
* global_seqno compared to the one they went to sleep on.
*/
}
-void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+void i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
@@ -542,7 +586,7 @@ void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- __i915_gem_request_unsubmit(request);
+ __i915_request_unsubmit(request);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
@@ -550,18 +594,19 @@ void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
- struct drm_i915_gem_request *request =
+ struct i915_request *request =
container_of(fence, typeof(*request), submit);
switch (state) {
case FENCE_COMPLETE:
- trace_i915_gem_request_submit(request);
+ trace_i915_request_submit(request);
/*
- * We need to serialize use of the submit_request() callback with its
- * hotplugging performed during an emergency i915_gem_set_wedged().
- * We use the RCU mechanism to mark the critical section in order to
- * force i915_gem_set_wedged() to wait until the submit_request() is
- * completed before proceeding.
+ * We need to serialize use of the submit_request() callback
+ * with its hotplugging performed during an emergency
+ * i915_gem_set_wedged(). We use the RCU mechanism to mark the
+ * critical section in order to force i915_gem_set_wedged() to
+ * wait until the submit_request() is completed before
+ * proceeding.
*/
rcu_read_lock();
request->engine->submit_request(request);
@@ -569,7 +614,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
break;
case FENCE_FREE:
- i915_gem_request_put(request);
+ i915_request_put(request);
break;
}
@@ -577,7 +622,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
}
/**
- * i915_gem_request_alloc - allocate a request structure
+ * i915_request_alloc - allocate a request structure
*
* @engine: engine that we wish to issue the request on.
* @ctx: context that the request will be associated with.
@@ -585,31 +630,32 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
* Returns a pointer to the allocated request if successful,
* or an error code if not.
*/
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+struct i915_request *
+i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
- struct drm_i915_private *dev_priv = engine->i915;
- struct drm_i915_gem_request *req;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_request *rq;
struct intel_ring *ring;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
/*
* Preempt contexts are reserved for exclusive use to inject a
* preemption context switch. They are never to be used for any trivial
* request!
*/
- GEM_BUG_ON(ctx == dev_priv->preempt_context);
+ GEM_BUG_ON(ctx == i915->preempt_context);
- /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ /*
+ * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(&i915->gpu_error))
return ERR_PTR(-EIO);
- /* Pinning the contexts may generate requests in order to acquire
+ /*
+ * Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
@@ -622,13 +668,18 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
goto err_unpin;
+ ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+ if (ret)
+ goto err_unreserve;
+
/* Move the oldest request to the slab-cache (if not in use!) */
- req = list_first_entry_or_null(&engine->timeline->requests,
- typeof(*req), link);
- if (req && i915_gem_request_completed(req))
- i915_gem_request_retire(req);
+ rq = list_first_entry_or_null(&engine->timeline->requests,
+ typeof(*rq), link);
+ if (rq && i915_request_completed(rq))
+ i915_request_retire(rq);
- /* Beware: Dragons be flying overhead.
+ /*
+ * Beware: Dragons be flying overhead.
*
* We use RCU to look up requests in flight. The lookups may
* race with the request being allocated from the slab freelist.
@@ -656,73 +707,104 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
*
* Do not use kmem_cache_zalloc() here!
*/
- req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
- if (!req) {
- ret = -ENOMEM;
- goto err_unreserve;
+ rq = kmem_cache_alloc(i915->requests,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (unlikely(!rq)) {
+ /* Ratelimit ourselves to prevent oom from malicious clients */
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED |
+ I915_WAIT_INTERRUPTIBLE);
+ if (ret)
+ goto err_unreserve;
+
+ /*
+ * We've forced the client to stall and catch up with whatever
+ * backlog there might have been. As we are assuming that we
+ * caused the mempressure, now is an opportune time to
+ * recover as much memory from the request pool as is possible.
+ * Having already penalized the client to stall, we spend
+ * a little extra time to re-optimise page allocation.
+ */
+ kmem_cache_shrink(i915->requests);
+ rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
+
+ rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+ if (!rq) {
+ ret = -ENOMEM;
+ goto err_unreserve;
+ }
}
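
The allocation strategy above reduces to a two-stage pattern: an opportunistic attempt that may fail quietly, then a reclaim pass before a full-strength retry. A rough sketch of that shape (illustrative only; the example_ name is hypothetical and the wait-for-idle throttle used by the driver is omitted):

#include <linux/slab.h>
#include <linux/rcupdate.h>

static void *example_alloc(struct kmem_cache *cache)
{
	void *obj;

	/* Opportunistic attempt: allowed to fail, must not warn or retry hard. */
	obj = kmem_cache_alloc(cache,
			       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (likely(obj))
		return obj;

	/* Reclaim: give back cached slabs and let RCU-freed pages drain. */
	kmem_cache_shrink(cache);
	rcu_barrier();

	/* Full-strength retry; NULL here is a genuine out-of-memory. */
	return kmem_cache_alloc(cache, GFP_KERNEL);
}
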
- req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
- GEM_BUG_ON(req->timeline == engine->timeline);
+ rq->timeline = i915_gem_context_lookup_timeline(ctx, engine);
+ GEM_BUG_ON(rq->timeline == engine->timeline);
- spin_lock_init(&req->lock);
- dma_fence_init(&req->fence,
+ spin_lock_init(&rq->lock);
+ dma_fence_init(&rq->fence,
&i915_fence_ops,
- &req->lock,
- req->timeline->fence_context,
- timeline_get_seqno(req->timeline));
+ &rq->lock,
+ rq->timeline->fence_context,
+ timeline_get_seqno(rq->timeline));
/* We bump the ref for the fence chain */
- i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
- init_waitqueue_head(&req->execute);
+ i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
+ init_waitqueue_head(&rq->execute);
- i915_priotree_init(&req->priotree);
+ i915_priotree_init(&rq->priotree);
- INIT_LIST_HEAD(&req->active_list);
- req->i915 = dev_priv;
- req->engine = engine;
- req->ctx = ctx;
- req->ring = ring;
+ INIT_LIST_HEAD(&rq->active_list);
+ rq->i915 = i915;
+ rq->engine = engine;
+ rq->ctx = ctx;
+ rq->ring = ring;
/* No zalloc, must clear what we need by hand */
- req->global_seqno = 0;
- req->file_priv = NULL;
- req->batch = NULL;
- req->capture_list = NULL;
- req->waitboost = false;
+ rq->global_seqno = 0;
+ rq->signaling.wait.seqno = 0;
+ rq->file_priv = NULL;
+ rq->batch = NULL;
+ rq->capture_list = NULL;
+ rq->waitboost = false;
/*
* Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the
- * i915_add_request() call can't fail. Note that the reserve may need
+ * i915_request_add() call can't fail. Note that the reserve may need
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
*/
- req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
- GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
+ rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+ GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
- ret = engine->request_alloc(req);
- if (ret)
- goto err_ctx;
-
- /* Record the position of the start of the request so that
+ /*
+ * Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- req->head = req->ring->emit;
+ rq->head = rq->ring->emit;
+
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (ret)
+ goto err_unwind;
+
+ ret = engine->request_alloc(rq);
+ if (ret)
+ goto err_unwind;
/* Check that we didn't interrupt ourselves with a new request */
- GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
- return req;
+ GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
+ return rq;
+
+err_unwind:
+ rq->ring->emit = rq->head;
-err_ctx:
/* Make sure we didn't add ourselves to external state before freeing */
- GEM_BUG_ON(!list_empty(&req->active_list));
- GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
- GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
+ GEM_BUG_ON(!list_empty(&rq->active_list));
+ GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list));
+ GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list));
- kmem_cache_free(dev_priv->requests, req);
+ kmem_cache_free(i915->requests, rq);
err_unreserve:
unreserve_engine(engine);
err_unpin:
@@ -731,15 +813,14 @@ err_unpin:
}
static int
-i915_gem_request_await_request(struct drm_i915_gem_request *to,
- struct drm_i915_gem_request *from)
+i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
int ret;
GEM_BUG_ON(to == from);
GEM_BUG_ON(to->timeline == from->timeline);
- if (i915_gem_request_completed(from))
+ if (i915_request_completed(from))
return 0;
if (to->engine->schedule) {
@@ -753,7 +834,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
if (to->engine == from->engine) {
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
- GFP_KERNEL);
+ I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
@@ -762,7 +843,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
GEM_BUG_ON(!from->engine->semaphore.signal);
- seqno = i915_gem_request_global_seqno(from);
+ seqno = i915_request_global_seqno(from);
if (!seqno)
goto await_dma_fence;
@@ -781,19 +862,19 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
await_dma_fence:
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
- GFP_KERNEL);
+ I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
int
-i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
- struct dma_fence *fence)
+i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
struct dma_fence **child = &fence;
unsigned int nchild = 1;
int ret;
- /* Note that if the fence-array was created in signal-on-any mode,
+ /*
+ * Note that if the fence-array was created in signal-on-any mode,
* we should *not* decompose it into its individual fences. However,
* we don't currently store which mode the fence-array is operating
* in. Fortunately, the only user of signal-on-any is private to
@@ -815,40 +896,39 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
/*
* Requests on the same timeline are explicitly ordered, along
- * with their dependencies, by i915_add_request() which ensures
+ * with their dependencies, by i915_request_add() which ensures
* that requests are submitted in-order through each ring.
*/
- if (fence->context == req->fence.context)
+ if (fence->context == rq->fence.context)
continue;
/* Squash repeated waits to the same timelines */
- if (fence->context != req->i915->mm.unordered_timeline &&
- intel_timeline_sync_is_later(req->timeline, fence))
+ if (fence->context != rq->i915->mm.unordered_timeline &&
+ intel_timeline_sync_is_later(rq->timeline, fence))
continue;
if (dma_fence_is_i915(fence))
- ret = i915_gem_request_await_request(req,
- to_request(fence));
+ ret = i915_request_await_request(rq, to_request(fence));
else
- ret = i915_sw_fence_await_dma_fence(&req->submit, fence,
+ ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
I915_FENCE_TIMEOUT,
- GFP_KERNEL);
+ I915_FENCE_GFP);
if (ret < 0)
return ret;
/* Record the latest fence used against each timeline */
- if (fence->context != req->i915->mm.unordered_timeline)
- intel_timeline_sync_set(req->timeline, fence);
+ if (fence->context != rq->i915->mm.unordered_timeline)
+ intel_timeline_sync_set(rq->timeline, fence);
} while (--nchild);
return 0;
}
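
For reference, the decomposition described above — splitting a composite dma_fence_array into its children and awaiting each one — looks roughly like this (an illustrative sketch, not the patch's code, which additionally squashes repeated waits on the same timeline):

#include <linux/dma-fence-array.h>

static int example_await_children(struct i915_request *rq,
				  struct dma_fence *fence)
{
	struct dma_fence_array *array;
	unsigned int i;
	int ret;

	if (!dma_fence_is_array(fence))
		return i915_request_await_dma_fence(rq, fence);

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		ret = i915_request_await_dma_fence(rq, array->fences[i]);
		if (ret)
			return ret;
	}

	return 0;
}
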
/**
- * i915_gem_request_await_object - set this request to (async) wait upon a bo
- *
+ * i915_request_await_object - set this request to (async) wait upon a bo
* @to: request we are wishing to use
* @obj: object which may be in use on another ring.
+ * @write: whether the wait is on behalf of a writer
*
* This code is meant to abstract object synchronization with the GPU.
* Conceptually we serialise writes between engines inside the GPU.
@@ -865,9 +945,9 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
* Returns 0 if successful, else propagates up the lower layer error.
*/
int
-i915_gem_request_await_object(struct drm_i915_gem_request *to,
- struct drm_i915_gem_object *obj,
- bool write)
+i915_request_await_object(struct i915_request *to,
+ struct drm_i915_gem_object *obj,
+ bool write)
{
struct dma_fence *excl;
int ret = 0;
@@ -882,7 +962,7 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
return ret;
for (i = 0; i < count; i++) {
- ret = i915_gem_request_await_dma_fence(to, shared[i]);
+ ret = i915_request_await_dma_fence(to, shared[i]);
if (ret)
break;
@@ -898,7 +978,7 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
if (excl) {
if (ret == 0)
- ret = i915_gem_request_await_dma_fence(to, excl);
+ ret = i915_request_await_dma_fence(to, excl);
dma_fence_put(excl);
}
@@ -911,20 +991,21 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
+void __i915_request_add(struct i915_request *request, bool flush_caches)
{
struct intel_engine_cs *engine = request->engine;
struct intel_ring *ring = request->ring;
struct intel_timeline *timeline = request->timeline;
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
u32 *cs;
int err;
lockdep_assert_held(&request->i915->drm.struct_mutex);
- trace_i915_gem_request_add(request);
+ trace_i915_request_add(request);
- /* Make sure that no request gazumped us - if it was allocated after
- * our i915_gem_request_alloc() and called __i915_add_request() before
+ /*
+ * Make sure that no request gazumped us - if it was allocated after
+ * our i915_request_alloc() and called __i915_request_add() before
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(timeline->seqno != request->fence.seqno);
@@ -950,7 +1031,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
WARN(err, "engine->emit_flush() failed: %d!\n", err);
}
- /* Record the position of the start of the breadcrumb so that
+ /*
+ * Record the position of the start of the breadcrumb so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
@@ -959,7 +1041,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
- /* Seal the request and mark it as pending execution. Note that
+ /*
+ * Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
* hangcheck. Hence we apply the barrier to ensure that we do not
* see a more recent value in the hws than we are tracking.
@@ -967,7 +1050,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
prev = i915_gem_active_raw(&timeline->last_request,
&request->i915->drm.struct_mutex);
- if (prev) {
+ if (prev && !i915_request_completed(prev)) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
if (engine->schedule)
@@ -987,7 +1070,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
list_add_tail(&request->ring_link, &ring->request_list);
request->emitted_jiffies = jiffies;
- /* Let the backend know a new request has arrived that may need
+ /*
+ * Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
* request - i.e. we may want to preempt the current request in order
* to run a high priority dependency chain *before* we can execute this
@@ -997,19 +1081,42 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
+ rcu_read_lock();
if (engine->schedule)
engine->schedule(request, request->ctx->priority);
+ rcu_read_unlock();
local_bh_disable();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+ /*
+ * In typical scenarios, we do not expect the previous request on
+ * the timeline to be still tracked by timeline->last_request if it
+ * has been completed. If the completed request is still here, that
+ * implies that request retirement is a long way behind submission,
+ * suggesting that we haven't been retiring frequently enough from
+ * the combination of retire-before-alloc, waiters and the background
+ * retirement worker. So if the last request on this timeline was
+ * already completed, do a catch up pass, flushing the retirement queue
+ * up to this client. Since we have now moved the heaviest operations
+ * during retirement onto secondary workers, such as freeing objects
+ * or contexts, retiring a bunch of requests is mostly list management
+ * (and cache misses), and so we should not be overly penalizing this
+ * client by performing excess work, though we may still be performing
+ * work on behalf of others -- but instead we should benefit from
+ * improved resource management. (Well, that's the theory at least.)
+ */
+ if (prev && i915_request_completed(prev))
+ i915_request_retire_upto(prev);
}
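
Putting the renamed entry points together, a typical caller now looks roughly like the following (a minimal sketch assuming struct_mutex is held; the example_ name and the abbreviated error handling are not from the driver):

static int example_submit(struct intel_engine_cs *engine,
			  struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Order this request after whatever is still using the object. */
	err = i915_request_await_object(rq, obj, false);

	/*
	 * Once allocated, the request is always added so that its reserved
	 * ring space is consumed and it can be retired, even if we gave up
	 * on emitting the payload above.
	 */
	i915_request_add(rq);

	return err;
}
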
static unsigned long local_clock_us(unsigned int *cpu)
{
unsigned long t;
- /* Cheaply and approximately convert from nanoseconds to microseconds.
+ /*
+ * Cheaply and approximately convert from nanoseconds to microseconds.
* The result and subsequent calculations are also defined in the same
* approximate microseconds units. The principal source of timing
* error here is from the simple truncation.
@@ -1037,10 +1144,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct drm_i915_gem_request *req,
+static bool __i915_spin_request(const struct i915_request *rq,
u32 seqno, int state, unsigned long timeout_us)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
unsigned int irq, cpu;
GEM_BUG_ON(!seqno);
@@ -1059,7 +1166,8 @@ static bool __i915_spin_request(const struct drm_i915_gem_request *req,
if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
return false;
- /* When waiting for high frequency requests, e.g. during synchronous
+ /*
+ * When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
* rate. By busywaiting on the request completion for a short while we
@@ -1073,9 +1181,10 @@ static bool __i915_spin_request(const struct drm_i915_gem_request *req,
timeout_us += local_clock_us(&cpu);
do {
if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
- return seqno == i915_gem_request_global_seqno(req);
+ return seqno == i915_request_global_seqno(rq);
- /* Seqno are meant to be ordered *before* the interrupt. If
+ /*
+ * Seqno are meant to be ordered *before* the interrupt. If
* we see an interrupt without a corresponding seqno advance,
* assume we won't see one in the near future but require
* the engine->seqno_barrier() to fixup coherency.
@@ -1095,7 +1204,7 @@ static bool __i915_spin_request(const struct drm_i915_gem_request *req,
return false;
}
-static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
+static bool __i915_wait_request_check_and_reset(struct i915_request *request)
{
if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
return false;
@@ -1106,12 +1215,12 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req
}
/**
- * i915_wait_request - wait until execution of request has finished
- * @req: the request to wait upon
+ * i915_request_wait - wait until execution of request has finished
+ * @rq: the request to wait upon
* @flags: how to wait
* @timeout: how long to wait in jiffies
*
- * i915_wait_request() waits for the request to be completed, for a
+ * i915_request_wait() waits for the request to be completed, for a
* maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
* unbounded wait).
*
@@ -1124,13 +1233,13 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req
* May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
* pending before the request completes.
*/
-long i915_wait_request(struct drm_i915_gem_request *req,
+long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
+ wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
DEFINE_WAIT_FUNC(reset, default_wake_function);
DEFINE_WAIT_FUNC(exec, default_wake_function);
struct intel_wait wait;
@@ -1138,33 +1247,33 @@ long i915_wait_request(struct drm_i915_gem_request *req,
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
GEM_BUG_ON(debug_locks &&
- !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+ !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
!!(flags & I915_WAIT_LOCKED));
#endif
GEM_BUG_ON(timeout < 0);
- if (i915_gem_request_completed(req))
+ if (i915_request_completed(rq))
return timeout;
if (!timeout)
return -ETIME;
- trace_i915_gem_request_wait_begin(req, flags);
+ trace_i915_request_wait_begin(rq, flags);
- add_wait_queue(&req->execute, &exec);
+ add_wait_queue(&rq->execute, &exec);
if (flags & I915_WAIT_LOCKED)
add_wait_queue(errq, &reset);
- intel_wait_init(&wait, req);
+ intel_wait_init(&wait, rq);
restart:
do {
set_current_state(state);
- if (intel_wait_update_request(&wait, req))
+ if (intel_wait_update_request(&wait, rq))
break;
if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(req))
+ __i915_wait_request_check_and_reset(rq))
continue;
if (signal_pending_state(state, current)) {
@@ -1181,22 +1290,23 @@ restart:
} while (1);
GEM_BUG_ON(!intel_wait_has_seqno(&wait));
- GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
/* Optimistic short spin before touching IRQs */
- if (__i915_spin_request(req, wait.seqno, state, 5))
+ if (__i915_spin_request(rq, wait.seqno, state, 5))
goto complete;
set_current_state(state);
- if (intel_engine_add_wait(req->engine, &wait))
- /* In order to check that we haven't missed the interrupt
+ if (intel_engine_add_wait(rq->engine, &wait))
+ /*
+ * In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
* coherent check on the seqno before we sleep.
*/
goto wakeup;
if (flags & I915_WAIT_LOCKED)
- __i915_wait_request_check_and_reset(req);
+ __i915_wait_request_check_and_reset(rq);
for (;;) {
if (signal_pending_state(state, current)) {
@@ -1212,21 +1322,23 @@ restart:
timeout = io_schedule_timeout(timeout);
if (intel_wait_complete(&wait) &&
- intel_wait_check_request(&wait, req))
+ intel_wait_check_request(&wait, rq))
break;
set_current_state(state);
wakeup:
- /* Carefully check if the request is complete, giving time
+ /*
+ * Carefully check if the request is complete, giving time
* for the seqno to be visible following the interrupt.
* We also have to check in case we are kicked by the GPU
* reset in order to drop the struct_mutex.
*/
- if (__i915_request_irq_complete(req))
+ if (__i915_request_irq_complete(rq))
break;
- /* If the GPU is hung, and we hold the lock, reset the GPU
+ /*
+ * If the GPU is hung, and we hold the lock, reset the GPU
* and then check for completion. On a full reset, the engine's
* HW seqno will be advanced past us and we are complete.
* If we do a partial reset, we have to wait for the GPU to
@@ -1237,33 +1349,33 @@ wakeup:
* itself, or indirectly by recovering the GPU).
*/
if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(req))
+ __i915_wait_request_check_and_reset(rq))
continue;
/* Only spin if we know the GPU is processing this request */
- if (__i915_spin_request(req, wait.seqno, state, 2))
+ if (__i915_spin_request(rq, wait.seqno, state, 2))
break;
- if (!intel_wait_check_request(&wait, req)) {
- intel_engine_remove_wait(req->engine, &wait);
+ if (!intel_wait_check_request(&wait, rq)) {
+ intel_engine_remove_wait(rq->engine, &wait);
goto restart;
}
}
- intel_engine_remove_wait(req->engine, &wait);
+ intel_engine_remove_wait(rq->engine, &wait);
complete:
__set_current_state(TASK_RUNNING);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(errq, &reset);
- remove_wait_queue(&req->execute, &exec);
- trace_i915_gem_request_wait_end(req);
+ remove_wait_queue(&rq->execute, &exec);
+ trace_i915_request_wait_end(rq);
return timeout;
}
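
As a usage note, a caller that only needs a bounded, interruptible wait and an errno-style result might wrap the function like this (illustrative sketch; the example_ name and the 100ms budget are arbitrary):

static int example_wait_100ms(struct i915_request *rq)
{
	long ret;

	/* Not holding struct_mutex, so do not pass I915_WAIT_LOCKED. */
	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				msecs_to_jiffies(100));

	/* Negative on timeout, signal delivery or a wedged GPU. */
	return ret < 0 ? ret : 0;
}
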
static void engine_retire_requests(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request, *next;
+ struct i915_request *request, *next;
u32 seqno = intel_engine_get_seqno(engine);
LIST_HEAD(retire);
@@ -1278,24 +1390,24 @@ static void engine_retire_requests(struct intel_engine_cs *engine)
spin_unlock_irq(&engine->timeline->lock);
list_for_each_entry_safe(request, next, &retire, link)
- i915_gem_request_retire(request);
+ i915_request_retire(request);
}
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_retire_requests(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
- if (!dev_priv->gt.active_requests)
+ if (!i915->gt.active_requests)
return;
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, i915, id)
engine_retire_requests(engine);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
-#include "selftests/i915_gem_request.c"
+#include "selftests/i915_request.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_request.h
index 26249f3..7d6eb82 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2008-2015 Intel Corporation
+ * Copyright © 2008-2018 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,8 +22,8 @@
*
*/
-#ifndef I915_GEM_REQUEST_H
-#define I915_GEM_REQUEST_H
+#ifndef I915_REQUEST_H
+#define I915_REQUEST_H
#include <linux/dma-fence.h>
@@ -34,18 +34,18 @@
struct drm_file;
struct drm_i915_gem_object;
-struct drm_i915_gem_request;
+struct i915_request;
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
u32 seqno;
};
struct intel_signal_node {
- struct rb_node node;
struct intel_wait wait;
+ struct list_head link;
};
struct i915_dependency {
@@ -57,7 +57,12 @@ struct i915_dependency {
#define I915_DEPENDENCY_ALLOC BIT(0)
};
-/* Requests exist in a complex web of interdependencies. Each request
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
* has to wait for some other request to complete before it is ready to be run
* (e.g. we have to wait until the pixels have been rendered into a texture
* before we can copy from it). We track the readiness of a request in terms
@@ -81,8 +86,8 @@ enum {
I915_PRIORITY_INVALID = INT_MIN
};
-struct i915_gem_capture_list {
- struct i915_gem_capture_list *next;
+struct i915_capture_list {
+ struct i915_capture_list *next;
struct i915_vma *vma;
};
@@ -106,7 +111,7 @@ struct i915_gem_capture_list {
*
* The requests are reference counted.
*/
-struct drm_i915_gem_request {
+struct i915_request {
struct dma_fence fence;
spinlock_t lock;
@@ -120,7 +125,7 @@ struct drm_i915_gem_request {
* it persists while any request is linked to it. Requests themselves
* are also refcounted, so the request will only be freed when the last
* reference to it is dismissed, and the code in
- * i915_gem_request_free() will then decrement the refcount on the
+ * i915_request_free() will then decrement the refcount on the
* context.
*/
struct i915_gem_context *ctx;
@@ -129,7 +134,8 @@ struct drm_i915_gem_request {
struct intel_timeline *timeline;
struct intel_signal_node signaling;
- /* Fences for the various phases in the request's lifetime.
+ /*
+ * Fences for the various phases in the request's lifetime.
*
* The submit fence is used to await upon all of the request's
* dependencies. When it is signaled, the request is ready to run.
@@ -139,7 +145,8 @@ struct drm_i915_gem_request {
wait_queue_entry_t submitq;
wait_queue_head_t execute;
- /* A list of everyone we wait upon, and everyone who waits upon us.
+ /*
+ * A list of everyone we wait upon, and everyone who waits upon us.
* Even though we will not be submitted to the hardware before the
* submit fence is signaled (it waits for all external events as well
* as our own requests), the scheduler still needs to know the
@@ -150,7 +157,8 @@ struct drm_i915_gem_request {
struct i915_priotree priotree;
struct i915_dependency dep;
- /** GEM sequence number associated with this request on the
+ /**
+ * GEM sequence number associated with this request on the
* global execution timeline. It is zero when the request is not
* on the HW queue (i.e. not on the engine timeline list).
* Its value is guarded by the timeline spinlock.
@@ -180,12 +188,13 @@ struct drm_i915_gem_request {
* error state dump only).
*/
struct i915_vma *batch;
- /** Additional buffers requested by userspace to be captured upon
+ /**
+ * Additional buffers requested by userspace to be captured upon
* a GPU hang. The vma/obj on this list are protected by their
* active reference - all objects on this list must also be
* on the active_list (of their final request).
*/
- struct i915_gem_capture_list *capture_list;
+ struct i915_capture_list *capture_list;
struct list_head active_list;
/** Time at which this request was emitted, in jiffies. */
@@ -204,6 +213,8 @@ struct drm_i915_gem_request {
struct list_head client_link;
};
+#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+
extern const struct dma_fence_ops i915_fence_ops;
static inline bool dma_fence_is_i915(const struct dma_fence *fence)
@@ -211,52 +222,40 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
return fence->ops == &i915_fence_ops;
}
-struct drm_i915_gem_request * __must_check
-i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
-void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
+struct i915_request * __must_check
+i915_request_alloc(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+void i915_request_retire_upto(struct i915_request *rq);
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
to_request(struct dma_fence *fence)
{
/* We assume that NULL fence/request are interoperable */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
+ BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
- return container_of(fence, struct drm_i915_gem_request, fence);
+ return container_of(fence, struct i915_request, fence);
}
-static inline struct drm_i915_gem_request *
-i915_gem_request_get(struct drm_i915_gem_request *req)
+static inline struct i915_request *
+i915_request_get(struct i915_request *rq)
{
- return to_request(dma_fence_get(&req->fence));
+ return to_request(dma_fence_get(&rq->fence));
}
-static inline struct drm_i915_gem_request *
-i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+static inline struct i915_request *
+i915_request_get_rcu(struct i915_request *rq)
{
- return to_request(dma_fence_get_rcu(&req->fence));
+ return to_request(dma_fence_get_rcu(&rq->fence));
}
static inline void
-i915_gem_request_put(struct drm_i915_gem_request *req)
+i915_request_put(struct i915_request *rq)
{
- dma_fence_put(&req->fence);
-}
-
-static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
- struct drm_i915_gem_request *src)
-{
- if (src)
- i915_gem_request_get(src);
-
- if (*pdst)
- i915_gem_request_put(*pdst);
-
- *pdst = src;
+ dma_fence_put(&rq->fence);
}
/**
- * i915_gem_request_global_seqno - report the current global seqno
+ * i915_request_global_seqno - report the current global seqno
* @request - the request
*
* A request is assigned a global seqno only when it is on the hardware
@@ -274,34 +273,28 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
* after the read, it is indeed complete).
*/
static u32
-i915_gem_request_global_seqno(const struct drm_i915_gem_request *request)
+i915_request_global_seqno(const struct i915_request *request)
{
return READ_ONCE(request->global_seqno);
}
-int
-i915_gem_request_await_object(struct drm_i915_gem_request *to,
+int i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write);
-int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
- struct dma_fence *fence);
-
-void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
-#define i915_add_request(req) \
- __i915_add_request(req, false)
+int i915_request_await_dma_fence(struct i915_request *rq,
+ struct dma_fence *fence);
-void __i915_gem_request_submit(struct drm_i915_gem_request *request);
-void i915_gem_request_submit(struct drm_i915_gem_request *request);
+void __i915_request_add(struct i915_request *rq, bool flush_caches);
+#define i915_request_add(rq) \
+ __i915_request_add(rq, false)
-void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
-void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+void __i915_request_submit(struct i915_request *request);
+void i915_request_submit(struct i915_request *request);
-struct intel_rps_client;
-#define NO_WAITBOOST ERR_PTR(-1)
-#define IS_RPS_CLIENT(p) (!IS_ERR(p))
-#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
+void __i915_request_unsubmit(struct i915_request *request);
+void i915_request_unsubmit(struct i915_request *request);
-long i915_wait_request(struct drm_i915_gem_request *req,
+long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
__attribute__((nonnull(1)));
@@ -320,26 +313,48 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
-__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
+__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
GEM_BUG_ON(!seqno);
- return i915_seqno_passed(intel_engine_get_seqno(req->engine), seqno) &&
- seqno == i915_gem_request_global_seqno(req);
+ return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
+ seqno == i915_request_global_seqno(rq);
}
-static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+static inline bool i915_request_completed(const struct i915_request *rq)
{
u32 seqno;
- seqno = i915_gem_request_global_seqno(req);
+ seqno = i915_request_global_seqno(rq);
if (!seqno)
return false;
- return __i915_gem_request_completed(req, seqno);
+ return __i915_request_completed(rq, seqno);
}
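+
+/*
+ * Worked example (not from the patch): the checks above only behave
+ * across seqno wrap-around because i915_seqno_passed(), as elsewhere in
+ * the driver, compares with wrapping signed arithmetic rather than a
+ * plain '>='. With
+ *
+ *	seq1 = 0x00000002 (current HW seqno, just wrapped)
+ *	seq2 = 0xfffffffe (the request's global_seqno)
+ *
+ * a naive 'seq1 >= seq2' claims the request has not been reached, while
+ * (s32)(seq1 - seq2) = (s32)0x4 = 4 >= 0 correctly reports that the HW
+ * seqno has passed it.
+ */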
-/* We treat requests as fences. This is not be to confused with our
+static inline bool i915_request_started(const struct i915_request *rq)
+{
+ u32 seqno;
+
+ seqno = i915_request_global_seqno(rq);
+ if (!seqno)
+ return false;
+
+ return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
+ seqno - 1);
+}
+
+static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
+{
+ const struct i915_request *rq =
+ container_of(pt, const struct i915_request, priotree);
+
+ return i915_request_completed(rq);
+}
+
+void i915_retire_requests(struct drm_i915_private *i915);
+
+/*
+ * We treat requests as fences. This is not to be confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
* We use the fences to synchronize access from the CPU with activity on the
* GPU, for example, we should not rewrite an object's PTE whilst the GPU
@@ -369,16 +384,16 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
struct i915_gem_active;
typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
- struct drm_i915_gem_request *);
+ struct i915_request *);
struct i915_gem_active {
- struct drm_i915_gem_request __rcu *request;
+ struct i915_request __rcu *request;
struct list_head link;
i915_gem_retire_fn retire;
};
void i915_gem_retire_noop(struct i915_gem_active *,
- struct drm_i915_gem_request *request);
+ struct i915_request *request);
/**
* init_request_active - prepares the activity tracker for use
@@ -410,7 +425,7 @@ init_request_active(struct i915_gem_active *active,
*/
static inline void
i915_gem_active_set(struct i915_gem_active *active,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
list_move(&active->link, &request->active_list);
rcu_assign_pointer(active->request, request);
@@ -435,10 +450,11 @@ i915_gem_active_set_retire_fn(struct i915_gem_active *active,
active->retire = fn ?: i915_gem_retire_noop;
}
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
- /* Inside the error capture (running with the driver in an unknown
+ /*
+ * Inside the error capture (running with the driver in an unknown
* state), we want to bend the rules slightly (a lot).
*
* Work is in progress to make it safer, in the meantime this keeps
@@ -455,7 +471,7 @@ __i915_gem_active_peek(const struct i915_gem_active *active)
* It does not obtain a reference on the request for the caller, so the caller
* must hold struct_mutex.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
return rcu_dereference_protected(active->request,
@@ -470,13 +486,13 @@ i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
* still active, or NULL. It does not obtain a reference on the request
* for the caller, so the caller must hold struct_mutex.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = i915_gem_active_raw(active, mutex);
- if (!request || i915_gem_request_completed(request))
+ if (!request || i915_request_completed(request))
return NULL;
return request;
@@ -489,10 +505,10 @@ i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
* i915_gem_active_get() returns a reference to the active request, or NULL
* if the active tracker is idle. The caller must hold struct_mutex.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
- return i915_gem_request_get(i915_gem_active_peek(active, mutex));
+ return i915_request_get(i915_gem_active_peek(active, mutex));
}
/**
@@ -503,10 +519,11 @@ i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
* if the active tracker is idle. The caller must hold the RCU read lock, but
* the returned pointer is safe to use outside of RCU.
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
- /* Performing a lockless retrieval of the active request is super
+ /*
+ * Performing a lockless retrieval of the active request is super
* tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
* slab of request objects will not be freed whilst we hold the
* RCU read lock. It does not guarantee that the request itself
@@ -514,13 +531,13 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
*
* Thread A Thread B
*
- * req = active.request
- * retire(req) -> free(req);
- * (req is now first on the slab freelist)
+ * rq = active.request
+ * retire(rq) -> free(rq);
+ * (rq is now first on the slab freelist)
* active.request = NULL
*
- * req = new submission on a new object
- * ref(req)
+ * rq = new submission on a new object
+ * ref(rq)
*
* To prevent the request from being reused whilst the caller
* uses it, we take a reference like normal. Whilst acquiring
@@ -549,32 +566,34 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
*
* It is then imperative that we do not zero the request on
* reallocation, so that we can chase the dangling pointers!
- * See i915_gem_request_alloc().
+ * See i915_request_alloc().
*/
do {
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
request = rcu_dereference(active->request);
- if (!request || i915_gem_request_completed(request))
+ if (!request || i915_request_completed(request))
return NULL;
- /* An especially silly compiler could decide to recompute the
- * result of i915_gem_request_completed, more specifically
+ /*
+ * An especially silly compiler could decide to recompute the
+ * result of i915_request_completed, more specifically
* re-emit the load for request->fence.seqno. A race would catch
* a later seqno value, which could flip the result from true to
* false. Which means part of the instructions below might not
* be executed, while later on instructions are executed. Due to
* barriers within the refcounting the inconsistency can't reach
- * past the call to i915_gem_request_get_rcu, but not executing
- * that while still executing i915_gem_request_put() creates
+ * past the call to i915_request_get_rcu, but not executing
+ * that while still executing i915_request_put() creates
* havoc enough. Prevent this with a compiler barrier.
*/
barrier();
- request = i915_gem_request_get_rcu(request);
+ request = i915_request_get_rcu(request);
- /* What stops the following rcu_access_pointer() from occurring
- * before the above i915_gem_request_get_rcu()? If we were
+ /*
+ * What stops the following rcu_access_pointer() from occurring
+ * before the above i915_request_get_rcu()? If we were
* to read the value before pausing to get the reference to
* the request, we may not notice a change in the active
* tracker.
@@ -588,9 +607,9 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
* compiler.
*
* The atomic operation at the heart of
- * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
+ * i915_request_get_rcu(), see dma_fence_get_rcu(), is
* atomic_inc_not_zero() which is only a full memory barrier
- * when successful. That is, if i915_gem_request_get_rcu()
+ * when successful. That is, if i915_request_get_rcu()
* returns the request (and so with the reference counted
* incremented) then the following read for rcu_access_pointer()
* must occur after the atomic operation and so confirm
@@ -602,7 +621,7 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
if (!request || request == rcu_access_pointer(active->request))
return rcu_pointer_handoff(request);
- i915_gem_request_put(request);
+ i915_request_put(request);
} while (1);
}
@@ -614,12 +633,12 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
* or NULL if the active tracker is idle. The reference is obtained under RCU,
* so no locking is required by the caller.
*
- * The reference should be freed with i915_gem_request_put().
+ * The reference should be freed with i915_request_put().
*/
-static inline struct drm_i915_gem_request *
+static inline struct i915_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
rcu_read_lock();
request = __i915_gem_active_get_rcu(active);
@@ -659,7 +678,7 @@ i915_gem_active_isset(const struct i915_gem_active *active)
* can then wait upon the request, and afterwards release our reference,
* free of any locking.
*
- * This function wraps i915_wait_request(), see it for the full details on
+ * This function wraps i915_request_wait(), see it for the full details on
* the arguments.
*
* Returns 0 if successful, or a negative error code.
@@ -667,13 +686,13 @@ i915_gem_active_isset(const struct i915_gem_active *active)
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
long ret = 0;
request = i915_gem_active_get_unlocked(active);
if (request) {
- ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(request);
+ ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(request);
}
return ret < 0 ? ret : 0;
@@ -692,14 +711,14 @@ static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
struct mutex *mutex)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
long ret;
request = i915_gem_active_raw(active, mutex);
if (!request)
return 0;
- ret = i915_wait_request(request,
+ ret = i915_request_wait(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
@@ -716,4 +735,4 @@ i915_gem_active_retire(struct i915_gem_active *active,
#define for_each_active(mask, idx) \
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
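
A minimal illustration of the iterator above (not from the driver; the example_ name and message are hypothetical) — it visits the index of each set bit, clearing bits as it goes:

static void example_report_active(unsigned int active_mask)
{
	unsigned int mask = active_mask;	/* the macro consumes its mask */
	unsigned int idx;

	for_each_active(mask, idx)
		pr_debug("engine %u still has requests in flight\n", idx);
}
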
-#endif /* I915_GEM_REQUEST_H */
+#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 78e1a1b..9766e80 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -99,6 +99,6 @@ __printf(2, 3)
bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
- __igt_timeout((t), KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+ __igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#endif /* !__I915_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index ac236b8..1de5173 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -303,6 +303,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
int pending;
debug_fence_assert(fence);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
if (i915_sw_fence_done(signaler))
return 0;
@@ -364,18 +365,31 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
struct i915_sw_dma_fence_cb {
struct dma_fence_cb base;
struct i915_sw_fence *fence;
+};
+
+struct i915_sw_dma_fence_cb_timer {
+ struct i915_sw_dma_fence_cb base;
struct dma_fence *dma;
struct timer_list timer;
struct irq_work work;
struct rcu_head rcu;
};
+static void dma_i915_sw_fence_wake(struct dma_fence *dma,
+ struct dma_fence_cb *data)
+{
+ struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+ i915_sw_fence_complete(cb->fence);
+ kfree(cb);
+}
+
static void timer_i915_sw_fence_wake(struct timer_list *t)
{
- struct i915_sw_dma_fence_cb *cb = from_timer(cb, t, timer);
+ struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
struct i915_sw_fence *fence;
- fence = xchg(&cb->fence, NULL);
+ fence = xchg(&cb->base.fence, NULL);
if (!fence)
return;
@@ -387,13 +401,14 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
i915_sw_fence_complete(fence);
}
-static void dma_i915_sw_fence_wake(struct dma_fence *dma,
- struct dma_fence_cb *data)
+static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
+ struct dma_fence_cb *data)
{
- struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ struct i915_sw_dma_fence_cb_timer *cb =
+ container_of(data, typeof(*cb), base.base);
struct i915_sw_fence *fence;
- fence = xchg(&cb->fence, NULL);
+ fence = xchg(&cb->base.fence, NULL);
if (fence)
i915_sw_fence_complete(fence);
@@ -402,7 +417,8 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
static void irq_i915_sw_fence_work(struct irq_work *wrk)
{
- struct i915_sw_dma_fence_cb *cb = container_of(wrk, typeof(*cb), work);
+ struct i915_sw_dma_fence_cb_timer *cb =
+ container_of(wrk, typeof(*cb), work);
del_timer_sync(&cb->timer);
dma_fence_put(cb->dma);
@@ -416,14 +432,19 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
gfp_t gfp)
{
struct i915_sw_dma_fence_cb *cb;
+ dma_fence_func_t func;
int ret;
debug_fence_assert(fence);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
if (dma_fence_is_signaled(dma))
return 0;
- cb = kmalloc(sizeof(*cb), gfp);
+ cb = kmalloc(timeout ?
+ sizeof(struct i915_sw_dma_fence_cb_timer) :
+ sizeof(struct i915_sw_dma_fence_cb),
+ gfp);
if (!cb) {
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
@@ -434,19 +455,26 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
cb->fence = fence;
i915_sw_fence_await(fence);
- cb->dma = NULL;
- timer_setup(&cb->timer, timer_i915_sw_fence_wake, TIMER_IRQSAFE);
- init_irq_work(&cb->work, irq_i915_sw_fence_work);
+ func = dma_i915_sw_fence_wake;
if (timeout) {
- cb->dma = dma_fence_get(dma);
- mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
+ struct i915_sw_dma_fence_cb_timer *timer =
+ container_of(cb, typeof(*timer), base);
+
+ timer->dma = dma_fence_get(dma);
+ init_irq_work(&timer->work, irq_i915_sw_fence_work);
+
+ timer_setup(&timer->timer,
+ timer_i915_sw_fence_wake, TIMER_IRQSAFE);
+ mod_timer(&timer->timer, round_jiffies_up(jiffies + timeout));
+
+ func = dma_i915_sw_fence_wake_timer;
}
- ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+ ret = dma_fence_add_callback(dma, &cb->base, func);
if (ret == 0) {
ret = 1;
} else {
- dma_i915_sw_fence_wake(dma, &cb->base);
+ func(dma, &cb->base);
if (ret == -ENOENT) /* fence already signaled */
ret = 0;
}
@@ -465,6 +493,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
int ret = 0, pending;
debug_fence_assert(fence);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
if (write) {
struct dma_fence **shared;
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 0087acf..58f8d0c 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -86,7 +86,7 @@ struct i915_syncmap {
/**
* i915_syncmap_init -- initialise the #i915_syncmap
- * @root - pointer to the #i915_syncmap
+ * @root: pointer to the #i915_syncmap
*/
void i915_syncmap_init(struct i915_syncmap **root)
{
@@ -139,9 +139,9 @@ static inline bool seqno_later(u32 a, u32 b)
/**
* i915_syncmap_is_later -- compare against the last know sync point
- * @root - pointer to the #i915_syncmap
- * @id - the context id (other timeline) we are synchronising to
- * @seqno - the sequence number along the other timeline
+ * @root: pointer to the #i915_syncmap
+ * @id: the context id (other timeline) we are synchronising to
+ * @seqno: the sequence number along the other timeline
*
* If we have already synchronised this @root timeline with another (@id) then
* we can omit any repeated or earlier synchronisation requests. If the two
@@ -339,9 +339,9 @@ found:
/**
* i915_syncmap_set -- mark the most recent syncpoint between contexts
- * @root - pointer to the #i915_syncmap
- * @id - the context id (other timeline) we have synchronised to
- * @seqno - the sequence number along the other timeline
+ * @root: pointer to the #i915_syncmap
+ * @id: the context id (other timeline) we have synchronised to
+ * @seqno: the sequence number along the other timeline
*
* When we synchronise this @root timeline with another (@id), we also know
* that we have synchronized with all previous seqno along that timeline. If
@@ -382,7 +382,7 @@ static void __sync_free(struct i915_syncmap *p)
/**
* i915_syncmap_free -- free all memory associated with the syncmap
- * @root - pointer to the #i915_syncmap
+ * @root: pointer to the #i915_syncmap
*
* Either when the timeline is to be freed and we no longer need the sync
* point tracking, or when the fences are all known to be signaled and the
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 791759f..e5e6f6b 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -42,14 +42,30 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
static u32 calc_residency(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
- return DIV_ROUND_CLOSEST_ULL(intel_rc6_residency_us(dev_priv, reg),
- 1000);
+ u64 res;
+
+ intel_runtime_pm_get(dev_priv);
+ res = intel_rc6_residency_us(dev_priv, reg);
+ intel_runtime_pm_put(dev_priv);
+
+ return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%x\n", intel_rc6_enabled());
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ unsigned int mask;
+
+ mask = 0;
+ if (HAS_RC6(dev_priv))
+ mask |= BIT(0);
+ if (HAS_RC6p(dev_priv))
+ mask |= BIT(1);
+ if (HAS_RC6pp(dev_priv))
+ mask |= BIT(2);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", mask);
}
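
For reference, the mask reported above sets one bit per supported RC6 level; a tiny decoding sketch (the value 0x3 is only an example readback):

	unsigned int mask = 0x3;		/* hypothetical: RC6 and RC6p, no RC6pp */
	bool has_rc6   = mask & BIT(0);		/* true */
	bool has_rc6p  = mask & BIT(1);		/* true */
	bool has_rc6pp = mask & BIT(2);		/* false */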
static ssize_t
@@ -252,14 +268,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
- u32 rpstat = I915_READ(GEN6_RPSTAT1);
- if (INTEL_GEN(dev_priv) >= 9)
- ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
- else
- ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
- ret = intel_gpu_freq(dev_priv, ret);
+ ret = intel_gpu_freq(dev_priv,
+ intel_get_cagf(dev_priv,
+ I915_READ(GEN6_RPSTAT1)));
}
mutex_unlock(&dev_priv->pcu_lock);
@@ -293,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val;
+ bool boost = false;
ssize_t ret;
+ u32 val;
ret = kstrtou32(buf, 0, &val);
if (ret)
@@ -306,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
return -EINVAL;
mutex_lock(&dev_priv->pcu_lock);
- rps->boost_freq = val;
+ if (val != rps->boost_freq) {
+ rps->boost_freq = val;
+ boost = atomic_read(&rps->num_waiters);
+ }
mutex_unlock(&dev_priv->pcu_lock);
+ if (boost)
+ schedule_work(&rps->work);
return count;
}
@@ -434,13 +451,13 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
return ret ?: count;
}
-static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
-static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
-static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+static DEVICE_ATTR_RO(gt_act_freq_mhz);
+static DEVICE_ATTR_RO(gt_cur_freq_mhz);
+static DEVICE_ATTR_RW(gt_boost_freq_mhz);
+static DEVICE_ATTR_RW(gt_max_freq_mhz);
+static DEVICE_ATTR_RW(gt_min_freq_mhz);
-static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
+static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
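
The DEVICE_ATTR_RO/RW conversion leans on the *_show/*_store naming convention this file already follows; roughly, the macros expand along these lines (a sketch, not the exact kernel definition):

	/* static DEVICE_ATTR_RW(gt_boost_freq_mhz) is approximately: */
	static struct device_attribute dev_attr_gt_boost_freq_mhz =
		__ATTR(gt_boost_freq_mhz, 0644,
		       gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);

	/* static DEVICE_ATTR_RO(gt_act_freq_mhz) is approximately: */
	static struct device_attribute dev_attr_gt_act_freq_mhz =
		__ATTR(gt_act_freq_mhz, 0444, gt_act_freq_mhz_show, NULL);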
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 4e76768..408827b 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -586,8 +586,7 @@ TRACE_EVENT(i915_gem_evict_vm,
);
TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct drm_i915_gem_request *to,
- struct drm_i915_gem_request *from),
+ TP_PROTO(struct i915_request *to, struct i915_request *from),
TP_ARGS(to, from),
TP_STRUCT__entry(
@@ -610,12 +609,13 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->seqno)
);
-TRACE_EVENT(i915_gem_request_queue,
- TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
- TP_ARGS(req, flags),
+TRACE_EVENT(i915_request_queue,
+ TP_PROTO(struct i915_request *rq, u32 flags),
+ TP_ARGS(rq, flags),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
@@ -623,116 +623,120 @@ TRACE_EVENT(i915_gem_request_queue,
),
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->flags)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->flags)
);
-DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req),
+DECLARE_EVENT_CLASS(i915_request,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq),
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, ctx)
+ __field(u32, hw_id)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
- __entry->global = req->global_seqno;
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global = rq->global_seqno;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->global)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->global)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_add,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
-DEFINE_EVENT(i915_gem_request, i915_gem_request_submit,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_submit,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_execute,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_execute,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
-DECLARE_EVENT_CLASS(i915_gem_request_hw,
- TP_PROTO(struct drm_i915_gem_request *req,
- unsigned int port),
- TP_ARGS(req, port),
+DECLARE_EVENT_CLASS(i915_request_hw,
+ TP_PROTO(struct i915_request *rq, unsigned int port),
+ TP_ARGS(rq, port),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
__field(u32, global_seqno)
- __field(u32, ctx)
__field(u32, port)
),
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
- __entry->global_seqno = req->global_seqno;
- __entry->port = port;
- ),
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global_seqno = rq->global_seqno;
+ __entry->port = port;
+ ),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
- __entry->dev, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global_seqno,
- __entry->port)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
+ __entry->dev, __entry->hw_id, __entry->ring,
+ __entry->ctx, __entry->seqno,
+ __entry->global_seqno, __entry->port)
);
-DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
- TP_PROTO(struct drm_i915_gem_request *req, unsigned int port),
- TP_ARGS(req, port)
+DEFINE_EVENT(i915_request_hw, i915_request_in,
+ TP_PROTO(struct i915_request *rq, unsigned int port),
+ TP_ARGS(rq, port)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_out,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_out,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
#else
#if !defined(TRACE_HEADER_MULTI_READ)
static inline void
-trace_i915_gem_request_submit(struct drm_i915_gem_request *req)
+trace_i915_request_submit(struct i915_request *rq)
{
}
static inline void
-trace_i915_gem_request_execute(struct drm_i915_gem_request *req)
+trace_i915_request_execute(struct i915_request *rq)
{
}
static inline void
-trace_i915_gem_request_in(struct drm_i915_gem_request *req, unsigned int port)
+trace_i915_request_in(struct i915_request *rq, unsigned int port)
{
}
static inline void
-trace_i915_gem_request_out(struct drm_i915_gem_request *req)
+trace_i915_request_out(struct i915_request *rq)
{
}
#endif
@@ -761,17 +765,18 @@ TRACE_EVENT(intel_engine_notify,
__entry->waiters)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_retire,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
-TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct drm_i915_gem_request *req, unsigned int flags),
- TP_ARGS(req, flags),
+TRACE_EVENT(i915_request_wait_begin,
+ TP_PROTO(struct i915_request *rq, unsigned int flags),
+ TP_ARGS(rq, flags),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
@@ -786,23 +791,24 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->ring = req->engine->id;
- __entry->ctx = req->fence.context;
- __entry->seqno = req->fence.seqno;
- __entry->global = req->global_seqno;
+ __entry->dev = rq->i915->drm.primary->index;
+ __entry->hw_id = rq->ctx->hw_id;
+ __entry->ring = rq->engine->id;
+ __entry->ctx = rq->fence.context;
+ __entry->seqno = rq->fence.seqno;
+ __entry->global = rq->global_seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
- __entry->flags)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->global,
+ !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
+DEFINE_EVENT(i915_request, i915_request_wait_end,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
);
TRACE_EVENT(i915_flip_request,
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index af3d7cc..51dbfe5 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -83,8 +83,11 @@
(typeof(ptr))(__v & -BIT(n)); \
})
-#define ptr_pack_bits(ptr, bits, n) \
- ((typeof(ptr))((unsigned long)(ptr) | (bits)))
+#define ptr_pack_bits(ptr, bits, n) ({ \
+ unsigned long __bits = (bits); \
+ GEM_BUG_ON(__bits & -BIT(n)); \
+ ((typeof(ptr))((unsigned long)(ptr) | __bits)); \
+})
#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
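
A small sketch of what the added GEM_BUG_ON now catches (obj is assumed to be a pointer with at least n low bits free thanks to alignment; the packed values are illustrative):

	void *packed = ptr_pack_bits(obj, 0x3, 2);	/* fine: 0x3 & -BIT(2) == 0 */
	void *oops   = ptr_pack_bits(obj, 0x5, 2);	/* now trips GEM_BUG_ON(0x5 & -BIT(2)) */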
@@ -137,4 +140,19 @@ static inline void drain_delayed_work(struct delayed_work *dw)
} while (delayed_work_pending(dw));
}
+static inline const char *yesno(bool v)
+{
+ return v ? "yes" : "no";
+}
+
+static inline const char *onoff(bool v)
+{
+ return v ? "on" : "off";
+}
+
+static inline const char *enableddisabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
#endif /* !__I915_UTILS_H */
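
These helpers are intended for debug strings; a minimal usage sketch (the message itself is made up for illustration, using accessors defined elsewhere in this series):

	DRM_DEBUG_KMS("vma is ggtt: %s, map-and-fenceable: %s, ggtt-write tracking: %s\n",
		      yesno(i915_vma_is_ggtt(vma)),
		      onoff(i915_vma_is_map_and_fenceable(vma)),
		      enableddisabled(i915_vma_has_ggtt_write(vma)));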
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index fbfab2f..4bda3bd 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -31,8 +31,7 @@
#include <drm/drm_gem.h>
static void
-i915_vma_retire(struct i915_gem_active *active,
- struct drm_i915_gem_request *rq)
+i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
{
const unsigned int idx = rq->engine->id;
struct i915_vma *vma =
@@ -142,6 +141,12 @@ vma_create(struct drm_i915_gem_object *obj,
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
+ /*
+ * We put the GGTT vma at the start of the vma-list, followed
+ * by the ppGTT vma. This allows us to break early when
+ * iterating over only the GGTT vma for an object, see
+ * for_each_ggtt_vma()
+ */
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
@@ -305,7 +310,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = vma->iomap;
if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
vma->node.size);
if (ptr == NULL) {
@@ -322,6 +327,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (err)
goto err_unpin;
+ i915_vma_set_ggtt_write(vma);
return ptr;
err_unpin:
@@ -330,12 +336,24 @@ err:
return IO_ERR_PTR(err);
}
+void i915_vma_flush_writes(struct i915_vma *vma)
+{
+ if (!i915_vma_has_ggtt_write(vma))
+ return;
+
+ i915_gem_flush_ggtt_writes(vma->vm->i915);
+
+ i915_vma_unset_ggtt_write(vma);
+}
+
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
+ i915_vma_flush_writes(vma);
+
i915_vma_unpin_fence(vma);
i915_vma_unpin(vma);
}
@@ -466,6 +484,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 start, end;
int ret;
+ GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -640,15 +659,17 @@ int __i915_vma_do_pin(struct i915_vma *vma,
if (ret)
goto err_unpin;
}
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
if (ret)
goto err_remove;
+ GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
@@ -656,6 +677,7 @@ err_remove:
if ((bound & I915_VMA_BIND_MASK) == 0) {
i915_vma_remove(vma);
GEM_BUG_ON(vma->pages);
+ GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
}
err_unpin:
__i915_vma_unpin(vma);
@@ -675,7 +697,9 @@ static void i915_vma_destroy(struct i915_vma *vma)
GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
+ list_del(&vma->obj_link);
list_del(&vma->vm_link);
+
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@@ -687,7 +711,6 @@ void i915_vma_close(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_closed(vma));
vma->flags |= I915_VMA_CLOSED;
- list_del(&vma->obj_link);
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
@@ -740,6 +763,7 @@ int i915_vma_unbind(struct i915_vma *vma)
/* First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
*/
+ might_sleep();
active = i915_vma_get_active(vma);
if (active) {
int idx;
@@ -786,6 +810,15 @@ int i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
if (i915_vma_is_map_and_fenceable(vma)) {
+ /*
+ * Check that we have flushed all writes through the GGTT
+ * before the unbind; otherwise, due to the non-strict nature
+ * of those indirect writes, they may end up referencing the
+ * GGTT PTE after the unbind.
+ */
+ i915_vma_flush_writes(vma);
+ GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
+
/* release the fence reg _after_ flushing */
ret = i915_vma_put_fence(vma);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 1e2bc9b..8c50220 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -32,8 +32,8 @@
#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
-#include "i915_gem_request.h"
+#include "i915_request.h"
enum i915_cache_level;
@@ -90,6 +90,7 @@ struct i915_vma {
#define I915_VMA_CLOSED BIT(10)
#define I915_VMA_USERFAULT_BIT 11
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
+#define I915_VMA_GGTT_WRITE BIT(12)
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -138,6 +139,24 @@ static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
return vma->flags & I915_VMA_GGTT;
}
+static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT_WRITE;
+}
+
+static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ vma->flags |= I915_VMA_GGTT_WRITE;
+}
+
+static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+{
+ vma->flags &= ~I915_VMA_GGTT_WRITE;
+}
+
+void i915_vma_flush_writes(struct i915_vma *vma);
+
static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
return vma->flags & I915_VMA_CAN_FENCE;
@@ -389,5 +408,19 @@ i915_vma_unpin_fence(struct i915_vma *vma)
__i915_vma_unpin_fence(vma);
}
-#endif
+#define for_each_until(cond) if (cond) break; else
+
+/**
+ * for_each_ggtt_vma - Iterate over the GGTT VMA belonging to an object.
+ * @V: the #i915_vma iterator
+ * @OBJ: the #drm_i915_gem_object
+ *
+ * GGTT VMAs are placed at the beginning of the object's vma_list, see
+ * vma_create(), so we can stop our walk as soon as we see a ppGTT VMA,
+ * or when the list is empty.
+ */
+#define for_each_ggtt_vma(V, OBJ) \
+ list_for_each_entry(V, &(OBJ)->vma_list, obj_link) \
+ for_each_until(!i915_vma_is_ggtt(V))
+#endif
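
A minimal usage sketch of the new iterator (obj is a struct drm_i915_gem_object *; the flush is just an example of per-vma work):

	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		/* only GGTT bindings are visited; the walk stops at the first ppGTT vma */
		i915_vma_flush_writes(vma);
	}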
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 36d4e63..e9fb6920 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -110,6 +110,8 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
to_intel_digital_connector_state(old_state);
struct drm_crtc_state *crtc_state;
+ intel_hdcp_atomic_check(conn, old_state, new_state);
+
if (!new_state->crtc)
return 0;
@@ -186,13 +188,14 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
/**
* intel_crtc_destroy_state - destroy crtc state
* @crtc: drm crtc
+ * @state: the state to destroy
*
* Destroys the crtc state (both common and Intel-specific) for the
* specified crtc.
*/
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_crtc_state *state)
{
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
@@ -200,7 +203,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
* @dev_priv: i915 device
- * @crtc: intel crtc
+ * @intel_crtc: intel crtc
* @crtc_state: incoming crtc_state to validate and setup scalers
*
* This function sets up scalers based on staged scaling requests for
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 8e6dc15..7481ce8 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -56,7 +56,6 @@ intel_create_plane_state(struct drm_plane *plane)
state->base.plane = plane;
state->base.rotation = DRM_MODE_ROTATE_0;
- state->ckey.flags = I915_SET_COLORKEY_NONE;
return state;
}
@@ -86,6 +85,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, state);
intel_state->vma = NULL;
+ intel_state->flags = 0;
return state;
}
@@ -129,14 +129,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
- /* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
- intel_state->clip.x1 = 0;
- intel_state->clip.y1 = 0;
- intel_state->clip.x2 =
- crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
- intel_state->clip.y2 =
- crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
-
if (state->fb && drm_rotation_90_or_270(state->rotation)) {
struct drm_format_name_buf format_name;
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 0ddba16..709d6ca 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -102,13 +102,13 @@ static const struct dp_aud_n_m dp_aud_n_m[] = {
};
static const struct dp_aud_n_m *
-audio_config_dp_get_n_m(struct intel_crtc *intel_crtc, int rate)
+audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
if (rate == dp_aud_n_m[i].sample_rate &&
- intel_crtc->config->port_clock == dp_aud_n_m[i].clock)
+ crtc_state->port_clock == dp_aud_n_m[i].clock)
return &dp_aud_n_m[i];
}
@@ -157,8 +157,10 @@ static const struct {
};
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
-static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted_mode)
+static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
@@ -179,9 +181,11 @@ static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted
return hdmi_audio_clock[i].config;
}
-static int audio_config_hdmi_get_n(const struct drm_display_mode *adjusted_mode,
+static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
int rate)
{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
@@ -220,7 +224,9 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
return true;
}
-static void g4x_audio_codec_disable(struct intel_encoder *encoder)
+static void g4x_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t eldv, tmp;
@@ -239,11 +245,12 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder)
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
-static void g4x_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *encoder,
- const struct drm_display_mode *adjusted_mode)
+static void g4x_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t tmp;
@@ -279,16 +286,20 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
}
static void
-hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
- const struct drm_display_mode *adjusted_mode)
+hsw_dp_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- int rate = acomp ? acomp->aud_sample_rate[port] : 0;
- const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, rate);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum port port = encoder->port;
+ enum pipe pipe = crtc->pipe;
+ const struct dp_aud_n_m *nm;
+ int rate;
u32 tmp;
+ rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ nm = audio_config_dp_get_n_m(crtc_state, rate);
if (nm)
DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
else
@@ -323,23 +334,26 @@ hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
}
static void
-hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
- const struct drm_display_mode *adjusted_mode)
+hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- int rate = acomp ? acomp->aud_sample_rate[port] : 0;
- enum pipe pipe = intel_crtc->pipe;
- int n;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum port port = encoder->port;
+ enum pipe pipe = crtc->pipe;
+ int n, rate;
u32 tmp;
+ rate = acomp ? acomp->aud_sample_rate[port] : 0;
+
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+ tmp |= audio_config_hdmi_pixel_clock(crtc_state);
- n = audio_config_hdmi_get_n(adjusted_mode, rate);
+ n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
DRM_DEBUG_KMS("using N %d\n", n);
@@ -363,20 +377,22 @@ hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
}
static void
-hsw_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
- const struct drm_display_mode *adjusted_mode)
+hsw_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- hsw_dp_audio_config_update(intel_crtc, port, adjusted_mode);
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ hsw_dp_audio_config_update(encoder, crtc_state);
else
- hsw_hdmi_audio_config_update(intel_crtc, port, adjusted_mode);
+ hsw_hdmi_audio_config_update(encoder, crtc_state);
}
-static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+static void hsw_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
uint32_t tmp;
DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
@@ -389,7 +405,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
@@ -402,14 +418,14 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->av_mutex);
}
-static void hsw_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *intel_encoder,
- const struct drm_display_mode *adjusted_mode)
+static void hsw_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
- enum port port = intel_encoder->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_connector *connector = conn_state->connector;
+ enum pipe pipe = crtc->pipe;
const uint8_t *eld = connector->eld;
uint32_t tmp;
int len, i;
@@ -448,17 +464,19 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
- hsw_audio_config_update(intel_crtc, port, adjusted_mode);
+ hsw_audio_config_update(encoder, crtc_state);
mutex_unlock(&dev_priv->av_mutex);
}
-static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
+static void ilk_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
- enum port port = intel_encoder->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum port port = encoder->port;
uint32_t tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
@@ -485,7 +503,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp);
@@ -497,14 +515,15 @@ static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
I915_WRITE(aud_cntrl_st2, tmp);
}
-static void ilk_audio_codec_enable(struct drm_connector *connector,
- struct intel_encoder *intel_encoder,
- const struct drm_display_mode *adjusted_mode)
+static void ilk_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
- enum port port = intel_encoder->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_connector *connector = conn_state->connector;
+ enum pipe pipe = crtc->pipe;
+ enum port port = encoder->port;
uint8_t *eld = connector->eld;
uint32_t tmp, eldv;
int len, i;
@@ -568,36 +587,36 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
+ if (intel_crtc_has_dp_encoder(crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
- tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+ tmp |= audio_config_hdmi_pixel_clock(crtc_state);
I915_WRITE(aud_config, tmp);
}
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
- * @intel_encoder: encoder on which to enable audio
+ * @encoder: encoder on which to enable audio
* @crtc_state: pointer to the current crtc state.
* @conn_state: pointer to the current connector state.
*
* The enable sequences may only be performed after enabling the transcoder and
* port, and after completed link training.
*/
-void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
+void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
- struct drm_connector *connector;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- enum port port = intel_encoder->port;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ enum port port = encoder->port;
+ enum pipe pipe = crtc->pipe;
- connector = conn_state->connector;
- if (!connector || !connector->eld[0])
+ if (!connector->eld[0])
return;
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -609,19 +628,20 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
if (dev_priv->display.audio_codec_enable)
- dev_priv->display.audio_codec_enable(connector, intel_encoder,
- adjusted_mode);
+ dev_priv->display.audio_codec_enable(encoder,
+ crtc_state,
+ conn_state);
mutex_lock(&dev_priv->av_mutex);
- intel_encoder->audio_connector = connector;
+ encoder->audio_connector = connector;
/* referred in audio callbacks */
- dev_priv->av_enc_map[pipe] = intel_encoder;
+ dev_priv->av_enc_map[pipe] = encoder;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
- if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
(int) port, (int) pipe);
@@ -629,36 +649,41 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
intel_lpe_audio_notify(dev_priv, pipe, port, connector->eld,
crtc_state->port_clock,
- intel_encoder->type == INTEL_OUTPUT_DP);
+ intel_crtc_has_dp_encoder(crtc_state));
}
/**
* intel_audio_codec_disable - Disable the audio codec for HD audio
- * @intel_encoder: encoder on which to disable audio
+ * @encoder: encoder on which to disable audio
+ * @old_crtc_state: pointer to the old crtc state.
+ * @old_conn_state: pointer to the old connector state.
*
* The disable sequences must be performed before disabling the transcoder or
* port.
*/
-void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- enum port port = intel_encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
if (dev_priv->display.audio_codec_disable)
- dev_priv->display.audio_codec_disable(intel_encoder);
+ dev_priv->display.audio_codec_disable(encoder,
+ old_crtc_state,
+ old_conn_state);
mutex_lock(&dev_priv->av_mutex);
- intel_encoder->audio_connector = NULL;
+ encoder->audio_connector = NULL;
dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
- if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
(int) port, (int) pipe);
@@ -679,7 +704,7 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
- } else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) {
+ } else if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) {
dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
} else if (HAS_PCH_SPLIT(dev_priv)) {
@@ -754,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
- return NULL;
-
/* MST */
if (pipe >= 0) {
+ if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+ return NULL;
+
encoder = dev_priv->av_enc_map[pipe];
/*
* when bootup, audio driver may not know it is
@@ -793,10 +818,9 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct intel_encoder *intel_encoder;
- struct intel_crtc *crtc;
- struct drm_display_mode *adjusted_mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
int err = 0;
if (!HAS_DDI(dev_priv))
@@ -806,23 +830,19 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
- intel_encoder = get_saved_enc(dev_priv, port, pipe);
- if (!intel_encoder || !intel_encoder->base.crtc) {
+ encoder = get_saved_enc(dev_priv, port, pipe);
+ if (!encoder || !encoder->base.crtc) {
DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
- /* pipe passed from the audio driver will be -1 for Non-MST case */
- crtc = to_intel_crtc(intel_encoder->base.crtc);
- pipe = crtc->pipe;
-
- adjusted_mode = &crtc->config->base.adjusted_mode;
+ crtc = to_intel_crtc(encoder->base.crtc);
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
- hsw_audio_config_update(crtc, port, adjusted_mode);
+ hsw_audio_config_update(encoder, crtc->config);
unlock:
mutex_unlock(&dev_priv->av_mutex);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fd23023..c5c7530 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -391,7 +391,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv,
bool alternate)
{
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 2:
return alternate ? 66667 : 48000;
case 3:
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
return 0;
}
+/*
+ * Get the length of the deassert fragment prefixed to a v1 init OTP sequence:
+ * skip all delay + gpio operands and stop at the first DSI packet op.
+ */
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+{
+ const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ int index, len;
+
+ if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+ return 0;
+
+ /* index = 1 to skip sequence byte */
+ for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+ switch (data[index]) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ return index == 1 ? 0 : index;
+ case MIPI_SEQ_ELEM_DELAY:
+ len = 5; /* 1 byte for operand + uint32 */
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+{
+ u8 *init_otp;
+ int len;
+
+ /* Limit this to VLV for now. */
+ if (!IS_VALLEYVIEW(dev_priv))
+ return;
+
+ /* Limit this to v1 vid-mode sequences */
+ if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+ dev_priv->vbt.dsi.seq_version != 1)
+ return;
+
+ /* Only do this if there are otp and assert seqs and no deassert seq */
+ if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+ !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+ return;
+
+ /* The deassert-sequence ends at the first DSI packet */
+ len = get_init_otp_deassert_fragment_len(dev_priv);
+ if (!len)
+ return;
+
+ DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+ /* Copy the fragment, update seq byte and terminate it */
+ init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.deassert_seq)
+ return;
+ dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+ dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+ /* Use the copy for deassert */
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+ dev_priv->vbt.dsi.deassert_seq;
+ /* Replace the last byte of the fragment with init OTP seq byte */
+ init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+ /* And make MIPI_SEQ_INIT_OTP point to it */
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
dev_priv->vbt.dsi.size = seq_size;
dev_priv->vbt.dsi.seq_version = sequence->version;
+ fixup_mipi_sequences(dev_priv);
+
DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
return;
@@ -1107,6 +1189,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
}
static const u8 cnp_ddc_pin_map[] = {
+ [0] = 0, /* N/A */
[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
@@ -1115,9 +1198,14 @@ static const u8 cnp_ddc_pin_map[] = {
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
- if (HAS_PCH_CNP(dev_priv) &&
- vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
- return cnp_ddc_pin_map[vbt_pin];
+ if (HAS_PCH_CNP(dev_priv)) {
+ if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
+ return cnp_ddc_pin_map[vbt_pin];
+ } else {
+ DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
+ return 0;
+ }
+ }
return vbt_pin;
}
@@ -1140,6 +1228,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
{DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
{DVO_PORT_HDMID, DVO_PORT_DPD, -1},
{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+ {DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
};
/*
@@ -1234,6 +1323,30 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
info->hdmi_level_shift = hdmi_level_shift;
}
+ if (bdb_version >= 204) {
+ int max_tmds_clock;
+
+ switch (child->hdmi_max_data_rate) {
+ default:
+ MISSING_CASE(child->hdmi_max_data_rate);
+ /* fall through */
+ case HDMI_MAX_DATA_RATE_PLATFORM:
+ max_tmds_clock = 0;
+ break;
+ case HDMI_MAX_DATA_RATE_297:
+ max_tmds_clock = 297000;
+ break;
+ case HDMI_MAX_DATA_RATE_165:
+ max_tmds_clock = 165000;
+ break;
+ }
+
+ if (max_tmds_clock)
+ DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n",
+ port_name(port), max_tmds_clock);
+ info->max_tmds_clock = max_tmds_clock;
+ }
+
/* Parse the I_boost config for SKL and above */
if (bdb_version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level);
@@ -1243,6 +1356,27 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
port_name(port), info->hdmi_boost_level);
}
+
+ /* DP max link rate for CNL+ */
+ if (bdb_version >= 216) {
+ switch (child->dp_max_link_rate) {
+ default:
+ case VBT_DP_MAX_LINK_RATE_HBR3:
+ info->dp_max_link_rate = 810000;
+ break;
+ case VBT_DP_MAX_LINK_RATE_HBR2:
+ info->dp_max_link_rate = 540000;
+ break;
+ case VBT_DP_MAX_LINK_RATE_HBR:
+ info->dp_max_link_rate = 270000;
+ break;
+ case VBT_DP_MAX_LINK_RATE_LBR:
+ info->dp_max_link_rate = 162000;
+ break;
+ }
+ DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
+ port_name(port), info->dp_max_link_rate);
+ }
}
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
@@ -1299,11 +1433,13 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
} else if (bdb->version == 195) {
expected_size = 37;
- } else if (bdb->version <= 197) {
+ } else if (bdb->version <= 215) {
expected_size = 38;
+ } else if (bdb->version <= 216) {
+ expected_size = 39;
} else {
- expected_size = 38;
- BUILD_BUG_ON(sizeof(*child) < 38);
+ expected_size = sizeof(*child);
+ BUILD_BUG_ON(sizeof(*child) < 39);
DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
bdb->version, expected_size);
}
@@ -1557,6 +1693,29 @@ out:
}
/**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
+ kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+ dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+ dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.dsi.data);
+ dev_priv->vbt.dsi.data = NULL;
+ kfree(dev_priv->vbt.dsi.pps);
+ dev_priv->vbt.dsi.pps = NULL;
+ kfree(dev_priv->vbt.dsi.config);
+ dev_priv->vbt.dsi.config = NULL;
+ kfree(dev_priv->vbt.dsi.deassert_seq);
+ dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
+/**
* intel_bios_is_tv_present - is integrated TV present in VBT
* @dev_priv: i915 device instance
*
@@ -1664,6 +1823,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};
int i;
@@ -1702,6 +1862,7 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
[PORT_C] = DVO_PORT_DPC,
[PORT_D] = DVO_PORT_DPD,
[PORT_E] = DVO_PORT_DPE,
+ [PORT_F] = DVO_PORT_DPF,
};
int i;
@@ -1737,6 +1898,7 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
@@ -1903,6 +2065,11 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
if (port == PORT_D)
return true;
break;
+ case DVO_PORT_DPF:
+ case DVO_PORT_HDMIF:
+ if (port == PORT_F)
+ return true;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index bcbc7ab..1f79e7a 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,6 +27,12 @@
#include "i915_drv.h"
+#ifdef CONFIG_SMP
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
+#else
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
+#endif
+
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
struct intel_wait *wait;
@@ -36,8 +42,20 @@ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
wait = b->irq_wait;
if (wait) {
+ /*
+ * N.B. Since task_asleep() and ttwu are not atomic, the
+ * waiter may actually go to sleep after the check, causing
+ * us to suppress a valid wakeup. We prefer to reduce the
+ * number of false positive missed_breadcrumb() warnings
+ * at the expense of a few false negatives, as it is easy
+ * to trigger a false positive under heavy load. Enough
+ * signal should remain from genuine missed_breadcrumb()
+ * for us to detect in CI.
+ */
+ bool was_asleep = task_asleep(wait->tsk);
+
result = ENGINE_WAKEUP_WAITER;
- if (wake_up_process(wait->tsk))
+ if (wake_up_process(wait->tsk) && was_asleep)
result |= ENGINE_WAKEUP_ASLEEP;
}
@@ -64,20 +82,21 @@ static unsigned long wait_timeout(void)
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
- DRM_DEBUG_DRIVER("%s missed breadcrumb at %pS, irq posted? %s, current seqno=%x, last=%x\n",
- engine->name, __builtin_return_address(0),
- yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)),
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine));
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p,
+ "%s missed breadcrumb at %pS\n",
+ engine->name, __builtin_return_address(0));
+ }
set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
- struct intel_engine_cs *engine = from_timer(engine, t,
- breadcrumbs.hangcheck);
+ struct intel_engine_cs *engine =
+ from_timer(engine, t, breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
if (!b->irq_armed)
@@ -103,7 +122,7 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
*/
if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
missed_breadcrumb(engine);
- mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+ mod_timer(&b->fake_irq, jiffies + 1);
} else {
mod_timer(&b->hangcheck, wait_timeout());
}
@@ -123,28 +142,25 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
*/
spin_lock_irq(&b->irq_lock);
- if (!__intel_breadcrumbs_wakeup(b))
+ if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
__intel_engine_disarm_breadcrumbs(engine);
spin_unlock_irq(&b->irq_lock);
if (!b->irq_armed)
return;
mod_timer(&b->fake_irq, jiffies + 1);
-
- /* Ensure that even if the GPU hangs, we get woken up.
- *
- * However, note that if no one is waiting, we never notice
- * a gpu hang. Eventually, we will have to wait for a resource
- * held by the GPU and so trigger a hangcheck. In the most
- * pathological case, this will be upon memory starvation! To
- * prevent this, we also queue the hangcheck from the retire
- * worker.
- */
- i915_queue_hangcheck(engine->i915);
}
static void irq_enable(struct intel_engine_cs *engine)
{
+ /*
+ * FIXME: Ideally we want this on the API boundary, but for the
+ * sake of testing with mock breadcrumbs (no HW so unable to
+ * enable irqs) we place it deep within the bowels, at the point
+ * of no return.
+ */
+ GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
+
/* Enabling the IRQ may miss the generation of the interrupt, but
* we still need to force the barrier before reading the seqno,
* just in case.
@@ -152,17 +168,21 @@ static void irq_enable(struct intel_engine_cs *engine)
set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
- engine->irq_enable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ if (engine->irq_enable) {
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_enable(engine);
+ spin_unlock(&engine->i915->irq_lock);
+ }
}
static void irq_disable(struct intel_engine_cs *engine)
{
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
- engine->irq_disable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ if (engine->irq_disable) {
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_disable(engine);
+ spin_unlock(&engine->i915->irq_lock);
+ }
}
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
@@ -171,51 +191,70 @@ void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
lockdep_assert_held(&b->irq_lock);
GEM_BUG_ON(b->irq_wait);
+ GEM_BUG_ON(!b->irq_armed);
- if (b->irq_enabled) {
+ GEM_BUG_ON(!b->irq_enabled);
+ if (!--b->irq_enabled)
irq_disable(engine);
- b->irq_enabled = false;
- }
b->irq_armed = false;
}
+void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ spin_lock_irq(&b->irq_lock);
+ if (!b->irq_enabled++)
+ irq_enable(engine);
+ GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
+ spin_unlock_irq(&b->irq_lock);
+}
+
+void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ spin_lock_irq(&b->irq_lock);
+ GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
+ if (!--b->irq_enabled)
+ irq_disable(engine);
+ spin_unlock_irq(&b->irq_lock);
+}
+
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct intel_wait *wait, *n, *first;
+ struct intel_wait *wait, *n;
if (!b->irq_armed)
- goto wakeup_signaler;
+ return;
- /* We only disarm the irq when we are idle (all requests completed),
+ /*
+ * We only disarm the irq when we are idle (all requests completed),
* so if the bottom-half remains asleep, it missed the request
* completion.
*/
+ if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
+ missed_breadcrumb(engine);
spin_lock_irq(&b->rb_lock);
spin_lock(&b->irq_lock);
- first = fetch_and_zero(&b->irq_wait);
- __intel_engine_disarm_breadcrumbs(engine);
+ b->irq_wait = NULL;
+ if (b->irq_armed)
+ __intel_engine_disarm_breadcrumbs(engine);
spin_unlock(&b->irq_lock);
rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
+ GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine),
+ wait->seqno));
RB_CLEAR_NODE(&wait->node);
- if (wake_up_process(wait->tsk) && wait == first)
- missed_breadcrumb(engine);
+ wake_up_process(wait->tsk);
}
b->waiters = RB_ROOT;
spin_unlock_irq(&b->rb_lock);
-
- /*
- * The signaling thread may be asleep holding a reference to a request,
- * that had its signaling cancelled prior to being preempted. We need
- * to kick the signaler, just in case, to release any such reference.
- */
-wakeup_signaler:
- wake_up_process(b->signaler);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -249,6 +288,7 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
struct drm_i915_private *i915 = engine->i915;
+ bool enabled;
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
@@ -260,7 +300,6 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
* the irq.
*/
b->irq_armed = true;
- GEM_BUG_ON(b->irq_enabled);
if (I915_SELFTEST_ONLY(b->mock)) {
/* For our mock objects we want to avoid interaction
@@ -281,14 +320,15 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
*/
/* No interrupts? Kick the waiter every jiffie! */
- if (intel_irqs_enabled(i915)) {
- if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
- irq_enable(engine);
- b->irq_enabled = true;
+ enabled = false;
+ if (!b->irq_enabled++ &&
+ !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
+ irq_enable(engine);
+ enabled = true;
}
enable_fake_irq(b);
- return true;
+ return enabled;
}
static inline struct intel_wait *to_wait(struct rb_node *node)
@@ -302,7 +342,8 @@ static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
lockdep_assert_held(&b->rb_lock);
GEM_BUG_ON(b->irq_wait == wait);
- /* This request is completed, so remove it from the tree, mark it as
+ /*
+ * This request is completed, so remove it from the tree, mark it as
* complete, and *then* wake up the associated task. N.B. when the
* task wakes up, it will find the empty rb_node, discern that it
* has already been removed from the tree and skip the serialisation
@@ -313,7 +354,8 @@ static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
rb_erase(&wait->node, &b->waiters);
RB_CLEAR_NODE(&wait->node);
- wake_up_process(wait->tsk); /* implicit smp_wmb() */
+ if (wait->tsk->state != TASK_RUNNING)
+ wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
@@ -343,6 +385,8 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
bool first, armed;
u32 seqno;
+ GEM_BUG_ON(!wait->seqno);
+
/* Insert the request into the retirement ordered list
* of waiters by walking the rbtree. If we are the oldest
* seqno in the tree (the first to be retired), then
@@ -552,36 +596,6 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
spin_unlock_irq(&b->rb_lock);
}
-static bool signal_valid(const struct drm_i915_gem_request *request)
-{
- return intel_wait_check_request(&request->signaling.wait, request);
-}
-
-static bool signal_complete(const struct drm_i915_gem_request *request)
-{
- if (!request)
- return false;
-
- /* If another process served as the bottom-half it may have already
- * signalled that this wait is already completed.
- */
- if (intel_wait_complete(&request->signaling.wait))
- return signal_valid(request);
-
- /* Carefully check if the request is complete, giving time for the
- * seqno to be visible or if the GPU hung.
- */
- if (__i915_request_irq_complete(request))
- return true;
-
- return false;
-}
-
-static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
-{
- return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
-}
-
static void signaler_set_rtpriority(void)
{
struct sched_param param = { .sched_priority = 1 };
@@ -593,17 +607,22 @@ static int intel_breadcrumbs_signaler(void *arg)
{
struct intel_engine_cs *engine = arg;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct drm_i915_gem_request *request;
+ struct i915_request *rq, *n;
/* Install ourselves with high priority to reduce signalling latency */
signaler_set_rtpriority();
do {
bool do_schedule = true;
+ LIST_HEAD(list);
+ u32 seqno;
set_current_state(TASK_INTERRUPTIBLE);
+ if (list_empty(&b->signals))
+ goto sleep;
- /* We are either woken up by the interrupt bottom-half,
+ /*
+ * We are either woken up by the interrupt bottom-half,
* or by a client adding a new signaller. In both cases,
* the GPU seqno may have advanced beyond our oldest signal.
* If it has, propagate the signal, remove the waiter and
@@ -611,44 +630,45 @@ static int intel_breadcrumbs_signaler(void *arg)
* need to wait for a new interrupt from the GPU or for
* a new client.
*/
- rcu_read_lock();
- request = rcu_dereference(b->first_signal);
- if (request)
- request = i915_gem_request_get_rcu(request);
- rcu_read_unlock();
- if (signal_complete(request)) {
- local_bh_disable();
- dma_fence_signal(&request->fence);
- local_bh_enable(); /* kick start the tasklets */
+ seqno = intel_engine_get_seqno(engine);
- spin_lock_irq(&b->rb_lock);
+ spin_lock_irq(&b->rb_lock);
+ list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
+ u32 this = rq->signaling.wait.seqno;
- /* Wake up all other completed waiters and select the
- * next bottom-half for the next user interrupt.
- */
- __intel_engine_remove_wait(engine,
- &request->signaling.wait);
-
- /* Find the next oldest signal. Note that as we have
- * not been holding the lock, another client may
- * have installed an even older signal than the one
- * we just completed - so double check we are still
- * the oldest before picking the next one.
- */
- if (request == rcu_access_pointer(b->first_signal)) {
- struct rb_node *rb =
- rb_next(&request->signaling.node);
- rcu_assign_pointer(b->first_signal,
- rb ? to_signaler(rb) : NULL);
- }
- rb_erase(&request->signaling.node, &b->signals);
- RB_CLEAR_NODE(&request->signaling.node);
+ GEM_BUG_ON(!rq->signaling.wait.seqno);
- spin_unlock_irq(&b->rb_lock);
+ if (!i915_seqno_passed(seqno, this))
+ break;
- i915_gem_request_put(request);
+ if (likely(this == i915_request_global_seqno(rq))) {
+ __intel_engine_remove_wait(engine,
+ &rq->signaling.wait);
- /* If the engine is saturated we may be continually
+ rq->signaling.wait.seqno = 0;
+ __list_del_entry(&rq->signaling.link);
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &rq->fence.flags)) {
+ list_add_tail(&rq->signaling.link,
+ &list);
+ i915_request_get(rq);
+ }
+ }
+ }
+ spin_unlock_irq(&b->rb_lock);
+
+ if (!list_empty(&list)) {
+ local_bh_disable();
+ list_for_each_entry_safe(rq, n, &list, signaling.link) {
+ dma_fence_signal(&rq->fence);
+ GEM_BUG_ON(!i915_request_completed(rq));
+ i915_request_put(rq);
+ }
+ local_bh_enable(); /* kick start the tasklets */
+
+ /*
+ * If the engine is saturated we may be continually
* processing completed requests. This angers the
* NMI watchdog if we never let anything else
* have access to the CPU. Let's pretend to be nice
@@ -659,31 +679,65 @@ static int intel_breadcrumbs_signaler(void *arg)
}
if (unlikely(do_schedule)) {
+ /* Before we sleep, check for a missed seqno */
+ if (current->state & TASK_NORMAL &&
+ !list_empty(&b->signals) &&
+ engine->irq_seqno_barrier &&
+ test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted)) {
+ engine->irq_seqno_barrier(engine);
+ intel_engine_wakeup(engine);
+ }
+
+sleep:
if (kthread_should_park())
kthread_parkme();
- if (unlikely(kthread_should_stop())) {
- i915_gem_request_put(request);
+ if (unlikely(kthread_should_stop()))
break;
- }
schedule();
}
- i915_gem_request_put(request);
} while (1);
__set_current_state(TASK_RUNNING);
return 0;
}
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
- bool wakeup)
+static void insert_signal(struct intel_breadcrumbs *b,
+ struct i915_request *request,
+ const u32 seqno)
+{
+ struct i915_request *iter;
+
+ lockdep_assert_held(&b->rb_lock);
+
+ /*
+ * A reasonable assumption is that we are called to add signals
+ * in sequence, as the requests are submitted for execution and
+ * assigned a global_seqno. This will be the case for the majority
+ * of internally generated signals (inter-engine signaling).
+ *
+ * Out of order waiters triggering random signaling enabling will
+ * be more problematic, but hopefully rare enough and the list
+ * small enough that the O(N) insertion sort is not an issue.
+ */
+
+ list_for_each_entry_reverse(iter, &b->signals, signaling.link)
+ if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
+ break;
+
+ list_add(&request->signaling.link, &iter->signaling.link);
+}
+
+void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
{
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
u32 seqno;
- /* Note that we may be called from an interrupt handler on another
+ /*
+ * Note that we may be called from an interrupt handler on another
* device (e.g. nouveau signaling a fence completion causing us
* to submit a request, and so enable signaling). As such,
* we need to make sure that all other users of b->rb_lock protect
@@ -694,18 +748,17 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&request->lock);
- seqno = i915_gem_request_global_seqno(request);
- if (!seqno)
+ seqno = i915_request_global_seqno(request);
+ if (!seqno) /* will be enabled later upon execution */
return;
+ GEM_BUG_ON(request->signaling.wait.seqno);
request->signaling.wait.tsk = b->signaler;
request->signaling.wait.request = request;
request->signaling.wait.seqno = seqno;
- i915_gem_request_get(request);
-
- spin_lock(&b->rb_lock);
- /* First add ourselves into the list of waiters, but register our
+ /*
+	 * Add ourselves into the list of waiters, but register our
* bottom-half as the signaller thread. As per usual, only the oldest
* waiter (not just signaller) is tasked as the bottom-half waking
* up all completed waiters after the user interrupt.
@@ -713,73 +766,31 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
* If we are the oldest waiter, enable the irq (after which we
* must double check that the seqno did not complete).
*/
+ spin_lock(&b->rb_lock);
+ insert_signal(b, request, seqno);
wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
-
- if (!__i915_gem_request_completed(request, seqno)) {
- struct rb_node *parent, **p;
- bool first;
-
- /* Now insert ourselves into the retirement ordered list of
- * signals on this engine. We track the oldest seqno as that
- * will be the first signal to complete.
- */
- parent = NULL;
- first = true;
- p = &b->signals.rb_node;
- while (*p) {
- parent = *p;
- if (i915_seqno_passed(seqno,
- to_signaler(parent)->signaling.wait.seqno)) {
- p = &parent->rb_right;
- first = false;
- } else {
- p = &parent->rb_left;
- }
- }
- rb_link_node(&request->signaling.node, parent, p);
- rb_insert_color(&request->signaling.node, &b->signals);
- if (first)
- rcu_assign_pointer(b->first_signal, request);
- } else {
- __intel_engine_remove_wait(engine, &request->signaling.wait);
- i915_gem_request_put(request);
- wakeup = false;
- }
-
spin_unlock(&b->rb_lock);
if (wakeup)
wake_up_process(b->signaler);
}
-void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
+void intel_engine_cancel_signaling(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&request->lock);
- GEM_BUG_ON(!request->signaling.wait.seqno);
-
- spin_lock(&b->rb_lock);
- if (!RB_EMPTY_NODE(&request->signaling.node)) {
- if (request == rcu_access_pointer(b->first_signal)) {
- struct rb_node *rb =
- rb_next(&request->signaling.node);
- rcu_assign_pointer(b->first_signal,
- rb ? to_signaler(rb) : NULL);
- }
- rb_erase(&request->signaling.node, &b->signals);
- RB_CLEAR_NODE(&request->signaling.node);
- i915_gem_request_put(request);
- }
+ if (!READ_ONCE(request->signaling.wait.seqno))
+ return;
+ spin_lock(&b->rb_lock);
__intel_engine_remove_wait(engine, &request->signaling.wait);
-
+ if (fetch_and_zero(&request->signaling.wait.seqno))
+ __list_del_entry(&request->signaling.link);
spin_unlock(&b->rb_lock);
-
- request->signaling.wait.seqno = 0;
}
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
@@ -793,6 +804,8 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
+ INIT_LIST_HEAD(&b->signals);
+
/* Spawn a thread to provide a common bottom-half for all signals.
* As this is an asynchronous interface we cannot steal the current
* task for handling the bottom-half to the user interrupt, therefore
@@ -852,8 +865,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
/* The engines should be idle and all requests accounted for! */
WARN_ON(READ_ONCE(b->irq_wait));
WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
- WARN_ON(rcu_access_pointer(b->first_signal));
- WARN_ON(!RB_EMPTY_ROOT(&b->signals));
+ WARN_ON(!list_empty(&b->signals));
if (!IS_ERR_OR_NULL(b->signaler))
kthread_stop(b->signaler);
@@ -861,28 +873,6 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
cancel_fake_irq(engine);
}
-bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- bool busy = false;
-
- spin_lock_irq(&b->rb_lock);
-
- if (b->irq_wait) {
- wake_up_process(b->irq_wait->tsk);
- busy = true;
- }
-
- if (rcu_access_pointer(b->first_signal)) {
- wake_up_process(b->signaler);
- busy = true;
- }
-
- spin_unlock_irq(&b->rb_lock);
-
- return busy;
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 60cf4e5..dc7db8a 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -437,13 +437,45 @@ static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
return 200000;
}
+static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
+{
+ if (IS_VALLEYVIEW(dev_priv)) {
+ if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
+ return 2;
+ else if (cdclk >= 266667)
+ return 1;
+ else
+ return 0;
+ } else {
+ /*
+ * Specs are full of misinformation, but testing on actual
+ * hardware has shown that we just need to write the desired
+ * CCK divider into the Punit register.
+ */
+ return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+ }
+}
+
static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
+ u32 val;
+
cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL,
cdclk_state->vco);
+
+ mutex_lock(&dev_priv->pcu_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ mutex_unlock(&dev_priv->pcu_lock);
+
+ if (IS_VALLEYVIEW(dev_priv))
+ cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
+ DSPFREQGUAR_SHIFT;
+ else
+ cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
+ DSPFREQGUAR_SHIFT_CHV;
}
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
@@ -486,7 +518,19 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
- u32 val, cmd;
+ u32 val, cmd = cdclk_state->voltage_level;
+
+ switch (cdclk) {
+ case 400000:
+ case 333333:
+ case 320000:
+ case 266667:
+ case 200000:
+ break;
+ default:
+ MISSING_CASE(cdclk);
+ return;
+ }
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
@@ -496,13 +540,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
- cmd = 2;
- else if (cdclk == 266667)
- cmd = 1;
- else
- cmd = 0;
-
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK;
@@ -562,7 +599,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
- u32 val, cmd;
+ u32 val, cmd = cdclk_state->voltage_level;
switch (cdclk) {
case 333333:
@@ -583,13 +620,6 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- /*
- * Specs are full of misinformation, but testing on actual
- * hardware has shown that we just need to write the desired
- * CCK divider into the Punit register.
- */
- cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
-
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
@@ -621,6 +651,21 @@ static int bdw_calc_cdclk(int min_cdclk)
return 337500;
}
+static u8 bdw_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ default:
+ case 337500:
+ return 2;
+ case 450000:
+ return 0;
+ case 540000:
+ return 1;
+ case 675000:
+ return 3;
+ }
+}
+
static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
@@ -639,13 +684,20 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = 337500;
else
cdclk_state->cdclk = 675000;
+
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ bdw_calc_voltage_level(cdclk_state->cdclk);
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
- uint32_t val, data;
+ uint32_t val;
int ret;
if (WARN((I915_READ(LCPLL_CTL) &
@@ -681,25 +733,21 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
val &= ~LCPLL_CLK_FREQ_MASK;
switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
+ /* fall through */
+ case 337500:
+ val |= LCPLL_CLK_FREQ_337_5_BDW;
+ break;
case 450000:
val |= LCPLL_CLK_FREQ_450;
- data = 0;
break;
case 540000:
val |= LCPLL_CLK_FREQ_54O_BDW;
- data = 1;
- break;
- case 337500:
- val |= LCPLL_CLK_FREQ_337_5_BDW;
- data = 2;
break;
case 675000:
val |= LCPLL_CLK_FREQ_675_BDW;
- data = 3;
break;
- default:
- WARN(1, "invalid cdclk frequency\n");
- return;
}
I915_WRITE(LCPLL_CTL, val);
@@ -713,16 +761,13 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
DRM_ERROR("Switching back to LCPLL failed\n");
mutex_lock(&dev_priv->pcu_lock);
- sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+ sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
intel_update_cdclk(dev_priv);
-
- WARN(cdclk != dev_priv->cdclk.hw.cdclk,
- "cdclk requested %d kHz but got %d kHz\n",
- cdclk, dev_priv->cdclk.hw.cdclk);
}
static int skl_calc_cdclk(int min_cdclk, int vco)
@@ -748,6 +793,24 @@ static int skl_calc_cdclk(int min_cdclk, int vco)
}
}
+static u8 skl_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ default:
+ case 308571:
+ case 337500:
+ return 0;
+ case 450000:
+ case 432000:
+ return 1;
+ case 540000:
+ return 2;
+ case 617143:
+ case 675000:
+ return 3;
+ }
+}
+
static void skl_dpll0_update(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
@@ -795,10 +858,10 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
skl_dpll0_update(dev_priv, cdclk_state);
- cdclk_state->cdclk = cdclk_state->ref;
+ cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
if (cdclk_state->vco == 0)
- return;
+ goto out;
cdctl = I915_READ(CDCLK_CTL);
@@ -839,6 +902,14 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
break;
}
}
+
+ out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ skl_calc_voltage_level(cdclk_state->cdclk);
}
/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
@@ -917,11 +988,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
- u32 freq_select, pcu_ack, cdclk_ctl;
+ u32 freq_select, cdclk_ctl;
int ret;
- WARN_ON((cdclk == 24000) != (vco == 0));
-
mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -936,25 +1005,24 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
/* Choose frequency for this cdclk */
switch (cdclk) {
+ default:
+ WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
+ WARN_ON(vco != 0);
+ /* fall through */
+ case 308571:
+ case 337500:
+ freq_select = CDCLK_FREQ_337_308;
+ break;
case 450000:
case 432000:
freq_select = CDCLK_FREQ_450_432;
- pcu_ack = 1;
break;
case 540000:
freq_select = CDCLK_FREQ_540;
- pcu_ack = 2;
- break;
- case 308571:
- case 337500:
- default:
- freq_select = CDCLK_FREQ_337_308;
- pcu_ack = 0;
break;
case 617143:
case 675000:
freq_select = CDCLK_FREQ_675_617;
- pcu_ack = 3;
break;
}
@@ -993,7 +1061,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
@@ -1012,9 +1081,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
goto sanitize;
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
+ dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1072,6 +1143,7 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
if (cdclk_state.vco == 0)
cdclk_state.vco = 8100000;
cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
+ cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
skl_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1087,8 +1159,9 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cdclk_state.ref;
+ cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
+ cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
skl_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1117,16 +1190,22 @@ static int glk_calc_cdclk(int min_cdclk)
return 79200;
}
+static u8 bxt_calc_voltage_level(int cdclk)
+{
+ return DIV_ROUND_UP(cdclk, 25000);
+}
+
static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
int ratio;
- if (cdclk == dev_priv->cdclk.hw.ref)
+ if (cdclk == dev_priv->cdclk.hw.bypass)
return 0;
switch (cdclk) {
default:
MISSING_CASE(cdclk);
+ /* fall through */
case 144000:
case 288000:
case 384000:
@@ -1145,12 +1224,13 @@ static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
int ratio;
- if (cdclk == dev_priv->cdclk.hw.ref)
+ if (cdclk == dev_priv->cdclk.hw.bypass)
return 0;
switch (cdclk) {
default:
MISSING_CASE(cdclk);
+ /* fall through */
case 79200:
case 158400:
case 316800:
@@ -1188,10 +1268,10 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
bxt_de_pll_update(dev_priv, cdclk_state);
- cdclk_state->cdclk = cdclk_state->ref;
+ cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
if (cdclk_state->vco == 0)
- return;
+ goto out;
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@@ -1215,6 +1295,14 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
}
cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+
+ out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ bxt_calc_voltage_level(cdclk_state->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
@@ -1263,31 +1351,34 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
/* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- case 8:
- divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- break;
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
+ default:
+ WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
+ WARN_ON(vco != 0);
+ /* fall through */
+ case 2:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
case 3:
WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ case 4:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
- default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
- WARN_ON(vco != 0);
-
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ case 8:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_4;
break;
}
- /* Inform power controller of upcoming frequency change */
+ /*
+ * Inform power controller of upcoming frequency change. BSpec
+ * requires us to wait up to 150usec, but that leads to timeouts;
+ * the 2ms used here is based on experiment.
+ */
mutex_lock(&dev_priv->pcu_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000);
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 150, 2);
mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
@@ -1318,8 +1409,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
I915_WRITE(CDCLK_CTL, val);
mutex_lock(&dev_priv->pcu_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- DIV_ROUND_UP(cdclk, 25000));
+ /*
+	 * The timeout isn't specified; the 2ms used here is based on
+ * experiment.
+ * FIXME: Waiting for the request completion could be delayed until
+ * the next PCODE request based on BSpec.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_state->voltage_level, 150, 2);
mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
@@ -1336,9 +1434,10 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
u32 cdctl, expected;
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
+ dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1411,6 +1510,7 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = bxt_calc_cdclk(0);
cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
}
+ cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1426,8 +1526,9 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cdclk_state.ref;
+ cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
+ cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1442,6 +1543,19 @@ static int cnl_calc_cdclk(int min_cdclk)
return 168000;
}
+static u8 cnl_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ default:
+ case 168000:
+ return 0;
+ case 336000:
+ return 1;
+ case 528000:
+ return 2;
+ }
+}
+
static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
@@ -1472,10 +1586,10 @@ static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
cnl_cdclk_pll_update(dev_priv, cdclk_state);
- cdclk_state->cdclk = cdclk_state->ref;
+ cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
if (cdclk_state->vco == 0)
- return;
+ goto out;
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@@ -1492,6 +1606,14 @@ static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
}
cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+
+ out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_state->voltage_level =
+ cnl_calc_voltage_level(cdclk_state->cdclk);
}
static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
@@ -1532,7 +1654,7 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
- u32 val, divider, pcu_ack;
+ u32 val, divider;
int ret;
mutex_lock(&dev_priv->pcu_lock);
@@ -1549,30 +1671,15 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
/* cdclk = vco / 2 / div{1,2} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- break;
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
+ WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
WARN_ON(vco != 0);
-
+ /* fall through */
+ case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
- }
-
- switch (cdclk) {
- case 528000:
- pcu_ack = 2;
- break;
- case 336000:
- pcu_ack = 1;
- break;
- case 168000:
- default:
- pcu_ack = 0;
+ case 4:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
}
@@ -1593,22 +1700,30 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
+
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
int ratio;
- if (cdclk == dev_priv->cdclk.hw.ref)
+ if (cdclk == dev_priv->cdclk.hw.bypass)
return 0;
switch (cdclk) {
default:
MISSING_CASE(cdclk);
+ /* fall through */
case 168000:
case 336000:
ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
@@ -1626,9 +1741,10 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
u32 cdctl, expected;
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
+ dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1662,6 +1778,199 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
+static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
+{
+ int ranges_24[] = { 312000, 552000, 648000 };
+ int ranges_19_38[] = { 307200, 556800, 652800 };
+ int *ranges;
+
+ switch (ref) {
+ default:
+ MISSING_CASE(ref);
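+		/* fall through */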
+ case 24000:
+ ranges = ranges_24;
+ break;
+ case 19200:
+ case 38400:
+ ranges = ranges_19_38;
+ break;
+ }
+
+ if (min_cdclk > ranges[1])
+ return ranges[2];
+ else if (min_cdclk > ranges[0])
+ return ranges[1];
+ else
+ return ranges[0];
+}
+
+static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+ int ratio;
+
+ if (cdclk == dev_priv->cdclk.hw.bypass)
+ return 0;
+
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
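+		/* fall through */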
+ case 307200:
+ case 556800:
+ case 652800:
+ WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
+ dev_priv->cdclk.hw.ref != 38400);
+ break;
+ case 312000:
+ case 552000:
+ case 648000:
+ WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+ }
+
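+	/* cdclk = vco / 2, so the PLL ratio works out to 2 * cdclk / ref */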
+ ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
+
+ return dev_priv->cdclk.hw.ref * ratio;
+}
+
+static void icl_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ unsigned int cdclk = cdclk_state->cdclk;
+ unsigned int vco = cdclk_state->vco;
+ int ret;
+
+ mutex_lock(&dev_priv->pcu_lock);
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ mutex_unlock(&dev_priv->pcu_lock);
+ if (ret) {
+ DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+ ret);
+ return;
+ }
+
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_disable(dev_priv);
+
+ if (dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_enable(dev_priv, vco);
+
+ I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
+ skl_cdclk_decimal(cdclk));
+
+ mutex_lock(&dev_priv->pcu_lock);
+ /* TODO: add proper DVFS support. */
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2);
+ mutex_unlock(&dev_priv->pcu_lock);
+
+ intel_update_cdclk(dev_priv);
+}
+
+static void icl_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 val;
+
+ cdclk_state->bypass = 50000;
+
+ val = I915_READ(SKL_DSSM);
+ switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
+ default:
+ MISSING_CASE(val);
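+		/* fall through */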
+ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
+ cdclk_state->ref = 24000;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
+ cdclk_state->ref = 19200;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
+ cdclk_state->ref = 38400;
+ break;
+ }
+
+ val = I915_READ(BXT_DE_PLL_ENABLE);
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
+ (val & BXT_DE_PLL_LOCK) == 0) {
+ /*
+ * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
+ * setting it to zero is a way to signal that.
+ */
+ cdclk_state->vco = 0;
+ cdclk_state->cdclk = cdclk_state->bypass;
+ return;
+ }
+
+ cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
+
+ val = I915_READ(CDCLK_CTL);
+ WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
+
+ cdclk_state->cdclk = cdclk_state->vco / 2;
+}
+
+/**
+ * icl_init_cdclk - Initialize CDCLK on ICL
+ * @dev_priv: i915 device
+ *
+ * Initialize CDCLK for ICL. This consists mainly of initializing
+ * dev_priv->cdclk.hw and sanitizing the state of the hardware if needed. This
+ * is generally done only during the display core initialization sequence, after
+ * which the DMC will take care of turning CDCLK off/on as needed.
+ */
+void icl_init_cdclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state sanitized_state;
+ u32 val;
+
+ /* This sets dev_priv->cdclk.hw. */
+ intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+
+ /* This means CDCLK disabled. */
+ if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+ goto sanitize;
+
+ val = I915_READ(CDCLK_CTL);
+
+ if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0)
+ goto sanitize;
+
+ if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
+ skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
+ goto sanitize;
+
+ return;
+
+sanitize:
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+
+ sanitized_state.ref = dev_priv->cdclk.hw.ref;
+ sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
+ sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
+ sanitized_state.cdclk);
+
+ icl_set_cdclk(dev_priv, &sanitized_state);
+}
+
+/**
+ * icl_uninit_cdclk - Uninitialize CDCLK on ICL
+ * @dev_priv: i915 device
+ *
+ * Uninitialize CDCLK for ICL. This is done only during the display core
+ * uninitialization sequence.
+ */
+void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+
+ cdclk_state.cdclk = cdclk_state.bypass;
+ cdclk_state.vco = 0;
+
+ icl_set_cdclk(dev_priv, &cdclk_state);
+}
+
/**
* cnl_init_cdclk - Initialize CDCLK on CNL
* @dev_priv: i915 device
@@ -1685,6 +1994,7 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cnl_calc_cdclk(0);
cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
+ cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
cnl_set_cdclk(dev_priv, &cdclk_state);
}
@@ -1700,24 +2010,51 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cdclk_state.ref;
+ cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
+ cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
cnl_set_cdclk(dev_priv, &cdclk_state);
}
/**
- * intel_cdclk_state_compare - Determine if two CDCLK states differ
+ * intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes
* @a: first CDCLK state
* @b: second CDCLK state
*
* Returns:
- * True if the CDCLK states are identical, false if they differ.
+ * True if the CDCLK states require pipes to be off during reprogramming, false if not.
*/
-bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b)
{
- return memcmp(a, b, sizeof(*a)) == 0;
+ return a->cdclk != b->cdclk ||
+ a->vco != b->vco ||
+ a->ref != b->ref;
+}
+
+/**
+ * intel_cdclk_changed - Determine if two CDCLK states are different
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states don't match, false if they do.
+ */
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
+{
+ return intel_cdclk_needs_modeset(a, b) ||
+ a->voltage_level != b->voltage_level;
+}
+
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+ const char *context)
+{
+ DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
+ context, cdclk_state->cdclk, cdclk_state->vco,
+ cdclk_state->ref, cdclk_state->bypass,
+ cdclk_state->voltage_level);
}
/**
@@ -1731,29 +2068,28 @@ bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
- if (intel_cdclk_state_compare(&dev_priv->cdclk.hw, cdclk_state))
+ if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
return;
if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
return;
- DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz, VCO %d kHz, ref %d kHz\n",
- cdclk_state->cdclk, cdclk_state->vco,
- cdclk_state->ref);
+ intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
dev_priv->display.set_cdclk(dev_priv, cdclk_state);
+
+ if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
+ "cdclk state doesn't match!\n")) {
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
+ intel_dump_cdclk_state(cdclk_state, "[sw state]");
+ }
}
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
if (INTEL_GEN(dev_priv) >= 10)
- /*
- * FIXME: Switch to DIV_ROUND_UP(pixel_rate, 2)
- * once DDI clock voltage requirements are
- * handled correctly.
- */
- return pixel_rate;
+ return DIV_ROUND_UP(pixel_rate, 2);
else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
@@ -1783,7 +2119,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
/* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
@@ -1810,6 +2146,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
+ /*
+ * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+	 * than 320000 kHz.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+ IS_VALLEYVIEW(dev_priv))
+ min_cdclk = max(320000, min_cdclk);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
@@ -1846,6 +2190,43 @@ static int intel_compute_min_cdclk(struct drm_atomic_state *state)
return min_cdclk;
}
+/*
+ * Note that this function assumes that 0 is
+ * the lowest voltage value, and higher values
+ * correspond to increasingly higher voltages.
+ *
+ * Should that relationship no longer hold on
+ * future platforms this code will need to be
+ * adjusted.
+ */
+static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ u8 min_voltage_level;
+ int i;
+ enum pipe pipe;
+
+ memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
+ sizeof(state->min_voltage_level));
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->base.enable)
+ state->min_voltage_level[i] =
+ crtc_state->min_voltage_level;
+ else
+ state->min_voltage_level[i] = 0;
+ }
+
+ min_voltage_level = 0;
+ for_each_pipe(dev_priv, pipe)
+ min_voltage_level = max(state->min_voltage_level[pipe],
+ min_voltage_level);
+
+ return min_voltage_level;
+}
+
static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -1859,11 +2240,15 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ vlv_calc_voltage_level(dev_priv, cdclk);
if (!intel_state->active_crtcs) {
cdclk = vlv_calc_cdclk(dev_priv, 0);
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ vlv_calc_voltage_level(dev_priv, cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -1888,11 +2273,15 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = bdw_calc_cdclk(min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ bdw_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
cdclk = bdw_calc_cdclk(0);
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ bdw_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -1923,12 +2312,16 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ skl_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
cdclk = skl_calc_cdclk(0, vco);
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ skl_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -1957,6 +2350,8 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ bxt_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
if (IS_GEMINILAKE(dev_priv)) {
@@ -1969,6 +2364,8 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ bxt_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -1992,6 +2389,9 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
+ intel_state->cdclk.logical.voltage_level =
+ max(cnl_calc_voltage_level(cdclk),
+ cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
cdclk = cnl_calc_cdclk(0);
@@ -1999,6 +2399,8 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
+ intel_state->cdclk.actual.voltage_level =
+ cnl_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@@ -2007,17 +2409,42 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
return 0;
}
+static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ unsigned int ref = intel_state->cdclk.logical.ref;
+ int min_cdclk, cdclk, vco;
+
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
+
+ cdclk = icl_calc_cdclk(min_cdclk, ref);
+ vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+
+ intel_state->cdclk.logical.vco = vco;
+ intel_state->cdclk.logical.cdclk = cdclk;
+
+ if (!intel_state->active_crtcs) {
+ cdclk = icl_calc_cdclk(0, ref);
+ vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+
+ intel_state->cdclk.actual.vco = vco;
+ intel_state->cdclk.actual.cdclk = cdclk;
+ } else {
+ intel_state->cdclk.actual = intel_state->cdclk.logical;
+ }
+
+ return 0;
+}
+
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
if (INTEL_GEN(dev_priv) >= 10)
- /*
- * FIXME: Allow '2 * max_cdclk_freq'
- * once DDI clock voltage requirements are
- * handled correctly.
- */
- return max_cdclk_freq;
+ return 2 * max_cdclk_freq;
else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Limiting to 99% as a temporary workaround. See
@@ -2029,7 +2456,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
- else if (INTEL_INFO(dev_priv)->gen < 4)
+ else if (INTEL_GEN(dev_priv) < 4)
return 2*max_cdclk_freq*90/100;
else
return max_cdclk_freq*90/100;
@@ -2045,7 +2472,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ if (dev_priv->cdclk.hw.ref == 24000)
+ dev_priv->max_cdclk_freq = 648000;
+ else
+ dev_priv->max_cdclk_freq = 652800;
+ } else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->max_cdclk_freq = 528000;
} else if (IS_GEN9_BC(dev_priv)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
@@ -2116,10 +2548,6 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
- dev_priv->cdclk.hw.cdclk, dev_priv->cdclk.hw.vco,
- dev_priv->cdclk.hw.ref);
-
/*
* 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
* Programmng [sic] note: bit[9:2] should be programmed to the number
@@ -2155,6 +2583,30 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
return divider + fraction;
}
+static int icp_rawclk(struct drm_i915_private *dev_priv)
+{
+ u32 rawclk;
+ int divider, numerator, denominator, frequency;
+
+ if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
+ frequency = 24000;
+ divider = 23;
+ numerator = 0;
+ denominator = 0;
+ } else {
+ frequency = 19200;
+ divider = 18;
+ numerator = 1;
+ denominator = 4;
+ }
+
+ rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
+ ICP_RAWCLK_DEN(denominator);
+
+ I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
+ return frequency;
+}
+
static int pch_rawclk(struct drm_i915_private *dev_priv)
{
return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
@@ -2202,8 +2654,9 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
-
- if (HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_ICP(dev_priv))
+ dev_priv->rawclk_freq = icp_rawclk(dev_priv);
+ else if (HAS_PCH_CNP(dev_priv))
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);
@@ -2248,9 +2701,14 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.set_cdclk = cnl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
cnl_modeset_calc_cdclk;
+ } else if (IS_ICELAKE(dev_priv)) {
+ dev_priv->display.set_cdclk = icl_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
}
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ dev_priv->display.get_cdclk = icl_get_cdclk;
+ else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.get_cdclk = cnl_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index b8315bc..c6a7bea 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -39,7 +39,7 @@
#define CTM_COEFF_NEGATIVE(coeff) (((coeff) & CTM_COEFF_SIGN) != 0)
#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
-#define LEGACY_LUT_LENGTH (sizeof(struct drm_color_lut) * 256)
+#define LEGACY_LUT_LENGTH 256
/* Post offset values for RGB->YCBCR conversion */
#define POSTOFF_RGB_TO_YUV_HI 0x800
@@ -66,48 +66,49 @@
* of the CTM coefficient and we write the value from bit 3. We also round the
* value.
*/
-#define I9XX_CSC_COEFF_FP(coeff, fbits) \
+#define ILK_CSC_COEFF_FP(coeff, fbits) \
(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
-#define I9XX_CSC_COEFF_LIMITED_RANGE \
- I9XX_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
-#define I9XX_CSC_COEFF_1_0 \
- ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+#define ILK_CSC_COEFF_LIMITED_RANGE \
+ ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
+#define ILK_CSC_COEFF_1_0 \
+ ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
{
return !state->degamma_lut &&
!state->ctm &&
state->gamma_lut &&
- state->gamma_lut->length == LEGACY_LUT_LENGTH;
+ drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH;
}
/*
* When using limited range, multiply the matrix given by userspace by
- * the matrix that we would use for the limited range. We do the
- * multiplication in U2.30 format.
+ * the matrix that we would use for the limited range.
*/
-static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
+static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
{
int i;
- for (i = 0; i < 9; i++)
- result[i] = 0;
+ for (i = 0; i < 9; i++) {
+ u64 user_coeff = input[i];
+ u32 limited_coeff = CTM_COEFF_LIMITED_RANGE;
+ u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0,
+ CTM_COEFF_4_0 - 1) >> 2;
- for (i = 0; i < 3; i++) {
- int64_t user_coeff = input[i * 3 + i];
- uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2;
- uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff),
- 0,
- CTM_COEFF_4_0 - 1) >> 2;
-
- result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27;
- if (CTM_COEFF_NEGATIVE(user_coeff))
- result[i * 3 + i] |= CTM_COEFF_SIGN;
+ /*
+		 * By scaling every coefficient with limited range (16-235)
+		 * vs full range (0-255) the final output will be scaled down to
+ * fit in the limited range supported by the panel.
+ */
+ result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30;
+ result[i] |= user_coeff & CTM_COEFF_SIGN;
}
+
+ return result;
}
-static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
{
int pipe = intel_crtc->pipe;
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
@@ -131,8 +132,7 @@ static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
}
-/* Set up the pipe CSC unit. */
-static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@@ -140,21 +140,27 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
+ bool limited_color_range = false;
+
+ /*
+ * FIXME if there's a gamma LUT after the CSC, we should
+ * do the range compression using the gamma LUT instead.
+ */
+ if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ limited_color_range = intel_crtc_state->limited_color_range;
if (intel_crtc_state->ycbcr420) {
- i9xx_load_ycbcr_conversion_matrix(intel_crtc);
+ ilk_load_ycbcr_conversion_matrix(intel_crtc);
return;
} else if (crtc_state->ctm) {
- struct drm_color_ctm *ctm =
- (struct drm_color_ctm *)crtc_state->ctm->data;
- uint64_t input[9] = { 0, };
-
- if (intel_crtc_state->limited_color_range) {
- ctm_mult_by_limited(input, ctm->matrix);
- } else {
- for (i = 0; i < ARRAY_SIZE(input); i++)
- input[i] = ctm->matrix[i];
- }
+ struct drm_color_ctm *ctm = crtc_state->ctm->data;
+ const u64 *input;
+ u64 temp[9];
+
+ if (limited_color_range)
+ input = ctm_mult_by_limited(temp, ctm->matrix);
+ else
+ input = ctm->matrix;
/*
* Convert fixed point S31.32 input to format supported by the
@@ -175,21 +181,21 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
if (abs_coeff < CTM_COEFF_0_125)
coeffs[i] |= (3 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 12);
+ ILK_CSC_COEFF_FP(abs_coeff, 12);
else if (abs_coeff < CTM_COEFF_0_25)
coeffs[i] |= (2 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 11);
+ ILK_CSC_COEFF_FP(abs_coeff, 11);
else if (abs_coeff < CTM_COEFF_0_5)
coeffs[i] |= (1 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 10);
+ ILK_CSC_COEFF_FP(abs_coeff, 10);
else if (abs_coeff < CTM_COEFF_1_0)
- coeffs[i] |= I9XX_CSC_COEFF_FP(abs_coeff, 9);
+ coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
else if (abs_coeff < CTM_COEFF_2_0)
coeffs[i] |= (7 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 8);
+ ILK_CSC_COEFF_FP(abs_coeff, 8);
else
coeffs[i] |= (6 << 12) |
- I9XX_CSC_COEFF_FP(abs_coeff, 7);
+ ILK_CSC_COEFF_FP(abs_coeff, 7);
}
} else {
/*
@@ -201,11 +207,11 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
* into consideration.
*/
for (i = 0; i < 3; i++) {
- if (intel_crtc_state->limited_color_range)
+ if (limited_color_range)
coeffs[i * 3 + i] =
- I9XX_CSC_COEFF_LIMITED_RANGE;
+ ILK_CSC_COEFF_LIMITED_RANGE;
else
- coeffs[i * 3 + i] = I9XX_CSC_COEFF_1_0;
+ coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0;
}
}
@@ -225,7 +231,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) > 6) {
uint16_t postoff = 0;
- if (intel_crtc_state->limited_color_range)
+ if (limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -236,7 +242,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
- if (intel_crtc_state->limited_color_range)
+ if (limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -255,8 +261,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
uint32_t mode;
if (state->ctm) {
- struct drm_color_ctm *ctm =
- (struct drm_color_ctm *) state->ctm->data;
+ struct drm_color_ctm *ctm = state->ctm->data;
uint16_t coeffs[9] = { 0, };
int i;
@@ -323,7 +328,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
}
if (blob) {
- struct drm_color_lut *lut = (struct drm_color_lut *) blob->data;
+ struct drm_color_lut *lut = blob->data;
for (i = 0; i < 256; i++) {
uint32_t word =
(drm_color_lut_extract(lut[i].red, 8) << 16) |
@@ -370,7 +375,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
*/
if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
- hsw_disable_ips(intel_crtc);
+ hsw_disable_ips(intel_crtc_state);
reenable_ips = true;
}
@@ -380,7 +385,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
i9xx_load_luts(crtc_state);
if (reenable_ips)
- hsw_enable_ips(intel_crtc);
+ hsw_enable_ips(intel_crtc_state);
}
static void bdw_load_degamma_lut(struct drm_crtc_state *state)
@@ -393,8 +398,7 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
if (state->degamma_lut) {
- struct drm_color_lut *lut =
- (struct drm_color_lut *) state->degamma_lut->data;
+ struct drm_color_lut *lut = state->degamma_lut->data;
for (i = 0; i < lut_size; i++) {
uint32_t word =
@@ -428,8 +432,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
offset);
if (state->gamma_lut) {
- struct drm_color_lut *lut =
- (struct drm_color_lut *) state->gamma_lut->data;
+ struct drm_color_lut *lut = state->gamma_lut->data;
for (i = 0; i < lut_size; i++) {
uint32_t word =
@@ -561,7 +564,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
if (state->degamma_lut) {
- lut = (struct drm_color_lut *) state->degamma_lut->data;
+ lut = state->degamma_lut->data;
lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.14 format. */
@@ -576,7 +579,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
if (state->gamma_lut) {
- lut = (struct drm_color_lut *) state->gamma_lut->data;
+ lut = state->gamma_lut->data;
lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.10 format. */
@@ -616,19 +619,17 @@ int intel_color_check(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
size_t gamma_length, degamma_length;
- degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size *
- sizeof(struct drm_color_lut);
- gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size *
- sizeof(struct drm_color_lut);
+ degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
/*
* We allow both degamma & gamma luts at the right size or
* NULL.
*/
if ((!crtc_state->degamma_lut ||
- crtc_state->degamma_lut->length == degamma_length) &&
+ drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) &&
(!crtc_state->gamma_lut ||
- crtc_state->gamma_lut->length == gamma_length))
+ drm_color_lut_size(crtc_state->gamma_lut) == gamma_length))
return 0;
/*
@@ -651,14 +652,14 @@ void intel_color_init(struct drm_crtc *crtc)
dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
dev_priv->display.load_luts = cherryview_load_luts;
} else if (IS_HASWELL(dev_priv)) {
- dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
dev_priv->display.load_luts = haswell_load_luts;
} else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
IS_BROXTON(dev_priv)) {
- dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
} else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
dev_priv->display.load_luts = glk_load_luts;
} else {
dev_priv->display.load_luts = i9xx_load_luts;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 437339f..c0a8805 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -119,6 +119,8 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
@@ -217,11 +219,9 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!intel_crtc->config->has_pch_encoder);
+ WARN_ON(!old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -245,46 +245,42 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
}
static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!intel_crtc->config->has_pch_encoder);
+ WARN_ON(!crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
static void hsw_pre_enable_crt(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
- WARN_ON(!intel_crtc->config->has_pch_encoder);
+ WARN_ON(!crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
+ dev_priv->display.fdi_link_train(crtc, crtc_state);
}
static void hsw_enable_crt(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
- WARN_ON(!intel_crtc->config->has_pch_encoder);
+ WARN_ON(!crtc_state->has_pch_encoder);
- intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
+ intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
@@ -293,10 +289,10 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
}
static void intel_enable_crt(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
+ intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
}
static enum drm_mode_status
@@ -308,9 +304,6 @@ intel_crt_mode_valid(struct drm_connector *connector,
int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
@@ -481,14 +474,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
-/**
- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
- *
- * Not for i915G/i915GM
- *
- * \return true if CRT is connected.
- * \return false if CRT is disconnected.
- */
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -814,10 +799,11 @@ intel_crt_detect(struct drm_connector *connector,
else
status = connector_status_unknown;
intel_release_load_detect_pipe(connector, &tmp, ctx);
- } else if (ret == 0)
+ } else if (ret == 0) {
status = connector_status_unknown;
- else if (ret < 0)
+ } else {
status = ret;
+ }
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain);
@@ -970,8 +956,10 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
if (I915_HAS_HOTPLUG(dev_priv) &&
- !dmi_check_system(intel_spurious_crt_detect))
+ !dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
+ crt->base.hotplug = intel_encoder_hotplug;
+ }
if (HAS_DDI(dev_priv)) {
crt->base.port = PORT_E;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index da9de47..41e6c75 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -37,16 +37,17 @@
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
-#define I915_CSR_CNL "i915/cnl_dmc_ver1_04.bin"
-#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
+MODULE_FIRMWARE(I915_CSR_CNL);
+#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
-#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
-#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26)
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
MODULE_FIRMWARE(I915_CSR_BXT);
@@ -198,6 +199,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
si = bxt_stepping_info;
} else {
size = 0;
+ si = NULL;
}
if (INTEL_REVID(dev_priv) < size)
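The DMC/CSR bumps above change both the firmware blob that gets requested and the packed version the loader requires. A sketch of the assumed CSR_VERSION() encoding (major in the high 16 bits, minor in the low), so e.g. the new KBL requirement CSR_VERSION(1, 4) packs to 0x00010004:

	/* assumed encoding behind the *_CSR_VERSION_REQUIRED defines above */
	#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
	#define CSR_VERSION_MAJOR(version)	((version) >> 16)
	#define CSR_VERSION_MINOR(version)	((version) & 0xffff)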
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 58a3755..8c2d778 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -25,6 +25,7 @@
*
*/
+#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
@@ -492,24 +493,6 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
{ 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
};
-enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder)
-{
- switch (encoder->type) {
- case INTEL_OUTPUT_DP_MST:
- return enc_to_mst(&encoder->base)->primary->port;
- case INTEL_OUTPUT_DP:
- case INTEL_OUTPUT_EDP:
- case INTEL_OUTPUT_HDMI:
- case INTEL_OUTPUT_UNKNOWN:
- return enc_to_dig_port(&encoder->base)->port;
- case INTEL_OUTPUT_ANALOG:
- return PORT_E;
- default:
- MISSING_CASE(encoder->type);
- return PORT_A;
- }
-}
-
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -811,31 +794,24 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
-static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
+static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_entries;
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;
- switch (encoder->type) {
- case INTEL_OUTPUT_EDP:
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
+ &n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
&n_entries);
- break;
- case INTEL_OUTPUT_DP:
+ else
ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
&n_entries);
- break;
- case INTEL_OUTPUT_ANALOG:
- ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
- &n_entries);
- break;
- default:
- MISSING_CASE(encoder->type);
- return;
- }
/* If we're boosting the current, set bit 31 of trans1 */
if (IS_GEN9_BC(dev_priv) &&
@@ -861,7 +837,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int n_entries;
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;
ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
@@ -937,7 +913,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
- intel_prepare_dp_ddi_buffers(encoder);
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
}
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
@@ -1448,19 +1424,16 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
ddi_dotclock_get(pipe_config);
}
-static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
{
- struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state;
struct dpll clock;
/* For DDI ports we always use a shared PLL. */
- if (WARN_ON(pll_id == DPLL_ID_PRIVATE))
+ if (WARN_ON(!crtc_state->shared_dpll))
return 0;
- pll = &dev_priv->shared_dplls[pll_id];
- state = &pll->state.hw_state;
+ state = &crtc_state->dpll_hw_state;
clock.m1 = 2;
clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
@@ -1474,19 +1447,15 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
}
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
- enum intel_dpll_id pll_id = port;
-
- pipe_config->port_clock = bxt_calc_pll_link(dev_priv, pll_id);
+ pipe_config->port_clock = bxt_calc_pll_link(pipe_config);
ddi_dotclock_get(pipe_config);
}
-void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1504,33 +1473,34 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- int type = encoder->type;
- uint32_t temp;
+ u32 temp;
- if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
- WARN_ON(transcoder_is_dsi(cpu_transcoder));
+ if (!intel_crtc_has_dp_encoder(crtc_state))
+ return;
- temp = TRANS_MSA_SYNC_CLK;
- switch (crtc_state->pipe_bpp) {
- case 18:
- temp |= TRANS_MSA_6_BPC;
- break;
- case 24:
- temp |= TRANS_MSA_8_BPC;
- break;
- case 30:
- temp |= TRANS_MSA_10_BPC;
- break;
- case 36:
- temp |= TRANS_MSA_12_BPC;
- break;
- default:
- BUG();
- }
- I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+ WARN_ON(transcoder_is_dsi(cpu_transcoder));
+
+ temp = TRANS_MSA_SYNC_CLK;
+ switch (crtc_state->pipe_bpp) {
+ case 18:
+ temp |= TRANS_MSA_6_BPC;
+ break;
+ case 24:
+ temp |= TRANS_MSA_8_BPC;
+ break;
+ case 30:
+ temp |= TRANS_MSA_10_BPC;
+ break;
+ case 36:
+ temp |= TRANS_MSA_12_BPC;
+ break;
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ break;
}
+
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
@@ -1540,6 +1510,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
uint32_t temp;
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
@@ -1555,8 +1526,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum port port = intel_ddi_get_encoder_port(encoder);
- int type = encoder->type;
+ enum port port = encoder->port;
uint32_t temp;
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
@@ -1611,7 +1581,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
}
}
- if (type == INTEL_OUTPUT_HDMI) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
if (crtc_state->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
@@ -1621,19 +1591,15 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
- } else if (type == INTEL_OUTPUT_ANALOG) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (crtc_state->fdi_lanes - 1) << 1;
- } else if (type == INTEL_OUTPUT_DP ||
- type == INTEL_OUTPUT_EDP) {
- temp |= TRANS_DDI_MODE_SELECT_DP_SST;
- temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- } else if (type == INTEL_OUTPUT_DP_MST) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else {
- WARN(1, "Invalid encoder type %d for pipe %c\n",
- encoder->type, pipe_name(pipe));
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+ temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
@@ -1650,13 +1616,42 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
I915_WRITE(reg, val);
}
+int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+ bool enable)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = 0;
+ int ret = 0;
+ uint32_t tmp;
+
+ if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv,
+ intel_encoder->power_domain)))
+ return -ENXIO;
+
+ if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
+ ret = -EIO;
+ goto out;
+ }
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe));
+ if (enable)
+ tmp |= TRANS_DDI_HDCP_SIGNALLING;
+ else
+ tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
+out:
+ intel_display_power_put(dev_priv, intel_encoder->power_domain);
+ return ret;
+}
+
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
uint32_t tmp;
@@ -1715,9 +1710,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
+ enum pipe p;
u32 tmp;
- int i;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
@@ -1752,15 +1747,17 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
goto out;
}
- for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+ for_each_pipe(dev_priv, p) {
+ enum transcoder cpu_transcoder = (enum transcoder) p;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
TRANS_DDI_MODE_SELECT_DP_MST)
goto out;
- *pipe = i;
+ *pipe = p;
ret = true;
goto out;
@@ -1800,7 +1797,7 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
@@ -1836,8 +1833,8 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
uint8_t iboost;
if (type == INTEL_OUTPUT_HDMI)
@@ -1939,8 +1936,8 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
const struct cnl_ddi_buf_trans *ddi_translations;
+ enum port port = encoder->port;
int n_entries, ln;
u32 val;
@@ -2003,7 +2000,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
int width, rate, ln;
u32 val;
@@ -2122,13 +2119,13 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
uint32_t val;
if (WARN_ON(!pll))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll_lock);
if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
@@ -2150,13 +2147,13 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
val = I915_READ(DPLL_CTRL2);
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
- DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
I915_WRITE(DPLL_CTRL2, val);
- } else if (INTEL_INFO(dev_priv)->gen < 9) {
+ } else if (INTEL_GEN(dev_priv) < 9) {
I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
@@ -2166,7 +2163,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
if (IS_CANNONLAKE(dev_priv))
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
@@ -2184,7 +2181,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
@@ -2205,11 +2202,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(encoder, level, encoder->type);
else
- intel_prepare_dp_ddi_buffers(encoder);
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder);
- if (!is_mst)
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
@@ -2222,7 +2218,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
int level = intel_ddi_hdmi_level(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
@@ -2254,6 +2250,19 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
+ /*
+ * When called from DP MST code:
+ * - conn_state will be NULL
+ * - encoder will be the main encoder (ie. mst->primary)
+ * - the main connector associated with this port
+ * won't be active or linked to a crtc
+ * - crtc_state will be the state of the first stream to
+ * be activated on this port, and it may not be the same
+ * stream that will be deactivated last, but each stream
+ * should have a state that is identical when it comes to
+ * the DP link parameters
+ */
+
WARN_ON(crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -2267,7 +2276,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
static void intel_disable_ddi_buf(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
bool wait = false;
u32 val;
@@ -2294,19 +2303,12 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_dp *intel_dp = &dig_port->dp;
- /*
- * old_crtc_state and old_conn_state are NULL when called from
- * DP_MST. The main connector associated with this port is never
- * bound to a crtc for MST.
- */
- bool is_mst = !old_crtc_state;
/*
* Power down sink before disabling the port, otherwise we end
* up getting interrupts from the sink on detecting link loss.
*/
- if (!is_mst)
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_disable_ddi_buf(encoder);
@@ -2343,12 +2345,19 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
/*
- * old_crtc_state and old_conn_state are NULL when called from
- * DP_MST. The main connector associated with this port is never
- * bound to a crtc for MST.
+ * When called from DP MST code:
+ * - old_conn_state will be NULL
+ * - encoder will be the main encoder (ie. mst->primary)
+ * - the main connector associated with this port
+ * won't be active or linked to a crtc
+ * - old_crtc_state will be the state of the last stream to
+ * be deactivated on this port, and it may not be the same
+ * stream that was activated last, but each stream
+ * should have a state that is identical when it comes to
+ * the DP link parameters
*/
- if (old_crtc_state &&
- intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_post_disable_hdmi(encoder,
old_crtc_state, old_conn_state);
else
@@ -2396,7 +2405,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp);
@@ -2415,13 +2424,55 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum port port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
intel_hdmi_handle_sink_scrambling(encoder,
conn_state->connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling);
+ /* Display WA #1143: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv)) {
+ /*
+ * For some reason these chicken bits have been
+ * stuffed into a transcoder register, even though
+ * the bits affect a specific DDI port rather than
+ * a specific transcoder.
+ */
+ static const enum transcoder port_to_transcoder[] = {
+ [PORT_A] = TRANSCODER_EDP,
+ [PORT_B] = TRANSCODER_A,
+ [PORT_C] = TRANSCODER_B,
+ [PORT_D] = TRANSCODER_C,
+ [PORT_E] = TRANSCODER_A,
+ };
+ enum transcoder transcoder = port_to_transcoder[port];
+ u32 val;
+
+ val = I915_READ(CHICKEN_TRANS(transcoder));
+
+ if (port == PORT_E)
+ val |= DDIE_TRAINING_OVERRIDE_ENABLE |
+ DDIE_TRAINING_OVERRIDE_VALUE;
+ else
+ val |= DDI_TRAINING_OVERRIDE_ENABLE |
+ DDI_TRAINING_OVERRIDE_VALUE;
+
+ I915_WRITE(CHICKEN_TRANS(transcoder), val);
+ POSTING_READ(CHICKEN_TRANS(transcoder));
+
+ udelay(1);
+
+ if (port == PORT_E)
+ val &= ~(DDIE_TRAINING_OVERRIDE_ENABLE |
+ DDIE_TRAINING_OVERRIDE_VALUE);
+ else
+ val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
+ DDI_TRAINING_OVERRIDE_VALUE);
+
+ I915_WRITE(CHICKEN_TRANS(transcoder), val);
+ }
+
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
@@ -2441,6 +2492,11 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
else
intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+
+ /* Enable hdcp if it's desired */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ intel_hdcp_enable(to_intel_connector(conn_state->connector));
}
static void intel_disable_ddi_dp(struct intel_encoder *encoder,
@@ -2449,8 +2505,11 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp->link_trained = false;
+
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
@@ -2462,7 +2521,8 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
intel_hdmi_handle_sink_scrambling(encoder,
old_conn_state->connector,
@@ -2473,6 +2533,8 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
+
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
else
@@ -2493,7 +2555,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
uint32_t val;
bool wait = false;
@@ -2534,24 +2596,31 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600);
}
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc)
+static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- u32 temp;
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return false;
- if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
- temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
- return true;
- }
- return false;
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
+ return false;
+
+ return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) &
+ AUDIO_OUTPUT_ENABLE(cpu_transcoder);
+}
+
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state)
+{
+ if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 2;
}
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
@@ -2604,12 +2673,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->hdmi_high_tmds_clock_ratio = true;
/* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
pipe_config->lane_count = 4;
break;
case TRANS_DDI_MODE_SELECT_FDI:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
+ pipe_config->lane_count =
+ ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+ intel_dp_get_m_n(intel_crtc, pipe_config);
+ break;
case TRANS_DDI_MODE_SELECT_DP_MST:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
@@ -2619,7 +2699,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
}
pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
+ intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
@@ -2646,6 +2726,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
+
+ intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+}
+
+static enum intel_output_type
+intel_ddi_compute_output_type(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ switch (conn_state->connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ return INTEL_OUTPUT_HDMI;
+ case DRM_MODE_CONNECTOR_eDP:
+ return INTEL_OUTPUT_EDP;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ return INTEL_OUTPUT_DP;
+ default:
+ MISSING_CASE(conn_state->connector->connector_type);
+ return INTEL_OUTPUT_UNUSED;
+ }
}
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
@@ -2653,24 +2753,22 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int type = encoder->type;
- int port = intel_ddi_get_encoder_port(encoder);
+ enum port port = encoder->port;
int ret;
- WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
-
if (port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
- if (type == INTEL_OUTPUT_HDMI)
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
else
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
if (IS_GEN9_LP(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
- bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
- pipe_config->lane_count);
+ bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+
+ intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
return ret;
@@ -2685,7 +2783,7 @@ static struct intel_connector *
intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
{
struct intel_connector *connector;
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
@@ -2700,11 +2798,155 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
return connector;
}
+static int modeset_pipe(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ crtc_state->mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ goto out;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto out;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ goto out;
+
+ return 0;
+
+ out:
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+
+static int intel_hdmi_reset_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_connector *connector = hdmi->attached_connector;
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ u8 config;
+ int ret;
+
+ if (!connector || connector->base.status != connector_status_connected)
+ return 0;
+
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ctx);
+ if (ret)
+ return ret;
+
+ conn_state = connector->base.state;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ return 0;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
+
+ if (!crtc_state->base.active)
+ return 0;
+
+ if (!crtc_state->hdmi_high_tmds_clock_ratio &&
+ !crtc_state->hdmi_scrambling)
+ return 0;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ return 0;
+
+ ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
+ if (ret < 0) {
+ DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+ return 0;
+ }
+
+ if (!!(config & SCDC_TMDS_BIT_CLOCK_RATIO_BY_40) ==
+ crtc_state->hdmi_high_tmds_clock_ratio &&
+ !!(config & SCDC_SCRAMBLING_ENABLE) ==
+ crtc_state->hdmi_scrambling)
+ return 0;
+
+ /*
+ * HDMI 2.0 says that one should not send scrambled data
+ * prior to configuring the sink scrambling, and that
+ * TMDS clock/data transmission should be suspended when
+ * changing the TMDS clock rate in the sink. So let's
+ * just do a full modeset here, even though some sinks
+ * would be perfectly happy if we were to just reconfigure
+ * the SCDC settings on the fly.
+ */
+ return modeset_pipe(&crtc->base, ctx);
+}
+
+static bool intel_ddi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ bool changed;
+ int ret;
+
+ changed = intel_encoder_hotplug(encoder, connector);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ for (;;) {
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA)
+ ret = intel_hdmi_reset_link(encoder, &ctx);
+ else
+ ret = intel_dp_retrain_link(encoder, &ctx);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ continue;
+ }
+
+ break;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+
+ return changed;
+}
+
static struct intel_connector *
intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
{
struct intel_connector *connector;
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
@@ -2716,39 +2958,73 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
return connector;
}
+static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
+{
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+
+ if (dport->base.port != PORT_A)
+ return false;
+
+ if (dport->saved_port_bits & DDI_A_4_LANES)
+ return false;
+
+ /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
+ * supported configuration
+ */
+ if (IS_GEN9_LP(dev_priv))
+ return true;
+
+ /* Cannonlake: Most SKUs don't support DDI_E, and the only
+ * one that does also has a full A/E split called
+ * DDI_F, which makes DDI_E useless. In that case,
+ * however, let's trust the VBT info.
+ */
+ if (IS_CANNONLAKE(dev_priv) &&
+ !intel_bios_is_port_present(dev_priv, PORT_E))
+ return true;
+
+ return false;
+}
+
+static int
+intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev);
+ enum port port = intel_dport->base.port;
+ int max_lanes = 4;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ return max_lanes;
+
+ if (port == PORT_A || port == PORT_E) {
+ if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ max_lanes = port == PORT_A ? 4 : 0;
+ else
+ /* Both A and E share 2 lanes */
+ max_lanes = 2;
+ }
+
+ /*
+ * Some BIOS might fail to set this bit on port A if eDP
+ * wasn't lit up at boot. Force this bit set when needed
+ * so we use the proper lane count for our calculations.
+ */
+ if (intel_ddi_a_force_4_lanes(intel_dport)) {
+ DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
+ intel_dport->saved_port_bits |= DDI_A_4_LANES;
+ max_lanes = 4;
+ }
+
+ return max_lanes;
+}
+
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
- int max_lanes;
- if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
- switch (port) {
- case PORT_A:
- max_lanes = 4;
- break;
- case PORT_E:
- max_lanes = 0;
- break;
- default:
- max_lanes = 4;
- break;
- }
- } else {
- switch (port) {
- case PORT_A:
- max_lanes = 2;
- break;
- case PORT_E:
- max_lanes = 2;
- break;
- default:
- max_lanes = 4;
- break;
- }
- }
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
@@ -2782,6 +3058,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
+ intel_encoder->hotplug = intel_ddi_hotplug;
+ intel_encoder->compute_output_type = intel_ddi_compute_output_type;
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
if (IS_GEN9_LP(dev_priv))
@@ -2793,11 +3071,20 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->get_config = intel_ddi_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
+ intel_encoder->type = INTEL_OUTPUT_DDI;
+ intel_encoder->power_domain = intel_port_to_power_domain(port);
+ intel_encoder->port = port;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = 0;
- intel_dig_port->port = port;
- intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
- (DDI_BUF_PORT_REVERSAL |
- DDI_A_4_LANES);
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_PORT_REVERSAL;
+ else
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+ intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
+ intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
switch (port) {
case PORT_A:
@@ -2820,33 +3107,14 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_E_IO;
break;
+ case PORT_F:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_F_IO;
+ break;
default:
MISSING_CASE(port);
}
- /*
- * Bspec says that DDI_A_4_LANES is the only supported configuration
- * for Broxton. Yet some BIOS fail to set this bit on port A if eDP
- * wasn't lit up at boot. Force this bit on in our internal
- * configuration so that we use the proper lane count for our
- * calculations.
- */
- if (IS_GEN9_LP(dev_priv) && port == PORT_A) {
- if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
- DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
- intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
- max_lanes = 4;
- }
- }
-
- intel_dig_port->max_lanes = max_lanes;
-
- intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
- intel_encoder->port = port;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = 0;
-
intel_infoframe_init(intel_dig_port);
if (init_dp) {
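A recurring theme in the intel_ddi.c hunks above is dropping switches on encoder->type in favour of checking the crtc state's output_types bitmask; with DP MST the port's primary encoder is shared between streams, so the encoder type alone is ambiguous while the crtc state is not. A sketch of the assumed helpers, roughly as intel_drv.h defines them:

	static inline bool intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
					       enum intel_output_type type)
	{
		return crtc_state->output_types & BIT(type);
	}

	static inline bool intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
	{
		return crtc_state->output_types &
			(BIT(INTEL_OUTPUT_DP) |
			 BIT(INTEL_OUTPUT_DP_MST) |
			 BIT(INTEL_OUTPUT_EDP));
	}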
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 875d428..3dd350f 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -22,6 +22,9 @@
*
*/
+#include <drm/drm_print.h>
+
+#include "intel_device_info.h"
#include "i915_drv.h"
#define PLATFORM_NAME(x) [INTEL_##x] = #x
@@ -53,6 +56,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(GEMINILAKE),
PLATFORM_NAME(COFFEELAKE),
PLATFORM_NAME(CANNONLAKE),
+ PLATFORM_NAME(ICELAKE),
};
#undef PLATFORM_NAME
@@ -67,37 +71,153 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_dump(struct drm_i915_private *dev_priv)
+void intel_device_info_dump_flags(const struct intel_device_info *info,
+ struct drm_printer *p)
{
- const struct intel_device_info *info = &dev_priv->info;
-
- DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
- intel_platform_name(info->platform),
- info->gen,
- dev_priv->drm.pdev->device,
- dev_priv->drm.pdev->revision);
-#define PRINT_FLAG(name) \
- DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
+static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
+{
+ int s;
+
+ drm_printf(p, "slice mask: %04x\n", sseu->slice_mask);
+ drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask));
+ drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
+ for (s = 0; s < ARRAY_SIZE(sseu->subslice_mask); s++) {
+ drm_printf(p, "slice%d %u subslices mask=%04x\n",
+ s, hweight8(sseu->subslice_mask[s]),
+ sseu->subslice_mask[s]);
+ }
+ drm_printf(p, "EU total: %u\n", sseu->eu_total);
+ drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
+ drm_printf(p, "has slice power gating: %s\n",
+ yesno(sseu->has_slice_pg));
+ drm_printf(p, "has subslice power gating: %s\n",
+ yesno(sseu->has_subslice_pg));
+ drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
+}
+
+void intel_device_info_dump_runtime(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ sseu_dump(&info->sseu, p);
+
+ drm_printf(p, "CS timestamp frequency: %u kHz\n",
+ info->cs_timestamp_frequency_khz);
+}
+
+void intel_device_info_dump(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(info, struct drm_i915_private, info);
+
+ drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+ INTEL_DEVID(dev_priv),
+ INTEL_REVID(dev_priv),
+ intel_platform_name(info->platform),
+ info->gen);
+
+ intel_device_info_dump_flags(info, p);
+}
+
+void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ int s, ss;
+
+ if (sseu->max_slices == 0) {
+ drm_printf(p, "Unavailable\n");
+ return;
+ }
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
+ s, hweight8(sseu->subslice_mask[s]),
+ sseu->subslice_mask[s]);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u16 enabled_eus = sseu_get_eus(sseu, s, ss);
+
+ drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
+ ss, hweight16(enabled_eus), enabled_eus);
+ }
+ }
+}
+
+static u16 compute_eu_total(const struct sseu_dev_info *sseu)
+{
+ u16 i, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
+ total += hweight8(sseu->eu_mask[i]);
+
+ return total;
+}
+
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
const u32 fuse2 = I915_READ(GEN8_FUSE2);
+ int s, ss;
+ const int eu_mask = 0xff;
+ u32 subslice_mask, eu_en;
sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
GEN10_F2_S_ENA_SHIFT;
- sseu->subslice_mask = (1 << 4) - 1;
- sseu->subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
- GEN10_F2_SS_DIS_SHIFT);
+ sseu->max_slices = 6;
+ sseu->max_subslices = 4;
+ sseu->max_eus_per_subslice = 8;
- sseu->eu_total = hweight32(~I915_READ(GEN8_EU_DISABLE0));
- sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE1));
- sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE2));
- sseu->eu_total += hweight8(~(I915_READ(GEN10_EU_DISABLE3) &
- GEN10_EU_DIS_SS_MASK));
+ subslice_mask = (1 << 4) - 1;
+ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
+ /*
+ * Slice0 can have up to 3 subslices, but there are only 2 in
+ * slice1/2.
+ */
+ sseu->subslice_mask[0] = subslice_mask;
+ for (s = 1; s < sseu->max_slices; s++)
+ sseu->subslice_mask[s] = subslice_mask & 0x3;
+
+ /* Slice0 */
+ eu_en = ~I915_READ(GEN8_EU_DISABLE0);
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
+ /* Slice1 */
+ sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~I915_READ(GEN8_EU_DISABLE1);
+ sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
+ /* Slice2 */
+ sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
+ sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
+ /* Slice3 */
+ sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~I915_READ(GEN8_EU_DISABLE2);
+ sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
+ /* Slice4 */
+ sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
+ sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
+ /* Slice5 */
+ sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
+ eu_en = ~I915_READ(GEN10_EU_DISABLE3);
+ sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
+
+ /* Do a second pass where we mark the subslices disabled if all their
+ * EUs are off.
+ */
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ if (sseu_get_eus(sseu, s, ss) == 0)
+ sseu->subslice_mask[s] &= ~BIT(ss);
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
/*
* CNL is expected to always have a uniform distribution
@@ -118,26 +238,39 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
- u32 fuse, eu_dis;
+ u32 fuse;
fuse = I915_READ(CHV_FUSE_GT);
sseu->slice_mask = BIT(0);
+ sseu->max_slices = 1;
+ sseu->max_subslices = 2;
+ sseu->max_eus_per_subslice = 8;
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
- sseu->subslice_mask |= BIT(0);
- eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
- CHV_FGT_EU_DIS_SS0_R1_MASK);
- sseu->eu_total += 8 - hweight32(eu_dis);
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
+
+ sseu->subslice_mask[0] |= BIT(0);
+ sseu_set_eus(sseu, 0, 0, ~disabled_mask);
}
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
- sseu->subslice_mask |= BIT(1);
- eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
- CHV_FGT_EU_DIS_SS1_R1_MASK);
- sseu->eu_total += 8 - hweight32(eu_dis);
+ u8 disabled_mask =
+ ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
+ (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
+ CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
+
+ sseu->subslice_mask[0] |= BIT(1);
+ sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
+ sseu->eu_total = compute_eu_total(sseu);
+
/*
* CHV expected to always have a uniform distribution of EU
* across subslices.
@@ -159,41 +292,52 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
struct sseu_dev_info *sseu = &info->sseu;
- int s_max = 3, ss_max = 4, eu_max = 8;
int s, ss;
- u32 fuse2, eu_disable;
- u8 eu_mask = 0xff;
+ u32 fuse2, eu_disable, subslice_mask;
+ const u8 eu_mask = 0xff;
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ /* BXT has a single slice and at most 3 subslices. */
+ sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
+ sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
+ sseu->max_eus_per_subslice = 8;
+
/*
* The subslice disable field is global, i.e. it applies
* to each of the enabled slices.
*/
- sseu->subslice_mask = (1 << ss_max) - 1;
- sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
- GEN9_F2_SS_DIS_SHIFT);
+ subslice_mask = (1 << sseu->max_subslices) - 1;
+ subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT);
/*
* Iterate through enabled slices and subslices to
* count the total enabled EU.
*/
- for (s = 0; s < s_max; s++) {
+ for (s = 0; s < sseu->max_slices; s++) {
if (!(sseu->slice_mask & BIT(s)))
/* skip disabled slice */
continue;
+ sseu->subslice_mask[s] = subslice_mask;
+
eu_disable = I915_READ(GEN9_EU_DISABLE(s));
- for (ss = 0; ss < ss_max; ss++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
int eu_per_ss;
+ u8 eu_disabled_mask;
- if (!(sseu->subslice_mask & BIT(ss)))
+ if (!(sseu->subslice_mask[s] & BIT(ss)))
/* skip disabled subslice */
continue;
- eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
- eu_mask);
+ eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+ eu_per_ss = sseu->max_eus_per_subslice -
+ hweight8(eu_disabled_mask);
/*
* Record which subslice(s) has(have) 7 EUs. we
@@ -202,11 +346,11 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
*/
if (eu_per_ss == 7)
sseu->subslice_7eu[s] |= BIT(ss);
-
- sseu->eu_total += eu_per_ss;
}
}
+ sseu->eu_total = compute_eu_total(sseu);
+
/*
* SKL is expected to always have a uniform distribution
* of EU across subslices with the exception that any one
@@ -232,18 +376,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_GEN9_LP(dev_priv)) {
-#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask & BIT(ss)))
- info->has_pooled_eu = hweight8(sseu->subslice_mask) == 3;
-
- /*
- * There is a HW issue in 2x6 fused down parts that requires
- * Pooled EU to be enabled as a WA. The pool configuration
- * changes depending upon which subslice is fused down. This
- * doesn't affect if the device has all 3 subslices enabled.
- */
- /* WaEnablePooledEuFor2x6:bxt */
- info->has_pooled_eu |= (hweight8(sseu->subslice_mask) == 2 &&
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST));
+#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss)))
+ info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;
sseu->min_eu_in_pool = 0;
if (info->has_pooled_eu) {
@@ -261,19 +395,22 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
- const int s_max = 3, ss_max = 3, eu_max = 8;
int s, ss;
- u32 fuse2, eu_disable[3]; /* s_max */
+ u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ sseu->max_slices = 3;
+ sseu->max_subslices = 3;
+ sseu->max_eus_per_subslice = 8;
+
/*
* The subslice disable field is global, i.e. it applies
* to each of the enabled slices.
*/
- sseu->subslice_mask = GENMASK(ss_max - 1, 0);
- sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
- GEN8_F2_SS_DIS_SHIFT);
+ subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
+ subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+ GEN8_F2_SS_DIS_SHIFT);
eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
@@ -287,30 +424,38 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* Iterate through enabled slices and subslices to
* count the total enabled EU.
*/
- for (s = 0; s < s_max; s++) {
+ for (s = 0; s < sseu->max_slices; s++) {
if (!(sseu->slice_mask & BIT(s)))
/* skip disabled slice */
continue;
- for (ss = 0; ss < ss_max; ss++) {
+ sseu->subslice_mask[s] = subslice_mask;
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ u8 eu_disabled_mask;
u32 n_disabled;
- if (!(sseu->subslice_mask & BIT(ss)))
+ if (!(sseu->subslice_mask[s] & BIT(ss)))
/* skip disabled subslice */
continue;
- n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+ eu_disabled_mask =
+ eu_disable[s] >> (ss * sseu->max_eus_per_subslice);
+
+ sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);
+
+ n_disabled = hweight8(eu_disabled_mask);
/*
* Record which subslices have 7 EUs.
*/
- if (eu_max - n_disabled == 7)
+ if (sseu->max_eus_per_subslice - n_disabled == 7)
sseu->subslice_7eu[s] |= 1 << ss;
-
- sseu->eu_total += eu_max - n_disabled;
}
}
+ sseu->eu_total = compute_eu_total(sseu);
+
/*
* BDW is expected to always have a uniform distribution of EU across
* subslices with the exception that any one EU in any one subslice may
@@ -329,7 +474,177 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_eu_pg = 0;
}
-/*
+static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct sseu_dev_info *sseu = &info->sseu;
+ u32 fuse1;
+ int s, ss;
+
+ /*
+ * There isn't a register to tell us how many slices/subslices. We
+ * work off the PCI-ids here.
+ */
+ switch (info->gt) {
+ default:
+ MISSING_CASE(info->gt);
+ /* fall through */
+ case 1:
+ sseu->slice_mask = BIT(0);
+ sseu->subslice_mask[0] = BIT(0);
+ break;
+ case 2:
+ sseu->slice_mask = BIT(0);
+ sseu->subslice_mask[0] = BIT(0) | BIT(1);
+ break;
+ case 3:
+ sseu->slice_mask = BIT(0) | BIT(1);
+ sseu->subslice_mask[0] = BIT(0) | BIT(1);
+ sseu->subslice_mask[1] = BIT(0) | BIT(1);
+ break;
+ }
+
+ sseu->max_slices = hweight8(sseu->slice_mask);
+ sseu->max_subslices = hweight8(sseu->subslice_mask[0]);
+
+ fuse1 = I915_READ(HSW_PAVP_FUSE1);
+ switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
+ default:
+ MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
+ HSW_F1_EU_DIS_SHIFT);
+ /* fall through */
+ case HSW_F1_EU_DIS_10EUS:
+ sseu->eu_per_subslice = 10;
+ break;
+ case HSW_F1_EU_DIS_8EUS:
+ sseu->eu_per_subslice = 8;
+ break;
+ case HSW_F1_EU_DIS_6EUS:
+ sseu->eu_per_subslice = 6;
+ break;
+ }
+ sseu->max_eus_per_subslice = sseu->eu_per_subslice;
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ sseu_set_eus(sseu, s, ss,
+ (1UL << sseu->eu_per_subslice) - 1);
+ }
+ }
+
+ sseu->eu_total = compute_eu_total(sseu);
+
+ /* No powergating for you. */
+ sseu->has_slice_pg = 0;
+ sseu->has_subslice_pg = 0;
+ sseu->has_eu_pg = 0;
+}
+
+static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
+{
+ u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
+ u32 base_freq, frac_freq;
+
+ base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
+ base_freq *= 1000;
+
+ frac_freq = ((ts_override &
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
+ GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
+ frac_freq = 1000 / (frac_freq + 1);
+
+ return base_freq + frac_freq;
+}
+
+static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
+{
+ u32 f12_5_mhz = 12500;
+ u32 f19_2_mhz = 19200;
+ u32 f24_mhz = 24000;
+
+ if (INTEL_GEN(dev_priv) <= 4) {
+ /* PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the “Clocking Configuration”
+ * (“CLKCFG”) MCHBAR register)
+ */
+ return dev_priv->rawclk_freq / 16;
+ } else if (INTEL_GEN(dev_priv) <= 8) {
+ /* PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+ * rolling over every 1.5 hours).
+ */
+ return f12_5_mhz;
+ } else if (INTEL_GEN(dev_priv) <= 9) {
+ u32 ctc_reg = I915_READ(CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(dev_priv);
+ } else {
+ freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;
+
+ /* Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+			 * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
+ } else if (INTEL_GEN(dev_priv) <= 10) {
+ u32 ctc_reg = I915_READ(CTC_MODE);
+ u32 freq = 0;
+ u32 rpm_config_reg = 0;
+
+ /* First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(dev_priv);
+ } else {
+ u32 crystal_clock;
+
+ rpm_config_reg = I915_READ(RPM_CONFIG0);
+ crystal_clock = (rpm_config_reg &
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
+ GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
+ switch (crystal_clock) {
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ freq = f19_2_mhz;
+ break;
+ case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ freq = f24_mhz;
+ break;
+ }
+
+ /* Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+			 * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((rpm_config_reg &
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
+ }
+
+ MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
+ return 0;
+}
+
+/**
+ * intel_device_info_runtime_init - initialize runtime info
+ * @info: intel device info struct
+ *
* Determine various intel_device_info fields at runtime.
*
* Use it when either:
@@ -342,17 +657,24 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* - after the PCH has been detected,
* - before the first usage of the fields it can tweak.
*/
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+void intel_device_info_runtime_init(struct intel_device_info *info)
{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct drm_i915_private *dev_priv =
+ container_of(info, struct drm_i915_private, info);
enum pipe pipe;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 10) {
+ for_each_pipe(dev_priv, pipe)
+ info->num_scalers[pipe] = 2;
+ } else if (INTEL_GEN(dev_priv) == 9) {
info->num_scalers[PIPE_A] = 2;
info->num_scalers[PIPE_B] = 2;
info->num_scalers[PIPE_C] = 1;
}
+ BUILD_BUG_ON(I915_NUM_ENGINES >
+ sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
+
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
@@ -438,7 +760,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
}
/* Initialize slice/subslice/EU info */
- if (IS_CHERRYVIEW(dev_priv))
+ if (IS_HASWELL(dev_priv))
+ haswell_sseu_info_init(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
@@ -447,19 +771,12 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 10)
gen10_sseu_info_init(dev_priv);
- DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
- DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
- DRM_DEBUG_DRIVER("subslice total: %u\n",
- sseu_subslice_total(&info->sseu));
- DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
- DRM_DEBUG_DRIVER("subslice per slice: %u\n",
- hweight8(info->sseu.subslice_mask));
- DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
- DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
- DRM_DEBUG_DRIVER("has slice power gating: %s\n",
- info->sseu.has_slice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
- info->sseu.has_subslice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has EU power gating: %s\n",
- info->sseu.has_eu_pg ? "y" : "n");
+ /* Initialize command stream timestamp frequency */
+ info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+}
+
+void intel_driver_caps_print(const struct intel_driver_caps *caps,
+ struct drm_printer *p)
+{
+ drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
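
A side note on the timestamp code added above: read_reference_ts_freq() splits the reference frequency into an integer and a fractional part, both expressed in kHz. A minimal standalone sketch of that arithmetic (illustration only, not driver code; the divider and denominator fields are hypothetical, already-decoded values that the driver reads from GEN9_TIMESTAMP_OVERRIDE) looks like this:

/* Standalone illustration only, not driver code. */
static unsigned int reference_ts_freq_khz(unsigned int divider,
					  unsigned int denominator)
{
	unsigned int base_khz = (divider + 1) * 1000;		/* integer MHz part, in kHz */
	unsigned int frac_khz = 1000 / (denominator + 1);	/* fractional part, in kHz */

	return base_khz + frac_khz;
}

For example, a divider field of 23 and a denominator field of 3 would yield 24000 + 250 = 24250 kHz.
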
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
new file mode 100644
index 0000000..0835752
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DEVICE_INFO_H_
+#define _INTEL_DEVICE_INFO_H_
+
+#include "intel_display.h"
+
+struct drm_printer;
+struct drm_i915_private;
+
+/* Keep in gen based order, and chronological order within a gen */
+enum intel_platform {
+ INTEL_PLATFORM_UNINITIALIZED = 0,
+ /* gen2 */
+ INTEL_I830,
+ INTEL_I845G,
+ INTEL_I85X,
+ INTEL_I865G,
+ /* gen3 */
+ INTEL_I915G,
+ INTEL_I915GM,
+ INTEL_I945G,
+ INTEL_I945GM,
+ INTEL_G33,
+ INTEL_PINEVIEW,
+ /* gen4 */
+ INTEL_I965G,
+ INTEL_I965GM,
+ INTEL_G45,
+ INTEL_GM45,
+ /* gen5 */
+ INTEL_IRONLAKE,
+ /* gen6 */
+ INTEL_SANDYBRIDGE,
+ /* gen7 */
+ INTEL_IVYBRIDGE,
+ INTEL_VALLEYVIEW,
+ INTEL_HASWELL,
+ /* gen8 */
+ INTEL_BROADWELL,
+ INTEL_CHERRYVIEW,
+ /* gen9 */
+ INTEL_SKYLAKE,
+ INTEL_BROXTON,
+ INTEL_KABYLAKE,
+ INTEL_GEMINILAKE,
+ INTEL_COFFEELAKE,
+ /* gen10 */
+ INTEL_CANNONLAKE,
+ /* gen11 */
+ INTEL_ICELAKE,
+ INTEL_MAX_PLATFORMS
+};
+
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+ func(is_mobile); \
+ func(is_lp); \
+ func(is_alpha_support); \
+ /* Keep has_* in alphabetical order */ \
+ func(has_64bit_reloc); \
+ func(has_aliasing_ppgtt); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_reset_engine); \
+ func(has_fbc); \
+ func(has_fpga_dbg); \
+ func(has_full_ppgtt); \
+ func(has_full_48bit_ppgtt); \
+ func(has_gmch_display); \
+ func(has_guc); \
+ func(has_guc_ct); \
+ func(has_hotplug); \
+ func(has_l3_dpf); \
+ func(has_llc); \
+ func(has_logical_ring_contexts); \
+ func(has_logical_ring_elsq); \
+ func(has_logical_ring_preemption); \
+ func(has_overlay); \
+ func(has_pooled_eu); \
+ func(has_psr); \
+ func(has_rc6); \
+ func(has_rc6p); \
+ func(has_resource_streamer); \
+ func(has_runtime_pm); \
+ func(has_snoop); \
+ func(unfenced_needs_alignment); \
+ func(cursor_needs_physical); \
+ func(hws_needs_physical); \
+ func(overlay_needs_physical); \
+ func(supports_tv); \
+ func(has_ipc);
+
+#define GEN_MAX_SLICES (6) /* CNL upper bound */
+#define GEN_MAX_SUBSLICES (7)
+
+struct sseu_dev_info {
+ u8 slice_mask;
+ u8 subslice_mask[GEN_MAX_SUBSLICES];
+ u16 eu_total;
+ u8 eu_per_subslice;
+ u8 min_eu_in_pool;
+ /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
+
+ /* Topology fields */
+ u8 max_slices;
+ u8 max_subslices;
+ u8 max_eus_per_subslice;
+
+	/* We don't have more than 8 EUs per subslice at the moment, and since
+	 * we store enabled EUs as bits, there is no need to multiply by EUs
+	 * per subslice.
+ */
+ u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
+};
+
+typedef u8 intel_ring_mask_t;
+
+struct intel_device_info {
+ u16 device_id;
+ u16 gen_mask;
+
+ u8 gen;
+ u8 gt; /* GT number, 0 if undefined */
+ u8 num_rings;
+ intel_ring_mask_t ring_mask; /* Rings supported by the HW */
+
+ enum intel_platform platform;
+ u32 platform_mask;
+
+ unsigned int page_sizes; /* page sizes supported by the HW */
+
+ u32 display_mmio_offset;
+
+ u8 num_pipes;
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
+
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+ u16 ddb_size; /* in blocks */
+
+ /* Register offsets for the various display pipes and transcoders */
+ int pipe_offsets[I915_MAX_TRANSCODERS];
+ int trans_offsets[I915_MAX_TRANSCODERS];
+ int palette_offsets[I915_MAX_PIPES];
+ int cursor_offsets[I915_MAX_PIPES];
+
+ /* Slice/subslice/EU info */
+ struct sseu_dev_info sseu;
+
+ u32 cs_timestamp_frequency_khz;
+
+ struct color_luts {
+ u16 degamma_lut_size;
+ u16 gamma_lut_size;
+ } color;
+};
+
+struct intel_driver_caps {
+ unsigned int scheduler;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ unsigned int i, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
+ total += hweight8(sseu->subslice_mask[i]);
+
+ return total;
+}
+
+static inline int sseu_eu_idx(const struct sseu_dev_info *sseu,
+ int slice, int subslice)
+{
+ int subslice_stride = DIV_ROUND_UP(sseu->max_eus_per_subslice,
+ BITS_PER_BYTE);
+ int slice_stride = sseu->max_subslices * subslice_stride;
+
+ return slice * slice_stride + subslice * subslice_stride;
+}
+
+static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu,
+ int slice, int subslice)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+ u16 eu_mask = 0;
+
+ for (i = 0;
+ i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
+ eu_mask |= ((u16) sseu->eu_mask[offset + i]) <<
+ (i * BITS_PER_BYTE);
+ }
+
+ return eu_mask;
+}
+
+static inline void sseu_set_eus(struct sseu_dev_info *sseu,
+ int slice, int subslice, u16 eu_mask)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+
+ for (i = 0;
+ i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
+ sseu->eu_mask[offset + i] =
+ (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+ }
+}
+
+const char *intel_platform_name(enum intel_platform platform);
+
+void intel_device_info_runtime_init(struct intel_device_info *info);
+void intel_device_info_dump(const struct intel_device_info *info,
+ struct drm_printer *p);
+void intel_device_info_dump_flags(const struct intel_device_info *info,
+ struct drm_printer *p);
+void intel_device_info_dump_runtime(const struct intel_device_info *info,
+ struct drm_printer *p);
+void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p);
+
+void intel_driver_caps_print(const struct intel_driver_caps *caps,
+ struct drm_printer *p);
+
+#endif
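
The sseu_eu_idx()/sseu_set_eus()/sseu_get_eus() helpers above flatten the per-(slice, subslice) EU bitmasks into the single eu_mask[] byte array, striding first by slice and then by subslice. A minimal standalone sketch of the same packing follows (illustration only, not driver code; the topology numbers are hypothetical, and each subslice mask is assumed to fit in one byte):

/* Standalone illustration only, not driver code. */
#include <stdio.h>

#define MAX_SLICES	3	/* hypothetical topology */
#define MAX_SUBSLICES	4
#define SUBSLICE_STRIDE	1	/* bytes per subslice: DIV_ROUND_UP(8 max EUs, 8 bits) */

static unsigned char eu_mask[MAX_SLICES * MAX_SUBSLICES * SUBSLICE_STRIDE];

static int eu_idx(int slice, int subslice)
{
	/* slice-major layout, mirroring sseu_eu_idx() above */
	return (slice * MAX_SUBSLICES + subslice) * SUBSLICE_STRIDE;
}

int main(void)
{
	eu_mask[eu_idx(1, 2)] = 0x3f;	/* 6 EUs enabled in slice 1, subslice 2 */
	printf("slice 1/subslice 2 EU mask: %#x\n", eu_mask[eu_idx(1, 2)]);
	return 0;
}

One byte per subslice is sufficient as long as no subslice has more than 8 EUs, which matches the comment on eu_mask[] in the struct above.
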
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 50f8443..3b48fd2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -219,10 +219,8 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
{
if (HAS_DDI(dev_priv))
return pipe_config->port_clock; /* SPLL */
- else if (IS_GEN5(dev_priv))
- return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
else
- return 270000;
+ return dev_priv->fdi_pll_freq;
}
static const struct intel_limit intel_limits_i8xx_dac = {
@@ -491,7 +489,7 @@ static const struct intel_limit intel_limits_bxt = {
};
static bool
-needs_modeset(struct drm_crtc_state *state)
+needs_modeset(const struct drm_crtc_state *state)
{
return drm_atomic_crtc_needs_modeset(state);
}
@@ -560,11 +558,11 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
-/**
+
+/*
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
const struct intel_limit *limit,
const struct dpll *clock)
@@ -1040,28 +1038,14 @@ static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
wait_for_pipe_scanline_moving(crtc, true);
}
-/*
- * intel_wait_for_pipe_off - wait for pipe to turn off
- * @crtc: crtc whose pipe to wait for
- *
- * After disabling a pipe, we can't wait for vblank in the usual way,
- * spinning on the vblank interrupt status bit, since we won't actually
- * see an interrupt when the pipe is disabled.
- *
- * On Gen4 and above:
- * wait for the pipe register state bit to turn off
- *
- * Otherwise:
- * wait for the display line value to settle (it usually
- * ends up stopping at the start of the next frame).
- *
- */
-static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
+static void
+intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
if (INTEL_GEN(dev_priv) >= 4) {
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
@@ -1653,7 +1637,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
u32 port_mask;
i915_reg_t dpll_reg;
- switch (dport->port) {
+ switch (dport->base.port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
@@ -1675,7 +1659,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
dpll_reg, port_mask, expected_mask,
1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
- port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
+ port_name(dport->base.port),
+ I915_READ(dpll_reg) & port_mask, expected_mask);
}
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1823,27 +1808,18 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- WARN_ON(!crtc->config->has_pch_encoder);
-
if (HAS_PCH_LPT(dev_priv))
return PIPE_A;
else
return crtc->pipe;
}
-/**
- * intel_enable_pipe - enable a pipe, asserting requirements
- * @crtc: crtc responsible for the pipe
- *
- * Enable @crtc's pipe, making sure that various hardware specific requirements
- * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
- */
-static void intel_enable_pipe(struct intel_crtc *crtc)
+static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
i915_reg_t reg;
u32 val;
@@ -1857,12 +1833,12 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* need the check.
*/
if (HAS_GMCH_DISPLAY(dev_priv)) {
- if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
+ if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
} else {
- if (crtc->config->has_pch_encoder) {
+ if (new_crtc_state->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv,
intel_crtc_pch_transcoder(crtc));
@@ -1890,24 +1866,15 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* when it's derived from the timestamps. So let's wait for the
* pipe to start properly before we call drm_crtc_vblank_on()
*/
- if (dev->max_vblank_count == 0)
+ if (dev_priv->drm.max_vblank_count == 0)
intel_wait_for_pipe_scanline_moving(crtc);
}
-/**
- * intel_disable_pipe - disable a pipe, asserting requirements
- * @crtc: crtc whose pipes is to be disabled
- *
- * Disable the pipe of @crtc, making sure that various hardware
- * specific requirements are met, if applicable, e.g. plane
- * disabled, panel fitter off, etc.
- *
- * Will wait until the pipe has shut down before returning.
- */
-static void intel_disable_pipe(struct intel_crtc *crtc)
+static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val;
@@ -1929,7 +1896,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
* Double wide has implications for planes
* so best keep it disabled when not needed.
*/
- if (crtc->config->double_wide)
+ if (old_crtc_state->double_wide)
val &= ~PIPECONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
@@ -1938,7 +1905,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
I915_WRITE(reg, val);
if ((val & PIPECONF_ENABLE) == 0)
- intel_wait_for_pipe_off(crtc);
+ intel_wait_for_pipe_off(old_crtc_state);
}
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
@@ -2062,12 +2029,12 @@ static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_pr
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 256 * 1024;
else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return 128 * 1024;
- else if (INTEL_INFO(dev_priv)->gen >= 4)
+ else if (INTEL_GEN(dev_priv) >= 4)
return 4 * 1024;
else
return 0;
@@ -2100,14 +2067,26 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
}
}
+static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
+}
+
struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ unsigned int rotation,
+ bool uses_fence,
+ unsigned long *out_flags)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
struct i915_vma *vma;
+ unsigned int pinctl;
u32 alignment;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -2135,11 +2114,26 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
+ pinctl = 0;
+
+ /* Valleyview is definitely limited to scanning out the first
+	 * 512MiB. Let's presume this behaviour was inherited from the
+	 * g4x display engine and that all earlier gens are similarly
+ * limited. Testing suggests that it is a little more
+ * complicated than this. For example, Cherryview appears quite
+	 * happy to scan out from anywhere within its global aperture.
+ */
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ pinctl |= PIN_MAPPABLE;
+
+ vma = i915_gem_object_pin_to_display_plane(obj,
+ alignment, &view, pinctl);
if (IS_ERR(vma))
goto err;
- if (i915_vma_is_map_and_fenceable(vma)) {
+ if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
+ int ret;
+
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always, when
@@ -2156,7 +2150,15 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
* something and try to run the system in a "less than optimal"
* mode that matches the user configuration.
*/
- i915_vma_pin_fence(vma);
+ ret = i915_vma_pin_fence(vma);
+ if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
+ i915_gem_object_unpin_from_display_plane(vma);
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ if (ret == 0 && vma->fence)
+ *out_flags |= PLANE_HAS_FENCE;
}
i915_vma_get(vma);
@@ -2167,11 +2169,12 @@ err:
return vma;
}
-void intel_unpin_fb_vma(struct i915_vma *vma)
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- i915_vma_unpin_fence(vma);
+ if (flags & PLANE_HAS_FENCE)
+ i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
i915_vma_put(vma);
}
@@ -2420,6 +2423,20 @@ static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
}
}
+/*
+ * From the Sky Lake PRM:
+ * "The Color Control Surface (CCS) contains the compression status of
+ * the cache-line pairs. The compression state of the cache-line pair
+ * is specified by 2 bits in the CCS. Each CCS cache-line represents
+ * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
+ * cache-line-pairs. CCS is always Y tiled."
+ *
+ * Since a cache-line pair refers to two horizontally adjacent cache lines,
+ * each cache line in the CCS corresponds to an area of 32x16 cache
+ * lines on the main surface. Since each pixel is 4 bytes, this gives
+ * us a ratio of one byte in the CCS for each 8x16 pixels in the
+ * main surface.
+ */
static const struct drm_format_info ccs_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
@@ -2672,7 +2689,6 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -2688,7 +2704,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size_aligned * 2 > ggtt->stolen_usable_size)
+ if (size_aligned * 2 > dev_priv->stolen_usable_size)
return false;
mutex_lock(&dev->struct_mutex);
@@ -2828,7 +2844,10 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
valid_fb:
mutex_lock(&dev->struct_mutex);
intel_state->vma =
- intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+ intel_pin_and_fence_fb_obj(fb,
+ primary->state->rotation,
+ intel_plane_uses_fence(intel_state),
+ &intel_state->flags);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(intel_state->vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
@@ -2951,14 +2970,19 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
return true;
}
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
+static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
int x = plane_state->base.src.x1 >> 16;
int y = plane_state->base.src.y1 >> 16;
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
+ int dst_x = plane_state->base.dst.x1;
+ int pipe_src_w = crtc_state->pipe_src_w;
int max_width = skl_max_plane_width(fb, 0, rotation);
int max_height = 4096;
u32 alignment, offset, aux_offset = plane_state->aux.offset;
@@ -2969,6 +2993,24 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
return -EINVAL;
}
+ /*
+ * Display WA #1175: cnl,glk
+ * Planes other than the cursor may cause FIFO underflow and display
+ * corruption if starting less than 4 pixels from the right edge of
+ * the screen.
+	 * Besides the above WA, also handle the similar problem where planes
+	 * other than the cursor that end less than 4 pixels from the left edge
+	 * of the screen may cause FIFO underflow and display corruption.
+ */
+ if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+ (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
+ DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
+ dst_x + w < 4 ? "end" : "start",
+ dst_x + w < 4 ? dst_x + w : dst_x,
+ 4, pipe_src_w - 4);
+ return -ERANGE;
+ }
+
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
@@ -3060,8 +3102,6 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
const struct drm_framebuffer *fb = plane_state->base.fb;
int src_x = plane_state->base.src.x1 >> 16;
int src_y = plane_state->base.src.y1 >> 16;
@@ -3071,20 +3111,6 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int y = src_y / vsub;
u32 offset;
- switch (plane->id) {
- case PLANE_PRIMARY:
- case PLANE_SPRITE0:
- break;
- default:
- DRM_DEBUG_KMS("RC support only on plane 1 and 2\n");
- return -EINVAL;
- }
-
- if (crtc->pipe == PIPE_C) {
- DRM_DEBUG_KMS("No RC support on pipe C\n");
- return -EINVAL;
- }
-
if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
plane_state->base.rotation);
@@ -3101,12 +3127,19 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
return 0;
}
-int skl_check_plane_surface(struct intel_plane_state *plane_state)
+int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
int ret;
+ if (rotation & DRM_MODE_REFLECT_X &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+ return -EINVAL;
+ }
+
if (!plane_state->base.visible)
return 0;
@@ -3135,7 +3168,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
plane_state->aux.y = 0;
}
- ret = skl_check_main_surface(plane_state);
+ ret = skl_check_main_surface(crtc_state, plane_state);
if (ret)
return ret;
@@ -3161,7 +3194,7 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
- if (INTEL_GEN(dev_priv) < 4)
+ if (INTEL_GEN(dev_priv) < 5)
dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
switch (fb->format->format) {
@@ -3241,16 +3274,16 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
-static void i9xx_update_primary_plane(struct intel_plane *primary,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static void i9xx_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane plane = primary->plane;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
u32 dspcntr = plane_state->ctl;
- i915_reg_t reg = DSPCNTR(plane);
+ i915_reg_t reg = DSPCNTR(i9xx_plane);
int x = plane_state->main.x;
int y = plane_state->main.y;
unsigned long irqflags;
@@ -3269,34 +3302,34 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- I915_WRITE_FW(DSPSIZE(plane),
+ I915_WRITE_FW(DSPSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(DSPPOS(plane), 0);
- } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
- I915_WRITE_FW(PRIMSIZE(plane),
+ I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
+ } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ I915_WRITE_FW(PRIMSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(PRIMPOS(plane), 0);
- I915_WRITE_FW(PRIMCNSTALPHA(plane), 0);
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
+ I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
I915_WRITE_FW(reg, dspcntr);
- I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
+ I915_WRITE_FW(DSPSTRIDE(i9xx_plane), fb->pitches[0]);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPSURF(plane),
+ I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
+ I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
- I915_WRITE_FW(DSPSURF(plane),
+ I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
+ I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
} else {
- I915_WRITE_FW(DSPADDR(plane),
+ I915_WRITE_FW(DSPADDR(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
}
@@ -3305,32 +3338,31 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void i9xx_disable_primary_plane(struct intel_plane *primary,
- struct intel_crtc *crtc)
+static void i9xx_disable_plane(struct intel_plane *plane,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
- enum plane plane = primary->plane;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DSPCNTR(plane), 0);
- if (INTEL_INFO(dev_priv)->gen >= 4)
- I915_WRITE_FW(DSPSURF(plane), 0);
+ I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
+ if (INTEL_GEN(dev_priv) >= 4)
+ I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
else
- I915_WRITE_FW(DSPADDR(plane), 0);
- POSTING_READ_FW(DSPCNTR(plane));
+ I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
+ POSTING_READ_FW(DSPCNTR(i9xx_plane));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
{
-
- struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
- enum plane plane = primary->plane;
- enum pipe pipe = primary->pipe;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ enum pipe pipe = plane->pipe;
bool ret;
/*
@@ -3342,7 +3374,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
+ ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;
intel_display_power_put(dev_priv, power_domain);
@@ -3415,20 +3447,11 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
case DRM_FORMAT_RGB565:
return PLANE_CTL_FORMAT_RGB_565;
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB8888:
- return PLANE_CTL_FORMAT_XRGB_8888;
- /*
- * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
- case DRM_FORMAT_ABGR8888:
- return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
case DRM_FORMAT_ARGB8888:
- return PLANE_CTL_FORMAT_XRGB_8888 |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ return PLANE_CTL_FORMAT_XRGB_8888;
case DRM_FORMAT_XRGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR2101010:
@@ -3448,6 +3471,33 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
return 0;
}
+/*
+ * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+ * to be already pre-multiplied. We need to add a knob (or a different
+ * DRM_FORMAT) for user-space to configure that.
+ */
+static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
+{
+ switch (pixel_format) {
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ default:
+ return PLANE_CTL_ALPHA_DISABLE;
+ }
+}
+
+static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
+{
+ switch (pixel_format) {
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+ default:
+ return PLANE_COLOR_ALPHA_DISABLE;
+ }
+}
+
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
switch (fb_modifier) {
@@ -3470,9 +3520,9 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
return 0;
}
-static u32 skl_plane_ctl_rotation(unsigned int rotation)
+static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
- switch (rotation) {
+ switch (rotate) {
case DRM_MODE_ROTATE_0:
break;
/*
@@ -3486,7 +3536,22 @@ static u32 skl_plane_ctl_rotation(unsigned int rotation)
case DRM_MODE_ROTATE_270:
return PLANE_CTL_ROTATE_90;
default:
- MISSING_CASE(rotation);
+ MISSING_CASE(rotate);
+ }
+
+ return 0;
+}
+
+static u32 cnl_plane_ctl_flip(unsigned int reflect)
+{
+ switch (reflect) {
+ case 0:
+ break;
+ case DRM_MODE_REFLECT_X:
+ return PLANE_CTL_FLIP_HORIZONTAL;
+ case DRM_MODE_REFLECT_Y:
+ default:
+ MISSING_CASE(reflect);
}
return 0;
@@ -3504,16 +3569,27 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
- if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+ if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
+
+ if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
}
plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
+ plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ plane_ctl |= cnl_plane_ctl_flip(rotation &
+ DRM_MODE_REFLECT_MASK);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
@@ -3523,6 +3599,30 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
return plane_ctl;
}
+u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ u32 plane_color_ctl = 0;
+
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+ plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
+ plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
+
+ if (intel_format_is_yuv(fb->format->format)) {
+ if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
+ else
+ plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
+
+ if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ }
+
+ return plane_color_ctl;
+}
+
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -4465,7 +4565,7 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc)
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP)
- return enc_to_dig_port(&encoder->base)->port;
+ return encoder->port;
}
return -1;
@@ -4703,8 +4803,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
/**
* skl_update_scaler_plane - Stages update to scaler state for a given plane.
- *
- * @state: crtc's scaler state
+ * @crtc_state: crtc's scaler state
* @plane_state: atomic plane state to update
*
* Return
@@ -4734,7 +4833,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
return ret;
/* check colorkey */
- if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
+ if (plane_state->ckey.flags) {
DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
intel_plane->base.base.id,
intel_plane->base.name);
@@ -4816,12 +4915,13 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
}
}
-void hsw_enable_ips(struct intel_crtc *crtc)
+void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!crtc->config->ips_enabled)
+ if (!crtc_state->ips_enabled)
return;
/*
@@ -4829,8 +4929,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
* This function is called from post_plane_update, which is run after
* a vblank wait.
*/
-
- assert_plane_enabled(to_intel_plane(crtc->base.primary));
+ WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
@@ -4856,16 +4955,15 @@ void hsw_enable_ips(struct intel_crtc *crtc)
}
}
-void hsw_disable_ips(struct intel_crtc *crtc)
+void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!crtc->config->ips_enabled)
+ if (!crtc_state->ips_enabled)
return;
- assert_plane_enabled(to_intel_plane(crtc->base.primary));
-
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
@@ -4902,6 +5000,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
/**
* intel_post_enable_primary - Perform operations after enabling primary plane
* @crtc: the CRTC whose primary plane was just enabled
+ * @new_crtc_state: the enabling state
*
* Performs potentially sleeping operations that must be done after the primary
* plane is enabled, such as updating FBC and IPS. Note that this may be
@@ -4910,7 +5009,8 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
* completely hide the primary plane.
*/
static void
-intel_post_enable_primary(struct drm_crtc *crtc)
+intel_post_enable_primary(struct drm_crtc *crtc,
+ const struct intel_crtc_state *new_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4918,14 +5018,6 @@ intel_post_enable_primary(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
/*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_enable_ips(intel_crtc);
-
- /*
* Gen2 reports pipe underruns whenever all planes are disabled.
* So don't enable underrun reporting before at least some planes
* are enabled.
@@ -4940,9 +5032,9 @@ intel_post_enable_primary(struct drm_crtc *crtc)
intel_check_pch_fifo_underruns(dev_priv);
}
-/* FIXME move all this to pre_plane_update() with proper state tracking */
+/* FIXME get rid of this and use pre_plane_update */
static void
-intel_pre_disable_primary(struct drm_crtc *crtc)
+intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4951,32 +5043,12 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
- * So diasble underrun reporting before all the planes get disabled.
- * FIXME: Need to fix the logic to work when we turn off all planes
- * but leave the pipe running.
+ * So disable underrun reporting before all the planes get disabled.
*/
if (IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- /*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_disable_ips(intel_crtc);
-}
-
-/* FIXME get rid of this and use pre_plane_update */
-static void
-intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
-
- intel_pre_disable_primary(crtc);
+ hsw_disable_ips(to_intel_crtc_state(crtc->state));
/*
* Vblank time updates from the shadow to live plane control register
@@ -4992,6 +5064,38 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
intel_wait_for_vblank(dev_priv, pipe);
}
+static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!old_crtc_state->ips_enabled)
+ return false;
+
+ if (needs_modeset(&new_crtc_state->base))
+ return true;
+
+ return !new_crtc_state->ips_enabled;
+}
+
+static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->ips_enabled)
+ return false;
+
+ if (needs_modeset(&new_crtc_state->base))
+ return true;
+
+ /*
+	 * We can't read out IPS on Broadwell, assume the worst and
+ * forcibly enable IPS on the first fastset.
+ */
+ if (new_crtc_state->update_pipe &&
+ old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
+ return true;
+
+ return !old_crtc_state->ips_enabled;
+}
+
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
@@ -5008,6 +5112,9 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (pipe_config->update_wm_post && pipe_config->base.active)
intel_update_watermarks(crtc);
+ if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
+ hsw_enable_ips(pipe_config);
+
if (old_pri_state) {
struct intel_plane_state *primary_state =
intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state),
@@ -5020,7 +5127,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (primary_state->base.visible &&
(needs_modeset(&pipe_config->base) ||
!old_primary_state->base.visible))
- intel_post_enable_primary(&crtc->base);
+ intel_post_enable_primary(&crtc->base, pipe_config);
}
}
@@ -5038,6 +5145,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
+ if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
+ hsw_disable_ips(old_crtc_state);
+
if (old_pri_state) {
struct intel_plane_state *primary_state =
intel_atomic_get_new_plane_state(old_intel_state,
@@ -5046,10 +5156,13 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
to_intel_plane_state(old_pri_state);
intel_fbc_pre_update(crtc, pipe_config, primary_state);
-
- if (old_primary_state->base.visible &&
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
+ */
+ if (IS_GEN2(dev_priv) && old_primary_state->base.visible &&
(modeset || !primary_state->base.visible))
- intel_pre_disable_primary(&crtc->base);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
}
/*
@@ -5312,7 +5425,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
- intel_enable_pipe(intel_crtc);
+ intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
ironlake_pch_enable(pipe_config);
@@ -5352,6 +5465,20 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
}
+static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ uint32_t val;
+
+ val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
+
+ /* Program B credit equally to all pipes */
+ val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
+
+ I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
+}
+
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@@ -5429,9 +5556,12 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_pipe_mbus_enable(intel_crtc);
+
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
- intel_enable_pipe(intel_crtc);
+ intel_enable_pipe(pipe_config);
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(pipe_config);
@@ -5497,7 +5627,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- intel_disable_pipe(intel_crtc);
+ intel_disable_pipe(old_crtc_state);
ironlake_pfit_disable(intel_crtc, false);
@@ -5549,7 +5679,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
- intel_disable_pipe(intel_crtc);
+ intel_disable_pipe(old_crtc_state);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
@@ -5605,6 +5735,8 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
return POWER_DOMAIN_PORT_DDI_D_LANES;
case PORT_E:
return POWER_DOMAIN_PORT_DDI_E_LANES;
+ case PORT_F:
+ return POWER_DOMAIN_PORT_DDI_F_LANES;
default:
MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
@@ -5625,8 +5757,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
if (!crtc_state->base.active)
return 0;
- mask = BIT(POWER_DOMAIN_PIPE(pipe));
- mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+ mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
+ mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
if (crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
@@ -5638,7 +5770,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
}
if (HAS_DDI(dev_priv) && crtc_state->has_audio)
- mask |= BIT(POWER_DOMAIN_AUDIO);
+ mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
if (crtc_state->shared_dpll)
mask |= BIT_ULL(POWER_DOMAIN_PLLS);
@@ -5727,7 +5859,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
dev_priv->display.initial_watermarks(old_intel_state,
pipe_config);
- intel_enable_pipe(intel_crtc);
+ intel_enable_pipe(pipe_config);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5786,7 +5918,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->config);
else
intel_update_watermarks(intel_crtc);
- intel_enable_pipe(intel_crtc);
+ intel_enable_pipe(pipe_config);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5830,7 +5962,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- intel_disable_pipe(intel_crtc);
+ intel_disable_pipe(old_crtc_state);
i9xx_pfit_disable(intel_crtc);
@@ -5925,6 +6057,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
+ dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
/*
@@ -6179,18 +6312,20 @@ retry:
return ret;
}
-static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *pipe_config)
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
- if (pipe_config->ips_force_disable)
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /* IPS only exists on ULT machines and is tied to pipe A. */
+ if (!hsw_crtc_supports_ips(crtc))
return false;
- if (pipe_config->pipe_bpp > 24)
+ if (!i915_modparams.enable_ips)
return false;
- /* HSW can handle pixel rate up to cdclk? */
- if (IS_HASWELL(dev_priv))
- return true;
+ if (crtc_state->pipe_bpp > 24)
+ return false;
/*
* We compare against max which means we must take
@@ -6199,19 +6334,36 @@ static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
*
* Should measure whether using a lower cdclk w/o IPS
*/
- return pipe_config->pixel_rate <=
- dev_priv->max_cdclk_freq * 95 / 100;
+ if (IS_BROADWELL(dev_priv) &&
+ crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
+ return false;
+
+ return true;
}
-static void hsw_compute_ips_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv =
+ to_i915(crtc_state->base.crtc->dev);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(crtc_state->base.state);
+
+ if (!hsw_crtc_state_ips_capable(crtc_state))
+ return false;
- pipe_config->ips_enabled = i915_modparams.enable_ips &&
- hsw_crtc_supports_ips(crtc) &&
- pipe_config_supports_ips(dev_priv, pipe_config);
+ if (crtc_state->ips_force_disable)
+ return false;
+
+ /* IPS should be fine as long as at least one plane is enabled. */
+ if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
+ return false;
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) &&
+ crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
+ return false;
+
+ return true;
}
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
@@ -6219,7 +6371,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* GDG double wide on either pipe, otherwise pipe A only */
- return INTEL_INFO(dev_priv)->gen < 4 &&
+ return INTEL_GEN(dev_priv) < 4 &&
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
@@ -6316,9 +6468,18 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
- pipe_config->pipe_src_w &= ~1;
+ if (pipe_config->pipe_src_w & 1) {
+ if (pipe_config->double_wide) {
+ DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
+ return -EINVAL;
+ }
+
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
+ intel_is_dual_link_lvds(dev)) {
+ DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
+ return -EINVAL;
+ }
+ }
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
@@ -6329,9 +6490,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
- if (HAS_IPS(dev_priv))
- hsw_compute_ips_config(crtc, pipe_config);
-
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
@@ -7388,15 +7546,16 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ enum pipe pipe = crtc->pipe;
u32 val, base, offset;
- int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- val = I915_READ(DSPCNTR(plane));
- if (!(val & DISPLAY_PLANE_ENABLE))
+ if (!plane->get_hw_state(plane))
return;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
@@ -7409,6 +7568,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
+ val = I915_READ(DSPCNTR(i9xx_plane));
+
if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
@@ -7420,14 +7581,17 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fourcc = i9xx_format_to_fourcc(pixel_format);
fb->format = drm_format_info(fourcc);
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ offset = I915_READ(DSPOFFSET(i9xx_plane));
+ base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
- offset = I915_READ(DSPTILEOFF(plane));
+ offset = I915_READ(DSPTILEOFF(i9xx_plane));
else
- offset = I915_READ(DSPLINOFF(plane));
- base = I915_READ(DSPSURF(plane)) & 0xfffff000;
+ offset = I915_READ(DSPLINOFF(i9xx_plane));
+ base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
} else {
- base = I915_READ(DSPADDR(plane));
+ base = I915_READ(DSPADDR(i9xx_plane));
}
plane_config->base = base;
@@ -7435,15 +7599,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 16) & 0xfff) + 1;
fb->height = ((val >> 0) & 0xfff) + 1;
- val = I915_READ(DSPSTRIDE(pipe));
+ val = I915_READ(DSPSTRIDE(i9xx_plane));
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe_name(pipe), plane, fb->width, fb->height,
+ DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
@@ -7619,7 +7783,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
break;
case INTEL_OUTPUT_EDP:
has_panel = true;
- if (enc_to_dig_port(&encoder->base)->port == PORT_A)
+ if (encoder->port == PORT_A)
has_cpu_edp = true;
break;
default:
@@ -8094,7 +8258,7 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *config = intel_crtc->config;
- if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
+ if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
u32 val = 0;
switch (intel_crtc->config->pipe_bpp) {
@@ -8412,13 +8576,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 val, base, offset, stride_mult, tiling;
- int pipe = crtc->pipe;
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = crtc->pipe;
+ u32 val, base, offset, stride_mult, tiling, alpha;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
+ if (!plane->get_hw_state(plane))
+ return;
+
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -8429,14 +8598,22 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
- val = I915_READ(PLANE_CTL(pipe, 0));
- if (!(val & PLANE_CTL_ENABLE))
- goto error;
+ val = I915_READ(PLANE_CTL(pipe, plane_id));
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
+ else
+ pixel_format = val & PLANE_CTL_FORMAT_MASK;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
+ alpha &= PLANE_COLOR_ALPHA_MASK;
+ } else {
+ alpha = val & PLANE_CTL_ALPHA_MASK;
+ }
- pixel_format = val & PLANE_CTL_FORMAT_MASK;
fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
+ val & PLANE_CTL_ORDER_RGBX, alpha);
fb->format = drm_format_info(fourcc);
tiling = val & PLANE_CTL_TILED_MASK;
@@ -8465,16 +8642,16 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
- base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
+ base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
- offset = I915_READ(PLANE_OFFSET(pipe, 0));
+ offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
- val = I915_READ(PLANE_SIZE(pipe, 0));
+ val = I915_READ(PLANE_SIZE(pipe, plane_id));
fb->height = ((val >> 16) & 0xfff) + 1;
fb->width = ((val >> 0) & 0x1fff) + 1;
- val = I915_READ(PLANE_STRIDE(pipe, 0));
+ val = I915_READ(PLANE_STRIDE(pipe, plane_id));
stride_mult = intel_fb_stride_alignment(fb, 0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
@@ -8482,8 +8659,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe_name(pipe), fb->width, fb->height,
+ DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
@@ -8518,74 +8695,6 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
}
}
-static void
-ironlake_get_initial_plane_config(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 val, base, offset;
- int pipe = crtc->pipe;
- int fourcc, pixel_format;
- unsigned int aligned_height;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
-
- val = I915_READ(DSPCNTR(pipe));
- if (!(val & DISPLAY_PLANE_ENABLE))
- return;
-
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb) {
- DRM_DEBUG_KMS("failed to alloc fb\n");
- return;
- }
-
- fb = &intel_fb->base;
-
- fb->dev = dev;
-
- if (INTEL_GEN(dev_priv) >= 4) {
- if (val & DISPPLANE_TILED) {
- plane_config->tiling = I915_TILING_X;
- fb->modifier = I915_FORMAT_MOD_X_TILED;
- }
- }
-
- pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
- fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->format = drm_format_info(fourcc);
-
- base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- offset = I915_READ(DSPOFFSET(pipe));
- } else {
- if (plane_config->tiling)
- offset = I915_READ(DSPTILEOFF(pipe));
- else
- offset = I915_READ(DSPLINOFF(pipe));
- }
- plane_config->base = base;
-
- val = I915_READ(PIPESRC(pipe));
- fb->width = ((val >> 16) & 0xfff) + 1;
- fb->height = ((val >> 0) & 0xfff) + 1;
-
- val = I915_READ(DSPSTRIDE(pipe));
- fb->pitches[0] = val & 0xffffffc0;
-
- aligned_height = intel_fb_align_height(fb, 0, fb->height);
-
- plane_config->size = fb->pitches[0] * aligned_height;
-
- DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- pipe_name(pipe), fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
-
- plane_config->fb = intel_fb;
-}
-
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8843,7 +8952,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
/*
@@ -9217,9 +9328,18 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_pfit_config(crtc, pipe_config);
}
- if (IS_HASWELL(dev_priv))
- pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
- (I915_READ(IPS_CTL) & IPS_ENABLE);
+ if (hsw_crtc_supports_ips(crtc)) {
+ if (IS_HASWELL(dev_priv))
+ pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
+ else {
+ /*
+			 * We cannot read out the IPS state on Broadwell, so set
+			 * it to true so that we can force it to a defined state
+			 * on the first commit.
+ */
+ pipe_config->ips_enabled = true;
+ }
+ }
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
@@ -9300,11 +9420,11 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
u32 offset;
int ret;
- ret = drm_plane_helper_check_state(&plane_state->base,
- &plane_state->clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ ret = drm_atomic_helper_check_plane_state(&plane_state->base,
+ &crtc_state->base,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
if (ret)
return ret;
@@ -9475,7 +9595,8 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
if (HAS_DDI(dev_priv))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
switch (plane_state->base.crtc_w) {
case 64:
@@ -9706,111 +9827,27 @@ err:
return ERR_PTR(ret);
}
-static u32
-intel_framebuffer_pitch_for_width(int width, int bpp)
-{
- u32 pitch = DIV_ROUND_UP(width * bpp, 8);
- return ALIGN(pitch, 64);
-}
-
-static u32
-intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp)
-{
- u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
- return PAGE_ALIGN(pitch * mode->vdisplay);
-}
-
-static struct drm_framebuffer *
-intel_framebuffer_create_for_mode(struct drm_device *dev,
- const struct drm_display_mode *mode,
- int depth, int bpp)
-{
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-
- obj = i915_gem_object_create(to_i915(dev),
- intel_framebuffer_size_for_mode(mode, bpp));
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- mode_cmd.width = mode->hdisplay;
- mode_cmd.height = mode->vdisplay;
- mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
- bpp);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb))
- i915_gem_object_put(obj);
-
- return fb;
-}
-
-static struct drm_framebuffer *
-mode_fits_in_fbdev(struct drm_device *dev,
- const struct drm_display_mode *mode)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj;
- struct drm_framebuffer *fb;
-
- if (!dev_priv->fbdev)
- return NULL;
-
- if (!dev_priv->fbdev->fb)
- return NULL;
-
- obj = dev_priv->fbdev->fb->obj;
- BUG_ON(!obj);
-
- fb = &dev_priv->fbdev->fb->base;
- if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->format->cpp[0] * 8))
- return NULL;
-
- if (obj->base.size < mode->vdisplay * fb->pitches[0])
- return NULL;
-
- drm_framebuffer_get(fb);
- return fb;
-#else
- return NULL;
-#endif
-}
-
-static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_framebuffer *fb,
- int x, int y)
+static int intel_modeset_disable_planes(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
{
+ struct drm_plane *plane;
struct drm_plane_state *plane_state;
- int hdisplay, vdisplay;
- int ret;
-
- plane_state = drm_atomic_get_plane_state(state, crtc->primary);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- if (mode)
- drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
- else
- hdisplay = vdisplay = 0;
+ int ret, i;
- ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
+ ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
- drm_atomic_set_fb_for_plane(plane_state, fb);
- plane_state->crtc_x = 0;
- plane_state->crtc_y = 0;
- plane_state->crtc_w = hdisplay;
- plane_state->crtc_h = vdisplay;
- plane_state->src_x = x << 16;
- plane_state->src_y = y << 16;
- plane_state->src_w = hdisplay << 16;
- plane_state->src_h = vdisplay << 16;
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ if (plane_state->crtc != crtc)
+ continue;
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
return 0;
}
@@ -9828,7 +9865,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
@@ -9896,10 +9932,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
found:
intel_crtc = to_intel_crtc(crtc);
- ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
- if (ret)
- goto fail;
-
state = drm_atomic_state_alloc(dev);
restore_state = drm_atomic_state_alloc(dev);
if (!state || !restore_state) {
@@ -9931,39 +9963,17 @@ found:
if (!mode)
mode = &load_detect_mode;
- /* We need a framebuffer large enough to accommodate all accesses
- * that the plane may generate whilst we perform load detection.
- * We can not rely on the fbcon either being present (we get called
- * during its initialisation to detect all boot displays, or it may
- * not even exist) or that it is large enough to satisfy the
- * requested mode.
- */
- fb = mode_fits_in_fbdev(dev, mode);
- if (fb == NULL) {
- DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
- fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
- } else
- DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
- if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- ret = PTR_ERR(fb);
- goto fail;
- }
-
- ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
- drm_framebuffer_put(fb);
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
goto fail;
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ ret = intel_modeset_disable_planes(state, crtc);
if (ret)
goto fail;
ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
if (!ret)
ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
- if (!ret)
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
if (ret) {
DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
goto fail;
@@ -10472,6 +10482,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
pipe_config);
}
+ if (HAS_IPS(dev_priv))
+ pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
+
return ret;
}
@@ -10601,7 +10614,7 @@ static const char * const output_type_str[] = {
OUTPUT_TYPE(DP),
OUTPUT_TYPE(EDP),
OUTPUT_TYPE(DSI),
- OUTPUT_TYPE(UNKNOWN),
+ OUTPUT_TYPE(DDI),
OUTPUT_TYPE(DP_MST),
};
@@ -10748,6 +10761,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
struct drm_connector_list_iter conn_iter;
unsigned int used_ports = 0;
unsigned int used_mst_ports = 0;
+ bool ret = true;
/*
* Walk the connector list instead of the encoder
@@ -10772,23 +10786,23 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
switch (encoder->type) {
unsigned int port_mask;
- case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
- port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
+ port_mask = 1 << encoder->port;
/* the same port mustn't appear more than once */
if (used_ports & port_mask)
- return false;
+ ret = false;
used_ports |= port_mask;
break;
case INTEL_OUTPUT_DP_MST:
used_mst_ports |=
- 1 << enc_to_mst(&encoder->base)->primary->port;
+ 1 << encoder->port;
break;
default:
break;
@@ -10800,7 +10814,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
if (used_ports & used_mst_ports)
return false;
- return true;
+ return ret;
}
static void
@@ -10905,7 +10919,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* Determine output_types before calling the .compute_config()
* hooks so that the hooks can use this information safely.
*/
- pipe_config->output_types |= 1 << encoder->type;
+ if (encoder->compute_output_type)
+ pipe_config->output_types |=
+ BIT(encoder->compute_output_type(encoder, pipe_config,
+ connector_state));
+ else
+ pipe_config->output_types |= BIT(encoder->type);
}
encoder_retry:
@@ -10969,31 +10988,6 @@ fail:
return ret;
}
-static void
-intel_modeset_update_crtc_state(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- int i;
-
- /* Double check state. */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
-
- /*
- * Update legacy state to satisfy fbc code. This can
- * be removed when fbc uses the atomic state.
- */
- if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
- struct drm_plane_state *plane_state = crtc->primary->state;
-
- crtc->primary->fb = plane_state->fb;
- crtc->x = plane_state->src_x >> 16;
- crtc->y = plane_state->src_y >> 16;
- }
- }
-}
-
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;
@@ -11065,24 +11059,17 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
- char *level;
- unsigned int category;
struct va_format vaf;
va_list args;
- if (adjust) {
- level = KERN_DEBUG;
- category = DRM_UT_KMS;
- } else {
- level = KERN_ERR;
- category = DRM_UT_NONE;
- }
-
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
- drm_printk(level, category, "mismatch in %s %pV", name, &vaf);
+ if (adjust)
+ drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
+ else
+ drm_err("mismatch in %s %pV", name, &vaf);
va_end(args);
}
@@ -11094,6 +11081,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
bool adjust)
{
bool ret = true;
+ bool fixup_inherited = adjust &&
+ (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
+ !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
@@ -11113,6 +11103,31 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
ret = false; \
}
+#define PIPE_CONF_CHECK_BOOL(name) \
+ if (current_config->name != pipe_config->name) { \
+ pipe_config_err(adjust, __stringify(name), \
+ "(expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
+ ret = false; \
+ }
+
+/*
+ * Checks state where we only read out whether the feature is enabled, but
+ * not the entire state itself (like full infoframes or ELD for audio).
+ * These states require a full modeset on bootup to fix up.
+ */
+#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) \
+ if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
+ PIPE_CONF_CHECK_BOOL(name); \
+ } else { \
+ pipe_config_err(adjust, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
+ ret = false; \
+ }
+
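[Editor's aside, not part of the patch: the new PIPE_CONF_CHECK_BOOL_INCOMPLETE macro falls back to the exact boolean compare unless the state was inherited from the BIOS and either side reports the feature enabled; in that case only the enable bit was read out, not the full state, so verification is skipped and a full modeset is forced instead. A minimal standalone C sketch of that decision, using made-up names (check_bool_incomplete, cur, hw), could look like this:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative mirror of PIPE_CONF_CHECK_BOOL_INCOMPLETE (hypothetical name):
 * returns true when the two states may be treated as matching, false when a
 * full modeset should be forced.
 */
static bool check_bool_incomplete(bool fixup_inherited, bool cur, bool hw)
{
	/* Normal case: compare the enable bits exactly. */
	if (!fixup_inherited || (!cur && !hw))
		return cur == hw;

	/*
	 * Inherited (BIOS) state with the feature possibly enabled: only the
	 * enable bit was read out, not the full state, so force a modeset.
	 */
	return false;
}

int main(void)
{
	printf("%d\n", check_bool_incomplete(false, true, true));  /* 1: exact match */
	printf("%d\n", check_bool_incomplete(true, false, false)); /* 1: both disabled */
	printf("%d\n", check_bool_incomplete(true, true, true));   /* 0: force modeset */
	return 0;
}
]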
#define PIPE_CONF_CHECK_P(name) \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
@@ -11198,7 +11213,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(cpu_transcoder);
- PIPE_CONF_CHECK_I(has_pch_encoder);
+ PIPE_CONF_CHECK_BOOL(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
PIPE_CONF_CHECK_M_N(fdi_m_n);
@@ -11230,17 +11245,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
- PIPE_CONF_CHECK_I(has_hdmi_sink);
+ PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- PIPE_CONF_CHECK_I(limited_color_range);
+ PIPE_CONF_CHECK_BOOL(limited_color_range);
- PIPE_CONF_CHECK_I(hdmi_scrambling);
- PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
- PIPE_CONF_CHECK_I(has_infoframe);
- PIPE_CONF_CHECK_I(ycbcr420);
+ PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
+ PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
+ PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(ycbcr420);
- PIPE_CONF_CHECK_I(has_audio);
+ PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
@@ -11266,7 +11281,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
- PIPE_CONF_CHECK_I(pch_pfit.enabled);
+ PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_X(pch_pfit.pos);
PIPE_CONF_CHECK_X(pch_pfit.size);
@@ -11276,11 +11291,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
}
- /* BDW+ don't expose a synchronous way to read the state */
- if (IS_HASWELL(dev_priv))
- PIPE_CONF_CHECK_I(ips_enabled);
-
- PIPE_CONF_CHECK_I(double_wide);
+ PIPE_CONF_CHECK_BOOL(double_wide);
PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
@@ -11314,8 +11325,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ PIPE_CONF_CHECK_I(min_voltage_level);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
+#undef PIPE_CONF_CHECK_BOOL
+#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
@@ -11582,10 +11597,8 @@ verify_crtc_state(struct drm_crtc *crtc,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
- if (active) {
- pipe_config->output_types |= 1 << encoder->type;
+ if (active)
encoder->get_config(encoder, pipe_config);
- }
}
intel_crtc_compute_pixel_rate(pipe_config);
@@ -11607,6 +11620,18 @@ verify_crtc_state(struct drm_crtc *crtc,
}
static void
+intel_verify_planes(struct intel_atomic_state *state)
+{
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane,
+ plane_state, i)
+ assert_plane(plane, plane_state->base.visible);
+}
+
+static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct drm_crtc *crtc,
@@ -11956,16 +11981,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* holding all the crtc locks, even if we don't end up
* touching the hardware
*/
- if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
- &intel_state->cdclk.logical)) {
+ if (intel_cdclk_changed(&dev_priv->cdclk.logical,
+ &intel_state->cdclk.logical)) {
ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;
}
/* All pipes must be switched off while we change the cdclk. */
- if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
+ if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
@@ -11974,6 +11999,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
intel_state->cdclk.logical.cdclk,
intel_state->cdclk.actual.cdclk);
+ DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
+ intel_state->cdclk.logical.voltage_level,
+ intel_state->cdclk.actual.voltage_level);
} else {
to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
}
@@ -12018,6 +12046,14 @@ static int intel_atomic_check(struct drm_device *dev,
int ret, i;
bool any_ms = false;
+ /* Catch I915_MODE_FLAG_INHERITED */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ crtc_state, i) {
+ if (crtc_state->mode.private_flags !=
+ old_crtc_state->mode.private_flags)
+ crtc_state->mode_changed = true;
+ }
+
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
@@ -12026,10 +12062,6 @@ static int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
- /* Catch I915_MODE_FLAG_INHERITED */
- if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags)
- crtc_state->mode_changed = true;
-
if (!needs_modeset(crtc_state))
continue;
@@ -12038,13 +12070,6 @@ static int intel_atomic_check(struct drm_device *dev,
continue;
}
- /* FIXME: For only active_changed we shouldn't need to do any
- * state recomputation at all. */
-
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- return ret;
-
ret = intel_modeset_pipe_config(crtc, pipe_config);
if (ret) {
intel_dump_pipe_config(to_intel_crtc(crtc),
@@ -12063,10 +12088,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (needs_modeset(crtc_state))
any_ms = true;
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- return ret;
-
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
needs_modeset(crtc_state) ?
"[modeset]" : "[fastset]");
@@ -12085,7 +12106,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
return ret;
- intel_fbc_choose_crtc(dev_priv, state);
+ intel_fbc_choose_crtc(dev_priv, intel_state);
return calc_watermark_data(state);
}
@@ -12100,7 +12121,7 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
if (!dev->max_vblank_count)
- return drm_crtc_accurate_vblank_count(&crtc->base);
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
return dev->driver->get_vblank_counter(dev, crtc->pipe);
}
@@ -12325,9 +12346,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
}
}
- /* Only after disabling all output pipelines that will be changed can we
- * update the the output configuration. */
- intel_modeset_update_crtc_state(state);
+ /* FIXME: Eventually get rid of our intel_crtc->config pointer */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
if (intel_state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
@@ -12396,6 +12417,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
+ if (intel_state->modeset)
+ intel_verify_planes(intel_state);
+
if (intel_state->modeset && intel_can_enable_sagv(state))
intel_enable_sagv(dev_priv);
@@ -12542,6 +12566,9 @@ static int intel_atomic_commit(struct drm_device *dev,
if (intel_state->modeset) {
memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
sizeof(intel_state->min_cdclk));
+ memcpy(dev_priv->min_voltage_level,
+ intel_state->min_voltage_level,
+ sizeof(intel_state->min_voltage_level));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->cdclk.logical = intel_state->cdclk.logical;
dev_priv->cdclk.actual = intel_state->cdclk.actual;
@@ -12578,17 +12605,23 @@ struct wait_rps_boost {
struct wait_queue_entry wait;
struct drm_crtc *crtc;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
};
static int do_rps_boost(struct wait_queue_entry *_wait,
unsigned mode, int sync, void *key)
{
struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct drm_i915_gem_request *rq = wait->request;
+ struct i915_request *rq = wait->request;
- gen6_rps_boost(rq, NULL);
- i915_gem_request_put(rq);
+ /*
+	 * If we missed the vblank, but the request is already running, it
+ * is reasonable to assume that it will complete before the next
+ * vblank without our intervention, so leave RPS alone.
+ */
+ if (!i915_request_started(rq))
+ gen6_rps_boost(rq, NULL);
+ i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -12626,10 +12659,46 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
+static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct i915_vma *vma;
+
+ if (plane->id == PLANE_CURSOR &&
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ const int align = intel_cursor_alignment(dev_priv);
+
+ return i915_gem_object_attach_phys(obj, align);
+ }
+
+ vma = intel_pin_and_fence_fb_obj(fb,
+ plane_state->base.rotation,
+ intel_plane_uses_fence(plane_state),
+ &plane_state->flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->vma = vma;
+
+ return 0;
+}
+
+static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+{
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(&old_plane_state->vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+}
+
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
- * @fb: framebuffer to prepare for presentation
+ * @new_state: the plane state being prepared
*
* Prepares a framebuffer for usage on a display plane. Generally this
* involves pinning the underlying object and updating the frontbuffer tracking
@@ -12700,20 +12769,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
}
- if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
- const int align = intel_cursor_alignment(dev_priv);
-
- ret = i915_gem_object_attach_phys(obj, align);
- } else {
- struct i915_vma *vma;
-
- vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (!IS_ERR(vma))
- to_intel_plane_state(new_state)->vma = vma;
- else
- ret = PTR_ERR(vma);
- }
+ ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
@@ -12747,7 +12803,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
/**
* intel_cleanup_plane_fb - Cleans up an fb after plane use
* @plane: drm plane to clean up for
- * @fb: old framebuffer that was on plane
+ * @old_state: the state from the previous modeset
*
* Cleans up a framebuffer that has just been removed from a plane.
*
@@ -12757,15 +12813,12 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct i915_vma *vma;
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
/* Should only be called after a successful intel_prepare_plane_fb()! */
- vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
- if (vma) {
- mutex_lock(&plane->dev->struct_mutex);
- intel_unpin_fb_vma(vma);
- mutex_unlock(&plane->dev->struct_mutex);
- }
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_plane_unpin_fb(to_intel_plane_state(old_state));
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
int
@@ -12783,7 +12836,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
- if (IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
max_dotclk *= 2;
if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
@@ -12815,17 +12868,17 @@ intel_check_primary_plane(struct intel_plane *plane,
if (INTEL_GEN(dev_priv) >= 9) {
/* use scaler when colorkey is not required */
- if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+ if (!state->ckey.flags) {
min_scale = 1;
max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
}
can_position = true;
}
- ret = drm_plane_helper_check_state(&state->base,
- &state->clip,
- min_scale, max_scale,
- can_position, true);
+ ret = drm_atomic_helper_check_plane_state(&state->base,
+ &crtc_state->base,
+ min_scale, max_scale,
+ can_position, true);
if (ret)
return ret;
@@ -12833,7 +12886,7 @@ intel_check_primary_plane(struct intel_plane *plane,
return 0;
if (INTEL_GEN(dev_priv) >= 9) {
- ret = skl_check_plane_surface(state);
+ ret = skl_check_plane_surface(crtc_state, state);
if (ret)
return ret;
@@ -12846,6 +12899,9 @@ intel_check_primary_plane(struct intel_plane *plane,
state->ctl = i9xx_plane_ctl(crtc_state, state);
}
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ state->color_ctl = glk_plane_color_ctl(crtc_state, state);
+
return 0;
}
@@ -12890,6 +12946,7 @@ out:
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
@@ -12897,6 +12954,20 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
intel_pipe_update_end(new_crtc_state);
+
+ if (new_crtc_state->update_pipe &&
+ !needs_modeset(&new_crtc_state->base) &&
+ old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED) {
+ if (!IS_GEN2(dev_priv))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, intel_crtc->pipe, true);
+
+ if (new_crtc_state->has_pch_encoder) {
+ enum pipe pch_transcoder =
+ intel_crtc_pch_transcoder(intel_crtc);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ }
+ }
}
/**
@@ -12993,8 +13064,6 @@ static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
return i965_mod_supported(format, modifier);
else
return i8xx_mod_supported(format, modifier);
-
- unreachable();
}
static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
@@ -13034,7 +13103,6 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *old_fb;
struct drm_crtc_state *crtc_state = crtc->state;
- struct i915_vma *old_vma, *vma;
/*
* When crtc is inactive or there is a modeset pending,
@@ -13093,25 +13161,9 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (ret)
goto out_free;
- if (INTEL_INFO(dev_priv)->cursor_needs_physical) {
- int align = intel_cursor_alignment(dev_priv);
-
- ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align);
- if (ret) {
- DRM_DEBUG_KMS("failed to attach phys object\n");
- goto out_unlock;
- }
- } else {
- vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation);
- if (IS_ERR(vma)) {
- DRM_DEBUG_KMS("failed to pin object\n");
-
- ret = PTR_ERR(vma);
- goto out_unlock;
- }
-
- to_intel_plane_state(new_plane_state)->vma = vma;
- }
+ ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
+ if (ret)
+ goto out_unlock;
old_fb = old_plane_state->fb;
@@ -13131,9 +13183,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
}
- old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma);
- if (old_vma)
- intel_unpin_fb_vma(old_vma);
+ intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -13161,6 +13211,32 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.format_mod_supported = intel_cursor_plane_format_mod_supported,
};
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return i9xx_plane == PLANE_A; /* tied to pipe A */
+ else if (IS_IVYBRIDGE(dev_priv))
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+ else
+ return i9xx_plane == PLANE_A;
+}
+
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -13198,25 +13274,33 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
* port is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
- primary->plane = (enum plane) !pipe;
+ primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
- primary->plane = (enum plane) pipe;
+ primary->i9xx_plane = (enum i9xx_plane_id) pipe;
primary->id = PLANE_PRIMARY;
- primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
+ primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ primary->has_fbc = skl_plane_has_fbc(dev_priv,
+ primary->pipe,
+ primary->id);
+ else
+ primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
+ primary->i9xx_plane);
+
+ if (primary->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
+ }
+
primary->check_plane = intel_check_primary_plane;
- if (INTEL_GEN(dev_priv) >= 10) {
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
- modifiers = skl_format_modifiers_ccs;
- primary->update_plane = skl_update_plane;
- primary->disable_plane = skl_disable_plane;
- primary->get_hw_state = skl_plane_get_hw_state;
- } else if (INTEL_GEN(dev_priv) >= 9) {
- intel_primary_formats = skl_primary_formats;
- num_formats = ARRAY_SIZE(skl_primary_formats);
- if (pipe < PIPE_C)
+ if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
modifiers = skl_format_modifiers_ccs;
else
modifiers = skl_format_modifiers_noccs;
@@ -13229,16 +13313,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(i965_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->update_plane = i9xx_update_primary_plane;
- primary->disable_plane = i9xx_disable_primary_plane;
+ primary->update_plane = i9xx_update_plane;
+ primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->update_plane = i9xx_update_primary_plane;
- primary->disable_plane = i9xx_disable_primary_plane;
+ primary->update_plane = i9xx_update_plane;
+ primary->disable_plane = i9xx_disable_plane;
primary->get_hw_state = i9xx_plane_get_hw_state;
}
@@ -13262,11 +13346,17 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_primary_formats, num_formats,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
- "plane %c", plane_name(primary->plane));
+ "plane %c",
+ plane_name(primary->i9xx_plane));
if (ret)
goto fail;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 10) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
@@ -13286,6 +13376,15 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
DRM_MODE_ROTATE_0,
supported_rotations);
+ if (INTEL_GEN(dev_priv) >= 9)
+ drm_plane_create_color_properties(&primary->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
return primary;
@@ -13322,9 +13421,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
cursor->can_scale = false;
cursor->max_downscale = 1;
cursor->pipe = pipe;
- cursor->plane = pipe;
+ cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
cursor->id = PLANE_CURSOR;
- cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
+ cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
cursor->update_plane = i845_update_cursor;
@@ -13450,14 +13549,13 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
goto fail;
intel_crtc->pipe = pipe;
- intel_crtc->plane = primary->plane;
/* initialize shared scalers */
intel_crtc_init_scalers(intel_crtc, crtc_state);
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
- dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
+ dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] = intel_crtc;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -13491,8 +13589,8 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
return to_intel_crtc(connector->base.state->crtc)->pipe;
}
-int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file)
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_crtc *drmmode_crtc;
@@ -13641,7 +13739,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (found || IS_GEN9_BC(dev_priv))
intel_ddi_init(dev_priv, PORT_A);
- /* DDI B, C and D detection is indicated by the SFUSE_STRAP
+ /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
* register */
found = I915_READ(SFUSE_STRAP);
@@ -13651,6 +13749,8 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_C);
if (found & SFUSE_STRAP_DDID_DETECTED)
intel_ddi_init(dev_priv, PORT_D);
+ if (found & SFUSE_STRAP_DDIF_DETECTED)
+ intel_ddi_init(dev_priv, PORT_F);
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
@@ -13938,7 +14038,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
* gen2/3 display engine uses the fence if present,
* so the tiling mode must match the fb modifier exactly.
*/
- if (INTEL_INFO(dev_priv)->gen < 4 &&
+ if (INTEL_GEN(dev_priv) < 4 &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
goto err;
@@ -14107,10 +14207,37 @@ static void intel_atomic_state_free(struct drm_atomic_state *state)
kfree(state);
}
+static enum drm_mode_status
+intel_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ if (mode->vscan > 1)
+ return MODE_NO_VSCAN;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (mode->flags & DRM_MODE_FLAG_HSKEW)
+ return MODE_H_ILLEGAL;
+
+ if (mode->flags & (DRM_MODE_FLAG_CSYNC |
+ DRM_MODE_FLAG_NCSYNC |
+ DRM_MODE_FLAG_PCSYNC))
+ return MODE_HSYNC;
+
+ if (mode->flags & (DRM_MODE_FLAG_BCAST |
+ DRM_MODE_FLAG_PIXMUX |
+ DRM_MODE_FLAG_CLKDIV2))
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.get_format_info = intel_get_format_info,
.output_poll_changed = intel_fbdev_output_poll_changed,
+ .mode_valid = intel_mode_valid,
.atomic_check = intel_atomic_check,
.atomic_commit = intel_atomic_commit,
.atomic_state_alloc = intel_atomic_state_alloc,
@@ -14126,7 +14253,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
intel_init_cdclk_hooks(dev_priv);
- if (INTEL_INFO(dev_priv)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
skylake_get_initial_plane_config;
@@ -14137,7 +14264,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
} else if (HAS_DDI(dev_priv)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
- ironlake_get_initial_plane_config;
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
@@ -14145,7 +14272,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_initial_plane_config =
- ironlake_get_initial_plane_config;
+ i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock =
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
@@ -14384,6 +14511,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
@@ -14463,6 +14591,8 @@ retry:
cs->wm.need_postvbl_update = true;
dev_priv->display.optimize_watermarks(intel_state, cs);
+
+ to_intel_crtc_state(crtc->state)->wm = cs->wm;
}
put_state:
@@ -14472,6 +14602,22 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
+static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
+{
+ if (IS_GEN5(dev_priv)) {
+ u32 fdi_pll_clk =
+ I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+
+ dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+ } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ dev_priv->fdi_pll_freq = 270000;
+ } else {
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
+}
+
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -14544,7 +14690,7 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
}
- dev->mode_config.fb_base = ggtt->mappable_base;
+ dev->mode_config.fb_base = ggtt->gmadr.start;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev_priv)->num_pipes,
@@ -14561,6 +14707,7 @@ int intel_modeset_init(struct drm_device *dev)
}
intel_shared_dpll_init(dev);
+ intel_update_fdi_pll_freq(dev_priv);
intel_update_czclk(dev_priv);
intel_modeset_init_hw(dev);
@@ -14612,6 +14759,7 @@ int intel_modeset_init(struct drm_device *dev)
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
.m1 = 18,
@@ -14675,6 +14823,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
POSTING_READ(PIPECONF(pipe));
+
+ intel_wait_for_pipe_scanline_moving(crtc);
}
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -14700,11 +14850,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
}
static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
- struct intel_plane *primary)
+ struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum plane plane = primary->plane;
- u32 val = I915_READ(DSPCNTR(plane));
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 val = I915_READ(DSPCNTR(i9xx_plane));
return (val & DISPLAY_PLANE_ENABLE) == 0 ||
(val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
@@ -14768,7 +14918,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
- if (!transcoder_is_dsi(cpu_transcoder)) {
+ if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
I915_WRITE(reg,
@@ -14866,8 +15016,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
- /* Enabled encoders without active connectors will be fixed in
- * the crtc fixup. */
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -14980,7 +15128,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
- crtc_state->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc_state);
} else {
encoder->base.crtc = NULL;
@@ -15059,6 +15206,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
+ dev_priv->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
@@ -15082,6 +15231,23 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
}
}
+static void intel_early_display_was(struct drm_i915_private *dev_priv)
+{
+ /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ DARBF_GATING_DIS);
+
+ if (IS_HASWELL(dev_priv)) {
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ }
+}
+
/* Scan out the current hw modeset state,
 * and sanitize it to the current state
*/
@@ -15095,15 +15261,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
int i;
- if (IS_HASWELL(dev_priv)) {
- /*
- * WaRsPkgCStateDisplayPMReq:hsw
- * System hang if this isn't done before disabling all planes!
- */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- }
-
+ intel_early_display_was(dev_priv);
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
@@ -15197,17 +15355,6 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
-void intel_modeset_gem_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_init_gt_powersave(dev_priv);
-
- intel_init_clock_gating(dev_priv);
-
- intel_setup_overlay(dev_priv);
-}
-
int intel_connector_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -15236,14 +15383,15 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- /* First disable polling... */
- drm_kms_helper_poll_fini(dev);
-
- /* Then kill the work that may have been queued by hpd. */
+ /* Kill all the work that may have been queued by hpd. */
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
+ if (connector->hdcp_shim) {
+ cancel_delayed_work_sync(&connector->hdcp_check_work);
+ cancel_work_sync(&connector->hdcp_prop_work);
+ }
}
drm_connector_list_iter_end(&conn_iter);
}
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
new file mode 100644
index 0000000..4e7418b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+enum pipe {
+ INVALID_PIPE = -1,
+
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ _PIPE_EDP,
+
+ I915_MAX_PIPES = _PIPE_EDP
+};
+
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ TRANSCODER_DSI_A,
+ TRANSCODER_DSI_C,
+
+ I915_MAX_TRANSCODERS
+};
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+ switch (transcoder) {
+ case TRANSCODER_A:
+ return "A";
+ case TRANSCODER_B:
+ return "B";
+ case TRANSCODER_C:
+ return "C";
+ case TRANSCODER_EDP:
+ return "EDP";
+ case TRANSCODER_DSI_A:
+ return "DSI A";
+ case TRANSCODER_DSI_C:
+ return "DSI C";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+ return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
+
+/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
+ */
+enum i9xx_plane_id {
+ PLANE_A,
+ PLANE_B,
+ PLANE_C,
+};
+
+#define plane_name(p) ((p) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (e.g. PLANE_CTL(), PS_PLANE_SEL(), etc.), so adjust with care.
+ */
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
+ PLANE_SPRITE2,
+ PLANE_CURSOR,
+
+ I915_MAX_PLANES,
+};
+
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+ for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
+enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
+};
+
+enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1,
+ DPIO_PHY2,
+};
+
+#define I915_NUM_PHYS_VLV 2
+
+enum aux_ch {
+ AUX_CH_A,
+ AUX_CH_B,
+ AUX_CH_C,
+ AUX_CH_D,
+ _AUX_CH_E, /* does not exist */
+ AUX_CH_F,
+};
+
+#define aux_ch_name(a) ((a) + 'A')
+
+enum intel_display_power_domain {
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
+ POWER_DOMAIN_PORT_DDI_A_LANES,
+ POWER_DOMAIN_PORT_DDI_B_LANES,
+ POWER_DOMAIN_PORT_DDI_C_LANES,
+ POWER_DOMAIN_PORT_DDI_D_LANES,
+ POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_F_LANES,
+ POWER_DOMAIN_PORT_DDI_A_IO,
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_C_IO,
+ POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_E_IO,
+ POWER_DOMAIN_PORT_DDI_F_IO,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_PLLS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_AUX_F,
+ POWER_DOMAIN_AUX_IO_A,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+ u32 tu;
+ u32 gmch_m;
+ u32 gmch_n;
+ u32 link_m;
+ u32 link_n;
+};
+
+#define for_each_pipe(__dev_priv, __p) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for_each_if((__mask) & BIT(__p))
+
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
+ for ((__p) = 0; \
+ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p)++)
+
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if((__ports_mask) & BIT(__port))
+
+#define for_each_crtc(dev, crtc) \
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+#define for_each_intel_plane(dev, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head)
+
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((plane_mask) & \
+ BIT(drm_plane_index(&intel_plane->base)))
+
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((intel_plane)->pipe == (intel_crtc)->pipe)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
+
+#define for_each_intel_encoder(dev, intel_encoder) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head)
+
+#define for_each_intel_connector_iter(intel_connector, iter) \
+ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ for_each_if((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+ for_each_if((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ for_each_if(BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_rev(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if((__power_well)->domains & (__domain_mask))
+
+#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_rev(__dev_priv, __power_well) \
+ for_each_if((__power_well)->domains & (__domain_mask))
+
+#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n,
+ bool reduce_m_n);
+
+#endif
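
[Editor's note] The iterator macros added to intel_drv.h above are consumed like ordinary for-loops. The sketch below is illustrative only and is not part of the patch: count_enabled_crtcs() is a hypothetical helper, and the macro arguments and the new_crtc_state->base.active field are assumed to match the definitions above.

static int count_enabled_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	int i, count = 0;

	/* Walk every CRTC carried in this atomic state... */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		/* ...and count the ones that will be active after the commit. */
		if (new_crtc_state->base.active)
			count++;
	}

	return count;
}
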
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 158438b..9a4a51e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,7 +36,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_hdcp.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -94,15 +96,6 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
-static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
- 324000, 432000, 540000 };
-static const int skl_rates[] = { 162000, 216000, 270000,
- 324000, 432000, 540000 };
-static const int cnl_rates[] = { 162000, 216000, 270000,
- 324000, 432000, 540000,
- 648000, 810000 };
-static const int default_rates[] = { 162000, 270000, 540000 };
-
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -129,30 +122,57 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
-static void intel_dp_link_down(struct intel_dp *intel_dp);
+static void intel_dp_link_down(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
-static void vlv_steal_power_sequencer(struct drm_device *dev,
+static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
+ static const int dp_rates[] = {
+ 162000, 270000, 540000, 810000
+ };
int i, max_rate;
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
- for (i = 0; i < ARRAY_SIZE(default_rates); i++) {
- if (default_rates[i] > max_rate)
+ for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
+ if (dp_rates[i] > max_rate)
break;
- intel_dp->sink_rates[i] = default_rates[i];
+ intel_dp->sink_rates[i] = dp_rates[i];
}
intel_dp->num_sink_rates = i;
}
+/* Get length of rates array potentially limited by max_rate. */
+static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
+{
+ int i;
+
+ /* Limit results by potentially reduced max rate */
+ for (i = 0; i < len; i++) {
+ if (rates[len - i - 1] <= max_rate)
+ return len - i;
+ }
+
+ return 0;
+}
+
+/* Get length of common rates array potentially limited by max_rate. */
+static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
+ int max_rate)
+{
+ return intel_dp_rate_limit_len(intel_dp->common_rates,
+ intel_dp->num_common_rates, max_rate);
+}
+
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
@@ -216,41 +236,85 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
return max_dotclk;
}
+static int cnl_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+
+ u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ /* Low voltage SKUs are limited to a max of 5.4G */
+ if (voltage == VOLTAGE_INFO_0_85V)
+ return 540000;
+
+ /* For this SKU, 8.1G is supported on all ports */
+ if (IS_CNL_WITH_PORT_F(dev_priv))
+ return 810000;
+
+ /* For other SKUs, max rate on ports A and D is 5.4G */
+ if (port == PORT_A || port == PORT_D)
+ return 540000;
+
+ return 810000;
+}
+
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
+ /* The values must be in increasing order */
+ static const int cnl_rates[] = {
+ 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
+ };
+ static const int bxt_rates[] = {
+ 162000, 216000, 243000, 270000, 324000, 432000, 540000
+ };
+ static const int skl_rates[] = {
+ 162000, 216000, 270000, 324000, 432000, 540000
+ };
+ static const int hsw_rates[] = {
+ 162000, 270000, 540000
+ };
+ static const int g4x_rates[] = {
+ 162000, 270000
+ };
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->port;
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[dig_port->base.port];
const int *source_rates;
- int size;
- u32 voltage;
+ int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
/* This should only be done once */
WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
- if (IS_GEN9_LP(dev_priv)) {
- source_rates = bxt_rates;
- size = ARRAY_SIZE(bxt_rates);
- } else if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_CANNONLAKE(dev_priv)) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
- if (port == PORT_A || port == PORT_D ||
- voltage == VOLTAGE_INFO_0_85V)
- size -= 2;
+ max_rate = cnl_max_source_rate(intel_dp);
+ } else if (IS_GEN9_LP(dev_priv)) {
+ source_rates = bxt_rates;
+ size = ARRAY_SIZE(bxt_rates);
} else if (IS_GEN9_BC(dev_priv)) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
IS_BROADWELL(dev_priv)) {
- source_rates = default_rates;
- size = ARRAY_SIZE(default_rates);
+ source_rates = hsw_rates;
+ size = ARRAY_SIZE(hsw_rates);
} else {
- source_rates = default_rates;
- size = ARRAY_SIZE(default_rates) - 1;
+ source_rates = g4x_rates;
+ size = ARRAY_SIZE(g4x_rates);
}
+ if (max_rate && vbt_max_rate)
+ max_rate = min(max_rate, vbt_max_rate);
+ else if (vbt_max_rate)
+ max_rate = vbt_max_rate;
+
+ if (max_rate)
+ size = intel_dp_rate_limit_len(source_rates, size, max_rate);
+
intel_dp->source_rates = source_rates;
intel_dp->num_source_rates = size;
}
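
[Editor's note] A minimal sketch, not part of the patch, of how the platform limit from cnl_max_source_rate() and the VBT limit (dp_max_link_rate) combine before intel_dp_rate_limit_len() trims the ascending source_rates[] array: a limit of 0 means "no limit", and when both are set the lower one wins. effective_max_rate() is a hypothetical helper and assumes min() from <linux/kernel.h>. For example, a low-voltage CNL SKU (platform max 540000) with a VBT cap of 810000 keeps only the cnl_rates[] entries up to 540000.

static int effective_max_rate(int platform_max, int vbt_max)
{
	/* Both limits present: the stricter (lower) one applies. */
	if (platform_max && vbt_max)
		return min(platform_max, vbt_max);

	/* Otherwise whichever limit is non-zero applies (0 means no limit). */
	return platform_max ? platform_max : vbt_max;
}
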
@@ -302,27 +366,11 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
/* Paranoia, there should always be something in common. */
if (WARN_ON(intel_dp->num_common_rates == 0)) {
- intel_dp->common_rates[0] = default_rates[0];
+ intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
}
-/* get length of common rates potentially limited by max_rate */
-static int intel_dp_common_len_rate_limit(struct intel_dp *intel_dp,
- int max_rate)
-{
- const int *common_rates = intel_dp->common_rates;
- int i, common_len = intel_dp->num_common_rates;
-
- /* Limit results by potentially reduced max rate */
- for (i = 0; i < common_len; i++) {
- if (common_rates[common_len - i - 1] <= max_rate)
- return common_len - i;
- }
-
- return 0;
-}
-
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
uint8_t lane_count)
{
@@ -427,24 +475,19 @@ static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
}
static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp);
+intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
-intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
+intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd);
static void
-intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
+intel_dp_pps_init(struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
/*
- * See vlv_power_sequencer_reset() why we need
+ * See intel_power_sequencer_reset() for why we need
* a power domain reference here.
*/
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
@@ -454,10 +497,7 @@ static void pps_lock(struct intel_dp *intel_dp)
static void pps_unlock(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
mutex_unlock(&dev_priv->pps_mutex);
@@ -467,8 +507,8 @@ static void pps_unlock(struct intel_dp *intel_dp)
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -477,11 +517,11 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power seqeuncer kick due to port %c being active\n",
- pipe_name(pipe), port_name(intel_dig_port->port)))
+ pipe_name(pipe), port_name(intel_dig_port->base.port)))
return;
DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->port));
+ pipe_name(pipe), port_name(intel_dig_port->base.port));
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -578,9 +618,8 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -603,16 +642,16 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
if (WARN_ON(pipe == INVALID_PIPE))
pipe = PIPE_A;
- vlv_steal_power_sequencer(dev, pipe);
+ vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
pipe_name(intel_dp->pps_pipe),
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
/*
* Even vdd force doesn't work until we've made
@@ -626,22 +665,16 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ int backlight_controller = dev_priv->vbt.backlight.controller;
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
WARN_ON(!intel_dp_is_edp(intel_dp));
- /*
- * TODO: BXT has 2 PPS instances. The correct port->PPS instance
- * mapping needs to be retrieved from VBT, for now just hard-code to
- * use instance #0 always.
- */
if (!intel_dp->pps_reset)
- return 0;
+ return backlight_controller;
intel_dp->pps_reset = false;
@@ -649,9 +682,9 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
* Only the HW needs to be reprogrammed, the SW state is fixed and
* has been setup during connector init.
*/
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
- return 0;
+ return backlight_controller;
}
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
@@ -701,10 +734,9 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -731,13 +763,12 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
port_name(port), pipe_name(intel_dp->pps_pipe));
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
@@ -754,15 +785,20 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
* should use them always.
*/
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp;
if (encoder->type != INTEL_OUTPUT_DP &&
- encoder->type != INTEL_OUTPUT_EDP)
+ encoder->type != INTEL_OUTPUT_EDP &&
+ encoder->type != INTEL_OUTPUT_DDI)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
+ /* Skip pure DVI/HDMI DDI encoders */
+ if (!i915_mmio_reg_valid(intel_dp->output_reg))
+ continue;
+
WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
@@ -783,10 +819,10 @@ struct pps_registers {
i915_reg_t pp_div;
};
-static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp,
+static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
int pps_idx = 0;
memset(regs, 0, sizeof(*regs));
@@ -800,7 +836,8 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
regs->pp_stat = PP_STATUS(pps_idx);
regs->pp_on = PP_ON_DELAYS(pps_idx);
regs->pp_off = PP_OFF_DELAYS(pps_idx);
- if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv))
+ if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
+ !HAS_PCH_ICP(dev_priv))
regs->pp_div = PP_DIVISOR(pps_idx);
}
@@ -809,8 +846,7 @@ _pp_ctrl_reg(struct intel_dp *intel_dp)
{
struct pps_registers regs;
- intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
- &regs);
+ intel_pps_get_registers(intel_dp, &regs);
return regs.pp_ctrl;
}
@@ -820,8 +856,7 @@ _pp_stat_reg(struct intel_dp *intel_dp)
{
struct pps_registers regs;
- intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
- &regs);
+ intel_pps_get_registers(intel_dp, &regs);
return regs.pp_stat;
}
@@ -833,8 +868,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
{
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
@@ -864,8 +898,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -878,8 +911,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -893,8 +925,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!intel_dp_is_edp(intel_dp))
return;
@@ -910,10 +941,8 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
uint32_t status;
bool done;
@@ -933,8 +962,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (index)
return 0;
@@ -948,8 +976,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (index)
return 0;
@@ -959,7 +986,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* like to run at 2MHz. So, take the cdclk or PCH rawclk value and
* divide by 2000 and use that
*/
- if (intel_dig_port->port == PORT_A)
+ if (intel_dp->aux_ch == AUX_CH_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
else
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
@@ -967,10 +994,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -1040,14 +1066,15 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
}
static int
-intel_dp_aux_ch(struct intel_dp *intel_dp,
- const uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_size)
+intel_dp_aux_xfer(struct intel_dp *intel_dp,
+ const uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size,
+ u32 aux_send_ctl_flags)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
- i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ i915_reg_t ch_ctl, ch_data[5];
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
uint32_t status;
@@ -1055,6 +1082,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
bool vdd;
+ ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ for (i = 0; i < ARRAY_SIZE(ch_data); i++)
+ ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+
pps_lock(intel_dp);
/*
@@ -1107,11 +1138,13 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
send_bytes,
aux_clock_divider);
+ send_ctl |= aux_send_ctl_flags;
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
- I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
+ I915_WRITE(ch_data[i >> 2],
intel_dp_pack_aux(send + i,
send_bytes - i));
@@ -1127,14 +1160,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
- if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
- continue;
-
/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
* 400us delay required for errors and timeouts
* Timeout errors from the HW already meet this
* requirement so skip to next iteration
*/
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+ continue;
+
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
usleep_range(400, 500);
continue;
@@ -1180,14 +1213,6 @@ done:
if (recv_bytes == 0 || recv_bytes > 20) {
DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
recv_bytes);
- /*
- * FIXME: This patch was created on top of a series that
- * organize the retries at drm level. There EBUSY should
- * also take care for 1ms wait before retrying.
- * That aux retries re-org is still needed and after that is
- * merged we remove this sleep from here.
- */
- usleep_range(1000, 1500);
ret = -EBUSY;
goto out;
}
@@ -1196,7 +1221,7 @@ done:
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
+ intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
recv + i, recv_bytes - i);
ret = recv_bytes;
@@ -1213,6 +1238,17 @@ out:
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+
+static void
+intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
+ const struct drm_dp_aux_msg *msg)
+{
+ txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
+ txbuf[2] = msg->address & 0xff;
+ txbuf[3] = msg->size - 1;
+}
+
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
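
[Editor's note] Worked example, not part of the patch, of the four-byte header produced by intel_dp_aux_header() above for a one-byte native AUX read (DP_AUX_NATIVE_READ, request 0x9) of DPCD address 0x00202 (DP_LANE0_1_STATUS):

	txbuf[0] = (0x9 << 4) | ((0x00202 >> 16) & 0xf);  /* = 0x90 */
	txbuf[1] = (0x00202 >> 8) & 0xff;                 /* = 0x02 */
	txbuf[2] =  0x00202 & 0xff;                       /* = 0x02 */
	txbuf[3] =  msg->size - 1;                        /* = 0x00 for a 1-byte read */
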
@@ -1221,11 +1257,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
size_t txsize, rxsize;
int ret;
- txbuf[0] = (msg->request << 4) |
- ((msg->address >> 16) & 0xf);
- txbuf[1] = (msg->address >> 8) & 0xff;
- txbuf[2] = msg->address & 0xff;
- txbuf[3] = msg->size - 1;
+ intel_dp_aux_header(txbuf, msg);
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_WRITE:
@@ -1242,7 +1274,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
- ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, 0);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
@@ -1264,7 +1297,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (WARN_ON(rxsize > 20))
return -E2BIG;
- ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, 0);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
/*
@@ -1286,166 +1320,173 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
-static enum port intel_aux_port(struct drm_i915_private *dev_priv,
- enum port port)
+static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
- enum port aux_port;
+ enum aux_ch aux_ch;
if (!info->alternate_aux_channel) {
+ aux_ch = (enum aux_ch) port;
+
DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
- port_name(port), port_name(port));
- return port;
+ aux_ch_name(aux_ch), port_name(port));
+ return aux_ch;
}
switch (info->alternate_aux_channel) {
case DP_AUX_A:
- aux_port = PORT_A;
+ aux_ch = AUX_CH_A;
break;
case DP_AUX_B:
- aux_port = PORT_B;
+ aux_ch = AUX_CH_B;
break;
case DP_AUX_C:
- aux_port = PORT_C;
+ aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
- aux_port = PORT_D;
+ aux_ch = AUX_CH_D;
+ break;
+ case DP_AUX_F:
+ aux_ch = AUX_CH_F;
break;
default:
MISSING_CASE(info->alternate_aux_channel);
- aux_port = PORT_A;
+ aux_ch = AUX_CH_A;
break;
}
DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
- port_name(aux_port), port_name(port));
+ aux_ch_name(aux_ch), port_name(port));
- return aux_port;
+ return aux_ch;
}
-static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+static enum intel_display_power_domain
+intel_aux_power_domain(struct intel_dp *intel_dp)
{
- switch (port) {
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return DP_AUX_CH_CTL(port);
+ switch (intel_dp->aux_ch) {
+ case AUX_CH_A:
+ return POWER_DOMAIN_AUX_A;
+ case AUX_CH_B:
+ return POWER_DOMAIN_AUX_B;
+ case AUX_CH_C:
+ return POWER_DOMAIN_AUX_C;
+ case AUX_CH_D:
+ return POWER_DOMAIN_AUX_D;
+ case AUX_CH_F:
+ return POWER_DOMAIN_AUX_F;
default:
- MISSING_CASE(port);
- return DP_AUX_CH_CTL(PORT_B);
+ MISSING_CASE(intel_dp->aux_ch);
+ return POWER_DOMAIN_AUX_A;
}
}
-static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
- switch (port) {
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return DP_AUX_CH_DATA(port, index);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_CTL(aux_ch);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_DATA(PORT_B, index);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_B);
}
}
-static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- switch (port) {
- case PORT_A:
- return DP_AUX_CH_CTL(port);
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return PCH_DP_AUX_CH_CTL(port);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_DATA(aux_ch, index);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_CTL(PORT_A);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_B, index);
}
}
-static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
- switch (port) {
- case PORT_A:
- return DP_AUX_CH_DATA(port, index);
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return PCH_DP_AUX_CH_DATA(port, index);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_CTL(aux_ch);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_CTL(aux_ch);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_DATA(PORT_A, index);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
}
}
-static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
+static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- switch (port) {
- case PORT_A:
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return DP_AUX_CH_CTL(port);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_DATA(aux_ch, index);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_CTL(PORT_A);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
}
}
-static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
+static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
- switch (port) {
- case PORT_A:
- case PORT_B:
- case PORT_C:
- case PORT_D:
- return DP_AUX_CH_DATA(port, index);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_F:
+ return DP_AUX_CH_CTL(aux_ch);
default:
- MISSING_CASE(port);
- return DP_AUX_CH_DATA(PORT_A, index);
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
}
}
-static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
-{
- if (INTEL_INFO(dev_priv)->gen >= 9)
- return skl_aux_ctl_reg(dev_priv, port);
- else if (HAS_PCH_SPLIT(dev_priv))
- return ilk_aux_ctl_reg(dev_priv, port);
- else
- return g4x_aux_ctl_reg(dev_priv, port);
-}
-
-static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
-{
- if (INTEL_INFO(dev_priv)->gen >= 9)
- return skl_aux_data_reg(dev_priv, port, index);
- else if (HAS_PCH_SPLIT(dev_priv))
- return ilk_aux_data_reg(dev_priv, port, index);
- else
- return g4x_aux_data_reg(dev_priv, port, index);
-}
-
-static void intel_aux_reg_init(struct intel_dp *intel_dp)
+static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = intel_aux_port(dev_priv,
- dp_to_dig_port(intel_dp)->port);
- int i;
-
- intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
- for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
- intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
+ enum aux_ch aux_ch = intel_dp->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_F:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
}
static void
@@ -1457,14 +1498,42 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->port;
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp->aux_ch = intel_aux_ch(intel_dp);
+ intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
+
+ if (INTEL_GEN(dev_priv) >= 9) {
+ intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = skl_aux_data_reg;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else {
+ intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+ else
+ intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- intel_aux_reg_init(intel_dp);
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
+ port_name(encoder->port));
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
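
[Editor's note] After intel_dp_aux_init() has run, callers obtain AUX register offsets through the new per-platform hooks instead of the previously cached i915_reg_t values. A minimal sketch, not part of the patch: dump_aux_regs() is a hypothetical debug helper and assumes i915_mmio_reg_offset() and DRM_DEBUG_KMS() as used elsewhere in i915.

static void dump_aux_regs(struct intel_dp *intel_dp)
{
	int i;

	/* Control register for whichever AUX channel this port was assigned. */
	DRM_DEBUG_KMS("AUX ctl: 0x%x\n",
		      i915_mmio_reg_offset(intel_dp->aux_ch_ctl_reg(intel_dp)));

	/* The data registers that intel_dp_aux_xfer() loads and unpacks. */
	for (i = 0; i < 5; i++)
		DRM_DEBUG_KMS("AUX data[%d]: 0x%x\n", i,
			      i915_mmio_reg_offset(intel_dp->aux_ch_data_reg(intel_dp, i)));
}
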
@@ -1479,8 +1548,7 @@ static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dp_link_dpll *divisor = NULL;
int i, count = 0;
@@ -1628,7 +1696,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = encoder->port;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
@@ -1658,7 +1726,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->has_drrs = false;
- if (port == PORT_A)
+ if (IS_G4X(dev_priv) || port == PORT_A)
pipe_config->has_audio = false;
else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
pipe_config->has_audio = intel_dp->has_audio;
@@ -1692,6 +1760,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
conn_state->scaling_mode);
}
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
@@ -1841,6 +1913,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count,
bool link_mst)
{
+ intel_dp->link_trained = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
intel_dp->link_mst = link_mst;
@@ -1849,11 +1922,10 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
@@ -1940,20 +2012,18 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
-static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp);
+static void intel_pps_verify_state(struct intel_dp *intel_dp);
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
i915_reg_t pp_stat_reg, pp_ctrl_reg;
lockdep_assert_held(&dev_priv->pps_mutex);
- intel_pps_verify_state(dev_priv, intel_dp);
+ intel_pps_verify_state(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -2024,8 +2094,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 control;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2046,9 +2115,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
*/
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -2067,7 +2135,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2087,7 +2155,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
*/
if (!edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
msleep(intel_dp->panel_power_up_delay);
}
@@ -2113,13 +2181,12 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
pps_unlock(intel_dp);
I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
u32 pp;
@@ -2133,7 +2200,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -2193,7 +2260,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
return;
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
intel_dp->want_panel_vdd = false;
@@ -2205,8 +2272,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
static void edp_panel_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2216,11 +2282,11 @@ static void edp_panel_on(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
if (WARN(edp_have_panel_power(intel_dp),
"eDP port %c panel power already on\n",
- port_name(dp_to_dig_port(intel_dp)->port)))
+ port_name(dp_to_dig_port(intel_dp)->base.port)))
return;
wait_panel_power_cycle(intel_dp);
@@ -2264,8 +2330,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2275,10 +2340,10 @@ static void edp_panel_off(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dp_to_dig_port(intel_dp)->port));
+ port_name(dp_to_dig_port(intel_dp)->base.port));
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2313,9 +2378,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2358,8 +2421,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2430,7 +2492,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
- port_name(dig_port->port),
+ port_name(dig_port->base.port),
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@ -2486,10 +2548,10 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
udelay(200);
}
-static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -2505,6 +2567,21 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
udelay(200);
}
+static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
+{
+ /*
+ * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
+ * be capable of signalling downstream hpd with a long pulse.
+ * Whether or not that means D3 is safe to use is not clear,
+ * but let's assume so until proven otherwise.
+ *
+ * FIXME should really check all downstream ports...
+ */
+ return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
+ intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
+ intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
+}
+
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
@@ -2515,6 +2592,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
return;
if (mode != DRM_MODE_DPMS_ON) {
+ if (downstream_hpd_needs_d0(intel_dp))
+ return;
+
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D3);
} else {
@@ -2544,10 +2624,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = encoder->port;
u32 tmp;
bool ret;
@@ -2596,12 +2675,16 @@ out:
static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
tmp = I915_READ(intel_dp->output_reg);
@@ -2679,8 +2762,11 @@ static void intel_disable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp->link_trained = false;
+
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@@ -2694,12 +2780,10 @@ static void g4x_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
/* disable the port before the pipe on g4x */
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
}
static void ilk_disable_dp(struct intel_encoder *encoder,
@@ -2725,38 +2809,34 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = encoder->port;
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_off(intel_dp);
+ ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
static void vlv_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
}
static void chv_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(encoder, old_crtc_state);
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
mutex_unlock(&dev_priv->sb_lock);
}
@@ -2766,10 +2846,9 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t *DP,
uint8_t dp_train_pat)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
@@ -2852,8 +2931,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
/* enable with pattern 1 (as per spec) */
@@ -2877,10 +2955,9 @@ static void intel_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
@@ -2890,7 +2967,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
pps_lock(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_init_panel_power_sequencer(intel_dp);
+ vlv_init_panel_power_sequencer(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -2944,7 +3021,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = encoder->port;
intel_dp_prepare(encoder, pipe_config);
@@ -2977,22 +3054,21 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* from a port.
*/
DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->port));
+ pipe_name(pipe), port_name(intel_dig_port->base.port));
I915_WRITE(pp_on_reg, 0);
POSTING_READ(pp_on_reg);
intel_dp->pps_pipe = INVALID_PIPE;
}
-static void vlv_steal_power_sequencer(struct drm_device *dev,
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
lockdep_assert_held(&dev_priv->pps_mutex);
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp;
enum port port;
@@ -3001,7 +3077,7 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- port = dp_to_dig_port(intel_dp)->port;
+ port = dp_to_dig_port(intel_dp)->base.port;
WARN(intel_dp->active_pipe == pipe,
"stealing pipe %c power sequencer from active (e)DP port %c\n",
@@ -3018,13 +3094,12 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
}
}
-static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
+static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -3044,7 +3119,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
* We may be stealing the power
* sequencer from another port.
*/
- vlv_steal_power_sequencer(dev, crtc->pipe);
+ vlv_steal_power_sequencer(dev_priv, crtc->pipe);
intel_dp->active_pipe = crtc->pipe;
@@ -3055,18 +3130,18 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
intel_dp->pps_pipe = crtc->pipe;
DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
- pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
+ pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
/* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- vlv_phy_pre_encoder_enable(encoder);
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
intel_enable_dp(encoder, pipe_config, conn_state);
}
@@ -3077,14 +3152,14 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
{
intel_dp_prepare(encoder, pipe_config);
- vlv_phy_pre_pll_enable(encoder);
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- chv_phy_pre_encoder_enable(encoder);
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
intel_enable_dp(encoder, pipe_config, conn_state);
@@ -3098,14 +3173,14 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
{
intel_dp_prepare(encoder, pipe_config);
- chv_phy_pre_pll_enable(encoder);
+ chv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- chv_phy_post_pll_disable(encoder);
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
}
/*
@@ -3119,41 +3194,12 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
-static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
-{
- uint8_t psr_caps = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
- return false;
- return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
-static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
-{
- uint8_t dprx = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
- &dprx) != 1)
- return false;
- return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
-}
-
-static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
-{
- uint8_t alpm_caps = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
- &alpm_caps) != 1)
- return false;
- return alpm_caps & DP_ALPM_CAP;
-}
-
/* These are source-specific values. */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
if (INTEL_GEN(dev_priv) >= 9) {
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
@@ -3172,7 +3218,7 @@ uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
if (INTEL_GEN(dev_priv) >= 9) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
@@ -3505,10 +3551,9 @@ gen7_edp_signal_levels(uint8_t train_set)
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- enum port port = intel_dig_port->port;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_dig_port->base.port;
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
@@ -3563,10 +3608,9 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
uint32_t val;
if (!HAS_DDI(dev_priv))
@@ -3595,13 +3639,13 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
}
static void
-intel_dp_link_down(struct intel_dp *intel_dp)
+intel_dp_link_down(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- enum port port = intel_dig_port->port;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ enum port port = encoder->port;
uint32_t DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev_priv)))
@@ -3700,40 +3744,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
- /* Check if the panel supports PSR */
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
- intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
- if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
- dev_priv->psr.sink_support = true;
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
- }
-
- if (INTEL_GEN(dev_priv) >= 9 &&
- (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
- uint8_t frame_sync_cap;
-
- dev_priv->psr.sink_support = true;
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
- &frame_sync_cap) != 1)
- frame_sync_cap = 0;
- dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
- /* PSR2 needs frame sync as well */
- dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
- DRM_DEBUG_KMS("PSR2 %s on sink",
- dev_priv->psr.psr2_support ? "supported" : "not supported");
-
- if (dev_priv->psr.psr2_support) {
- dev_priv->psr.y_cord_support =
- intel_dp_get_y_cord_status(intel_dp);
- dev_priv->psr.colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.alpm =
- intel_dp_get_alpm_status(intel_dp);
- }
-
- }
+ intel_psr_init_dpcd(intel_dp);
/*
* Read the eDP display control registers.
@@ -3747,11 +3758,11 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
- DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+ DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
- /* Intermediate frequency support */
- if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
+ /* Read the eDP 1.4+ supported link rates. */
+ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
@@ -3775,6 +3786,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp->num_sink_rates = i;
}
+ /*
+ * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
+ * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
+ */
if (intel_dp->num_sink_rates)
intel_dp->use_rate_select = true;
else
@@ -3874,11 +3889,12 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
intel_dp->is_mst);
}
-static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
+static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state, bool disable_wa)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
u8 buf;
int ret = 0;
int count = 0;
@@ -3914,15 +3930,17 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
}
out:
- hsw_enable_ips(intel_crtc);
+ if (disable_wa)
+ hsw_enable_ips(crtc_state);
return ret;
}
-static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
u8 buf;
int ret;
@@ -3936,16 +3954,16 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
return -EIO;
if (buf & DP_TEST_SINK_START) {
- ret = intel_dp_sink_crc_stop(intel_dp);
+ ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
if (ret)
return ret;
}
- hsw_disable_ips(intel_crtc);
+ hsw_disable_ips(crtc_state);
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
buf | DP_TEST_SINK_START) < 0) {
- hsw_enable_ips(intel_crtc);
+ hsw_enable_ips(crtc_state);
return -EIO;
}
@@ -3953,16 +3971,16 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
return 0;
}
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
u8 buf;
int count, ret;
int attempts = 6;
- ret = intel_dp_sink_crc_start(intel_dp);
+ ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
if (ret)
return ret;
@@ -3990,7 +4008,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
}
stop:
- intel_dp_sink_crc_stop(intel_dp);
+ intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
return ret;
}
@@ -4257,12 +4275,85 @@ go_again:
return -EINVAL;
}
-static void
-intel_dp_retrain_link(struct intel_dp *intel_dp)
+static bool
+intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_dp->link_trained)
+ return false;
+
+ if (!intel_dp_get_link_status(intel_dp, link_status))
+ return false;
+
+ /*
+ * Validate the cached values of intel_dp->link_rate and
+ * intel_dp->lane_count before attempting to retrain.
+ */
+ if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
+ intel_dp->lane_count))
+ return false;
+
+ /* Retrain if Channel EQ or CR not ok */
+ return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
+}
+
+/*
+ * If the display is now connected, check the link status; there have
+ * been known issues of link loss triggering a long pulse.
+ *
+ * Some sinks (e.g. the ASUS PB287Q) seem to perform some
+ * weird HPD ping-pong during modesets. So we can apparently
+ * end up with HPD going low during a modeset, and then
+ * going back up soon after. And once that happens we must
+ * retrain the link to get a picture. That's in case no
+ * userspace component reacted to the intermittent HPD dip.
+ */
+int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ /* FIXME handle the MST connectors as well */
+
+ if (!connector || connector->base.status != connector_status_connected)
+ return 0;
+
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ctx);
+ if (ret)
+ return ret;
+
+ conn_state = connector->base.state;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ return 0;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
+
+ if (!crtc_state->base.active)
+ return 0;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ return 0;
+
+ if (!intel_dp_needs_link_retrain(intel_dp))
+ return 0;
/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
@@ -4280,43 +4371,49 @@ intel_dp_retrain_link(struct intel_dp *intel_dp)
if (crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
+
+ return 0;
}
-static void
-intel_dp_check_link_status(struct intel_dp *intel_dp)
+/*
+ * If the display is now connected, check the link status; there have
+ * been known issues of link loss triggering a long pulse.
+ *
+ * Some sinks (e.g. the ASUS PB287Q) seem to perform some
+ * weird HPD ping-pong during modesets. So we can apparently
+ * end up with HPD going low during a modeset, and then
+ * going back up soon after. And once that happens we must
+ * retrain the link to get a picture. That's in case no
+ * userspace component reacted to the intermittent HPD dip.
+ */
+static bool intel_dp_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
{
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- u8 link_status[DP_LINK_STATUS_SIZE];
+ struct drm_modeset_acquire_ctx ctx;
+ bool changed;
+ int ret;
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ changed = intel_encoder_hotplug(encoder, connector);
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
- DRM_ERROR("Failed to get link status\n");
- return;
- }
+ drm_modeset_acquire_init(&ctx, 0);
- if (!intel_encoder->base.crtc)
- return;
+ for (;;) {
+ ret = intel_dp_retrain_link(encoder, &ctx);
- if (!to_intel_crtc(intel_encoder->base.crtc)->active)
- return;
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ continue;
+ }
- /*
- * Validate the cached values of intel_dp->link_rate and
- * intel_dp->lane_count before attempting to retrain.
- */
- if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
- intel_dp->lane_count))
- return;
+ break;
+ }
- /* Retrain if Channel EQ or CR not ok */
- if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
- DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- intel_encoder->base.name);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
- intel_dp_retrain_link(intel_dp);
- }
+ return changed;
}
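/*
 * A note on the loop above: it is the standard DRM modeset-lock retry
 * idiom. Any drm_modeset_lock() taken through the acquire context may
 * fail with -EDEADLK, in which case every lock collected so far must be
 * dropped via drm_modeset_backoff() and the whole operation retried.
 * A minimal sketch of the idiom follows; do_retrain() is only a stand-in
 * for a ctx-based helper such as intel_dp_retrain_link() above.
 */
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
	for (;;) {
		ret = do_retrain(encoder, &ctx);
		if (ret != -EDEADLK)
			break;
		drm_modeset_backoff(&ctx);	/* drop locks, then retry */
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);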
/*
@@ -4335,8 +4432,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@@ -4375,13 +4471,14 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- intel_dp_check_link_status(intel_dp);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ /* defer to the hotplug work for link retraining if needed */
+ if (intel_dp_needs_link_retrain(intel_dp))
+ return false;
+
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
- drm_kms_helper_hotplug_event(intel_encoder->base.dev);
+ drm_kms_helper_hotplug_event(&dev_priv->drm);
}
return true;
@@ -4445,8 +4542,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum drm_connector_status status;
status = intel_panel_detect(dev_priv);
@@ -4456,173 +4552,174 @@ edp_detect(struct intel_dp *intel_dp)
return status;
}
-static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 bit;
- switch (port->port) {
- case PORT_B:
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
- case PORT_C:
+ case HPD_PORT_C:
bit = SDE_PORTC_HOTPLUG;
break;
- case PORT_D:
+ case HPD_PORT_D:
bit = SDE_PORTD_HOTPLUG;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(encoder->hpd_pin);
return false;
}
return I915_READ(SDEISR) & bit;
}
-static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool cpt_digital_port_connected(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 bit;
- switch (port->port) {
- case PORT_B:
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
- case PORT_C:
+ case HPD_PORT_C:
bit = SDE_PORTC_HOTPLUG_CPT;
break;
- case PORT_D:
+ case HPD_PORT_D:
bit = SDE_PORTD_HOTPLUG_CPT;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(encoder->hpd_pin);
return false;
}
return I915_READ(SDEISR) & bit;
}
-static bool spt_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool spt_digital_port_connected(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 bit;
- switch (port->port) {
- case PORT_A:
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_A:
bit = SDE_PORTA_HOTPLUG_SPT;
break;
- case PORT_E:
+ case HPD_PORT_E:
bit = SDE_PORTE_HOTPLUG_SPT;
break;
default:
- return cpt_digital_port_connected(dev_priv, port);
+ return cpt_digital_port_connected(encoder);
}
return I915_READ(SDEISR) & bit;
}
-static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 bit;
- switch (port->port) {
- case PORT_B:
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
break;
- case PORT_C:
+ case HPD_PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
break;
- case PORT_D:
+ case HPD_PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(encoder->hpd_pin);
return false;
}
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
-static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 bit;
- switch (port->port) {
- case PORT_B:
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
- case PORT_C:
+ case HPD_PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
break;
- case PORT_D:
+ case HPD_PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
- MISSING_CASE(port->port);
+ MISSING_CASE(encoder->hpd_pin);
return false;
}
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
-static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
- if (port->port == PORT_A)
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (encoder->hpd_pin == HPD_PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
else
- return ibx_digital_port_connected(dev_priv, port);
+ return ibx_digital_port_connected(encoder);
}
-static bool snb_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool snb_digital_port_connected(struct intel_encoder *encoder)
{
- if (port->port == PORT_A)
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (encoder->hpd_pin == HPD_PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
else
- return cpt_digital_port_connected(dev_priv, port);
+ return cpt_digital_port_connected(encoder);
}
-static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool ivb_digital_port_connected(struct intel_encoder *encoder)
{
- if (port->port == PORT_A)
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (encoder->hpd_pin == HPD_PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
else
- return cpt_digital_port_connected(dev_priv, port);
+ return cpt_digital_port_connected(encoder);
}
-static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
- if (port->port == PORT_A)
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (encoder->hpd_pin == HPD_PORT_A)
return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
else
- return cpt_digital_port_connected(dev_priv, port);
+ return cpt_digital_port_connected(encoder);
}
-static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *intel_dig_port)
+static bool bxt_digital_port_connected(struct intel_encoder *encoder)
{
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- enum port port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 bit;
- port = intel_hpd_pin_to_port(intel_encoder->hpd_pin);
- switch (port) {
- case PORT_A:
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_A:
bit = BXT_DE_PORT_HP_DDIA;
break;
- case PORT_B:
+ case HPD_PORT_B:
bit = BXT_DE_PORT_HP_DDIB;
break;
- case PORT_C:
+ case HPD_PORT_C:
bit = BXT_DE_PORT_HP_DDIC;
break;
default:
- MISSING_CASE(port);
+ MISSING_CASE(encoder->hpd_pin);
return false;
}
@@ -4631,33 +4728,33 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
/*
* intel_digital_port_connected - is the specified port connected?
- * @dev_priv: i915 private structure
- * @port: the port to test
+ * @encoder: intel_encoder
*
- * Return %true if @port is connected, %false otherwise.
+ * Return %true if port is connected, %false otherwise.
*/
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+bool intel_digital_port_connected(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
if (HAS_GMCH_DISPLAY(dev_priv)) {
if (IS_GM45(dev_priv))
- return gm45_digital_port_connected(dev_priv, port);
+ return gm45_digital_port_connected(encoder);
else
- return g4x_digital_port_connected(dev_priv, port);
+ return g4x_digital_port_connected(encoder);
}
if (IS_GEN5(dev_priv))
- return ilk_digital_port_connected(dev_priv, port);
+ return ilk_digital_port_connected(encoder);
else if (IS_GEN6(dev_priv))
- return snb_digital_port_connected(dev_priv, port);
+ return snb_digital_port_connected(encoder);
else if (IS_GEN7(dev_priv))
- return ivb_digital_port_connected(dev_priv, port);
+ return ivb_digital_port_connected(encoder);
else if (IS_GEN8(dev_priv))
- return bdw_digital_port_connected(dev_priv, port);
+ return bdw_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
- return bxt_digital_port_connected(dev_priv, port);
+ return bxt_digital_port_connected(encoder);
else
- return spt_digital_port_connected(dev_priv, port);
+ return spt_digital_port_connected(encoder);
}
static struct edid *
@@ -4702,25 +4799,21 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
}
static int
-intel_dp_long_pulse(struct intel_connector *intel_connector)
+intel_dp_long_pulse(struct intel_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
enum drm_connector_status status;
u8 sink_irq_vector = 0;
- WARN_ON(!drm_modeset_is_locked(&connector->dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
/* Can't disconnect eDP, but you can close the lid... */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
- else if (intel_digital_port_connected(to_i915(dev),
- dp_to_dig_port(intel_dp)))
+ else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
status = intel_dp_detect_dpcd(intel_dp);
else
status = connector_status_disconnected;
@@ -4740,9 +4833,6 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
goto out;
}
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
-
if (intel_dp->reset_link_params) {
/* Initial max link lane count */
intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
@@ -4768,20 +4858,6 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
*/
status = connector_status_disconnected;
goto out;
- } else {
- /*
- * If display is now connected check links status,
- * there has been known issues of link loss triggerring
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
- intel_dp_check_link_status(intel_dp);
}
/*
@@ -4793,7 +4869,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (intel_dp_is_edp(intel_dp) || intel_connector->detect_edid)
+ if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
status = connector_status_connected;
intel_dp->detect_done = true;
@@ -4816,7 +4892,7 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(to_i915(dev), intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
return status;
}
@@ -4832,8 +4908,19 @@ intel_dp_detect(struct drm_connector *connector,
connector->base.id, connector->name);
/* If full detect is not performed yet, do a full detect */
- if (!intel_dp->detect_done)
+ if (!intel_dp->detect_done) {
+ struct drm_crtc *crtc;
+ int ret;
+
+ crtc = connector->state->crtc;
+ if (crtc) {
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ return ret;
+ }
+
status = intel_dp_long_pulse(intel_dp->attached_connector);
+ }
intel_dp->detect_done = false;
@@ -4859,9 +4946,6 @@ intel_dp_force(struct drm_connector *connector)
intel_dp_set_edid(intel_dp);
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
-
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
}
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -4984,11 +5068,241 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
pps_unlock(intel_dp);
}
+static
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+ u8 *an)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
+ static const struct drm_dp_aux_msg msg = {
+ .request = DP_AUX_NATIVE_WRITE,
+ .address = DP_AUX_HDCP_AKSV,
+ .size = DRM_HDCP_KSV_LEN,
+ };
+ uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
+ ssize_t dpcd_ret;
+ int ret;
+
+ /* Output An first, that's easy */
+ dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
+ an, DRM_HDCP_AN_LEN);
+ if (dpcd_ret != DRM_HDCP_AN_LEN) {
+ DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
+ return dpcd_ret >= 0 ? -EIO : dpcd_ret;
+ }
+
+ /*
+ * Since Aksv is Oh-So-Secret, we can't access it in software. So in
+ * order to get it on the wire, we need to create the AUX header as if
+ * we were writing the data, and then tickle the hardware to output the
+ * data once the header is sent out.
+ */
+ intel_dp_aux_header(txbuf, &msg);
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
+ rxbuf, sizeof(rxbuf),
+ DP_AUX_CH_CTL_AUX_AKSV_SELECT);
+ if (ret < 0) {
+ DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
+ return ret;
+ } else if (ret == 0) {
+ DRM_ERROR("Aksv write over DP/AUX was empty\n");
+ return -EIO;
+ }
+
+ reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
+ return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO;
+}
+
+static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+ u8 *bksv)
+{
+ ssize_t ret;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
+ DRM_HDCP_KSV_LEN);
+ if (ret != DRM_HDCP_KSV_LEN) {
+ DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+ u8 *bstatus)
+{
+ ssize_t ret;
+ /*
+ * For some reason the HDMI and DP HDCP specs call this register
+ * definition by different names. In the HDMI spec, it's called BSTATUS,
+ * but in DP it's called BINFO.
+ */
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
+ bstatus, DRM_HDCP_BSTATUS_LEN);
+ if (ret != DRM_HDCP_BSTATUS_LEN) {
+ DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
+ u8 *bcaps)
+{
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+ bcaps, 1);
+ if (ret != 1) {
+ DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+ bool *repeater_present)
+{
+ ssize_t ret;
+ u8 bcaps;
+
+ ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+ if (ret)
+ return ret;
+
+ *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+ u8 *ri_prime)
+{
+ ssize_t ret;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+ ri_prime, DRM_HDCP_RI_LEN);
+ if (ret != DRM_HDCP_RI_LEN) {
+ DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+ bool *ksv_ready)
+{
+ ssize_t ret;
+ u8 bstatus;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ &bstatus, 1);
+ if (ret != 1) {
+ DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ *ksv_ready = bstatus & DP_BSTATUS_READY;
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+ int num_downstream, u8 *ksv_fifo)
+{
+ ssize_t ret;
+ int i;
+
+ /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
+ for (i = 0; i < num_downstream; i += 3) {
+ size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_AUX_HDCP_KSV_FIFO,
+ ksv_fifo + i * DRM_HDCP_KSV_LEN,
+ len);
+ if (ret != len) {
+ DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
+ ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ }
+ return 0;
+}
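/*
 * Worked example for the window arithmetic above, assuming
 * num_downstream = 7 (the value is only for illustration):
 *   i = 0: len = min(7 - 0, 3) * DRM_HDCP_KSV_LEN = 15 bytes at ksv_fifo + 0
 *   i = 3: len = min(7 - 3, 3) * DRM_HDCP_KSV_LEN = 15 bytes at ksv_fifo + 15
 *   i = 6: len = min(7 - 6, 3) * DRM_HDCP_KSV_LEN =  5 bytes at ksv_fifo + 30
 * i.e. 35 bytes total, one 5-byte KSV per downstream device.
 */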
+
+static
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+ int i, u32 *part)
+{
+ ssize_t ret;
+
+ if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
+ return -EINVAL;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_AUX_HDCP_V_PRIME(i), part,
+ DRM_HDCP_V_PRIME_PART_LEN);
+ if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
+ DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+ bool enable)
+{
+ /* Not used for single stream DisplayPort setups */
+ return 0;
+}
+
+static
+bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+{
+ ssize_t ret;
+ u8 bstatus;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ &bstatus, 1);
+ if (ret != 1) {
+ DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return false;
+ }
+
+ return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
+}
+
+static
+int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
+ bool *hdcp_capable)
+{
+ ssize_t ret;
+ u8 bcaps;
+
+ ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+ if (ret)
+ return ret;
+
+ *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
+ return 0;
+}
+
+static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
+ .write_an_aksv = intel_dp_hdcp_write_an_aksv,
+ .read_bksv = intel_dp_hdcp_read_bksv,
+ .read_bstatus = intel_dp_hdcp_read_bstatus,
+ .repeater_present = intel_dp_hdcp_repeater_present,
+ .read_ri_prime = intel_dp_hdcp_read_ri_prime,
+ .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
+ .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
+ .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
+ .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+ .check_link = intel_dp_hdcp_check_link,
+ .hdcp_capable = intel_dp_hdcp_capable,
+};
+
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -5041,7 +5355,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
if (intel_dp_is_edp(intel_dp)) {
/* Reinit the power sequencer, in case BIOS did something with it. */
- intel_dp_pps_init(encoder->dev, intel_dp);
+ intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
}
@@ -5076,14 +5390,9 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum irqreturn ret = IRQ_NONE;
- if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
- intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
- intel_dig_port->base.type = INTEL_OUTPUT_DP;
-
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
* vdd off can generate a long pulse on eDP which
@@ -5092,12 +5401,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
* "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
*/
DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
- port_name(intel_dig_port->port));
+ port_name(intel_dig_port->base.port));
return IRQ_HANDLED;
}
DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
- port_name(intel_dig_port->port),
+ port_name(intel_dig_port->base.port),
long_hpd ? "long" : "short");
if (long_hpd) {
@@ -5125,7 +5434,14 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (!intel_dp->is_mst) {
- if (!intel_dp_short_pulse(intel_dp)) {
+ bool handled;
+
+ handled = intel_dp_short_pulse(intel_dp);
+
+ /* Short pulse can signify loss of HDCP authentication */
+ intel_hdcp_check_link(intel_dp->attached_connector);
+
+ if (!handled) {
intel_dp->detect_done = false;
goto put_power;
}
@@ -5159,8 +5475,11 @@ static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+
+ if (!IS_G4X(dev_priv) && port != PORT_A)
+ intel_attach_force_audio_property(connector);
- intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
if (intel_dp_is_edp(intel_dp)) {
@@ -5185,13 +5504,13 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
}
static void
-intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp, struct edp_power_seq *seq)
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
struct pps_registers regs;
- intel_pps_get_registers(dev_priv, intel_dp, &regs);
+ intel_pps_get_registers(intel_dp, &regs);
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
@@ -5199,7 +5518,8 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off);
- if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv)) {
+ if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
+ !HAS_PCH_ICP(dev_priv)) {
I915_WRITE(regs.pp_ctrl, pp_ctl);
pp_div = I915_READ(regs.pp_div);
}
@@ -5217,7 +5537,8 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
+ HAS_PCH_ICP(dev_priv)) {
seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
} else {
@@ -5235,13 +5556,12 @@ intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
}
static void
-intel_pps_verify_state(struct drm_i915_private *dev_priv,
- struct intel_dp *intel_dp)
+intel_pps_verify_state(struct intel_dp *intel_dp)
{
struct edp_power_seq hw;
struct edp_power_seq *sw = &intel_dp->pps_delays;
- intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
+ intel_pps_readout_hw_state(intel_dp, &hw);
if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
@@ -5252,10 +5572,9 @@ intel_pps_verify_state(struct drm_i915_private *dev_priv,
}
static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp)
+intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
@@ -5265,7 +5584,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
if (final->t11_t12 != 0)
return;
- intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
+ intel_pps_readout_hw_state(intel_dp, &cur);
intel_pps_dump_state("cur", &cur);
@@ -5336,23 +5655,28 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
*/
final->t8 = 1;
final->t9 = 1;
+
+ /*
+ * HW has only a 100msec granularity for t11_t12 so round it up
+ * accordingly.
+ */
+ final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
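/*
 * The delays in struct edp_power_seq are kept in 100 us units, so
 * 100 * 10 = 1000 units corresponds to the 100 ms hardware granularity
 * the roundup() above targets. Worked example (illustrative values):
 *   t11_t12 =  506 (50.6 ms) -> roundup(506, 1000)  = 1000 (100 ms)
 *   t11_t12 = 1000 (100 ms)  -> roundup(1000, 1000) = 1000 (unchanged)
 *   t11_t12 = 1001           -> roundup(1001, 1000) = 2000 (200 ms)
 */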
static void
-intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
- struct intel_dp *intel_dp,
+intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
struct pps_registers regs;
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
- intel_pps_get_registers(dev_priv, intel_dp, &regs);
+ intel_pps_get_registers(intel_dp, &regs);
/*
* On some VLV machines the BIOS can leave the VDD
@@ -5385,7 +5709,8 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
+ HAS_PCH_ICP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
@@ -5411,7 +5736,8 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
+ HAS_PCH_ICP(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
@@ -5419,21 +5745,21 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) ?
+ (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
+ HAS_PCH_ICP(dev_priv)) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}
-static void intel_dp_pps_init(struct drm_device *dev,
- struct intel_dp *intel_dp)
+static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_initial_power_sequencer_setup(intel_dp);
} else {
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
+ intel_dp_init_panel_power_sequencer(intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
}
@@ -5472,7 +5798,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
- intel_crtc = to_intel_crtc(encoder->base.crtc);
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
@@ -5545,8 +5870,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!crtc_state->has_drrs) {
DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
@@ -5581,8 +5905,7 @@ unlock:
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
if (!old_crtc_state->has_drrs)
return;
@@ -5765,7 +6088,7 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
/**
* intel_dp_drrs_init - Init basic DRRS work and mutex.
- * @intel_connector: eDP connector
+ * @connector: eDP connector
* @fixed_mode: preferred mode of panel
*
* This function is called only once at driver load to initialize basic
@@ -5777,12 +6100,10 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
* from VBT setting).
*/
static struct drm_display_mode *
-intel_dp_drrs_init(struct intel_connector *intel_connector,
- struct drm_display_mode *fixed_mode)
+intel_dp_drrs_init(struct intel_connector *connector,
+ struct drm_display_mode *fixed_mode)
{
- struct drm_connector *connector = &intel_connector->base;
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct drm_display_mode *downclock_mode = NULL;
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
@@ -5798,8 +6119,8 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
return NULL;
}
- downclock_mode = intel_find_panel_downclock
- (dev_priv, fixed_mode, connector);
+ downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
+ &connector->base);
if (!downclock_mode) {
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
@@ -5816,11 +6137,9 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *alt_fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
@@ -5838,7 +6157,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(dev)) {
+ if (intel_get_lvds_encoder(&dev_priv->drm)) {
WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
DRM_INFO("LVDS was detected, not registering eDP\n");
@@ -5848,7 +6167,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pps_lock(intel_dp);
intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_pps_init(dev, intel_dp);
+ intel_dp_pps_init(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
pps_unlock(intel_dp);
@@ -5868,7 +6187,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
edid);
- drm_edid_to_eld(connector, edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -5942,37 +6260,6 @@ out_vdd_off:
return false;
}
-/* Set up the hotplug pin and aux power domain. */
-static void
-intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
-{
- struct intel_encoder *encoder = &intel_dig_port->base;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
-
- encoder->hpd_pin = intel_hpd_pin(intel_dig_port->port);
-
- switch (intel_dig_port->port) {
- case PORT_A:
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
- break;
- case PORT_B:
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
- break;
- case PORT_C:
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
- break;
- case PORT_D:
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
- break;
- case PORT_E:
- /* FIXME: Check VBT for actual wiring of PORT E */
- intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
- break;
- default:
- MISSING_CASE(intel_dig_port->port);
- }
-}
-
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
struct intel_connector *intel_connector;
@@ -6005,7 +6292,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
int type;
/* Initialize the work for modeset in case of link train failure */
@@ -6024,20 +6311,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->active_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev_priv))
- intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
- else
- intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
- else
- intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
-
if (HAS_DDI(dev_priv))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -6074,10 +6347,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- connector->interlace_allowed = true;
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- intel_dp_init_connector_port_info(intel_dig_port);
+ intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
intel_dp_aux_init(intel_dp);
@@ -6093,7 +6367,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* init MST on ports that can support it */
if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
- (port == PORT_B || port == PORT_C || port == PORT_D))
+ (port == PORT_B || port == PORT_C ||
+ port == PORT_D || port == PORT_F))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -6105,6 +6380,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_add_properties(intel_dp, connector);
+ if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
+ int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
+ if (ret)
+ DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ }
+
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
@@ -6147,6 +6428,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
"DP %c", port_name(port)))
goto err_encoder_init;
+ intel_encoder->hotplug = intel_dp_hotplug;
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
@@ -6175,7 +6457,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->disable = g4x_disable_dp;
}
- intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 05907fa..f59b59b 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -248,6 +248,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
int tries;
u32 training_pattern;
uint8_t link_status[DP_LINK_STATUS_SIZE];
+ bool channel_eq = false;
training_pattern = intel_dp_training_pattern(intel_dp);
@@ -259,7 +260,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
return false;
}
- intel_dp->channel_eq_status = false;
for (tries = 0; tries < 5; tries++) {
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
@@ -279,7 +279,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
- intel_dp->channel_eq_status = true;
+ channel_eq = true;
DRM_DEBUG_KMS("Channel EQ done. DP Training "
"successful\n");
break;
@@ -301,12 +301,14 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
intel_dp_set_idle_link_train(intel_dp);
- return intel_dp->channel_eq_status;
+ return channel_eq;
}
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
+ intel_dp->link_trained = true;
+
intel_dp_set_link_train(intel_dp,
DP_TRAINING_PATTERN_DISABLE);
}
@@ -328,14 +330,22 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
return;
failure_handling:
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
- if (!intel_dp_get_link_train_fallback_values(intel_dp,
- intel_dp->link_rate,
- intel_dp->lane_count))
- /* Schedule a Hotplug Uevent to userspace to start modeset */
- schedule_work(&intel_connector->modeset_retry_work);
+ /* Don't fall back and prune modes if it's eDP */
+ if (!intel_dp_is_edp(intel_dp)) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
+ if (!intel_dp_get_link_train_fallback_values(intel_dp,
+ intel_dp->link_rate,
+ intel_dp->lane_count))
+ /* Schedule a Hotplug Uevent to userspace to start modeset */
+ schedule_work(&intel_connector->modeset_retry_work);
+ } else {
+ DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
+ }
return;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7725214..c3de091 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -34,6 +34,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -87,6 +88,12 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->dp_m_n.tu = slots;
+ if (IS_GEN9_LP(dev_priv))
+ pipe_config->lane_lat_optim_mask =
+ bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+
+ intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+
return true;
}
@@ -142,7 +149,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
@@ -172,13 +180,27 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_dp->active_mst_links--;
intel_mst->connector = NULL;
- if (intel_dp->active_mst_links == 0) {
+ if (intel_dp->active_mst_links == 0)
intel_dig_port->base.post_disable(&intel_dig_port->base,
- NULL, NULL);
- }
+ old_crtc_state, NULL);
+
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
+static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ if (intel_dp->active_mst_links == 0 &&
+ intel_dig_port->base.pre_pll_enable)
+ intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
+ pipe_config, NULL);
+}
+
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@@ -187,7 +209,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
@@ -231,7 +253,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_dig_port->base.port;
int ret;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
@@ -265,48 +287,8 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- u32 temp, flags = 0;
-
- pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, crtc);
-
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (temp & TRANS_DDI_PHSYNC)
- flags |= DRM_MODE_FLAG_PHSYNC;
- else
- flags |= DRM_MODE_FLAG_NHSYNC;
- if (temp & TRANS_DDI_PVSYNC)
- flags |= DRM_MODE_FLAG_PVSYNC;
- else
- flags |= DRM_MODE_FLAG_NVSYNC;
-
- switch (temp & TRANS_DDI_BPC_MASK) {
- case TRANS_DDI_BPC_6:
- pipe_config->pipe_bpp = 18;
- break;
- case TRANS_DDI_BPC_8:
- pipe_config->pipe_bpp = 24;
- break;
- case TRANS_DDI_BPC_10:
- pipe_config->pipe_bpp = 30;
- break;
- case TRANS_DDI_BPC_12:
- pipe_config->pipe_bpp = 36;
- break;
- default:
- break;
- }
- pipe_config->base.adjusted_mode.flags |= flags;
-
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
-
- intel_dp_get_m_n(crtc, pipe_config);
- intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
+ intel_ddi_get_config(&intel_dig_port->base, pipe_config);
}
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
@@ -570,13 +552,14 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
- intel_encoder->port = intel_dig_port->port;
+ intel_encoder->port = intel_dig_port->base.port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
+ intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index de38d01..c8e9e44 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -147,7 +147,7 @@ struct bxt_ddi_phy_info {
*/
struct {
/**
- * @port: which port maps to this channel.
+ * @channel.port: which port maps to this channel.
*/
enum port port;
} channel[2];
@@ -466,21 +466,21 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
lockdep_assert_held(&dev_priv->power_domains.lock);
- if (rcomp_phy != -1) {
+ was_enabled = true;
+ if (rcomp_phy != -1)
was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
- /*
- * We need to copy the GRC calibration value from rcomp_phy,
- * so make sure it's powered up.
- */
- if (!was_enabled)
- _bxt_ddi_phy_init(dev_priv, rcomp_phy);
- }
+ /*
+ * We need to copy the GRC calibration value from rcomp_phy,
+ * so make sure it's powered up.
+ */
+ if (!was_enabled)
+ _bxt_ddi_phy_init(dev_priv, rcomp_phy);
_bxt_ddi_phy_init(dev_priv, phy);
- if (rcomp_phy != -1 && !was_enabled)
- bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
+ if (!was_enabled)
+ bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
}
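/*
 * With was_enabled now defaulting to true, the rework above covers the
 * same cases as the old nested version (a sketch of the cases, derived
 * from this hunk only):
 *   rcomp_phy == -1                -> was_enabled stays true -> no extra init/uninit
 *   rcomp_phy set, already powered -> was_enabled = true     -> no extra init/uninit
 *   rcomp_phy set, currently off   -> was_enabled = false    -> init rcomp_phy first,
 *                                     then uninit it again after phy is brought up
 */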
static bool __printf(6, 7)
@@ -567,8 +567,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
}
uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
- uint8_t lane_count)
+bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
{
switch (lane_count) {
case 1:
@@ -587,9 +586,8 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
@@ -614,9 +612,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- enum port port = dport->port;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
@@ -642,7 +639,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
@@ -734,11 +731,12 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
}
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t val;
@@ -777,17 +775,16 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
}
}
-void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
- intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
+ intel_dp_unused_lane_mask(crtc_state->lane_count);
u32 val;
/*
@@ -803,7 +800,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
+ chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
@@ -833,7 +830,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
- if (intel_crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
@@ -858,16 +855,15 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
int data, i, stagger;
u32 val;
@@ -878,16 +874,16 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
- if (intel_crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting*/
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ for (i = 0; i < crtc_state->lane_count; i++) {
/* Set the upar bit */
- if (intel_crtc->config->lane_count == 1)
+ if (crtc_state->lane_count == 1)
data = 0x0;
else
data = (i == 1) ? 0x0 : 0x1;
@@ -896,13 +892,13 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
}
/* Data lane stagger programming */
- if (intel_crtc->config->port_clock > 270000)
+ if (crtc_state->port_clock > 270000)
stagger = 0x18;
- else if (intel_crtc->config->port_clock > 135000)
+ else if (crtc_state->port_clock > 135000)
stagger = 0xd;
- else if (intel_crtc->config->port_clock > 67500)
+ else if (crtc_state->port_clock > 67500)
stagger = 0x7;
- else if (intel_crtc->config->port_clock > 33750)
+ else if (crtc_state->port_clock > 33750)
stagger = 0x4;
else
stagger = 0x2;
@@ -911,7 +907,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
- if (intel_crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
@@ -924,7 +920,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
DPIO_TX1_STAGGER_MULT(6) |
DPIO_TX2_STAGGER_MULT(0));
- if (intel_crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
@@ -934,7 +930,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
}
/* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, false);
+ chv_data_lane_soft_reset(encoder, crtc_state, false);
mutex_unlock(&dev_priv->sb_lock);
}
@@ -950,10 +946,11 @@ void chv_phy_release_cl2_override(struct intel_encoder *encoder)
}
}
-void chv_phy_post_pll_disable(struct intel_encoder *encoder)
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
@@ -991,7 +988,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
@@ -1009,15 +1006,14 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
mutex_unlock(&dev_priv->sb_lock);
}
-void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->sb_lock);
@@ -1037,15 +1033,15 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
@@ -1067,14 +1063,14 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-void vlv_phy_reset_lanes(struct intel_encoder *encoder)
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
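Aside on the data-lane stagger programming in chv_phy_pre_encoder_enable() above: the stagger value depends only on the configured port clock (in kHz), so the selection can be sketched as a standalone helper (illustrative only; the driver then programs the value into VLV_PCS01_DW11/VLV_PCS23_DW11 together with the stagger multipliers):

	/* Sketch of the stagger selection shown in the hunk above;
	 * port_clock is in kHz, as in intel_crtc_state. */
	static unsigned int chv_data_lane_stagger(int port_clock)
	{
		if (port_clock > 270000)
			return 0x18;
		else if (port_clock > 135000)
			return 0xd;
		else if (port_clock > 67500)
			return 0x7;
		else if (port_clock > 33750)
			return 0x4;
		else
			return 0x2;
	}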
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index df808a9..51c5ae4 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -813,15 +813,11 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (encoder->type == INTEL_OUTPUT_HDMI) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
-
- } else if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_DP_MST ||
- encoder->type == INTEL_OUTPUT_EDP) {
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
pll = hsw_ddi_dp_get_dpll(encoder, clock);
-
- } else if (encoder->type == INTEL_OUTPUT_ANALOG) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
return NULL;
@@ -1369,15 +1365,13 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- if (encoder->type == INTEL_OUTPUT_HDMI) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
- } else if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_DP_MST ||
- encoder->type == INTEL_OUTPUT_EDP) {
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
@@ -1388,7 +1382,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return NULL;
}
- if (encoder->type == INTEL_OUTPUT_EDP)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL0);
@@ -1808,18 +1802,15 @@ bxt_get_dpll(struct intel_crtc *crtc,
{
struct intel_dpll_hw_state dpll_hw_state = { };
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
int i, clock = crtc_state->port_clock;
- if (encoder->type == INTEL_OUTPUT_HDMI &&
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
!bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
&dpll_hw_state))
return NULL;
- if ((encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_EDP ||
- encoder->type == INTEL_OUTPUT_DP_MST) &&
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
!bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
return NULL;
@@ -1828,15 +1819,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state = dpll_hw_state;
- if (encoder->type == INTEL_OUTPUT_DP_MST) {
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
-
- intel_dig_port = intel_mst->primary;
- } else
- intel_dig_port = enc_to_dig_port(&encoder->base);
-
/* 1:1 mapping between ports and PLLs */
- i = (enum intel_dpll_id) intel_dig_port->port;
+ i = (enum intel_dpll_id) encoder->port;
pll = intel_get_shared_dpll_by_id(dev_priv, i);
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
@@ -2008,8 +1992,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence Before Frequency Change
*
- * FIXME: (DVFS) is used to adjust the display voltage to match the
- * display clock frequencies
+ * Note: DVFS is actually handled via the cdclk code paths,
+ * hence we do nothing here.
*/
/* 6. Enable DPLL in DPLL_ENABLE. */
@@ -2030,8 +2014,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence After Frequency Change
*
- * FIXME: (DVFS) is used to adjust the display voltage to match the
- * display clock frequencies
+ * Note: DVFS is actually handled via the cdclk code paths,
+ * hence we do nothing here.
*/
/*
@@ -2055,8 +2039,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence Before Frequency Change
*
- * FIXME: (DVFS) is used to adjust the display voltage to match the
- * display clock frequencies
+ * Note: DVFS is actually handled via the cdclk code paths,
+ * hence we do nothing here.
*/
/* 3. Disable DPLL through DPLL_ENABLE. */
@@ -2077,8 +2061,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence After Frequency Change
*
- * FIXME: (DVFS) is used to adjust the display voltage to match the
- * display clock frequencies
+ * Note: DVFS is actually handled via the cdclk code paths,
+ * hence we do nothing here.
*/
/* 6. Disable DPLL power in DPLL_ENABLE. */
@@ -2126,10 +2110,8 @@ out:
return ret;
}
-static void cnl_wrpll_get_multipliers(unsigned int bestdiv,
- unsigned int *pdiv,
- unsigned int *qdiv,
- unsigned int *kdiv)
+static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
+ int *qdiv, int *kdiv)
{
/* even dividers */
if (bestdiv % 2 == 0) {
@@ -2167,10 +2149,12 @@ static void cnl_wrpll_get_multipliers(unsigned int bestdiv,
}
}
-static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, uint32_t dco_freq,
- uint32_t ref_freq, uint32_t pdiv, uint32_t qdiv,
- uint32_t kdiv)
+static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
+ u32 dco_freq, u32 ref_freq,
+ int pdiv, int qdiv, int kdiv)
{
+ u32 dco;
+
switch (kdiv) {
case 1:
params->kdiv = 1;
@@ -2202,39 +2186,35 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, uint32_t
WARN(1, "Incorrect PDiv\n");
}
- if (kdiv != 2)
- qdiv = 1;
+ WARN_ON(kdiv != 2 && qdiv != 1);
params->qdiv_ratio = qdiv;
params->qdiv_mode = (qdiv == 1) ? 0 : 1;
- params->dco_integer = div_u64(dco_freq, ref_freq);
- params->dco_fraction = div_u64((div_u64((uint64_t)dco_freq<<15, (uint64_t)ref_freq) -
- ((uint64_t)params->dco_integer<<15)) * 0x8000, 0x8000);
+ dco = div_u64((u64)dco_freq << 15, ref_freq);
+
+ params->dco_integer = dco >> 15;
+ params->dco_fraction = dco & 0x7fff;
}
static bool
-cnl_ddi_calculate_wrpll(int clock /* in Hz */,
+cnl_ddi_calculate_wrpll(int clock,
struct drm_i915_private *dev_priv,
struct skl_wrpll_params *wrpll_params)
{
- uint64_t afe_clock = clock * 5 / KHz(1); /* clocks in kHz */
- unsigned int dco_min = 7998 * KHz(1);
- unsigned int dco_max = 10000 * KHz(1);
- unsigned int dco_mid = (dco_min + dco_max) / 2;
-
+ u32 afe_clock = clock * 5;
+ u32 dco_min = 7998000;
+ u32 dco_max = 10000000;
+ u32 dco_mid = (dco_min + dco_max) / 2;
static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
18, 20, 24, 28, 30, 32, 36, 40,
42, 44, 48, 50, 52, 54, 56, 60,
64, 66, 68, 70, 72, 76, 78, 80,
84, 88, 90, 92, 96, 98, 100, 102,
3, 5, 7, 9, 15, 21 };
- unsigned int d, dco;
- unsigned int dco_centrality = 0;
- unsigned int best_dco_centrality = 999999;
- unsigned int best_div = 0;
- unsigned int best_dco = 0;
- unsigned int pdiv = 0, qdiv = 0, kdiv = 0;
+ u32 dco, best_dco = 0, dco_centrality = 0;
+ u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
+ int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
dco = afe_clock * dividers[d];
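To make the reworked DCO bookkeeping concrete: afe_clock is five times the port clock, a divider is chosen so the DCO lands in the 7998-10000 MHz window as close to the midpoint as possible, and the DCO/reference ratio is split into an integer part and a Q15 fraction. A small standalone check (illustrative only; the 24 MHz reference below is an assumption made for the example, and all frequencies are in kHz as in the driver):

	#include <stdio.h>

	int main(void)
	{
		/* 594 MHz HDMI port clock -> afe_clock = 5 * port_clock (kHz) */
		unsigned long long afe_clock = 5ULL * 594000;   /* 2970000 kHz */
		unsigned long long dco_freq = afe_clock * 3;    /* divider 3 -> 8910000 kHz, inside 7998000..10000000 */
		unsigned long long ref_freq = 24000;            /* assumed 24 MHz reference, in kHz */
		unsigned long long dco = (dco_freq << 15) / ref_freq;

		/* Same integer / Q15-fraction split as cnl_wrpll_params_populate() above */
		printf("dco_integer  = %llu\n", dco >> 15);     /* 371 */
		printf("dco_fraction = %llu\n", dco & 0x7fff);  /* 8192 == 0.25 * 2^15 */
		return 0;
	}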
@@ -2271,7 +2251,7 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
- if (!cnl_ddi_calculate_wrpll(clock * 1000, dev_priv, &wrpll_params))
+ if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
return false;
cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -2281,7 +2261,6 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
- wrpll_params.central_freq |
DPLL_CFGCR1_CENTRAL_FREQ;
memset(&crtc_state->dpll_hw_state, 0,
@@ -2345,15 +2324,13 @@ cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- if (encoder->type == INTEL_OUTPUT_HDMI) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
- } else if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_DP_MST ||
- encoder->type == INTEL_OUTPUT_EDP) {
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
@@ -2361,8 +2338,8 @@ cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
}
crtc_state->dpll_hw_state = dpll_hw_state;
} else {
- DRM_DEBUG_KMS("Skip DPLL setup for encoder %d\n",
- encoder->type);
+ DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
+ crtc_state->output_types);
return NULL;
}
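The encoder->type checks replaced throughout this file rely on crtc_state->output_types being a bitmask over enum intel_output_type. A minimal sketch of what intel_crtc_has_type() and intel_crtc_has_dp_encoder() are assumed to reduce to (illustrative only, not the driver's definitions; the enum values match intel_drv.h below):

	#include <stdbool.h>
	#include <stdint.h>

	enum output_type_sketch { OUTPUT_DP = 7, OUTPUT_EDP = 8, OUTPUT_DP_MST = 11 };

	static bool has_type(uint32_t output_types, int type)
	{
		return output_types & (1u << type);
	}

	static bool has_dp_encoder(uint32_t output_types)
	{
		return output_types & ((1u << OUTPUT_DP) |
				       (1u << OUTPUT_EDP) |
				       (1u << OUTPUT_DP_MST));
	}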
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5d77f75..d436858 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -41,22 +41,21 @@
#include <drm/drm_atomic.h>
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
- *
- * TODO: When modesetting has fully transitioned to atomic, the below
- * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts
- * added.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define _wait_for(COND, US, W) ({ \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
int ret__; \
+ might_sleep(); \
for (;;) { \
bool expired__ = time_after(jiffies, timeout__); \
+ OP; \
if (COND) { \
ret__ = 0; \
break; \
@@ -65,16 +64,16 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if ((W) && drm_can_sleep()) { \
- usleep_range((W), (W)*2); \
- } else { \
- cpu_relax(); \
- } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
-#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
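The reworked macro polls with an exponential backoff: it sleeps Wmin microseconds first, doubles the sleep each iteration up to Wmax, and always re-checks the condition once more after the timeout. A self-contained userspace analogue of the same pattern (illustrative only, not the kernel macro):

	#include <stdbool.h>
	#include <time.h>
	#include <unistd.h>

	/* Poll cond() for up to timeout_us, sleeping wait_min_us at first and
	 * doubling the sleep each iteration up to wait_max_us, re-checking the
	 * condition one last time after the deadline, like __wait_for() above. */
	static int wait_for_cond(bool (*cond)(void), long timeout_us,
				 long wait_min_us, long wait_max_us)
	{
		struct timespec start, now;
		long wait_us = wait_min_us;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			bool expired;

			clock_gettime(CLOCK_MONOTONIC, &now);
			expired = (now.tv_sec - start.tv_sec) * 1000000L +
				  (now.tv_nsec - start.tv_nsec) / 1000L > timeout_us;
			if (cond())
				return 0;
			if (expired)
				return -1; /* the kernel version returns -ETIMEDOUT */
			usleep(wait_us);
			if (wait_us < wait_max_us)
				wait_us <<= 1;
		}
	}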
@@ -123,7 +122,7 @@
int ret__; \
BUILD_BUG_ON(!__builtin_constant_p(US)); \
if ((US) > 10) \
- ret__ = _wait_for((COND), (US), 10); \
+ ret__ = _wait_for((COND), (US), 10, 10); \
else \
ret__ = _wait_for_atomic((COND), (US), 0); \
ret__; \
@@ -173,7 +172,7 @@ enum intel_output_type {
INTEL_OUTPUT_DP = 7,
INTEL_OUTPUT_EDP = 8,
INTEL_OUTPUT_DSI = 9,
- INTEL_OUTPUT_UNKNOWN = 10,
+ INTEL_OUTPUT_DDI = 10,
INTEL_OUTPUT_DP_MST = 11,
};
@@ -205,6 +204,7 @@ struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer *fb;
struct i915_vma *vma;
+ unsigned long vma_flags;
async_cookie_t cookie;
int preferred_bpp;
};
@@ -215,7 +215,11 @@ struct intel_encoder {
enum intel_output_type type;
enum port port;
unsigned int cloneable;
- void (*hot_plug)(struct intel_encoder *);
+ bool (*hotplug)(struct intel_encoder *encoder,
+ struct intel_connector *connector);
+ enum intel_output_type (*compute_output_type)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
bool (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
@@ -299,6 +303,80 @@ struct intel_panel {
} backlight;
};
+/*
+ * This structure serves as a translation layer between the generic HDCP code
+ * and the bus-specific code. What that means is that HDCP over HDMI differs
+ * from HDCP over DP, so to account for these differences, we need to
+ * communicate with the receiver through this shim.
+ *
+ * For completeness, the 2 buses differ in the following ways:
+ * - DP AUX vs. DDC
+ * HDCP registers on the receiver are set via DP AUX for DP, and
+ * they are set via DDC for HDMI.
+ * - Receiver register offsets
+ * The offsets of the registers are different for DP vs. HDMI
+ * - Receiver register masks/offsets
+ * For instance, the ready bit for the KSV fifo is in a different
+ * place on DP vs HDMI
+ * - Receiver register names
+ * Seriously. In the DP spec, the 16-bit register containing
+ * downstream information is called BINFO, on HDMI it's called
+ * BSTATUS. To confuse matters further, DP has a BSTATUS register
+ * with a completely different definition.
+ * - KSV FIFO
+ * On HDMI, the ksv fifo is read all at once, whereas on DP it must
+ * be read 3 keys at a time
+ * - Aksv output
+ * Since Aksv is hidden in hardware, there's different procedures
+ * to send it over DP AUX vs DDC
+ */
+struct intel_hdcp_shim {
+ /* Outputs the transmitter's An and Aksv values to the receiver. */
+ int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
+
+ /* Reads the receiver's key selection vector */
+ int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
+
+ /*
+ * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
+ * definitions are the same in the respective specs, but the names are
+ * different. Call it BSTATUS since that's the name the HDMI spec
+ * uses and it was there first.
+ */
+ int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
+ u8 *bstatus);
+
+ /* Determines whether a repeater is present downstream */
+ int (*repeater_present)(struct intel_digital_port *intel_dig_port,
+ bool *repeater_present);
+
+ /* Reads the receiver's Ri' value */
+ int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
+
+ /* Determines if the receiver's KSV FIFO is ready for consumption */
+ int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
+ bool *ksv_ready);
+
+ /* Reads the ksv fifo for num_downstream devices */
+ int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
+ int num_downstream, u8 *ksv_fifo);
+
+ /* Reads a 32-bit part of V' from the receiver */
+ int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
+ int i, u32 *part);
+
+ /* Enables HDCP signalling on the port */
+ int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
+ bool enable);
+
+ /* Ensures the link is still protected */
+ bool (*check_link)(struct intel_digital_port *intel_dig_port);
+
+ /* Detects panel's hdcp capability. This is optional for HDMI. */
+ int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
+ bool *hdcp_capable);
+};
+
struct intel_connector {
struct drm_connector base;
/*
@@ -330,6 +408,12 @@ struct intel_connector {
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
+
+ const struct intel_hdcp_shim *hdcp_shim;
+ struct mutex hdcp_mutex;
+ uint64_t hdcp_value; /* protected by hdcp_mutex */
+ struct delayed_work hdcp_check_work;
+ struct work_struct hdcp_prop_work;
};
struct intel_digital_connector_state {
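As a usage sketch for the shim (every my_dp_* name below is a hypothetical placeholder, not the driver's actual DP or HDMI implementation), a bus-specific backend fills in the ops table and hands it to intel_hdcp_init() at connector setup:

	/* Hypothetical DP backend; all my_dp_* functions are placeholders. */
	static const struct intel_hdcp_shim my_dp_hdcp_shim = {
		.write_an_aksv     = my_dp_write_an_aksv,   /* An/Aksv go out over DP AUX */
		.read_bksv         = my_dp_read_bksv,
		.read_bstatus      = my_dp_read_binfo,      /* DP calls this register BINFO */
		.repeater_present  = my_dp_repeater_present,
		.read_ri_prime     = my_dp_read_ri_prime,
		.read_ksv_ready    = my_dp_read_ksv_ready,
		.read_ksv_fifo     = my_dp_read_ksv_fifo,   /* DP reads 3 KSVs at a time */
		.read_v_prime_part = my_dp_read_v_prime_part,
		.toggle_signalling = my_dp_toggle_signalling,
		.check_link        = my_dp_check_link,
		.hdcp_capable      = my_dp_hdcp_capable,
	};

	/* ...and at connector init time: */
	intel_hdcp_init(intel_connector, &my_dp_hdcp_shim);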
@@ -386,6 +470,8 @@ struct intel_atomic_state {
unsigned int active_crtcs;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -405,8 +491,9 @@ struct intel_atomic_state {
struct intel_plane_state {
struct drm_plane_state base;
- struct drm_rect clip;
struct i915_vma *vma;
+ unsigned long flags;
+#define PLANE_HAS_FENCE BIT(0)
struct {
u32 offset;
@@ -420,6 +507,9 @@ struct intel_plane_state {
/* plane control register */
u32 ctl;
+ /* plane color control register */
+ u32 color_ctl;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -738,6 +828,9 @@ struct intel_crtc_state {
*/
uint8_t lane_lat_optim_mask;
+ /* minimum acceptable voltage level */
+ u8 min_voltage_level;
+
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
@@ -795,7 +888,6 @@ struct intel_crtc_state {
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
- enum plane plane;
/*
* Whether the crtc and the connected output pipeline is active. Implies
* that crtc->enabled is set, i.e. the current mode configuration has
@@ -840,10 +932,11 @@ struct intel_crtc {
struct intel_plane {
struct drm_plane base;
- u8 plane;
+ enum i9xx_plane_id i9xx_plane;
enum plane_id id;
enum pipe pipe;
bool can_scale;
+ bool has_fbc;
int max_downscale;
uint32_t frontbuffer_bit;
@@ -950,17 +1043,16 @@ struct intel_dp_compliance {
struct intel_dp {
i915_reg_t output_reg;
- i915_reg_t aux_ch_ctl_reg;
- i915_reg_t aux_ch_data_reg[5];
uint32_t DP;
int link_rate;
uint8_t lane_count;
uint8_t sink_count;
bool link_mst;
+ bool link_trained;
bool has_audio;
bool detect_done;
- bool channel_eq_status;
bool reset_link_params;
+ enum aux_ch aux_ch;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
@@ -1035,6 +1127,9 @@ struct intel_dp {
int send_bytes,
uint32_t aux_clock_divider);
+ i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp);
+ i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
+
/* This is called before link training is started */
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
@@ -1049,7 +1144,6 @@ struct intel_lspcon {
struct intel_digital_port {
struct intel_encoder base;
- enum port port;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
@@ -1081,7 +1175,7 @@ struct intel_dp_mst_encoder {
static inline enum dpio_channel
vlv_dport_to_channel(struct intel_digital_port *dport)
{
- switch (dport->port) {
+ switch (dport->base.port) {
case PORT_B:
case PORT_D:
return DPIO_CH0;
@@ -1095,7 +1189,7 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
static inline enum dpio_phy
vlv_dport_to_phy(struct intel_digital_port *dport)
{
- switch (dport->port) {
+ switch (dport->base.port) {
case PORT_B:
case PORT_C:
return DPIO_PHY0;
@@ -1127,7 +1221,7 @@ intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
}
static inline struct intel_crtc *
-intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
+intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum i9xx_plane_id plane)
{
return dev_priv->plane_to_crtc_mapping[plane];
}
@@ -1148,7 +1242,7 @@ enc_to_dig_port(struct drm_encoder *encoder)
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
switch (intel_encoder->type) {
- case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DDI:
WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
@@ -1272,7 +1366,6 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
-enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
@@ -1284,18 +1377,18 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
-void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+ bool enable);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int plane, unsigned int height);
@@ -1305,7 +1398,9 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-void intel_audio_codec_disable(struct intel_encoder *encoder);
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
@@ -1319,14 +1414,20 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv);
void cnl_uninit_cdclk(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
+void icl_init_cdclk(struct drm_i915_private *dev_priv);
+void icl_uninit_cdclk(struct drm_i915_private *dev_priv);
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+ const char *context);
/* intel_display.c */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -1363,8 +1464,8 @@ struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
-int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
static inline bool
@@ -1409,8 +1510,11 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_vma(struct i915_vma *vma);
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ unsigned int rotation,
+ bool uses_fence,
+ unsigned long *out_flags);
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
@@ -1478,8 +1582,9 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct intel_crtc *crtc);
-void hsw_enable_ips(struct intel_crtc *crtc);
-void hsw_disable_ips(struct intel_crtc *crtc);
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
+void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
@@ -1492,11 +1597,15 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
return i915_ggtt_offset(state->vma);
}
+u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
+u32 glk_color_ctl(const struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
unsigned int rotation);
-int skl_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
/* intel_csr.c */
@@ -1518,11 +1627,14 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+int intel_dp_sink_crc(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
@@ -1578,8 +1690,7 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port);
+bool intel_digital_port_connected(struct intel_encoder *encoder);
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1597,7 +1708,8 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
void intel_dvo_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
-
+bool intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector);
/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -1641,7 +1753,7 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
/* intel_fbc.c */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
- struct drm_atomic_state *state);
+ struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
void intel_fbc_pre_update(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
@@ -1746,8 +1858,20 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
}
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+/* intel_hdcp.c */
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
+int intel_hdcp_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *hdcp_shim);
+int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_disable(struct intel_connector *connector);
+int intel_hdcp_check_link(struct intel_connector *connector);
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
/* intel_psr.c */
+#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_psr_disable(struct intel_dp *intel_dp,
@@ -1869,14 +1993,12 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct drm_i915_gem_request *rq,
- struct intel_rps_client *rps);
+void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -1897,15 +2019,10 @@ bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *ddb,
int ignore);
bool ilk_disable_lp_wm(struct drm_device *dev);
-int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
-static inline int intel_rc6_enabled(void)
-{
- return i915_modparams.enable_rc6;
-}
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
@@ -1913,12 +2030,13 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
/* intel_sprite.c */
+bool intel_format_is_yuv(u32 format);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
-int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
void skl_update_plane(struct intel_plane *plane,
@@ -1926,6 +2044,8 @@ void skl_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
bool skl_plane_get_hw_state(struct intel_plane *plane);
+bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 83f1584..51a1d68 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -662,11 +662,11 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
}
}
-static void intel_dsi_port_enable(struct intel_encoder *encoder)
+static void intel_dsi_port_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -705,7 +705,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
if (IS_BROXTON(dev_priv))
temp |= LANE_CONFIGURATION_DUAL_LINK_A;
else
- temp |= intel_crtc->pipe ?
+ temp |= crtc->pipe ?
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
@@ -875,7 +875,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
- intel_dsi_port_enable(encoder);
+ intel_dsi_port_enable(encoder, pipe_config);
}
intel_panel_enable_backlight(pipe_config, conn_state);
@@ -1082,7 +1082,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
@@ -1093,8 +1093,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
crtc_hblank_start_sw, crtc_hblank_end_sw;
/* FIXME: hw readout should not depend on SW state */
- intel_crtc = to_intel_crtc(encoder->base.crtc);
- adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
+ adjusted_mode_sw = &crtc->config->base.adjusted_mode;
/*
* At least one port is active as encoder->get_config is called only if
@@ -1243,6 +1242,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk;
DRM_DEBUG_KMS("\n");
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+
if (IS_GEN9_LP(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
@@ -1265,11 +1266,6 @@ intel_dsi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_KMS("\n");
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
- DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
- return MODE_NO_DBLESCAN;
- }
-
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -1665,6 +1661,27 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
+static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ enum i9xx_plane_id plane;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (connector->encoder->crtc_mask == BIT(PIPE_B))
+ plane = PLANE_B;
+ else
+ plane = PLANE_A;
+
+ val = I915_READ(DSPCNTR(plane));
+ if (val & DISPPLANE_ROTATE_180)
+ orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ }
+
+ return orientation;
+}
+
static void intel_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1680,6 +1697,13 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
allowed_scalers);
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+
+ connector->base.display_info.panel_orientation =
+ intel_dsi_get_panel_orientation(connector);
+ drm_connector_init_panel_orientation_property(
+ &connector->base,
+ connector->panel.fixed_mode->hdisplay,
+ connector->panel.fixed_mode->vdisplay);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 53c9b76..eb0c559b2 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -159,6 +159,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
+
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -217,9 +219,6 @@ intel_dvo_mode_valid(struct drm_connector *connector,
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int target_clock = mode->clock;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
/* XXX: Validate clock range */
if (fixed_mode) {
@@ -246,7 +245,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
intel_dvo->attached_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- /* If we have timings from the BIOS for the panel, put them in
+ /*
+ * If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
@@ -294,11 +294,6 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
I915_WRITE(dvo_reg, dvo_val);
}
-/**
- * Detect the output connection on our DVO device.
- *
- * Unimplemented.
- */
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
@@ -314,7 +309,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
- /* We should probably have an i2c driver get_modes function for those
+ /*
+ * We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
@@ -372,7 +368,7 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
-/**
+/*
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
* Other chips with DVO LVDS will need to extend this to deal with the LVDS
@@ -444,7 +440,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
uint32_t dpll[I915_MAX_PIPES];
enum port port;
- /* Allow the I2C driver info to specify the GPIO to be used in
+ /*
+ * Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
*/
@@ -455,7 +452,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
else
gpio = GMBUS_PIN_DPB;
- /* Set up the I2C bus necessary for the chip we're probing.
+ /*
+ * Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
@@ -463,12 +461,14 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_dvo->dev = *dvo;
- /* GMBUS NAK handling seems to be unstable, hence let the
+ /*
+ * GMBUS NAK handling seems to be unstable, hence let the
* transmitter detection run in bit banging mode for now.
*/
intel_gmbus_force_bit(i2c, true);
- /* ns2501 requires the DVO 2x clock before it will
+ /*
+ * ns2501 requires the DVO 2x clock before it will
+ * respond to i2c accesses, so make sure we
+ * have the clock enabled before we attempt to
* initialize the device.
@@ -526,7 +526,8 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
- /* For our LVDS chipsets, we should hopefully be able
+ /*
+ * For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
* However, it's in a different format from the BIOS
* data on chipsets with integrated LVDS (stored in AIM
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 6074e04d..4ba139c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -37,12 +37,12 @@
* Resource Streamer, is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
-/* Same as Haswell, but 72064 bytes now. */
-#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
+#define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE (18 * PAGE_SIZE)
+#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
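For reference on the page-count defines above: HSW's 66944-byte context needs ceil(66944 / 4096) = 17 pages, while the new DEFAULT_LR_CONTEXT_RENDER_SIZE and GEN11_LR_CONTEXT_RENDER_SIZE work out to 22 x 4096 = 90112 and 14 x 4096 = 57344 bytes respectively.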
@@ -50,6 +50,8 @@ struct engine_class_info {
const char *name;
int (*init_legacy)(struct intel_engine_cs *engine);
int (*init_execlists)(struct intel_engine_cs *engine);
+
+ u8 uabi_class;
};
static const struct engine_class_info intel_engine_classes[] = {
@@ -57,21 +59,25 @@ static const struct engine_class_info intel_engine_classes[] = {
.name = "rcs",
.init_execlists = logical_render_ring_init,
.init_legacy = intel_init_render_ring_buffer,
+ .uabi_class = I915_ENGINE_CLASS_RENDER,
},
[COPY_ENGINE_CLASS] = {
.name = "bcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_blt_ring_buffer,
+ .uabi_class = I915_ENGINE_CLASS_COPY,
},
[VIDEO_DECODE_CLASS] = {
.name = "vcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd_ring_buffer,
+ .uabi_class = I915_ENGINE_CLASS_VIDEO,
},
[VIDEO_ENHANCEMENT_CLASS] = {
.name = "vecs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_vebox_ring_buffer,
+ .uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
},
};
@@ -117,6 +123,22 @@ static const struct engine_info intel_engines[] = {
.mmio_base = GEN8_BSD2_RING_BASE,
.irq_shift = GEN8_VCS2_IRQ_SHIFT,
},
+ [VCS3] = {
+ .hw_id = VCS3_HW,
+ .uabi_id = I915_EXEC_BSD,
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 2,
+ .mmio_base = GEN11_BSD3_RING_BASE,
+ .irq_shift = 0, /* not used */
+ },
+ [VCS4] = {
+ .hw_id = VCS4_HW,
+ .uabi_id = I915_EXEC_BSD,
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 3,
+ .mmio_base = GEN11_BSD4_RING_BASE,
+ .irq_shift = 0, /* not used */
+ },
[VECS] = {
.hw_id = VECS_HW,
.uabi_id = I915_EXEC_VEBOX,
@@ -125,6 +147,14 @@ static const struct engine_info intel_engines[] = {
.mmio_base = VEBOX_RING_BASE,
.irq_shift = GEN8_VECS_IRQ_SHIFT,
},
+ [VECS2] = {
+ .hw_id = VECS2_HW,
+ .uabi_id = I915_EXEC_VEBOX,
+ .class = VIDEO_ENHANCEMENT_CLASS,
+ .instance = 1,
+ .mmio_base = GEN11_VEBOX2_RING_BASE,
+ .irq_shift = 0, /* not used */
+ },
};
/**
@@ -153,14 +183,15 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
switch (INTEL_GEN(dev_priv)) {
default:
MISSING_CASE(INTEL_GEN(dev_priv));
+ return DEFAULT_LR_CONTEXT_RENDER_SIZE;
+ case 11:
+ return GEN11_LR_CONTEXT_RENDER_SIZE;
case 10:
return GEN10_LR_CONTEXT_RENDER_SIZE;
case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8:
- return i915_modparams.enable_execlists ?
- GEN8_LR_CONTEXT_RENDER_SIZE :
- GEN8_CXT_TOTAL_SIZE;
+ return GEN8_LR_CONTEXT_RENDER_SIZE;
case 7:
if (IS_HASWELL(dev_priv))
return HSW_CXT_TOTAL_SIZE;
@@ -203,6 +234,18 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
class_info = &intel_engine_classes[info->class];
+ BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
+ BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
+
+ if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+ return -EINVAL;
+
+ if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+ return -EINVAL;
+
+ if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+ return -EINVAL;
+
GEM_BUG_ON(dev_priv->engine[id]);
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
@@ -213,13 +256,33 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
class_info->name, info->instance) >=
sizeof(engine->name));
- engine->uabi_id = info->uabi_id;
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = info->mmio_base;
+ if (INTEL_GEN(dev_priv) >= 11) {
+ switch (engine->id) {
+ case VCS:
+ engine->mmio_base = GEN11_BSD_RING_BASE;
+ break;
+ case VCS2:
+ engine->mmio_base = GEN11_BSD2_RING_BASE;
+ break;
+ case VECS:
+ engine->mmio_base = GEN11_VEBOX_RING_BASE;
+ break;
+ default:
+ /* take the original value for all other engines */
+ engine->mmio_base = info->mmio_base;
+ break;
+ }
+ } else {
+ engine->mmio_base = info->mmio_base;
+ }
engine->irq_shift = info->irq_shift;
engine->class = info->class;
engine->instance = info->instance;
+ engine->uabi_id = info->uabi_id;
+ engine->uabi_class = class_info->uabi_class;
+
engine->context_size = __intel_engine_context_size(dev_priv,
engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
@@ -228,8 +291,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
+ spin_lock_init(&engine->stats.lock);
+
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+ dev_priv->engine_class[info->class][info->instance] = engine;
dev_priv->engine[id] = engine;
return 0;
}
@@ -281,6 +347,8 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
device_info->num_rings = hweight32(mask);
+ i915_check_and_clear_faults(dev_priv);
+
return 0;
cleanup:
@@ -306,7 +374,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
&intel_engine_classes[engine->class];
int (*init)(struct intel_engine_cs *engine);
- if (i915_modparams.enable_execlists)
+ if (HAS_EXECLISTS(dev_priv))
init = class_info->init_execlists;
else
init = class_info->init_legacy;
@@ -356,18 +424,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
if (HAS_VEBOX(dev_priv))
I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
}
- if (dev_priv->semaphore) {
- struct page *page = i915_vma_first_page(dev_priv->semaphore);
- void *semaphores;
-
- /* Semaphores are in noncoherent memory, flush to be safe */
- semaphores = kmap_atomic(page);
- memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
- 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
- drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
- I915_NUM_ENGINES * gen8_semaphore_seqno_size);
- kunmap_atomic(semaphores);
- }
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
@@ -412,6 +468,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+ execlists->queue_priority = INT_MIN;
execlists->queue = RB_ROOT;
execlists->first = NULL;
}
@@ -620,7 +677,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
*/
- if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+ if (engine->i915->preempt_context) {
ring = engine->context_pin(engine,
engine->i915->preempt_context);
if (IS_ERR(ring)) {
@@ -633,25 +690,19 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
goto err_unpin_preempt;
- ret = i915_gem_render_state_init(engine);
- if (ret)
- goto err_breadcrumbs;
-
if (HWS_NEEDS_PHYSICAL(engine->i915))
ret = init_phys_status_page(engine);
else
ret = init_status_page(engine);
if (ret)
- goto err_rs_fini;
+ goto err_breadcrumbs;
return 0;
-err_rs_fini:
- i915_gem_render_state_fini(engine);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
- if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ if (engine->i915->preempt_context)
engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
engine->context_unpin(engine, engine->i915->kernel_context);
@@ -674,17 +725,19 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
else
cleanup_status_page(engine);
- i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
- if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ if (engine->default_state)
+ i915_gem_object_put(engine->default_state);
+
+ if (engine->i915->preempt_context)
engine->context_unpin(engine, engine->i915->preempt_context);
engine->context_unpin(engine, engine->i915->kernel_context);
}
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
@@ -700,7 +753,7 @@ u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
return acthd;
}
-u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 bbaddr;
@@ -1014,22 +1067,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
- /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_DG_MIRROR_FIX_ENABLE);
-
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
- GEN9_RHWO_OPTIMIZATION_DISABLE);
- /*
- * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
- * but we do that in per ctx batchbuffer as there is an issue
- * with this register not getting restored on ctx restore
- */
- }
-
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
@@ -1045,11 +1082,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
- /* WaDisableMaskBasedCammingInRCC:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
- PIXEL_MASK_CAMMING_DISABLE);
-
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
@@ -1079,14 +1111,22 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(dev_priv) ||
IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ IS_COFFEELAKE(dev_priv))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(dev_priv)) {
+ u32 val = I915_READ(GEN8_L3SQCREG1);
+
+ val &= ~L3_PRIO_CREDITS_MASK;
+ val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+ I915_WRITE(GEN8_L3SQCREG1, val);
+ }
+
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
@@ -1210,66 +1250,22 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
- /* WaStoreMultiplePTEenable:bxt */
- /* This is a requirement according to Hardware specification */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
-
- /* WaSetClckGatingDisableMedia:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
- }
-
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
/* WaDisablePooledEuLoadBalancingFix:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
- }
-
- /* WaDisableSbeCacheDispatchPortSharing:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
- }
-
- /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
- /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
- /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
- /* WaDisableLSQCROPERFforOCL:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
- if (ret)
- return ret;
-
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
- }
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
+ I915_WRITE(FF_SLICE_CS_CHICKEN2,
+ _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
/* WaToEnableHwFixForPushConstHWBug:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaInPlaceDecompressionHang:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
return 0;
}
@@ -1327,6 +1323,9 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
+ /* WaDisableEarlyEOT:cnl */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+
return 0;
}
@@ -1441,7 +1440,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
int err;
- WARN_ON(engine->id != RCS);
+ if (GEM_WARN_ON(engine->id != RCS))
+ return -EINVAL;
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
@@ -1472,20 +1472,20 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
return 0;
}
-int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
+int intel_ring_workarounds_emit(struct i915_request *rq)
{
- struct i915_workarounds *w = &req->i915->workarounds;
+ struct i915_workarounds *w = &rq->i915->workarounds;
u32 *cs;
int ret, i;
if (w->count == 0)
return 0;
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
- cs = intel_ring_begin(req, (w->count * 2 + 2));
+ cs = intel_ring_begin(rq, w->count * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1496,9 +1496,9 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
@@ -1510,7 +1510,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
bool idle = true;
- intel_runtime_pm_get(dev_priv);
+ /* If the whole device is asleep, the engine must be idle */
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return true;
/* First check that no commands are left in the ring */
if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
@@ -1549,10 +1551,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
return true;
- /* Interrupt/tasklet pending? */
- if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
- return false;
-
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active))
return false;
@@ -1573,10 +1571,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- if (READ_ONCE(dev_priv->gt.active_requests))
- return false;
-
- /* If the driver is wedged, HW state may be very inconsistent and
+ /*
+ * If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
if (i915_terminally_wedged(&dev_priv->gpu_error))
@@ -1590,6 +1586,34 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
return true;
}
+/**
+ * intel_engine_has_kernel_context:
+ * @engine: the engine
+ *
+ * Returns true if the last context to be executed on this engine (or, if the
+ * engine is already idle, the last context that was executed) is the kernel
+ * context (#i915.kernel_context).
+ */
+bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
+{
+ const struct i915_gem_context * const kernel_context =
+ engine->i915->kernel_context;
+ struct i915_request *rq;
+
+ lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+ /*
+ * Check the last context seen by the engine. If active, it will be
+ * the last request that remains in the timeline. When idle, it is
+ * the last executed context as tracked by retirement.
+ */
+ rq = __i915_gem_active_peek(&engine->timeline->last_request);
+ if (rq)
+ return rq->ctx == kernel_context;
+ else
+ return engine->last_retired_context == kernel_context;
+}
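For illustration only, a hypothetical caller on the idle path might use this helper as follows (the surrounding loop and locals are assumptions, not part of this patch):

	/*
	 * Hypothetical sketch: before letting the GT power down, verify that
	 * every engine last ran the kernel context. "i915", "engine" and "id"
	 * are assumed locals; struct_mutex must be held as documented above.
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	for_each_engine(engine, i915, id) {
		if (!intel_engine_has_kernel_context(engine))
			return false; /* user context may still be resident */
	}
	return true;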
+
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -1599,19 +1623,63 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
-void intel_engines_mark_idle(struct drm_i915_private *i915)
+/**
+ * intel_engines_park: called when the GT is transitioning from busy->idle
+ * @i915: the i915 device
+ *
+ * The GT is now idle and about to go to sleep (maybe never to wake again?).
+ * Time for us to tidy and put away our toys (release resources back to the
+ * system).
+ */
+void intel_engines_park(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
+ /* Flush the residual irq tasklets first. */
intel_engine_disarm_breadcrumbs(engine);
+ tasklet_kill(&engine->execlists.tasklet);
+
+ /*
+ * We are committed now to parking the engines, make sure there
+ * will be no more interrupts arriving later and the engines
+ * are truly idle.
+ */
+ if (wait_for(intel_engine_is_idle(engine), 10)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ dev_err(i915->drm.dev,
+ "%s is not idle before parking\n",
+ engine->name);
+ intel_engine_dump(engine, &p, NULL);
+ }
+
+ if (engine->park)
+ engine->park(engine);
+
i915_gem_batch_pool_fini(&engine->batch_pool);
- tasklet_kill(&engine->execlists.irq_tasklet);
engine->execlists.no_priolist = false;
}
}
+/**
+ * intel_engines_unpark: called when the GT is transitioning from idle->busy
+ * @i915: the i915 device
+ *
+ * The GT was idle and is now about to fire up with some new user requests.
+ */
+void intel_engines_unpark(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ if (engine->unpark)
+ engine->unpark(engine);
+ }
+}
+
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
@@ -1627,77 +1695,98 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int which;
+
+ which = 0;
+ for_each_engine(engine, i915, id)
+ if (engine->default_state)
+ which |= BIT(engine->uabi_class);
+
+ return which;
+}
+
static void print_request(struct drm_printer *m,
- struct drm_i915_gem_request *rq,
+ struct i915_request *rq,
const char *prefix)
{
- drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
+ drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
rq->global_seqno,
- i915_gem_request_completed(rq) ? "!" : "",
- rq->ctx->hw_id, rq->fence.seqno,
+ i915_request_completed(rq) ? "!" : "",
+ rq->fence.context, rq->fence.seqno,
rq->priotree.priority,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
rq->timeline->common->name);
}
-void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
+static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
- struct intel_breadcrumbs * const b = &engine->breadcrumbs;
- const struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_gpu_error * const error = &engine->i915->gpu_error;
- struct drm_i915_private *dev_priv = engine->i915;
- struct drm_i915_gem_request *rq;
- struct rb_node *rb;
- u64 addr;
-
- drm_printf(m, "%s\n", engine->name);
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine),
- engine->hangcheck.seqno,
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
- engine->timeline->inflight_seqnos);
- drm_printf(m, "\tReset count: %d\n",
- i915_reset_engine_count(error, engine));
-
- rcu_read_lock();
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ drm_printf(m, "*\n");
+ skip = true;
+ }
+ continue;
+ }
- drm_printf(m, "\tRequests:\n");
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ drm_printf(m, "%08zx %s\n", pos, line);
- rq = list_first_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tfirst ");
+ prev = buf + pos;
+ skip = false;
+ }
+}
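For reference, a dump produced by this helper collapses runs of identical 32-byte rows into a single '*' line, along these lines (illustrative values only):

	/* Example hexdump output (illustrative):
	 *
	 *   00000000 00000001 deadbeef 00000000 00000000 00000000 00000000 00000000 00000000
	 *   00000020 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
	 *   *
	 */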
- rq = list_last_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tlast ");
+static void intel_engine_print_registers(const struct intel_engine_cs *engine,
+ struct drm_printer *m)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const struct intel_engine_execlists * const execlists =
+ &engine->execlists;
+ u64 addr;
- rq = i915_gem_find_active_request(engine);
- if (rq) {
- print_request(m, rq, "\t\tactive ");
- drm_printf(m,
- "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
- rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ drm_printf(m, "\tRING_START: 0x%08x\n",
+ I915_READ(RING_START(engine->mmio_base)));
+ drm_printf(m, "\tRING_HEAD: 0x%08x\n",
+ I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+ drm_printf(m, "\tRING_TAIL: 0x%08x\n",
+ I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
+ drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
+ I915_READ(RING_CTL(engine->mmio_base)),
+ I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
+ if (INTEL_GEN(engine->i915) > 2) {
+ drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
+ I915_READ(RING_MI_MODE(engine->mmio_base)),
+ I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
}
- drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
- I915_READ(RING_START(engine->mmio_base)),
- rq ? i915_ggtt_offset(rq->ring->vma) : 0);
- drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
- I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
- rq ? rq->ring->head : 0);
- drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
- I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
- rq ? rq->ring->tail : 0);
- drm_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
- I915_READ(RING_CTL(engine->mmio_base)),
- I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+ if (INTEL_GEN(dev_priv) >= 6) {
+ drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+ }
- rcu_read_unlock();
+ if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
+ drm_printf(m, "\tSYNC_0: 0x%08x\n",
+ I915_READ(RING_SYNC_0(engine->mmio_base)));
+ drm_printf(m, "\tSYNC_1: 0x%08x\n",
+ I915_READ(RING_SYNC_1(engine->mmio_base)));
+ if (HAS_VEBOX(dev_priv))
+ drm_printf(m, "\tSYNC_2: 0x%08x\n",
+ I915_READ(RING_SYNC_2(engine->mmio_base)));
+ }
addr = intel_engine_get_active_head(engine);
drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
@@ -1705,8 +1794,26 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
+ if (INTEL_GEN(dev_priv) >= 8)
+ addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
+ RING_DMA_FADD_UDW(engine->mmio_base));
+ else if (INTEL_GEN(dev_priv) >= 4)
+ addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ else
+ addr = I915_READ(DMA_FADD_I8XX);
+ drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ if (INTEL_GEN(dev_priv) >= 4) {
+ drm_printf(m, "\tIPEIR: 0x%08x\n",
+ I915_READ(RING_IPEIR(engine->mmio_base)));
+ drm_printf(m, "\tIPEHR: 0x%08x\n",
+ I915_READ(RING_IPEHR(engine->mmio_base)));
+ } else {
+ drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
+ drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+ }
- if (i915_modparams.enable_execlists) {
+ if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
u32 ptr, read, write;
unsigned int idx;
@@ -1742,16 +1849,19 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
rcu_read_lock();
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+ struct i915_request *rq;
unsigned int count;
rq = port_unpack(&execlists->port[idx], &count);
if (rq) {
- drm_printf(m, "\t\tELSP[%d] count=%d, ",
- idx, count);
- print_request(m, rq, "rq: ");
+ char hdr[80];
+
+ snprintf(hdr, sizeof(hdr),
+ "\t\tELSP[%d] count=%d, rq: ",
+ idx, count);
+ print_request(m, rq, hdr);
} else {
- drm_printf(m, "\t\tELSP[%d] idle\n",
- idx);
+ drm_printf(m, "\t\tELSP[%d] idle\n", idx);
}
}
drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
@@ -1764,10 +1874,82 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
I915_READ(RING_PP_DIR_DCLV(engine)));
}
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...)
+{
+ struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+ const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_gpu_error * const error = &engine->i915->gpu_error;
+ struct i915_request *rq;
+ struct rb_node *rb;
+
+ if (header) {
+ va_list ap;
+
+ va_start(ap, header);
+ drm_vprintf(m, header, &ap);
+ va_end(ap);
+ }
+
+ if (i915_terminally_wedged(&engine->i915->gpu_error))
+ drm_printf(m, "*** WEDGED ***\n");
+
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+ intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine),
+ engine->hangcheck.seqno,
+ jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+ engine->timeline->inflight_seqnos);
+ drm_printf(m, "\tReset count: %d (global %d)\n",
+ i915_reset_engine_count(error, engine),
+ i915_reset_count(error));
+
+ rcu_read_lock();
+
+ drm_printf(m, "\tRequests:\n");
+
+ rq = list_first_entry(&engine->timeline->requests,
+ struct i915_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tfirst ");
+
+ rq = list_last_entry(&engine->timeline->requests,
+ struct i915_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tlast ");
+
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ print_request(m, rq, "\t\tactive ");
+ drm_printf(m,
+ "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ drm_printf(m, "\t\tring->start: 0x%08x\n",
+ i915_ggtt_offset(rq->ring->vma));
+ drm_printf(m, "\t\tring->head: 0x%08x\n",
+ rq->ring->head);
+ drm_printf(m, "\t\tring->tail: 0x%08x\n",
+ rq->ring->tail);
+ }
+
+ rcu_read_unlock();
+
+ if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+ intel_engine_print_registers(engine, m);
+ intel_runtime_pm_put(engine->i915);
+ } else {
+ drm_printf(m, "\tDevice is asleep; skipping register dump\n");
+ }
spin_lock_irq(&engine->timeline->lock);
list_for_each_entry(rq, &engine->timeline->requests, link)
print_request(m, rq, "\t\tE ");
+ drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = execlists->first; rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
@@ -1786,7 +1968,143 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
}
spin_unlock_irq(&b->rb_lock);
- drm_printf(m, "\n");
+ drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
+ engine->irq_posted,
+ yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted)),
+ yesno(test_bit(ENGINE_IRQ_EXECLIST,
+ &engine->irq_posted)));
+
+ drm_printf(m, "HWSP:\n");
+ hexdump(m, engine->status_page.page_addr, PAGE_SIZE);
+
+ drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
+}
+
+static u8 user_class_map[] = {
+ [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
+ [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
+ [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
+ [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
+};
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+ if (class >= ARRAY_SIZE(user_class_map))
+ return NULL;
+
+ class = user_class_map[class];
+
+ GEM_BUG_ON(class > MAX_ENGINE_CLASS);
+
+ if (instance > MAX_ENGINE_INSTANCE)
+ return NULL;
+
+ return i915->engine_class[class][instance];
+}
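A minimal usage sketch of the lookup above (the uapi class value comes from the table; the instance number and error handling are assumptions):

	/*
	 * Hypothetical lookup: resolve the first user-visible video decode
	 * engine, as addressed by its uapi class/instance pair.
	 */
	struct intel_engine_cs *engine =
		intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 0);

	if (!engine)
		return -ENOENT; /* no such engine on this platform */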
+
+/**
+ * intel_enable_engine_stats() - Enable engine busy tracking on engine
+ * @engine: engine to enable stats collection
+ *
+ * Start collecting the engine busyness data for @engine.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int intel_enable_engine_stats(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ unsigned long flags;
+ int err = 0;
+
+ if (!intel_engine_supports_stats(engine))
+ return -ENODEV;
+
+ tasklet_disable(&execlists->tasklet);
+ spin_lock_irqsave(&engine->stats.lock, flags);
+
+ if (unlikely(engine->stats.enabled == ~0)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ if (engine->stats.enabled++ == 0) {
+ const struct execlist_port *port = execlists->port;
+ unsigned int num_ports = execlists_num_ports(execlists);
+
+ engine->stats.enabled_at = ktime_get();
+
+ /* XXX submission method oblivious? */
+ while (num_ports-- && port_isset(port)) {
+ engine->stats.active++;
+ port++;
+ }
+
+ if (engine->stats.active)
+ engine->stats.start = engine->stats.enabled_at;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
+ tasklet_enable(&execlists->tasklet);
+
+ return err;
+}
+
+static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
+{
+ ktime_t total = engine->stats.total;
+
+ /*
+ * If the engine is executing something at the moment
+ * add it to the total.
+ */
+ if (engine->stats.active)
+ total = ktime_add(total,
+ ktime_sub(ktime_get(), engine->stats.start));
+
+ return total;
+}
+
+/**
+ * intel_engine_get_busy_time() - Return current accumulated engine busyness
+ * @engine: engine to report on
+ *
+ * Returns accumulated time @engine was busy since engine stats were enabled.
+ */
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
+{
+ ktime_t total;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->stats.lock, flags);
+ total = __intel_engine_get_busy_time(engine);
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
+
+ return total;
+}
+
+/**
+ * intel_disable_engine_stats() - Disable engine busy tracking on engine
+ * @engine: engine to disable stats collection
+ *
+ * Stops collecting the engine busyness data for @engine.
+ */
+void intel_disable_engine_stats(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (!intel_engine_supports_stats(engine))
+ return;
+
+ spin_lock_irqsave(&engine->stats.lock, flags);
+ WARN_ON_ONCE(engine->stats.enabled == 0);
+ if (--engine->stats.enabled == 0) {
+ engine->stats.total = __intel_engine_get_busy_time(engine);
+ engine->stats.active = 0;
+ }
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
}
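Taken together, the busyness API above is intended to be used as enable → sample → disable; a minimal sketch of one sampling interval (the interval length and the pr_info() reporting are assumptions, not part of this patch):

	/*
	 * Hypothetical sampling sketch: measure how long @engine was busy
	 * over one second. Only valid on engines that support stats;
	 * msleep() needs <linux/delay.h>.
	 */
	ktime_t t0, t1;
	int err;

	err = intel_enable_engine_stats(engine);
	if (err)
		return err; /* e.g. -ENODEV when stats are unsupported */

	t0 = intel_engine_get_busy_time(engine);
	msleep(1000);
	t1 = intel_engine_get_busy_time(engine);

	pr_info("%s busy for %lldns over the last second\n",
		engine->name, ktime_to_ns(ktime_sub(t1, t0)));

	intel_disable_engine_stats(engine);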
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 1a0f5e0..707d49c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -46,16 +46,6 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
return HAS_FBC(dev_priv);
}
-static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
-{
- return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
-}
-
-static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
-{
- return INTEL_GEN(dev_priv) < 4;
-}
-
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
return INTEL_GEN(dev_priv) <= 3;
@@ -151,7 +141,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
+ fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
}
@@ -177,13 +167,13 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
- dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
+ dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
if (params->fb.format->cpp[0] == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- if (params->vma->fence) {
+ if (params->flags & PLANE_HAS_FENCE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
} else {
@@ -224,7 +214,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
- dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
+ dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
if (params->fb.format->cpp[0] == 2)
threshold++;
@@ -241,7 +231,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->vma->fence) {
+ if (params->flags & PLANE_HAS_FENCE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
dpfc_ctl |= params->vma->fence->id;
@@ -306,7 +296,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
- dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
+ dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
if (params->fb.format->cpp[0] == 2)
threshold++;
@@ -324,7 +314,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->vma->fence) {
+ if (params->flags & PLANE_HAS_FENCE) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
@@ -492,7 +482,8 @@ static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
schedule_work(&work->work);
}
-static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
+ const char *reason)
{
struct intel_fbc *fbc = &dev_priv->fbc;
@@ -505,6 +496,8 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
+
+ fbc->no_fbc_reason = reason;
}
static bool multiple_pipes_ok(struct intel_crtc *crtc,
@@ -531,7 +524,6 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
int size,
int fb_cpp)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
int compression_threshold = 1;
int ret;
u64 end;
@@ -541,7 +533,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
- end = ggtt->stolen_size - 8 * 1024 * 1024;
+ end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -615,10 +607,16 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
fbc->compressed_llb = compressed_llb;
+ GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+ fbc->compressed_fb.start,
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+ fbc->compressed_llb->start,
+ U32_MAX));
I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + fbc->compressed_fb.start);
+ dev_priv->dsm.start + fbc->compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
- dev_priv->mm.stolen_base + compressed_llb->start);
+ dev_priv->dsm.start + compressed_llb->start);
}
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
@@ -663,11 +661,13 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
static bool stride_is_valid(struct drm_i915_private *dev_priv,
unsigned int stride)
{
- /* These should have been caught earlier. */
- WARN_ON(stride < 512);
- WARN_ON((stride & (64 - 1)) != 0);
+ /* This should have been caught earlier. */
+ if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
+ return false;
/* Below are the additional FBC restrictions. */
+ if (stride < 512)
+ return false;
if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
return stride == 4096 || stride == 8192;
@@ -743,6 +743,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct drm_framebuffer *fb = plane_state->base.fb;
cache->vma = NULL;
+ cache->flags = 0;
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -768,6 +769,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.stride = fb->pitches[0];
cache->vma = plane_state->vma;
+ cache->flags = plane_state->flags;
+ if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
+ cache->flags &= ~PLANE_HAS_FENCE;
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -789,8 +793,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
- (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
+ if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
fbc->no_fbc_reason = "incompatible mode";
return false;
}
@@ -806,8 +809,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* Note that it is possible for a tiled surface to be unmappable (and
* so have no fence associated with it) due to aperture constraints
* at the time of pinning.
+ *
+ * FIXME with 90/270 degree rotation we should use the fence on
+ * the normal GTT view (the rotated view doesn't even have a
+ * fence). Would need changes to the FBC fence Y offset as well.
+ * For now this will effectively disable FBC with 90/270 degree
+ * rotation.
*/
- if (!cache->vma->fence) {
+ if (!(cache->flags & PLANE_HAS_FENCE)) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -850,6 +859,17 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ /*
+ * Work around a problem on GEN9+ HW, where enabling FBC on a plane
+ * having a Y offset that isn't divisible by 4 causes FIFO underrun
+ * and screen flicker.
+ */
+ if (IS_GEN(dev_priv, 9, 10) &&
+ (fbc->state_cache.plane.adjusted_y & 3)) {
+ fbc->no_fbc_reason = "plane Y offset is misaligned";
+ return false;
+ }
+
return true;
}
@@ -888,9 +908,10 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
memset(params, 0, sizeof(*params));
params->vma = cache->vma;
+ params->flags = cache->flags;
params->crtc.pipe = crtc->pipe;
- params->crtc.plane = crtc->plane;
+ params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
params->fb.format = cache->fb.format;
@@ -916,6 +937,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
+ const char *reason = "update pending";
if (!fbc_supported(dev_priv))
return;
@@ -923,7 +945,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
mutex_lock(&fbc->lock);
if (!multiple_pipes_ok(crtc, plane_state)) {
- fbc->no_fbc_reason = "more than one pipe active";
+ reason = "more than one pipe active";
goto deactivate;
}
@@ -933,11 +955,35 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
deactivate:
- intel_fbc_deactivate(dev_priv);
+ intel_fbc_deactivate(dev_priv, reason);
unlock:
mutex_unlock(&fbc->lock);
}
+/**
+ * __intel_fbc_disable - disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This is the low level function that actually disables FBC. Callers should
+ * grab the FBC lock.
+ */
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_crtc *crtc = fbc->crtc;
+
+ WARN_ON(!mutex_is_locked(&fbc->lock));
+ WARN_ON(!fbc->enabled);
+ WARN_ON(fbc->active);
+
+ DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
+
+ __intel_fbc_cleanup_cfb(dev_priv);
+
+ fbc->enabled = false;
+ fbc->crtc = NULL;
+}
+
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -949,6 +995,13 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
if (!fbc->enabled || fbc->crtc != crtc)
return;
+ if (!i915_modparams.enable_fbc) {
+ intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
+ __intel_fbc_disable(dev_priv);
+
+ return;
+ }
+
if (!intel_fbc_can_activate(crtc)) {
WARN_ON(fbc->active);
return;
@@ -966,9 +1019,8 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
intel_fbc_reg_params_equal(&old_params, &fbc->params))
return;
- intel_fbc_deactivate(dev_priv);
+ intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
intel_fbc_schedule_activation(crtc);
- fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}
void intel_fbc_post_update(struct intel_crtc *crtc)
@@ -1009,7 +1061,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
if (fbc->enabled && fbc->busy_bits)
- intel_fbc_deactivate(dev_priv);
+ intel_fbc_deactivate(dev_priv, "frontbuffer write");
mutex_unlock(&fbc->lock);
}
@@ -1054,11 +1106,11 @@ out:
* enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
*/
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
- struct drm_atomic_state *state)
+ struct intel_atomic_state *state)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
+ struct intel_plane *plane;
+ struct intel_plane_state *plane_state;
bool crtc_chosen = false;
int i;
@@ -1066,7 +1118,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
/* Does this atomic commit involve the CRTC currently tied to FBC? */
if (fbc->crtc &&
- !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base))
+ !intel_atomic_get_new_crtc_state(state, fbc->crtc))
goto out;
if (!intel_fbc_can_enable(dev_priv))
@@ -1076,25 +1128,19 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
* plane. We could go for fancier schemes such as checking the plane
* size, but this would just affect the few platforms that don't tie FBC
* to pipe or plane A. */
- for_each_new_plane_in_state(state, plane, plane_state, i) {
- struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
- struct intel_crtc_state *intel_crtc_state;
- struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc);
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
- if (!intel_plane_state->base.visible)
+ if (!plane->has_fbc)
continue;
- if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
+ if (!plane_state->base.visible)
continue;
- if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
- continue;
-
- intel_crtc_state = to_intel_crtc_state(
- drm_atomic_get_existing_crtc_state(state, &crtc->base));
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- intel_crtc_state->enable_fbc = true;
+ crtc_state->enable_fbc = true;
crtc_chosen = true;
break;
}
@@ -1160,31 +1206,6 @@ out:
}
/**
- * __intel_fbc_disable - disable FBC
- * @dev_priv: i915 device instance
- *
- * This is the low level function that actually disables FBC. Callers should
- * grab the FBC lock.
- */
-static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
-{
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_crtc *crtc = fbc->crtc;
-
- WARN_ON(!mutex_is_locked(&fbc->lock));
- WARN_ON(!fbc->enabled);
- WARN_ON(fbc->active);
- WARN_ON(crtc->active);
-
- DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
-
- __intel_fbc_cleanup_cfb(dev_priv);
-
- fbc->enabled = false;
- fbc->crtc = NULL;
-}
-
-/**
* intel_fbc_disable - disable FBC if it's associated with crtc
* @crtc: the CRTC
*
@@ -1198,6 +1219,8 @@ void intel_fbc_disable(struct intel_crtc *crtc)
if (!fbc_supported(dev_priv))
return;
+ WARN_ON(crtc->active);
+
mutex_lock(&fbc->lock);
if (fbc->crtc == crtc)
__intel_fbc_disable(dev_priv);
@@ -1220,8 +1243,10 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
return;
mutex_lock(&fbc->lock);
- if (fbc->enabled)
+ if (fbc->enabled) {
+ WARN_ON(fbc->crtc->active);
__intel_fbc_disable(dev_priv);
+ }
mutex_unlock(&fbc->lock);
cancel_work_sync(&fbc->work.work);
@@ -1242,7 +1267,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
fbc->underrun_detected = true;
- intel_fbc_deactivate(dev_priv);
+ intel_fbc_deactivate(dev_priv, "FIFO underrun");
out:
mutex_unlock(&fbc->lock);
}
@@ -1346,7 +1371,6 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- enum pipe pipe;
INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
@@ -1367,14 +1391,6 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
return;
}
- for_each_pipe(dev_priv, pipe) {
- fbc->possible_framebuffer_bits |=
- INTEL_FRONTBUFFER_PRIMARY(pipe);
-
- if (fbc_on_pipe_a_only(dev_priv))
- break;
- }
-
/* This value was pulled out of someone's hat */
if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ea96682..6f12adc 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -48,7 +48,8 @@
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
struct drm_i915_gem_object *obj = ifbdev->fb->obj;
- unsigned int origin = ifbdev->vma->fence ? ORIGIN_GTT : ORIGIN_CPU;
+ unsigned int origin =
+ ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
intel_fb_obj_invalidate(obj, origin);
}
@@ -115,7 +116,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size, ret;
@@ -139,7 +139,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
* important and we should probably use that space with FBC or other
* features. */
obj = NULL;
- if (size * 2 < ggtt->stolen_usable_size)
+ if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
obj = i915_gem_object_create(dev_priv, size);
@@ -178,6 +178,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct fb_info *info;
struct drm_framebuffer *fb;
struct i915_vma *vma;
+ unsigned long flags = 0;
bool prealloc = false;
void __iomem *vaddr;
int ret;
@@ -212,7 +213,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_MODE_ROTATE_0);
+ vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
+ DRM_MODE_ROTATE_0,
+ false, &flags);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_unlock;
@@ -269,6 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
fb->width, fb->height, i915_ggtt_offset(vma));
ifbdev->vma = vma;
+ ifbdev->vma_flags = flags;
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -276,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
return 0;
out_unpin:
- intel_unpin_fb_vma(vma);
+ intel_unpin_fb_vma(vma, flags);
out_unlock:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -514,7 +518,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->vma) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
- intel_unpin_fb_vma(ifbdev->vma);
+ intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index fcfc217..3a8d3d0 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -79,6 +79,7 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
spin_unlock(&dev_priv->fb_tracking.lock);
}
+ might_sleep();
intel_psr_invalidate(dev_priv, frontbuffer_bits);
intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
@@ -108,6 +109,7 @@ static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
if (!frontbuffer_bits)
return;
+ might_sleep();
intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
intel_psr_flush(dev_priv, frontbuffer_bits, origin);
intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 10037c0..ff08ea0 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -23,6 +23,8 @@
*/
#include "intel_guc.h"
+#include "intel_guc_ads.h"
+#include "intel_guc_submission.h"
#include "i915_drv.h"
static void gen8_guc_raise_irq(struct intel_guc *guc)
@@ -60,13 +62,144 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
void intel_guc_init_early(struct intel_guc *guc)
{
+ intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
+ intel_guc_log_init_early(guc);
mutex_init(&guc->send_mutex);
guc->send = intel_guc_send_nop;
guc->notify = gen8_guc_raise_irq;
}
+int intel_guc_init_wq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ /*
+ * The GuC log buffer flush work item has to access registers to send
+ * the ack to GuC, and this work item, if not synced before suspend,
+ * can potentially get executed after the GFX device is suspended.
+ * By marking the WQ as freezable, we don't have to bother with
+ * flushing this work item from the suspend hooks; the pending
+ * work item, if any, will either be executed before the suspend
+ * or scheduled later on resume. This way the handling of the work
+ * item can be kept the same between system suspend & rpm suspend.
+ */
+ guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (!guc->log.runtime.flush_wq) {
+ DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Even though both sending a GuC action and adding a new workitem to
+ * the GuC workqueue are serialized (each with its own locking), since
+ * we're using multiple engines, it's possible that we're going to
+ * issue a preempt request with two (or more - each for a different
+ * engine) workitems in the GuC queue. In this situation, GuC may submit
+ * all of them, which will make us very confused.
+ * Our preemption contexts may even already be complete - before we
+ * even had the chance to send the preempt action to GuC! Rather
+ * than introducing yet another lock, we can just use an ordered workqueue
+ * to make sure we're always sending a single preemption request with a
+ * single workitem.
+ */
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
+ USES_GUC_SUBMISSION(dev_priv)) {
+ guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
+ WQ_HIGHPRI);
+ if (!guc->preempt_wq) {
+ destroy_workqueue(guc->log.runtime.flush_wq);
+ DRM_ERROR("Couldn't allocate workqueue for GuC "
+ "preemption\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void intel_guc_fini_wq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
+ USES_GUC_SUBMISSION(dev_priv))
+ destroy_workqueue(guc->preempt_wq);
+
+ destroy_workqueue(guc->log.runtime.flush_wq);
+}
+
+static int guc_shared_data_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma);
+ return PTR_ERR(vaddr);
+ }
+
+ guc->shared_data = vma;
+ guc->shared_data_vaddr = vaddr;
+
+ return 0;
+}
+
+static void guc_shared_data_destroy(struct intel_guc *guc)
+{
+ i915_gem_object_unpin_map(guc->shared_data->obj);
+ i915_vma_unpin_and_release(&guc->shared_data);
+}
+
+int intel_guc_init(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ ret = guc_shared_data_create(guc);
+ if (ret)
+ return ret;
+ GEM_BUG_ON(!guc->shared_data);
+
+ ret = intel_guc_log_create(guc);
+ if (ret)
+ goto err_shared;
+
+ ret = intel_guc_ads_create(guc);
+ if (ret)
+ goto err_log;
+ GEM_BUG_ON(!guc->ads_vma);
+
+ /* We need to notify the guc whenever we change the GGTT */
+ i915_ggtt_enable_guc(dev_priv);
+
+ return 0;
+
+err_log:
+ intel_guc_log_destroy(guc);
+err_shared:
+ guc_shared_data_destroy(guc);
+ return ret;
+}
+
+void intel_guc_fini(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ i915_ggtt_disable_guc(dev_priv);
+ intel_guc_ads_destroy(guc);
+ intel_guc_log_destroy(guc);
+ guc_shared_data_destroy(guc);
+}
+
static u32 get_gt_type(struct drm_i915_private *dev_priv)
{
/* XXX: GT type based on PCI device ID? field seems unused by fw */
@@ -87,6 +220,19 @@ static u32 get_core_family(struct drm_i915_private *dev_priv)
}
}
+static u32 get_log_verbosity_flags(void)
+{
+ if (i915_modparams.guc_log_level > 0) {
+ u32 verbosity = i915_modparams.guc_log_level - 1;
+
+ GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);
+ return verbosity << GUC_LOG_VERBOSITY_SHIFT;
+ }
+
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return GUC_LOG_DISABLED;
+}
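For reference, the resulting mapping from the module parameter to the GuC control flags is:

	/* get_log_verbosity_flags() mapping, for reference:
	 *   guc_log_level == 0      -> GUC_LOG_DISABLED
	 *   guc_log_level == N (>0) -> (N - 1) << GUC_LOG_VERBOSITY_SHIFT,
	 *                              with N - 1 bounded by GUC_LOG_VERBOSITY_MAX
	 */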
+
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
@@ -119,15 +265,10 @@ void intel_guc_init_params(struct intel_guc *guc)
params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
- if (i915_modparams.guc_log_level >= 0) {
- params[GUC_CTL_DEBUG] =
- i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
- } else {
- params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
- }
+ params[GUC_CTL_DEBUG] = get_log_verbosity_flags();
/* If GuC submission is enabled, set up additional parameters here */
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
@@ -229,9 +370,8 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
u32 action[2];
action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
- /* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_rc6_enabled() ||
- NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+ /* WaRsDisableCoarsePowerGating:skl,cnl */
+ if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
action[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
@@ -263,54 +403,53 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
/**
* intel_guc_suspend() - notify GuC entering suspend state
- * @dev_priv: i915 device private
+ * @guc: the guc
*/
-int intel_guc_suspend(struct drm_i915_private *dev_priv)
+int intel_guc_suspend(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
- u32 data[3];
+ u32 data[] = {
+ INTEL_GUC_ACTION_ENTER_S_STATE,
+ GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
+ guc_ggtt_offset(guc->shared_data)
+ };
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
- gen9_disable_guc_interrupts(dev_priv);
+/**
+ * intel_guc_reset_engine() - ask GuC to reset an engine
+ * @guc: intel_guc structure
+ * @engine: engine to be reset
+ */
+int intel_guc_reset_engine(struct intel_guc *guc,
+ struct intel_engine_cs *engine)
+{
+ u32 data[7];
- ctx = dev_priv->kernel_context;
+ GEM_BUG_ON(!guc->execbuf_client);
- data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
- /* any value greater than GUC_POWER_D0 */
- data[1] = GUC_POWER_D1;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
- LRC_GUCSHR_PN * PAGE_SIZE;
+ data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
+ data[1] = engine->guc_id;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
+ data[5] = guc->execbuf_client->stage_id;
+ data[6] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/**
* intel_guc_resume() - notify GuC resuming from suspend state
- * @dev_priv: i915 device private
+ * @guc: the guc
*/
-int intel_guc_resume(struct drm_i915_private *dev_priv)
+int intel_guc_resume(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
- u32 data[3];
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
-
- if (i915_modparams.guc_log_level >= 0)
- gen9_enable_guc_interrupts(dev_priv);
-
- ctx = dev_priv->kernel_context;
-
- data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
- data[1] = GUC_POWER_D0;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
- LRC_GUCSHR_PN * PAGE_SIZE;
+ u32 data[] = {
+ INTEL_GUC_ACTION_EXIT_S_STATE,
+ GUC_POWER_D0,
+ guc_ggtt_offset(guc->shared_data)
+ };
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 418450b..b9424ac 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -30,13 +30,18 @@
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
+#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
-#include "i915_guc_reg.h"
#include "i915_vma.h"
+struct guc_preempt_work {
+ struct work_struct work;
+ struct intel_engine_cs *engine;
+};
+
/*
* Top level structure of GuC. It handles firmware loading and manages client
- * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
+ * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
* ExecList submission.
*/
struct intel_guc {
@@ -54,8 +59,14 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
struct ida stage_ids;
+ struct i915_vma *shared_data;
+ void *shared_data_vaddr;
+
+ struct intel_guc_client *execbuf_client;
+ struct intel_guc_client *preempt_client;
- struct i915_guc_client *execbuf_client;
+ struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
+ struct workqueue_struct *preempt_wq;
DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
/* Cyclic counter mod pagesize */
@@ -108,12 +119,16 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_init_params(struct intel_guc *guc);
+int intel_guc_init_wq(struct intel_guc *guc);
+void intel_guc_fini_wq(struct intel_guc *guc);
+int intel_guc_init(struct intel_guc *guc);
+void intel_guc_fini(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
-int intel_guc_suspend(struct drm_i915_private *dev_priv);
-int intel_guc_resume(struct drm_i915_private *dev_priv);
+int intel_guc_suspend(struct intel_guc *guc);
+int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
new file mode 100644
index 0000000..ac62753
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_guc_ads.h"
+#include "intel_uc.h"
+#include "i915_drv.h"
+
+/*
+ * The Additional Data Struct (ADS) has pointers for different buffers used by
+ * the GuC. A single gem object contains the ADS struct itself (guc_ads), the
+ * scheduling policies (guc_policies), a structure describing a collection of
+ * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
+ * its internal state for sleep.
+ */
+
+static void guc_policy_init(struct guc_policy *policy)
+{
+ policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
+ policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
+ policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
+ policy->policy_flags = 0;
+}
+
+static void guc_policies_init(struct guc_policies *policies)
+{
+ struct guc_policy *policy;
+ u32 p, i;
+
+ policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
+ policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+ for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
+ for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+ policy = &policies->policy[p][i];
+
+ guc_policy_init(policy);
+ }
+ }
+
+ policies->is_valid = 1;
+}
+
+/*
+ * The first 80 dwords of the register state context, containing the
+ * execlists and ppgtt registers.
+ */
+#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
+
+/**
+ * intel_guc_ads_create() - creates GuC ADS
+ * @guc: intel_guc struct
+ *
+ */
+int intel_guc_ads_create(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct i915_vma *vma;
+ struct page *page;
+ /* The ads obj includes the struct itself and buffers passed to GuC */
+ struct {
+ struct guc_ads ads;
+ struct guc_policies policies;
+ struct guc_mmio_reg_state reg_state;
+ u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
+ } __packed *blob;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
+ const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
+ u32 base;
+
+ GEM_BUG_ON(guc->ads_vma);
+
+ vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ guc->ads_vma = vma;
+
+ page = i915_vma_first_page(vma);
+ blob = kmap(page);
+
+ /* GuC scheduling policies */
+ guc_policies_init(&blob->policies);
+
+ /* MMIO reg state */
+ for_each_engine(engine, dev_priv, id) {
+ blob->reg_state.white_list[engine->guc_id].mmio_start =
+ engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+ /* Nothing to be saved or restored for now. */
+ blob->reg_state.white_list[engine->guc_id].count = 0;
+ }
+
+ /*
+ * The GuC requires a "Golden Context" when it reinitialises
+ * engines after a reset. Here we use the Render ring default
+ * context, which must already exist and be pinned in the GGTT,
+ * so its address won't change after we've told the GuC where
+ * to find it. Note that we have to skip our header (1 page),
+ * because our GuC shared data is there.
+ */
+ blob->ads.golden_context_lrca =
+ guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) +
+ skipped_offset;
+
+ /*
+ * The GuC expects us to exclude the portion of the context image that
+ * it skips from the size it is to read. It starts reading from after
+ * the execlist context (so skipping the first page [PPHWSP] and 80
+ * dwords). Weird guc is weird.
+ */
+ for_each_engine(engine, dev_priv, id)
+ blob->ads.eng_state_size[engine->guc_id] =
+ engine->context_size - skipped_size;
+
+ base = guc_ggtt_offset(vma);
+ blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
+ blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
+ blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
+
+ kunmap(page);
+
+ return 0;
+}
+
+void intel_guc_ads_destroy(struct intel_guc *guc)
+{
+ i915_vma_unpin_and_release(&guc->ads_vma);
+}
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.h b/drivers/gpu/drm/i915/intel_guc_ads.h
new file mode 100644
index 0000000..c473574
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_ads.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_ADS_H_
+#define _INTEL_GUC_ADS_H_
+
+struct intel_guc;
+
+int intel_guc_ads_create(struct intel_guc *guc);
+void intel_guc_ads_destroy(struct intel_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index c4cbec1..24ad557 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -198,6 +198,7 @@ static int ctch_open(struct intel_guc *guc,
err = ctch_init(guc, ctch);
if (unlikely(err))
goto err_out;
+ GEM_BUG_ON(!ctch->vma);
}
/* vma should be already allocated and mapped */
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index ef67a36..d07f2b9 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -30,17 +30,14 @@
#include "intel_guc_fw.h"
#include "i915_drv.h"
-#define SKL_FW_MAJOR 6
-#define SKL_FW_MINOR 1
+#define SKL_FW_MAJOR 9
+#define SKL_FW_MINOR 33
-#define BXT_FW_MAJOR 8
-#define BXT_FW_MINOR 7
+#define BXT_FW_MAJOR 9
+#define BXT_FW_MINOR 29
#define KBL_FW_MAJOR 9
-#define KBL_FW_MINOR 14
-
-#define GLK_FW_MAJOR 10
-#define GLK_FW_MINOR 56
+#define KBL_FW_MINOR 39
#define GUC_FW_PATH(platform, major, minor) \
"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
@@ -54,66 +51,96 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
-#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
-
-/**
- * intel_guc_fw_select() - selects GuC firmware for uploading
- *
- * @guc: intel_guc struct
- *
- * Return: zero when we know firmware, non-zero in other case
- */
-int intel_guc_fw_select(struct intel_guc *guc)
+static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
+ struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
+ GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
+
+ if (!HAS_GUC(dev_priv))
+ return;
if (i915_modparams.guc_firmware_path) {
- guc->fw.path = i915_modparams.guc_firmware_path;
- guc->fw.major_ver_wanted = 0;
- guc->fw.minor_ver_wanted = 0;
+ guc_fw->path = i915_modparams.guc_firmware_path;
+ guc_fw->major_ver_wanted = 0;
+ guc_fw->minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
- guc->fw.path = I915_SKL_GUC_UCODE;
- guc->fw.major_ver_wanted = SKL_FW_MAJOR;
- guc->fw.minor_ver_wanted = SKL_FW_MINOR;
+ guc_fw->path = I915_SKL_GUC_UCODE;
+ guc_fw->major_ver_wanted = SKL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
- guc->fw.path = I915_BXT_GUC_UCODE;
- guc->fw.major_ver_wanted = BXT_FW_MAJOR;
- guc->fw.minor_ver_wanted = BXT_FW_MINOR;
+ guc_fw->path = I915_BXT_GUC_UCODE;
+ guc_fw->major_ver_wanted = BXT_FW_MAJOR;
+ guc_fw->minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- guc->fw.path = I915_KBL_GUC_UCODE;
- guc->fw.major_ver_wanted = KBL_FW_MAJOR;
- guc->fw.minor_ver_wanted = KBL_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- guc->fw.path = I915_GLK_GUC_UCODE;
- guc->fw.major_ver_wanted = GLK_FW_MAJOR;
- guc->fw.minor_ver_wanted = GLK_FW_MINOR;
+ guc_fw->path = I915_KBL_GUC_UCODE;
+ guc_fw->major_ver_wanted = KBL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else {
- DRM_ERROR("No GuC firmware known for platform with GuC!\n");
- return -ENOENT;
+ DRM_WARN("%s: No firmware known for this platform!\n",
+ intel_uc_fw_type_repr(guc_fw->type));
}
-
- return 0;
}
-/*
- * Read the GuC status register (GUC_STATUS) and store it in the
- * specified location; then return a boolean indicating whether
- * the value matches either of two values representing completion
- * of the GuC boot process.
+/**
+ * intel_guc_fw_init_early() - initializes GuC firmware struct
+ * @guc: intel_guc struct
*
- * This is used for polling the GuC status in a wait_for()
- * loop below.
+ * On platforms with GuC, selects the firmware to be uploaded
*/
-static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
- u32 *status)
+void intel_guc_fw_init_early(struct intel_guc *guc)
{
- u32 val = I915_READ(GUC_STATUS);
- u32 uk_val = val & GS_UKERNEL_MASK;
- *status = val;
- return (uk_val == GS_UKERNEL_READY ||
- ((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
+ struct intel_uc_fw *guc_fw = &guc->fw;
+
+ intel_uc_fw_init(guc_fw, INTEL_UC_FW_TYPE_GUC);
+ guc_fw_select(guc_fw);
+}
+
+static void guc_prepare_xfer(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ /* Must program this register before loading the ucode with DMA */
+ I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_READ_CACHE_LOGIC |
+ GUC_ENABLE_MIA_CACHING |
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
+ GUC_ENABLE_MIA_CLOCK_GATING);
+
+ if (IS_GEN9_LP(dev_priv))
+ I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ else
+ I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+
+ if (IS_GEN9(dev_priv)) {
+ /* DOP Clock Gating Enable for GuC clocks */
+ I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
+ I915_READ(GEN7_MISCCPCTL)));
+
+ /* allows for 5us (in 10ns units) before GT can go to RC6 */
+ I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
+ }
+}
+
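The 0x1FF programmed into GUC_ARAT_C6DIS above is simple arithmetic (illustrative note, not part of the patch; the macro names below are hypothetical):

#define GUC_ARAT_C6DIS_SKETCH_UNITS	0x1FF				/* 511 decimal */
#define GUC_ARAT_C6DIS_SKETCH_NS	(GUC_ARAT_C6DIS_SKETCH_UNITS * 10)
/* 511 units x 10 ns each = 5110 ns, i.e. the ~5 us quoted in the comment */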
+/* Copy RSA signature from the fw image to HW for verification */
+static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uc_fw *guc_fw = &guc->fw;
+ struct sg_table *sg = vma->pages;
+ u32 rsa[UOS_RSA_SCRATCH_COUNT];
+ int i;
+
+ if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
+ guc_fw->rsa_offset) != sizeof(rsa))
+ return -EINVAL;
+
+ for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
+ I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
+
+ return 0;
}
/*
@@ -122,29 +149,19 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
* Architecturally, the DMA engine is bidirectional, and can potentially even
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
- *
- * Note that GuC needs the CSS header plus uKernel code to be copied by the
- * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
*/
-static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
- struct i915_vma *vma)
+static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uc_fw *guc_fw = &guc->fw;
unsigned long offset;
- struct sg_table *sg = vma->pages;
- u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
- int i, ret = 0;
-
- /* where RSA signature starts */
- offset = guc_fw->rsa_offset;
-
- /* Copy RSA signature from the fw image to HW for verification */
- sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
- for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
- I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
+ u32 status;
+ int ret;
- /* The header plus uCode will be copied to WOPCM via DMA, excluding any
- * other components */
+ /*
+ * The header plus uCode will be copied to WOPCM via DMA, excluding any
+ * other components
+ */
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
@@ -162,33 +179,62 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
/* Finally start the DMA */
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+ /* Wait for DMA to finish */
+ ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
+ 2, 100, &status);
+ DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
+
+ return ret;
+}
+
+/*
+ * Read the GuC status register (GUC_STATUS) and store it in the
+ * specified location; then return a boolean indicating whether
+ * the value matches either of two values representing completion
+ * of the GuC boot process.
+ *
+ * This is used for polling the GuC status in a wait_for()
+ * loop below.
+ */
+static inline bool guc_ready(struct intel_guc *guc, u32 *status)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 val = I915_READ(GUC_STATUS);
+ u32 uk_val = val & GS_UKERNEL_MASK;
+
+ *status = val;
+ return (uk_val == GS_UKERNEL_READY) ||
+ ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
+}
+
+static int guc_wait_ucode(struct intel_guc *guc)
+{
+ u32 status;
+ int ret;
+
/*
- * Wait for the DMA to complete & the GuC to start up.
+ * Wait for the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver will attempt to fall back to
* execlist mode if this happens.)
*/
- ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
-
- DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
- I915_READ(DMA_CTRL), status);
+ ret = wait_for(guc_ready(guc, &status), 100);
+ DRM_DEBUG_DRIVER("GuC status %#x\n", status);
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
DRM_ERROR("GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
- DRM_DEBUG_DRIVER("returning %d\n", ret);
-
return ret;
}
/*
* Load the GuC firmware blob into the MinuteIA.
*/
-static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
+static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -198,34 +244,24 @@ static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- /* Enable MIA caching. GuC clock gating is disabled. */
- I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
-
- /* WaDisableMinuteIaClockGating:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
- ~GUC_ENABLE_MIA_CLOCK_GATING));
- }
-
- /* WaC6DisallowByGfxPause:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
- I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
-
- if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- else
- I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ guc_prepare_xfer(guc);
- if (IS_GEN9(dev_priv)) {
- /* DOP Clock Gating Enable for GuC clocks */
- I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
- I915_READ(GEN7_MISCCPCTL)));
+ /*
+ * Note that GuC needs the CSS header plus uKernel code to be copied
+ * by the DMA engine in one operation, whereas the RSA signature is
+ * loaded via MMIO.
+ */
+ ret = guc_xfer_rsa(guc, vma);
+ if (ret)
+ DRM_WARN("GuC firmware signature xfer error %d\n", ret);
- /* allows for 5us (in 10ns units) before GT can go to RC6 */
- I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
- }
+ ret = guc_xfer_ucode(guc, vma);
+ if (ret)
+ DRM_WARN("GuC firmware code xfer error %d\n", ret);
- ret = guc_ucode_xfer_dma(dev_priv, vma);
+ ret = guc_wait_ucode(guc);
+ if (ret)
+ DRM_ERROR("GuC firmware xfer error %d\n", ret);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -233,19 +269,19 @@ static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
}
/**
- * intel_guc_fw_upload() - finish preparing the GuC for activity
+ * intel_guc_fw_upload() - load GuC uCode to device
* @guc: intel_guc structure
*
- * Called during driver loading and also after a GPU reset.
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset.
*
- * The main action required here it to load the GuC uCode into the device.
* The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_init(), so here we need only check that
- * worked, and then transfer the image to the h/w.
+ * earlier call to intel_uc_init_fw(), so here we need only check that the
+ * fetch succeeded, and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
int intel_guc_fw_upload(struct intel_guc *guc)
{
- return intel_uc_fw_upload(&guc->fw, guc_ucode_xfer);
+ return intel_uc_fw_upload(&guc->fw, guc_fw_xfer);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/intel_guc_fw.h
index 023f5ba..4ec5d3d 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.h
+++ b/drivers/gpu/drm/i915/intel_guc_fw.h
@@ -27,7 +27,7 @@
struct intel_guc;
-int intel_guc_fw_select(struct intel_guc *guc);
+void intel_guc_fw_init_early(struct intel_guc *guc);
int intel_guc_fw_upload(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 80c5074..6a10aa6 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -544,9 +544,37 @@ union guc_log_control {
u32 value;
} __packed;
+struct guc_ctx_report {
+ u32 report_return_status;
+ u32 reserved1[64];
+ u32 affected_count;
+ u32 reserved2[2];
+} __packed;
+
+/* GuC Shared Context Data Struct */
+struct guc_shared_ctx_data {
+ u32 addr_of_last_preempted_data_low;
+ u32 addr_of_last_preempted_data_high;
+ u32 addr_of_last_preempted_data_high_tmp;
+ u32 padding;
+ u32 is_mapped_to_proxy;
+ u32 proxy_ctx_id;
+ u32 engine_reset_ctx_id;
+ u32 media_reset_count;
+ u32 reserved1[8];
+ u32 uk_last_ctx_switch_reason;
+ u32 was_reset;
+ u32 lrca_gpu_addr;
+ u64 execlist_ctx;
+ u32 reserved2[66];
+ struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
+} __packed;
+
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum intel_guc_action {
INTEL_GUC_ACTION_DEFAULT = 0x0,
+ INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
+ INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
@@ -562,6 +590,18 @@ enum intel_guc_action {
INTEL_GUC_ACTION_LIMIT
};
+enum intel_guc_preempt_options {
+ INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
+ INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
+};
+
+enum intel_guc_report_status {
+ INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0,
+ INTEL_GUC_REPORT_STATUS_ACKED = 0x1,
+ INTEL_GUC_REPORT_STATUS_ERROR = 0x2,
+ INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
+};
+
/*
* The GuC sends its response to a command by overwriting the
* command in SS0. The response is distinguishable from a command
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 76d3eb1..c0c2e7d 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -33,11 +33,10 @@ static void guc_log_capture_logs(struct intel_guc *guc);
/**
* DOC: GuC firmware log
*
- * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
+ * Firmware log is enabled by setting i915.guc_log_level to a positive level.
* Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
* i915_guc_load_status will print out firmware loading status and scratch
* registers value.
- *
*/
static int guc_log_flush_complete(struct intel_guc *guc)
@@ -59,11 +58,17 @@ static int guc_log_flush(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static int guc_log_control(struct intel_guc *guc, u32 control_val)
+static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
{
+ union guc_log_control control_val = {
+ {
+ .logging_enabled = enable,
+ .verbosity = verbosity,
+ },
+ };
u32 action[] = {
INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
- control_val
+ control_val.value
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
@@ -78,7 +83,8 @@ static int subbuf_start_callback(struct rchan_buf *buf,
void *prev_subbuf,
size_t prev_padding)
{
- /* Use no-overwrite mode by default, where relay will stop accepting
+ /*
+ * Use no-overwrite mode by default, where relay will stop accepting
* new data if there are no empty sub buffers left.
* There is no strict synchronization enforced by relay between Consumer
* and Producer. In overwrite mode, there is a possibility of getting
@@ -104,7 +110,8 @@ static struct dentry *create_buf_file_callback(const char *filename,
{
struct dentry *buf_file;
- /* This to enable the use of a single buffer for the relay channel and
+ /*
+ * This is to enable the use of a single buffer for the relay channel and
* correspondingly have a single file exposed to User, through which
* it can collect the logs in order without any post-processing.
* Need to set 'is_global' even if parent is NULL for early logging.
@@ -114,7 +121,8 @@ static struct dentry *create_buf_file_callback(const char *filename,
if (!parent)
return NULL;
- /* Not using the channel filename passed as an argument, since for each
+ /*
+ * Not using the channel filename passed as an argument, since for each
* channel relay appends the corresponding CPU number to the filename
* passed in relay_open(). This should be fine as relay just needs a
* dentry of the file associated with the channel buffer and that file's
@@ -147,13 +155,16 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
struct dentry *log_dir;
int ret;
- if (i915_modparams.guc_log_level < 0)
+ if (!i915_modparams.guc_log_level)
return 0;
+ mutex_lock(&guc->log.runtime.relay_lock);
+
/* For now create the log file in /sys/kernel/debug/dri/0 dir */
log_dir = dev_priv->drm.primary->debugfs_root;
- /* If /sys/kernel/debug/dri/0 location do not exist, then debugfs is
+ /*
+ * If the /sys/kernel/debug/dri/0 location does not exist, then debugfs is
* not mounted and so can't create the relay file.
* The relay API seems to fit well with debugfs only, for availing relay
* there are 3 requirements which can be met for debugfs file only in a
@@ -166,25 +177,41 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
*/
if (!log_dir) {
DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_unlock;
}
ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
if (ret < 0 && ret != -EEXIST) {
DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
- return ret;
+ goto out_unlock;
}
- return 0;
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&guc->log.runtime.relay_lock);
+ return ret;
+}
+
+static bool guc_log_has_relay(struct intel_guc *guc)
+{
+ lockdep_assert_held(&guc->log.runtime.relay_lock);
+
+ return guc->log.runtime.relay_chan != NULL;
}
static void guc_move_to_next_buf(struct intel_guc *guc)
{
- /* Make sure the updates made in the sub buffer are visible when
+ /*
+ * Make sure the updates made in the sub buffer are visible when
* Consumer sees the following update to offset inside the sub buffer.
*/
smp_wmb();
+ if (!guc_log_has_relay(guc))
+ return;
+
/* All data has been written, so now move the offset of sub buffer. */
relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
@@ -194,10 +221,11 @@ static void guc_move_to_next_buf(struct intel_guc *guc)
static void *guc_get_write_buffer(struct intel_guc *guc)
{
- if (!guc->log.runtime.relay_chan)
+ if (!guc_log_has_relay(guc))
return NULL;
- /* Just get the base address of a new sub buffer and copy data into it
+ /*
+ * Just get the base address of a new sub buffer and copy data into it
* ourselves. NULL will be returned in no-overwrite mode, if all sub
* buffers are full. Could have used the relay_write() to indirectly
* copy the data, but that would have been bit convoluted, as we need to
@@ -262,15 +290,30 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
/* Get the pointer to shared GuC log buffer */
log_buf_state = src_data = guc->log.runtime.buf_addr;
+ mutex_lock(&guc->log.runtime.relay_lock);
+
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+ if (unlikely(!log_buf_snapshot_state)) {
+ /*
+ * Use a rate-limited error to avoid a deluge of messages; logs might be
+ * getting consumed by User at a slow rate.
+ */
+ DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+ guc->log.capture_miss_count++;
+ mutex_unlock(&guc->log.runtime.relay_lock);
+
+ return;
+ }
+
/* Actual logs are present from the 2nd page */
src_data += PAGE_SIZE;
dst_data += PAGE_SIZE;
for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
- /* Make a copy of the state structure, inside GuC log buffer
+ /*
+ * Make a copy of the state structure, inside GuC log buffer
* (which is uncached mapped), on the stack to avoid reading
* from it multiple times.
*/
@@ -290,14 +333,12 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
log_buf_state->flush_to_file = 0;
log_buf_state++;
- if (unlikely(!log_buf_snapshot_state))
- continue;
-
/* First copy the state structure in snapshot buffer */
memcpy(log_buf_snapshot_state, &log_buf_state_local,
sizeof(struct guc_log_buffer_state));
- /* The write pointer could have been updated by GuC firmware,
+ /*
+ * The write pointer could have been updated by GuC firmware,
* after sending the flush interrupt to Host, for consistency
* set write pointer value to same value of sampled_write_ptr
* in the snapshot buffer.
@@ -332,15 +373,9 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
dst_data += buffer_size;
}
- if (log_buf_snapshot_state)
- guc_move_to_next_buf(guc);
- else {
- /* Used rate limited to avoid deluge of messages, logs might be
- * getting consumed by User at a slow rate.
- */
- DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
- guc->log.capture_miss_count++;
- }
+ guc_move_to_next_buf(guc);
+
+ mutex_unlock(&guc->log.runtime.relay_lock);
}
static void capture_logs_work(struct work_struct *work)
@@ -360,19 +395,21 @@ static int guc_log_runtime_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
- struct rchan *guc_log_relay_chan;
- size_t n_subbufs, subbuf_size;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ if (!guc->log.vma)
+ return -ENODEV;
+
GEM_BUG_ON(guc_log_has_runtime(guc));
ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
if (ret)
return ret;
- /* Create a WC (Uncached for read) vmalloc mapping of log
+ /*
+ * Create a WC (Uncached for read) vmalloc mapping of log
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
*/
@@ -384,17 +421,55 @@ static int guc_log_runtime_create(struct intel_guc *guc)
guc->log.runtime.buf_addr = vaddr;
+ return 0;
+}
+
+static void guc_log_runtime_destroy(struct intel_guc *guc)
+{
+ /*
+ * It's possible that the runtime stuff was never allocated because
+ * the GuC log was disabled at boot time.
+ */
+ if (!guc_log_has_runtime(guc))
+ return;
+
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+ guc->log.runtime.buf_addr = NULL;
+}
+
+void intel_guc_log_init_early(struct intel_guc *guc)
+{
+ mutex_init(&guc->log.runtime.relay_lock);
+ INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
+}
+
+int intel_guc_log_relay_create(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+ int ret;
+
+ if (!i915_modparams.guc_log_level)
+ return 0;
+
+ mutex_lock(&guc->log.runtime.relay_lock);
+
+ GEM_BUG_ON(guc_log_has_relay(guc));
+
/* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = guc->log.vma->obj->base.size;
+ subbuf_size = GUC_LOG_SIZE;
- /* Store up to 8 snapshots, which is large enough to buffer sufficient
+ /*
+ * Store up to 8 snapshots, which is large enough to buffer sufficient
* boot time logs and provides enough leeway to User, in terms of
* latency, for consuming the logs from relay. Also doesn't take
* up too much memory.
*/
n_subbufs = 8;
- /* Create a relay channel, so that we have buffers for storing
+ /*
+ * Create a relay channel, so that we have buffers for storing
* the GuC firmware logs, the channel will be linked with a file
* later on when debugfs is registered.
*/
@@ -404,56 +479,39 @@ static int guc_log_runtime_create(struct intel_guc *guc)
DRM_ERROR("Couldn't create relay chan for GuC logging\n");
ret = -ENOMEM;
- goto err_vaddr;
+ goto err;
}
GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
guc->log.runtime.relay_chan = guc_log_relay_chan;
- INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
-
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother about
- * flushing of this work item from the suspend hooks, the pending
- * work item if any will be either executed before the suspend
- * or scheduled later on resume. This way the handling of work
- * item can be kept same between system suspend & rpm suspend.
- */
- guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (!guc->log.runtime.flush_wq) {
- DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
- ret = -ENOMEM;
- goto err_relaychan;
- }
+ mutex_unlock(&guc->log.runtime.relay_lock);
return 0;
-err_relaychan:
- relay_close(guc->log.runtime.relay_chan);
-err_vaddr:
- i915_gem_object_unpin_map(guc->log.vma->obj);
- guc->log.runtime.buf_addr = NULL;
+err:
+ mutex_unlock(&guc->log.runtime.relay_lock);
+ /* logging will be off */
+ i915_modparams.guc_log_level = 0;
return ret;
}
-static void guc_log_runtime_destroy(struct intel_guc *guc)
+void intel_guc_log_relay_destroy(struct intel_guc *guc)
{
+ mutex_lock(&guc->log.runtime.relay_lock);
+
/*
- * It's possible that the runtime stuff was never allocated because
- * guc_log_level was < 0 at the time
- **/
- if (!guc_log_has_runtime(guc))
- return;
+ * It's possible that the relay was never allocated because
+ * the GuC log was disabled at boot time.
+ */
+ if (!guc_log_has_relay(guc))
+ goto out_unlock;
- destroy_workqueue(guc->log.runtime.flush_wq);
relay_close(guc->log.runtime.relay_chan);
- i915_gem_object_unpin_map(guc->log.vma->obj);
- guc->log.runtime.buf_addr = NULL;
+ guc->log.runtime.relay_chan = NULL;
+
+out_unlock:
+ mutex_unlock(&guc->log.runtime.relay_lock);
}
static int guc_log_late_setup(struct intel_guc *guc)
@@ -461,16 +519,24 @@ static int guc_log_late_setup(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (!guc_log_has_runtime(guc)) {
- /* If log_level was set as -1 at boot time, then setup needed to
- * handle log buffer flush interrupts would not have been done yet,
- * so do that now.
+ /*
+ * If the log was disabled at boot time, then the setup needed to handle
+ * log buffer flush interrupts would not have been done yet, so
+ * do that now.
*/
- ret = guc_log_runtime_create(guc);
+ ret = intel_guc_log_relay_create(guc);
if (ret)
goto err;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_get(dev_priv);
+ ret = guc_log_runtime_create(guc);
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ if (ret)
+ goto err_relay;
}
ret = guc_log_relay_file_create(guc);
@@ -480,10 +546,14 @@ static int guc_log_late_setup(struct intel_guc *guc)
return 0;
err_runtime:
+ mutex_lock(&dev_priv->drm.struct_mutex);
guc_log_runtime_destroy(guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+err_relay:
+ intel_guc_log_relay_destroy(guc);
err:
/* logging will remain off */
- i915_modparams.guc_log_level = -1;
+ i915_modparams.guc_log_level = 0;
return ret;
}
@@ -493,7 +563,8 @@ static void guc_log_capture_logs(struct intel_guc *guc)
guc_read_update_log_buffer(guc);
- /* Generally device is expected to be active only at this
+ /*
+ * Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
intel_runtime_pm_get(dev_priv);
@@ -505,20 +576,26 @@ static void guc_flush_logs(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- if (!i915_modparams.enable_guc_submission ||
- (i915_modparams.guc_log_level < 0))
+ if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
return;
/* First disable the interrupts, will be re-enabled afterwards */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_get(dev_priv);
gen9_disable_guc_interrupts(dev_priv);
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- /* Before initiating the forceful flush, wait for any pending/ongoing
+ /*
+ * Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
flush_work(&guc->log.runtime.flush_work);
/* Ask GuC to update the log buffer state */
+ intel_runtime_pm_get(dev_priv);
guc_log_flush(guc);
+ intel_runtime_pm_put(dev_priv);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(guc);
@@ -529,21 +606,12 @@ int intel_guc_log_create(struct intel_guc *guc)
struct i915_vma *vma;
unsigned long offset;
u32 flags;
- u32 size;
int ret;
GEM_BUG_ON(guc->log.vma);
- if (i915_modparams.guc_log_level > GUC_LOG_VERBOSITY_MAX)
- i915_modparams.guc_log_level = GUC_LOG_VERBOSITY_MAX;
-
- /* The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap */
- size = (1 + GUC_LOG_DPC_PAGES + 1 +
- GUC_LOG_ISR_PAGES + 1 +
- GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
-
- /* We require SSE 4.1 for fast reads from the GuC log buffer and
+ /*
+ * We require SSE 4.1 for fast reads from the GuC log buffer and
 * it should be present on the chipsets supporting GuC-based
 * submissions.
*/
@@ -552,7 +620,7 @@ int intel_guc_log_create(struct intel_guc *guc)
goto err;
}
- vma = intel_guc_allocate_vma(guc, size);
+ vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -560,7 +628,7 @@ int intel_guc_log_create(struct intel_guc *guc)
guc->log.vma = vma;
- if (i915_modparams.guc_log_level >= 0) {
+ if (i915_modparams.guc_log_level) {
ret = guc_log_runtime_create(guc);
if (ret < 0)
goto err_vma;
@@ -581,7 +649,7 @@ err_vma:
i915_vma_unpin_and_release(&guc->log.vma);
err:
/* logging will be off */
- i915_modparams.guc_log_level = -1;
+ i915_modparams.guc_log_level = 0;
return ret;
}
@@ -591,35 +659,46 @@ void intel_guc_log_destroy(struct intel_guc *guc)
i915_vma_unpin_and_release(&guc->log.vma);
}
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
{
- struct intel_guc *guc = &dev_priv->guc;
-
- union guc_log_control log_param;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ bool enable_logging = control_val > 0;
+ u32 verbosity;
int ret;
- log_param.value = control_val;
+ if (!guc->log.vma)
+ return -ENODEV;
- if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
- log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+ BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN);
+ if (control_val > 1 + GUC_LOG_VERBOSITY_MAX)
return -EINVAL;
/* This combination doesn't make sense & won't have any effect */
- if (!log_param.logging_enabled && (i915_modparams.guc_log_level < 0))
+ if (!enable_logging && !i915_modparams.guc_log_level)
return 0;
- ret = guc_log_control(guc, log_param.value);
+ verbosity = enable_logging ? control_val - 1 : 0;
+
+ ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+ ret = guc_log_control(guc, enable_logging, verbosity);
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
if (ret < 0) {
DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
return ret;
}
- if (log_param.logging_enabled) {
- i915_modparams.guc_log_level = log_param.verbosity;
+ if (enable_logging) {
+ i915_modparams.guc_log_level = 1 + verbosity;
- /* If log_level was set as -1 at boot time, then the relay channel file
- * wouldn't have been created by now and interrupts also would not have
- * been enabled. Try again now, just in case.
+ /*
+ * If log was disabled at boot time, then the relay channel file
+ * wouldn't have been created by now and interrupts also would
+ * not have been enabled. Try again now, just in case.
*/
ret = guc_log_late_setup(guc);
if (ret < 0) {
@@ -628,9 +707,14 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
}
/* GuC logging is currently the only user of Guc2Host interrupts */
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_get(dev_priv);
gen9_enable_guc_interrupts(dev_priv);
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
} else {
- /* Once logging is disabled, GuC won't generate logs & send an
+ /*
+ * Once logging is disabled, GuC won't generate logs & send an
* interrupt. But there could be some data in the log buffer
* which is yet to be captured. So request GuC to update the log
* buffer state and then collect the left over logs.
@@ -638,7 +722,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
guc_flush_logs(guc);
/* As logging is disabled, update log level to reflect that */
- i915_modparams.guc_log_level = -1;
+ i915_modparams.guc_log_level = 0;
}
return ret;
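A minimal sketch (illustrative only, not part of the patch) of the value mapping the reworked interface relies on: a control value of 0 disables logging, while a value N > 0 enables logging at verbosity N - 1, and the module parameter is stored back as guc_log_level == N:

/* Sketch of the control_val <-> guc_log_level mapping used above. */
static u32 guc_log_level_from_control_sketch(u64 control_val)
{
	bool enable = control_val > 0;
	u32 verbosity = enable ? control_val - 1 : 0;

	/* e.g. control_val == 3 -> enabled at verbosity 2, stored as level 3 */
	return enable ? 1 + verbosity : 0;
}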
@@ -646,23 +730,27 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
- if (!i915_modparams.enable_guc_submission ||
- (i915_modparams.guc_log_level < 0))
+ if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
return;
- mutex_lock(&dev_priv->drm.struct_mutex);
guc_log_late_setup(&dev_priv->guc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
- if (!i915_modparams.enable_guc_submission)
+ struct intel_guc *guc = &dev_priv->guc;
+
+ if (!USES_GUC_SUBMISSION(dev_priv))
return;
mutex_lock(&dev_priv->drm.struct_mutex);
/* GuC logging is currently the only user of Guc2Host interrupts */
+ intel_runtime_pm_get(dev_priv);
gen9_disable_guc_interrupts(dev_priv);
- guc_log_runtime_destroy(&dev_priv->guc);
+ intel_runtime_pm_put(dev_priv);
+
+ guc_log_runtime_destroy(guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_guc_log_relay_destroy(guc);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
index f512cf7..dab0e94 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -32,6 +32,13 @@
struct drm_i915_private;
struct intel_guc;
+/*
+ * The first page is to save log buffer state. Allocate one
+ * extra page for the others in case of overlap
+ */
+#define GUC_LOG_SIZE ((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \
+ 1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT)
+
struct intel_guc_log {
u32 flags;
struct i915_vma *vma;
@@ -41,6 +48,8 @@ struct intel_guc_log {
struct workqueue_struct *flush_wq;
struct work_struct flush_work;
struct rchan *relay_chan;
+ /* To serialize the access to relay_chan */
+ struct mutex relay_lock;
} runtime;
/* logging related stats */
u32 capture_miss_count;
@@ -52,7 +61,10 @@ struct intel_guc_log {
int intel_guc_log_create(struct intel_guc *guc);
void intel_guc_log_destroy(struct intel_guc *guc);
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
+void intel_guc_log_init_early(struct intel_guc *guc);
+int intel_guc_log_relay_create(struct intel_guc *guc);
+void intel_guc_log_relay_destroy(struct intel_guc *guc);
+int intel_guc_log_control(struct intel_guc *guc, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index 35cf991..19a9247 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -21,8 +21,8 @@
* IN THE SOFTWARE.
*
*/
-#ifndef _I915_GUC_REG_H_
-#define _I915_GUC_REG_H_
+#ifndef _INTEL_GUC_REG_H_
+#define _INTEL_GUC_REG_H_
/* Definitions of GuC H/W registers, bits, etc */
@@ -52,7 +52,8 @@
#define SOFT_SCRATCH_COUNT 16
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
-#define UOS_RSA_SCRATCH_MAX_COUNT 64
+#define UOS_RSA_SCRATCH_COUNT 64
+
#define DMA_ADDR_0_LOW _MMIO(0xc300)
#define DMA_ADDR_0_HIGH _MMIO(0xc304)
#define DMA_ADDR_1_LOW _MMIO(0xc308)
@@ -102,13 +103,6 @@
#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15)
#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
-#define GUC_SHIM_CONTROL_VALUE (GUC_DISABLE_SRAM_INIT_TO_ZEROES | \
- GUC_ENABLE_READ_CACHE_LOGIC | \
- GUC_ENABLE_MIA_CACHING | \
- GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \
- GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
- GUC_ENABLE_MIA_CLOCK_GATING)
-
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index f84c267..8a8ad2f 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -25,23 +25,30 @@
#include <linux/circ_buf.h>
#include <trace/events/dma_fence.h>
-#include "i915_guc_submission.h"
+#include "intel_guc_submission.h"
+#include "intel_lrc_reg.h"
#include "i915_drv.h"
+#define GUC_PREEMPT_FINISHED 0x1
+#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
+#define GUC_PREEMPT_BREADCRUMB_BYTES \
+ (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
+
/**
* DOC: GuC-based command submission
*
* GuC client:
- * A i915_guc_client refers to a submission path through GuC. Currently, there
- * is only one of these (the execbuf_client) and this one is charged with all
- * submissions to the GuC. This struct is the owner of a doorbell, a process
- * descriptor and a workqueue (all of them inside a single gem object that
- * contains all required pages for these elements).
+ * An intel_guc_client refers to a submission path through GuC. Currently, there
+ * are two clients. One of them (the execbuf_client) is charged with all
+ * submissions to the GuC, the other one (preempt_client) is responsible for
+ * preempting the execbuf_client. This struct is the owner of a doorbell, a
+ * process descriptor and a workqueue (all of them inside a single gem object
+ * that contains all required pages for these elements).
*
* GuC stage descriptor:
* During initialization, the driver allocates a static pool of 1024 such
* descriptors, and shares them with the GuC.
- * Currently, there exists a 1:1 mapping between a i915_guc_client and a
+ * Currently, there exists a 1:1 mapping between an intel_guc_client and a
* guc_stage_desc (via the client's stage_id), so effectively only one
* gets used. This stage descriptor lets the GuC know about the doorbell,
* workqueue and process descriptor. Theoretically, it also lets the GuC
@@ -70,23 +77,22 @@
* WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
* represents in-order queue. The kernel driver packs ring tail pointer and an
* ELSP context descriptor dword into Work Item.
- * See guc_wq_item_append()
- *
- * ADS:
- * The Additional Data Struct (ADS) has pointers for different buffers used by
- * the GuC. One single gem object contains the ADS struct itself (guc_ads), the
- * scheduling policies (guc_policies), a structure describing a collection of
- * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
- * its internal state for sleep.
+ * See guc_add_request()
*
*/
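As a sketch of the in-order work item described above (illustrative only, not part of the patch; the field and macro names follow struct guc_wq_item as used by guc_wq_item_append() later in this file):

/* Sketch: fill one 4-dword in-order work item for the GuC work queue. */
static void guc_wq_item_sketch(struct guc_wq_item *wqi, u32 target_engine,
			       u32 context_desc, u32 ring_tail, u32 fence_id)
{
	/* wqi_len is in dwords and does not include the one-word header */
	const u32 wqi_len = sizeof(*wqi) / sizeof(u32) - 1;

	wqi->header = WQ_TYPE_INORDER |
		      (wqi_len << WQ_LEN_SHIFT) |
		      (target_engine << WQ_TARGET_SHIFT) |
		      WQ_NO_WCFLUSH_WAIT;
	wqi->context_desc = context_desc;	/* ELSP context descriptor dword */
	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
	wqi->fence_id = fence_id;		/* global seqno of the request */
}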
-static inline bool is_high_priority(struct i915_guc_client* client)
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static inline bool is_high_priority(struct intel_guc_client *client)
{
- return client->priority <= GUC_CLIENT_PRIORITY_HIGH;
+ return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
+ client->priority == GUC_CLIENT_PRIORITY_HIGH);
}
-static int __reserve_doorbell(struct i915_guc_client *client)
+static int reserve_doorbell(struct intel_guc_client *client)
{
unsigned long offset;
unsigned long end;
@@ -100,7 +106,7 @@ static int __reserve_doorbell(struct i915_guc_client *client)
* priority contexts, the second half for high-priority ones.
*/
offset = 0;
- end = GUC_NUM_DOORBELLS/2;
+ end = GUC_NUM_DOORBELLS / 2;
if (is_high_priority(client)) {
offset = end;
end += offset;
@@ -118,7 +124,7 @@ static int __reserve_doorbell(struct i915_guc_client *client)
return 0;
}
-static void __unreserve_doorbell(struct i915_guc_client *client)
+static void unreserve_doorbell(struct intel_guc_client *client)
{
GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
@@ -150,7 +156,7 @@ static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
+static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
{
struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
@@ -164,7 +170,7 @@ static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
* client object which contains the page being used for the doorbell
*/
-static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
+static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
{
struct guc_stage_desc *desc;
@@ -173,12 +179,12 @@ static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
desc->db_id = new_id;
}
-static struct guc_doorbell_info *__get_doorbell(struct i915_guc_client *client)
+static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
{
return client->vaddr + client->doorbell_offset;
}
-static bool has_doorbell(struct i915_guc_client *client)
+static bool has_doorbell(struct intel_guc_client *client)
{
if (client->doorbell_id == GUC_DOORBELL_INVALID)
return false;
@@ -186,29 +192,21 @@ static bool has_doorbell(struct i915_guc_client *client)
return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
}
-static int __create_doorbell(struct i915_guc_client *client)
+static void __create_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *doorbell;
- int err;
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = 0;
-
- err = __guc_allocate_doorbell(client->guc, client->stage_id);
- if (err)
- doorbell->db_status = GUC_DOORBELL_DISABLED;
-
- return err;
}
-static int __destroy_doorbell(struct i915_guc_client *client)
+static void __destroy_doorbell(struct intel_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
struct guc_doorbell_info *doorbell;
u16 db_id = client->doorbell_id;
- GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_DISABLED;
@@ -216,56 +214,49 @@ static int __destroy_doorbell(struct i915_guc_client *client)
/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
* to go to zero after updating db_status before we call the GuC to
- * release the doorbell */
+ * release the doorbell
+ */
if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
WARN_ONCE(true, "Doorbell never became invalid after disable\n");
-
- return __guc_deallocate_doorbell(client->guc, client->stage_id);
}
-static int create_doorbell(struct i915_guc_client *client)
+static int create_doorbell(struct intel_guc_client *client)
{
int ret;
- ret = __reserve_doorbell(client);
- if (ret)
- return ret;
-
__update_doorbell_desc(client, client->doorbell_id);
-
- ret = __create_doorbell(client);
- if (ret)
- goto err;
+ __create_doorbell(client);
+
+ ret = __guc_allocate_doorbell(client->guc, client->stage_id);
+ if (ret) {
+ __destroy_doorbell(client);
+ __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+ DRM_ERROR("Couldn't create client %u doorbell: %d\n",
+ client->stage_id, ret);
+ return ret;
+ }
return 0;
-
-err:
- __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- __unreserve_doorbell(client);
- return ret;
}
-static int destroy_doorbell(struct i915_guc_client *client)
+static int destroy_doorbell(struct intel_guc_client *client)
{
- int err;
+ int ret;
GEM_BUG_ON(!has_doorbell(client));
- /* XXX: wait for any interrupts */
- /* XXX: wait for workqueue to drain */
-
- err = __destroy_doorbell(client);
- if (err)
- return err;
+ __destroy_doorbell(client);
+ ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
+ if (ret)
+ DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
+ client->stage_id, ret);
__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- __unreserve_doorbell(client);
-
- return 0;
+ return ret;
}
-static unsigned long __select_cacheline(struct intel_guc* guc)
+static unsigned long __select_cacheline(struct intel_guc *guc)
{
unsigned long offset;
@@ -276,12 +267,12 @@ static unsigned long __select_cacheline(struct intel_guc* guc)
guc->db_cacheline += cache_line_size();
DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
- offset, guc->db_cacheline, cache_line_size());
+ offset, guc->db_cacheline, cache_line_size());
return offset;
}
static inline struct guc_process_desc *
-__get_process_desc(struct i915_guc_client *client)
+__get_process_desc(struct intel_guc_client *client)
{
return client->vaddr + client->proc_desc_offset;
}
@@ -290,7 +281,7 @@ __get_process_desc(struct i915_guc_client *client)
* Initialise the process descriptor shared with the GuC firmware.
*/
static void guc_proc_desc_init(struct intel_guc *guc,
- struct i915_guc_client *client)
+ struct intel_guc_client *client)
{
struct guc_process_desc *desc;
@@ -311,6 +302,37 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->priority = client->priority;
}
+static int guc_stage_desc_pool_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc,
+ PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+ GUC_MAX_STAGE_DESCRIPTORS));
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma);
+ return PTR_ERR(vaddr);
+ }
+
+ guc->stage_desc_pool = vma;
+ guc->stage_desc_pool_vaddr = vaddr;
+ ida_init(&guc->stage_ids);
+
+ return 0;
+}
+
+static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
+{
+ ida_destroy(&guc->stage_ids);
+ i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
+ i915_vma_unpin_and_release(&guc->stage_desc_pool);
+}
+
/*
* Initialise/clear the stage descriptor shared with the GuC firmware.
*
@@ -319,7 +341,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
* write queue, etc).
*/
static void guc_stage_desc_init(struct intel_guc *guc,
- struct i915_guc_client *client)
+ struct intel_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
@@ -331,7 +353,10 @@ static void guc_stage_desc_init(struct intel_guc *guc,
desc = __get_stage_desc(client);
memset(desc, 0, sizeof(*desc));
- desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL;
+ desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
+ GUC_STAGE_DESC_ATTR_KERNEL;
+ if (is_high_priority(client))
+ desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
desc->stage_id = client->stage_id;
desc->priority = client->priority;
desc->db_id = client->doorbell_id;
@@ -356,7 +381,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
* submission or, in other words, not using a direct submission
* model) the KMD's LRCA is not used for any work submission.
* Instead, the GuC uses the LRCA of the user mode context (see
- * guc_wq_item_append below).
+ * guc_add_request below).
*/
lrc->context_desc = lower_32_bits(ce->lrc_desc);
@@ -365,7 +390,8 @@ static void guc_stage_desc_init(struct intel_guc *guc,
guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
/* XXX: In direct submission, the GuC wants the HW context id
- * here. In proxy submission, it wants the stage id */
+ * here. In proxy submission, it wants the stage id
+ */
lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
@@ -378,7 +404,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
}
DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
- client->engines, desc->engines_used);
+ client->engines, desc->engines_used);
WARN_ON(desc->engines_used == 0);
/*
@@ -398,7 +424,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
}
static void guc_stage_desc_fini(struct intel_guc *guc,
- struct i915_guc_client *client)
+ struct intel_guc_client *client)
{
struct guc_stage_desc *desc;
@@ -407,23 +433,19 @@ static void guc_stage_desc_fini(struct intel_guc *guc,
}
/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct i915_guc_client *client,
- struct drm_i915_gem_request *rq)
+static void guc_wq_item_append(struct intel_guc_client *client,
+ u32 target_engine, u32 context_desc,
+ u32 ring_tail, u32 fence_id)
{
/* wqi_len is in DWords, and does not include the one-word header */
const size_t wqi_size = sizeof(struct guc_wq_item);
const u32 wqi_len = wqi_size / sizeof(u32) - 1;
- struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *ctx = rq->ctx;
struct guc_process_desc *desc = __get_process_desc(client);
struct guc_wq_item *wqi;
- u32 ring_tail, wq_off;
+ u32 wq_off;
lockdep_assert_held(&client->wq_lock);
- ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
-
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
* wrapped to the beginning. This simplifies the implementation below.
@@ -445,19 +467,18 @@ static void guc_wq_item_append(struct i915_guc_client *client,
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
(wqi_len << WQ_LEN_SHIFT) |
- (engine->guc_id << WQ_TARGET_SHIFT) |
+ (target_engine << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
-
- wqi->context_desc = lower_32_bits(intel_lr_context_descriptor(ctx, engine));
-
+ wqi->context_desc = context_desc;
wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = rq->global_seqno;
+ GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
+ wqi->fence_id = fence_id;
- /* Postincrement WQ tail for next time. */
+ /* Make the update visible to GuC */
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
-static void guc_reset_wq(struct i915_guc_client *client)
+static void guc_reset_wq(struct intel_guc_client *client)
{
struct guc_process_desc *desc = __get_process_desc(client);
@@ -465,7 +486,7 @@ static void guc_reset_wq(struct i915_guc_client *client)
desc->tail = 0;
}
-static void guc_ring_doorbell(struct i915_guc_client *client)
+static void guc_ring_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *db;
u32 cookie;
@@ -475,108 +496,205 @@ static void guc_ring_doorbell(struct i915_guc_client *client)
/* pointer of current doorbell cacheline */
db = __get_doorbell(client);
- /* we're not expecting the doorbell cookie to change behind our back */
+ /*
+ * We're not expecting the doorbell cookie to change behind our back;
+ * we also need to treat 0 as a reserved value.
+ */
cookie = READ_ONCE(db->cookie);
- WARN_ON_ONCE(xchg(&db->cookie, cookie + 1) != cookie);
+ WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);
/* XXX: doorbell was lost and need to acquire it again */
GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
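The cookie + 1 ?: cookie + 2 expression above is just an increment that never produces the reserved value 0; a standalone sketch (not part of the patch) of the same idea:

/* Sketch: advance the doorbell cookie, skipping 0, which is reserved. */
static u32 guc_next_doorbell_cookie_sketch(u32 cookie)
{
	return cookie + 1 ?: cookie + 2;	/* if cookie + 1 wraps to 0, use 1 (cookie + 2) */
}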
+static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+{
+ struct intel_guc_client *client = guc->execbuf_client;
+ struct intel_engine_cs *engine = rq->engine;
+ u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
+ engine));
+ u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
+
+ spin_lock(&client->wq_lock);
+
+ guc_wq_item_append(client, engine->guc_id, ctx_desc,
+ ring_tail, rq->global_seqno);
+ guc_ring_doorbell(client);
+
+ client->submissions[engine->id] += 1;
+
+ spin_unlock(&client->wq_lock);
+}
+
+/*
+ * When we're doing submissions using the regular execlists backend, writing to
+ * ELSP from the CPU side is enough to make sure that writes to ringbuffer pages
+ * pinned in the mappable aperture portion of the GGTT are visible to the
+ * command streamer. Writes done by the GuC on our behalf do not guarantee such
+ * ordering, so to ensure the flush we issue a POSTING_READ.
+ */
+static void flush_ggtt_writes(struct i915_vma *vma)
+{
+ struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
+
+ if (i915_vma_is_map_and_fenceable(vma))
+ POSTING_READ_FW(GUC_STATUS);
+}
+
+static void inject_preempt_context(struct work_struct *work)
+{
+ struct guc_preempt_work *preempt_work =
+ container_of(work, typeof(*preempt_work), work);
+ struct intel_engine_cs *engine = preempt_work->engine;
+ struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
+ preempt_work[engine->id]);
+ struct intel_guc_client *client = guc->preempt_client;
+ struct guc_stage_desc *stage_desc = __get_stage_desc(client);
+ u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
+ engine));
+ u32 data[7];
+
+ /*
+ * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
+ * See guc_fill_preempt_context().
+ */
+ spin_lock_irq(&client->wq_lock);
+ guc_wq_item_append(client, engine->guc_id, ctx_desc,
+ GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
+ spin_unlock_irq(&client->wq_lock);
+
+ /*
+ * If GuC firmware performs an engine reset while that engine had
+ * a preemption pending, it will set the terminated attribute bit
+ * on our preemption stage descriptor. GuC firmware retains all
+ * pending work items for a high-priority GuC client, unlike the
+ * normal-priority GuC client where work items are dropped. It
+ * wants to make sure the preempt-to-idle work doesn't run when
+ * scheduling resumes, and uses this bit to inform its scheduler
+ * and presumably us as well. Our job is to clear it for the next
+ * preemption after reset, otherwise that and future preemptions
+ * will never complete. We'll just clear it every time.
+ */
+ stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
+
+ data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
+ data[1] = client->stage_id;
+ data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
+ INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
+ data[3] = engine->guc_id;
+ data[4] = guc->execbuf_client->priority;
+ data[5] = guc->execbuf_client->stage_id;
+ data[6] = guc_ggtt_offset(guc->shared_data);
+
+ if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
+ execlists_clear_active(&engine->execlists,
+ EXECLISTS_ACTIVE_PREEMPT);
+ tasklet_schedule(&engine->execlists.tasklet);
+ }
+}
+
+/*
+ * We're using user interrupt and HWSP value to mark that preemption has
+ * finished and GPU is idle. Normally, we could unwind and continue similar to
+ * execlists submission path. Unfortunately, with GuC we also need to wait for
+ * it to finish its own postprocessing, before attempting to submit. Otherwise
+ * GuC may silently ignore our submissions, and thus we risk losing a request
+ * at best, executing out-of-order and causing a kernel panic at worst.
+ */
+#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
+static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
+{
+ struct intel_guc *guc = &engine->i915->guc;
+ struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
+ struct guc_ctx_report *report =
+ &data->preempt_ctx_report[engine->guc_id];
+
+ WARN_ON(wait_for_atomic(report->report_return_status ==
+ INTEL_GUC_REPORT_STATUS_COMPLETE,
+ GUC_PREEMPT_POSTPROCESS_DELAY_MS));
+ /*
+ * GuC is expecting that we're also going to clear the affected context
+ * counter. Let's also reset the return status so we don't depend on GuC
+ * resetting it after receiving another preempt action.
+ */
+ report->affected_count = 0;
+ report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
+}
+
/**
- * i915_guc_submit() - Submit commands through GuC
+ * guc_submit() - Submit commands through GuC
* @engine: engine associated with the commands
*
* The only error here arises if the doorbell hardware isn't functioning
* as expected, which really shouldn't happen.
*/
-static void i915_guc_submit(struct intel_engine_cs *engine)
+static void guc_submit(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_guc_client *client = guc->execbuf_client;
+ struct intel_guc *guc = &engine->i915->guc;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- const unsigned int engine_id = engine->id;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int count;
rq = port_unpack(&port[n], &count);
if (rq && count == 0) {
port_set(&port[n], port_pack(rq, ++count));
- if (i915_vma_is_map_and_fenceable(rq->ring->vma))
- POSTING_READ_FW(GUC_STATUS);
-
- spin_lock(&client->wq_lock);
-
- guc_wq_item_append(client, rq);
- guc_ring_doorbell(client);
+ flush_ggtt_writes(rq->ring->vma);
- client->submissions[engine_id] += 1;
-
- spin_unlock(&client->wq_lock);
+ guc_add_request(guc, rq);
}
}
}
-static void nested_enable_signaling(struct drm_i915_gem_request *rq)
+static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
- /* If we use dma_fence_enable_sw_signaling() directly, lockdep
- * detects an ordering issue between the fence lockclass and the
- * global_timeline. This circular dependency can only occur via 2
- * different fences (but same fence lockclass), so we use the nesting
- * annotation here to prevent the warn, equivalent to the nesting
- * inside i915_gem_request_submit() for when we also enable the
- * signaler.
- */
-
- if (test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &rq->fence.flags))
- return;
-
- GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
- trace_dma_fence_enable_signal(&rq->fence);
-
- spin_lock_nested(&rq->lock, SINGLE_DEPTH_NESTING);
- intel_engine_enable_signaling(rq, true);
- spin_unlock(&rq->lock);
-}
-
-static void port_assign(struct execlist_port *port,
- struct drm_i915_gem_request *rq)
-{
- GEM_BUG_ON(rq == port_request(port));
-
- if (port_isset(port))
- i915_gem_request_put(port_request(port));
+ GEM_BUG_ON(port_isset(port));
- port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
- nested_enable_signaling(rq);
+ port_set(port, i915_request_get(rq));
}
-static void i915_guc_dequeue(struct intel_engine_cs *engine)
+static void guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- struct drm_i915_gem_request *last = NULL;
+ struct i915_request *last = NULL;
const struct execlist_port * const last_port =
&execlists->port[execlists->port_mask];
bool submit = false;
struct rb_node *rb;
- if (port_isset(port))
- port++;
-
spin_lock_irq(&engine->timeline->lock);
rb = execlists->first;
GEM_BUG_ON(rb_first(&execlists->queue) != rb);
+
+ if (port_isset(port)) {
+ if (engine->i915->preempt_context) {
+ struct guc_preempt_work *preempt_work =
+ &engine->i915->guc.preempt_work[engine->id];
+
+ if (execlists->queue_priority >
+ max(port_request(port)->priotree.priority, 0)) {
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT);
+ queue_work(engine->i915->guc.preempt_wq,
+ &preempt_work->work);
+ goto unlock;
+ }
+ }
+
+ port++;
+ if (port_isset(port))
+ goto unlock;
+ }
+ GEM_BUG_ON(port_isset(port));
+
while (rb) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- struct drm_i915_gem_request *rq, *rn;
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
if (last && rq->ctx != last->ctx) {
@@ -592,10 +710,9 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
}
INIT_LIST_HEAD(&rq->priotree.link);
- rq->priotree.priority = INT_MAX;
- __i915_gem_request_submit(rq);
- trace_i915_gem_request_in(rq, port_index(port, execlists));
+ __i915_request_submit(rq);
+ trace_i915_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
@@ -607,28 +724,34 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
kmem_cache_free(engine->i915->priorities, p);
}
done:
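+ /*
+  * Remember the priority of the highest-priority pending request (or
+  * INT_MIN when the queue is empty); the next dequeue compares it against
+  * the executing request to decide whether preemption is worthwhile.
+  */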
+ execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
execlists->first = rb;
if (submit) {
port_assign(port, last);
execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
- i915_guc_submit(engine);
+ guc_submit(engine);
}
+
+ /* We must always keep the beast fed if we have work piled up */
+ GEM_BUG_ON(port_isset(execlists->port) &&
+ !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
+ GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+
+unlock:
spin_unlock_irq(&engine->timeline->lock);
}
-static void i915_guc_irq_handler(unsigned long data)
+static void guc_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
rq = port_request(&port[0]);
- while (rq && i915_gem_request_completed(rq)) {
- trace_i915_gem_request_out(rq);
- i915_gem_request_put(rq);
+ while (rq && i915_request_completed(rq)) {
+ trace_i915_request_out(rq);
+ i915_request_put(rq);
execlists_port_complete(execlists, port);
@@ -637,14 +760,26 @@ static void i915_guc_irq_handler(unsigned long data)
if (!rq)
execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
- if (!port_isset(last_port))
- i915_guc_dequeue(engine);
+ if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
+ intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
+ GUC_PREEMPT_FINISHED) {
+ execlists_cancel_port_requests(&engine->execlists);
+ execlists_unwind_incomplete_requests(execlists);
+
+ wait_for_guc_preempt_report(engine);
+
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+ intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+ }
+
+ if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
+ guc_dequeue(engine);
}
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
- * path of i915_guc_submit() above.
+ * path of guc_submit() above.
*/
/* Check that a doorbell register is in the expected state */
@@ -668,101 +803,72 @@ static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
return false;
}
-/*
- * If the GuC thinks that the doorbell is unassigned (e.g. because we reset and
- * reloaded the GuC FW) we can use this function to tell the GuC to reassign the
- * doorbell to the rightful owner.
- */
-static int __reset_doorbell(struct i915_guc_client* client, u16 db_id)
+static bool guc_verify_doorbells(struct intel_guc *guc)
{
- int err;
+ u16 db_id;
- __update_doorbell_desc(client, db_id);
- err = __create_doorbell(client);
- if (!err)
- err = __destroy_doorbell(client);
+ for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
+ if (!doorbell_ok(guc, db_id))
+ return false;
- return err;
+ return true;
}
-/*
- * Set up & tear down each unused doorbell in turn, to ensure that all doorbell
- * HW is (re)initialised. For that end, we might have to borrow the first
- * client. Also, tell GuC about all the doorbells in use by all clients.
- * We do this because the KMD, the GuC and the doorbell HW can easily go out of
- * sync (e.g. we can reset the GuC, but not the doorbel HW).
- */
-static int guc_init_doorbell_hw(struct intel_guc *guc)
+static int guc_clients_doorbell_init(struct intel_guc *guc)
{
- struct i915_guc_client *client = guc->execbuf_client;
- bool recreate_first_client = false;
- u16 db_id;
int ret;
- /* For unused doorbells, make sure they are disabled */
- for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS) {
- if (doorbell_ok(guc, db_id))
- continue;
-
- if (has_doorbell(client)) {
- /* Borrow execbuf_client (we will recreate it later) */
- destroy_doorbell(client);
- recreate_first_client = true;
- }
-
- ret = __reset_doorbell(client, db_id);
- WARN(ret, "Doorbell %u reset failed, err %d\n", db_id, ret);
- }
-
- if (recreate_first_client) {
- ret = __reserve_doorbell(client);
- if (unlikely(ret)) {
- DRM_ERROR("Couldn't re-reserve first client db: %d\n", ret);
- return ret;
- }
-
- __update_doorbell_desc(client, client->doorbell_id);
- }
+ ret = create_doorbell(guc->execbuf_client);
+ if (ret)
+ return ret;
- /* Now for every client (and not only execbuf_client) make sure their
- * doorbells are known by the GuC */
- //for (client = client_list; client != NULL; client = client->next)
- {
- ret = __create_doorbell(client);
+ if (guc->preempt_client) {
+ ret = create_doorbell(guc->preempt_client);
if (ret) {
- DRM_ERROR("Couldn't recreate client %u doorbell: %d\n",
- client->stage_id, ret);
+ destroy_doorbell(guc->execbuf_client);
return ret;
}
}
- /* Read back & verify all (used & unused) doorbell registers */
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
- WARN_ON(!doorbell_ok(guc, db_id));
-
return 0;
}
+static void guc_clients_doorbell_fini(struct intel_guc *guc)
+{
+ /*
+ * By the time we're here, GuC has already been reset.
+ * Instead of trying (in vain) to communicate with it, let's just
+ * clean up the doorbell HW and our internal state.
+ */
+ if (guc->preempt_client) {
+ __destroy_doorbell(guc->preempt_client);
+ __update_doorbell_desc(guc->preempt_client,
+ GUC_DOORBELL_INVALID);
+ }
+ __destroy_doorbell(guc->execbuf_client);
+ __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
+}
+
/**
- * guc_client_alloc() - Allocate an i915_guc_client
+ * guc_client_alloc() - Allocate an intel_guc_client
* @dev_priv: driver private data structure
* @engines: The set of engines to enable for this client
* @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
- * The kernel client to replace ExecList submission is created with
- * NORMAL priority. Priority of a client for scheduler can be HIGH,
- * while a preemption context can use CRITICAL.
+ * The kernel client to replace ExecList submission is created with
+ * NORMAL priority. Priority of a client for scheduler can be HIGH,
+ * while a preemption context can use CRITICAL.
* @ctx: the context that owns the client (we use the default render
- * context)
+ * context)
*
- * Return: An i915_guc_client object if success, else NULL.
+ * Return: An intel_guc_client object if success, else NULL.
*/
-static struct i915_guc_client *
+static struct intel_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
u32 engines,
u32 priority,
struct i915_gem_context *ctx)
{
- struct i915_guc_client *client;
+ struct intel_guc_client *client;
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
void *vaddr;
@@ -780,7 +886,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
spin_lock_init(&client->wq_lock);
ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
- GFP_KERNEL);
+ GFP_KERNEL);
if (ret < 0)
goto err_client;
@@ -818,7 +924,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
guc_proc_desc_init(guc, client);
guc_stage_desc_init(guc, client);
- ret = create_doorbell(client);
+ ret = reserve_doorbell(client);
if (ret)
goto err_vaddr;
@@ -840,17 +946,9 @@ err_client:
return ERR_PTR(ret);
}
-static void guc_client_free(struct i915_guc_client *client)
+static void guc_client_free(struct intel_guc_client *client)
{
- /*
- * XXX: wait for any outstanding submissions before freeing memory.
- * Be sure to drop any locks
- */
-
- /* FIXME: in many cases, by the time we get here the GuC has been
- * reset, so we cannot destroy the doorbell properly. Ignore the
- * error message for now */
- destroy_doorbell(client);
+ unreserve_doorbell(client);
guc_stage_desc_fini(client->guc, client);
i915_gem_object_unpin_map(client->vma->obj);
i915_vma_unpin_and_release(&client->vma);
@@ -858,175 +956,161 @@ static void guc_client_free(struct i915_guc_client *client)
kfree(client);
}
-static void guc_policy_init(struct guc_policy *policy)
-{
- policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
- policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
- policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
- policy->policy_flags = 0;
-}
-
-static void guc_policies_init(struct guc_policies *policies)
+static inline bool ctx_save_restore_disabled(struct intel_context *ce)
{
- struct guc_policy *policy;
- u32 p, i;
-
- policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
- policies->max_num_work_items = POLICY_MAX_NUM_WI;
+ u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
- for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
- for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
- policy = &policies->policy[p][i];
+#define SR_DISABLED \
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
+ CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
- guc_policy_init(policy);
- }
- }
+ return (sr & SR_DISABLED) == SR_DISABLED;
- policies->is_valid = 1;
+#undef SR_DISABLED
}
-/*
- * The first 80 dwords of the register state context, containing the
- * execlists and ppgtt registers.
- */
-#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
-
-static int guc_ads_create(struct intel_guc *guc)
+static void guc_fill_preempt_context(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct i915_vma *vma;
- struct page *page;
- /* The ads obj includes the struct itself and buffers passed to GuC */
- struct {
- struct guc_ads ads;
- struct guc_policies policies;
- struct guc_mmio_reg_state reg_state;
- u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
- } __packed *blob;
+ struct intel_guc_client *client = guc->preempt_client;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
- const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
- u32 base;
- GEM_BUG_ON(guc->ads_vma);
-
- vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- guc->ads_vma = vma;
+ for_each_engine(engine, dev_priv, id) {
+ struct intel_context *ce = &client->owner->engine[id];
+ u32 addr = intel_hws_preempt_done_address(engine);
+ u32 *cs;
- page = i915_vma_first_page(vma);
- blob = kmap(page);
+ GEM_BUG_ON(!ce->pin_count);
- /* GuC scheduling policies */
- guc_policies_init(&blob->policies);
+ /*
+ * We rely on this context image *not* being saved after
+ * preemption. This ensures that the RING_HEAD / RING_TAIL
+ * remain pointing at initial values forever.
+ */
+ GEM_BUG_ON(!ctx_save_restore_disabled(ce));
+
+ cs = ce->ring->vaddr;
+ if (id == RCS) {
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ GUC_PREEMPT_FINISHED,
+ addr);
+ } else {
+ cs = gen8_emit_ggtt_write(cs,
+ GUC_PREEMPT_FINISHED,
+ addr);
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
- /* MMIO reg state */
- for_each_engine(engine, dev_priv, id) {
- blob->reg_state.white_list[engine->guc_id].mmio_start =
- engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
+ GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
+ GUC_PREEMPT_BREADCRUMB_BYTES);
- /* Nothing to be saved or restored for now. */
- blob->reg_state.white_list[engine->guc_id].count = 0;
+ flush_ggtt_writes(ce->ring->vma);
}
+}
- /*
- * The GuC requires a "Golden Context" when it reinitialises
- * engines after a reset. Here we use the Render ring default
- * context, which must already exist and be pinned in the GGTT,
- * so its address won't change after we've told the GuC where
- * to find it. Note that we have to skip our header (1 page),
- * because our GuC shared data is there.
- */
- blob->ads.golden_context_lrca =
- guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset;
-
- /*
- * The GuC expects us to exclude the portion of the context image that
- * it skips from the size it is to read. It starts reading from after
- * the execlist context (so skipping the first page [PPHWSP] and 80
- * dwords). Weird guc is weird.
- */
- for_each_engine(engine, dev_priv, id)
- blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size;
+static int guc_clients_create(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_guc_client *client;
+
+ GEM_BUG_ON(guc->execbuf_client);
+ GEM_BUG_ON(guc->preempt_client);
+
+ client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CLIENT_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
+ if (IS_ERR(client)) {
+ DRM_ERROR("Failed to create GuC client for submission!\n");
+ return PTR_ERR(client);
+ }
+ guc->execbuf_client = client;
- base = guc_ggtt_offset(vma);
- blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
- blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
- blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
+ if (dev_priv->preempt_context) {
+ client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CLIENT_PRIORITY_KMD_HIGH,
+ dev_priv->preempt_context);
+ if (IS_ERR(client)) {
+ DRM_ERROR("Failed to create GuC client for preemption!\n");
+ guc_client_free(guc->execbuf_client);
+ guc->execbuf_client = NULL;
+ return PTR_ERR(client);
+ }
+ guc->preempt_client = client;
- kunmap(page);
+ guc_fill_preempt_context(guc);
+ }
return 0;
}
-static void guc_ads_destroy(struct intel_guc *guc)
+static void guc_clients_destroy(struct intel_guc *guc)
{
- i915_vma_unpin_and_release(&guc->ads_vma);
+ struct intel_guc_client *client;
+
+ client = fetch_and_zero(&guc->preempt_client);
+ if (client)
+ guc_client_free(client);
+
+ client = fetch_and_zero(&guc->execbuf_client);
+ guc_client_free(client);
}
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
*/
-int i915_guc_submission_init(struct drm_i915_private *dev_priv)
+int intel_guc_submission_init(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_vma *vma;
- void *vaddr;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
if (guc->stage_desc_pool)
return 0;
- vma = intel_guc_allocate_vma(guc,
- PAGE_ALIGN(sizeof(struct guc_stage_desc) *
- GUC_MAX_STAGE_DESCRIPTORS));
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ ret = guc_stage_desc_pool_create(guc);
+ if (ret)
+ return ret;
+ /*
+ * Keep static analysers happy, let them know that we allocated the
+ * vma after testing that it didn't exist earlier.
+ */
+ GEM_BUG_ON(!guc->stage_desc_pool);
- guc->stage_desc_pool = vma;
+ WARN_ON(!guc_verify_doorbells(guc));
+ ret = guc_clients_create(guc);
+ if (ret)
+ return ret;
- vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_vma;
+ for_each_engine(engine, dev_priv, id) {
+ guc->preempt_work[id].engine = engine;
+ INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
}
- guc->stage_desc_pool_vaddr = vaddr;
-
- ret = intel_guc_log_create(guc);
- if (ret < 0)
- goto err_vaddr;
-
- ret = guc_ads_create(guc);
- if (ret < 0)
- goto err_log;
-
- ida_init(&guc->stage_ids);
-
return 0;
-err_log:
- intel_guc_log_destroy(guc);
-err_vaddr:
- i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
-err_vma:
- i915_vma_unpin_and_release(&guc->stage_desc_pool);
- return ret;
}
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
+void intel_guc_submission_fini(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- ida_destroy(&guc->stage_ids);
- guc_ads_destroy(guc);
- intel_guc_log_destroy(guc);
- i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
- i915_vma_unpin_and_release(&guc->stage_desc_pool);
+ for_each_engine(engine, dev_priv, id)
+ cancel_work_sync(&guc->preempt_work[id].work);
+
+ guc_clients_destroy(guc);
+ WARN_ON(!guc_verify_doorbells(guc));
+
+ guc_stage_desc_pool_destroy(guc);
}
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
@@ -1036,7 +1120,9 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
enum intel_engine_id id;
int irqs;
- /* tell all command streamers to forward interrupts (but not vblank) to GuC */
+ /* tell all command streamers to forward interrupts (but not vblank)
+ * to GuC
+ */
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
@@ -1097,10 +1183,19 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}
-int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
+static void guc_submission_park(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_guc_client *client = guc->execbuf_client;
+ intel_engine_unpin_breadcrumbs_irq(engine);
+}
+
+static void guc_submission_unpark(struct intel_engine_cs *engine)
+{
+ intel_engine_pin_breadcrumbs_irq(engine);
+}
+
+int intel_guc_submission_enable(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
@@ -1118,61 +1213,50 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
sizeof(struct guc_wq_item) *
I915_NUM_ENGINES > GUC_WQ_SIZE);
- if (!client) {
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
- GUC_CLIENT_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
- if (IS_ERR(client)) {
- DRM_ERROR("Failed to create GuC client for execbuf!\n");
- return PTR_ERR(client);
- }
+ GEM_BUG_ON(!guc->execbuf_client);
- guc->execbuf_client = client;
- }
+ guc_reset_wq(guc->execbuf_client);
+ if (guc->preempt_client)
+ guc_reset_wq(guc->preempt_client);
err = intel_guc_sample_forcewake(guc);
if (err)
- goto err_execbuf_client;
-
- guc_reset_wq(client);
+ return err;
- err = guc_init_doorbell_hw(guc);
+ err = guc_clients_doorbell_init(guc);
if (err)
- goto err_execbuf_client;
+ return err;
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_execlists * const execlists = &engine->execlists;
- /* The tasklet was initialised by execlists, and may be in
- * a state of flux (across a reset) and so we just want to
- * take over the callback without changing any other state
- * in the tasklet.
- */
- execlists->irq_tasklet.func = i915_guc_irq_handler;
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- tasklet_schedule(&execlists->irq_tasklet);
+ struct intel_engine_execlists * const execlists =
+ &engine->execlists;
+
+ execlists->tasklet.func = guc_submission_tasklet;
+ engine->park = guc_submission_park;
+ engine->unpark = guc_submission_unpark;
+
+ engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
}
return 0;
-
-err_execbuf_client:
- guc_client_free(guc->execbuf_client);
- guc->execbuf_client = NULL;
- return err;
}
-void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
+void intel_guc_submission_disable(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
guc_interrupts_release(dev_priv);
+ guc_clients_doorbell_fini(guc);
/* Revert back to manual ELSP submission */
intel_engines_reset_default_submission(dev_priv);
-
- guc_client_free(guc->execbuf_client);
- guc->execbuf_client = NULL;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_guc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index cb4353b..fb081ce 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef _I915_GUC_SUBMISSION_H_
-#define _I915_GUC_SUBMISSION_H_
+#ifndef _INTEL_GUC_SUBMISSION_H_
+#define _INTEL_GUC_SUBMISSION_H_
#include <linux/spinlock.h>
@@ -52,7 +52,7 @@ struct drm_i915_private;
* queue (a circular array of work items), again described in the process
* descriptor. Work queue pages are mapped momentarily as required.
*/
-struct i915_guc_client {
+struct intel_guc_client {
struct i915_vma *vma;
void *vaddr;
struct i915_gem_context *owner;
@@ -67,14 +67,17 @@ struct i915_guc_client {
u16 doorbell_id;
unsigned long doorbell_offset;
+ /* Protects GuC client's WQ access */
spinlock_t wq_lock;
/* Per-engine counts of GuC submissions */
u64 submissions[I915_NUM_ENGINES];
};
-int i915_guc_submission_init(struct drm_i915_private *dev_priv);
-int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+int intel_guc_submission_init(struct intel_guc *guc);
+int intel_guc_submission_enable(struct intel_guc *guc);
+void intel_guc_submission_disable(struct intel_guc *guc);
+void intel_guc_submission_fini(struct intel_guc *guc);
+int intel_guc_preempt_work_create(struct intel_guc *guc);
+void intel_guc_preempt_work_destroy(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index b4a7f31..a2fe7c8 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -95,12 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
}
- if (!i915_modparams.enable_execlists) {
- DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");
- return -EIO;
- }
-
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 12ac270..c8ea510 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -27,13 +27,9 @@
static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
- if (INTEL_GEN(engine->i915) >= 8) {
- return (ipehr >> 23) == 0x1c;
- } else {
- ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
- return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER);
- }
+ ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
+ return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER);
}
static struct intel_engine_cs *
@@ -41,31 +37,20 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
u64 offset)
{
struct drm_i915_private *dev_priv = engine->i915;
+ u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
struct intel_engine_cs *signaller;
enum intel_engine_id id;
- if (INTEL_GEN(dev_priv) >= 8) {
- for_each_engine(signaller, dev_priv, id) {
- if (engine == signaller)
- continue;
-
- if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
- return signaller;
- }
- } else {
- u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
+ for_each_engine(signaller, dev_priv, id) {
+ if (engine == signaller)
+ continue;
- for_each_engine(signaller, dev_priv, id) {
- if(engine == signaller)
- continue;
-
- if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
- return signaller;
- }
+ if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
+ return signaller;
}
- DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
- engine->name, ipehr, offset);
+ DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x\n",
+ engine->name, ipehr);
return ERR_PTR(-ENODEV);
}
@@ -135,11 +120,6 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
return NULL;
*seqno = ioread32(vaddr + head + 4) + 1;
- if (INTEL_GEN(dev_priv) >= 8) {
- offset = ioread32(vaddr + head + 12);
- offset <<= 32;
- offset |= ioread32(vaddr + head + 8);
- }
return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
@@ -266,19 +246,19 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
*/
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, 0,
+ i915_handle_error(dev_priv, BIT(engine->id),
"Kicking stuck wait on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return ENGINE_WAIT_KICK;
}
- if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ if (IS_GEN(dev_priv, 6, 7) && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
return ENGINE_DEAD;
case 1:
- i915_handle_error(dev_priv, 0,
+ i915_handle_error(dev_priv, ALL_ENGINES,
"Kicking stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
@@ -369,13 +349,18 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
case ENGINE_ACTIVE_HEAD:
case ENGINE_ACTIVE_SUBUNITS:
- /* Seqno stuck with still active engine gets leeway,
+ /*
+ * Seqno stuck with still active engine gets leeway,
* in hopes that it is just a long shader.
*/
timeout = I915_SEQNO_DEAD_TIMEOUT;
break;
case ENGINE_DEAD:
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer("hangcheck");
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
break;
default:
@@ -426,7 +411,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int hung = 0, stuck = 0;
- int busy_count = 0;
if (!i915_modparams.enable_hangcheck)
return;
@@ -444,30 +428,26 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_hangcheck cur_state, *hc = &cur_state;
- const bool busy = intel_engine_has_waiter(engine);
+ struct intel_engine_hangcheck hc;
semaphore_clear_deadlocks(dev_priv);
- hangcheck_load_sample(engine, hc);
- hangcheck_accumulate_sample(engine, hc);
- hangcheck_store_sample(engine, hc);
+ hangcheck_load_sample(engine, &hc);
+ hangcheck_accumulate_sample(engine, &hc);
+ hangcheck_store_sample(engine, &hc);
if (engine->hangcheck.stalled) {
hung |= intel_engine_flag(engine);
- if (hc->action != ENGINE_DEAD)
+ if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
}
-
- busy_count += busy;
}
if (hung)
hangcheck_declare_hang(dev_priv, hung, stuck);
/* Reset timer in case GPU hangs without another request being added */
- if (busy_count)
- i915_queue_hangcheck(dev_priv);
+ i915_queue_hangcheck(dev_priv);
}
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
new file mode 100644
index 0000000..14ca5d3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -0,0 +1,807 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_hdcp.h>
+#include <linux/i2c.h>
+#include <linux/random.h>
+
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+#define KEY_LOAD_TRIES 5
+
+static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ int ret, read_ret;
+ bool ksv_ready;
+
+ /* Poll for ksv list ready (spec says max time allowed is 5s) */
+ ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
+ &ksv_ready),
+ read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
+ 100 * 1000);
+ if (ret)
+ return ret;
+ if (read_ret)
+ return read_ret;
+ if (!ksv_ready)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
+ I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
+ HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
+}
+
+static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ u32 val;
+
+ val = I915_READ(HDCP_KEY_STATUS);
+ if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
+ return 0;
+
+ /*
+ * On HSW and BDW, the HW loads the HDCP1.4 key when the display comes
+ * out of reset. So if the key is not already loaded, it's an error state.
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
+ return -ENXIO;
+
+ /*
+ * Initiate loading the HDCP key from fuses.
+ *
+ * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only SKL
+ * and KBL differ from other platforms in the key-load trigger process.
+ */
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ mutex_lock(&dev_priv->pcu_lock);
+ ret = sandybridge_pcode_write(dev_priv,
+ SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ mutex_unlock(&dev_priv->pcu_lock);
+ if (ret) {
+ DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
+ }
+
+ /* Wait for the keys to load (500us) */
+ ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
+ 10, 1, &val);
+ if (ret)
+ return ret;
+ else if (!(val & HDCP_KEY_LOAD_STATUS))
+ return -ENXIO;
+
+ /* Send Aksv over to PCH display for use in authentication */
+ I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
+
+ return 0;
+}
+
+/* Returns updated SHA-1 index */
+static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
+{
+ I915_WRITE(HDCP_SHA_TEXT, sha_text);
+ if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
+ DRM_ERROR("Timed out waiting for SHA1 ready\n");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static
+u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
+{
+ enum port port = intel_dig_port->base.port;
+ switch (port) {
+ case PORT_A:
+ return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
+ case PORT_B:
+ return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
+ case PORT_C:
+ return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
+ case PORT_D:
+ return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
+ case PORT_E:
+ return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
+ default:
+ break;
+ }
+ DRM_ERROR("Unknown port %d\n", port);
+ return -EINVAL;
+}
+
+static
+bool intel_hdcp_is_ksv_valid(u8 *ksv)
+{
+ int i, ones = 0;
+ /* KSV has 20 1's and 20 0's */
+ for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+ ones += hweight8(ksv[i]);
+ if (ones != 20)
+ return false;
+ return true;
+}
+
+/* Implements Part 2 of the HDCP authorization procedure */
+static
+int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ struct drm_i915_private *dev_priv;
+ u32 vprime, sha_text, sha_leftovers, rep_ctl;
+ u8 bstatus[2], num_downstream, *ksv_fifo;
+ int ret, i, j, sha_idx;
+
+ dev_priv = intel_dig_port->base.base.dev->dev_private;
+
+ ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+ if (ret) {
+ DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+ return ret;
+ }
+
+ ret = shim->read_bstatus(intel_dig_port, bstatus);
+ if (ret)
+ return ret;
+
+ if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
+ DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
+ DRM_ERROR("Max Topology Limit Exceeded\n");
+ return -EPERM;
+ }
+
+ /*
+ * When the repeater reports a device count of 0, the HDCP 1.4 spec
+ * allows disabling HDCP encryption, which implies the repeater can't
+ * have its own display. Since a repeater with 0 downstream devices
+ * consumes no encrypted content, we fail the authentication.
+ */
+ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
+ if (num_downstream == 0)
+ return -EINVAL;
+
+ ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!ksv_fifo)
+ return -ENOMEM;
+
+ ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+ if (ret)
+ return ret;
+
+ /* Process V' values from the receiver */
+ for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
+ ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
+ if (ret)
+ return ret;
+ I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
+ }
+
+ /*
+ * We need to write the concatenation of all device KSVs, BINFO (DP) ||
+ * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
+ * stream is written via the HDCP_SHA_TEXT register in 32-bit
+ * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
+ * index will keep track of our progress through the 64 bytes as well
+ * as help us work the 40-bit KSVs through our 32-bit register.
+ *
+ * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
+ */
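+ /*
+  * Illustrative walk-through of the loop below: each KSV is 5 bytes
+  * while HDCP_SHA_TEXT takes 4 at a time, so the first KSV leaves 1
+  * byte over (sha_leftovers = 1), the second leaves 2, the third 3,
+  * and the fourth's leftovers form a full word that is flushed
+  * immediately, cycling sha_leftovers through 1, 2, 3, 0.
+  */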
+ sha_idx = 0;
+ sha_text = 0;
+ sha_leftovers = 0;
+ rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ for (i = 0; i < num_downstream; i++) {
+ unsigned int sha_empty;
+ u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
+
+ /* Fill up the empty slots in sha_text and write it out */
+ sha_empty = sizeof(sha_text) - sha_leftovers;
+ for (j = 0; j < sha_empty; j++)
+ sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
+
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+
+ /* Programming guide writes this every 64 bytes */
+ sha_idx += sizeof(sha_text);
+ if (!(sha_idx % 64))
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+
+ /* Store the leftover bytes from the ksv in sha_text */
+ sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
+ sha_text = 0;
+ for (j = 0; j < sha_leftovers; j++)
+ sha_text |= ksv[sha_empty + j] <<
+ ((sizeof(sha_text) - j - 1) * 8);
+
+ /*
+ * If we still have room in sha_text for more data, continue.
+ * Otherwise, write it out immediately.
+ */
+ if (sizeof(sha_text) > sha_leftovers)
+ continue;
+
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_leftovers = 0;
+ sha_text = 0;
+ sha_idx += sizeof(sha_text);
+ }
+
+ /*
+ * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
+ * bytes are leftover from the last ksv, we might be able to fit them
+ * all in sha_text (first 2 cases), or we might need to split them up
+ * into 2 writes (last 2 cases).
+ */
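+ /*
+  * For instance (illustrative only): with sha_leftovers == 1, the single
+  * leftover KSV byte plus the two BSTATUS bytes form 24 bits of text, so
+  * the remaining 8 + 32 + 24 = 64 bits written in that branch are all M0.
+  */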
+ if (sha_leftovers == 0) {
+ /* Write 16 bits of text, 16 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ ret = intel_write_sha_text(dev_priv,
+ bstatus[0] << 8 | bstatus[1]);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 32 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 16 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ } else if (sha_leftovers == 1) {
+ /* Write 24 bits of text, 8 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
+ /* Only 24-bits of data, must be in the LSB */
+ sha_text = (sha_text & 0xffffff00) >> 8;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 32 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 24 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ } else if (sha_leftovers == 2) {
+ /* Write 32 bits of text */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 64 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ for (i = 0; i < 2; i++) {
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ }
+ } else if (sha_leftovers == 3) {
+ /* Write 32 bits of text */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ sha_text |= bstatus[0] << 24;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 8 bits of text, 24 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ ret = intel_write_sha_text(dev_priv, bstatus[1]);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 32 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 8 bits of M0 */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ } else {
+ DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers);
+ return -EINVAL;
+ }
+
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
+ while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ }
+
+ /*
+ * Last write gets the length of the concatenation in bits. That is:
+ * - 5 bytes per device
+ * - 10 bytes for BINFO/BSTATUS(2), M0(8)
+ */
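+ /* e.g. a single downstream device gives (1 * 5 + 10) * 8 = 120 bits */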
+ sha_text = (num_downstream * 5 + 10) * 8;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+
+ /* Tell the HW we're done with the hash and wait for it to ACK */
+ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
+ if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ HDCP_SHA1_COMPLETE,
+ HDCP_SHA1_COMPLETE, 1)) {
+ DRM_ERROR("Timed out waiting for SHA1 complete\n");
+ return -ETIMEDOUT;
+ }
+ if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
+ DRM_ERROR("SHA-1 mismatch, HDCP failed\n");
+ return -ENXIO;
+ }
+
+ DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
+ num_downstream);
+ return 0;
+}
+
+/* Implements Part 1 of the HDCP authorization procedure */
+static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ struct drm_i915_private *dev_priv;
+ enum port port;
+ unsigned long r0_prime_gen_start;
+ int ret, i, tries = 2;
+ union {
+ u32 reg[2];
+ u8 shim[DRM_HDCP_AN_LEN];
+ } an;
+ union {
+ u32 reg[2];
+ u8 shim[DRM_HDCP_KSV_LEN];
+ } bksv;
+ union {
+ u32 reg;
+ u8 shim[DRM_HDCP_RI_LEN];
+ } ri;
+ bool repeater_present, hdcp_capable;
+
+ dev_priv = intel_dig_port->base.base.dev->dev_private;
+
+ port = intel_dig_port->base.port;
+
+ /*
+ * Detects whether the display is HDCP capable. Although we check for
+ * valid Bksv below, the HDCP over DP spec requires that we check
+ * whether the display supports HDCP before we write An. For HDMI
+ * displays, this is not necessary.
+ */
+ if (shim->hdcp_capable) {
+ ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
+ if (ret)
+ return ret;
+ if (!hdcp_capable) {
+ DRM_ERROR("Panel is not HDCP capable\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Initialize An with 2 random values and acquire it */
+ for (i = 0; i < 2; i++)
+ I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
+ I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
+
+ /* Wait for An to be acquired */
+ if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ HDCP_STATUS_AN_READY,
+ HDCP_STATUS_AN_READY, 1)) {
+ DRM_ERROR("Timed out waiting for An\n");
+ return -ETIMEDOUT;
+ }
+
+ an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
+ an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
+ ret = shim->write_an_aksv(intel_dig_port, an.shim);
+ if (ret)
+ return ret;
+
+ r0_prime_gen_start = jiffies;
+
+ memset(&bksv, 0, sizeof(bksv));
+
+ /* HDCP spec states that we must retry the bksv if it is invalid */
+ for (i = 0; i < tries; i++) {
+ ret = shim->read_bksv(intel_dig_port, bksv.shim);
+ if (ret)
+ return ret;
+ if (intel_hdcp_is_ksv_valid(bksv.shim))
+ break;
+ }
+ if (i == tries) {
+ DRM_ERROR("HDCP failed, Bksv is invalid\n");
+ return -ENODEV;
+ }
+
+ I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
+ I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
+
+ ret = shim->repeater_present(intel_dig_port, &repeater_present);
+ if (ret)
+ return ret;
+ if (repeater_present)
+ I915_WRITE(HDCP_REP_CTL,
+ intel_hdcp_get_repeater_ctl(intel_dig_port));
+
+ ret = shim->toggle_signalling(intel_dig_port, true);
+ if (ret)
+ return ret;
+
+ I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
+
+ /* Wait for R0 ready */
+ if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
+ DRM_ERROR("Timed out waiting for R0 ready\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Wait for R0' to become available. The spec says 100ms from Aksv, but
+ * some monitors can take longer than this. We'll set the timeout at
+ * 300ms just to be sure.
+ *
+ * On DP, there's an R0_READY bit available but no such bit
+ * exists on HDMI. Since the upper-bound is the same, we'll just do
+ * the stupid thing instead of polling on one and not the other.
+ */
+ wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
+
+ ri.reg = 0;
+ ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+ if (ret)
+ return ret;
+ I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+
+ /* Wait for Ri prime match */
+ if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+ DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
+ I915_READ(PORT_HDCP_STATUS(port)));
+ return -ETIMEDOUT;
+ }
+
+ /* Wait for encryption confirmation */
+ if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
+ DRM_ERROR("Timed out waiting for encryption\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * XXX: If we have MST-connected devices, we need to enable encryption
+ * on those as well.
+ */
+
+ if (repeater_present)
+ return intel_hdcp_auth_downstream(intel_dig_port, shim);
+
+ DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
+ return 0;
+}
+
+static
+struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
+{
+ return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
+static int _intel_hdcp_disable(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ enum port port = intel_dig_port->base.port;
+ int ret;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
+ connector->base.name, connector->base.base.id);
+
+ I915_WRITE(PORT_HDCP_CONF(port), 0);
+ if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
+ 20)) {
+ DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable HDCP signalling\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("HDCP is disabled\n");
+ return 0;
+}
+
+static int _intel_hdcp_enable(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+ int i, ret, tries = 3;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
+ connector->base.name, connector->base.base.id);
+
+ if (!(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(1))) {
+ DRM_ERROR("PG1 is disabled, cannot load keys\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < KEY_LOAD_TRIES; i++) {
+ ret = intel_hdcp_load_keys(dev_priv);
+ if (!ret)
+ break;
+ intel_hdcp_clear_keys(dev_priv);
+ }
+ if (ret) {
+ DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
+ return ret;
+ }
+
+ /* In case of authentication failures, the HDCP spec expects reauth. */
+ for (i = 0; i < tries; i++) {
+ ret = intel_hdcp_auth(conn_to_dig_port(connector),
+ connector->hdcp_shim);
+ if (!ret)
+ return 0;
+
+ DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
+
+ /* Ensuring HDCP encryption and signalling are stopped. */
+ _intel_hdcp_disable(connector);
+ }
+
+ DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+ return ret;
+}
+
+static void intel_hdcp_check_work(struct work_struct *work)
+{
+ struct intel_connector *connector = container_of(to_delayed_work(work),
+ struct intel_connector,
+ hdcp_check_work);
+ if (!intel_hdcp_check_link(connector))
+ schedule_delayed_work(&connector->hdcp_check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+}
+
+static void intel_hdcp_prop_work(struct work_struct *work)
+{
+ struct intel_connector *connector = container_of(work,
+ struct intel_connector,
+ hdcp_prop_work);
+ struct drm_device *dev = connector->base.dev;
+ struct drm_connector_state *state;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&connector->hdcp_mutex);
+
+ /*
+ * This worker is only used to flip between ENABLED/DESIRED. Either of
+ * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
+ * we're running just after hdcp has been disabled, so just exit
+ */
+ if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ state = connector->base.state;
+ state->content_protection = connector->hdcp_value;
+ }
+
+ mutex_unlock(&connector->hdcp_mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
+{
+ /* PORT E doesn't have HDCP, and PORT F is disabled */
+ return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
+ !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
+}
+
+int intel_hdcp_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *hdcp_shim)
+{
+ int ret;
+
+ ret = drm_connector_attach_content_protection_property(
+ &connector->base);
+ if (ret)
+ return ret;
+
+ connector->hdcp_shim = hdcp_shim;
+ mutex_init(&connector->hdcp_mutex);
+ INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
+ INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
+ return 0;
+}
+
+int intel_hdcp_enable(struct intel_connector *connector)
+{
+ int ret;
+
+ if (!connector->hdcp_shim)
+ return -ENOENT;
+
+ mutex_lock(&connector->hdcp_mutex);
+
+ ret = _intel_hdcp_enable(connector);
+ if (ret)
+ goto out;
+
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&connector->hdcp_prop_work);
+ schedule_delayed_work(&connector->hdcp_check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+out:
+ mutex_unlock(&connector->hdcp_mutex);
+ return ret;
+}
+
+int intel_hdcp_disable(struct intel_connector *connector)
+{
+ int ret = 0;
+
+ if (!connector->hdcp_shim)
+ return -ENOENT;
+
+ mutex_lock(&connector->hdcp_mutex);
+
+ if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+ ret = _intel_hdcp_disable(connector);
+ }
+
+ mutex_unlock(&connector->hdcp_mutex);
+ cancel_delayed_work_sync(&connector->hdcp_check_work);
+ return ret;
+}
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state)
+{
+ uint64_t old_cp = old_state->content_protection;
+ uint64_t new_cp = new_state->content_protection;
+ struct drm_crtc_state *crtc_state;
+
+ if (!new_state->crtc) {
+ /*
+ * If the connector is being disabled with CP enabled, mark it
+ * desired so it's re-enabled when the connector is brought back
+ */
+ if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_state->content_protection =
+ DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ return;
+ }
+
+ /*
+ * Nothing to do if the state didn't change, or HDCP was activated since
+ * the last commit
+ */
+ if (old_cp == new_cp ||
+ (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+ crtc_state->mode_changed = true;
+}
+
+/* Implements Part 3 of the HDCP authorization procedure */
+int intel_hdcp_check_link(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ enum port port = intel_dig_port->base.port;
+ int ret = 0;
+
+ if (!connector->hdcp_shim)
+ return -ENOENT;
+
+ mutex_lock(&connector->hdcp_mutex);
+
+ if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ goto out;
+
+ if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
+ DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
+ connector->base.name, connector->base.base.id,
+ I915_READ(PORT_HDCP_STATUS(port)));
+ ret = -ENXIO;
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&connector->hdcp_prop_work);
+ goto out;
+ }
+
+ if (connector->hdcp_shim->check_link(intel_dig_port)) {
+ if (connector->hdcp_value !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ connector->hdcp_value =
+ DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&connector->hdcp_prop_work);
+ }
+ goto out;
+ }
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
+ connector->base.name, connector->base.base.id);
+
+ ret = _intel_hdcp_disable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&connector->hdcp_prop_work);
+ goto out;
+ }
+
+ ret = _intel_hdcp_enable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+ connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&connector->hdcp_prop_work);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&connector->hdcp_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4dea833..1baef4a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -186,7 +187,7 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -245,7 +246,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -362,7 +363,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -513,12 +514,14 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
static void
intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ conn_state->connector,
&crtc_state->base.adjusted_mode);
if (ret < 0)
return;
@@ -536,7 +539,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -585,7 +588,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
@@ -687,7 +690,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -726,7 +729,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
@@ -769,7 +772,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
@@ -783,7 +786,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
+ u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -822,7 +825,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
@@ -855,7 +858,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, crtc_state);
intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -874,6 +877,248 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
adapter, enable);
}
+static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *dev_priv =
+ intel_dig_port->base.base.dev->dev_private;
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ hdmi->ddc_bus);
+ int ret;
+ u8 start = offset & 0xff;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ },
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = buffer
+ }
+ };
+ ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret == ARRAY_SIZE(msgs))
+ return 0;
+ return ret >= 0 ? -EIO : ret;
+}
+
+static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *dev_priv =
+ intel_dig_port->base.base.dev->dev_private;
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ hdmi->ddc_bus);
+ int ret;
+ u8 *write_buf;
+ struct i2c_msg msg;
+
+ write_buf = kzalloc(size + 1, GFP_KERNEL);
+ if (!write_buf)
+ return -ENOMEM;
+
+ write_buf[0] = offset & 0xff;
+ memcpy(&write_buf[1], buffer, size);
+
+ msg.addr = DRM_HDCP_DDC_ADDR;
+ msg.flags = 0;
+ msg.len = size + 1;
+ msg.buf = write_buf;
+
+ ret = i2c_transfer(adapter, &msg, 1);
+ kfree(write_buf);
+
+ if (ret == 1)
+ return 0;
+ return ret >= 0 ? -EIO : ret;
+}
+
+static
+int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+ u8 *an)
+{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct drm_i915_private *dev_priv =
+ intel_dig_port->base.base.dev->dev_private;
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ hdmi->ddc_bus);
+ int ret;
+
+ ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
+ DRM_HDCP_AN_LEN);
+ if (ret) {
+ DRM_ERROR("Write An over DDC failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = intel_gmbus_output_aksv(adapter);
+ if (ret < 0) {
+ DRM_ERROR("Failed to output aksv (%d)\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
+ u8 *bksv)
+{
+ int ret;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
+ DRM_HDCP_KSV_LEN);
+ if (ret)
+ DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
+ u8 *bstatus)
+{
+ int ret;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
+ bstatus, DRM_HDCP_BSTATUS_LEN);
+ if (ret)
+ DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+ bool *repeater_present)
+{
+ int ret;
+ u8 val;
+
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ if (ret) {
+ DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ return ret;
+ }
+ *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+ u8 *ri_prime)
+{
+ int ret;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
+ ri_prime, DRM_HDCP_RI_LEN);
+ if (ret)
+ DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+ bool *ksv_ready)
+{
+ int ret;
+ u8 val;
+
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ if (ret) {
+ DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ return ret;
+ }
+ *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+ int num_downstream, u8 *ksv_fifo)
+{
+ int ret;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
+ ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
+ if (ret) {
+ DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+ int i, u32 *part)
+{
+ int ret;
+
+ if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
+ return -EINVAL;
+
+ ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
+ part, DRM_HDCP_V_PRIME_PART_LEN);
+ if (ret)
+ DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+ bool enable)
+{
+ int ret;
+
+ if (!enable)
+ usleep_range(6, 60); /* Bspec says >= 6us */
+
+ ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
+ if (ret) {
+ DRM_ERROR("%s HDCP signalling failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static
+bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+{
+ struct drm_i915_private *dev_priv =
+ intel_dig_port->base.base.dev->dev_private;
+ enum port port = intel_dig_port->base.port;
+ int ret;
+ union {
+ u32 reg;
+ u8 shim[DRM_HDCP_RI_LEN];
+ } ri;
+
+ ret = intel_hdmi_hdcp_read_ri_prime(intel_dig_port, ri.shim);
+ if (ret)
+ return false;
+
+ I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+
+ /* Wait for Ri prime match */
+ if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+ DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
+ I915_READ(PORT_HDCP_STATUS(port)));
+ return false;
+ }
+ return true;
+}
+
+static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
+ .write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
+ .read_bksv = intel_hdmi_hdcp_read_bksv,
+ .read_bstatus = intel_hdmi_hdcp_read_bstatus,
+ .repeater_present = intel_hdmi_hdcp_repeater_present,
+ .read_ri_prime = intel_hdmi_hdcp_read_ri_prime,
+ .read_ksv_ready = intel_hdmi_hdcp_read_ksv_ready,
+ .read_ksv_fifo = intel_hdmi_hdcp_read_ksv_fifo,
+ .read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part,
+ .toggle_signalling = intel_hdmi_hdcp_toggle_signalling,
+ .check_link = intel_hdmi_hdcp_check_link,
+};
+
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -958,6 +1203,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
u32 tmp, flags = 0;
int dotclock;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
@@ -1205,7 +1452,8 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
@@ -1215,7 +1463,8 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(encoder);
+ intel_audio_codec_disable(encoder,
+ old_crtc_state, old_conn_state);
}
static void pch_post_disable_hdmi(struct intel_encoder *encoder,
@@ -1225,24 +1474,34 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder,
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
-static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
+static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
- if (IS_G4X(dev_priv))
- return 165000;
- else if (IS_GEMINILAKE(dev_priv))
- return 594000;
- else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
- return 300000;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[encoder->port];
+ int max_tmds_clock;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ max_tmds_clock = 594000;
+ else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ max_tmds_clock = 300000;
+ else if (INTEL_GEN(dev_priv) >= 5)
+ max_tmds_clock = 225000;
else
- return 225000;
+ max_tmds_clock = 165000;
+
+ if (info->max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
+
+ return max_tmds_clock;
}
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits,
bool force_dvi)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
+ struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
+ int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder);
if (respect_downstream_limits) {
struct intel_connector *connector = hdmi->attached_connector;
@@ -1298,9 +1557,6 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool force_dvi =
READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
clock = mode->clock;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1337,6 +1593,12 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
if (HAS_GMCH_DISPLAY(dev_priv))
return false;
+ if (crtc_state->pipe_bpp <= 8*3)
+ return false;
+
+ if (!crtc_state->has_hdmi_sink)
+ return false;
+
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
@@ -1361,7 +1623,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
}
}
- /* Display Wa #1139 */
+ /* Display WA #1139: glk */
if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
crtc_state->base.adjusted_mode.htotal > 5460)
return false;
@@ -1462,9 +1724,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && !force_dvi &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK &&
- hdmi_12bpc_possible(pipe_config)) {
+ if (hdmi_12bpc_possible(pipe_config) &&
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
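
Editor's note (illustrative, not part of the patch): 12bpc output raises the
TMDS clock by 3/2 relative to 8bpc, so assuming clock_12bpc is derived as
clock * 3 / 2, a 148500 kHz 1080p60 mode needs a 222750 kHz port clock; both
hdmi_12bpc_possible() and hdmi_port_clock_valid() have to accept that higher
clock before desired_bpp is bumped to 12*3.
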
@@ -1493,7 +1754,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
- if (scdc->scrambling.supported && IS_GEMINILAKE(dev_priv)) {
+ if (scdc->scrambling.supported && (INTEL_GEN(dev_priv) >= 10 ||
+ IS_GEMINILAKE(dev_priv))) {
if (scdc->scrambling.low_rates)
pipe_config->hdmi_scrambling = true;
@@ -1527,7 +1789,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
- enum port port = hdmi_to_dig_port(hdmi)->port;
+ enum port port = hdmi_to_dig_port(hdmi)->base.port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
@@ -1545,7 +1807,10 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
* there's nothing connected to the port.
*/
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
- if (has_edid &&
+ /* An overridden EDID implies that we want this port for testing.
+ * Make sure not to set limits for that port.
+ */
+ if (has_edid && !connector->override_edid &&
intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
type = DRM_DP_DUAL_MODE_TYPE1_DVI;
@@ -1573,12 +1838,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct edid *edid;
bool connected = false;
+ struct i2c_adapter *i2c;
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv,
- intel_hdmi->ddc_bus));
+ i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+ edid = drm_get_edid(connector, i2c);
+
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
@@ -1611,12 +1884,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi_unset_edid(connector);
- if (intel_hdmi_set_edid(connector)) {
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-
- hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
+ if (intel_hdmi_set_edid(connector))
status = connector_status_connected;
- } else
+ else
status = connector_status_disconnected;
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
@@ -1627,8 +1897,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
static void
intel_hdmi_force(struct drm_connector *connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -1638,7 +1906,6 @@ intel_hdmi_force(struct drm_connector *connector)
return;
intel_hdmi_set_edid(connector);
- hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
@@ -1671,10 +1938,9 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- vlv_phy_pre_encoder_enable(encoder);
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
/* HDMI 1.0V-2dB */
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
@@ -1695,7 +1961,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
{
intel_hdmi_prepare(encoder, pipe_config);
- vlv_phy_pre_pll_enable(encoder);
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
@@ -1704,14 +1970,14 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
{
intel_hdmi_prepare(encoder, pipe_config);
- chv_phy_pre_pll_enable(encoder);
+ chv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- chv_phy_post_pll_disable(encoder);
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
@@ -1719,7 +1985,7 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
- vlv_phy_reset_lanes(encoder);
+ vlv_phy_reset_lanes(encoder, old_crtc_state);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder,
@@ -1732,7 +1998,7 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
mutex_unlock(&dev_priv->sb_lock);
}
@@ -1745,7 +2011,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- chv_phy_pre_encoder_enable(encoder);
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
@@ -1909,6 +2175,9 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
case PORT_D:
ddc_pin = GMBUS_PIN_4_CNP;
break;
+ case PORT_F:
+ ddc_pin = GMBUS_PIN_3_BXT;
+ break;
default:
MISSING_CASE(port);
ddc_pin = GMBUS_PIN_1_BXT;
@@ -1917,6 +2186,37 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
+static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ u8 ddc_pin;
+
+ switch (port) {
+ case PORT_A:
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_2_BXT;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_9_TC1_ICP;
+ break;
+ case PORT_D:
+ ddc_pin = GMBUS_PIN_10_TC2_ICP;
+ break;
+ case PORT_E:
+ ddc_pin = GMBUS_PIN_11_TC3_ICP;
+ break;
+ case PORT_F:
+ ddc_pin = GMBUS_PIN_12_TC4_ICP;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_2_BXT;
+ break;
+ }
+ return ddc_pin;
+}
+
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -1959,6 +2259,8 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
+ else if (IS_ICELAKE(dev_priv))
+ ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2004,7 +2306,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_dig_port->port;
+ enum port port = intel_encoder->port;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
@@ -2022,14 +2324,14 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
- if (IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
connector->ycbcr_420_allowed = true;
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
if (WARN_ON(port == PORT_A))
return;
- intel_encoder->hpd_pin = intel_hpd_pin(port);
+ intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -2038,6 +2340,13 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi_add_properties(intel_hdmi, connector);
+ if (is_hdcp_supported(dev_priv, port)) {
+ int ret = intel_hdcp_init(intel_connector,
+ &intel_hdmi_hdcp_shim);
+ if (ret)
+ DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ }
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_hdmi->attached_connector = intel_connector;
@@ -2074,6 +2383,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port));
+ intel_encoder->hotplug = intel_encoder_hotplug;
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
@@ -2124,7 +2434,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
- intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 875d5d2..0e3d3e8 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -78,12 +78,14 @@
/**
* intel_hpd_pin_to_port - return port hard associated with certain pin.
+ * @dev_priv: private driver data pointer
* @pin: the hpd pin to get associated port
*
* Return port that is associated with @pin and PORT_NONE if no port is
* hard associated with that @pin.
*/
-enum port intel_hpd_pin_to_port(enum hpd_pin pin)
+enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
+ enum hpd_pin pin)
{
switch (pin) {
case HPD_PORT_A:
@@ -95,6 +97,8 @@ enum port intel_hpd_pin_to_port(enum hpd_pin pin)
case HPD_PORT_D:
return PORT_D;
case HPD_PORT_E:
+ if (IS_CNL_WITH_PORT_F(dev_priv))
+ return PORT_F;
return PORT_E;
default:
return PORT_NONE; /* no port for this pin */
@@ -102,13 +106,17 @@ enum port intel_hpd_pin_to_port(enum hpd_pin pin)
}
/**
- * intel_hpd_pin - return pin hard associated with certain port.
+ * intel_hpd_pin_default - return default pin associated with certain port.
+ * @dev_priv: private driver data pointer
* @port: the hpd port to get associated pin
*
+ * It is only valid for and used by digital port encoders.
+ *
* Return pin that is associated with @port and HPD_NONE if no pin is
* hard associated with that @port.
*/
-enum hpd_pin intel_hpd_pin(enum port port)
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+ enum port port)
{
switch (port) {
case PORT_A:
@@ -121,6 +129,9 @@ enum hpd_pin intel_hpd_pin(enum port port)
return HPD_PORT_D;
case PORT_E:
return HPD_PORT_E;
+ case PORT_F:
+ if (IS_CNL_WITH_PORT_F(dev_priv))
+ return HPD_PORT_E;
default:
MISSING_CASE(port);
return HPD_NONE;
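
Editor's sketch (illustrative, not part of the patch): with dev_priv threaded
through both helpers, the pin <-> port mapping is now platform dependent. On a
CNL SKU where IS_CNL_WITH_PORT_F() is true the two calls pair up as below; on
every other platform HPD_PORT_E still maps back to PORT_E. The fragment
assumes a valid dev_priv in scope.

	enum hpd_pin pin = intel_hpd_pin_default(dev_priv, PORT_F); /* HPD_PORT_E */
	enum port port = intel_hpd_pin_to_port(dev_priv, pin);      /* PORT_F */
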
@@ -263,24 +274,26 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
intel_runtime_pm_put(dev_priv);
}
-static bool intel_hpd_irq_event(struct drm_device *dev,
- struct drm_connector *connector)
+bool intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
- old_status = connector->status;
+ old_status = connector->base.status;
- connector->status = drm_helper_probe_detect(connector, NULL, false);
+ connector->base.status =
+ drm_helper_probe_detect(&connector->base, NULL, false);
- if (old_status == connector->status)
+ if (old_status == connector->base.status)
return false;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.id,
- connector->name,
+ connector->base.base.id,
+ connector->base.name,
drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->status));
+ drm_get_connector_status_name(connector->base.status));
return true;
}
@@ -370,10 +383,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
connector->name, intel_encoder->hpd_pin);
- if (intel_encoder->hot_plug)
- intel_encoder->hot_plug(intel_encoder);
- if (intel_hpd_irq_event(dev, connector))
- changed = true;
+
+ changed |= intel_encoder->hotplug(intel_encoder,
+ intel_connector);
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -417,7 +429,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(i) & pin_mask))
continue;
- port = intel_hpd_pin_to_port(i);
+ port = intel_hpd_pin_to_port(dev_priv, i);
is_dig_port = port != PORT_NONE &&
dev_priv->hotplug.irq_port[port];
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index c8a48cb..65e2afb 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -27,157 +27,9 @@
#include "intel_huc.h"
#include "i915_drv.h"
-/**
- * DOC: HuC Firmware
- *
- * Motivation:
- * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
- * Efficiency Video Coding) operations. Userspace can use the firmware
- * capabilities by adding HuC specific commands to batch buffers.
- *
- * Implementation:
- * The same firmware loader is used as the GuC. However, the actual
- * loading to HW is deferred until GEM initialization is done.
- *
- * Note that HuC firmware loading must be done before GuC loading.
- */
-
-#define BXT_HUC_FW_MAJOR 01
-#define BXT_HUC_FW_MINOR 07
-#define BXT_BLD_NUM 1398
-
-#define SKL_HUC_FW_MAJOR 01
-#define SKL_HUC_FW_MINOR 07
-#define SKL_BLD_NUM 1398
-
-#define KBL_HUC_FW_MAJOR 02
-#define KBL_HUC_FW_MINOR 00
-#define KBL_BLD_NUM 1810
-
-#define GLK_HUC_FW_MAJOR 02
-#define GLK_HUC_FW_MINOR 00
-#define GLK_BLD_NUM 1748
-
-#define HUC_FW_PATH(platform, major, minor, bld_num) \
- "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
- __stringify(minor) "_" __stringify(bld_num) ".bin"
-
-#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
- SKL_HUC_FW_MINOR, SKL_BLD_NUM)
-MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
-
-#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
- BXT_HUC_FW_MINOR, BXT_BLD_NUM)
-MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
-
-#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
- KBL_HUC_FW_MINOR, KBL_BLD_NUM)
-MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
-
-#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
- GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-
-/**
- * intel_huc_select_fw() - selects HuC firmware for loading
- * @huc: intel_huc struct
- */
-void intel_huc_select_fw(struct intel_huc *huc)
+void intel_huc_init_early(struct intel_huc *huc)
{
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
-
- intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
-
- if (i915_modparams.huc_firmware_path) {
- huc->fw.path = i915_modparams.huc_firmware_path;
- huc->fw.major_ver_wanted = 0;
- huc->fw.minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- huc->fw.path = I915_SKL_HUC_UCODE;
- huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- huc->fw.path = I915_BXT_HUC_UCODE;
- huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- huc->fw.path = I915_KBL_HUC_UCODE;
- huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- huc->fw.path = I915_GLK_HUC_UCODE;
- huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
- } else {
- DRM_ERROR("No HuC firmware known for platform with HuC!\n");
- return;
- }
-}
-
-/**
- * huc_ucode_xfer() - DMA's the firmware
- * @dev_priv: the drm_i915_private device
- *
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Return: 0 on success, non-zero on failure
- */
-static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
-{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
- unsigned long offset = 0;
- u32 size;
- int ret;
-
- GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-
- /* Set the source address for the uCode */
- offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
-
- /* Hardware doesn't look at destination address for HuC. Set it to 0,
- * but still program the correct address space.
- */
- I915_WRITE(DMA_ADDR_1_LOW, 0);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- size = huc_fw->header_size + huc_fw->ucode_size;
- I915_WRITE(DMA_COPY_SIZE, size);
-
- /* Start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
-
- /* Wait for DMA to finish */
- ret = wait_for((I915_READ(DMA_CTRL) & START_DMA) == 0, 100);
-
- DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
-
- /* Disable the bits once DMA is over */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
- return ret;
-}
-
-/**
- * intel_huc_init_hw() - load HuC uCode to device
- * @huc: intel_huc structure
- *
- * Called from guc_setup() during driver loading and also after a GPU reset.
- * Be note that HuC loading must be done before GuC loading.
- *
- * The firmware image should have already been fetched into memory by the
- * earlier call to intel_huc_init(), so here we need only check that
- * is succeeded, and then transfer the image to the h/w.
- *
- */
-void intel_huc_init_hw(struct intel_huc *huc)
-{
- intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
+ intel_huc_fw_init_early(huc);
}
/**
@@ -191,42 +43,51 @@ void intel_huc_init_hw(struct intel_huc *huc)
* signature through intel_guc_auth_huc(). It then waits for 50ms for
* firmware verification ACK and unpins the object.
*/
-void intel_huc_auth(struct intel_huc *huc)
+int intel_huc_auth(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_i915(huc);
struct intel_guc *guc = &i915->guc;
struct i915_vma *vma;
+ u32 status;
int ret;
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return;
+ return -ENOEXEC;
vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
- DRM_ERROR("failed to pin huc fw object %d\n",
- (int)PTR_ERR(vma));
- return;
+ ret = PTR_ERR(vma);
+ DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
+ goto fail;
}
ret = intel_guc_auth_huc(guc,
guc_ggtt_offset(vma) + huc->fw.rsa_offset);
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
- goto out;
+ goto fail_unpin;
}
/* Check authentication status, it should be done by now */
- ret = intel_wait_for_register(i915,
- HUC_STATUS2,
- HUC_FW_VERIFIED,
- HUC_FW_VERIFIED,
- 50);
+ ret = __intel_wait_for_register(i915,
+ HUC_STATUS2,
+ HUC_FW_VERIFIED,
+ HUC_FW_VERIFIED,
+ 2, 50, &status);
if (ret) {
- DRM_ERROR("HuC: Authentication failed %d\n", ret);
- goto out;
+ DRM_ERROR("HuC: Firmware not verified %#x\n", status);
+ goto fail_unpin;
}
-out:
i915_vma_unpin(vma);
+ return 0;
+
+fail_unpin:
+ i915_vma_unpin(vma);
+fail:
+ huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
+
+ DRM_ERROR("HuC: Authentication failed %d\n", ret);
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index aaa38b9..5d6e804 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -26,6 +26,7 @@
#define _INTEL_HUC_H_
#include "intel_uc_fw.h"
+#include "intel_huc_fw.h"
struct intel_huc {
/* Generic uC firmware management */
@@ -34,8 +35,7 @@ struct intel_huc {
/* HuC-specific additions */
};
-void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_init_hw(struct intel_huc *huc);
-void intel_huc_auth(struct intel_huc *huc);
+void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_auth(struct intel_huc *huc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
new file mode 100644
index 0000000..c66afa9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -0,0 +1,166 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "intel_huc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: HuC Firmware
+ *
+ * Motivation:
+ * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can use the firmware
+ * capabilities by adding HuC specific commands to batch buffers.
+ *
+ * Implementation:
+ * The same firmware loader is used as the GuC. However, the actual
+ * loading to HW is deferred until GEM initialization is done.
+ *
+ * Note that HuC firmware loading must be done before GuC loading.
+ */
+
+#define BXT_HUC_FW_MAJOR 01
+#define BXT_HUC_FW_MINOR 07
+#define BXT_BLD_NUM 1398
+
+#define SKL_HUC_FW_MAJOR 01
+#define SKL_HUC_FW_MINOR 07
+#define SKL_BLD_NUM 1398
+
+#define KBL_HUC_FW_MAJOR 02
+#define KBL_HUC_FW_MINOR 00
+#define KBL_BLD_NUM 1810
+
+#define HUC_FW_PATH(platform, major, minor, bld_num) \
+ "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
+ __stringify(minor) "_" __stringify(bld_num) ".bin"
+
+#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
+ SKL_HUC_FW_MINOR, SKL_BLD_NUM)
+MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
+
+#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
+ BXT_HUC_FW_MINOR, BXT_BLD_NUM)
+MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
+
+#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
+ KBL_HUC_FW_MINOR, KBL_BLD_NUM)
+MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
+
+static void huc_fw_select(struct intel_uc_fw *huc_fw)
+{
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
+
+ GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
+
+ if (!HAS_HUC(dev_priv))
+ return;
+
+ if (i915_modparams.huc_firmware_path) {
+ huc_fw->path = i915_modparams.huc_firmware_path;
+ huc_fw->major_ver_wanted = 0;
+ huc_fw->minor_ver_wanted = 0;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ huc_fw->path = I915_SKL_HUC_UCODE;
+ huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
+ } else if (IS_BROXTON(dev_priv)) {
+ huc_fw->path = I915_BXT_HUC_UCODE;
+ huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
+ } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ huc_fw->path = I915_KBL_HUC_UCODE;
+ huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
+ } else {
+ DRM_WARN("%s: No firmware known for this platform!\n",
+ intel_uc_fw_type_repr(huc_fw->type));
+ }
+}
+
+/**
+ * intel_huc_fw_init_early() - initializes HuC firmware struct
+ * @huc: intel_huc struct
+ *
+ * On platforms with a HuC, this selects the firmware to be uploaded.
+ */
+void intel_huc_fw_init_early(struct intel_huc *huc)
+{
+ struct intel_uc_fw *huc_fw = &huc->fw;
+
+ intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC);
+ huc_fw_select(huc_fw);
+}
+
+/**
+ * huc_fw_xfer() - DMA's the firmware
+ * @huc_fw: the firmware descriptor
+ * @vma: the firmware image (bound into the GGTT)
+ *
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
+{
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ unsigned long offset = 0;
+ u32 size;
+ int ret;
+
+ GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* Set the source address for the uCode */
+ offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
+ I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+ I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+ /* Hardware doesn't look at destination address for HuC. Set it to 0,
+ * but still program the correct address space.
+ */
+ I915_WRITE(DMA_ADDR_1_LOW, 0);
+ I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ size = huc_fw->header_size + huc_fw->ucode_size;
+ I915_WRITE(DMA_COPY_SIZE, size);
+
+ /* Start the DMA */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
+
+ /* Wait for DMA to finish */
+ ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
+
+ DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
+
+ /* Disable the bits once DMA is over */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+/**
+ * intel_huc_fw_upload() - load HuC uCode to device
+ * @huc: intel_huc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset. Note that HuC must be loaded before GuC.
+ *
+ * The firmware image should have already been fetched into memory by the
+ * earlier call to intel_uc_init_fw(), so here we need only check that the
+ * fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_huc_fw_upload(struct intel_huc *huc)
+{
+ return intel_uc_fw_upload(&huc->fw, huc_fw_xfer);
+}
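
Editor's sketch (illustrative, not part of the patch): the intended call order
for the new entry points, pieced together from the kernel-doc in this file and
in intel_huc.c; the intel_uc_* callers are the ones those comments reference,
not something added by this hunk.

	intel_huc_init_early(&i915->huc);	/* select firmware path/version */
	intel_uc_init_fw(i915);			/* fetch the blob into memory */
	intel_huc_fw_upload(&i915->huc);	/* DMA it to the HuC, before GuC load */
	ret = intel_huc_auth(&i915->huc);	/* GuC authenticates the HuC image */
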
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.h b/drivers/gpu/drm/i915/intel_huc_fw.h
new file mode 100644
index 0000000..8a00a0e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_huc_fw.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_HUC_FW_H_
+#define _INTEL_HUC_FW_H_
+
+struct intel_huc;
+
+void intel_huc_fw_init_early(struct intel_huc *huc);
+int intel_huc_fw_upload(struct intel_huc *huc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 49fdf09..e687550 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -30,6 +30,7 @@
#include <linux/i2c-algo-bit.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include <drm/drm_hdcp.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -75,11 +76,22 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
[GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
};
+static const struct gmbus_pin gmbus_pins_icp[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOA },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOB },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOC },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOD },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOE },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOF },
+};
+
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_ICP(dev_priv))
+ return &gmbus_pins_icp[pin];
+ else if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
else if (IS_GEN9_LP(dev_priv))
return &gmbus_pins_bxt[pin];
@@ -96,7 +108,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_ICP(dev_priv))
+ size = ARRAY_SIZE(gmbus_pins_icp);
+ else if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);
else if (IS_GEN9_LP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
@@ -128,22 +142,46 @@ intel_i2c_reset(struct drm_i915_private *dev_priv)
I915_WRITE(GMBUS4, 0);
}
-static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
{
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev_priv))
- return;
-
val = I915_READ(DSPCLK_GATE_D);
- if (enable)
- val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ if (!enable)
+ val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
- val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
}
+static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ u32 val;
+
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ if (!enable)
+ val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+}
+
+static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ u32 val;
+
+ val = I915_READ(GEN9_CLKGATE_DIS_4);
+ if (!enable)
+ val |= BXT_GMBUS_GATING_DIS;
+ else
+ val &= ~BXT_GMBUS_GATING_DIS;
+ I915_WRITE(GEN9_CLKGATE_DIS_4, val);
+}
+
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
@@ -221,7 +259,10 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
struct drm_i915_private *dev_priv = bus->dev_priv;
intel_i2c_reset(dev_priv);
- intel_i2c_quirk_set(dev_priv, true);
+
+ if (IS_PINEVIEW(dev_priv))
+ pnv_gmbus_clock_gating(dev_priv, false);
+
set_data(bus, 1);
set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
@@ -238,7 +279,9 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
set_data(bus, 1);
set_clock(bus, 1);
- intel_i2c_quirk_set(dev_priv, false);
+
+ if (IS_PINEVIEW(dev_priv))
+ pnv_gmbus_clock_gating(dev_priv, true);
}
static void
@@ -373,7 +416,8 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
static int
gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
- unsigned short addr, u8 *buf, unsigned int len)
+ unsigned short addr, u8 *buf, unsigned int len,
+ u32 gmbus1_index)
{
unsigned int chunk_size = len;
u32 val, loop;
@@ -386,7 +430,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GMBUS3, val);
I915_WRITE_FW(GMBUS1,
- GMBUS_CYCLE_WAIT |
+ gmbus1_index | GMBUS_CYCLE_WAIT |
(chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
@@ -409,7 +453,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
}
static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
{
u8 *buf = msg->buf;
unsigned int tx_size = msg->len;
@@ -419,7 +464,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
do {
len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
- ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
+ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
+ gmbus1_index);
if (ret)
return ret;
@@ -431,21 +477,21 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
}
/*
- * The gmbus controller can combine a 1 or 2 byte write with a read that
- * immediately follows it by using an "INDEX" cycle.
+ * The gmbus controller can combine a 1 or 2 byte write with another read/write
+ * that immediately follows it by using an "INDEX" cycle.
*/
static bool
-gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
{
return (i + 1 < num &&
msgs[i].addr == msgs[i + 1].addr &&
!(msgs[i].flags & I2C_M_RD) &&
(msgs[i].len == 1 || msgs[i].len == 2) &&
- (msgs[i + 1].flags & I2C_M_RD));
+ msgs[i + 1].len > 0);
}
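
Editor's example (illustrative, not part of the patch): a message pair that now
qualifies as an index transfer is a short offset write immediately followed by
a read or a write to the same address. The HDCP BKSV read added elsewhere in
this series is one such pair; the raw values below are the drm_hdcp.h constants
that code relies on (DDC address 0x3a, BKSV offset 0x00, 5-byte KSV).

	u8 offset = 0x00;
	u8 bksv[5];
	struct i2c_msg msgs[] = {
		{ .addr = 0x3a, .flags = 0,        .len = 1, .buf = &offset },
		{ .addr = 0x3a, .flags = I2C_M_RD, .len = 5, .buf = bksv },
	};
	/* gmbus_is_index_xfer(msgs, 0, ARRAY_SIZE(msgs)) is true, so GMBUS
	 * programs the offset and the read back-to-back in one INDEX cycle. */
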
static int
-gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
{
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
@@ -462,7 +508,10 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
if (gmbus5)
I915_WRITE_FW(GMBUS5, gmbus5);
- ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+ if (msgs[1].flags & I2C_M_RD)
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+ else
+ ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
@@ -472,7 +521,8 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
}
static int
-do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
+ u32 gmbus0_source)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
@@ -481,18 +531,25 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int i = 0, inc, try = 0;
int ret = 0;
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+ if (IS_GEN9_LP(dev_priv))
+ bxt_gmbus_clock_gating(dev_priv, false);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ pch_gmbus_clock_gating(dev_priv, false);
+
retry:
- I915_WRITE_FW(GMBUS0, bus->reg0);
+ I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0);
for (; i < num; i += inc) {
inc = 1;
- if (gmbus_is_index_read(msgs, i, num)) {
- ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
- inc = 2; /* an index read is two msgs */
+ if (gmbus_is_index_xfer(msgs, i, num)) {
+ ret = gmbus_index_xfer(dev_priv, &msgs[i]);
+ inc = 2; /* an index transmission is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
- ret = gmbus_xfer_write(dev_priv, &msgs[i]);
+ ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
}
if (!ret)
@@ -582,6 +639,13 @@ timeout:
ret = -EAGAIN;
out:
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+ if (IS_GEN9_LP(dev_priv))
+ bxt_gmbus_clock_gating(dev_priv, true);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ pch_gmbus_clock_gating(dev_priv, true);
+
return ret;
}
@@ -600,7 +664,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
if (ret < 0)
bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY;
} else {
- ret = do_gmbus_xfer(adapter, msgs, num);
+ ret = do_gmbus_xfer(adapter, msgs, num, 0);
if (ret == -EAGAIN)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
@@ -610,6 +674,45 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
return ret;
}
+int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ int ret;
+ u8 cmd = DRM_HDCP_DDC_AKSV;
+ u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = 0,
+ .len = sizeof(cmd),
+ .buf = &cmd,
+ },
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ mutex_lock(&dev_priv->gmbus_mutex);
+
+ /*
+ * In order to output Aksv to the receiver, use an indexed write to
+ * pass the i2c command, and tell GMBUS to use the HW-provided value
+ * instead of sourcing GMBUS3 for the data.
+ */
+ ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
+
+ mutex_unlock(&dev_priv->gmbus_mutex);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
+ return ret;
+}
+
static u32 gmbus_func(struct i2c_adapter *adapter)
{
return i2c_bit_algo.functionality(adapter) &
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 5809b29..6269750 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -74,7 +74,6 @@
static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
{
- int ret;
struct drm_device *dev = &dev_priv->drm;
struct platform_device_info pinfo = {};
struct resource *rsc;
@@ -119,24 +118,19 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
spin_lock_init(&pdata->lpe_audio_slock);
platdev = platform_device_register_full(&pinfo);
+ kfree(rsc);
+ kfree(pdata);
+
if (IS_ERR(platdev)) {
- ret = PTR_ERR(platdev);
DRM_ERROR("Failed to allocate LPE audio platform device\n");
- goto err;
+ return platdev;
}
- kfree(rsc);
-
pm_runtime_forbid(&platdev->dev);
pm_runtime_set_active(&platdev->dev);
pm_runtime_enable(&platdev->dev);
return platdev;
-
-err:
- kfree(rsc);
- kfree(pdata);
- return ERR_PTR(ret);
}
static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e71a8cd..697af5a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -136,6 +136,8 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_render_state.h"
+#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
@@ -153,64 +155,12 @@
#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
#define GEN8_CTX_STATUS_COMPLETED_MASK \
- (GEN8_CTX_STATUS_ACTIVE_IDLE | \
- GEN8_CTX_STATUS_PREEMPTED | \
- GEN8_CTX_STATUS_ELEMENT_SWITCH)
-
-#define CTX_LRI_HEADER_0 0x01
-#define CTX_CONTEXT_CONTROL 0x02
-#define CTX_RING_HEAD 0x04
-#define CTX_RING_TAIL 0x06
-#define CTX_RING_BUFFER_START 0x08
-#define CTX_RING_BUFFER_CONTROL 0x0a
-#define CTX_BB_HEAD_U 0x0c
-#define CTX_BB_HEAD_L 0x0e
-#define CTX_BB_STATE 0x10
-#define CTX_SECOND_BB_HEAD_U 0x12
-#define CTX_SECOND_BB_HEAD_L 0x14
-#define CTX_SECOND_BB_STATE 0x16
-#define CTX_BB_PER_CTX_PTR 0x18
-#define CTX_RCS_INDIRECT_CTX 0x1a
-#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
-#define CTX_LRI_HEADER_1 0x21
-#define CTX_CTX_TIMESTAMP 0x22
-#define CTX_PDP3_UDW 0x24
-#define CTX_PDP3_LDW 0x26
-#define CTX_PDP2_UDW 0x28
-#define CTX_PDP2_LDW 0x2a
-#define CTX_PDP1_UDW 0x2c
-#define CTX_PDP1_LDW 0x2e
-#define CTX_PDP0_UDW 0x30
-#define CTX_PDP0_LDW 0x32
-#define CTX_LRI_HEADER_2 0x41
-#define CTX_R_PWR_CLK_STATE 0x42
-#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
-
-#define CTX_REG(reg_state, pos, reg, val) do { \
- (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
- (reg_state)[(pos)+1] = (val); \
-} while (0)
-
-#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
- const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
- reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
- reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
-} while (0)
-
-#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
- reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
- reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
-} while (0)
-
-#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
-#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
-#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19
+ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-#define PREEMPT_ID 0x1
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
@@ -219,35 +169,21 @@ static void execlists_init_reg_state(u32 *reg_state,
struct intel_engine_cs *engine,
struct intel_ring *ring);
-/**
- * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
- * @dev_priv: i915 device private
- * @enable_execlists: value of i915.enable_execlists module parameter.
- *
- * Only certain platforms support Execlists (the prerequisites being
- * support for Logical Ring Contexts and Aliasing PPGTT or better).
- *
- * Return: 1 if Execlists is supported and has to be enabled.
- */
-int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
- /* On platforms with execlist available, vGPU will only
- * support execlist mode, no ring buffer mode.
- */
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
- return 1;
-
- if (INTEL_GEN(dev_priv) >= 9)
- return 1;
-
- if (enable_execlists == 0)
- return 0;
+ return rb_entry(rb, struct i915_priolist, node);
+}
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
- USES_PPGTT(dev_priv))
- return 1;
+static inline int rq_prio(const struct i915_request *rq)
+{
+ return rq->priotree.priority;
+}
- return 0;
+static inline bool need_preempt(const struct intel_engine_cs *engine,
+ const struct i915_request *last,
+ int prio)
+{
+ return engine->i915->preempt_context && prio > max(rq_prio(last), 0);
}
/**
@@ -268,6 +204,18 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl
* bits 32-52: ctx ID, a globally unique tag
* bits 53-54: mbz, reserved for use by hardware
* bits 55-63: group ID, currently unused and set to 0
+ *
+ * Starting from Gen11, the upper dword of the descriptor has a new format:
+ *
+ * bits 32-36: reserved
+ * bits 37-47: SW context ID
+ * bits 48-53: engine instance
+ * bit 54: mbz, reserved for use by hardware
+ * bits 55-60: SW counter
+ * bits 61-63: engine class
+ *
+ * engine info, SW context ID and SW counter need to form a unique number
+ * (Context ID) per lrc.
*/
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
@@ -276,12 +224,32 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
struct intel_context *ce = &ctx->engine[engine->id];
u64 desc;
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
+ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
+ BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
desc = ctx->desc_template; /* bits 0-11 */
+ GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
+
desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
/* bits 12-31 */
- desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
+ GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
+
+ if (INTEL_GEN(ctx->i915) >= 11) {
+ GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
+ desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
+ /* bits 37-47 */
+
+ desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
+ /* bits 48-53 */
+
+ /* TODO: decide what to do with SW counter (bits 55-60) */
+
+ desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
+ /* bits 61-63 */
+ } else {
+ GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
+ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
+ }
ce->lrc_desc = desc;
}
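
Editor's worked example (illustrative, not part of the patch): on Gen11, a
context with hw_id 5 scheduled on engine instance 1 of class 1 would fill the
new upper-dword fields as

	desc |= (u64)5 << GEN11_SW_CTX_ID_SHIFT;	/* bits 37-47 */
	desc |= (u64)1 << GEN11_ENGINE_INSTANCE_SHIFT;	/* bits 48-53 */
	desc |= (u64)1 << GEN11_ENGINE_CLASS_SHIFT;	/* bits 61-63 */

giving an upper dword of 0x200100a0; the SW counter in bits 55-60 stays 0 for
now, matching the TODO above.
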
@@ -305,7 +273,7 @@ find_priolist:
parent = &execlists->queue.rb_node;
while (*parent) {
rb = *parent;
- p = rb_entry(rb, typeof(*p), node);
+ p = to_priolist(rb);
if (prio > p->priority) {
parent = &rb->rb_left;
} else if (prio < p->priority) {
@@ -345,18 +313,18 @@ find_priolist:
if (first)
execlists->first = &p->node;
- return ptr_pack_bits(p, first, 1);
+ return p;
}
-static void unwind_wa_tail(struct drm_i915_gem_request *rq)
+static void unwind_wa_tail(struct i915_request *rq)
{
rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
assert_ring_tail_valid(rq->ring, rq->tail);
}
-static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *rq, *rn;
+ struct i915_request *rq, *rn;
struct i915_priolist *uninitialized_var(p);
int last_prio = I915_PRIORITY_INVALID;
@@ -365,29 +333,35 @@ static void unwind_incomplete_requests(struct intel_engine_cs *engine)
list_for_each_entry_safe_reverse(rq, rn,
&engine->timeline->requests,
link) {
- if (i915_gem_request_completed(rq))
+ if (i915_request_completed(rq))
return;
- __i915_gem_request_unsubmit(rq);
+ __i915_request_unsubmit(rq);
unwind_wa_tail(rq);
- GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
- if (rq->priotree.priority != last_prio) {
- p = lookup_priolist(engine,
- &rq->priotree,
- rq->priotree.priority);
- p = ptr_mask_bits(p, 1);
-
- last_prio = rq->priotree.priority;
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != last_prio) {
+ last_prio = rq_prio(rq);
+ p = lookup_priolist(engine, &rq->priotree, last_prio);
}
list_add(&rq->priotree.link, &p->requests);
}
}
+void
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
+{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ spin_lock_irq(&engine->timeline->lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->timeline->lock);
+}
+
static inline void
-execlists_context_status_change(struct drm_i915_gem_request *rq,
- unsigned long status)
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
/*
* Only used when GVT-g is enabled now. When GVT-g is disabled,
@@ -400,6 +374,20 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
status, rq);
}
+static inline void
+execlists_context_schedule_in(struct i915_request *rq)
+{
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(rq->engine);
+}
+
+static inline void
+execlists_context_schedule_out(struct i915_request *rq)
+{
+ intel_engine_context_out(rq->engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+}
+
static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
@@ -409,7 +397,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
-static u64 execlists_update_context(struct drm_i915_gem_request *rq)
+static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
struct i915_hw_ppgtt *ppgtt =
@@ -429,21 +417,31 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
return ce->lrc_desc;
}
-static inline void elsp_write(u64 desc, u32 __iomem *elsp)
+static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
{
- writel(upper_32_bits(desc), elsp);
- writel(lower_32_bits(desc), elsp);
+ if (execlists->ctrl_reg) {
+ writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
+ writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ } else {
+ writel(upper_32_bits(desc), execlists->submit_reg);
+ writel(lower_32_bits(desc), execlists->submit_reg);
+ }
}
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlists.port;
- u32 __iomem *elsp =
- engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
unsigned int n;
- for (n = execlists_num_ports(&engine->execlists); n--; ) {
- struct drm_i915_gem_request *rq;
+ /*
+ * ELSQ note: the submit queue is not cleared after being submitted
+ * to the HW so we need to make sure we always clean it up. This is
+ * currently ensured by the fact that we always write the same number
+ * of elsq entries, keep this in mind before changing the loop below.
+ */
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct i915_request *rq;
unsigned int count;
u64 desc;
@@ -451,17 +449,29 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
if (rq) {
GEM_BUG_ON(count > !n);
if (!count++)
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ execlists_context_schedule_in(rq);
port_set(&port[n], port_pack(rq, count));
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
+
+ GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x, prio=%d\n",
+ engine->name, n,
+ port[n].context_id, count,
+ rq->global_seqno,
+ rq_prio(rq));
} else {
GEM_BUG_ON(!n);
desc = 0;
}
- elsp_write(desc, elsp);
+ write_desc(execlists, desc, n);
}
+
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
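The ports above store the request pointer and a small submission count in a single word via port_pack()/port_unpack() (defined elsewhere in the driver). A rough sketch of that idea, assuming requests are at least 4-byte aligned so the two low pointer bits are free; the sketch_ names are illustrative, not the driver's helpers.

#define SKETCH_COUNT_BITS	GENMASK(1, 0)	/* low bits free due to alignment */

static inline void *sketch_port_pack(struct i915_request *rq, unsigned int count)
{
	GEM_BUG_ON(count & ~SKETCH_COUNT_BITS);
	return (void *)((unsigned long)rq | count);
}

static inline struct i915_request *sketch_port_unpack(void *packed, unsigned int *count)
{
	*count = (unsigned long)packed & SKETCH_COUNT_BITS;
	return (struct i915_request *)((unsigned long)packed & ~SKETCH_COUNT_BITS);
}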
static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
@@ -482,42 +492,47 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
return true;
}
-static void port_assign(struct execlist_port *port,
- struct drm_i915_gem_request *rq)
+static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
GEM_BUG_ON(rq == port_request(port));
if (port_isset(port))
- i915_gem_request_put(port_request(port));
+ i915_request_put(port_request(port));
- port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
+ port_set(port, port_pack(i915_request_get(rq), port_count(port)));
}
static void inject_preempt_context(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists *execlists = &engine->execlists;
struct intel_context *ce =
&engine->i915->preempt_context->engine[engine->id];
- u32 __iomem *elsp =
- engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
- GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
- GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
+ GEM_BUG_ON(execlists->preempt_complete_status !=
+ upper_32_bits(ce->lrc_desc));
+ GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] &
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+ CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) !=
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+ CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT));
- memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
- ce->ring->tail += WA_TAIL_BYTES;
- ce->ring->tail &= (ce->ring->size - 1);
- ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
+ /*
+ * Switch to our empty preempt context so
+ * the state of the GPU is known (idle).
+ */
+ GEM_TRACE("%s\n", engine->name);
+ for (n = execlists_num_ports(execlists); --n; )
+ write_desc(execlists, 0, n);
- for (n = execlists_num_ports(&engine->execlists); --n; )
- elsp_write(0, elsp);
+ write_desc(execlists, ce->lrc_desc, n);
- elsp_write(ce->lrc_desc, elsp);
-}
+ /* we need to manually load the submit queue */
+ if (execlists->ctrl_reg)
+ writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-static bool can_preempt(struct intel_engine_cs *engine)
-{
- return INTEL_INFO(engine->i915)->has_logical_ring_preemption;
+ execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
+ execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
}
static void execlists_dequeue(struct intel_engine_cs *engine)
@@ -526,7 +541,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct execlist_port *port = execlists->port;
const struct execlist_port * const last_port =
&execlists->port[execlists->port_mask];
- struct drm_i915_gem_request *last = port_request(port);
+ struct i915_request *last = port_request(port);
struct rb_node *rb;
bool submit = false;
@@ -554,8 +569,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
spin_lock_irq(&engine->timeline->lock);
rb = execlists->first;
GEM_BUG_ON(rb_first(&execlists->queue) != rb);
- if (!rb)
- goto unlock;
if (last) {
/*
@@ -564,58 +577,63 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* know the next preemption status we see corresponds
* to this ELSP update.
*/
+ GEM_BUG_ON(!port_count(&port[0]));
if (port_count(&port[0]) > 1)
goto unlock;
- if (can_preempt(engine) &&
- rb_entry(rb, struct i915_priolist, node)->priority >
- max(last->priotree.priority, 0)) {
- /*
- * Switch to our empty preempt context so
- * the state of the GPU is known (idle).
- */
+ /*
+ * If we write to ELSP a second time before the HW has had
+ * a chance to respond to the previous write, we can confuse
+ * the HW and hit "undefined behaviour". After writing to ELSP,
+ * we must then wait until we see a context-switch event from
+ * the HW to indicate that it has had a chance to respond.
+ */
+ if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
+ goto unlock;
+
+ if (need_preempt(engine, last, execlists->queue_priority)) {
inject_preempt_context(engine);
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT);
goto unlock;
- } else {
- /*
- * In theory, we could coalesce more requests onto
- * the second port (the first port is active, with
- * no preemptions pending). However, that means we
- * then have to deal with the possible lite-restore
- * of the second port (as we submit the ELSP, there
- * may be a context-switch) but also we may complete
- * the resubmission before the context-switch. Ergo,
- * coalescing onto the second port will cause a
- * preemption event, but we cannot predict whether
- * that will affect port[0] or port[1].
- *
- * If the second port is already active, we can wait
- * until the next context-switch before contemplating
- * new requests. The GPU will be busy and we should be
- * able to resubmit the new ELSP before it idles,
- * avoiding pipeline bubbles (momentary pauses where
- * the driver is unable to keep up the supply of new
- * work).
- */
- if (port_count(&port[1]))
- goto unlock;
-
- /* WaIdleLiteRestore:bdw,skl
- * Apply the wa NOOPs to prevent
- * ring:HEAD == req:TAIL as we resubmit the
- * request. See gen8_emit_breadcrumb() for
- * where we prepare the padding after the
- * end of the request.
- */
- last->tail = last->wa_tail;
}
+
+ /*
+ * In theory, we could coalesce more requests onto
+ * the second port (the first port is active, with
+ * no preemptions pending). However, that means we
+ * then have to deal with the possible lite-restore
+ * of the second port (as we submit the ELSP, there
+ * may be a context-switch) but also we may complete
+ * the resubmission before the context-switch. Ergo,
+ * coalescing onto the second port will cause a
+ * preemption event, but we cannot predict whether
+ * that will affect port[0] or port[1].
+ *
+ * If the second port is already active, we can wait
+ * until the next context-switch before contemplating
+ * new requests. The GPU will be busy and we should be
+ * able to resubmit the new ELSP before it idles,
+ * avoiding pipeline bubbles (momentary pauses where
+ * the driver is unable to keep up the supply of new
+ * work). However, we have to double check that the
+ * priorities of the ports haven't been switched.
+ */
+ if (port_count(&port[1]))
+ goto unlock;
+
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ * Apply the wa NOOPs to prevent
+ * ring:HEAD == rq:TAIL as we resubmit the
+ * request. See gen8_emit_breadcrumb() for
+ * where we prepare the padding after the
+ * end of the request.
+ */
+ last->tail = last->wa_tail;
}
- do {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- struct drm_i915_gem_request *rq, *rn;
+ while (rb) {
+ struct i915_priolist *p = to_priolist(rb);
+ struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
/*
@@ -665,8 +683,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
INIT_LIST_HEAD(&rq->priotree.link);
- __i915_gem_request_submit(rq);
- trace_i915_gem_request_in(rq, port_index(port, execlists));
+ __i915_request_submit(rq);
+ trace_i915_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
@@ -676,11 +694,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
- } while (rb);
+ }
done:
+ execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
execlists->first = rb;
if (submit)
port_assign(port, last);
+
+ /* We must always keep the beast fed if we have work piled up */
+ GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+
unlock:
spin_unlock_irq(&engine->timeline->lock);
@@ -688,20 +711,29 @@ unlock:
execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
execlists_submit_ports(engine);
}
+
+ GEM_BUG_ON(port_isset(execlists->port) &&
+ !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
}
-static void
-execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+void
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
while (num_ports-- && port_isset(port)) {
- struct drm_i915_gem_request *rq = port_request(port);
+ struct i915_request *rq = port_request(port);
GEM_BUG_ON(!execlists->active);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
- i915_gem_request_put(rq);
+ intel_engine_context_out(rq->engine);
+
+ execlists_context_status_change(rq,
+ i915_request_completed(rq) ?
+ INTEL_CONTEXT_SCHEDULE_OUT :
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED);
+
+ i915_request_put(rq);
memset(port, 0, sizeof(*port));
port++;
@@ -711,32 +743,50 @@ execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_gem_request *rq, *rn;
+ struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ local_irq_save(flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
- execlist_cancel_port_requests(execlists);
+ execlists_cancel_port_requests(execlists);
+
+ spin_lock(&engine->timeline->lock);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline->requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (!i915_gem_request_completed(rq))
+ if (!i915_request_completed(rq))
dma_fence_set_error(&rq->fence, -EIO);
}
/* Flush the queued requests to the timeline list (for retiring). */
rb = execlists->first;
while (rb) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ struct i915_priolist *p = to_priolist(rb);
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
INIT_LIST_HEAD(&rq->priotree.link);
dma_fence_set_error(&rq->fence, -EIO);
- __i915_gem_request_submit(rq);
+ __i915_request_submit(rq);
}
rb = rb_next(rb);
@@ -748,11 +798,13 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
-
+ execlists->queue_priority = INT_MIN;
execlists->queue = RB_ROOT;
execlists->first = NULL;
GEM_BUG_ON(port_isset(execlists->port));
+ spin_unlock(&engine->timeline->lock);
+
/*
* The port is checked prior to scheduling a tasklet, but
* just in case we have suspended the tasklet to do the
@@ -761,21 +813,26 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
*/
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ /* Mark all CS interrupts as complete */
+ execlists->active = 0;
+
+ local_irq_restore(flags);
}
/*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
-static void intel_lrc_irq_handler(unsigned long data)
+static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port * const port = execlists->port;
struct drm_i915_private *dev_priv = engine->i915;
+ bool fw = false;
- /* We can skip acquiring intel_runtime_pm_get() here as it was taken
+ /*
+ * We can skip acquiring intel_runtime_pm_get() here as it was taken
* on our behalf by the request (see i915_gem_mark_busy()) and it will
* not be relinquished until the device is idle (see
* i915_gem_idle_work_handler()). As a precaution, we make sure
@@ -784,9 +841,8 @@ static void intel_lrc_irq_handler(unsigned long data)
*/
GEM_BUG_ON(!dev_priv->gt.awake);
- intel_uncore_forcewake_get(dev_priv, execlists->fw_domains);
-
- /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
+ /*
+ * Prefer doing test_and_clear_bit() as a two stage operation to avoid
* imposing the cost of a locked atomic transaction when submitting a
* new request (outside of the context-switch interrupt).
*/
@@ -802,18 +858,17 @@ static void intel_lrc_irq_handler(unsigned long data)
execlists->csb_head = -1; /* force mmio read of CSB ptrs */
}
- /* The write will be ordered by the uncached read (itself
- * a memory barrier), so we do not need another in the form
- * of a locked instruction. The race between the interrupt
- * handler and the split test/clear is harmless as we order
- * our clear before the CSB read. If the interrupt arrived
- * first between the test and the clear, we read the updated
- * CSB and clear the bit. If the interrupt arrives as we read
- * the CSB or later (i.e. after we had cleared the bit) the bit
- * is set and we do a new loop.
- */
- __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ /* Clear before reading to catch new interrupts */
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ smp_mb__after_atomic();
+
if (unlikely(execlists->csb_head == -1)) { /* following a reset */
+ if (!fw) {
+ intel_uncore_forcewake_get(dev_priv,
+ execlists->fw_domains);
+ fw = true;
+ }
+
head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
tail = GEN8_CSB_WRITE_PTR(head);
head = GEN8_CSB_READ_PTR(head);
@@ -826,9 +881,13 @@ static void intel_lrc_irq_handler(unsigned long data)
head = execlists->csb_head;
tail = READ_ONCE(buf[write_idx]);
}
+ GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
+ engine->name,
+ head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
+ tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
while (head != tail) {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int status;
unsigned int count;
@@ -853,16 +912,31 @@ static void intel_lrc_irq_handler(unsigned long data)
*/
status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
+ GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+ engine->name, head,
+ status, buf[2*head + 1],
+ execlists->active);
+
+ if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
+ GEN8_CTX_STATUS_PREEMPTED))
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+ if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_HWACK);
+
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
- if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
- buf[2*head + 1] == PREEMPT_ID) {
- execlist_cancel_port_requests(execlists);
+ /* We should never get a COMPLETED | IDLE_ACTIVE! */
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
+
+ if (status & GEN8_CTX_STATUS_COMPLETE &&
+ buf[2*head + 1] == execlists->preempt_complete_status) {
+ GEM_TRACE("%s preempt-idle\n", engine->name);
- spin_lock_irq(&engine->timeline->lock);
- unwind_incomplete_requests(engine);
- spin_unlock_irq(&engine->timeline->lock);
+ execlists_cancel_port_requests(execlists);
+ execlists_unwind_incomplete_requests(execlists);
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_PREEMPT));
@@ -879,18 +953,28 @@ static void intel_lrc_irq_handler(unsigned long data)
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_USER));
+ rq = port_unpack(port, &count);
+ GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x, prio=%d\n",
+ engine->name,
+ port->context_id, count,
+ rq ? rq->global_seqno : 0,
+ rq ? rq_prio(rq) : 0);
+
/* Check the context/desc id for this event matches */
GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
- rq = port_unpack(port, &count);
GEM_BUG_ON(count == 0);
if (--count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
- GEM_BUG_ON(!i915_gem_request_completed(rq));
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ GEM_BUG_ON(port_isset(&port[1]) &&
+ !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
+ GEM_BUG_ON(!i915_request_completed(rq));
+ execlists_context_schedule_out(rq);
+ trace_i915_request_out(rq);
+ i915_request_put(rq);
- trace_i915_gem_request_out(rq);
- i915_gem_request_put(rq);
+ GEM_TRACE("%s completed ctx=%d\n",
+ engine->name, port->context_id);
execlists_port_complete(execlists, port);
} else {
@@ -915,21 +999,26 @@ static void intel_lrc_irq_handler(unsigned long data)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
execlists_dequeue(engine);
- intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+ if (fw)
+ intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
}
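The head/tail walk above treats the Context Status Buffer as a six-entry ring of quadword events. A stripped-down sketch of that consumption pattern (entry count hard-coded, helper purely illustrative):

/* Illustrative: advance head towards tail, wrapping at the 6-entry CSB. */
static u32 sketch_consume_csb(const u32 *buf, unsigned int head, unsigned int tail)
{
	u32 last_status = 0;

	while (head != tail) {
		if (++head == 6)		/* GEN8_CSB_ENTRIES */
			head = 0;

		/* each entry is a quadword: status flags, then context ID */
		last_status = buf[2 * head];
	}

	return last_status;
}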
-static void insert_request(struct intel_engine_cs *engine,
- struct i915_priotree *pt,
- int prio)
+static void queue_request(struct intel_engine_cs *engine,
+ struct i915_priotree *pt,
+ int prio)
{
- struct i915_priolist *p = lookup_priolist(engine, pt, prio);
+ list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests);
+}
- list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
- if (ptr_unmask_bits(p, 1))
- tasklet_hi_schedule(&engine->execlists.irq_tasklet);
+static void submit_queue(struct intel_engine_cs *engine, int prio)
+{
+ if (prio > engine->execlists.queue_priority) {
+ engine->execlists.queue_priority = prio;
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
}
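Usage-wise, queue_priority acts as a watermark: the tasklet is only kicked when a newly queued request outranks everything already known to be pending. A hypothetical call sequence (priority values illustrative):

/* engine idle: queue_priority == INT_MIN */
submit_queue(engine, 0);	/* 0 > INT_MIN -> tasklet scheduled   */
submit_queue(engine, -1);	/* -1 <= 0     -> no extra reschedule */
submit_queue(engine, 2);	/* 2 > 0       -> tasklet scheduled   */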
-static void execlists_submit_request(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
@@ -937,7 +1026,8 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- insert_request(engine, &request->priotree, request->priotree.priority);
+ queue_request(engine, &request->priotree, rq_prio(request));
+ submit_queue(engine, rq_prio(request));
GEM_BUG_ON(!engine->execlists.first);
GEM_BUG_ON(list_empty(&request->priotree.link));
@@ -945,9 +1035,9 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
+static struct i915_request *pt_to_request(struct i915_priotree *pt)
{
- return container_of(pt, struct drm_i915_gem_request, priotree);
+ return container_of(pt, struct i915_request, priotree);
}
static struct intel_engine_cs *
@@ -965,7 +1055,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
return engine;
}
-static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+static void execlists_schedule(struct i915_request *request, int prio)
{
struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
@@ -974,7 +1064,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (i915_gem_request_completed(request))
+ if (i915_request_completed(request))
return;
if (prio <= READ_ONCE(request->priotree.priority))
@@ -986,13 +1076,14 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
stack.signaler = &request->priotree;
list_add(&stack.dfs_link, &dfs);
- /* Recursively bump all dependent priorities to match the new request.
+ /*
+ * Recursively bump all dependent priorities to match the new request.
*
* A naive approach would be to use recursion:
* static void update_priorities(struct i915_priotree *pt, prio) {
* list_for_each_entry(dep, &pt->signalers_list, signal_link)
* update_priorities(dep->signal, prio)
- * insert_request(pt);
+ * queue_request(pt);
* }
* but that may have unlimited recursion depth and so runs a very
* real risk of overrunning the kernel stack. Instead, we build
@@ -1003,27 +1094,29 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
* end result is a topological list of requests in reverse order, the
* last element in the list is the request we must execute first.
*/
- list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
+ list_for_each_entry(dep, &dfs, dfs_link) {
struct i915_priotree *pt = dep->signaler;
- /* Within an engine, there can be no cycle, but we may
+ /*
+ * Within an engine, there can be no cycle, but we may
* refer to the same dependency chain multiple times
* (redundant dependencies are not eliminated) and across
* engines.
*/
list_for_each_entry(p, &pt->signalers_list, signal_link) {
- if (i915_gem_request_completed(pt_to_request(p->signaler)))
+ GEM_BUG_ON(p == dep); /* no cycles! */
+
+ if (i915_priotree_signaled(p->signaler))
continue;
GEM_BUG_ON(p->signaler->priority < pt->priority);
if (prio > READ_ONCE(p->signaler->priority))
list_move_tail(&p->dfs_link, &dfs);
}
-
- list_safe_reset_next(dep, p, dfs_link);
}
- /* If we didn't need to bump any existing priorities, and we haven't
+ /*
+ * If we didn't need to bump any existing priorities, and we haven't
* yet submitted this request (i.e. there is no potential race with
* execlists_submit_request()), we can set our own priority and skip
* acquiring the engine locks.
@@ -1053,19 +1146,42 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
pt->priority = prio;
if (!list_empty(&pt->link)) {
__list_del_entry(&pt->link);
- insert_request(engine, pt, prio);
+ queue_request(engine, pt, prio);
}
+ submit_queue(engine, prio);
}
spin_unlock_irq(&engine->timeline->lock);
}
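A minimal sketch of the worklist scheme described in the comment above, with simplified types standing in for i915_dependency/i915_priotree; it assumes the dependency graph is acyclic and that each edge's dfs_link starts out initialised.

struct edge { struct node *signaler; struct list_head signal_link, dfs_link; };
struct node { int prio; struct list_head signalers_list; /* of struct edge */ };

static void sketch_bump_priorities(struct node *request, int prio)
{
	struct edge stack = { .signaler = request }, *dep, *p;
	LIST_HEAD(dfs);

	list_add(&stack.dfs_link, &dfs);

	/*
	 * The list grows at its tail while being walked, so each reachable
	 * edge ends up on it exactly once (a re-encounter merely moves it
	 * further back) and no recursion is needed.
	 */
	list_for_each_entry(dep, &dfs, dfs_link)
		list_for_each_entry(p, &dep->signaler->signalers_list, signal_link)
			if (prio > p->signaler->prio)
				list_move_tail(&p->dfs_link, &dfs);

	/* dfs now ends with the deepest dependencies; bump them all. */
	list_for_each_entry_reverse(dep, &dfs, dfs_link)
		dep->signaler->prio = prio;
}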
+static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
+{
+ unsigned int flags;
+ int err;
+
+ /*
+ * Clear this page out of any CPU caches for coherent swap-in/out.
+ * We only want to do this on the first bind so that we do not stall
+ * on an active context (which by nature is already on the GPU).
+ */
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ if (err)
+ return err;
+ }
+
+ flags = PIN_GLOBAL | PIN_HIGH;
+ if (ctx->ggtt_offset_bias)
+ flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
+
+ return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
+}
+
static struct intel_ring *
execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
- unsigned int flags;
void *vaddr;
int ret;
@@ -1075,18 +1191,12 @@ execlists_context_pin(struct intel_engine_cs *engine,
goto out;
GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- if (!ce->state) {
- ret = execlists_context_deferred_alloc(ctx, engine);
- if (ret)
- goto err;
- }
+ ret = execlists_context_deferred_alloc(ctx, engine);
+ if (ret)
+ goto err;
GEM_BUG_ON(!ce->state);
- flags = PIN_GLOBAL | PIN_HIGH;
- if (ctx->ggtt_offset_bias)
- flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
-
- ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
+ ret = __context_pin(ctx, ce->state);
if (ret)
goto err;
@@ -1106,9 +1216,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
- ce->state->obj->mm.dirty = true;
ce->state->obj->pin_global++;
-
i915_gem_context_get(ctx);
out:
return ce->ring;
@@ -1142,11 +1250,10 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
i915_gem_context_put(ctx);
}
-static int execlists_request_alloc(struct drm_i915_gem_request *request)
+static int execlists_request_alloc(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_context *ce = &request->ctx->engine[engine->id];
- u32 *cs;
int ret;
GEM_BUG_ON(!ce->pin_count);
@@ -1157,17 +1264,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
- cs = intel_ring_begin(request, 0);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (!ce->initialised) {
- ret = engine->init_context(request);
- if (ret)
- return ret;
-
- ce->initialised = true;
- }
+ ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+ if (ret)
+ return ret;
/* Note that after this point, we have committed to using
* this request as it is being used to both track the
@@ -1328,6 +1427,40 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
+static u32 *
+gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
+{
+ int i;
+
+ /*
+ * WaPipeControlBefore3DStateSamplePattern: cnl
+ *
+ * Ensure the engine is idle prior to programming a
+ * 3DSTATE_SAMPLE_PATTERN during a context restore.
+ */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_CS_STALL,
+ 0);
+ /*
+ * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for
+ * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in
+ * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is
+ * confusing. Since gen8_emit_pipe_control() already advances the
+ * batch by 6 dwords, we advance the other 10 here, completing a
+ * cacheline. It's not clear if the workaround requires this padding
+ * before other commands, or if it's just the regular padding we would
+ * already have for the workaround bb, so leave it here for now.
+ */
+ for (i = 0; i < 10; i++)
+ *batch++ = MI_NOOP;
+
+ /* Pad to end of cacheline */
+ while ((unsigned long)batch % CACHELINE_BYTES)
+ *batch++ = MI_NOOP;
+
+ return batch;
+}
+
#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
@@ -1376,12 +1509,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
- if (WARN_ON(engine->id != RCS || !engine->scratch))
+ if (GEM_WARN_ON(engine->id != RCS))
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
case 10:
- return 0;
+ wa_bb_fn[0] = gen10_init_indirectctx_bb;
+ wa_bb_fn[1] = NULL;
+ break;
case 9:
wa_bb_fn[0] = gen9_init_indirectctx_bb;
wa_bb_fn[1] = NULL;
@@ -1411,7 +1546,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
*/
for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
wa_bb[i]->offset = batch_ptr - batch;
- if (WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, CACHELINE_BYTES))) {
+ if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
+ CACHELINE_BYTES))) {
ret = -EINVAL;
break;
}
@@ -1437,9 +1573,37 @@ static u8 gtiir[] = {
[VECS] = 3,
};
-static int gen8_init_common_ring(struct intel_engine_cs *engine)
+static void enable_execlists(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+
+ I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+
+ /*
+ * Make sure we're not enabling the new 12-deep CSB
+ * FIFO as that requires a slightly updated handling
+ * in the ctx switch irq. Since we're currently only
+ * using only 2 elements of the enhanced execlists the
+ * deeper FIFO it's not needed and it's not worth adding
+ * more statements to the irq handler to support it.
+ */
+ if (INTEL_GEN(dev_priv) >= 11)
+ I915_WRITE(RING_MODE_GEN7(engine),
+ _MASKED_BIT_DISABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ else
+ I915_WRITE(RING_MODE_GEN7(engine),
+ _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+
+ I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+ engine->status_page.ggtt_offset);
+ POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ engine->execlists.csb_head = -1;
+}
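Both RING_MODE_GEN7 writes above rely on the i915 masked-register convention, in which the upper 16 bits of the written value select which of the lower 16 bits the hardware actually updates. Roughly, the two helpers behave like the simplified sketch below (the real definitions live in i915_reg.h):

/* Simplified forms of the masked-write helpers used above. */
#define SKETCH_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a   */
#define SKETCH_MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */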
+
+static int gen8_init_common_ring(struct intel_engine_cs *engine)
+{
struct intel_engine_execlists * const execlists = &engine->execlists;
int ret;
@@ -1450,35 +1614,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
intel_engine_init_hangcheck(engine);
- I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
- I915_WRITE(RING_HWS_PGA(engine->mmio_base),
- engine->status_page.ggtt_offset);
- POSTING_READ(RING_HWS_PGA(engine->mmio_base));
-
- DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
-
- GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
-
- /*
- * Clear any pending interrupt state.
- *
- * We do it twice out of paranoia that some of the IIR are double
- * buffered, and if we only reset it once there may still be
- * an interrupt pending.
- */
- I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
- GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
- I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
- GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- execlists->csb_head = -1;
- execlists->active = 0;
+ enable_execlists(engine);
/* After a GPU reset, we may have requests to replay */
- if (!i915_modparams.enable_guc_submission && execlists->first)
- tasklet_schedule(&execlists->irq_tasklet);
+ if (execlists->first)
+ tasklet_schedule(&execlists->tasklet);
return 0;
}
@@ -1516,14 +1656,45 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
return init_workarounds_ring(engine);
}
+static void reset_irq(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int i;
+
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+ /*
+ * Clear any pending interrupt state.
+ *
+ * We do it twice out of paranoia that some of the IIR are double
+ * buffered, and if we only reset it once there may still be
+ * an interrupt pending.
+ */
+ for (i = 0; i < 2; i++) {
+ I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+ GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
+ POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
+ }
+ GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
+ (GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift));
+
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+}
+
static void reset_common_ring(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ GEM_TRACE("%s seqno=%x\n",
+ engine->name, request ? request->global_seqno : 0);
+
+ /* See execlists_cancel_requests() for the irq/spinlock split. */
+ local_irq_save(flags);
+
+ reset_irq(engine);
/*
* Catch up with any missed context-switch interrupts.
@@ -1534,14 +1705,20 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* guessing the missed context-switch events by looking at what
* requests were completed.
*/
- execlist_cancel_port_requests(execlists);
+ execlists_cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
- unwind_incomplete_requests(engine);
+ spin_lock(&engine->timeline->lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock(&engine->timeline->lock);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ /* Mark all CS interrupts as complete */
+ execlists->active = 0;
- /* If the request was innocent, we leave the request in the ELSP
+ local_irq_restore(flags);
+
+ /*
+ * If the request was innocent, we leave the request in the ELSP
* and will try to replay it on restarting. The context image may
* have been corrupted by the reset, in which case we may have
* to service a new GPU hang, but more likely we can continue on
@@ -1554,7 +1731,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
if (!request || request->fence.error != -EIO)
return;
- /* We want a simple context + ring to execute the breadcrumb update.
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
* so clear it and rebuild just what we need for the breadcrumb.
* All pending requests for this context will be zapped, and any
@@ -1577,15 +1755,15 @@ static void reset_common_ring(struct intel_engine_cs *engine,
unwind_wa_tail(request);
}
-static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+static int intel_logical_ring_emit_pdps(struct i915_request *rq)
{
- struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_engine_cs *engine = req->engine;
+ struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct intel_engine_cs *engine = rq->engine;
const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
u32 *cs;
int i;
- cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+ cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1600,12 +1778,12 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
-static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
+static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
@@ -1618,18 +1796,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit.*/
- if (req->ctx->ppgtt &&
- (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings) &&
- !i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
- !intel_vgpu_active(req->i915)) {
- ret = intel_logical_ring_emit_pdps(req);
+ if (rq->ctx->ppgtt &&
+ (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
+ !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+ !intel_vgpu_active(rq->i915)) {
+ ret = intel_logical_ring_emit_pdps(rq);
if (ret)
return ret;
- req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
+ rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
}
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1658,7 +1836,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
(flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1677,7 +1855,7 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
-static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
+static int gen8_emit_flush(struct i915_request *request, u32 mode)
{
u32 cmd, *cs;
@@ -1709,7 +1887,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
return 0;
}
-static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
+static int gen8_emit_flush_render(struct i915_request *request,
u32 mode)
{
struct intel_engine_cs *engine = request->engine;
@@ -1784,7 +1962,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
{
/* Ensure there's always at least one preemption point per-request. */
*cs++ = MI_ARB_CHECK;
@@ -1792,15 +1970,13 @@ static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
request->wa_tail = intel_ring_offset(request, cs);
}
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
- *cs++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
- *cs++ = 0;
- *cs++ = request->global_seqno;
+ cs = gen8_emit_ggtt_write(cs, request->global_seqno,
+ intel_hws_seqno_address(request->engine));
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
@@ -1810,24 +1986,13 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
}
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
-static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
- u32 *cs)
+static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
- /* w/a for post sync ops following a GPGPU operation we
- * need a prior CS_STALL, which is emitted by the flush
- * following the batch.
- */
- *cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_hws_seqno_address(request->engine);
- *cs++ = 0;
- *cs++ = request->global_seqno;
- /* We're thrashing one dword of HWS. */
- *cs++ = 0;
+ cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
+ intel_hws_seqno_address(request->engine));
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
@@ -1835,17 +2000,17 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
gen8_emit_wa_tail(request, cs);
}
-static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
+static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
-static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
+static int gen8_init_rcs_context(struct i915_request *rq)
{
int ret;
- ret = intel_ring_workarounds_emit(req);
+ ret = intel_ring_workarounds_emit(rq);
if (ret)
return ret;
- ret = intel_rcs_context_init_mocs(req);
+ ret = intel_rcs_context_init_mocs(rq);
/*
* Failing to program the MOCS is non-fatal. The system will not
* run at peak performance. So generate an error and carry on.
@@ -1853,7 +2018,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
- return i915_gem_render_state_emit(req);
+ return i915_gem_render_state_emit(rq);
}
/**
@@ -1868,8 +2033,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
* Tasklet cannot be active at this point due to intel_mark_active/idle
* so this is just for documentation.
*/
- if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state)))
- tasklet_kill(&engine->execlists.irq_tasklet);
+ if (WARN_ON(test_bit(TASKLET_STATE_SCHED,
+ &engine->execlists.tasklet.state)))
+ tasklet_kill(&engine->execlists.tasklet);
dev_priv = engine->i915;
@@ -1883,6 +2049,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
intel_engine_cleanup_common(engine);
lrc_destroy_wa_ctx(engine);
+
engine->i915 = NULL;
dev_priv->engine[engine->id] = NULL;
kfree(engine);
@@ -1893,7 +2060,18 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests;
engine->schedule = execlists_schedule;
- engine->execlists.irq_tasklet.func = intel_lrc_irq_handler;
+ engine->execlists.tasklet.func = execlists_submission_tasklet;
+
+ engine->park = NULL;
+ engine->unpark = NULL;
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+
+ engine->i915->caps.scheduler =
+ I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY;
+ if (engine->i915->preempt_context)
+ engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
}
static void
@@ -1914,8 +2092,17 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->set_default_submission = execlists_set_default_submission;
- engine->irq_enable = gen8_logical_ring_enable_irq;
- engine->irq_disable = gen8_logical_ring_disable_irq;
+ if (INTEL_GEN(engine->i915) < 11) {
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
+ } else {
+ /*
+ * TODO: On Gen11 interrupt masks need to be clear
+ * to allow C6 entry. Keep interrupts enabled at
+ * this point and take the hit of generating extra
+ * interrupts until a more refined solution exists.
+ */
+ }
engine->emit_bb_start = gen8_emit_bb_start;
}
@@ -1952,8 +2139,8 @@ logical_ring_setup(struct intel_engine_cs *engine)
engine->execlists.fw_domains = fw_domains;
- tasklet_init(&engine->execlists.irq_tasklet,
- intel_lrc_irq_handler, (unsigned long)engine);
+ tasklet_init(&engine->execlists.tasklet,
+ execlists_submission_tasklet, (unsigned long)engine);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
@@ -1967,6 +2154,21 @@ static int logical_ring_init(struct intel_engine_cs *engine)
if (ret)
goto error;
+ if (HAS_LOGICAL_RING_ELSQ(engine->i915)) {
+ engine->execlists.submit_reg = engine->i915->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
+ engine->execlists.ctrl_reg = engine->i915->regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
+ } else {
+ engine->execlists.submit_reg = engine->i915->regs +
+ i915_mmio_reg_offset(RING_ELSP(engine));
+ }
+
+ engine->execlists.preempt_complete_status = ~0u;
+ if (engine->i915->preempt_context)
+ engine->execlists.preempt_complete_status =
+ upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
+
return 0;
error:
@@ -1991,8 +2193,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
- engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
- engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
+ engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
+ engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
@@ -2046,7 +2248,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
+ rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]) <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
@@ -2070,6 +2272,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
default:
MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
+ case 11:
+ indirect_ctx_offset =
+ GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ break;
case 10:
indirect_ctx_offset =
GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
@@ -2108,8 +2314,9 @@ static void execlists_init_reg_state(u32 *regs,
MI_LRI_FORCE_POSTED;
CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
+ _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+ CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) |
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
(HAS_RESOURCE_STREAMER(dev_priv) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
@@ -2186,6 +2393,7 @@ populate_lr_context(struct i915_gem_context *ctx,
struct intel_ring *ring)
{
void *vaddr;
+ u32 *regs;
int ret;
ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
@@ -2202,11 +2410,35 @@ populate_lr_context(struct i915_gem_context *ctx,
}
ctx_obj->mm.dirty = true;
+ if (engine->default_state) {
+ /*
+ * We only want to copy over the template context state;
+ * skipping over the headers reserved for GuC communication,
+ * leaving those as zero.
+ */
+ const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
+ void *defaults;
+
+ defaults = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(defaults))
+ return PTR_ERR(defaults);
+
+ memcpy(vaddr + start, defaults + start, engine->context_size);
+ i915_gem_object_unpin_map(engine->default_state);
+ }
+
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
-
- execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
- ctx, engine, ring);
+ regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ execlists_init_reg_state(regs, ctx, engine, ring);
+ if (!engine->default_state)
+ regs[CTX_CONTEXT_CONTROL + 1] |=
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
+ regs[CTX_CONTEXT_CONTROL + 1] |=
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+ CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
i915_gem_object_unpin_map(ctx_obj);
@@ -2223,7 +2455,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_ring *ring;
int ret;
- WARN_ON(ce->state);
+ if (ce->state)
+ return 0;
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
@@ -2259,7 +2492,6 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
ce->ring = ring;
ce->state = vma;
- ce->initialised |= engine->init_context == NULL;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 689fde1..59d7b86 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -37,10 +37,14 @@
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
+#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370)
#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510)
+#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550)
+#define EL_CTRL_LOAD (1 << 0)
/* The docs specify that the write pointer wraps around after 5h, "After status
* is written out to the last available status QW at offset 5h, this pointer
@@ -107,9 +111,4 @@ intel_lr_context_descriptor(struct i915_gem_context *ctx,
return ctx->engine[engine->id].lrc_desc;
}
-
-/* Execlists */
-int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
- int enable_execlists);
-
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lrc_reg.h b/drivers/gpu/drm/i915/intel_lrc_reg.h
new file mode 100644
index 0000000..169a223
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lrc_reg.h
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef _INTEL_LRC_REG_H_
+#define _INTEL_LRC_REG_H_
+
+#include <linux/types.h>
+
+/* GEN8+ Reg State Context */
+#define CTX_LRI_HEADER_0 0x01
+#define CTX_CONTEXT_CONTROL 0x02
+#define CTX_RING_HEAD 0x04
+#define CTX_RING_TAIL 0x06
+#define CTX_RING_BUFFER_START 0x08
+#define CTX_RING_BUFFER_CONTROL 0x0a
+#define CTX_BB_HEAD_U 0x0c
+#define CTX_BB_HEAD_L 0x0e
+#define CTX_BB_STATE 0x10
+#define CTX_SECOND_BB_HEAD_U 0x12
+#define CTX_SECOND_BB_HEAD_L 0x14
+#define CTX_SECOND_BB_STATE 0x16
+#define CTX_BB_PER_CTX_PTR 0x18
+#define CTX_RCS_INDIRECT_CTX 0x1a
+#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
+#define CTX_LRI_HEADER_1 0x21
+#define CTX_CTX_TIMESTAMP 0x22
+#define CTX_PDP3_UDW 0x24
+#define CTX_PDP3_LDW 0x26
+#define CTX_PDP2_UDW 0x28
+#define CTX_PDP2_LDW 0x2a
+#define CTX_PDP1_UDW 0x2c
+#define CTX_PDP1_LDW 0x2e
+#define CTX_PDP0_UDW 0x30
+#define CTX_PDP0_LDW 0x32
+#define CTX_LRI_HEADER_2 0x41
+#define CTX_R_PWR_CLK_STATE 0x42
+#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
+
+#define CTX_REG(reg_state, pos, reg, val) do { \
+ u32 *reg_state__ = (reg_state); \
+ const u32 pos__ = (pos); \
+ (reg_state__)[(pos__) + 0] = i915_mmio_reg_offset(reg); \
+ (reg_state__)[(pos__) + 1] = (val); \
+} while (0)
+
+#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
+ u32 *reg_state__ = (reg_state); \
+ const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
+ (reg_state__)[CTX_PDP ## n ## _UDW + 1] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _LDW + 1] = lower_32_bits(addr__); \
+} while (0)
+
+#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
+ u32 *reg_state__ = (reg_state); \
+ const u64 addr__ = px_dma(&ppgtt->pml4); \
+ (reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \
+} while (0)
+
+#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
+#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
+#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19
+#define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x1A
+
+#endif /* _INTEL_LRC_REG_H_ */
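For reference, roughly how these helpers are consumed when populating a context image; the surrounding variables mirror execlists_init_reg_state() elsewhere in this patch, and the fragment itself is illustrative rather than part of the new header.

u32 *regs = vaddr + LRC_STATE_PN * PAGE_SIZE;	/* register-state page */

/* emit the <offset, value> pair for RING_TAIL into the image */
CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(engine->mmio_base), 0);

/* point PDP0 at the ppgtt page directory of a 32b context */
ASSIGN_CTX_PDP(ppgtt, regs, 0);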
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index dcbc786..8ae8f42 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -167,11 +167,10 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
unsigned long start = jiffies;
while (1) {
- if (intel_digital_port_connected(dev_priv, dig_port)) {
+ if (intel_digital_port_connected(&dig_port->base)) {
DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
jiffies_to_msecs(jiffies - start));
return;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 38572d6..d35d2d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -125,6 +125,8 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
+
tmp = I915_READ(lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
@@ -187,7 +189,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->t4 = val * 1000;
- if (INTEL_INFO(dev_priv)->gen <= 4 &&
+ if (INTEL_GEN(dev_priv) <= 4 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
DRM_DEBUG_KMS("Panel power timings uninitialized, "
"setting defaults\n");
@@ -266,7 +268,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
temp |= pipe_config->gmch_pfit.lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
+
+ /*
+ * Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (lvds_encoder->is_dual_link)
@@ -274,7 +278,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
else
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ /*
+ * It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes. For now, let's just maintain the
* value we got from the BIOS.
@@ -282,12 +287,16 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
temp &= ~LVDS_A3_POWER_MASK;
temp |= lvds_encoder->a3_power;
- /* Set the dithering flag on LVDS as needed, note that there is no
+ /*
+ * Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
- * only controlled through the PIPECONF reg. */
+ * only controlled through the PIPECONF reg.
+ */
if (IS_GEN4(dev_priv)) {
- /* Bspec wording suggests that LVDS port dithering only exists
- * for 18bpp panels. */
+ /*
+ * Bspec wording suggests that LVDS port dithering only exists
+ * for 18bpp panels.
+ */
if (pipe_config->dither && pipe_config->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
@@ -302,7 +311,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(lvds_encoder->reg, temp);
}
-/**
+/*
* Sets the power state for the panel.
*/
static void intel_enable_lvds(struct intel_encoder *encoder,
@@ -439,7 +448,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
-/**
+/*
* Detect the LVDS connection.
*
* Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
@@ -462,7 +471,7 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-/**
+/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
@@ -891,7 +900,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
if (dmi_check_system(intel_dual_link_lvds))
return true;
- /* BIOS should set the proper LVDS register value at boot, but
+ /*
+ * BIOS should set the proper LVDS register value at boot, but
* in reality, it doesn't set the value when the lid is closed;
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
@@ -905,13 +915,17 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
{
- /* With the introduction of the PCH we gained a dedicated
- * LVDS presence pin, use it. */
+ /*
+ * With the introduction of the PCH we gained a dedicated
+ * LVDS presence pin, use it.
+ */
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
return true;
- /* Otherwise LVDS was only attached to mobile products,
- * except for the inglorious 830gm */
+ /*
+ * Otherwise LVDS was only attached to mobile products,
+ * except for the inglorious 830gm
+ */
if (INTEL_GEN(dev_priv) <= 4 &&
IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
return true;
@@ -921,7 +935,7 @@ static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @dev: drm device
+ * @dev_priv: i915 device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index f4c46b0..c0b34b7 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -187,7 +187,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
table->table = broxton_mocs_table;
result = true;
} else {
- WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
+ WARN_ONCE(INTEL_GEN(dev_priv) >= 9,
"Platform that should have a MOCS table does not.\n");
}
@@ -265,7 +265,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
/**
* emit_mocs_control_table() - emit the mocs control table
- * @req: Request to set up the MOCS table for.
+ * @rq: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
@@ -273,17 +273,17 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
*
* Return: 0 on success, otherwise the error status.
*/
-static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+static int emit_mocs_control_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
- enum intel_engine_id engine = req->engine->id;
+ enum intel_engine_id engine = rq->engine->id;
unsigned int index;
u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ cs = intel_ring_begin(rq, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -308,7 +308,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -323,7 +323,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
/**
* emit_mocs_l3cc_table() - emit the mocs control table
- * @req: Request to set up the MOCS table for.
+ * @rq: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
@@ -332,7 +332,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
*
* Return: 0 on success, otherwise the error status.
*/
-static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+static int emit_mocs_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
unsigned int i;
@@ -341,7 +341,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ cs = intel_ring_begin(rq, 2 + GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -370,7 +370,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
}
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -417,7 +417,7 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
/**
* intel_rcs_context_init_mocs() - program the MOCS register.
- * @req: Request to set up the MOCS tables for.
+ * @rq: Request to set up the MOCS tables for.
*
* This function will emit a batch buffer with the values required for
* programming the MOCS register values for all the currently supported
@@ -431,19 +431,19 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
*
* Return: 0 on success, otherwise the error status.
*/
-int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+int intel_rcs_context_init_mocs(struct i915_request *rq)
{
struct drm_i915_mocs_table t;
int ret;
- if (get_mocs_settings(req->i915, &t)) {
+ if (get_mocs_settings(rq->i915, &t)) {
/* Program the RCS control registers */
- ret = emit_mocs_control_table(req, &t);
+ ret = emit_mocs_control_table(rq, &t);
if (ret)
return ret;
/* Now program the l3cc registers */
- ret = emit_mocs_l3cc_table(req, &t);
+ ret = emit_mocs_l3cc_table(rq, &t);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index ce4a5df..d1751f9 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -52,7 +52,7 @@
#include <drm/drmP.h>
#include "i915_drv.h"
-int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+int intel_rcs_context_init_mocs(struct i915_request *rq);
void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
int intel_mocs_init_engine(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 28a778b..b398466 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -30,21 +30,6 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
-{
- u8 conn_type;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- conn_type = DRM_ELD_CONN_TYPE_DP;
- } else {
- conn_type = DRM_ELD_CONN_TYPE_HDMI;
- }
-
- connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
- connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
-}
-
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
@@ -57,9 +42,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
-
- intel_connector_update_eld_conn_type(connector);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 1d94624..c58e5f5 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -32,6 +32,8 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
+
+#include "intel_opregion.h"
#include "i915_drv.h"
#include "intel_drv.h"
@@ -367,7 +369,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
if (intel_encoder->type == INTEL_OUTPUT_DSI)
port = 0;
else
- port = intel_ddi_get_encoder_port(intel_encoder);
+ port = intel_encoder->port;
if (port == PORT_E) {
port = 0;
@@ -383,7 +385,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
case INTEL_OUTPUT_ANALOG:
type = DISPLAY_TYPE_CRT;
break;
- case INTEL_OUTPUT_UNKNOWN:
+ case INTEL_OUTPUT_DDI:
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_DP_MST:
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
new file mode 100644
index 0000000..e0e437b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright © 2008-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_OPREGION_H_
+#define _INTEL_OPREGION_H_
+
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
+ struct opregion_asle *asle;
+ void *rvda;
+ void *vbt_firmware;
+ const void *vbt;
+ u32 vbt_size;
+ u32 *lid_state;
+ struct work_struct asle_work;
+};
+
+#define OPREGION_SIZE (8 * 1024)
+
+#ifdef CONFIG_ACPI
+
+int intel_opregion_setup(struct drm_i915_private *dev_priv);
+void intel_opregion_register(struct drm_i915_private *dev_priv);
+void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable);
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+ pci_power_t state);
+int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+
+#else /* CONFIG_ACPI*/
+
+static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+ return 0;
+}
+
+static inline int
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
+{
+ return 0;
+}
+
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif
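The new header above follows the usual kernel pattern of pairing the real declarations with static inline no-op stubs under the #else branch, so callers never need their own #ifdef CONFIG_ACPI. A minimal illustration of the same pattern, using a made-up feature name (CONFIG_FOO, foo_setup, foo_notify are all hypothetical):

/* foo.h -- hypothetical feature with a compile-time on/off switch */
#ifndef FOO_H
#define FOO_H

#ifdef CONFIG_FOO
int foo_setup(void);          /* real implementation lives in foo.c */
void foo_notify(int event);
#else
/* Stubs keep every caller compiling, and they optimize away when FOO is off. */
static inline int foo_setup(void) { return 0; }
static inline void foo_notify(int event) { }
#endif

#endif /* FOO_H */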
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1b397b4..36671a9 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -219,7 +219,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
+ regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
overlay->flip_addr,
PAGE_SIZE);
@@ -234,50 +234,50 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
}
static void intel_overlay_submit_request(struct intel_overlay *overlay,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
i915_gem_retire_fn retire)
{
GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
&overlay->i915->drm.struct_mutex));
i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
&overlay->i915->drm.struct_mutex);
- i915_gem_active_set(&overlay->last_flip, req);
- i915_add_request(req);
+ i915_gem_active_set(&overlay->last_flip, rq);
+ i915_request_add(rq);
}
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
- struct drm_i915_gem_request *req,
+ struct i915_request *rq,
i915_gem_retire_fn retire)
{
- intel_overlay_submit_request(overlay, req, retire);
+ intel_overlay_submit_request(overlay, rq, retire);
return i915_gem_active_retire(&overlay->last_flip,
&overlay->i915->drm.struct_mutex);
}
-static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
+static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = dev_priv->engine[RCS];
- return i915_gem_request_alloc(engine, dev_priv->kernel_context);
+ return i915_request_alloc(engine, dev_priv->kernel_context);
}
/* overlay needs to be disable in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
u32 *cs;
WARN_ON(overlay->active);
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
@@ -290,9 +290,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
*cs++ = overlay->flip_addr | OFC_UPDATE;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- return intel_overlay_do_wait_request(overlay, req, NULL);
+ return intel_overlay_do_wait_request(overlay, rq, NULL);
}
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
@@ -322,7 +322,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
u32 flip_addr = overlay->flip_addr;
u32 tmp, *cs;
@@ -336,23 +336,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
*cs++ = flip_addr;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, vma);
- intel_overlay_submit_request(overlay, req, NULL);
+ intel_overlay_submit_request(overlay, rq, NULL);
return 0;
}
@@ -373,7 +373,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
}
static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
@@ -382,7 +382,7 @@ static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
}
static void intel_overlay_off_tail(struct i915_gem_active *active,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
@@ -401,7 +401,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;
WARN_ON(!overlay->active);
@@ -412,13 +412,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
@@ -432,11 +432,11 @@ static int intel_overlay_off(struct intel_overlay *overlay)
*cs++ = flip_addr;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, NULL);
- return intel_overlay_do_wait_request(overlay, req,
+ return intel_overlay_do_wait_request(overlay, rq,
intel_overlay_off_tail);
}
@@ -468,23 +468,23 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
- struct drm_i915_gem_request *req;
+ struct i915_request *rq;
- req = alloc_request(overlay);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ rq = alloc_request(overlay);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs)) {
- i915_add_request(req);
+ i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- ret = intel_overlay_do_wait_request(overlay, req,
+ ret = intel_overlay_do_wait_request(overlay, rq,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
@@ -801,7 +801,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ vma = i915_gem_object_pin_to_display_plane(new_bo,
+ 0, NULL, PIN_MAPPABLE);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
@@ -1508,7 +1509,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
+ regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
overlay->flip_addr);
return regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index adc51e4..41d00b1 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -397,8 +397,11 @@ intel_panel_detect(struct drm_i915_private *dev_priv)
/**
* scale - scale values from one range to another
- *
* @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum legal value for @source_val
+ * @source_max: maximum legal value for @source_val
+ * @target_min: corresponding target value for @source_min
+ * @target_max: corresponding target value for @source_max
*
* Return @source_val in range [@source_min..@source_max] scaled to range
* [@target_min..@target_max].
@@ -416,8 +419,9 @@ static uint32_t scale(uint32_t source_val,
source_val = clamp(source_val, source_min, source_max);
/* avoid overflows */
- target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
- (target_max - target_min), source_max - source_min);
+ target_val = mul_u32_u32(source_val - source_min,
+ target_max - target_min);
+ target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min);
target_val += target_min;
return target_val;
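For reference, a minimal userspace sketch of the overflow-safe range scaling used above. mul_u32_u32() and DIV_ROUND_CLOSEST_ULL() are kernel helpers; here they are modeled with a plain 64-bit multiply and a round-to-nearest division, and scale_range plus the demo values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest unsigned division, like the kernel's DIV_ROUND_CLOSEST_ULL. */
static uint64_t div_round_closest_u64(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

/* Map source_val from [source_min..source_max] onto [target_min..target_max].
 * Each subtraction fits in a u32, but their product needs 64 bits, which is
 * why the driver switched to mul_u32_u32() before the division. */
static uint32_t scale_range(uint32_t source_val,
			    uint32_t source_min, uint32_t source_max,
			    uint32_t target_min, uint32_t target_max)
{
	uint64_t tmp;

	if (source_val < source_min)
		source_val = source_min;
	if (source_val > source_max)
		source_val = source_max;

	tmp = (uint64_t)(source_val - source_min) * (target_max - target_min);
	return target_min + (uint32_t)div_round_closest_u64(tmp, source_max - source_min);
}

int main(void)
{
	/* e.g. a user brightness in 0..0xffffffff mapped onto a PWM range 100..60000 */
	printf("%u\n", scale_range(0x80000000u, 0, 0xffffffffu, 100, 60000));
	return 0;
}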
@@ -497,7 +501,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
u32 val;
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (INTEL_INFO(dev_priv)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
val >>= 1;
if (panel->backlight.combination_mode) {
@@ -543,25 +547,6 @@ static u32 pwm_get_backlight(struct intel_connector *connector)
return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
}
-static u32 intel_panel_get_backlight(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 val = 0;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- if (panel->backlight.enabled) {
- val = panel->backlight.get(connector);
- val = intel_panel_compute_brightness(connector, val);
- }
-
- mutex_unlock(&dev_priv->backlight_lock);
-
- DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
- return val;
-}
-
static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
@@ -649,31 +634,6 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
panel->backlight.set(conn_state, level);
}
-/* set backlight brightness to level in range [0..max], scaling wrt hw min */
-static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
- u32 user_level, u32 user_max)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 hw_level;
-
- if (!panel->backlight.present)
- return;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- WARN_ON(panel->backlight.max == 0);
-
- hw_level = scale_user_to_hw(connector, user_level, user_max);
- panel->backlight.level = hw_level;
-
- if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(conn_state, hw_level);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
/* set backlight brightness to level in range [0..max], assuming hw min is
* respected.
*/
@@ -1182,6 +1142,50 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 val = 0;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ if (panel->backlight.enabled) {
+ val = panel->backlight.get(connector);
+ val = intel_panel_compute_brightness(connector, val);
+ }
+
+ mutex_unlock(&dev_priv->backlight_lock);
+
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+}
+
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
+ u32 user_level, u32 user_max)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ if (!panel->backlight.present)
+ return;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ WARN_ON(panel->backlight.max == 0);
+
+ hw_level = scale_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(conn_state, hw_level);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
@@ -1719,9 +1723,9 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
u32 pwm_ctl, val;
/*
- * CNP has the BXT implementation of backlight, but with only
- * one controller. Future platforms could have multiple controllers
- * so let's make this extensible and prepared for the future.
+ * CNP has the BXT implementation of backlight, but with only one
+ * controller. TODO: ICP has multiple controllers but we only use
+ * controller 0 for now.
*/
panel->backlight.controller = 0;
@@ -1865,7 +1869,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
- } else if (HAS_PCH_CNP(dev_priv)) {
+ } else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) {
panel->backlight.setup = cnp_setup_backlight;
panel->backlight.enable = cnp_enable_backlight;
panel->backlight.disable = cnp_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 899839f..1f5cd57 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -269,7 +269,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
dig_port = enc_to_dig_port(&encoder->base);
- switch (dig_port->port) {
+ switch (dig_port->base.port) {
case PORT_B:
*source = INTEL_PIPE_CRC_SOURCE_DP_B;
break;
@@ -281,7 +281,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
default:
WARN(1, "nonexisting DP port %c\n",
- port_name(dig_port->port));
+ port_name(dig_port->base.port));
break;
}
break;
@@ -541,8 +541,6 @@ retry:
* completely disable it.
*/
pipe_config->ips_force_disable = enable;
- if (pipe_config->ips_enabled == enable)
- pipe_config->base.connectors_changed = true;
}
if (IS_HASWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f0d0dba..b8da4dc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -52,16 +52,13 @@
* which brings the most power savings; deeper states save more power, but
* require higher latency to switch to and wake up.
*/
-#define INTEL_RC6_ENABLE (1<<0)
-#define INTEL_RC6p_ENABLE (1<<1)
-#define INTEL_RC6pp_ENABLE (1<<2)
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
/*
* WaCompressedResourceDisplayNewHashMode:skl,kbl
- * Display WA#0390: skl,kbl
+ * Display WA #0390: skl,kbl
*
* Must match Sampler, Pixel Back End, and Media. See
* WaCompressedResourceSamplerPbeMediaNewHashMode.
@@ -75,9 +72,6 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
- I915_WRITE(GEN8_CONFIG0,
- I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
-
/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
@@ -515,38 +509,41 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
fifo_state->plane[PLANE_CURSOR] = 63;
}
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
{
uint32_t dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
- if (plane)
+ if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
{
uint32_t dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x1ff;
- if (plane)
+ if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
{
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -554,9 +551,8 @@ static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
return size;
}
@@ -733,6 +729,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* intel_calculate_wm - calculate watermark level
* @pixel_rate: pixel clock
* @wm: chip FIFO params
+ * @fifo_size: size of the FIFO buffer
* @cpp: bytes per pixel
* @latency_ns: memory latency for the platform
*
@@ -922,7 +919,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
* and the size of 8 whole lines. This adjustment is always performed
* in the actual pixel depth regardless of whether FBC is enabled or not."
*/
-static int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
+static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
int tlb_miss = fifo_size * 64 - width * cpp * 8;
@@ -1099,8 +1096,8 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- int clock, htotal, cpp, width, wm;
- int latency = dev_priv->wm.pri_latency[level] * 10;
+ unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
+ unsigned int clock, htotal, cpp, width, wm;
if (latency == 0)
return USHRT_MAX;
@@ -1139,7 +1136,7 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
level == G4X_WM_LEVEL_NORMAL) {
wm = intel_wm_method1(clock, cpp, latency);
} else {
- int small, large;
+ unsigned int small, large;
small = intel_wm_method1(clock, cpp, latency);
large = intel_wm_method2(clock, htotal, width, cpp, latency);
@@ -1152,7 +1149,7 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
wm = DIV_ROUND_UP(wm, 64) + 2;
- return min_t(int, wm, USHRT_MAX);
+ return min_t(unsigned int, wm, USHRT_MAX);
}
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
@@ -1403,17 +1400,29 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int g4x_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *new_crtc_state)
{
- struct g4x_wm_state *intermediate = &crtc_state->wm.g4x.intermediate;
- const struct g4x_wm_state *optimal = &crtc_state->wm.g4x.optimal;
- const struct g4x_wm_state *active = &crtc->wm.active.g4x;
+ struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
+ const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(intel_state, crtc);
+ const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
enum plane_id plane_id;
+ if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ intermediate->hpll_en = false;
+ goto out;
+ }
+
intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !crtc_state->disable_cxsr;
+ !new_crtc_state->disable_cxsr;
intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
- !crtc_state->disable_cxsr;
+ !new_crtc_state->disable_cxsr;
intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -1455,12 +1464,13 @@ static int g4x_compute_intermediate_wm(struct drm_device *dev,
WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
intermediate->fbc_en && intermediate->hpll_en);
+out:
/*
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- crtc_state->wm.need_postvbl_update = true;
+ new_crtc_state->wm.need_postvbl_update = true;
return 0;
}
@@ -1596,7 +1606,7 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- int clock, htotal, cpp, width, wm;
+ unsigned int clock, htotal, cpp, width, wm;
if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
@@ -1622,7 +1632,7 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
dev_priv->wm.pri_latency[level] * 10);
}
- return min_t(int, wm, USHRT_MAX);
+ return min_t(unsigned int, wm, USHRT_MAX);
}
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
@@ -2023,16 +2033,27 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
static int vlv_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *new_crtc_state)
{
- struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
- const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
- const struct vlv_wm_state *active = &crtc->wm.active.vlv;
+ struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
+ const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(intel_state, crtc);
+ const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
int level;
+ if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ goto out;
+ }
+
intermediate->num_levels = min(optimal->num_levels, active->num_levels);
intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !crtc_state->disable_cxsr;
+ !new_crtc_state->disable_cxsr;
for (level = 0; level < intermediate->num_levels; level++) {
enum plane_id plane_id;
@@ -2051,12 +2072,13 @@ static int vlv_compute_intermediate_wm(struct drm_device *dev,
vlv_invalidate_wms(crtc, intermediate, level);
+out:
/*
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- crtc_state->wm.need_postvbl_update = true;
+ new_crtc_state->wm.need_postvbl_update = true;
return 0;
}
@@ -2255,8 +2277,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
else
wm_info = &i830_a_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0);
- crtc = intel_get_crtc_for_plane(dev_priv, 0);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
+ crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
&crtc->config->base.adjusted_mode;
@@ -2282,8 +2304,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_GEN2(dev_priv))
wm_info = &i830_bc_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1);
- crtc = intel_get_crtc_for_plane(dev_priv, 1);
+ fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
+ crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
&crtc->config->base.adjusted_mode;
@@ -2395,7 +2417,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
adjusted_mode = &crtc->config->base.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
- dev_priv->display.get_fifo_size(dev_priv, 0),
+ dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -2895,10 +2917,6 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
/* ILK cursor LP0 latency is 1300 ns */
if (IS_GEN5(dev_priv))
wm[0] = 13;
-
- /* WaDoubleCursorLP3Latency:ivb */
- if (IS_IVYBRIDGE(dev_priv))
- wm[3] *= 2;
}
int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
@@ -3673,11 +3691,18 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
struct intel_crtc_state *cstate;
enum pipe pipe;
int level, latency;
- int sagv_block_time_us = IS_GEN9(dev_priv) ? 30 : 20;
+ int sagv_block_time_us;
if (!intel_has_sagv(dev_priv))
return false;
+ if (IS_GEN9(dev_priv))
+ sagv_block_time_us = 30;
+ else if (IS_GEN10(dev_priv))
+ sagv_block_time_us = 20;
+ else
+ sagv_block_time_us = 10;
+
/*
* SKL+ workaround: bspec recommends we disable the SAGV when we have
* more than one pipe enabled
@@ -3757,7 +3782,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
ddb_size = INTEL_INFO(dev_priv)->ddb_size;
WARN_ON(ddb_size == 0);
- ddb_size -= 4; /* 4 blocks for bypass path allocation */
+ if (INTEL_GEN(dev_priv) < 11)
+ ddb_size -= 4; /* 4 blocks for bypass path allocation */
/*
* If the state doesn't change the active CRTC's, then there's
@@ -3924,6 +3950,7 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_atomic_state *state = crtc_state->state;
struct drm_plane *plane;
@@ -3966,7 +3993,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
crtc_clock = crtc_state->adjusted_mode.crtc_clock;
dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
- if (IS_GEMINILAKE(to_i915(intel_crtc->base.dev)))
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
dotclk *= 2;
pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
@@ -4289,7 +4316,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
- uint8_t cpp, uint32_t latency)
+ uint8_t cpp, uint32_t latency, uint32_t dbuf_block_size)
{
uint32_t wm_intermediate_val;
uint_fixed_16_16_t ret;
@@ -4298,7 +4325,7 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
return FP_16_16_MAX;
wm_intermediate_val = latency * pixel_rate * cpp;
- ret = div_fixed16(wm_intermediate_val, 1000 * 512);
+ ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
if (INTEL_GEN(dev_priv) >= 10)
ret = add_fixed16_u32(ret, 1);
@@ -4408,6 +4435,12 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
intel_pstate);
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 8)
+ wp->dbuf_block_size = 256;
+ else
+ wp->dbuf_block_size = 512;
+
if (drm_rotation_90_or_270(pstate->rotation)) {
switch (wp->cpp) {
@@ -4434,7 +4467,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->plane_bytes_per_line = wp->width * wp->cpp;
if (wp->y_tiled) {
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
- wp->y_min_scanlines, 512);
+ wp->y_min_scanlines,
+ wp->dbuf_block_size);
if (INTEL_GEN(dev_priv) >= 10)
interm_pbpl++;
@@ -4442,10 +4476,12 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
wp->y_min_scanlines);
} else if (wp->x_tiled && IS_GEN9(dev_priv)) {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512);
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
+ wp->dbuf_block_size);
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
} else {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512) + 1;
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
+ wp->dbuf_block_size) + 1;
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
}
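A hedged worked example of the new block-size handling: on a hypothetical gen11 plane scanning out a Yf-tiled 64bpp (cpp == 8) format, the DDB is allocated in 256-byte blocks instead of 512, which roughly doubles the blocks-per-line figure computed above. The driver keeps plane_blocks_per_line in 16.16 fixed point; plain integers are used here, and all the numbers are illustrative.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int width = 3840;        /* plane width in pixels */
	unsigned int cpp = 8;             /* bytes per pixel */
	unsigned int y_min_scanlines = 4; /* scanlines per tile row */
	unsigned int bytes_per_line = width * cpp;

	/* gen9/10 always use 512-byte blocks; gen11 drops to 256 for
	 * Yf-tiled 64bpp surfaces, as in the hunk above. */
	unsigned int block_sizes[] = { 512, 256 };

	for (int i = 0; i < 2; i++) {
		unsigned int interm_pbpl =
			DIV_ROUND_UP(bytes_per_line * y_min_scanlines,
				     block_sizes[i]);
		printf("block size %u -> %u blocks per %u scanlines\n",
		       block_sizes[i], interm_pbpl, y_min_scanlines);
	}
	return 0;
}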
@@ -4475,6 +4511,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
struct intel_atomic_state *state =
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
+ uint32_t min_disp_buf_needed;
if (latency == 0 ||
!intel_wm_plane_visible(cstate, intel_pstate)) {
@@ -4492,7 +4529,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
latency += 15;
method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
- wp->cpp, latency);
+ wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
latency,
@@ -4502,7 +4539,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
selected_result = max_fixed16(method2, wp->y_tile_minimum);
} else {
if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
- 512 < 1) && (wp->plane_bytes_per_line / 512 < 1))
+ wp->dbuf_block_size < 1) &&
+ (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
selected_result = method2;
else if (ddb_allocation >=
fixed16_to_u32_round_up(wp->plane_blocks_per_line))
@@ -4532,7 +4570,32 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
}
- if (res_blocks >= ddb_allocation || res_lines > 31) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ if (wp->y_tiled) {
+ uint32_t extra_lines;
+ uint_fixed_16_16_t fp_min_disp_buf_needed;
+
+ if (res_lines % wp->y_min_scanlines == 0)
+ extra_lines = wp->y_min_scanlines;
+ else
+ extra_lines = wp->y_min_scanlines * 2 -
+ res_lines % wp->y_min_scanlines;
+
+ fp_min_disp_buf_needed = mul_u32_fixed16(res_lines +
+ extra_lines,
+ wp->plane_blocks_per_line);
+ min_disp_buf_needed = fixed16_to_u32_round_up(
+ fp_min_disp_buf_needed);
+ } else {
+ min_disp_buf_needed = DIV_ROUND_UP(res_blocks * 11, 10);
+ }
+ } else {
+ min_disp_buf_needed = res_blocks;
+ }
+
+ if ((level > 0 && res_lines > 31) ||
+ res_blocks >= ddb_allocation ||
+ min_disp_buf_needed >= ddb_allocation) {
*enabled = false;
/*
@@ -4552,8 +4615,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
}
+ /* The number of lines are ignored for the level 0 watermark. */
+ *out_lines = level ? res_lines : 0;
*out_blocks = res_blocks;
- *out_lines = res_lines;
*enabled = true;
return 0;
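To illustrate the gen11 check added above with made-up numbers: the Y-tiled path rounds the watermark's line count up past the next multiple of y_min_scanlines (one extra tile row), multiplies by blocks per line, and the result must still fit the plane's DDB allocation or the level is disabled. The driver does the multiply in 16.16 fixed point and rounds up; this sketch uses plain integers and invented values.

#include <stdio.h>

int main(void)
{
	unsigned int res_lines = 7;        /* lines needed by the selected WM */
	unsigned int y_min_scanlines = 4;  /* scanlines per tile row */
	unsigned int blocks_per_line = 30; /* DDB blocks per scanline (rounded) */
	unsigned int ddb_allocation = 400; /* blocks handed to this plane */
	unsigned int extra_lines, min_blocks;

	if (res_lines % y_min_scanlines == 0)
		extra_lines = y_min_scanlines;
	else
		extra_lines = y_min_scanlines * 2 -
			      res_lines % y_min_scanlines;

	min_blocks = (res_lines + extra_lines) * blocks_per_line;

	printf("extra_lines=%u min_blocks=%u fits=%s\n",
	       extra_lines, min_blocks,
	       min_blocks < ddb_allocation ? "yes" : "no");
	return 0;
}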
@@ -4645,6 +4709,7 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
if (!dev_priv->ipc_enabled)
goto exit;
+ trans_min = 0;
if (INTEL_GEN(dev_priv) >= 10)
trans_min = 4;
@@ -4768,8 +4833,10 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
&ddb->plane[pipe][plane_id]);
- skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
- &ddb->y_plane[pipe][plane_id]);
+ if (INTEL_GEN(dev_priv) < 11)
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, plane_id),
+ &ddb->y_plane[pipe][plane_id]);
}
static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
@@ -5797,6 +5864,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
+ * @crtc: the #intel_crtc on which to compute the WM
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
@@ -6292,7 +6360,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->pcu_lock);
}
-void gen6_rps_boost(struct drm_i915_gem_request *rq,
+void gen6_rps_boost(struct i915_request *rq,
struct intel_rps_client *rps_client)
{
struct intel_rps *rps = &rq->i915->gt_pm.rps;
@@ -6305,12 +6373,15 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq,
if (!rps->enabled)
return;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return;
+
+ /* Serializes with i915_request_retire() */
boost = false;
spin_lock_irqsave(&rq->lock, flags);
- if (!rq->waitboost && !i915_gem_request_completed(rq)) {
- atomic_inc(&rps->num_waiters);
+ if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
+ boost = !atomic_fetch_inc(&rps->num_waiters);
rq->waitboost = true;
- boost = true;
}
spin_unlock_irqrestore(&rq->lock, flags);
if (!boost)
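The waitboost rework above only kicks the boost for the first waiter: atomic_fetch_inc() returns the previous counter value, so only the 0 -> 1 transition requests a frequency boost. A standalone sketch of that pattern, using C11 atomics rather than the kernel's atomic_t (note_waiter is a made-up name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint num_waiters;

/* Returns true only for the caller that turned the counter from 0 to 1,
 * i.e. the one that should actually schedule the boost. */
static bool note_waiter(void)
{
	return atomic_fetch_add(&num_waiters, 1) == 0;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("waiter %d boosts: %s\n", i, note_waiter() ? "yes" : "no");
	return 0;
}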
@@ -6392,29 +6463,8 @@ static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
-{
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
- mode = GEN6_RC_CTL_RC6_ENABLE;
- else
- mode = 0;
- }
- if (HAS_RC6p(dev_priv))
- DRM_DEBUG_DRIVER("Enabling RC6 states: "
- "RC6 %s RC6p %s RC6pp %s\n",
- onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
- onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
- onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
-
- else
- DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
- onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
-}
-
static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
u32 rc_ctl;
@@ -6439,9 +6489,8 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
- (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
- ggtt->stolen_reserved_size))) {
+ if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
+ (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -6474,42 +6523,30 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
return enable_rc6;
}
-int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
+static bool sanitize_rc6(struct drm_i915_private *i915)
{
- /* No RC6 before Ironlake and code is gone for ilk. */
- if (INTEL_INFO(dev_priv)->gen < 6)
- return 0;
+ struct intel_device_info *info = mkwrite_device_info(i915);
- if (!enable_rc6)
- return 0;
+ /* Powersaving is controlled by the host when inside a VM */
+ if (intel_vgpu_active(i915))
+ info->has_rc6 = 0;
- if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
+ if (info->has_rc6 &&
+ IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
DRM_INFO("RC6 disabled by BIOS\n");
- return 0;
+ info->has_rc6 = 0;
}
- /* Respect the kernel parameter if it is set */
- if (enable_rc6 >= 0) {
- int mask;
-
- if (HAS_RC6p(dev_priv))
- mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
- INTEL_RC6pp_ENABLE;
- else
- mask = INTEL_RC6_ENABLE;
-
- if ((enable_rc6 & mask) != enable_rc6)
- DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
- "(requested %d, valid %d)\n",
- enable_rc6 & mask, enable_rc6, mask);
-
- return enable_rc6 & mask;
- }
-
- if (IS_IVYBRIDGE(dev_priv))
- return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+ /*
+ * We assume that we do not have any deep rc6 levels if we don't
+ * have the previous rc6 level supported, i.e. we use HAS_RC6()
+ * as the initial coarse check for rc6 in general, moving on to
+ * progressively finer/deeper levels.
+ */
+ if (!info->has_rc6 && info->has_rc6p)
+ info->has_rc6p = 0;
- return INTEL_RC6_ENABLE;
+ return info->has_rc6;
}
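The sanitize_rc6() rewrite treats RC6 support as a ladder of capability flags and clears the deeper level whenever a shallower one is unavailable (virtualized, or vetoed by the BIOS on broxton-class parts). A hedged sketch of that coarse-to-fine masking; struct and field names here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct rc_caps {
	bool rc6;   /* base power state */
	bool rc6p;  /* deeper state, only meaningful when rc6 is present */
};

/* Clear deeper capabilities whenever a shallower one is unavailable. */
static void sanitize_caps(struct rc_caps *caps, bool inside_vm, bool bios_veto)
{
	if (inside_vm)
		caps->rc6 = false;  /* the host owns power management */
	if (caps->rc6 && bios_veto)
		caps->rc6 = false;
	if (!caps->rc6)
		caps->rc6p = false; /* no deep states without the base one */
}

int main(void)
{
	struct rc_caps caps = { true, true };

	sanitize_caps(&caps, false, true);
	printf("rc6=%d rc6p=%d\n", caps.rc6, caps.rc6p);
	return 0;
}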
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
@@ -6579,9 +6616,10 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- /* Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
+ /* Program defaults and thresholds for RPS */
+ if (IS_GEN9(dev_priv))
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
/* 1 second timeout*/
I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
@@ -6601,7 +6639,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 rc6_mode, rc6_mask = 0;
+ u32 rc6_mode;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -6614,12 +6652,19 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2b: Program RC6 thresholds.*/
-
- /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
- if (IS_SKYLAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10) {
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+ } else if (IS_SKYLAKE(dev_priv)) {
+ /*
+ * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
+ * when CPG is enabled
+ */
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
- else
+ } else {
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ }
+
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, dev_priv, id)
@@ -6630,14 +6675,31 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_SLEEP, 0);
- /* 2c: Program Coarse Power Gating Policies. */
- I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
- I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from intel_pstate is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
/* 3a: Enable RC6 */
- if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
/* WaRsUseTimeoutMode:cnl (pre-prod) */
@@ -6647,17 +6709,19 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
rc6_mode = GEN6_RC_CTL_EI_MODE(1);
I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE | rc6_mode | rc6_mask);
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ rc6_mode);
/*
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
- * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
+ * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
*/
if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
- I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
- (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
+ I915_WRITE(GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
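For scale, the idle-hysteresis registers written above count in 1280 ns units, so the change from 25 to 250 moves the render/media power-gating hysteresis from roughly 32 us to 320 us, comfortably above the ~10-100 us interrupt-service latencies quoted in the comment. A trivial check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int unit_ns = 1280;       /* hysteresis register unit */
	unsigned int old_val = 25, new_val = 250;

	printf("old hysteresis: %u ns (~%u us)\n",
	       old_val * unit_ns, old_val * unit_ns / 1000);
	printf("new hysteresis: %u ns (~%u us)\n",
	       new_val * unit_ns, new_val * unit_ns / 1000);
	return 0;
}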
@@ -6666,7 +6730,6 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- uint32_t rc6_mask = 0;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -6688,13 +6751,11 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
/* 3: Enable RC6 */
- if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- intel_print_rc6_info(dev_priv, rc6_mask);
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN7_RC_CTL_TO_MODE |
- rc6_mask);
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_RC6_ENABLE);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -6743,9 +6804,8 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 rc6vids, rc6_mask = 0;
+ u32 rc6vids, rc6_mask;
u32 gtfifodbg;
- int rc6_mode;
int ret;
I915_WRITE(GEN6_RC_STATE, 0);
@@ -6780,22 +6840,12 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- /* Check if we are enabling RC6 */
- rc6_mode = intel_rc6_enabled();
- if (rc6_mode & INTEL_RC6_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
-
/* We don't use those on Haswell */
- if (!IS_HASWELL(dev_priv)) {
- if (rc6_mode & INTEL_RC6p_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
-
- if (rc6_mode & INTEL_RC6pp_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
- }
-
- intel_print_rc6_info(dev_priv, rc6_mask);
-
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ if (HAS_RC6p(dev_priv))
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ if (HAS_RC6pp(dev_priv))
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
@@ -6892,7 +6942,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
* No floor required for ring frequency on SKL.
*/
ring_freq = gpu_freq;
- } else if (INTEL_INFO(dev_priv)->gen >= 8) {
+ } else if (INTEL_GEN(dev_priv) >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
} else if (IS_HASWELL(dev_priv)) {
@@ -7037,7 +7087,7 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
- WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
+ WARN_ON(pctx_addr != dev_priv->dsm.start +
dev_priv->vlv_pctx->stolen->start);
}
@@ -7052,16 +7102,15 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long pctx_paddr, paddr;
+ resource_size_t pctx_paddr, paddr;
+ resource_size_t pctx_size = 32*1024;
u32 pcbr;
- int pctx_size = 32*1024;
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = (dev_priv->mm.stolen_base +
- (ggtt->stolen_size - pctx_size));
+ paddr = dev_priv->dsm.end + 1 - pctx_size;
+ GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
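A hedged sketch of what the CherryView fixup above does when the BIOS left PCBR unset: carve the 32 KiB power context out of the very top of the stolen (DSM) range, align it down to 4 KiB, and check that the result still fits the 32-bit register. The addresses below are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
	/* Invented stolen-memory range; dsm.end is inclusive, as in the driver. */
	uint64_t dsm_end   = 0x7fffffffull;
	uint64_t pctx_size = 32 * 1024;

	uint64_t paddr = dsm_end + 1 - pctx_size;  /* top of stolen memory */
	assert(paddr <= UINT32_MAX);               /* PCBR is a 32-bit register */

	uint32_t pctx_paddr = (uint32_t)(paddr & ~4095ull); /* 4 KiB aligned */

	printf("pctx placed at 0x%x (below inclusive end 0x%llx)\n",
	       pctx_paddr, (unsigned long long)dsm_end);
	return 0;
}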
@@ -7073,16 +7122,16 @@ static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *pctx;
- unsigned long pctx_paddr;
+ resource_size_t pctx_paddr;
+ resource_size_t pctx_size = 24*1024;
u32 pcbr;
- int pctx_size = 24*1024;
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
- int pcbr_offset;
+ resource_size_t pcbr_offset;
- pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
+ pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
pcbr_offset,
I915_GTT_OFFSET_NONE,
@@ -7106,7 +7155,11 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
goto out;
}
- pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
+ GEM_BUG_ON(range_overflows_t(u64,
+ dev_priv->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
+ pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
I915_WRITE(VLV_PCBR, pctx_paddr);
out:
@@ -7238,7 +7291,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 gtfifodbg, rc6_mode = 0, pcbr;
+ u32 gtfifodbg, rc6_mode, pcbr;
gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
GT_FIFO_FREE_ENTRIES_CHV);
@@ -7279,10 +7332,9 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
pcbr = I915_READ(VLV_PCBR);
/* 3: Enable RC6 */
- if ((intel_rc6_enabled() & INTEL_RC6_ENABLE) &&
- (pcbr >> VLV_PCBR_ADDR_SHIFT))
+ rc6_mode = 0;
+ if (pcbr >> VLV_PCBR_ADDR_SHIFT)
rc6_mode = GEN7_RC_CTL_TO_MODE;
-
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -7334,7 +7386,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 gtfifodbg, rc6_mode = 0;
+ u32 gtfifodbg;
valleyview_check_pctx(dev_priv);
@@ -7367,12 +7419,8 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
- if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
- rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
-
- intel_print_rc6_info(dev_priv, rc6_mode);
-
- I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -7505,7 +7553,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
unsigned long val;
- if (INTEL_INFO(dev_priv)->gen != 5)
+ if (!IS_GEN5(dev_priv))
return 0;
spin_lock_irq(&mchdev_lock);
@@ -7589,7 +7637,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen != 5)
+ if (!IS_GEN5(dev_priv))
return;
spin_lock_irq(&mchdev_lock);
@@ -7640,7 +7688,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long val;
- if (INTEL_INFO(dev_priv)->gen != 5)
+ if (!IS_GEN5(dev_priv))
return 0;
spin_lock_irq(&mchdev_lock);
@@ -7899,12 +7947,11 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
*/
- if (!i915_modparams.enable_rc6) {
+ if (!sanitize_rc6(dev_priv)) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
intel_runtime_pm_get(dev_priv);
}
- mutex_lock(&dev_priv->drm.struct_mutex);
mutex_lock(&dev_priv->pcu_lock);
/* Initialize RPS limits (for userspace) */
@@ -7946,9 +7993,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
rps->boost_freq = rps->max_freq;
mutex_unlock(&dev_priv->pcu_lock);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_autoenable_gt_powersave(dev_priv);
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -7956,7 +8000,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
- if (!i915_modparams.enable_rc6)
+ if (!HAS_RC6(dev_priv))
intel_runtime_pm_put(dev_priv);
}
@@ -7973,9 +8017,6 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 6)
return;
- if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
- intel_runtime_pm_put(dev_priv);
-
/* gen6_rps_idle() will be called later to disable interrupts */
}
@@ -7985,7 +8026,10 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
intel_disable_gt_powersave(dev_priv);
- gen6_reset_rps_interrupts(dev_priv);
+ if (INTEL_GEN(dev_priv) < 11)
+ gen6_reset_rps_interrupts(dev_priv);
+ else
+ WARN_ON_ONCE(1);
}
static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
@@ -8098,6 +8142,8 @@ static void intel_enable_rps(struct drm_i915_private *dev_priv)
cherryview_enable_rps(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv)) {
valleyview_enable_rps(dev_priv);
+ } else if (WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11)) {
+ /* TODO */
} else if (INTEL_GEN(dev_priv) >= 9) {
gen9_enable_rps(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
@@ -8126,7 +8172,8 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->pcu_lock);
- intel_enable_rc6(dev_priv);
+ if (HAS_RC6(dev_priv))
+ intel_enable_rc6(dev_priv);
intel_enable_rps(dev_priv);
if (HAS_LLC(dev_priv))
intel_enable_llc_pstate(dev_priv);
@@ -8134,65 +8181,6 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->pcu_lock);
}
-static void __intel_autoenable_gt_powersave(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work,
- typeof(*dev_priv),
- gt_pm.autoenable_work.work);
- struct intel_engine_cs *rcs;
- struct drm_i915_gem_request *req;
-
- rcs = dev_priv->engine[RCS];
- if (rcs->last_retired_context)
- goto out;
-
- if (!rcs->init_context)
- goto out;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
-
- req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
- if (IS_ERR(req))
- goto unlock;
-
- if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
- rcs->init_context(req);
-
- /* Mark the device busy, calling intel_enable_gt_powersave() */
- i915_add_request(req);
-
-unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-out:
- intel_runtime_pm_put(dev_priv);
-}
-
-void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (IS_IRONLAKE_M(dev_priv)) {
- ironlake_enable_drps(dev_priv);
- intel_init_emon(dev_priv);
- } else if (INTEL_INFO(dev_priv)->gen >= 6) {
- /*
- * PCU communication is slow and this doesn't need to be
- * done at any specific time, so do this out of our fast path
- * to make resume and init faster.
- *
- * We depend on the HW RC6 power context save/restore
- * mechanism when entering D3 through runtime PM suspend. So
- * disable RPM until RPS/RC6 is properly setup. We can only
- * get here via the driver load/system resume/runtime resume
- * paths, so the _noresume version is enough (and in case of
- * runtime resume it's necessary).
- */
- if (queue_delayed_work(dev_priv->wq,
- &dev_priv->gt_pm.autoenable_work,
- round_jiffies_up_relative(HZ)))
- intel_runtime_pm_get_noresume(dev_priv);
- }
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -8504,13 +8492,14 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
if (!HAS_PCH_CNP(dev_priv))
return;
- /* Wa #1181 */
+ /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
CNP_PWM_CGE_GATING_DISABLE);
}
static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ u32 val;
cnp_init_clock_gating(dev_priv);
/* This is not a WA. Enable for better image quality */
@@ -8525,11 +8514,24 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
+ val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
+ /* ReadHitWriteOnlyDisable:cnl */
+ val |= RCCUNIT_CLKGATE_DIS;
/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
- SARBUNIT_CLKGATE_DIS);
+ val |= SARBUNIT_CLKGATE_DIS;
+ I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+
+ /* Wa_2201832410:cnl */
+ val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
+ val |= GWUNIT_CLKGATE_DIS;
+ I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);
+
+ /* WaDisableVFclkgate:cnl */
+ /* WaVFUnitClockGatingDisable:cnl */
+ val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+ val |= VFUNIT_CLKGATE_DIS;
+ I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9227,8 +9229,9 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
return 0;
}
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
- u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+ u32 mbox, u32 val,
+ int fast_timeout_us, int slow_timeout_ms)
{
int status;
@@ -9251,7 +9254,8 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
if (__intel_wait_for_register_fw(dev_priv,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500, 0, NULL)) {
+ fast_timeout_us, slow_timeout_ms,
+ NULL)) {
DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
val, mbox, __builtin_return_address(0));
return -ETIMEDOUT;
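
With the explicit fast/slow timeouts added here, existing sandybridge_pcode_write() callers presumably keep the old behaviour through a thin wrapper that passes the previous 500 us busy-wait and no slow timeout. A hedged sketch of such a wrapper (its real location is not part of this hunk):

/* Sketch only: preserves the previous 500us/0ms behaviour for old callers. */
static inline int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
                                          u32 mbox, u32 val)
{
        return sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0);
}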
@@ -9324,7 +9328,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
ret = 0;
goto out;
}
- ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
if (!ret)
goto out;
@@ -9416,8 +9420,6 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->pcu_lock);
- INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
- __intel_autoenable_gt_powersave);
atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
dev_priv->runtime_pm.suspended = false;
@@ -9430,12 +9432,14 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
u32 lower, upper, tmp;
int loop = 2;
- /* The register accessed do not need forcewake. We borrow
+ /*
+ * The registers accessed do not need forcewake. We borrow
* uncore lock to prevent concurrent access to range reg.
*/
- spin_lock_irq(&dev_priv->uncore.lock);
+ lockdep_assert_held(&dev_priv->uncore.lock);
- /* vlv and chv residency counters are 40 bits in width.
+ /*
+ * vlv and chv residency counters are 40 bits in width.
* With a control bit, we can choose between upper or lower
* 32bit window into this counter.
*
@@ -9459,44 +9463,99 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
upper = I915_READ_FW(reg);
} while (upper != tmp && --loop);
- /* Everywhere else we always use VLV_COUNTER_CONTROL with the
+ /*
+ * Everywhere else we always use VLV_COUNTER_CONTROL with the
* VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
* now.
*/
- spin_unlock_irq(&dev_priv->uncore.lock);
-
return lower | (u64)upper << 8;
}
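
The VLV/CHV residency counters are 40 bits wide and are read through two 32-bit windows, re-reading until both halves agree, then reassembled as lower | (u64)upper << 8. A standalone sketch of that reconstruction, assuming the high window exposes bits 39:8 of the counter (the overlap is what makes the plain OR safe):

#include <assert.h>
#include <stdint.h>

/*
 * Reassemble a 40-bit counter from its low (bits 31:0) and high
 * (bits 39:8, assumed) 32-bit windows, as vlv_residency_raw() does.
 * The overlapping bits 31:8 are identical in both windows once the
 * re-read loop has converged, so a plain OR reconstructs the value.
 */
static uint64_t reassemble40(uint32_t lower, uint32_t upper)
{
        return (uint64_t)lower | ((uint64_t)upper << 8);
}

int main(void)
{
        uint64_t counter = 0x123456789aULL;             /* arbitrary 40-bit value */
        uint32_t lower = counter & 0xffffffffu;         /* bits 31:0 */
        uint32_t upper = (counter >> 8) & 0xffffffffu;  /* bits 39:8 */

        assert(reassemble40(lower, upper) == counter);
        return 0;
}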
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
- u64 time_hw, units, div;
+ u64 time_hw, prev_hw, overflow_hw;
+ unsigned int fw_domains;
+ unsigned long flags;
+ unsigned int i;
+ u32 mul, div;
- if (!intel_rc6_enabled())
+ if (!HAS_RC6(dev_priv))
return 0;
- intel_runtime_pm_get(dev_priv);
+ /*
+ * Store previous hw counter values for counter wrap-around handling.
+ *
+ * There are only four interesting registers and they live next to each
+ * other so we can use the relative address, compared to the smallest
+ * one as the index into driver storage.
+ */
+ i = (i915_mmio_reg_offset(reg) -
+ i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+ if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
+ return 0;
+
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- units = 1000;
+ mul = 1000000;
div = dev_priv->czclk_freq;
-
+ overflow_hw = BIT_ULL(40);
time_hw = vlv_residency_raw(dev_priv, reg);
- } else if (IS_GEN9_LP(dev_priv)) {
- units = 1000;
- div = 1200; /* 833.33ns */
-
- time_hw = I915_READ(reg);
} else {
- units = 128000; /* 1.28us */
- div = 100000;
+ /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
+ if (IS_GEN9_LP(dev_priv)) {
+ mul = 10000;
+ div = 12;
+ } else {
+ mul = 1280;
+ div = 1;
+ }
- time_hw = I915_READ(reg);
+ overflow_hw = BIT_ULL(32);
+ time_hw = I915_READ_FW(reg);
}
- intel_runtime_pm_put(dev_priv);
- return DIV_ROUND_UP_ULL(time_hw * units, div);
+ /*
+ * Counter wrap handling.
+ *
+ * This relies on a sufficient frequency of queries; otherwise the
+ * counters can still wrap undetected between two reads.
+ */
+ prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
+ dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
+
+ /* RC6 delta from last sample. */
+ if (time_hw >= prev_hw)
+ time_hw -= prev_hw;
+ else
+ time_hw += overflow_hw - prev_hw;
+
+ /* Add delta to RC6 extended raw driver copy. */
+ time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
+ dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ return mul_u64_u32_div(time_hw, mul, div);
+}
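
intel_rc6_residency_ns() now accumulates deltas between raw samples in driver storage, handling the 32- or 40-bit counter wrap, and only then converts to nanoseconds via mul_u64_u32_div(). A self-contained sketch of the delta-and-accumulate step with illustrative numbers (the real mul/div constants are per platform, as in the code above):

#include <stdint.h>
#include <stdio.h>

struct rc6_state {
        uint64_t prev_hw;       /* last raw hardware sample */
        uint64_t cur;           /* extended, monotonic driver copy */
};

/* Fold one raw sample into the extended counter, handling wrap-around
 * of a counter that is 'overflow_hw' counts wide (2^32 or 2^40). */
static uint64_t rc6_update(struct rc6_state *st, uint64_t time_hw,
                           uint64_t overflow_hw)
{
        uint64_t delta;

        if (time_hw >= st->prev_hw)
                delta = time_hw - st->prev_hw;
        else
                delta = time_hw + overflow_hw - st->prev_hw;    /* wrapped */

        st->prev_hw = time_hw;
        st->cur += delta;
        return st->cur;
}

int main(void)
{
        struct rc6_state st = { .prev_hw = 0, .cur = 0 };
        uint64_t overflow = 1ull << 32;                 /* 32-bit counter */

        rc6_update(&st, 0xfffffff0ull, overflow);       /* near the top */
        rc6_update(&st, 0x00000010ull, overflow);       /* wrapped sample */

        /* 1.28us per count on big-core parts: mul=1280, div=1 gives ns. */
        printf("extended count %llu, ~%llu ns\n",
               (unsigned long long)st.cur,
               (unsigned long long)(st.cur * 1280));
        return 0;
}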
+
+u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
+{
+ u32 cagf;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+
+ return cagf;
}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 55ea5eb..23175c5 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -56,12 +56,109 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static bool is_edp_psr(struct intel_dp *intel_dp)
+static inline enum intel_display_power_domain
+psr_aux_domain(struct intel_dp *intel_dp)
{
- if (!intel_dp_is_edp(intel_dp))
+ /* CNL HW requires corresponding AUX IOs to be powered up for PSR.
+ * However, for non-A AUX ports the corresponding non-EDP transcoders
+ * would have already enabled power well 2 and DC_OFF. This means we can
+ * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
+ * specific AUX_IO reference without powering up any extra wells.
+ * Note that PSR is enabled only on Port A even though this function
+ * returns the correct domain for other ports too.
+ */
+ return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+ intel_dp->aux_power_domain;
+}
+
+static void psr_aux_io_power_get(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+
+ if (INTEL_GEN(dev_priv) < 10)
+ return;
+
+ intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
+}
+
+static void psr_aux_io_power_put(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+
+ if (INTEL_GEN(dev_priv) < 10)
+ return;
+
+ intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
+}
+
+static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
+{
+ uint8_t psr_caps = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
+ return false;
+ return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+}
+
+static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+ uint8_t dprx = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dprx) != 1)
return false;
+ return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
+
+static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
+{
+ uint8_t alpm_caps = 0;
- return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+ &alpm_caps) != 1)
+ return false;
+ return alpm_caps & DP_ALPM_CAP;
+}
+
+void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+
+ if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+ dev_priv->psr.sink_support = true;
+ DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ }
+
+ if (INTEL_GEN(dev_priv) >= 9 &&
+ (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
+ uint8_t frame_sync_cap;
+
+ dev_priv->psr.sink_support = true;
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+ &frame_sync_cap) != 1)
+ frame_sync_cap = 0;
+ dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
+ /* PSR2 needs frame sync as well */
+ dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+ DRM_DEBUG_KMS("PSR2 %s on sink",
+ dev_priv->psr.psr2_support ? "supported" : "not supported");
+
+ if (dev_priv->psr.psr2_support) {
+ dev_priv->psr.y_cord_support =
+ intel_dp_get_y_cord_status(intel_dp);
+ dev_priv->psr.colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+ dev_priv->psr.alpm =
+ intel_dp_get_alpm_status(intel_dp);
+ }
+ }
}
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
@@ -134,7 +231,7 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
- if (INTEL_INFO(dev_priv)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return DP_AUX_CH_CTL(port);
else
return EDP_PSR_AUX_CTL;
@@ -143,7 +240,7 @@ static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
- if (INTEL_INFO(dev_priv)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return DP_AUX_CH_DATA(port, index);
else
return EDP_PSR_AUX_DATA(index);
@@ -163,7 +260,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
[3] = 1 - 1,
[4] = DP_SET_POWER_D0,
};
- enum port port = dig_port->port;
+ enum port port = dig_port->base.port;
u32 aux_ctl;
int i;
@@ -349,6 +446,50 @@ static void hsw_psr_activate(struct intel_dp *intel_dp)
hsw_activate_psr1(intel_dp);
}
+static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
+ int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
+ int psr_max_h = 0, psr_max_v = 0;
+
+ /*
+ * FIXME psr2_support is messed up. It's both computed
+ * dynamically during PSR enable, and extracted from sink
+ * caps during eDP detection.
+ */
+ if (!dev_priv->psr.psr2_support)
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ psr_max_h = 4096;
+ psr_max_v = 2304;
+ } else if (IS_GEN9(dev_priv)) {
+ psr_max_h = 3640;
+ psr_max_v = 2304;
+ }
+
+ if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
+ DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+ crtc_hdisplay, crtc_vdisplay,
+ psr_max_h, psr_max_v);
+ return false;
+ }
+
+ /*
+ * FIXME: enable PSR2 only for Y-coordinate PSR2 panels.
+ * After the GTC implementation, remove this restriction.
+ */
+ if (!dev_priv->psr.y_cord_support) {
+ DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
+ return false;
+ }
+
+ return true;
+}
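
intel_psr2_config_valid() gates PSR2 on per-platform resolution limits (4096x2304 on GEN10+/GLK, 3640x2304 on GEN9) and on sink Y-coordinate support. A minimal standalone version of the resolution check, with the platform tests reduced to plain parameters for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the resolution gate in intel_psr2_config_valid(); gen/glk are
 * stand-ins for the real platform checks. */
static bool psr2_resolution_ok(int hdisplay, int vdisplay, int gen, bool glk)
{
        int max_h = 0, max_v = 0;

        if (gen >= 10 || glk) {
                max_h = 4096;
                max_v = 2304;
        } else if (gen == 9) {
                max_h = 3640;
                max_v = 2304;
        }

        return hdisplay <= max_h && vdisplay <= max_v;
}

int main(void)
{
        printf("3840x2160 on gen10: %s\n",
               psr2_resolution_ok(3840, 2160, 10, false) ? "ok" : "too big");
        printf("3840x2160 on gen9:  %s\n",
               psr2_resolution_ok(3840, 2160, 9, false) ? "ok" : "too big");
        return 0;
}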
+
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -358,10 +499,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
&crtc_state->base.adjusted_mode;
int psr_setup_time;
- if (!HAS_PSR(dev_priv))
- return;
-
- if (!is_edp_psr(intel_dp))
+ if (!CAN_PSR(dev_priv))
return;
if (!i915_modparams.enable_psr) {
@@ -376,7 +514,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* ones. Since by Display design transcoder EDP is tied to port A
* we can safely escape based on the port A.
*/
- if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
+ if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return;
}
@@ -414,34 +552,14 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- /*
- * FIXME psr2_support is messed up. It's both computed
- * dynamically during PSR enable, and extracted from sink
- * caps during eDP detection.
- */
- if (!dev_priv->psr.psr2_support) {
- crtc_state->has_psr = true;
- return;
- }
-
- /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
- if (adjusted_mode->crtc_hdisplay > 3200 ||
- adjusted_mode->crtc_vdisplay > 2000) {
- DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n");
- return;
- }
-
- /*
- * FIXME:enable psr2 only for y-cordinate psr2 panels
- * After gtc implementation , remove this restriction.
- */
- if (!dev_priv->psr.y_cord_support) {
- DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
+ if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+ DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
return;
}
crtc_state->has_psr = true;
- crtc_state->has_psr2 = true;
+ crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+ DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}
static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -470,13 +588,15 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 chicken;
+ psr_aux_io_power_get(intel_dp);
+
if (dev_priv->psr.psr2_support) {
chicken = PSR2_VSC_ENABLE_PROG_HEADER;
if (dev_priv->psr.y_cord_support)
chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
- I915_WRITE(EDP_PSR_DEBUG_CTL,
+ I915_WRITE(EDP_PSR_DEBUG,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
EDP_PSR_DEBUG_MASK_LPSP |
@@ -490,7 +610,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
* preventing other hw tracking issues now we can rely
* on frontbuffer tracking.
*/
- I915_WRITE(EDP_PSR_DEBUG_CTL,
+ I915_WRITE(EDP_PSR_DEBUG,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
EDP_PSR_DEBUG_MASK_LPSP);
@@ -514,6 +634,9 @@ void intel_psr_enable(struct intel_dp *intel_dp,
if (!crtc_state->has_psr)
return;
+ if (WARN_ON(!CAN_PSR(dev_priv)))
+ return;
+
WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
@@ -522,8 +645,6 @@ void intel_psr_enable(struct intel_dp *intel_dp,
}
dev_priv->psr.psr2_support = crtc_state->has_psr2;
- dev_priv->psr.source_ok = true;
-
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.setup_vsc(intel_dp, crtc_state);
@@ -599,7 +720,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
0);
if (dev_priv->psr.psr2_support) {
- psr_status = EDP_PSR2_STATUS_CTL;
+ psr_status = EDP_PSR2_STATUS;
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
I915_WRITE(EDP_PSR2_CTL,
@@ -607,7 +728,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
} else {
- psr_status = EDP_PSR_STATUS_CTL;
+ psr_status = EDP_PSR_STATUS;
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
I915_WRITE(EDP_PSR_CTL,
@@ -627,6 +748,8 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
+
+ psr_aux_io_power_put(intel_dp);
}
/**
@@ -646,6 +769,9 @@ void intel_psr_disable(struct intel_dp *intel_dp,
if (!old_crtc_state->has_psr)
return;
+ if (WARN_ON(!CAN_PSR(dev_priv)))
+ return;
+
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
@@ -679,19 +805,19 @@ static void intel_psr_work(struct work_struct *work)
if (HAS_DDI(dev_priv)) {
if (dev_priv->psr.psr2_support) {
if (intel_wait_for_register(dev_priv,
- EDP_PSR2_STATUS_CTL,
- EDP_PSR2_STATUS_STATE_MASK,
- 0,
- 50)) {
+ EDP_PSR2_STATUS,
+ EDP_PSR2_STATUS_STATE_MASK,
+ 0,
+ 50)) {
DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
return;
}
} else {
if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS_CTL,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
- 50)) {
+ EDP_PSR_STATUS,
+ EDP_PSR_STATUS_STATE_MASK,
+ 0,
+ 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
@@ -796,7 +922,7 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
enum pipe pipe;
u32 val;
- if (!HAS_PSR(dev_priv))
+ if (!CAN_PSR(dev_priv))
return;
/*
@@ -845,7 +971,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc;
enum pipe pipe;
- if (!HAS_PSR(dev_priv))
+ if (!CAN_PSR(dev_priv))
return;
mutex_lock(&dev_priv->psr.lock);
@@ -885,7 +1011,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc;
enum pipe pipe;
- if (!HAS_PSR(dev_priv))
+ if (!CAN_PSR(dev_priv))
return;
mutex_lock(&dev_priv->psr.lock);
@@ -926,6 +1052,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
+ if (!dev_priv->psr.sink_support)
+ return;
+
/* Per platform default: all disabled. */
if (i915_modparams.enable_psr == -1)
i915_modparams.enable_psr = 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8da1bde..1d59952 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -28,9 +28,12 @@
*/
#include <linux/log2.h>
+
#include <drm/drmP.h>
-#include "i915_drv.h"
#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_drv.h"
@@ -63,7 +66,7 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
}
static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
@@ -72,19 +75,19 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
@@ -119,22 +122,22 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
- if (IS_G4X(req->i915) || IS_GEN5(req->i915))
+ if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
cmd |= MI_INVALIDATE_ISP;
}
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
-/**
+/*
* Emits a PIPE_CONTROL with a non-zero post-sync operation, for
* implementing two workarounds on gen6. From section 1.4.7.1
* "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
@@ -172,13 +175,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
+intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 *cs;
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -188,9 +191,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
*cs++ = 0; /* low dword */
*cs++ = 0; /* high dword */
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -200,21 +203,21 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
*cs++ = 0;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- ret = intel_emit_post_sync_nonzero_flush(req);
+ ret = intel_emit_post_sync_nonzero_flush(rq);
if (ret)
return ret;
@@ -244,7 +247,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -252,17 +255,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
*cs++ = flags;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
+gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
u32 *cs;
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -270,16 +273,16 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = 0;
*cs++ = 0;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@@ -321,10 +324,10 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
- gen7_render_ring_cs_stall_wa(req);
+ gen7_render_ring_cs_stall_wa(rq);
}
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -332,51 +335,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
*cs++ = flags;
*cs++ = scratch_addr;
*cs++ = 0;
- intel_ring_advance(req, cs);
-
- return 0;
-}
-
-static int
-gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
-{
- u32 flags;
- u32 *cs;
-
- cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- flags = PIPE_CONTROL_CS_STALL;
-
- if (mode & EMIT_FLUSH) {
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
- }
- if (mode & EMIT_INVALIDATE) {
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-
- /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
- cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD,
- 0);
- }
-
- cs = gen8_emit_pipe_control(cs, flags,
- i915_ggtt_offset(req->engine->scratch) +
- 2 * CACHELINE_BYTES);
-
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -424,7 +383,6 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
} else if (IS_GEN6(dev_priv)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
- /* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(engine->mmio_base);
}
@@ -434,13 +392,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
I915_WRITE(mmio, engine->status_page.ggtt_offset);
POSTING_READ(mmio);
- /*
- * Flush the TLB for this page
- *
- * FIXME: These two bits have disappeared on gen8, so a question
- * arises: do we still need this and if so how should we go about
- * invalidating the TLB?
- */
+ /* Flush the TLB for this page */
if (IS_GEN(dev_priv, 6, 7)) {
i915_reg_t reg = RING_INSTPM(engine->mmio_base);
@@ -480,10 +432,14 @@ static bool stop_ring(struct intel_engine_cs *engine)
}
}
- I915_WRITE_CTL(engine, 0);
+ I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
+
I915_WRITE_HEAD(engine, 0);
I915_WRITE_TAIL(engine, 0);
+ /* The ring must be empty before it is disabled */
+ I915_WRITE_CTL(engine, 0);
+
return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
@@ -497,13 +453,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
- DRM_DEBUG_KMS("%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- engine->name,
- I915_READ_CTL(engine),
- I915_READ_HEAD(engine),
- I915_READ_TAIL(engine),
- I915_READ_START(engine));
+ DRM_DEBUG_DRIVER("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ engine->name,
+ I915_READ_CTL(engine),
+ I915_READ_HEAD(engine),
+ I915_READ_TAIL(engine),
+ I915_READ_START(engine));
if (!stop_ring(engine)) {
DRM_ERROR("failed to set %s head to zero "
@@ -536,8 +492,8 @@ static int init_ring_common(struct intel_engine_cs *engine)
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (I915_READ_HEAD(engine))
- DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
- engine->name, I915_READ_HEAD(engine));
+ DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
+ engine->name, I915_READ_HEAD(engine));
intel_ring_update_space(ring);
I915_WRITE_HEAD(engine, ring->head);
@@ -575,7 +531,7 @@ out:
}
static void reset_ring_common(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *request)
+ struct i915_request *request)
{
/*
* RC6 must be prevented until the reset is complete and the engine
@@ -604,8 +560,6 @@ static void reset_ring_common(struct intel_engine_cs *engine,
struct intel_context *ce = &request->ctx->engine[engine->id];
struct i915_hw_ppgtt *ppgtt;
- /* FIXME consider gen8 reset */
-
if (ce->state) {
I915_WRITE(CCID,
i915_ggtt_offset(ce->state) |
@@ -637,18 +591,19 @@ static void reset_ring_common(struct intel_engine_cs *engine,
request->ring->head = request->postfix;
} else {
engine->legacy_active_context = NULL;
+ engine->legacy_active_ppgtt = NULL;
}
}
-static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
+static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
- ret = intel_ring_workarounds_emit(req);
+ ret = intel_ring_workarounds_emit(rq);
if (ret != 0)
return ret;
- ret = i915_gem_render_state_emit(req);
+ ret = i915_gem_render_state_emit(rq);
if (ret)
return ret;
@@ -700,71 +655,15 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (INTEL_INFO(dev_priv)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
return init_workarounds_ring(engine);
}
-static void render_ring_cleanup(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- i915_vma_unpin_and_release(&dev_priv->semaphore);
-}
-
-static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
-{
- struct drm_i915_private *dev_priv = req->i915;
- struct intel_engine_cs *waiter;
- enum intel_engine_id id;
-
- for_each_engine(waiter, dev_priv, id) {
- u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
- if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
- continue;
-
- *cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL;
- *cs++ = lower_32_bits(gtt_offset);
- *cs++ = upper_32_bits(gtt_offset);
- *cs++ = req->global_seqno;
- *cs++ = 0;
- *cs++ = MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id);
- *cs++ = 0;
- }
-
- return cs;
-}
-
-static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
+static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
{
- struct drm_i915_private *dev_priv = req->i915;
- struct intel_engine_cs *waiter;
- enum intel_engine_id id;
-
- for_each_engine(waiter, dev_priv, id) {
- u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
- if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
- continue;
-
- *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
- *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
- *cs++ = upper_32_bits(gtt_offset);
- *cs++ = req->global_seqno;
- *cs++ = MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id);
- *cs++ = 0;
- }
-
- return cs;
-}
-
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
-{
- struct drm_i915_private *dev_priv = req->i915;
+ struct drm_i915_private *dev_priv = rq->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int num_rings = 0;
@@ -775,11 +674,11 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
continue;
- mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
+ mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(mbox_reg);
- *cs++ = req->global_seqno;
+ *cs++ = rq->global_seqno;
num_rings++;
}
}
@@ -791,7 +690,7 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
static void cancel_requests(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->timeline->lock, flags);
@@ -799,7 +698,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline->requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (!i915_gem_request_completed(request))
+ if (!i915_request_completed(request))
dma_fence_set_error(&request->fence, -EIO);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -807,120 +706,46 @@ static void cancel_requests(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-static void i9xx_submit_request(struct drm_i915_gem_request *request)
+static void i9xx_submit_request(struct i915_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
- i915_gem_request_submit(request);
+ i915_request_submit(request);
I915_WRITE_TAIL(request->engine,
intel_ring_set_tail(request->ring, request->tail));
}
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
+static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
- *cs++ = req->global_seqno;
+ *cs++ = rq->global_seqno;
*cs++ = MI_USER_INTERRUPT;
- req->tail = intel_ring_offset(req, cs);
- assert_ring_tail_valid(req->ring, req->tail);
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
}
static const int i9xx_emit_breadcrumb_sz = 4;
-/**
- * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
- *
- * @request - request to write to the ring
- *
- * Update the mailbox registers in the *other* rings with the current seqno.
- * This acts like a signal in the canonical semaphore.
- */
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
+static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- return i9xx_emit_breadcrumb(req,
- req->engine->semaphore.signal(req, cs));
-}
-
-static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *cs)
-{
- struct intel_engine_cs *engine = req->engine;
-
- if (engine->semaphore.signal)
- cs = engine->semaphore.signal(req, cs);
-
- *cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE;
- *cs++ = intel_hws_seqno_address(engine);
- *cs++ = 0;
- *cs++ = req->global_seqno;
- /* We're thrashing one dword of HWS. */
- *cs++ = 0;
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- req->tail = intel_ring_offset(req, cs);
- assert_ring_tail_valid(req->ring, req->tail);
-}
-
-static const int gen8_render_emit_breadcrumb_sz = 8;
-
-/**
- * intel_ring_sync - sync the waiter to the signaller on seqno
- *
- * @waiter - ring that is waiting
- * @signaller - ring which has, or will signal
- * @seqno - seqno which the waiter will block on
- */
-
-static int
-gen8_ring_sync_to(struct drm_i915_gem_request *req,
- struct drm_i915_gem_request *signal)
-{
- struct drm_i915_private *dev_priv = req->i915;
- u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
- struct i915_hw_ppgtt *ppgtt;
- u32 *cs;
-
- cs = intel_ring_begin(req, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_SAD_GTE_SDD;
- *cs++ = signal->global_seqno;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
- intel_ring_advance(req, cs);
-
- /* When the !RCS engines idle waiting upon a semaphore, they lose their
- * pagetables and we must reload them before executing the batch.
- * We do this on the i915_switch_context() following the wait and
- * before the dispatch.
- */
- ppgtt = req->ctx->ppgtt;
- if (ppgtt && req->engine->id != RCS)
- ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
- return 0;
+ return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
}
static int
-gen6_ring_sync_to(struct drm_i915_gem_request *req,
- struct drm_i915_gem_request *signal)
+gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
{
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
- u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
+ u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
u32 *cs;
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -932,7 +757,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
*cs++ = signal->global_seqno - 1;
*cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1031,17 +856,17 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
}
static int
-bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+bsd_ring_flush(struct i915_request *rq, u32 mode)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1083,40 +908,21 @@ hsw_vebox_irq_disable(struct intel_engine_cs *engine)
gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
-static void
-gen8_irq_enable(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- engine->irq_keep_mask));
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
-}
-
-static void
-gen8_irq_disable(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
-}
-
static int
-i965_emit_bb_start(struct drm_i915_gem_request *req,
+i965_emit_bb_start(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
*cs++ = offset;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1126,13 +932,13 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
-i830_emit_bb_start(struct drm_i915_gem_request *req,
+i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
+ u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1143,13 +949,13 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
*cs++ = cs_offset;
*cs++ = 0xdeadbeef;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- cs = intel_ring_begin(req, 6 + 2);
+ cs = intel_ring_begin(rq, 6 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1166,39 +972,39 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
/* ... and execute it. */
offset = cs_offset;
}
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
MI_BATCH_NON_SECURE);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-i915_emit_bb_start(struct drm_i915_gem_request *req,
+i915_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
MI_BATCH_NON_SECURE);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1359,12 +1165,13 @@ static int context_pin(struct i915_gem_context *ctx)
struct i915_vma *vma = ctx->engine[RCS].state;
int ret;
- /* Clear this page out of any CPU caches for coherent swap-in/out.
+ /*
+ * Clear this page out of any CPU caches for coherent swap-in/out.
* We only want to do this on the first bind so that we do not stall
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
if (ret)
return ret;
}
@@ -1379,11 +1186,34 @@ alloc_context_vma(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ int err;
obj = i915_gem_object_create(i915, engine->context_size);
if (IS_ERR(obj))
return ERR_CAST(obj);
+ if (engine->default_state) {
+ void *defaults, *vaddr;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ defaults = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(defaults)) {
+ err = PTR_ERR(defaults);
+ goto err_map;
+ }
+
+ memcpy(vaddr, defaults, engine->context_size);
+
+ i915_gem_object_unpin_map(engine->default_state);
+ i915_gem_object_unpin_map(obj);
+ }
+
/*
* Try to make the context utilize L3 as well as LLC.
*
@@ -1405,10 +1235,18 @@ alloc_context_vma(struct intel_engine_cs *engine)
}
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
- if (IS_ERR(vma))
- i915_gem_object_put(obj);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
return vma;
+
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
}
static struct intel_ring *
@@ -1441,20 +1279,9 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
if (ret)
goto err;
- ce->state->obj->mm.dirty = true;
ce->state->obj->pin_global++;
}
- /* The kernel context is only used as a placeholder for flushing the
- * active context. It is never used for submitting user rendering and
- * as such never requires the golden render context, and so we can skip
- * emitting it when we switch to the kernel context. This is required
- * as during eviction we cannot allocate and pin the renderstate in
- * order to initialise the context.
- */
- if (i915_gem_context_is_kernel(ctx))
- ce->initialised = true;
-
i915_gem_context_get(ctx);
out:
@@ -1548,10 +1375,194 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
intel_ring_reset(engine->buffer, 0);
}
-static int ring_request_alloc(struct drm_i915_gem_request *request)
+static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
+ struct drm_i915_private *i915 = rq->i915;
+ struct intel_engine_cs *engine = rq->engine;
+ enum intel_engine_id id;
+ const int num_rings =
+ /* Use an extended w/a on gen7 if signalling from other rings */
+ (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
+ INTEL_INFO(i915)->num_rings - 1 :
+ 0;
+ int len;
u32 *cs;
+ flags |= MI_MM_SPACE_GTT;
+ if (IS_HASWELL(i915))
+ /* These flags are for resource streamer on HSW+ */
+ flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
+ else
+ flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
+
+ len = 4;
+ if (IS_GEN7(i915))
+ len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+
+ cs = intel_ring_begin(rq, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
+ if (IS_GEN7(i915)) {
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ for_each_engine(signaller, i915, id) {
+ if (signaller == engine)
+ continue;
+
+ *cs++ = i915_mmio_reg_offset(
+ RING_PSMI_CTL(signaller->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+ }
+ }
+
+ *cs++ = MI_NOOP;
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags;
+ /*
+ * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+ * WaMiSetContext_Hang:snb,ivb,vlv
+ */
+ *cs++ = MI_NOOP;
+
+ if (IS_GEN7(i915)) {
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+ i915_reg_t last_reg = {}; /* keep gcc quiet */
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ for_each_engine(signaller, i915, id) {
+ if (signaller == engine)
+ continue;
+
+ last_reg = RING_PSMI_CTL(signaller->mmio_base);
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = _MASKED_BIT_DISABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+
+ /* Insert a delay before the next switch! */
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = MI_NOOP;
+ }
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
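
mi_set_context() budgets its dwords up front: 4 for the MI_SET_CONTEXT sequence, plus on gen7 the MI_ARB_ON_OFF pair and, when signalling other rings, two LRI blocks and the closing SRM delay. A small self-check of that arithmetic (sketch only; the per-ring dword counts are read off the emission code above):

#include <assert.h>

/* Dword budget claimed by mi_set_context() on gen7 for 'n' other rings. */
static int budgeted_len(int n)
{
        return 4 + 2 + (n ? 4 * n + 6 : 0);
}

/* Dwords actually emitted by the gen7 path above for 'n' other rings. */
static int emitted_len(int n)
{
        int len = 1;                    /* MI_ARB_ON_OFF | MI_ARB_DISABLE */

        if (n)
                len += 1 + 2 * n;       /* LRI header + reg/value pairs */
        len += 4;                       /* NOOP, SET_CONTEXT, state, NOOP */
        if (n)
                len += 1 + 2 * n + 4;   /* LRI pairs + SRM delay sequence */
        len += 1;                       /* MI_ARB_ON_OFF | MI_ARB_ENABLE */
        return len;
}

int main(void)
{
        for (int n = 0; n <= 4; n++)
                assert(budgeted_len(n) == emitted_len(n));
        return 0;
}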
+
+static int remap_l3(struct i915_request *rq, int slice)
+{
+ u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
+ int i;
+
+ if (!remap_info)
+ return 0;
+
+ cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Note: We do not worry about the concurrent register cacheline hang
+ * here because no other code should access these registers other than
+ * at initialization time.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
+ for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+ *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+ *cs++ = remap_info[i];
+ }
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int switch_context(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_gem_context *to_ctx = rq->ctx;
+ struct i915_hw_ppgtt *to_mm =
+ to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ struct i915_gem_context *from_ctx = engine->legacy_active_context;
+ struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
+ u32 hw_flags = 0;
+ int ret, i;
+
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
+
+ if (to_mm != from_mm ||
+ (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
+ trace_switch_mm(engine, to_ctx);
+ ret = to_mm->switch_mm(to_mm, rq);
+ if (ret)
+ goto err;
+
+ to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
+ engine->legacy_active_ppgtt = to_mm;
+ hw_flags = MI_FORCE_RESTORE;
+ }
+
+ if (to_ctx->engine[engine->id].state &&
+ (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
+ GEM_BUG_ON(engine->id != RCS);
+
+ /*
+ * The kernel context(s) is treated as pure scratch and is not
+ * expected to retain any state (as we sacrifice it during
+ * suspend and on resume it may be corrupted). This is ok,
+ * as nothing actually executes using the kernel context; it
+ * is purely used for flushing user contexts.
+ */
+ if (i915_gem_context_is_kernel(to_ctx))
+ hw_flags = MI_RESTORE_INHIBIT;
+
+ ret = mi_set_context(rq, hw_flags);
+ if (ret)
+ goto err_mm;
+
+ engine->legacy_active_context = to_ctx;
+ }
+
+ if (to_ctx->remap_slice) {
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(to_ctx->remap_slice & BIT(i)))
+ continue;
+
+ ret = remap_l3(rq, i);
+ if (ret)
+ goto err_ctx;
+ }
+
+ to_ctx->remap_slice = 0;
+ }
+
+ return 0;
+
+err_ctx:
+ engine->legacy_active_context = from_ctx;
+err_mm:
+ engine->legacy_active_ppgtt = from_mm;
+err:
+ return ret;
+}
+
+static int ring_request_alloc(struct i915_request *request)
+{
+ int ret;
+
GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
/* Flush enough space to reduce the likelihood of waiting after
@@ -1560,37 +1571,28 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- cs = intel_ring_begin(request, 0);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+ ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+ if (ret)
+ return ret;
+
+ ret = switch_context(request);
+ if (ret)
+ return ret;
request->reserved_space -= LEGACY_REQUEST_SIZE;
return 0;
}
-static noinline int wait_for_space(struct drm_i915_gem_request *req,
- unsigned int bytes)
+static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
- struct intel_ring *ring = req->ring;
- struct drm_i915_gem_request *target;
+ struct i915_request *target;
long timeout;
- lockdep_assert_held(&req->i915->drm.struct_mutex);
+ lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
if (intel_ring_update_space(ring) >= bytes)
return 0;
- /*
- * Space is reserved in the ringbuffer for finalising the request,
- * as that cannot be allowed to fail. During request finalisation,
- * reserved_space is set to 0 to stop the overallocation and the
- * assumption is that then we never need to wait (which has the
- * risk of failing with EINTR).
- *
- * See also i915_gem_request_alloc() and i915_add_request().
- */
- GEM_BUG_ON(!req->reserved_space);
-
list_for_each_entry(target, &ring->request_list, ring_link) {
/* Would completion of this request free enough space? */
if (bytes <= __intel_ring_space(target->postfix,
@@ -1601,23 +1603,38 @@ static noinline int wait_for_space(struct drm_i915_gem_request *req,
if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- timeout = i915_wait_request(target,
+ timeout = i915_request_wait(target,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0)
return timeout;
- i915_gem_request_retire_upto(target);
+ i915_request_retire_upto(target);
intel_ring_update_space(ring);
GEM_BUG_ON(ring->space < bytes);
return 0;
}
-u32 *intel_ring_begin(struct drm_i915_gem_request *req,
- unsigned int num_dwords)
+int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
- struct intel_ring *ring = req->ring;
+ GEM_BUG_ON(bytes > ring->effective_size);
+ if (unlikely(bytes > ring->effective_size - ring->emit))
+ bytes += ring->size - ring->emit;
+
+ if (unlikely(bytes > ring->space)) {
+ int ret = wait_for_space(ring, bytes);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ GEM_BUG_ON(ring->space < bytes);
+ return 0;
+}
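
intel_ring_wait_for_space() must account for the wrap: when a request does not fit in the usable space left before the end of the ring, the tail is padded back to the start, so the padding (ring->size - ring->emit) is added to the bytes that have to drain. A standalone sketch of that accounting with made-up ring geometry:

#include <stdio.h>

/* Mirror the wrap accounting in intel_ring_wait_for_space():
 * effective_size excludes the tail scratch area, emit is the current
 * write offset. The values below are illustrative only. */
static unsigned int bytes_needed(unsigned int bytes, unsigned int size,
                                 unsigned int effective_size,
                                 unsigned int emit)
{
        if (bytes > effective_size - emit)      /* does not fit before the end */
                bytes += size - emit;           /* also consume the wrap padding */
        return bytes;
}

int main(void)
{
        unsigned int size = 16 * 1024;
        unsigned int effective = size - 64;
        unsigned int emit = 16 * 1024 - 256;

        printf("128B request needs %u bytes of space\n",
               bytes_needed(128, size, effective, emit));      /* fits: 128 */
        printf("512B request needs %u bytes of space\n",
               bytes_needed(512, size, effective, emit));      /* wraps: 768 */
        return 0;
}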
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
+{
+ struct intel_ring *ring = rq->ring;
const unsigned int remain_usable = ring->effective_size - ring->emit;
const unsigned int bytes = num_dwords * sizeof(u32);
unsigned int need_wrap = 0;
@@ -1627,7 +1644,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
/* Packets must be qword aligned. */
GEM_BUG_ON(num_dwords & 1);
- total_bytes = bytes + req->reserved_space;
+ total_bytes = bytes + rq->reserved_space;
GEM_BUG_ON(total_bytes > ring->effective_size);
if (unlikely(total_bytes > remain_usable)) {
@@ -1648,12 +1665,25 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
* wrap and only need to effectively wait for the
* reserved size from the start of ringbuffer.
*/
- total_bytes = req->reserved_space + remain_actual;
+ total_bytes = rq->reserved_space + remain_actual;
}
}
if (unlikely(total_bytes > ring->space)) {
- int ret = wait_for_space(req, total_bytes);
+ int ret;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the
+ * request, as that cannot be allowed to fail. During request
+ * finalisation, reserved_space is set to 0 to stop the
+ * overallocation and the assumption is that then we never need
+ * to wait (which has the risk of failing with EINTR).
+ *
+ * See also i915_request_alloc() and i915_request_add().
+ */
+ GEM_BUG_ON(!rq->reserved_space);
+
+ ret = wait_for_space(ring, total_bytes);
if (unlikely(ret))
return ERR_PTR(ret);
}
@@ -1680,29 +1710,28 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
+int intel_ring_cacheline_align(struct i915_request *rq)
{
- int num_dwords =
- (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ int num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
u32 *cs;
if (num_dwords == 0)
return 0;
- num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
- cs = intel_ring_begin(req, num_dwords);
+ num_dwords = CACHELINE_BYTES / sizeof(u32) - num_dwords;
+ cs = intel_ring_begin(rq, num_dwords);
if (IS_ERR(cs))
return PTR_ERR(cs);
while (num_dwords--)
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
-static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
+static void gen6_bsd_submit_request(struct i915_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
@@ -1739,17 +1768,15 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
- if (INTEL_GEN(req->i915) >= 8)
- cmd += 1;
/* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
@@ -1769,49 +1796,20 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
*cs++ = cmd;
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
- if (INTEL_GEN(req->i915) >= 8) {
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
- } else {
- *cs++ = 0;
- *cs++ = MI_NOOP;
- }
- intel_ring_advance(req, cs);
- return 0;
-}
-
-static int
-gen8_emit_bb_start(struct drm_i915_gem_request *req,
- u64 offset, u32 len,
- unsigned int dispatch_flags)
-{
- bool ppgtt = USES_PPGTT(req->i915) &&
- !(dispatch_flags & I915_DISPATCH_SECURE);
- u32 *cs;
-
- cs = intel_ring_begin(req, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* FIXME(BDW): Address space and security selectors. */
- *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
- I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
-
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-hsw_emit_bb_start(struct drm_i915_gem_request *req,
+hsw_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1821,19 +1819,19 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
MI_BATCH_RESOURCE_STREAMER : 0);
/* bit0-7 is the length on GEN6+ */
*cs++ = offset;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int
-gen6_emit_bb_start(struct drm_i915_gem_request *req,
+gen6_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
- cs = intel_ring_begin(req, 2);
+ cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1841,24 +1839,22 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
0 : MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
*cs++ = offset;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
/* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
+static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
- cs = intel_ring_begin(req, 4);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
- if (INTEL_GEN(req->i915) >= 8)
- cmd += 1;
/* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
@@ -1877,14 +1873,9 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
cmd |= MI_INVALIDATE_TLB;
*cs++ = cmd;
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
- if (INTEL_GEN(req->i915) >= 8) {
- *cs++ = 0; /* upper addr */
- *cs++ = 0; /* value */
- } else {
- *cs++ = 0;
- *cs++ = MI_NOOP;
- }
- intel_ring_advance(req, cs);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
return 0;
}
@@ -1892,110 +1883,61 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
- struct drm_i915_gem_object *obj;
- int ret, i;
+ int i;
- if (!i915_modparams.semaphores)
+ if (!HAS_LEGACY_SEMAPHORES(dev_priv))
return;
- if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
- struct i915_vma *vma;
-
- obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
- if (IS_ERR(obj))
- goto err;
-
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
- if (IS_ERR(vma))
- goto err_obj;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret)
- goto err_obj;
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_obj;
+ GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
+ engine->semaphore.sync_to = gen6_ring_sync_to;
+ engine->semaphore.signal = gen6_signal;
- dev_priv->semaphore = vma;
- }
-
- if (INTEL_GEN(dev_priv) >= 8) {
- u32 offset = i915_ggtt_offset(dev_priv->semaphore);
-
- engine->semaphore.sync_to = gen8_ring_sync_to;
- engine->semaphore.signal = gen8_xcs_signal;
-
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- u32 ring_offset;
-
- if (i != engine->id)
- ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
- else
- ring_offset = MI_SEMAPHORE_SYNC_INVALID;
-
- engine->semaphore.signal_ggtt[i] = ring_offset;
- }
- } else if (INTEL_GEN(dev_priv) >= 6) {
- engine->semaphore.sync_to = gen6_ring_sync_to;
- engine->semaphore.signal = gen6_signal;
-
- /*
- * The current semaphore is only applied on pre-gen8
- * platform. And there is no VCS2 ring on the pre-gen8
- * platform. So the semaphore between RCS and VCS2 is
- * initialized as INVALID. Gen8 will initialize the
- * sema between VCS2 and RCS later.
- */
- for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
- static const struct {
- u32 wait_mbox;
- i915_reg_t mbox_reg;
- } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
- [RCS_HW] = {
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
- },
- [VCS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
- },
- [BCS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
- },
- [VECS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
- },
- };
+ /*
+ * The current semaphore is only applied on pre-gen8
+ * platforms, and there is no VCS2 ring on those
+ * platforms, so the semaphore between RCS and VCS2 is
+ * initialized as INVALID.
+ */
+ for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
+ static const struct {
u32 wait_mbox;
i915_reg_t mbox_reg;
+ } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
+ [RCS_HW] = {
+ [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
+ [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
+ [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+ },
+ [VCS_HW] = {
+ [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
+ [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
+ [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+ },
+ [BCS_HW] = {
+ [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
+ [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
+ [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+ },
+ [VECS_HW] = {
+ [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+ [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+ [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+ },
+ };
+ u32 wait_mbox;
+ i915_reg_t mbox_reg;
- if (i == engine->hw_id) {
- wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
- mbox_reg = GEN6_NOSYNC;
- } else {
- wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
- mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
- }
-
- engine->semaphore.mbox.wait[i] = wait_mbox;
- engine->semaphore.mbox.signal[i] = mbox_reg;
+ if (i == engine->hw_id) {
+ wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
+ mbox_reg = GEN6_NOSYNC;
+ } else {
+ wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
+ mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
}
- }
- return;
-
-err_obj:
- i915_gem_object_put(obj);
-err:
- DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
- i915_modparams.semaphores = 0;
+ engine->semaphore.mbox.wait[i] = wait_mbox;
+ engine->semaphore.mbox.signal[i] = mbox_reg;
+ }
}
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
@@ -2003,11 +1945,7 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
{
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
- if (INTEL_GEN(dev_priv) >= 8) {
- engine->irq_enable = gen8_irq_enable;
- engine->irq_disable = gen8_irq_disable;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
engine->irq_seqno_barrier = gen6_seqno_barrier;
@@ -2028,17 +1966,23 @@ static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
engine->cancel_requests = cancel_requests;
+
+ engine->park = NULL;
+ engine->unpark = NULL;
}
static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
+ i9xx_set_default_submission(engine);
engine->submit_request = gen6_bsd_submit_request;
- engine->cancel_requests = cancel_requests;
}
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
+ /* gen8+ are only supported with execlists */
+ GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
+
intel_ring_init_irq(dev_priv, engine);
intel_ring_init_semaphores(dev_priv, engine);
@@ -2052,26 +1996,20 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
- if (i915_modparams.semaphores) {
+ if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
int num_rings;
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
- if (INTEL_GEN(dev_priv) >= 8) {
- engine->emit_breadcrumb_sz += num_rings * 6;
- } else {
- engine->emit_breadcrumb_sz += num_rings * 3;
- if (num_rings & 1)
- engine->emit_breadcrumb_sz++;
- }
+ engine->emit_breadcrumb_sz += num_rings * 3;
+ if (num_rings & 1)
+ engine->emit_breadcrumb_sz++;
}
engine->set_default_submission = i9xx_set_default_submission;
- if (INTEL_GEN(dev_priv) >= 8)
- engine->emit_bb_start = gen8_emit_bb_start;
- else if (INTEL_GEN(dev_priv) >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(dev_priv) >= 4)
engine->emit_bb_start = i965_emit_bb_start;
@@ -2091,20 +2029,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- if (INTEL_GEN(dev_priv) >= 8) {
- engine->init_context = intel_rcs_ctx_init;
- engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
- engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
- engine->emit_flush = gen8_render_ring_flush;
- if (i915_modparams.semaphores) {
- int num_rings;
-
- engine->semaphore.signal = gen8_rcs_signal;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
- engine->emit_breadcrumb_sz += num_rings * 8;
- }
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
if (IS_GEN6(dev_priv))
@@ -2123,7 +2048,6 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
engine->emit_bb_start = hsw_emit_bb_start;
engine->init_hw = init_render_ring;
- engine->cleanup = render_ring_cleanup;
ret = intel_init_ring_buffer(engine);
if (ret)
@@ -2153,8 +2077,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
if (IS_GEN6(dev_priv))
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
- if (INTEL_GEN(dev_priv) < 8)
- engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
} else {
engine->mmio_base = BSD_RING_BASE;
engine->emit_flush = bsd_ring_flush;
@@ -2174,8 +2097,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
- if (INTEL_GEN(dev_priv) < 8)
- engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
return intel_init_ring_buffer(engine);
}
@@ -2187,12 +2109,9 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
-
- if (INTEL_GEN(dev_priv) < 8) {
- engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- engine->irq_enable = hsw_vebox_irq_enable;
- engine->irq_disable = hsw_vebox_irq_disable;
- }
+ engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+ engine->irq_enable = hsw_vebox_irq_enable;
+ engine->irq_disable = hsw_vebox_irq_disable;
return intel_init_ring_buffer(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2863d5a..0320c2c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -3,9 +3,12 @@
#define _INTEL_RINGBUFFER_H_
#include <linux/hashtable.h>
+
#include "i915_gem_batch_pool.h"
-#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+
+#include "i915_pmu.h"
+#include "i915_request.h"
#include "i915_selftest.h"
struct drm_printer;
@@ -47,16 +50,6 @@ struct intel_hw_status_page {
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
-#define gen8_semaphore_seqno_size sizeof(uint64_t)
-#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
- (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
-#define GEN8_SIGNAL_OFFSET(__ring, to) \
- (dev_priv->semaphore->node.start + \
- GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
-#define GEN8_WAIT_OFFSET(__ring, from) \
- (dev_priv->semaphore->node.start + \
- GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
-
enum intel_engine_hangcheck_action {
ENGINE_IDLE = 0,
ENGINE_WAIT,
@@ -99,7 +92,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
#define instdone_subslice_mask(dev_priv__) \
(INTEL_GEN(dev_priv__) == 7 ? \
- 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
+ 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
for ((slice__) = 0, (subslice__) = 0; \
@@ -124,7 +117,7 @@ struct intel_engine_hangcheck {
unsigned long action_timestamp;
int deadlock;
struct intel_instdone instdone;
- struct drm_i915_gem_request *active_request;
+ struct i915_request *active_request;
bool stalled;
};
@@ -165,8 +158,10 @@ struct i915_ctx_workarounds {
struct i915_vma *vma;
};
-struct drm_i915_gem_request;
-struct intel_render_state;
+struct i915_request;
+
+#define I915_MAX_VCS 4
+#define I915_MAX_VECS 2
/*
* Engine IDs definitions.
@@ -177,8 +172,12 @@ enum intel_engine_id {
BCS,
VCS,
VCS2,
+ VCS3,
+ VCS4,
#define _VCS(n) (VCS + (n))
- VECS
+ VECS,
+ VECS2
+#define _VECS(n) (VECS + (n))
};
struct i915_priolist {
@@ -195,9 +194,9 @@ struct i915_priolist {
*/
struct intel_engine_execlists {
/**
- * @irq_tasklet: softirq tasklet for bottom handler
+ * @tasklet: softirq tasklet for bottom handler
*/
- struct tasklet_struct irq_tasklet;
+ struct tasklet_struct tasklet;
/**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
@@ -210,6 +209,19 @@ struct intel_engine_execlists {
bool no_priolist;
/**
+ * @submit_reg: gen-specific execlist submission register
+ * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
+ * the ExecList Submission Queue Contents register array for Gen11+
+ */
+ u32 __iomem *submit_reg;
+
+ /**
+ * @ctrl_reg: the enhanced execlists control register, used to load the
+ * submit queue on the HW and to request preemptions to idle
+ */
+ u32 __iomem *ctrl_reg;
+
+ /**
* @port: execlist port states
*
* For each hardware ELSP (ExecList Submission Port) we keep
@@ -223,7 +235,7 @@ struct intel_engine_execlists {
/**
* @request_count: combined request and submission count
*/
- struct drm_i915_gem_request *request_count;
+ struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
@@ -253,6 +265,7 @@ struct intel_engine_execlists {
unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
+#define EXECLISTS_ACTIVE_HWACK 2
/**
* @port_mask: number of execlist ports - 1
@@ -260,6 +273,16 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
+ * @queue_priority: Highest pending priority.
+ *
+ * When we add requests into the queue, or adjust the priority of
+ * executing requests, we compute the maximum priority of those
+ * pending requests. We can then use this value to determine if
+ * we need to preempt the executing requests to service the queue.
+ */
+ int queue_priority;
+
+ /**
* @queue: queue of requests, in priority lists
*/
struct rb_root queue;
@@ -283,6 +306,11 @@ struct intel_engine_execlists {
* @csb_use_mmio: access csb through mmio, instead of hwsp
*/
bool csb_use_mmio;
+
+ /**
+ * @preempt_complete_status: expected CSB upon completing preemption
+ */
+ u32 preempt_complete_status;
};
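
A hedged sketch of how a scheduler can use a cached highest-pending priority such as @queue_priority to decide whether to preempt; the fields and names below are simplified stand-ins, not the driver's submission code:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified scheduler state: the priority of the last request
 * submitted to the hardware and the highest priority still queued
 * (INT_MIN when the queue is empty, so it never wins). */
struct sched_state {
	int last_submitted_priority;
	int queue_priority;
};

/* Preempt only when something queued outranks what is running. */
static bool need_preempt(const struct sched_state *s)
{
	return s->queue_priority > s->last_submitted_priority;
}

int main(void)
{
	struct sched_state s = {
		.last_submitted_priority = 0,
		.queue_priority = 1024,
	};

	printf("preempt: %s\n", need_preempt(&s) ? "yes" : "no");

	s.queue_priority = INT_MIN;	/* queue drained */
	printf("preempt: %s\n", need_preempt(&s) ? "yes" : "no");
	return 0;
}
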
#define INTEL_ENGINE_CS_MAX_NAME 8
@@ -290,11 +318,14 @@ struct intel_engine_execlists {
struct intel_engine_cs {
struct drm_i915_private *i915;
char name[INTEL_ENGINE_CS_MAX_NAME];
+
enum intel_engine_id id;
- unsigned int uabi_id;
unsigned int hw_id;
unsigned int guc_id;
+ u8 uabi_id;
+ u8 uabi_class;
+
u8 class;
u8 instance;
u32 context_size;
@@ -304,7 +335,7 @@ struct intel_engine_cs {
struct intel_ring *buffer;
struct intel_timeline *timeline;
- struct intel_render_state *render_state;
+ struct drm_i915_gem_object *default_state;
atomic_t irq_count;
unsigned long irq_posted;
@@ -333,19 +364,42 @@ struct intel_engine_cs {
spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
struct rb_root waiters; /* sorted by retirement, priority */
- struct rb_root signals; /* sorted by retirement */
+ struct list_head signals; /* sorted by retirement */
struct task_struct *signaler; /* used for fence signalling */
- struct drm_i915_gem_request __rcu *first_signal;
+
struct timer_list fake_irq; /* used after a missed interrupt */
struct timer_list hangcheck; /* detect missed interrupts */
unsigned int hangcheck_interrupts;
+ unsigned int irq_enabled;
bool irq_armed : 1;
- bool irq_enabled : 1;
I915_SELFTEST_DECLARE(bool mock : 1);
} breadcrumbs;
+ struct {
+ /**
+ * @enable: Bitmask of enable sample events on this engine.
+ *
+ * Bits correspond to sample event types, for instance
+ * I915_SAMPLE_QUEUED is bit 0 etc.
+ */
+ u32 enable;
+ /**
+ * @enable_count: Reference count for the enabled samplers.
+ *
+ * Index number corresponds to the bit number from @enable.
+ */
+ unsigned int enable_count[I915_PMU_SAMPLE_BITS];
+ /**
+ * @sample: Counter values for sampling events.
+ *
+ * Our internal timer stores the current counters in this field.
+ */
+#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
+ struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+ } pmu;
+
/*
* A pool of objects to use as shadow copies of client batch buffers
* when the command parser is enabled. Prevents the client from
@@ -364,7 +418,10 @@ struct intel_engine_cs {
int (*init_hw)(struct intel_engine_cs *engine);
void (*reset_hw)(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *req);
+ struct i915_request *rq);
+
+ void (*park)(struct intel_engine_cs *engine);
+ void (*unpark)(struct intel_engine_cs *engine);
void (*set_default_submission)(struct intel_engine_cs *engine);
@@ -372,22 +429,20 @@ struct intel_engine_cs {
struct i915_gem_context *ctx);
void (*context_unpin)(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
- int (*request_alloc)(struct drm_i915_gem_request *req);
- int (*init_context)(struct drm_i915_gem_request *req);
+ int (*request_alloc)(struct i915_request *rq);
+ int (*init_context)(struct i915_request *rq);
- int (*emit_flush)(struct drm_i915_gem_request *request,
- u32 mode);
+ int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
- int (*emit_bb_start)(struct drm_i915_gem_request *req,
+ int (*emit_bb_start)(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
- void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
- u32 *cs);
+ void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
@@ -396,7 +451,7 @@ struct intel_engine_cs {
* This is called from an atomic context with irqs disabled; must
* be irq safe.
*/
- void (*submit_request)(struct drm_i915_gem_request *req);
+ void (*submit_request)(struct i915_request *rq);
/* Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
@@ -404,8 +459,7 @@ struct intel_engine_cs {
*
* Called under the struct_mutex.
*/
- void (*schedule)(struct drm_i915_gem_request *request,
- int priority);
+ void (*schedule)(struct i915_request *request, int priority);
/*
* Cancel all requests on the hardware, or queued for execution.
@@ -462,23 +516,20 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
- union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
- struct {
- /* our mbox written by others */
- u32 wait[GEN6_NUM_SEMAPHORES];
- /* mboxes this ring signals to */
- i915_reg_t signal[GEN6_NUM_SEMAPHORES];
- } mbox;
- u64 signal_ggtt[I915_NUM_ENGINES];
- };
+ struct {
+ /* our mbox written by others */
+ u32 wait[GEN6_NUM_SEMAPHORES];
+ /* mboxes this ring signals to */
+ i915_reg_t signal[GEN6_NUM_SEMAPHORES];
+ } mbox;
/* AKA wait() */
- int (*sync_to)(struct drm_i915_gem_request *req,
- struct drm_i915_gem_request *signal);
- u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
+ int (*sync_to)(struct i915_request *rq,
+ struct i915_request *signal);
+ u32 *(*signal)(struct i915_request *rq, u32 *cs);
} semaphore;
struct intel_engine_execlists execlists;
@@ -501,13 +552,16 @@ struct intel_engine_cs {
* stream (ring).
*/
struct i915_gem_context *legacy_active_context;
+ struct i915_hw_ppgtt *legacy_active_ppgtt;
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
struct intel_engine_hangcheck hangcheck;
- bool needs_cmd_parser;
+#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_SUPPORTS_STATS BIT(1)
+ unsigned int flags;
/*
* Table of commands the command parser needs to know about
@@ -532,8 +586,50 @@ struct intel_engine_cs {
* certain bits to encode the command length in the header).
*/
u32 (*get_cmd_length_mask)(u32 cmd_header);
+
+ struct {
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ spinlock_t lock;
+ /**
+ * @enabled: Reference count indicating number of listeners.
+ */
+ unsigned int enabled;
+ /**
+ * @active: Number of contexts currently scheduled in.
+ */
+ unsigned int active;
+ /**
+ * @enabled_at: Timestamp when busy stats were enabled.
+ */
+ ktime_t enabled_at;
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == 0, busy as active > 0.
+ */
+ ktime_t start;
+ /**
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where engine is currently busy (active > 0).
+ */
+ ktime_t total;
+ } stats;
};
+static inline bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+}
+
+static inline bool intel_engine_supports_stats(struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_SUPPORTS_STATS;
+}
+
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
unsigned int bit)
@@ -555,6 +651,12 @@ execlists_is_active(const struct intel_engine_execlists *execlists,
return test_bit(bit, (unsigned long *)&execlists->active);
}
+void
+execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
+
+void
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
+
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
@@ -581,7 +683,7 @@ intel_engine_flag(const struct intel_engine_cs *engine)
}
static inline u32
-intel_read_status_page(struct intel_engine_cs *engine, int reg)
+intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
return READ_ONCE(engine->status_page.page_addr[reg]);
@@ -624,6 +726,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
*/
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+#define I915_GEM_HWS_PREEMPT_INDEX 0x32
+#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
@@ -646,13 +750,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
+int __must_check intel_ring_cacheline_align(struct i915_request *rq);
-u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
- unsigned int n);
+int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
+u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
-static inline void
-intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
+static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
/* Dummy function.
*
@@ -662,22 +765,20 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
- GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
+ GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
-static inline u32
-intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
return pos & (ring->size - 1);
}
-static inline u32
-intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
+static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- u32 offset = addr - req->ring->vaddr;
- GEM_BUG_ON(offset > req->ring->size);
- return intel_ring_wrap(req->ring, offset);
+ u32 offset = addr - rq->ring->vaddr;
+ GEM_BUG_ON(offset > rq->ring->size);
+ return intel_ring_wrap(rq->ring, offset);
}
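
intel_ring_wrap() relies on the ring size being a power of two, so masking with (size - 1) behaves like a modulo; a standalone sketch of that invariant (not the kernel helpers themselves):

#include <assert.h>
#include <stdio.h>

static unsigned int ring_wrap(unsigned int size, unsigned int pos)
{
	/* Only valid when size is a power of two. */
	assert(size && (size & (size - 1)) == 0);
	return pos & (size - 1);
}

int main(void)
{
	unsigned int size = 4096;

	/* A position exactly at the end wraps to 0 rather than being
	 * written out as 'size', which some GPUs treat as a hang. */
	printf("%u\n", ring_wrap(size, 4096)); /* -> 0 */
	printf("%u\n", ring_wrap(size, 4100)); /* -> 4 */
	return 0;
}
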
static inline void
@@ -715,7 +816,7 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
 /* Whilst writes to the tail are strictly ordered, there is no
* serialisation between readers and the writers. The tail may be
- * read by i915_gem_request_retire() just as it is being updated
+ * read by i915_request_retire() just as it is being updated
* by execlists, as although the breadcrumb is complete, the context
* switch hasn't been seen.
*/
@@ -736,8 +837,8 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
-u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
@@ -757,7 +858,7 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
}
int init_workarounds_ring(struct intel_engine_cs *engine);
-int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);
+int intel_ring_workarounds_emit(struct i915_request *rq);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
@@ -776,11 +877,16 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}
+static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
+{
+ return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
+}
+
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
static inline void intel_wait_init(struct intel_wait *wait,
- struct drm_i915_gem_request *rq)
+ struct i915_request *rq)
{
wait->tsk = current;
wait->request = rq;
@@ -806,9 +912,9 @@ intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
static inline bool
intel_wait_update_request(struct intel_wait *wait,
- const struct drm_i915_gem_request *rq)
+ const struct i915_request *rq)
{
- return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
+ return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
}
static inline bool
@@ -819,9 +925,9 @@ intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
static inline bool
intel_wait_check_request(const struct intel_wait *wait,
- const struct drm_i915_gem_request *rq)
+ const struct i915_request *rq)
{
- return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
+ return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
}
static inline bool intel_wait_complete(const struct intel_wait *wait)
@@ -833,9 +939,8 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
- bool wakeup);
-void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
+void intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
+void intel_engine_cancel_signaling(struct i915_request *request);
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
@@ -846,12 +951,14 @@ unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)
+void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
+void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
+
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
@@ -864,14 +971,123 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
return batch + 6;
}
+static inline u32 *
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
+{
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ /* w/a: for post-sync ops following a GPGPU operation, we
+ * need a prior CS_STALL, which is emitted by the flush
+ * following the batch.
+ */
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+ /* We're thrashing one dword of HWS. */
+ *cs++ = 0;
+
+ return cs;
+}
+
+static inline u32 *
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
+{
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ GEM_BUG_ON(gtt_offset & (1 << 5));
+ /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
+}
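
Both helpers above follow the convention of taking a u32 cursor into the ring, writing a fixed number of dwords and returning the advanced cursor. A standalone sketch of the same shape, using a dummy opcode rather than the real MI_FLUSH_DW or PIPE_CONTROL encodings:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical opcode; the real MI_FLUSH_DW and PIPE_CONTROL
 * encodings differ. */
#define DUMMY_STORE_DWORD 0x20u

/* Emit a 4-dword "store value at offset" packet and return the
 * advanced cursor, mirroring the gen8_emit_ggtt_write() shape. */
static uint32_t *emit_store_dword(uint32_t *cs, uint32_t value, uint32_t offset)
{
	*cs++ = DUMMY_STORE_DWORD;
	*cs++ = offset;
	*cs++ = 0;	/* upper address bits unused in this sketch */
	*cs++ = value;
	return cs;
}

int main(void)
{
	uint32_t ring[8];
	uint32_t *cs = ring;

	cs = emit_store_dword(cs, 0xdeadbeef, 0x30);
	printf("emitted %ld dwords\n", (long)(cs - ring));
	return 0;
}
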
+
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
-void intel_engines_mark_idle(struct drm_i915_private *i915);
+bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+
+void intel_engines_park(struct drm_i915_private *i915);
+void intel_engines_unpark(struct drm_i915_private *i915);
+
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
-void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
+__printf(3, 4)
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...);
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
+
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ spin_lock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ if (engine->stats.active++ == 0)
+ engine->stats.start = ktime_get();
+ GEM_BUG_ON(engine->stats.active == 0);
+ }
+
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
+}
+
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ spin_lock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ ktime_t last;
+
+ if (engine->stats.active && --engine->stats.active == 0) {
+ /*
+ * Decrement the active context count and in case GPU
+ * is now idle add up to the running total.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.start);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ } else if (engine->stats.active == 0) {
+ /*
+ * After turning on engine stats, context out might be
+ * the first event in which case we account from the
+ * time stats gathering was turned on.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ }
+ }
+
+ spin_unlock_irqrestore(&engine->stats.lock, flags);
+}
+
+int intel_enable_engine_stats(struct intel_engine_cs *engine);
+void intel_disable_engine_stats(struct intel_engine_cs *engine);
+
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
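
A possible usage pattern for the busy-stats accessors declared above, sketched as driver-internal code; the helper name is made up for illustration and error handling and locking context are omitted:

/* Illustrative only: sample how long 'engine' spent executing
 * contexts over a window, using the accessors declared above. */
static ktime_t sample_engine_busy(struct intel_engine_cs *engine)
{
	ktime_t before, after;

	if (intel_enable_engine_stats(engine))
		return 0;	/* engine does not support stats */

	before = intel_engine_get_busy_time(engine);
	/* ... let some work execute ... */
	after = intel_engine_get_busy_time(engine);

	intel_disable_engine_stats(engine);

	return ktime_sub(after, before);
}
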
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7e115f3..53ea564 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -94,6 +94,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
return "PORT_DDI_E_LANES";
+ case POWER_DOMAIN_PORT_DDI_F_LANES:
+ return "PORT_DDI_F_LANES";
case POWER_DOMAIN_PORT_DDI_A_IO:
return "PORT_DDI_A_IO";
case POWER_DOMAIN_PORT_DDI_B_IO:
@@ -104,6 +106,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PORT_DDI_D_IO";
case POWER_DOMAIN_PORT_DDI_E_IO:
return "PORT_DDI_E_IO";
+ case POWER_DOMAIN_PORT_DDI_F_IO:
+ return "PORT_DDI_F_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -124,12 +128,18 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_C";
case POWER_DOMAIN_AUX_D:
return "AUX_D";
+ case POWER_DOMAIN_AUX_F:
+ return "AUX_F";
+ case POWER_DOMAIN_AUX_IO_A:
+ return "AUX_IO_A";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
return "INIT";
case POWER_DOMAIN_MODESET:
return "MODESET";
+ case POWER_DOMAIN_GT_IRQ:
+ return "GT_IRQ";
default:
MISSING_CASE(domain);
return "?";
@@ -388,6 +398,15 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
hsw_wait_for_power_well_enable(dev_priv, power_well);
+ /* Display WA #1178: cnl */
+ if (IS_CANNONLAKE(dev_priv) &&
+ (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
+ id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
+ val = I915_READ(CNL_AUX_ANAOVRD1(id));
+ val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
+ I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
+ }
+
if (wait_fuses)
gen9_wait_for_power_well_fuses(dev_priv, pg);
@@ -715,7 +734,8 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
- WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));
+ /* Can't read out voltage_level so can't use intel_cdclk_changed() */
+ WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
gen9_assert_dbuf_enabled(dev_priv);
@@ -1714,6 +1734,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -1732,12 +1753,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
@@ -1794,6 +1816,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_GMBUS) | \
@@ -1810,9 +1833,11 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -1830,6 +1855,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -1840,8 +1866,15 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2389,6 +2422,18 @@ static struct i915_power_well cnl_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_D,
},
+ {
+ .name = "DDI F IO power well",
+ .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = CNL_DISP_PW_DDI_F,
+ },
+ {
+ .name = "AUX F",
+ .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = CNL_DISP_PW_AUX_F,
+ },
};
static int
@@ -2504,6 +2549,16 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, skl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
set_power_wells(power_domains, cnl_power_wells);
+
+ /*
+ * DDI and Aux IO are getting enabled for all ports
+ * regardless of presence or use. So, in order to avoid
+ * timeouts, let's remove them from the list
+ * for the SKUs without port F.
+ */
+ if (!IS_CNL_WITH_PORT_F(dev_priv))
+ power_domains->power_well_count -= 2;
+
} else if (IS_BROXTON(dev_priv)) {
set_power_wells(power_domains, bxt_power_wells);
} else if (IS_GEMINILAKE(dev_priv)) {
@@ -2594,6 +2649,48 @@ static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
DRM_ERROR("DBuf power disable timeout!\n");
}
+/*
+ * TODO: we shouldn't always enable DBUF_CTL_S2; we should only enable it when
+ * needed and keep it disabled as much as possible.
+ */
+static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
+ I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL_S2);
+
+ udelay(10);
+
+ if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+ !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power enable timeout\n");
+}
+
+static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
+ I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL_S2);
+
+ udelay(10);
+
+ if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+ (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power disable timeout!\n");
+}
+
+static void icl_mbus_init(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
+ MBUS_ABOX_BT_CREDIT_POOL2(16) |
+ MBUS_ABOX_B_CREDIT(1) |
+ MBUS_ABOX_BW_CREDIT(1);
+
+ I915_WRITE(MBUS_ABOX_CTL, val);
+}
+
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
@@ -2742,12 +2839,19 @@ static const struct cnl_procmon {
{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv)
+/*
+ * CNL has just one set of registers, while ICL has two sets: one for port A and
+ * the other for port B. The CNL registers are equivalent to the ICL port A
+ * registers, which is why we call the ICL macros even though the function has
+ * CNL in its name.
+ */
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum port port)
{
const struct cnl_procmon *procmon;
u32 val;
- val = I915_READ(CNL_PORT_COMP_DW3);
+ val = I915_READ(ICL_PORT_COMP_DW3(port));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -2768,13 +2872,13 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv)
break;
}
- val = I915_READ(CNL_PORT_COMP_DW1);
+ val = I915_READ(ICL_PORT_COMP_DW1(port));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
- I915_WRITE(CNL_PORT_COMP_DW1, val);
+ I915_WRITE(ICL_PORT_COMP_DW1(port), val);
- I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
- I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
+ I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
+ I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
}
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
@@ -2795,7 +2899,8 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
val &= ~CNL_COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
- cnl_set_procmon_ref_values(dev_priv);
+ /* Dummy PORT_A to get the correct CNL register from the ICL macro */
+ cnl_set_procmon_ref_values(dev_priv, PORT_A);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
@@ -2859,6 +2964,80 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_MISC_2, val);
}
+static void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ enum port port;
+ u32 val;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Enable PCH reset handshake. */
+ val = I915_READ(HSW_NDE_RSTWRN_OPT);
+ val |= RESET_PCH_HANDSHAKE_ENABLE;
+ I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+
+ for (port = PORT_A; port <= PORT_B; port++) {
+ /* 2. Enable DDI combo PHY comp. */
+ val = I915_READ(ICL_PHY_MISC(port));
+ val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+
+ cnl_set_procmon_ref_values(dev_priv, port);
+
+ val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val |= COMP_INIT;
+ I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+
+ /* 3. Set power down enable. */
+ val = I915_READ(ICL_PORT_CL_DW5(port));
+ val |= CL_POWER_DOWN_ENABLE;
+ I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ }
+
+ /* 4. Enable power well 1 (PG1) and aux IO power. */
+ /* FIXME: ICL power wells code not here yet. */
+
+ /* 5. Enable CDCLK. */
+ icl_init_cdclk(dev_priv);
+
+ /* 6. Enable DBUF. */
+ icl_dbuf_enable(dev_priv);
+
+ /* 7. Setup MBUS. */
+ icl_mbus_init(dev_priv);
+
+ /* 8. CHICKEN_DCPR_1 */
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ CNL_DDI_CLOCK_REG_ACCESS_ON);
+}
+
+static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ enum port port;
+ u32 val;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Disable all display engine functions -> already done */
+
+ /* 2. Disable DBUF */
+ icl_dbuf_disable(dev_priv);
+
+ /* 3. Disable CD clock */
+ icl_uninit_cdclk(dev_priv);
+
+ /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
+ /* FIXME: ICL power wells code not here yet. */
+
+ /* 5. Disable Comp */
+ for (port = PORT_A; port <= PORT_B; port++) {
+ val = I915_READ(ICL_PHY_MISC(port));
+ val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+ }
+}
+
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
@@ -2991,7 +3170,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
power_domains->initializing = true;
- if (IS_CANNONLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ icl_display_core_init(dev_priv, resume);
+ } else if (IS_CANNONLAKE(dev_priv)) {
cnl_display_core_init(dev_priv, resume);
} else if (IS_GEN9_BC(dev_priv)) {
skl_display_core_init(dev_priv, resume);
@@ -3032,7 +3213,9 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (!i915_modparams.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- if (IS_CANNONLAKE(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_display_core_uninit(dev_priv);
+ else if (IS_CANNONLAKE(dev_priv))
cnl_display_core_uninit(dev_priv);
else if (IS_GEN9_BC(dev_priv))
skl_display_core_uninit(dev_priv);
@@ -3148,18 +3331,19 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
* @dev_priv: i915 device instance
*
* This function grabs a device-level runtime pm reference if the device is
- * already in use and ensures that it is powered up.
+ * already in use and ensures that it is powered up. It is illegal to try
+ * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: True if the wakeref was acquired, or False otherwise.
*/
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct device *kdev = &pdev->dev;
-
if (IS_ENABLED(CONFIG_PM)) {
- int ret = pm_runtime_get_if_in_use(kdev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct device *kdev = &pdev->dev;
/*
* In cases runtime PM is disabled by the RPM core and we get
@@ -3167,9 +3351,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
- WARN_ONCE(ret < 0,
- "pm_runtime_get_if_in_use() failed: %d\n", ret);
- if (ret <= 0)
+ if (pm_runtime_get_if_in_use(kdev) <= 0)
return false;
}
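
An illustrative caller pattern for the helper documented above; the function name is hypothetical and only shows the acquire/put pairing:

/* Illustrative pattern: only touch the hardware if a wakeref could be
 * obtained without waking the device up. */
static void maybe_poke_hw(struct drm_i915_private *i915)
{
	if (!intel_runtime_pm_get_if_in_use(i915))
		return;	/* device suspended; skip the access */

	/* ... register access is legal here ... */

	intel_runtime_pm_put(i915);
}
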
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7437944..96e213e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -214,7 +214,7 @@ static bool
intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector);
-/**
+/*
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
@@ -250,10 +250,10 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* writing them only once doesn't appear to 'stick'.
* The BIOS does this too. Yay, magic
*/
- for (i = 0; i < 2; i++)
- {
+ for (i = 0; i < 2; i++) {
I915_WRITE(GEN3_SDVOB, bval);
POSTING_READ(GEN3_SDVOB);
+
I915_WRITE(GEN3_SDVOC, cval);
POSTING_READ(GEN3_SDVOC);
}
@@ -643,7 +643,7 @@ static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
&targets, sizeof(targets));
}
-/**
+/*
* Return whether each input is trained.
*
* This function is making an assumption about the layout of the response,
@@ -1061,8 +1061,10 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
return true;
}
-/* Asks the sdvo controller for the preferred input mode given the output mode.
- * Unfortunately we have to set up the full output mode to do that. */
+/*
+ * Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that.
+ */
static bool
intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *mode,
@@ -1095,8 +1097,10 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
- /* SDVO TV has fixed PLL values depend on its clock range,
- this mirrors vbios setting. */
+ /*
+ * SDVO TV has fixed PLL values that depend on its clock range;
+ * this mirrors the VBIOS setting.
+ */
if (dotclock >= 100000 && dotclock < 140500) {
clock->p1 = 2;
clock->p2 = 10;
@@ -1132,7 +1136,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
- /* We need to construct preferred input timings based on our
+ /*
+ * We need to construct preferred input timings based on our
* output timings. To do that, we have to set the output
* timings, even though this isn't really the right place in
* the sequence to do it. Oh well.
@@ -1155,7 +1160,8 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
adjusted_mode);
}
- /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ /*
+ * Make the CRTC code factor in the SDVO pixel multiplier. The
* SDVO device will factor out the multiplier during mode_set.
*/
pipe_config->pixel_multiplier =
@@ -1169,9 +1175,12 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->has_audio = true;
if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /* See CEA-861-E - 5.1 Default Encoding Parameters */
- /* FIXME: This bit is only valid when using TMDS encoding and 8
- * bit per color mode. */
+ /*
+ * See CEA-861-E - 5.1 Default Encoding Parameters
+ *
+ * FIXME: This bit is only valid when using TMDS encoding and 8
+ * bit per color mode.
+ */
if (pipe_config->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
pipe_config->limited_color_range = true;
@@ -1272,7 +1281,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
intel_sdvo_update_props(intel_sdvo, sdvo_state);
- /* First, set the input mapping for the first input to our controlled
+ /*
+ * First, set the input mapping for the first input to our controlled
* output. This is only correct if we're a single-input device, in
* which case the first input is the output from the appropriate SDVO
* channel on the motherboard. In a two-input device, the first input
@@ -1429,12 +1439,16 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
u8 val;
bool ret;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO);
+
sdvox = I915_READ(intel_sdvo->sdvo_reg);
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
- /* Some sdvo encoders are not spec compliant and don't
- * implement the mandatory get_timings function. */
+ /*
+ * Some sdvo encoders are not spec compliant and don't
+ * implement the mandatory get_timings function.
+ */
DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
} else {
@@ -1510,7 +1524,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;
intel_sdvo_set_active_outputs(intel_sdvo, 0);
@@ -1569,7 +1583,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
u32 temp;
bool input1, input2;
int i;
@@ -1583,7 +1597,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
- /* Warn if the device reported failure to sync.
+ /*
+ * Warn if the device reported failure to sync.
+ *
+ * A lot of SDVO devices fail to notify of sync, but it's
+ * a given that if the status is a success, we succeeded.
*/
@@ -1605,9 +1621,6 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (intel_sdvo->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
@@ -1673,8 +1686,10 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
if (!I915_HAS_HOTPLUG(dev_priv))
return 0;
- /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
- * on the line. */
+ /*
+ * HW Erratum: SDVO Hotplug is broken on all i945G chips; there's noise
+ * on the line.
+ */
if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
return 0;
@@ -1690,7 +1705,15 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
- &intel_sdvo->hotplug_active, 2);
+ &intel_sdvo->hotplug_active, 2);
+}
+
+static bool intel_sdvo_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ intel_sdvo_enable_hotplug(encoder);
+
+ return intel_encoder_hotplug(encoder, connector);
}
static bool
@@ -1958,7 +1981,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- /* Read the list of supported input resolutions for the selected TV
+ /*
+ * Read the list of supported input resolutions for the selected TV
* format.
*/
format_map = 1 << conn_state->tv.mode;
@@ -2269,7 +2293,8 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
uint16_t mask = 0;
unsigned int num_bits;
- /* Make a mask of outputs less than or equal to our own priority in the
+ /*
+ * Make a mask of outputs less than or equal to our own priority in the
* list.
*/
switch (sdvo->controlled_output) {
@@ -2299,7 +2324,7 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
sdvo->ddc_bus = 1 << num_bits;
}
-/**
+/*
* Choose the appropriate DDC bus for control bus switch command for this
* SDVO output based on the controlled output.
*
@@ -2343,9 +2368,11 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
- /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ /*
+ * With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
* our code totally fails once we start using gmbus. Hence fall back to
- * bit banging for now. */
+ * bit banging for now.
+ */
intel_gmbus_force_bit(sdvo->i2c, true);
}
@@ -2380,7 +2407,8 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
if (my_mapping->slave_addr)
return my_mapping->slave_addr;
- /* If the BIOS only described a different SDVO device, use the
+ /*
+ * If the BIOS only described a different SDVO device, use the
* address that it isn't using.
*/
if (other_mapping->slave_addr) {
@@ -2390,7 +2418,8 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
return 0x70;
}
- /* No SDVO device info is found for another DVO port,
+ /*
+ * No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
if (sdvo->port == PORT_B)
@@ -2491,10 +2520,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
if (intel_sdvo_get_hotplug_support(intel_sdvo) &
intel_sdvo_connector->output_flag) {
intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
- /* Some SDVO devices have one-shot hotplug interrupts.
+ /*
+ * Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
- intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+ intel_encoder->hotplug = intel_sdvo_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
} else {
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
@@ -2790,7 +2820,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
to_intel_sdvo_connector_state(conn_state);
uint16_t response, data_value[2];
- /* when horizontal overscan is supported, Add the left/right property */
+ /* when horizontal overscan is supported, add the left/right property */
if (enhancements.overscan_h) {
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_MAX_OVERSCAN_H,
@@ -3075,7 +3105,8 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
goto err_output;
}
- /* Only enable the hotplug irq if we need it, to work around noisy
+ /*
+ * Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
if (intel_sdvo->hotplug_active) {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4a8a5d9..dbdcf85 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -41,8 +41,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-static bool
-format_is_yuv(uint32_t format)
+bool intel_format_is_yuv(u32 format)
{
switch (format) {
case DRM_FORMAT_YUYV:
@@ -263,12 +262,9 @@ skl_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- PLANE_COLOR_PIPE_GAMMA_ENABLE |
- PLANE_COLOR_PIPE_CSC_ENABLE |
- PLANE_COLOR_PLANE_GAMMA_DISABLE);
- }
+ plane_state->color_ctl);
if (key->flags) {
I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
@@ -350,44 +346,103 @@ skl_plane_get_hw_state(struct intel_plane *plane)
}
static void
-chv_update_csc(struct intel_plane *plane, uint32_t format)
+chv_update_csc(const struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = plane->id;
-
- /* Seems RGB data bypasses the CSC always */
- if (!format_is_yuv(format))
- return;
-
/*
- * BT.601 limited range YCbCr -> full range RGB
+ * |r| | c0 c1 c2 | |cr|
+ * |g| = | c3 c4 c5 | x |y |
+ * |b| | c6 c7 c8 | |cb|
*
- * |r| | 6537 4769 0| |cr |
- * |g| = |-3330 4769 -1605| x |y-64|
- * |b| | 0 4769 8263| |cb |
+ * Coefficients are s3.12.
*
- * Cb and Cr apparently come in as signed already, so no
- * need for any offset. For Y we need to remove the offset.
+ * Cb and Cr apparently come in as signed already, and
+ * we always get full range data in on account of CLRC0/1.
*/
- I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+ static const s16 csc_matrix[][9] = {
+ /* BT.601 full range YCbCr -> full range RGB */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 5743, 4096, 0,
+ -2925, 4096, -1410,
+ 0, 4096, 7258,
+ },
+ /* BT.709 full range YCbCr -> full range RGB */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 6450, 4096, 0,
+ -1917, 4096, -767,
+ 0, 4096, 7601,
+ },
+ };
+ const s16 *csc = csc_matrix[plane_state->base.color_encoding];
+
+ /* Seems RGB data bypasses the CSC always */
+ if (!intel_format_is_yuv(fb->format->format))
+ return;
+
+ I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
- I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
- I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
- I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
- I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(8263));
+ I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
+ I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
+ I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
+ I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
+ I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(csc[8]));
- I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
- I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
- I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+ I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
+ I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+ I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
+#define SIN_0 0
+#define COS_0 1
+
+static void
+vlv_update_clrc(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+ int contrast, brightness, sh_scale, sh_sin, sh_cos;
+
+ if (intel_format_is_yuv(fb->format->format) &&
+ plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
+ /*
+ * Expand limited range to full range:
+ * Contrast is applied first and is used to expand Y range.
+ * Brightness is applied second and is used to remove the
+ * offset from Y. Saturation/hue is used to expand CbCr range.
+ */
+ contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);
+ brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);
+ sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);
+ sh_sin = SIN_0 * sh_scale;
+ sh_cos = COS_0 * sh_scale;
+ } else {
+ /* Pass-through everything. */
+ contrast = 1 << 6;
+ brightness = 0;
+ sh_scale = 1 << 7;
+ sh_sin = SIN_0 * sh_scale;
+ sh_cos = COS_0 * sh_scale;
+ }
+
+ /* FIXME these registers are single buffered :( */
+ I915_WRITE_FW(SPCLRC0(pipe, plane_id),
+ SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
+ I915_WRITE_FW(SPCLRC1(pipe, plane_id),
+ SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
+}
+
static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
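For reference, the limited-to-full-range expansion in vlv_update_clrc() and the s3.12 CSC coefficients in chv_update_csc() above work out to small, easily checked constants: unity is 1 << 6 for contrast, 1 << 7 for saturation/hue and 1 << 12 for the CSC matrix. The stand-alone sketch below (not kernel code; DIV_ROUND_CLOSEST() is re-implemented locally) reproduces the arithmetic:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	/* Y: 16..235 expanded to 0..255, .6 fixed point (64 == 1.0) */
	int contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);     /* 75, ~1.17 */
	int brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);  /* -19       */
	/* CbCr: 16..240 expanded, .7 fixed point (128 == 1.0) */
	int sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);    /* 146, ~1.14 */

	/* s3.12 CSC coefficient: 4096 == 1.0, so 5743 is the familiar
	 * ~1.402 Cr -> R gain of BT.601 for full-range input. */
	printf("contrast=%d brightness=%d sh_scale=%d cr_to_r=%.3f\n",
	       contrast, brightness, sh_scale, 5743 / 4096.0);
	return 0;
}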
@@ -437,6 +492,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
+ if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ sprctl |= SP_YUV_FORMAT_BT709;
+
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
@@ -481,8 +539,10 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ vlv_update_clrc(plane_state);
+
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
- chv_update_csc(plane, fb->format->format);
+ chv_update_csc(plane_state);
if (key->flags) {
I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
@@ -588,6 +648,12 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
+ if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
+
+ if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE;
+
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
@@ -744,6 +810,12 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
+ if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ dvscntr |= DVS_YUV_FORMAT_BT709;
+
+ if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE;
+
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
@@ -868,7 +940,8 @@ intel_check_sprite_plane(struct intel_plane *plane,
uint32_t src_x, src_y, src_w, src_h;
struct drm_rect *src = &state->base.src;
struct drm_rect *dst = &state->base.dst;
- const struct drm_rect *clip = &state->clip;
+ struct drm_rect clip = {};
+ int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384;
int hscale, vscale;
int max_scale, min_scale;
bool can_scale;
@@ -889,7 +962,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
}
/* FIXME check all gen limits */
- if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > 16384) {
+ if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > max_stride) {
DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
return -EINVAL;
}
@@ -897,7 +970,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
/* setup can_scale, min_scale, max_scale */
if (INTEL_GEN(dev_priv) >= 9) {
/* use scaler when colorkey is not required */
- if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+ if (!state->ckey.flags) {
can_scale = 1;
min_scale = 1;
max_scale = skl_max_scale(crtc, crtc_state);
@@ -926,7 +999,11 @@ intel_check_sprite_plane(struct intel_plane *plane,
vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(vscale < 0);
- state->base.visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+ if (crtc_state->base.enable)
+ drm_mode_get_hv_timing(&crtc_state->base.mode,
+ &clip.x2, &clip.y2);
+
+ state->base.visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale);
crtc_x = dst->x1;
crtc_y = dst->y1;
@@ -978,7 +1055,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
- if (format_is_yuv(fb->format->format)) {
+ if (intel_format_is_yuv(fb->format->format)) {
src_x &= ~1;
src_w &= ~1;
@@ -1031,7 +1108,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
dst->y2 = crtc_y + crtc_h;
if (INTEL_GEN(dev_priv) >= 9) {
- ret = skl_check_plane_surface(state);
+ ret = skl_check_plane_surface(crtc_state, state);
if (ret)
return ret;
@@ -1056,11 +1133,14 @@ intel_check_sprite_plane(struct intel_plane *plane,
state->ctl = g4x_sprite_ctl(crtc_state, state);
}
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ state->color_ctl = glk_plane_color_ctl(crtc_state, state);
+
return 0;
}
-int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
@@ -1070,6 +1150,12 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
+ /* ignore the pointless "none" flag */
+ set->flags &= ~I915_SET_COLORKEY_NONE;
+
+ if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
@@ -1162,18 +1248,27 @@ static uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const uint64_t skl_plane_format_modifiers[] = {
+static const uint64_t skl_plane_format_modifiers_noccs[] = {
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
-static bool g4x_sprite_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
+static const uint64_t skl_plane_format_modifiers_ccs[] = {
+ I915_FORMAT_MOD_Yf_TILED_CCS,
+ I915_FORMAT_MOD_Y_TILED_CCS,
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
{
switch (format) {
- case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
@@ -1188,22 +1283,38 @@ static bool g4x_sprite_plane_format_mod_supported(struct drm_plane *plane,
}
}
-static bool vlv_sprite_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
+static bool snb_mod_supported(uint32_t format, uint64_t modifier)
{
switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
+static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
+{
+ switch (format) {
case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED)
return true;
@@ -1213,16 +1324,17 @@ static bool vlv_sprite_plane_format_mod_supported(struct drm_plane *plane,
}
}
-static bool skl_sprite_plane_format_mod_supported(struct drm_plane *plane,
- uint32_t format,
- uint64_t modifier)
+static bool skl_mod_supported(uint32_t format, uint64_t modifier)
{
- /* This is the same as primary plane since SKL has universal planes */
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS)
+ return true;
+ /* fall through */
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
@@ -1258,13 +1370,13 @@ static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
return false;
if (INTEL_GEN(dev_priv) >= 9)
- return skl_sprite_plane_format_mod_supported(plane, format, modifier);
+ return skl_mod_supported(format, modifier);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_sprite_plane_format_mod_supported(plane, format, modifier);
+ return vlv_mod_supported(format, modifier);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ return snb_mod_supported(format, modifier);
else
- return g4x_sprite_plane_format_mod_supported(plane, format, modifier);
-
- unreachable();
+ return g4x_mod_supported(format, modifier);
}
static const struct drm_plane_funcs intel_sprite_plane_funcs = {
@@ -1278,6 +1390,23 @@ static const struct drm_plane_funcs intel_sprite_plane_funcs = {
.format_mod_supported = intel_sprite_plane_format_mod_supported,
};
+bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (plane_id == PLANE_CURSOR)
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ return true;
+
+ if (IS_GEMINILAKE(dev_priv))
+ return pipe != PIPE_C;
+
+ return pipe != PIPE_C &&
+ (plane_id == PLANE_PRIMARY ||
+ plane_id == PLANE_SPRITE0);
+}
+
struct intel_plane *
intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane)
@@ -1304,7 +1433,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
}
intel_plane->base.state = &state->base;
- if (INTEL_GEN(dev_priv) >= 10) {
+ if (INTEL_GEN(dev_priv) >= 9) {
intel_plane->can_scale = true;
state->scaler_id = -1;
@@ -1314,18 +1443,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
- modifiers = skl_plane_format_modifiers;
- } else if (INTEL_GEN(dev_priv) >= 9) {
- intel_plane->can_scale = true;
- state->scaler_id = -1;
-
- intel_plane->update_plane = skl_update_plane;
- intel_plane->disable_plane = skl_disable_plane;
- intel_plane->get_hw_state = skl_plane_get_hw_state;
- plane_formats = skl_plane_formats;
- num_plane_formats = ARRAY_SIZE(skl_plane_formats);
- modifiers = skl_plane_format_modifiers;
+ if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
@@ -1385,9 +1507,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
}
intel_plane->pipe = pipe;
- intel_plane->plane = plane;
+ intel_plane->i9xx_plane = plane;
intel_plane->id = PLANE_SPRITE0 + plane;
- intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
+ intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
intel_plane->check_plane = intel_check_sprite_plane;
possible_crtcs = (1 << pipe);
@@ -1413,6 +1535,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0,
supported_rotations);
+ drm_plane_create_color_properties(&intel_plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
return intel_plane;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index a79a759..885fc38 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -43,7 +43,6 @@ enum tv_margin {
TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
};
-/** Private structure for the integrated TV support */
struct intel_tv {
struct intel_encoder base;
@@ -370,12 +369,11 @@ struct tv_mode {
* The constants below were all computed using a 107.520MHz clock
*/
-/**
+/*
* Register programming values for TV modes.
*
* These values account for -1s required.
*/
-
static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-M",
@@ -822,7 +820,7 @@ intel_enable_tv(struct intel_encoder *encoder,
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_wait_for_vblank(dev_priv,
- to_intel_crtc(encoder->base.crtc)->pipe);
+ to_intel_crtc(pipe_config->base.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}
@@ -868,6 +866,8 @@ static void
intel_tv_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
+
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
@@ -980,7 +980,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
u32 tv_ctl;
@@ -1124,14 +1124,6 @@ static const struct drm_display_mode reported_modes[] = {
},
};
-/**
- * Detects TV presence by checking for load.
- *
- * Requires that the current pipe's DPLL is active.
-
- * \return true if TV is connected.
- * \return false if TV is disconnected.
- */
static int
intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
@@ -1257,12 +1249,6 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
connector->state->tv.mode = i;
}
-/**
- * Detect the TV connection.
- *
- * Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure
- * we have a pipe programmed in order to probe the TV.
- */
static int
intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
@@ -1337,13 +1323,6 @@ intel_tv_choose_preferred_modes(const struct tv_mode *tv_mode,
}
}
-/**
- * Stub get_modes function.
- *
- * This should probably return a set of fixed modes, unless we can figure out
- * how to probe modes off of TV connections.
- */
-
static int
intel_tv_get_modes(struct drm_connector *connector)
{
@@ -1510,7 +1489,8 @@ intel_tv_init(struct drm_i915_private *dev_priv)
connector = &intel_connector->base;
state = connector->state;
- /* The documentation, for the older chipsets at least, recommend
+ /*
+ * The documentation, for the older chipsets at least, recommends
* using a polling method rather than hotplug detection for TVs.
* This is because in order to perform the hotplug detection, the PLLs
* for the TV must be kept alive increasing power drain and starving
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 25bd162..e5bf0d3 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -23,8 +23,11 @@
*/
#include "intel_uc.h"
+#include "intel_guc_submission.h"
+#include "intel_guc.h"
#include "i915_drv.h"
-#include "i915_guc_submission.h"
+
+static void guc_free_load_err_log(struct intel_guc *guc);
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
@@ -33,9 +36,9 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
int ret;
u32 guc_status;
- ret = intel_guc_reset(dev_priv);
+ ret = intel_reset_guc(dev_priv);
if (ret) {
- DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+ DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
return ret;
}
@@ -47,55 +50,143 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
return ret;
}
+static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
+{
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ int enable_guc = 0;
+
+ /* Default is to enable GuC/HuC if we know their firmwares */
+ if (intel_uc_fw_is_selected(guc_fw))
+ enable_guc |= ENABLE_GUC_SUBMISSION;
+ if (intel_uc_fw_is_selected(huc_fw))
+ enable_guc |= ENABLE_GUC_LOAD_HUC;
+
+ /* Any platform specific fine-tuning can be done here */
+
+ return enable_guc;
+}
+
+static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
+{
+ int guc_log_level = 0; /* disabled */
+
+ /* Enable if we're running on a platform with GuC and a debug config */
+ if (HAS_GUC(dev_priv) && intel_uc_is_using_guc() &&
+ (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)))
+ guc_log_level = 1 + GUC_LOG_VERBOSITY_MAX;
+
+ /* Any platform specific fine-tuning can be done here */
+
+ return guc_log_level;
+}
+
+/**
+ * intel_uc_sanitize_options - sanitize uC related modparam options
+ * @dev_priv: device private
+ *
+ * For the "enable_guc" option, this function will only attempt to modify
+ * it if it was initially set to "auto(-1)". The default value for this
+ * modparam varies between platforms and is hardcoded in the driver code.
+ * Any other modparam value is only checked against the availability of the
+ * related hardware or firmware definitions.
+ *
+ * For the "guc_log_level" option, this function will only attempt to modify
+ * it if it was initially set to "auto(-1)", or if its initial value was
+ * "enable(1..4)" on a platform without the GuC. The default value for this
+ * modparam varies between platforms and is usually "disable(0)", unless the
+ * GuC is enabled on the given platform and the driver is built with a debug
+ * config, in which case it defaults to "enable(1..4)".
+ */
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
{
- if (!HAS_GUC(dev_priv)) {
- if (i915_modparams.enable_guc_loading > 0 ||
- i915_modparams.enable_guc_submission > 0)
- DRM_INFO("Ignoring GuC options, no hardware\n");
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
- i915_modparams.enable_guc_loading = 0;
- i915_modparams.enable_guc_submission = 0;
- return;
+ /* A negative value means "use platform default" */
+ if (i915_modparams.enable_guc < 0)
+ i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+
+ DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_is_using_guc_submission()),
+ yesno(intel_uc_is_using_huc()));
+
+ /* Verify GuC firmware availability */
+ if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
+ DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
+ "enable_guc", i915_modparams.enable_guc,
+ !HAS_GUC(dev_priv) ? "no GuC hardware" :
+ "no GuC firmware");
}
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc_loading < 0)
- i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+ /* Verify HuC firmware availability */
+ if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
+ DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
+ "enable_guc", i915_modparams.enable_guc,
+ !HAS_HUC(dev_priv) ? "no HuC hardware" :
+ "no HuC firmware");
+ }
- /* Verify firmware version */
- if (i915_modparams.enable_guc_loading) {
- if (HAS_HUC_UCODE(dev_priv))
- intel_huc_select_fw(&dev_priv->huc);
+ /* A negative value means "use platform/config default" */
+ if (i915_modparams.guc_log_level < 0)
+ i915_modparams.guc_log_level =
+ __get_default_guc_log_level(dev_priv);
+
+ if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
+ DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
+ "guc_log_level", i915_modparams.guc_log_level,
+ !HAS_GUC(dev_priv) ? "no GuC hardware" :
+ "GuC not enabled");
+ i915_modparams.guc_log_level = 0;
+ }
- if (intel_guc_fw_select(&dev_priv->guc))
- i915_modparams.enable_guc_loading = 0;
+ if (i915_modparams.guc_log_level > 1 + GUC_LOG_VERBOSITY_MAX) {
+ DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
+ "guc_log_level", i915_modparams.guc_log_level,
+ "verbosity too high");
+ i915_modparams.guc_log_level = 1 + GUC_LOG_VERBOSITY_MAX;
}
- /* Can't enable guc submission without guc loaded */
- if (!i915_modparams.enable_guc_loading)
- i915_modparams.enable_guc_submission = 0;
+ DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s verbosity:%d)\n",
+ i915_modparams.guc_log_level,
+ yesno(i915_modparams.guc_log_level),
+ i915_modparams.guc_log_level - 1);
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc_submission < 0)
- i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+ /* Make sure that sanitization was done */
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ GEM_BUG_ON(i915_modparams.guc_log_level < 0);
}
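A note on the guc_log_level encoding the sanitize code above relies on: 0 means GuC logging is disabled, any positive value n enables it at verbosity n - 1, and values above 1 + GUC_LOG_VERBOSITY_MAX are clamped. A minimal user-space sketch of that decode follows; the MAX value of 3 is an assumption for illustration, not quoted from this patch:

#include <stdio.h>

#define GUC_LOG_VERBOSITY_MAX 3 /* assumed value, for illustration only */

static int sanitize_guc_log_level(int level)
{
	if (level < 0)
		level = 0; /* the real code picks a platform/config default here */
	if (level > 1 + GUC_LOG_VERBOSITY_MAX)
		level = 1 + GUC_LOG_VERBOSITY_MAX;
	return level;
}

int main(void)
{
	int level;

	for (level = -1; level <= 6; level++) {
		int l = sanitize_guc_log_level(level);

		/* mirrors the driver's debug print: enabled iff non-zero,
		 * verbosity is level - 1 (meaningless when disabled) */
		printf("guc_log_level=%2d -> %2d (enabled:%s verbosity:%d)\n",
		       level, l, l ? "yes" : "no", l - 1);
	}
	return 0;
}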
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
intel_guc_init_early(&dev_priv->guc);
+ intel_huc_init_early(&dev_priv->huc);
}
void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
- intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+ if (!USES_GUC(dev_priv))
+ return;
+
+ if (USES_HUC(dev_priv))
+ intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+
intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
}
void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
{
+ if (!USES_GUC(dev_priv))
+ return;
+
intel_uc_fw_fini(&dev_priv->guc.fw);
- intel_uc_fw_fini(&dev_priv->huc.fw);
+
+ if (USES_HUC(dev_priv))
+ intel_uc_fw_fini(&dev_priv->huc.fw);
+
+ guc_free_load_err_log(&dev_priv->guc);
}
/**
@@ -113,7 +204,7 @@ void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
static void guc_capture_load_err_log(struct intel_guc *guc)
{
- if (!guc->log.vma || i915_modparams.guc_log_level < 0)
+ if (!guc->log.vma || !i915_modparams.guc_log_level)
return;
if (!guc->load_err_log)
@@ -149,30 +240,105 @@ static void guc_disable_communication(struct intel_guc *guc)
guc->send = intel_guc_send_nop;
}
-int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+int intel_uc_init_misc(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- int ret, attempts;
+ int ret;
- if (!i915_modparams.enable_guc_loading)
+ if (!USES_GUC(dev_priv))
return 0;
- guc_disable_communication(guc);
- gen9_reset_guc_interrupts(dev_priv);
+ ret = intel_guc_init_wq(guc);
+ if (ret) {
+ DRM_ERROR("Couldn't allocate workqueues for GuC\n");
+ goto err;
+ }
+
+ ret = intel_guc_log_relay_create(guc);
+ if (ret) {
+ DRM_ERROR("Couldn't allocate relay for GuC log\n");
+ goto err_relay;
+ }
+
+ return 0;
- /* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(dev_priv);
+err_relay:
+ intel_guc_fini_wq(guc);
+err:
+ return ret;
+}
+
+void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ if (!USES_GUC(dev_priv))
+ return;
+
+ intel_guc_fini_wq(guc);
+
+ intel_guc_log_relay_destroy(guc);
+}
+
+int intel_uc_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ int ret;
+
+ if (!USES_GUC(dev_priv))
+ return 0;
+
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
+ ret = intel_guc_init(guc);
+ if (ret)
+ return ret;
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
*/
- ret = i915_guc_submission_init(dev_priv);
- if (ret)
- goto err_guc;
+ ret = intel_guc_submission_init(guc);
+ if (ret) {
+ intel_guc_fini(guc);
+ return ret;
+ }
}
+ return 0;
+}
+
+void intel_uc_fini(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ if (!USES_GUC(dev_priv))
+ return;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ if (USES_GUC_SUBMISSION(dev_priv))
+ intel_guc_submission_fini(guc);
+
+ intel_guc_fini(guc);
+}
+
+int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ struct intel_huc *huc = &dev_priv->huc;
+ int ret, attempts;
+
+ if (!USES_GUC(dev_priv))
+ return 0;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ guc_disable_communication(guc);
+ gen9_reset_guc_interrupts(dev_priv);
+
/* init WOPCM */
I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET,
@@ -192,9 +358,14 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
*/
ret = __intel_uc_reset_hw(dev_priv);
if (ret)
- goto err_submission;
+ goto err_out;
+
+ if (USES_HUC(dev_priv)) {
+ ret = intel_huc_fw_upload(huc);
+ if (ret)
+ goto err_out;
+ }
- intel_huc_init_hw(&dev_priv->huc);
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
if (ret == 0 || ret != -EAGAIN)
@@ -212,79 +383,110 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_log_capture;
- intel_huc_auth(&dev_priv->huc);
- if (i915_modparams.enable_guc_submission) {
- if (i915_modparams.guc_log_level >= 0)
+ if (USES_HUC(dev_priv)) {
+ ret = intel_huc_auth(huc);
+ if (ret)
+ goto err_communication;
+ }
+
+ if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (i915_modparams.guc_log_level)
gen9_enable_guc_interrupts(dev_priv);
- ret = i915_guc_submission_enable(dev_priv);
+ ret = intel_guc_submission_enable(guc);
if (ret)
goto err_interrupts;
}
- dev_info(dev_priv->drm.dev, "GuC %s (firmware %s [version %u.%u])\n",
- i915_modparams.enable_guc_submission ? "submission enabled" :
- "loaded",
- guc->fw.path,
+ dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
guc->fw.major_ver_found, guc->fw.minor_ver_found);
+ dev_info(dev_priv->drm.dev, "GuC submission %s\n",
+ enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
+ dev_info(dev_priv->drm.dev, "HuC %s\n",
+ enableddisabled(USES_HUC(dev_priv)));
return 0;
/*
* We've failed to load the firmware :(
- *
- * Decide whether to disable GuC submission and fall back to
- * execlist mode, and whether to hide the error by returning
- * zero or to return -EIO, which the caller will treat as a
- * nonfatal error (i.e. it doesn't prevent driver load, but
- * marks the GPU as wedged until reset).
*/
err_interrupts:
- guc_disable_communication(guc);
gen9_disable_guc_interrupts(dev_priv);
+err_communication:
+ guc_disable_communication(guc);
err_log_capture:
guc_capture_load_err_log(guc);
-err_submission:
- if (i915_modparams.enable_guc_submission)
- i915_guc_submission_fini(dev_priv);
-err_guc:
- i915_ggtt_disable_guc(dev_priv);
-
- if (i915_modparams.enable_guc_loading > 1 ||
- i915_modparams.enable_guc_submission > 1) {
- DRM_ERROR("GuC init failed. Firmware loading disabled.\n");
- ret = -EIO;
- } else {
- DRM_NOTE("GuC init failed. Firmware loading disabled.\n");
- ret = 0;
- }
-
- if (i915_modparams.enable_guc_submission) {
- i915_modparams.enable_guc_submission = 0;
- DRM_NOTE("Falling back from GuC submission to execlist mode\n");
- }
-
- i915_modparams.enable_guc_loading = 0;
+err_out:
+ /*
+ * Note that there is no fallback: either the user explicitly asked for
+ * the GuC, or the driver's default option was to run with the GuC enabled.
+ */
+ if (GEM_WARN_ON(ret == -EIO))
+ ret = -EINVAL;
+ dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
return ret;
}
void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
{
- guc_free_load_err_log(&dev_priv->guc);
+ struct intel_guc *guc = &dev_priv->guc;
- if (!i915_modparams.enable_guc_loading)
+ if (!USES_GUC(dev_priv))
return;
- if (i915_modparams.enable_guc_submission)
- i915_guc_submission_disable(dev_priv);
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ if (USES_GUC_SUBMISSION(dev_priv))
+ intel_guc_submission_disable(guc);
- guc_disable_communication(&dev_priv->guc);
+ guc_disable_communication(guc);
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv))
gen9_disable_guc_interrupts(dev_priv);
- i915_guc_submission_fini(dev_priv);
+}
+
+int intel_uc_suspend(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+ int err;
+
+ if (!USES_GUC(i915))
+ return 0;
+
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return 0;
+
+ err = intel_guc_suspend(guc);
+ if (err) {
+ DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+ return err;
}
- i915_ggtt_disable_guc(dev_priv);
+ gen9_disable_guc_interrupts(i915);
+
+ return 0;
+}
+
+int intel_uc_resume(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+ int err;
+
+ if (!USES_GUC(i915))
+ return 0;
+
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return 0;
+
+ if (i915_modparams.guc_log_level)
+ gen9_enable_guc_interrupts(i915);
+
+ err = intel_guc_resume(guc);
+ if (err) {
+ DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
+ return err;
+ }
+
+ return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index e18d3bb..f76d51d 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -26,13 +26,38 @@
#include "intel_guc.h"
#include "intel_huc.h"
+#include "i915_params.h"
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
+int intel_uc_init_misc(struct drm_i915_private *dev_priv);
+void intel_uc_fini_misc(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
+int intel_uc_init(struct drm_i915_private *dev_priv);
+void intel_uc_fini(struct drm_i915_private *dev_priv);
+int intel_uc_suspend(struct drm_i915_private *dev_priv);
+int intel_uc_resume(struct drm_i915_private *dev_priv);
+
+static inline bool intel_uc_is_using_guc(void)
+{
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return i915_modparams.enable_guc > 0;
+}
+
+static inline bool intel_uc_is_using_guc_submission(void)
+{
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+}
+
+static inline bool intel_uc_is_using_huc(void)
+{
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
+}
#endif
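The three inline helpers above decode the enable_guc modparam as a bitmask. A minimal stand-alone sketch of the same decode is shown below; the bit assignments for ENABLE_GUC_SUBMISSION and ENABLE_GUC_LOAD_HUC are assumptions mirroring i915_params.h rather than values quoted from this patch:

#include <stdbool.h>
#include <stdio.h>

#define ENABLE_GUC_SUBMISSION	(1 << 0)	/* assumed bit layout */
#define ENABLE_GUC_LOAD_HUC	(1 << 1)	/* assumed bit layout */

static bool using_guc(int enable_guc)		 { return enable_guc > 0; }
static bool using_guc_submission(int enable_guc) { return enable_guc & ENABLE_GUC_SUBMISSION; }
static bool using_huc(int enable_guc)		 { return enable_guc & ENABLE_GUC_LOAD_HUC; }

int main(void)
{
	int enable_guc;

	/* e.g. enable_guc=2 authenticates the HuC through the GuC but keeps
	 * execlists submission; =3 enables GuC submission as well */
	for (enable_guc = 0; enable_guc <= 3; enable_guc++)
		printf("enable_guc=%d -> guc:%d submission:%d huc:%d\n",
		       enable_guc,
		       using_guc(enable_guc),
		       using_guc_submission(enable_guc),
		       using_huc(enable_guc));
	return 0;
}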
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 973888e..3ec0ce5 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -105,7 +105,7 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
}
/* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+ if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
err = -ENOEXEC;
@@ -197,11 +197,12 @@ fail:
/**
* intel_uc_fw_upload - load uC firmware using custom loader
- *
* @uc_fw: uC firmware
- * @loader: custom uC firmware loader function
+ * @xfer: custom uC firmware loader function
*
* Loads uC firmware using custom loader and updates internal flags.
+ *
+ * Return: 0 on success, non-zero on failure.
*/
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
int (*xfer)(struct intel_uc_fw *uc_fw,
@@ -214,7 +215,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -EIO;
+ return -ENOEXEC;
uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("%s fw load %s\n",
@@ -299,7 +300,7 @@ void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
*
* Pretty printer for uC firmware.
*/
-void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p)
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
drm_printf(p, "%s firmware: %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 1329036..d5fd460 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -110,12 +110,17 @@ void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
uc_fw->type = type;
}
+static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->path != NULL;
+}
+
void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
int (*xfer)(struct intel_uc_fw *uc_fw,
struct i915_vma *vma));
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
-void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p);
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8c2ce81..4df7c2ef 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -37,6 +37,12 @@ static const char * const forcewake_domain_names[] = {
"render",
"blitter",
"media",
+ "vdbox0",
+ "vdbox1",
+ "vdbox2",
+ "vdbox3",
+ "vebox0",
+ "vebox1",
};
const char *
@@ -69,17 +75,104 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
HRTIMER_MODE_REL);
}
+static inline int
+__wait_for_ack(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const u32 ack,
+ const u32 value)
+{
+ return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
+ FORCEWAKE_ACK_TIMEOUT_MS);
+}
+
+static inline int
+wait_ack_clear(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const u32 ack)
+{
+ return __wait_for_ack(i915, d, ack, 0);
+}
+
+static inline int
+wait_ack_set(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const u32 ack)
+{
+ return __wait_for_ack(i915, d, ack, ack);
+}
+
static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
- FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
+ if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
+enum ack_type {
+ ACK_CLEAR = 0,
+ ACK_SET
+};
+
+static int
+fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d,
+ const enum ack_type type)
+{
+ const u32 ack_bit = FORCEWAKE_KERNEL;
+ const u32 value = type == ACK_SET ? ack_bit : 0;
+ unsigned int pass;
+ bool ack_detected;
+
+ /*
+ * There is a possibility of the driver's wake request colliding
+ * with the hardware's own wake requests, and that can cause the
+ * hardware to not deliver the driver's ack message.
+ *
+ * Use a fallback bit toggle to kick the gpu state machine
+ * in the hope that the original ack will be delivered along with
+ * the fallback ack.
+ *
+ * This workaround is described in HSDES #1604254524
+ */
+
+ pass = 1;
+ do {
+ wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+
+ __raw_i915_write32(i915, d->reg_set,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
+ /* Give gt some time to relax before the polling frenzy */
+ udelay(10 * pass);
+ wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+
+ ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
+
+ __raw_i915_write32(i915, d->reg_set,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
+ } while (!ack_detected && pass++ < 10);
+
+ DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
+ intel_uncore_forcewake_domain_to_str(d->id),
+ type == ACK_SET ? "set" : "clear",
+ __raw_i915_read32(i915, d->reg_ack),
+ pass);
+
+ return ack_detected ? 0 : -ETIMEDOUT;
+}
+
+static inline void
+fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
+{
+ if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
+ return;
+
+ if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
+ fw_domain_wait_ack_clear(i915, d);
+}
+
static inline void
fw_domain_get(struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
@@ -88,17 +181,26 @@ fw_domain_get(struct drm_i915_private *i915,
}
static inline void
-fw_domain_wait_ack(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
+ if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
static inline void
+fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
+{
+ if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
+ return;
+
+ if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
+ fw_domain_wait_ack_set(i915, d);
+}
+
+static inline void
fw_domain_put(const struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
{
@@ -119,7 +221,27 @@ fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
}
for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_wait_ack(i915, d);
+ fw_domain_wait_ack_set(i915, d);
+
+ i915->uncore.fw_domains_active |= fw_domains;
+}
+
+static void
+fw_domains_get_with_fallback(struct drm_i915_private *i915,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
+
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
+ fw_domain_wait_ack_clear_fallback(i915, d);
+ fw_domain_get(i915, d);
+ }
+
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_wait_ack_set_fallback(i915, d);
i915->uncore.fw_domains_active |= fw_domains;
}
@@ -229,6 +351,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+/* Note callers must have acquired the PUNIT->PMIC bus before calling this. */
static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
bool restore)
{
@@ -237,6 +360,8 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
int retry_count = 100;
enum forcewake_domains fw, active_domains;
+ iosf_mbi_assert_punit_acquired();
+
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly. Wait until all pending
* timers are run before holding.
@@ -416,14 +541,18 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
+ iosf_mbi_punit_acquire();
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
+ iosf_mbi_punit_release();
}
void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
- iosf_mbi_unregister_pmic_bus_access_notifier(
+ iosf_mbi_punit_acquire();
+ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&dev_priv->uncore.pmic_bus_access_nb);
intel_uncore_forcewake_reset(dev_priv, false);
+ iosf_mbi_punit_release();
}
void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
@@ -442,9 +571,6 @@ void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
- i915_modparams.enable_rc6 =
- sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);
-
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_sanitize_gt_powersave(dev_priv);
}
@@ -654,6 +780,9 @@ void assert_forcewakes_active(struct drm_i915_private *dev_priv,
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
+#define GEN11_NEEDS_FORCE_WAKE(reg) \
+ ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
+
#define __gen6_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd; \
@@ -706,6 +835,14 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
if (!entry)
return 0;
+ /*
+ * The list of FW domains depends on the SKU in gen11+ so we
+ * can't determine it statically. We use FORCEWAKE_ALL and
+ * translate it here to the list of available domains.
+ */
+ if (entry->domains == FORCEWAKE_ALL)
+ return dev_priv->uncore.fw_domains;
+
WARN(entry->domains & ~dev_priv->uncore.fw_domains,
"Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
entry->domains & ~dev_priv->uncore.fw_domains, offset);
@@ -740,6 +877,14 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
__fwd; \
})
+#define __gen11_fwtable_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (GEN11_NEEDS_FORCE_WAKE((offset))) \
+ __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd; \
+})
+
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
@@ -751,6 +896,20 @@ static const i915_reg_t gen8_shadowed_regs[] = {
/* TODO: Other registers are not yet used */
};
+static const i915_reg_t gen11_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
+ RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
+ RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
+ RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
+ RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
+ RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
+ RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
+ /* TODO: Other registers are not yet used */
+};
+
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
u32 offset = i915_mmio_reg_offset(*reg);
@@ -763,14 +922,17 @@ static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
return 0;
}
-static bool is_gen8_shadowed(u32 offset)
-{
- const i915_reg_t *regs = gen8_shadowed_regs;
-
- return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
- mmio_reg_cmp);
+#define __is_genX_shadowed(x) \
+static bool is_gen##x##_shadowed(u32 offset) \
+{ \
+ const i915_reg_t *regs = gen##x##_shadowed_regs; \
+ return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
+ mmio_reg_cmp); \
}
+__is_genX_shadowed(8)
+__is_genX_shadowed(11)
+
#define __gen8_reg_write_fw_domains(offset) \
({ \
enum forcewake_domains __fwd; \
@@ -809,6 +971,14 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
__fwd; \
})
+#define __gen11_fwtable_reg_write_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
+ __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd; \
+})
+
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
@@ -845,6 +1015,40 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen11_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x40000, 0x1bffff, 0),
+ GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
+ GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
+ GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
+ GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
+ GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+};
+
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
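The gen11 forcewake layout above splits into two windows: everything below 0x40000, plus the per-engine media window 0x1c0000..0x1dbfff, which is exactly what GEN11_NEEDS_FORCE_WAKE() tests and what the last entries of __gen11_fw_ranges cover. A quick stand-alone check of the predicate for a few representative offsets:

#include <stdio.h>

#define GEN11_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))

int main(void)
{
	/* 0x2000: render command streamer, 0x40000: first offset outside the
	 * low window, 0x1c0000: start of the media window, 0x1dc000: first
	 * offset past the last media range in __gen11_fw_ranges. */
	unsigned int offsets[] = { 0x2000, 0x40000, 0x1c0000, 0x1dc000 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("0x%06x -> %s\n", offsets[i],
		       GEN11_NEEDS_FORCE_WAKE(offsets[i]) ?
		       "needs forcewake" : "no forcewake");
	return 0;
}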
@@ -975,7 +1179,12 @@ func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) {
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)
+#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)
+__gen11_fwtable_read(8)
+__gen11_fwtable_read(16)
+__gen11_fwtable_read(32)
+__gen11_fwtable_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
@@ -985,6 +1194,7 @@ __gen6_read(16)
__gen6_read(32)
__gen6_read(64)
+#undef __gen11_fwtable_read
#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
@@ -1061,7 +1271,11 @@ func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, boo
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)
+#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)
+__gen11_fwtable_write(8)
+__gen11_fwtable_write(16)
+__gen11_fwtable_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
@@ -1072,6 +1286,7 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
+#undef __gen11_fwtable_write
#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
@@ -1120,6 +1335,13 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
+
d->mask = BIT(domain_id);
@@ -1147,7 +1369,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
}
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ int i;
+
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1156,6 +1380,32 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ continue;
+
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
+ FORCEWAKE_MEDIA_VDBOX_GEN11(i),
+ FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
+ }
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ continue;
+
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
+ FORCEWAKE_MEDIA_VEBOX_GEN11(i),
+ FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
+ }
+ } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_fallback;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_RENDER_GEN9,
+ FORCEWAKE_ACK_RENDER_GEN9);
+ fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ FORCEWAKE_BLITTER_GEN9,
+ FORCEWAKE_ACK_BLITTER_GEN9);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -1301,26 +1551,30 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
}
- } else {
+ } else if (IS_GEN(dev_priv, 9, 10)) {
ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ } else {
+ ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
}
iosf_mbi_register_pmic_bus_access_notifier(
&dev_priv->uncore.pmic_bus_access_nb);
-
- i915_check_and_clear_faults(dev_priv);
}
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
- iosf_mbi_unregister_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
-
/* Paranoia: make sure we have disabled everything before we exit. */
intel_uncore_sanitize(dev_priv);
+
+ iosf_mbi_punit_acquire();
+ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+ &dev_priv->uncore.pmic_bus_access_nb);
intel_uncore_forcewake_reset(dev_priv, false);
+ iosf_mbi_punit_release();
}
static const struct reg_whitelist {
@@ -1331,7 +1585,7 @@ static const struct reg_whitelist {
} reg_read_whitelist[] = { {
.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
- .gen_mask = INTEL_GEN_MASK(4, 10),
+ .gen_mask = INTEL_GEN_MASK(4, 11),
.size = 8
} };
@@ -1400,9 +1654,15 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
engine->name);
- I915_WRITE_FW(RING_CTL(base), 0);
+ I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
+ POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
+
I915_WRITE_FW(RING_HEAD(base), 0);
I915_WRITE_FW(RING_TAIL(base), 0);
+ POSTING_READ_FW(RING_TAIL(base));
+
+ /* The ring must be empty before it is disabled */
+ I915_WRITE_FW(RING_CTL(base), 0);
/* Check acts as a post */
if (I915_READ_FW(RING_HEAD(base)) != 0)
@@ -1423,24 +1683,31 @@ static void i915_stop_engines(struct drm_i915_private *dev_priv,
gen3_stop_engine(engine);
}
-static bool i915_reset_complete(struct pci_dev *pdev)
+static bool i915_in_reset(struct pci_dev *pdev)
{
u8 gdrst;
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_STATUS) == 0;
+ return gdrst & GRDOM_RESET_STATUS;
}
static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
+ int err;
- /* assert reset for at least 20 usec */
+ /* Assert reset for at least 20 usec, and wait for acknowledgement. */
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
usleep_range(50, 200);
+ err = wait_for(i915_in_reset(pdev), 500);
+
+ /* Clear the reset request. */
pci_write_config_byte(pdev, I915_GDRST, 0);
+ usleep_range(50, 200);
+ if (!err)
+ err = wait_for(!i915_in_reset(pdev), 500);
- return wait_for(i915_reset_complete(pdev), 500);
+ return err;
}
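
The reworked i915_do_reset() now performs a full handshake: assert GRDOM_RESET_ENABLE, wait for the GDRST status bit to acknowledge the reset, clear the request, then wait for the bit to drop again. A minimal user-space sketch of that assert/ack/deassert pattern, with fake_write_config()/fake_read_config() standing in for the PCI config accessors (names invented for this illustration, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define RESET_ENABLE 0x01
#define RESET_STATUS 0x01

static unsigned char fake_gdrst;	/* stand-in for the I915_GDRST config byte */

static void fake_write_config(unsigned char v) { fake_gdrst = v; }
static unsigned char fake_read_config(void) { return fake_gdrst; }

static bool in_reset(void) { return fake_read_config() & RESET_STATUS; }
static bool out_of_reset(void) { return !in_reset(); }

/* Poll @cond up to @retries times; 0 on success, -1 on timeout. */
static int wait_for(bool (*cond)(void), int retries)
{
	while (retries--)
		if (cond())
			return 0;
	return -1;
}

int main(void)
{
	int err;

	fake_write_config(RESET_ENABLE);		/* assert reset */
	err = wait_for(in_reset, 500);			/* wait for the ack */

	fake_write_config(0);				/* clear the reset request */
	if (!err)
		err = wait_for(out_of_reset, 500);	/* wait for deassertion */

	printf("reset handshake %s\n", err ? "timed out" : "completed");
	return err ? 1 : 0;
}
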
static bool g4x_reset_complete(struct pci_dev *pdev)
@@ -1642,12 +1909,14 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
}
/**
- * intel_wait_for_register - wait until register matches expected state
+ * __intel_wait_for_register - wait until register matches expected state
* @dev_priv: the i915 device
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
- * @timeout_ms: timeout in millisecond
+ * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
+ * @slow_timeout_ms: slow timeout in milliseconds
+ * @out_value: optional placeholder to hold register value
*
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
@@ -1658,14 +1927,17 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
*
* Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
-int intel_wait_for_register(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,
u32 value,
- unsigned int timeout_ms)
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value)
{
unsigned fw =
intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ u32 reg_value;
int ret;
might_sleep();
@@ -1675,14 +1947,18 @@ int intel_wait_for_register(struct drm_i915_private *dev_priv,
ret = __intel_wait_for_register_fw(dev_priv,
reg, mask, value,
- 2, 0, NULL);
+ fast_timeout_us, 0, &reg_value);
intel_uncore_forcewake_put__locked(dev_priv, fw);
spin_unlock_irq(&dev_priv->uncore.lock);
if (ret)
- ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
- timeout_ms);
+ ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
+ (reg_value & mask) == value,
+ slow_timeout_ms * 1000, 10, 1000);
+
+ if (out_value)
+ *out_value = reg_value;
return ret;
}
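
__intel_wait_for_register() above combines a short busy-poll under forcewake (fast_timeout_us) with a sleeping poll (slow_timeout_ms), and reports the last value it read through out_value. A hedged, self-contained sketch of that two-phase wait, with read_reg()/sleep_us() as invented stand-ins for the MMIO read and delay helpers -- not the driver's implementation:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_reg;

/* Pretend the hardware flips bit 0 after a few reads. */
static uint32_t read_reg(void)
{
	static int calls;

	if (++calls > 3)
		fake_reg = 0x1;
	return fake_reg;
}

static void sleep_us(long us)
{
	struct timespec ts = { 0, us * 1000L };

	nanosleep(&ts, NULL);
}

/* Two-phase wait: returns 0 once (reg & mask) == value, -1 on timeout. */
static int wait_for_value(uint32_t mask, uint32_t value,
			  unsigned int fast_us, unsigned int slow_ms,
			  uint32_t *out_value)
{
	uint32_t reg = 0;
	unsigned int t;
	int err = -1;

	for (t = 0; t < fast_us && err; t++)	/* fast path: tight busy-poll */
		if (((reg = read_reg()) & mask) == value)
			err = 0;

	for (t = 0; t < slow_ms && err; t++) {	/* slow path: sleeping poll */
		if (((reg = read_reg()) & mask) == value) {
			err = 0;
			break;
		}
		sleep_us(1000);
	}

	if (out_value)
		*out_value = reg;	/* hand back the last value observed */
	return err;
}

int main(void)
{
	uint32_t last;
	int ret = wait_for_value(0x1, 0x1, 2, 10, &last);

	printf("ret=%d last=0x%x\n", ret, last);
	return ret ? 1 : 0;
}
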
@@ -1740,9 +2016,9 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
if (!i915_modparams.reset)
return NULL;
- if (INTEL_INFO(dev_priv)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return gen8_reset_engines;
- else if (INTEL_INFO(dev_priv)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
return gen6_reset_engines;
else if (IS_GEN5(dev_priv))
return ironlake_do_reset;
@@ -1750,7 +2026,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
return g4x_do_reset;
else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
return g33_do_reset;
- else if (INTEL_INFO(dev_priv)->gen >= 3)
+ else if (INTEL_GEN(dev_priv) >= 3)
return i915_do_reset;
else
return NULL;
@@ -1801,23 +2077,17 @@ bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
return intel_get_gpu_reset(dev_priv) != NULL;
}
-/*
- * When GuC submission is enabled, GuC manages ELSP and can initiate the
- * engine reset too. For now, fall back to full GPU reset if it is enabled.
- */
bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
return (dev_priv->info.has_reset_engine &&
- !dev_priv->guc.execbuf_client &&
i915_modparams.reset >= 2);
}
-int intel_guc_reset(struct drm_i915_private *dev_priv)
+int intel_reset_guc(struct drm_i915_private *dev_priv)
{
int ret;
- if (!HAS_GUC(dev_priv))
- return -EINVAL;
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
@@ -1857,7 +2127,9 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (HAS_FWTABLE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ fw_domains = __gen11_fwtable_reg_read_fw_domains(offset);
+ } else if (HAS_FWTABLE(dev_priv)) {
fw_domains = __fwtable_reg_read_fw_domains(offset);
} else if (INTEL_GEN(dev_priv) >= 6) {
fw_domains = __gen6_reg_read_fw_domains(offset);
@@ -1878,7 +2150,9 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
+ } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
fw_domains = __fwtable_reg_write_fw_domains(offset);
} else if (IS_GEN8(dev_priv)) {
fw_domains = __gen8_reg_write_fw_domains(offset);
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 9ce079b..dfdf444 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -37,17 +37,28 @@ enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
FW_DOMAIN_ID_BLITTER,
FW_DOMAIN_ID_MEDIA,
+ FW_DOMAIN_ID_MEDIA_VDBOX0,
+ FW_DOMAIN_ID_MEDIA_VDBOX1,
+ FW_DOMAIN_ID_MEDIA_VDBOX2,
+ FW_DOMAIN_ID_MEDIA_VDBOX3,
+ FW_DOMAIN_ID_MEDIA_VEBOX0,
+ FW_DOMAIN_ID_MEDIA_VEBOX1,
FW_DOMAIN_ID_COUNT
};
enum forcewake_domains {
- FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
- FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
- FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
- FORCEWAKE_ALL = (FORCEWAKE_RENDER |
- FORCEWAKE_BLITTER |
- FORCEWAKE_MEDIA)
+ FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
+ FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
+ FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
+ FORCEWAKE_MEDIA_VDBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
+ FORCEWAKE_MEDIA_VDBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
+ FORCEWAKE_MEDIA_VDBOX2 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
+ FORCEWAKE_MEDIA_VDBOX3 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
+ FORCEWAKE_MEDIA_VEBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
+ FORCEWAKE_MEDIA_VEBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),
+
+ FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1
};
struct intel_uncore_funcs {
@@ -163,11 +174,23 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
+int __intel_wait_for_register(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value);
+static inline
int intel_wait_for_register(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,
u32 value,
- unsigned int timeout_ms);
+ unsigned int timeout_ms)
+{
+ return __intel_wait_for_register(dev_priv, reg, mask, value, 2,
+ timeout_ms, NULL);
+}
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,
@@ -186,4 +209,9 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
2, timeout_ms, NULL);
}
+#define raw_reg_read(base, reg) \
+ readl(base + i915_mmio_reg_offset(reg))
+#define raw_reg_write(base, reg, value) \
+ writel(value, base + i915_mmio_reg_offset(reg))
+
#endif /* !__INTEL_UNCORE_H__ */
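
With the per-engine VDBOX/VEBOX domains added above, FORCEWAKE_ALL is now derived from FW_DOMAIN_ID_COUNT instead of OR-ing each bit by hand, so any future domain appended to the enum is covered automatically. A small stand-alone sketch of that mask derivation (the toy enum is invented for the example, not the driver's definition):

#include <stdio.h>

#define BIT(n) (1u << (n))

enum toy_domain_id {
	TOY_RENDER = 0,
	TOY_BLITTER,
	TOY_MEDIA,
	TOY_MEDIA_VDBOX0,
	TOY_MEDIA_VDBOX1,
	TOY_COUNT
};

int main(void)
{
	/* Every bit below TOY_COUNT is set: 0x1f for the five domains here. */
	unsigned int all = BIT(TOY_COUNT) - 1;

	printf("all-domains mask = 0x%x\n", all);
	return 0;
}
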
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index f225c28..4584682 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -227,7 +227,7 @@ struct bdb_general_features {
#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
-#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
+#define DEVICE_TYPE_LVDS_SIGNALING (1 << 5)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
@@ -243,7 +243,7 @@ struct bdb_general_features {
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_DUAL_CHANNEL | \
- DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_LVDS_SIGNALING | \
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
@@ -253,7 +253,7 @@ struct bdb_general_features {
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
- DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_LVDS_SIGNALING | \
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
@@ -299,11 +299,17 @@ struct bdb_general_features {
#define DVO_PORT_DPA 10
#define DVO_PORT_DPE 11 /* 193 */
#define DVO_PORT_HDMIE 12 /* 193 */
+#define DVO_PORT_DPF 13 /* N/A */
+#define DVO_PORT_HDMIF 14 /* N/A */
#define DVO_PORT_MIPIA 21 /* 171 */
#define DVO_PORT_MIPIB 22 /* 171 */
#define DVO_PORT_MIPIC 23 /* 171 */
#define DVO_PORT_MIPID 24 /* 171 */
+#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204 */
+#define HDMI_MAX_DATA_RATE_297 1 /* 204 */
+#define HDMI_MAX_DATA_RATE_165 2 /* 204 */
+
#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
/* DDC Bus DDI Type 155+ */
@@ -314,6 +320,11 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_F,
};
+#define VBT_DP_MAX_LINK_RATE_HBR3 0
+#define VBT_DP_MAX_LINK_RATE_HBR2 1
+#define VBT_DP_MAX_LINK_RATE_HBR 2
+#define VBT_DP_MAX_LINK_RATE_LBR 3
+
/*
* The child device config, aka the display device data structure, provides a
* description of a port and its configuration on the platform.
@@ -342,8 +353,8 @@ struct child_device_config {
u8 i2c_speed;
u8 dp_onboard_redriver; /* 158 */
u8 dp_ondock_redriver; /* 158 */
- u8 hdmi_level_shifter_value:4; /* 169 */
- u8 hdmi_max_data_rate:4; /* 204 */
+ u8 hdmi_level_shifter_value:5; /* 169 */
+ u8 hdmi_max_data_rate:3; /* 204 */
u16 dtd_buf_ptr; /* 161 */
u8 edidless_efp:1; /* 161 */
u8 compression_enable:1; /* 198 */
@@ -408,6 +419,8 @@ struct child_device_config {
u16 dp_gpio_pin_num; /* 195 */
u8 dp_iboost_level:4; /* 196 */
u8 hdmi_iboost_level:4; /* 196 */
+ u8 dp_max_link_rate:2; /* 216 CNL+ */
+ u8 dp_max_link_rate_reserved:6; /* 216 */
} __packed;
struct bdb_general_definitions {
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index a2632df..391f3d9 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -129,8 +129,8 @@ huge_gem_object(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
i915_gem_object_init(obj, &huge_ops);
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->scratch = phys_size;
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 5cc8101..05bbef3 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -27,6 +27,7 @@
#include <linux/prime_numbers.h>
#include "mock_drm.h"
+#include "i915_random.h"
static const unsigned int page_sizes[] = {
I915_GTT_PAGE_SIZE_2M,
@@ -177,8 +178,8 @@ huge_pages_object(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &huge_page_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
obj->mm.page_mask = page_mask;
@@ -328,8 +329,8 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
else
i915_gem_object_init(obj, &fake_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
return obj;
@@ -963,7 +964,7 @@ static int gpu_write(struct i915_vma *vma,
u32 dword,
u32 value)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *batch;
int flags = 0;
int err;
@@ -974,7 +975,7 @@ static int gpu_write(struct i915_vma *vma,
if (err)
return err;
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -989,17 +990,9 @@ static int gpu_write(struct i915_vma *vma,
i915_vma_unpin(batch);
i915_vma_close(batch);
- err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- goto err_request;
-
- err = i915_switch_context(rq);
- if (err)
- goto err_request;
-
- err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
+ err = engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
if (err)
goto err_request;
@@ -1010,7 +1003,7 @@ static int gpu_write(struct i915_vma *vma,
reservation_object_unlock(vma->resv);
err_request:
- __i915_add_request(rq, err == 0);
+ __i915_request_add(rq, err == 0);
return err;
}
@@ -1047,19 +1040,78 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
+static int __igt_write_huge(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
+ u64 size, u64 offset,
+ u32 dword, u32 val)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ struct i915_vma *vma;
+ int err;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_unbind(vma);
+ if (err)
+ goto out_vma_close;
+
+ err = i915_vma_pin(vma, size, 0, flags | offset);
+ if (err) {
+ /*
+ * The ggtt may have some pages reserved so
+ * refrain from erroring out.
+ */
+ if (err == -ENOSPC && i915_is_ggtt(vm))
+ err = 0;
+
+ goto out_vma_close;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_vma_unpin;
+
+ err = gpu_write(vma, ctx, engine, dword, val);
+ if (err) {
+ pr_err("gpu-write failed at offset=%llx\n", offset);
+ goto out_vma_unpin;
+ }
+
+ err = cpu_check(obj, dword, val);
+ if (err) {
+ pr_err("cpu-check failed at offset=%llx\n", offset);
+ goto out_vma_unpin;
+ }
+
+out_vma_unpin:
+ i915_vma_unpin(vma);
+out_vma_close:
+ i915_vma_close(vma);
+
+ return err;
+}
+
static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
- struct i915_vma *vma;
- unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
unsigned int max_page_size;
unsigned int id;
u64 max;
u64 num;
u64 size;
+ int *order;
+ int i, n;
int err = 0;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -1071,78 +1123,56 @@ static int igt_write_huge(struct i915_gem_context *ctx,
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
max = div_u64((vm->total - size), max_page_size);
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
+ n = 0;
for_each_engine(engine, i915, id) {
- IGT_TIMEOUT(end_time);
-
if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n",
- id);
+ pr_info("store-dword-imm not supported on engine=%u\n", id);
continue;
}
+ engines[n++] = engine;
+ }
- /*
- * Try various offsets until we timeout -- we want to avoid
- * issues hidden by effectively always using offset = 0.
- */
- for_each_prime_number_from(num, 0, max) {
- u64 offset = num * max_page_size;
- u32 dword;
-
- err = i915_vma_unbind(vma);
- if (err)
- goto out_vma_close;
-
- err = i915_vma_pin(vma, size, max_page_size, flags | offset);
- if (err) {
- /*
- * The ggtt may have some pages reserved so
- * refrain from erroring out.
- */
- if (err == -ENOSPC && i915_is_ggtt(vm)) {
- err = 0;
- continue;
- }
-
- goto out_vma_close;
- }
+ if (!n)
+ return 0;
- err = igt_check_page_sizes(vma);
- if (err)
- goto out_vma_unpin;
+ /*
+ * To keep things interesting when alternating between engines in our
+ * randomized order, let's also make feeding to the same engine a few
+ * times in succession a possibility by enlarging the permutation array.
+ */
+ order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+ if (!order)
+ return -ENOMEM;
- dword = offset_in_page(num) / 4;
+ /*
+ * Try various offsets in an ascending/descending fashion until we
+ * time out -- we want to avoid issues hidden by effectively always using
+ * offset = 0.
+ */
+ i = 0;
+ for_each_prime_number_from(num, 0, max) {
+ u64 offset_low = num * max_page_size;
+ u64 offset_high = (max - num) * max_page_size;
+ u32 dword = offset_in_page(num) / 4;
- err = gpu_write(vma, ctx, engine, dword, num + 1);
- if (err) {
- pr_err("gpu-write failed at offset=%llx", offset);
- goto out_vma_unpin;
- }
+ engine = engines[order[i] % n];
+ i = (i + 1) % (n * I915_NUM_ENGINES);
- err = cpu_check(obj, dword, num + 1);
- if (err) {
- pr_err("cpu-check failed at offset=%llx", offset);
- goto out_vma_unpin;
- }
+ err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
+ if (err)
+ break;
- i915_vma_unpin(vma);
+ err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
+ if (err)
+ break;
- if (num > 0 &&
- igt_timeout(end_time,
- "%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
- __func__, id, offset, max_page_size))
- break;
- }
+ if (igt_timeout(end_time,
+ "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+ __func__, engine->id, offset_low, offset_high, max_page_size))
+ break;
}
-out_vma_unpin:
- if (i915_vma_is_pinned(vma))
- i915_vma_unpin(vma);
-out_vma_close:
- i915_vma_close(vma);
+ kfree(order);
return err;
}
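
igt_write_huge() above walks a permutation that is deliberately n * I915_NUM_ENGINES entries long and maps each entry back onto the n usable engines with order[i] % n, so the same engine can be picked several times in succession. A small stand-alone sketch of that enlarged-permutation trick, with a rand()-based Fisher-Yates shuffle standing in for i915_random_order():

#include <stdio.h>
#include <stdlib.h>

#define N_ENGINES 3	/* usable engines found in this example */
#define K 8		/* stand-in for I915_NUM_ENGINES */

int main(void)
{
	int order[N_ENGINES * K];
	int i;

	for (i = 0; i < N_ENGINES * K; i++)
		order[i] = i;

	/* Fisher-Yates shuffle, standing in for i915_random_order() */
	for (i = N_ENGINES * K - 1; i > 0; i--) {
		int j = rand() % (i + 1);
		int tmp = order[i];

		order[i] = order[j];
		order[j] = tmp;
	}

	/* Each step maps back onto the n engines, allowing repeats. */
	for (i = 0; i < N_ENGINES * K; i++)
		printf("step %2d -> engine %d\n", i, order[i] % N_ENGINES);
	return 0;
}
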
@@ -1159,6 +1189,9 @@ static int igt_ppgtt_exhaust_huge(void *arg)
int n, i;
int err = -ENODEV;
+ if (supported == I915_GTT_PAGE_SIZE_4K)
+ return 0;
+
/*
* Sanity check creating objects with a varying mix of page sizes --
* ensuring that our writes land in the right place.
@@ -1604,7 +1637,7 @@ static int igt_shrink_thp(void *arg)
* shmem to truncate our pages.
*/
i915_gem_shrink_all(i915);
- if (!IS_ERR_OR_NULL(obj->mm.pages)) {
+ if (i915_gem_object_has_pages(obj)) {
pr_err("shrink-all didn't truncate the pages\n");
err = -EINVAL;
goto out_close;
@@ -1716,6 +1749,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_get(dev_priv);
ctx = live_context(dev_priv, file);
if (IS_ERR(ctx)) {
@@ -1726,6 +1760,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
err = i915_subtests(tests, ctx);
out_unlock:
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 35d778d..340a98c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -33,7 +33,7 @@ static int cpu_set(struct drm_i915_gem_object *obj,
{
unsigned int needs_clflush;
struct page *page;
- typeof(v) *map;
+ u32 *map;
int err;
err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
@@ -59,7 +59,7 @@ static int cpu_get(struct drm_i915_gem_object *obj,
{
unsigned int needs_clflush;
struct page *page;
- typeof(v) map;
+ u32 *map;
int err;
err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
@@ -82,7 +82,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
u32 v)
{
struct i915_vma *vma;
- typeof(v) *map;
+ u32 __iomem *map;
int err;
err = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -98,7 +98,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
if (IS_ERR(map))
return PTR_ERR(map);
- map[offset / sizeof(*map)] = v;
+ iowrite32(v, &map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
return 0;
@@ -109,7 +109,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
u32 *v)
{
struct i915_vma *vma;
- typeof(v) map;
+ u32 __iomem *map;
int err;
err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -125,7 +125,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
if (IS_ERR(map))
return PTR_ERR(map);
- *v = map[offset / sizeof(*map)];
+ *v = ioread32(&map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
return 0;
@@ -135,7 +135,7 @@ static int wc_set(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 v)
{
- typeof(v) *map;
+ u32 *map;
int err;
err = i915_gem_object_set_to_wc_domain(obj, true);
@@ -156,7 +156,7 @@ static int wc_get(struct drm_i915_gem_object *obj,
unsigned long offset,
u32 *v)
{
- typeof(v) map;
+ u32 *map;
int err;
err = i915_gem_object_set_to_wc_domain(obj, false);
@@ -178,7 +178,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
u32 v)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
@@ -191,7 +191,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -199,7 +199,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
i915_vma_unpin(vma);
return PTR_ERR(cs);
}
@@ -229,7 +229,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
reservation_object_add_excl_fence(obj->resv, &rq->fence);
reservation_object_unlock(obj->resv);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
return 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index def5052..7ecaed5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -114,7 +114,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
unsigned int flags;
@@ -152,20 +152,12 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
goto err_vma;
}
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- goto err_request;
-
- err = i915_switch_context(rq);
- if (err)
- goto err_request;
-
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
@@ -188,12 +180,12 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
reservation_object_add_excl_fence(obj->resv, &rq->fence);
reservation_object_unlock(obj->resv);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
return 0;
err_request:
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
err_batch:
i915_vma_unpin(batch);
err_vma:
@@ -223,8 +215,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
}
i915_gem_obj_finish_shmem_access(obj);
- obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = 0;
+ obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
+ obj->write_domain = 0;
return 0;
}
@@ -272,6 +264,23 @@ out_unmap:
return err;
}
+static int file_add_object(struct drm_file *file,
+ struct drm_i915_gem_object *obj)
+{
+ int err;
+
+ GEM_BUG_ON(obj->base.handle_count);
+
+ /* tie the object to the drm_file for easy reaping */
+ err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ i915_gem_object_get(obj);
+ obj->base.handle_count++;
+ return 0;
+}
+
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
struct drm_file *file,
@@ -281,7 +290,6 @@ create_test_object(struct i915_gem_context *ctx,
struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
u64 size;
- u32 handle;
int err;
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
@@ -291,8 +299,7 @@ create_test_object(struct i915_gem_context *ctx,
if (IS_ERR(obj))
return obj;
- /* tie the handle to the drm_file for easy reaping */
- err = drm_gem_handle_create(file, &obj->base, &handle);
+ err = file_add_object(file, obj);
i915_gem_object_put(obj);
if (err)
return ERR_PTR(err);
@@ -325,7 +332,7 @@ static int igt_ctx_exec(void *arg)
LIST_HEAD(objects);
unsigned long ncontexts, ndwords, dw;
bool first_shared_gtt = true;
- int err;
+ int err = -ENODEV;
/* Create a few different contexts (with different mm) and write
* through each ctx/mm using the GPU making sure those writes end
@@ -369,7 +376,9 @@ static int igt_ctx_exec(void *arg)
}
}
+ intel_runtime_pm_get(i915);
err = gpu_fill(obj, ctx, engine, dw);
+ intel_runtime_pm_put(i915);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index f463105..ab9d7be 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -355,6 +355,7 @@ static int igt_evict_contexts(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
@@ -406,7 +407,7 @@ static int igt_evict_contexts(void *arg)
mutex_lock(&i915->drm.struct_mutex);
onstack_fence_init(&fence);
do {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_gem_context *ctx;
ctx = live_context(i915, file);
@@ -415,7 +416,7 @@ static int igt_evict_contexts(void *arg)
/* We will need some GGTT space for the rq's context */
igt_evict_ctl.fail_if_busy = true;
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, ctx);
igt_evict_ctl.fail_if_busy = false;
if (IS_ERR(rq)) {
@@ -436,7 +437,7 @@ static int igt_evict_contexts(void *arg)
if (err < 0)
break;
- i915_add_request(rq);
+ i915_request_add(rq);
count++;
err = 0;
} while(1);
@@ -463,6 +464,7 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 9da0c9f..f7dc926 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -113,8 +113,8 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &fake_ops);
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
/* Preallocate the "backing storage" */
@@ -216,13 +216,21 @@ static int lowlevel_hole(struct drm_i915_private *i915,
hole_size = (hole_end - hole_start) >> size;
if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
- count = hole_size;
+ count = hole_size >> 1;
+ if (!count) {
+ pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
+ __func__, hole_start, hole_end, size, hole_size);
+ break;
+ }
+
do {
- count >>= 1;
order = i915_random_order(count, &prng);
- } while (!order && count);
- if (!order)
- break;
+ if (order)
+ break;
+ } while (count >>= 1);
+ if (!count)
+ return -ENOMEM;
+ GEM_BUG_ON(!order);
GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
@@ -267,7 +275,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
+ intel_runtime_pm_get(i915);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
+ intel_runtime_pm_put(i915);
}
count = n;
@@ -697,18 +707,26 @@ static int drunk_hole(struct drm_i915_private *i915,
unsigned int *order, count, n;
struct i915_vma *vma;
u64 hole_size;
- int err;
+ int err = -ENODEV;
hole_size = (hole_end - hole_start) >> size;
if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
- count = hole_size;
+ count = hole_size >> 1;
+ if (!count) {
+ pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
+ __func__, hole_start, hole_end, size, hole_size);
+ break;
+ }
+
do {
- count >>= 1;
order = i915_random_order(count, &prng);
- } while (!order && count);
- if (!order)
- break;
+ if (order)
+ break;
+ } while (count >>= 1);
+ if (!count)
+ return -ENOMEM;
+ GEM_BUG_ON(!order);
/* Ignore allocation failures (i.e. don't report them as
* a test failure) as we are purposefully allocating very
@@ -867,6 +885,84 @@ static int shrink_hole(struct drm_i915_private *i915,
return err;
}
+static int shrink_boom(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ unsigned int sizes[] = { SZ_2M, SZ_1G };
+ struct drm_i915_gem_object *purge;
+ struct drm_i915_gem_object *explode;
+ int err;
+ int i;
+
+ /*
+ * Catch the case which shrink_hole seems to miss. The setup here
+ * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
+ * ensuring that all vma associated with the respective pd/pdp are
+ * unpinned at the time.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ unsigned int size = sizes[i];
+ struct i915_vma *vma;
+
+ purge = fake_dma_object(i915, size);
+ if (IS_ERR(purge))
+ return PTR_ERR(purge);
+
+ vma = i915_vma_instance(purge, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_purge;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto err_purge;
+
+ /* Should now be ripe for purging */
+ i915_vma_unpin(vma);
+
+ explode = fake_dma_object(i915, size);
+ if (IS_ERR(explode)) {
+ err = PTR_ERR(explode);
+ goto err_purge;
+ }
+
+ vm->fault_attr.probability = 100;
+ vm->fault_attr.interval = 1;
+ atomic_set(&vm->fault_attr.times, -1);
+
+ vma = i915_vma_instance(explode, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_explode;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags | size);
+ if (err)
+ goto err_explode;
+
+ i915_vma_unpin(vma);
+
+ i915_gem_object_put(purge);
+ i915_gem_object_put(explode);
+
+ memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+ }
+
+ return 0;
+
+err_explode:
+ i915_gem_object_put(explode);
+err_purge:
+ i915_gem_object_put(purge);
+ memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+ return err;
+}
+
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
int (*func)(struct drm_i915_private *i915,
struct i915_address_space *vm,
@@ -935,6 +1031,11 @@ static int igt_ppgtt_shrink(void *arg)
return exercise_ppgtt(arg, shrink_hole);
}
+static int igt_ppgtt_shrink_boom(void *arg)
+{
+ return exercise_ppgtt(arg, shrink_boom);
+}
+
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
@@ -956,7 +1057,7 @@ static int exercise_ggtt(struct drm_i915_private *i915,
u64 hole_start, hole_end, last = 0;
struct drm_mm_node *node;
IGT_TIMEOUT(end_time);
- int err;
+ int err = 0;
mutex_lock(&i915->drm.struct_mutex);
restart:
@@ -1034,13 +1135,23 @@ static int igt_ggtt_page(void *arg)
memset(&tmp, 0, sizeof(tmp));
err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
- 1024 * PAGE_SIZE, 0,
+ count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
if (err)
goto out_unpin;
+ intel_runtime_pm_get(i915);
+
+ for (n = 0; n < count; n++) {
+ u64 offset = tmp.start + n * PAGE_SIZE;
+
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
+ }
+
order = i915_random_order(count, &prng);
if (!order) {
err = -ENOMEM;
@@ -1051,17 +1162,11 @@ static int igt_ggtt_page(void *arg)
u64 offset = tmp.start + order[n] * PAGE_SIZE;
u32 __iomem *vaddr;
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, 0),
- offset, I915_CACHE_NONE, 0);
-
- vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
iowrite32(n, vaddr + n);
io_mapping_unmap_atomic(vaddr);
-
- wmb();
- ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
}
+ i915_gem_flush_ggtt_writes(i915);
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
@@ -1069,16 +1174,10 @@ static int igt_ggtt_page(void *arg)
u32 __iomem *vaddr;
u32 val;
- ggtt->base.insert_page(&ggtt->base,
- i915_gem_object_get_dma_address(obj, 0),
- offset, I915_CACHE_NONE, 0);
-
- vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
val = ioread32(vaddr + n);
io_mapping_unmap_atomic(vaddr);
- ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
-
if (val != n) {
pr_err("insert page failed: found %d, expected %d\n",
val, n);
@@ -1089,6 +1188,8 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
+ ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+ intel_runtime_pm_put(i915);
drm_mm_remove_node(&tmp);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -1160,7 +1261,7 @@ static int igt_gtt_reserve(void *arg)
struct drm_i915_gem_object *obj, *on;
LIST_HEAD(objects);
u64 total;
- int err;
+ int err = -ENODEV;
/* i915_gem_gtt_reserve() tries to reserve the precise range
* for the node, and evicts if it has to. So our test checks that
@@ -1351,7 +1452,7 @@ static int igt_gtt_insert(void *arg)
}, *ii;
LIST_HEAD(objects);
u64 total;
- int err;
+ int err = -ENODEV;
/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
* to the node, evicting if required.
@@ -1559,6 +1660,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_pot),
SUBTEST(igt_ppgtt_fill),
SUBTEST(igt_ppgtt_shrink),
+ SUBTEST(igt_ppgtt_shrink_boom),
SUBTEST(igt_ggtt_lowlevel),
SUBTEST(igt_ggtt_drunk),
SUBTEST(igt_ggtt_walk),
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 1b8774a..fbdb241 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -212,8 +212,11 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return -EINTR;
err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
- if (err)
+ if (err) {
+ pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
+ tile->tiling, tile->stride, err);
return err;
+ }
GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
@@ -230,13 +233,16 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
GEM_BUG_ON(view.partial.size > nreal);
err = i915_gem_object_set_to_gtt_domain(obj, true);
- if (err)
+ if (err) {
+ pr_err("Failed to flush to GTT write domain; err=%d\n",
+ err);
return err;
+ }
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
- pr_err("Failed to pin partial view: offset=%lu\n",
- page);
+ pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(vma));
return PTR_ERR(vma);
}
@@ -246,8 +252,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
io = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(io)) {
- pr_err("Failed to iomap partial view: offset=%lu\n",
- page);
+ pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(io));
return PTR_ERR(io);
}
@@ -317,6 +323,7 @@ static int igt_partial_tiling(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
if (1) {
IGT_TIMEOUT(end);
@@ -418,6 +425,7 @@ next_tiling: ;
}
out_unlock:
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
@@ -428,7 +436,7 @@ out:
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_vma *vma;
int err;
@@ -440,14 +448,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
}
i915_vma_move_to_active(vma, rq, 0);
- i915_add_request(rq);
+ i915_request_add(rq);
i915_gem_object_set_active_reference(obj);
i915_vma_unpin(vma);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
index 4795877..3000e6a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
@@ -79,7 +79,7 @@ static int igt_sync(void *arg)
}, *p;
struct intel_timeline *tl;
int order, offset;
- int ret;
+ int ret = -ENODEV;
tl = mock_timeline(0);
if (!tl)
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index d7dd98a..9c76f03 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -11,7 +11,7 @@
*/
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
-selftest(requests, i915_gem_request_live_selftests)
+selftest(requests, i915_request_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
@@ -20,3 +20,4 @@ selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
+selftest(guc, intel_guc_live_selftest)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 19c6fce..9a48aa44 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -16,7 +16,7 @@ selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
selftest(timelines, i915_gem_timeline_mock_selftests)
-selftest(requests, i915_gem_request_mock_selftests)
+selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index b85872c..1f415ce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -57,7 +57,8 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
{
unsigned int *order, i;
- order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
+ order = kmalloc_array(count, sizeof(*order),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!order)
return order;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index a999161..94bc2e1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -32,7 +32,7 @@
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err = -ENOMEM;
/* Basic preliminary test to create a request and let it loose! */
@@ -44,7 +44,7 @@ static int igt_add_request(void *arg)
if (!request)
goto out_unlock;
- i915_add_request(request);
+ i915_request_add(request);
err = 0;
out_unlock:
@@ -56,7 +56,7 @@ static int igt_wait_request(void *arg)
{
const long T = HZ / 4;
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err = -EINVAL;
/* Submit a request, then wait upon it */
@@ -68,49 +68,49 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
- if (i915_gem_request_completed(request)) {
+ if (i915_request_completed(request)) {
pr_err("request completed before submit!!\n");
goto out_unlock;
}
- i915_add_request(request);
+ i915_request_add(request);
- if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
goto out_unlock;
}
- if (i915_gem_request_completed(request)) {
+ if (i915_request_completed(request)) {
pr_err("request completed immediately!\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
pr_err("request wait timed out!\n");
goto out_unlock;
}
- if (!i915_gem_request_completed(request)) {
+ if (!i915_request_completed(request)) {
pr_err("request not complete after waiting!\n");
goto out_unlock;
}
- if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
goto out_unlock;
}
@@ -126,7 +126,7 @@ static int igt_fence_wait(void *arg)
{
const long T = HZ / 4;
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err = -EINVAL;
/* Submit a request, treat it as a fence and wait upon it */
@@ -145,7 +145,7 @@ static int igt_fence_wait(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
- i915_add_request(request);
+ i915_request_add(request);
mutex_unlock(&i915->drm.struct_mutex);
if (dma_fence_is_signaled(&request->fence)) {
@@ -185,7 +185,7 @@ out_locked:
static int igt_request_rewind(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request, *vip;
+ struct i915_request *request, *vip;
struct i915_gem_context *ctx[2];
int err = -EINVAL;
@@ -197,8 +197,8 @@ static int igt_request_rewind(void *arg)
goto err_context_0;
}
- i915_gem_request_get(request);
- i915_add_request(request);
+ i915_request_get(request);
+ i915_request_add(request);
ctx[1] = mock_context(i915, "B");
vip = mock_request(i915->engine[RCS], ctx[1], 0);
@@ -210,35 +210,35 @@ static int igt_request_rewind(void *arg)
/* Simulate preemption by manual reordering */
if (!mock_cancel_request(request)) {
pr_err("failed to cancel request (already executed)!\n");
- i915_add_request(vip);
+ i915_request_add(vip);
goto err_context_1;
}
- i915_gem_request_get(vip);
- i915_add_request(vip);
+ i915_request_get(vip);
+ i915_request_add(vip);
rcu_read_lock();
request->engine->submit_request(request);
rcu_read_unlock();
mutex_unlock(&i915->drm.struct_mutex);
- if (i915_wait_request(vip, 0, HZ) == -ETIME) {
+ if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
goto err;
}
- if (i915_gem_request_completed(request)) {
+ if (i915_request_completed(request)) {
pr_err("low priority request already completed\n");
goto err;
}
err = 0;
err:
- i915_gem_request_put(vip);
+ i915_request_put(vip);
mutex_lock(&i915->drm.struct_mutex);
err_context_1:
mock_context_close(ctx[1]);
- i915_gem_request_put(request);
+ i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
mock_device_flush(i915);
@@ -246,7 +246,7 @@ err_context_0:
return err;
}
-int i915_gem_request_mock_selftests(void)
+int i915_request_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_add_request),
@@ -303,7 +303,7 @@ static int end_live_test(struct live_test *t)
{
struct drm_i915_private *i915 = t->i915;
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
if (wait_for(intel_engines_are_idle(i915), 10)) {
pr_err("%s(%s): GPU not idle\n", t->func, t->name);
@@ -332,7 +332,7 @@ static int live_nop_request(void *arg)
struct intel_engine_cs *engine;
struct live_test t;
unsigned int id;
- int err;
+ int err = -ENODEV;
/* Submit various sized batches of empty requests, to each engine
* (individually), and wait for the batch to complete. We can check
@@ -343,7 +343,7 @@ static int live_nop_request(void *arg)
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
unsigned long n, prime;
ktime_t times[2] = {};
@@ -355,8 +355,8 @@ static int live_nop_request(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
- request = i915_gem_request_alloc(engine,
- i915->kernel_context);
+ request = i915_request_alloc(engine,
+ i915->kernel_context);
if (IS_ERR(request)) {
err = PTR_ERR(request);
goto out_unlock;
@@ -375,9 +375,9 @@ static int live_nop_request(void *arg)
* for latency.
*/
- i915_add_request(request);
+ i915_request_add(request);
}
- i915_wait_request(request,
+ i915_request_wait(request,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
@@ -447,26 +447,17 @@ err:
return ERR_PTR(err);
}
-static struct drm_i915_gem_request *
+static struct i915_request *
empty_request(struct intel_engine_cs *engine,
struct i915_vma *batch)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
int err;
- request = i915_gem_request_alloc(engine,
- engine->i915->kernel_context);
+ request = i915_request_alloc(engine, engine->i915->kernel_context);
if (IS_ERR(request))
return request;
- err = engine->emit_flush(request, EMIT_INVALIDATE);
- if (err)
- goto out_request;
-
- err = i915_switch_context(request);
- if (err)
- goto out_request;
-
err = engine->emit_bb_start(request,
batch->node.start,
batch->node.size,
@@ -475,7 +466,7 @@ empty_request(struct intel_engine_cs *engine,
goto out_request;
out_request:
- __i915_add_request(request, err == 0);
+ __i915_request_add(request, err == 0);
return err ? ERR_PTR(err) : request;
}
@@ -503,7 +494,7 @@ static int live_empty_request(void *arg)
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
unsigned long n, prime;
ktime_t times[2] = {};
@@ -517,7 +508,7 @@ static int live_empty_request(void *arg)
err = PTR_ERR(request);
goto out_batch;
}
- i915_wait_request(request,
+ i915_request_wait(request,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
@@ -531,7 +522,7 @@ static int live_empty_request(void *arg)
goto out_batch;
}
}
- i915_wait_request(request,
+ i915_request_wait(request,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
@@ -641,7 +632,7 @@ static int live_all_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- struct drm_i915_gem_request *request[I915_NUM_ENGINES];
+ struct i915_request *request[I915_NUM_ENGINES];
struct i915_vma *batch;
struct live_test t;
unsigned int id;
@@ -666,8 +657,7 @@ static int live_all_engines(void *arg)
}
for_each_engine(engine, i915, id) {
- request[id] = i915_gem_request_alloc(engine,
- i915->kernel_context);
+ request[id] = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed with err=%d\n",
@@ -675,12 +665,6 @@ static int live_all_engines(void *arg)
goto out_request;
}
- err = engine->emit_flush(request[id], EMIT_INVALIDATE);
- GEM_BUG_ON(err);
-
- err = i915_switch_context(request[id]);
- GEM_BUG_ON(err);
-
err = engine->emit_bb_start(request[id],
batch->node.start,
batch->node.size,
@@ -694,12 +678,12 @@ static int live_all_engines(void *arg)
}
i915_vma_move_to_active(batch, request[id], 0);
- i915_gem_request_get(request[id]);
- i915_add_request(request[id]);
+ i915_request_get(request[id]);
+ i915_request_add(request[id]);
}
for_each_engine(engine, i915, id) {
- if (i915_gem_request_completed(request[id])) {
+ if (i915_request_completed(request[id])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
@@ -716,7 +700,7 @@ static int live_all_engines(void *arg)
for_each_engine(engine, i915, id) {
long timeout;
- timeout = i915_wait_request(request[id],
+ timeout = i915_request_wait(request[id],
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
@@ -726,8 +710,8 @@ static int live_all_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_gem_request_completed(request[id]));
- i915_gem_request_put(request[id]);
+ GEM_BUG_ON(!i915_request_completed(request[id]));
+ i915_request_put(request[id]);
request[id] = NULL;
}
@@ -736,7 +720,7 @@ static int live_all_engines(void *arg)
out_request:
for_each_engine(engine, i915, id)
if (request[id])
- i915_gem_request_put(request[id]);
+ i915_request_put(request[id]);
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
@@ -747,8 +731,8 @@ out_unlock:
static int live_sequential_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
- struct drm_i915_gem_request *prev = NULL;
+ struct i915_request *request[I915_NUM_ENGINES] = {};
+ struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
struct live_test t;
unsigned int id;
@@ -777,8 +761,7 @@ static int live_sequential_engines(void *arg)
goto out_unlock;
}
- request[id] = i915_gem_request_alloc(engine,
- i915->kernel_context);
+ request[id] = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -787,22 +770,16 @@ static int live_sequential_engines(void *arg)
}
if (prev) {
- err = i915_gem_request_await_dma_fence(request[id],
- &prev->fence);
+ err = i915_request_await_dma_fence(request[id],
+ &prev->fence);
if (err) {
- i915_add_request(request[id]);
+ i915_request_add(request[id]);
pr_err("%s: Request await failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
}
}
- err = engine->emit_flush(request[id], EMIT_INVALIDATE);
- GEM_BUG_ON(err);
-
- err = i915_switch_context(request[id]);
- GEM_BUG_ON(err);
-
err = engine->emit_bb_start(request[id],
batch->node.start,
batch->node.size,
@@ -814,8 +791,8 @@ static int live_sequential_engines(void *arg)
i915_gem_object_set_active_reference(batch->obj);
i915_vma_get(batch);
- i915_gem_request_get(request[id]);
- i915_add_request(request[id]);
+ i915_request_get(request[id]);
+ i915_request_add(request[id]);
prev = request[id];
}
@@ -823,7 +800,7 @@ static int live_sequential_engines(void *arg)
for_each_engine(engine, i915, id) {
long timeout;
- if (i915_gem_request_completed(request[id])) {
+ if (i915_request_completed(request[id])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
@@ -837,7 +814,7 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- timeout = i915_wait_request(request[id],
+ timeout = i915_request_wait(request[id],
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
@@ -847,7 +824,7 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+ GEM_BUG_ON(!i915_request_completed(request[id]));
}
err = end_live_test(&t);
@@ -869,14 +846,14 @@ out_request:
}
i915_vma_put(request[id]->batch);
- i915_gem_request_put(request[id]);
+ i915_request_put(request[id]);
}
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
-int i915_gem_request_live_selftests(struct drm_i915_private *i915)
+int i915_request_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_request),
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index ea01d0f..570e325 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -606,6 +606,139 @@ err:
return -EINVAL;
}
+static const char *mock_name(struct dma_fence *fence)
+{
+ return "mock";
+}
+
+static bool mock_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static const struct dma_fence_ops mock_fence_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+ .enable_signaling = mock_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = dma_fence_free,
+};
+
+static DEFINE_SPINLOCK(mock_fence_lock);
+
+static struct dma_fence *alloc_dma_fence(void)
+{
+ struct dma_fence *dma;
+
+ dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+ if (dma)
+ dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);
+
+ return dma;
+}
+
+static struct i915_sw_fence *
+wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
+{
+ struct i915_sw_fence *fence;
+ int err;
+
+ fence = alloc_fence();
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
+ i915_sw_fence_commit(fence);
+ if (err < 0) {
+ free_fence(fence);
+ return ERR_PTR(err);
+ }
+
+ return fence;
+}
+
+static int test_dma_fence(void *arg)
+{
+ struct i915_sw_fence *timeout = NULL, *not = NULL;
+ unsigned long delay = i915_selftest.timeout_jiffies;
+ unsigned long end, sleep;
+ struct dma_fence *dma;
+ int err;
+
+ dma = alloc_dma_fence();
+ if (!dma)
+ return -ENOMEM;
+
+ timeout = wrap_dma_fence(dma, delay);
+ if (IS_ERR(timeout)) {
+ err = PTR_ERR(timeout);
+ goto err;
+ }
+
+ not = wrap_dma_fence(dma, 0);
+ if (IS_ERR(not)) {
+ err = PTR_ERR(not);
+ goto err;
+ }
+
+ err = -EINVAL;
+ if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
+ pr_err("Fences immediately signaled\n");
+ goto err;
+ }
+
+ /* We round the timeout for the fence up to the next second */
+ end = round_jiffies_up(jiffies + delay);
+
+ sleep = jiffies_to_usecs(delay) / 3;
+ usleep_range(sleep, 2 * sleep);
+ if (time_after(jiffies, end)) {
+ pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
+ delay, end, jiffies);
+ goto skip;
+ }
+
+ if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
+ pr_err("Fences signaled too early\n");
+ goto err;
+ }
+
+ if (!wait_event_timeout(timeout->wait,
+ i915_sw_fence_done(timeout),
+ 2 * (end - jiffies) + 1)) {
+ pr_err("Timeout fence unsignaled!\n");
+ goto err;
+ }
+
+ if (i915_sw_fence_done(not)) {
+ pr_err("No timeout fence signaled!\n");
+ goto err;
+ }
+
+skip:
+ dma_fence_signal(dma);
+
+ if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
+ pr_err("Fences unsignaled\n");
+ goto err;
+ }
+
+ free_fence(not);
+ free_fence(timeout);
+ dma_fence_put(dma);
+
+ return 0;
+
+err:
+ dma_fence_signal(dma);
+ if (!IS_ERR_OR_NULL(timeout))
+ free_fence(timeout);
+ if (!IS_ERR_OR_NULL(not))
+ free_fence(not);
+ dma_fence_put(dma);
+ return err;
+}
+
int i915_sw_fence_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -618,6 +751,7 @@ int i915_sw_fence_mock_selftests(void)
SUBTEST(test_chain),
SUBTEST(test_ipc),
SUBTEST(test_timer),
+ SUBTEST(test_dma_fence),
};
return i915_subtests(tests, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/i915_syncmap.c b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
index bcab3d0..47f4ae1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
@@ -333,7 +333,7 @@ static int igt_syncmap_join_below(void *arg)
{
struct i915_syncmap *sync;
unsigned int step, order, idx;
- int err;
+ int err = -ENODEV;
i915_syncmap_init(&sync);
@@ -402,7 +402,7 @@ static int igt_syncmap_neighbours(void *arg)
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
struct i915_syncmap *sync;
- int err;
+ int err = -ENODEV;
/*
* Each leaf holds KSYNCMAP seqno. Check that when we create KSYNCMAP
@@ -447,7 +447,7 @@ static int igt_syncmap_compact(void *arg)
{
struct i915_syncmap *sync;
unsigned int idx, order;
- int err;
+ int err = -ENODEV;
i915_syncmap_init(&sync);
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 2e86ec1..eb89e30 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -150,7 +150,7 @@ static int igt_vma_create(void *arg)
IGT_TIMEOUT(end_time);
LIST_HEAD(contexts);
LIST_HEAD(objects);
- int err;
+ int err = -ENOMEM;
/* Exercise creating many vma amongst many objects, checking the
* vma creation and lookup routines.
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 54fc571..4658002 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -271,18 +271,13 @@ struct igt_wakeup {
u32 seqno;
};
-static int wait_atomic_timeout(atomic_t *p, unsigned int mode)
-{
- return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
-}
-
static bool wait_for_ready(struct igt_wakeup *w)
{
DEFINE_WAIT(ready);
set_bit(IDLE, &w->flags);
if (atomic_dec_and_test(w->done))
- wake_up_atomic_t(w->done);
+ wake_up_var(w->done);
if (test_bit(STOP, &w->flags))
goto out;
@@ -299,7 +294,7 @@ static bool wait_for_ready(struct igt_wakeup *w)
out:
clear_bit(IDLE, &w->flags);
if (atomic_dec_and_test(w->set))
- wake_up_atomic_t(w->set);
+ wake_up_var(w->set);
return !test_bit(STOP, &w->flags);
}
@@ -342,7 +337,7 @@ static void igt_wake_all_sync(atomic_t *ready,
atomic_set(ready, 0);
wake_up_all(wq);
- wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE);
+ wait_var_event(set, !atomic_read(set));
atomic_set(ready, count);
atomic_set(done, count);
}
@@ -350,7 +345,6 @@ static void igt_wake_all_sync(atomic_t *ready,
static int igt_wakeup(void *arg)
{
I915_RND_STATE(prng);
- const int state = TASK_UNINTERRUPTIBLE;
struct intel_engine_cs *engine = arg;
struct igt_wakeup *waiters;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
@@ -418,7 +412,7 @@ static int igt_wakeup(void *arg)
* that they are ready for the next test. We wait until all
* threads are complete and waiting for us (i.e. not a seqno).
*/
- err = wait_on_atomic_t(&done, wait_atomic_timeout, state);
+ err = wait_var_event_timeout(&done, !atomic_read(&done), 10 * HZ);
if (err) {
pr_err("Timed out waiting for %d remaining waiters\n",
atomic_read(&done));
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
new file mode 100644
index 0000000..fb74e2c
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+/* max doorbell number + negative test for each client type */
+#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
+
+static struct intel_guc_client *clients[ATTEMPTS];
+
+static bool available_dbs(struct intel_guc *guc, u32 priority)
+{
+ unsigned long offset;
+ unsigned long end;
+ u16 id;
+
+ /* first half is used for normal priority, second half for high */
+ offset = 0;
+ end = GUC_NUM_DOORBELLS / 2;
+ if (priority <= GUC_CLIENT_PRIORITY_HIGH) {
+ offset = end;
+ end += offset;
+ }
+
+ id = find_next_zero_bit(guc->doorbell_bitmap, end, offset);
+ if (id < end)
+ return true;
+
+ return false;
+}
+
+static int check_all_doorbells(struct intel_guc *guc)
+{
+ u16 db_id;
+
+ pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS);
+ for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) {
+ if (!doorbell_ok(guc, db_id)) {
+ pr_err("doorbell %d, not ok\n", db_id);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Basic client sanity check, handy to validate create_clients.
+ */
+static int validate_client(struct intel_guc_client *client,
+ int client_priority,
+ bool is_preempt_client)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
+ struct i915_gem_context *ctx_owner = is_preempt_client ?
+ dev_priv->preempt_context : dev_priv->kernel_context;
+
+ if (client->owner != ctx_owner ||
+ client->engines != INTEL_INFO(dev_priv)->ring_mask ||
+ client->priority != client_priority ||
+ client->doorbell_id == GUC_DOORBELL_INVALID)
+ return -EINVAL;
+ else
+ return 0;
+}
+
+static bool client_doorbell_in_sync(struct intel_guc_client *client)
+{
+ return !client || doorbell_ok(client->guc, client->doorbell_id);
+}
+
+/*
+ * Check that we're able to synchronize guc_clients with their doorbells
+ *
+ * We're creating clients and reserving doorbells once, at module load. During
+ * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
+ * GuC being reset. In other words - GuC clients are still around, but the
+ * status of their doorbells may be incorrect. This is the reason behind
+ * validating that the doorbell status expected by the driver matches what the
+ * GuC/HW have.
+ */
+static int igt_guc_clients(void *args)
+{
+ struct drm_i915_private *dev_priv = args;
+ struct intel_guc *guc;
+ int err = 0;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ guc = &dev_priv->guc;
+ if (!guc) {
+ pr_err("No guc object!\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ err = check_all_doorbells(guc);
+ if (err)
+ goto unlock;
+
+ /*
+ * Get rid of clients created during driver load because the test will
+ * recreate them.
+ */
+ guc_clients_destroy(guc);
+ if (guc->execbuf_client || guc->preempt_client) {
+ pr_err("guc_clients_destroy lied!\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ err = guc_clients_create(guc);
+ if (err) {
+ pr_err("Failed to create clients\n");
+ goto unlock;
+ }
+ GEM_BUG_ON(!guc->execbuf_client);
+
+ err = validate_client(guc->execbuf_client,
+ GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
+ if (err) {
+ pr_err("execbug client validation failed\n");
+ goto out;
+ }
+
+ if (guc->preempt_client) {
+ err = validate_client(guc->preempt_client,
+ GUC_CLIENT_PRIORITY_KMD_HIGH, true);
+ if (err) {
+ pr_err("preempt client validation failed\n");
+ goto out;
+ }
+ }
+
+ /* each client should now have reserved a doorbell */
+ if (!has_doorbell(guc->execbuf_client) ||
+ (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
+ pr_err("guc_clients_create didn't reserve doorbells\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Now create the doorbells */
+ guc_clients_doorbell_init(guc);
+
+ /* each client should now have received a doorbell */
+ if (!client_doorbell_in_sync(guc->execbuf_client) ||
+ !client_doorbell_in_sync(guc->preempt_client)) {
+ pr_err("failed to initialize the doorbells\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Basic test - an attempt to reallocate a valid doorbell to the
+ * client it is currently assigned to should not cause a failure.
+ */
+ err = guc_clients_doorbell_init(guc);
+ if (err)
+ goto out;
+
+ /*
+ * Negative test - a client with no doorbell (invalid db id).
+ * After destroying the doorbell, the db id is changed to
+ * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
+ * allocate a doorbell with an invalid id (db has to be reserved before
+ * allocation).
+ */
+ destroy_doorbell(guc->execbuf_client);
+ if (client_doorbell_in_sync(guc->execbuf_client)) {
+ pr_err("destroy db did not work\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ unreserve_doorbell(guc->execbuf_client);
+ err = guc_clients_doorbell_init(guc);
+ if (err != -EIO) {
+ pr_err("unexpected (err = %d)", err);
+ goto out;
+ }
+
+ if (!available_dbs(guc, guc->execbuf_client->priority)) {
+ pr_err("doorbell not available when it should\n");
+ err = -EIO;
+ goto out;
+ }
+
+ /* clean after test */
+ err = reserve_doorbell(guc->execbuf_client);
+ if (err) {
+ pr_err("failed to reserve back the doorbell back\n");
+ }
+ err = create_doorbell(guc->execbuf_client);
+ if (err) {
+ pr_err("recreate doorbell failed\n");
+ goto out;
+ }
+
+out:
+ /*
+ * Leave a clean state for the other tests, plus the driver always destroys the
+ * clients during unload.
+ */
+ destroy_doorbell(guc->execbuf_client);
+ if (guc->preempt_client)
+ destroy_doorbell(guc->preempt_client);
+ guc_clients_destroy(guc);
+ guc_clients_create(guc);
+ guc_clients_doorbell_init(guc);
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return err;
+}
+
+/*
+ * Create as many clients as the number of doorbells. Note that there are
+ * already client(s)/doorbell(s) created during driver load, but this test
+ * creates its own and does not interact with the existing ones.
+ */
+static int igt_guc_doorbells(void *arg)
+{
+ struct drm_i915_private *dev_priv = arg;
+ struct intel_guc *guc;
+ int i, err = 0;
+ u16 db_id;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ guc = &dev_priv->guc;
+ if (!guc) {
+ pr_err("No guc object!\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ err = check_all_doorbells(guc);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < ATTEMPTS; i++) {
+ clients[i] = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ i % GUC_CLIENT_PRIORITY_NUM,
+ dev_priv->kernel_context);
+
+ if (!clients[i]) {
+ pr_err("[%d] No guc client\n", i);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (IS_ERR(clients[i])) {
+ if (PTR_ERR(clients[i]) != -ENOSPC) {
+ pr_err("[%d] unexpected error\n", i);
+ err = PTR_ERR(clients[i]);
+ goto out;
+ }
+
+ if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
+ pr_err("[%d] non-db related alloc fail\n", i);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* expected, ran out of dbs for this client type */
+ continue;
+ }
+
+ /*
+ * The check below is only valid because we keep a doorbell
+ * assigned during the whole life of the client.
+ */
+ if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
+ pr_err("[%d] more clients than doorbells (%d >= %d)\n",
+ i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = validate_client(clients[i],
+ i % GUC_CLIENT_PRIORITY_NUM, false);
+ if (err) {
+ pr_err("[%d] client_alloc sanity check failed!\n", i);
+ err = -EINVAL;
+ goto out;
+ }
+
+ db_id = clients[i]->doorbell_id;
+
+ err = create_doorbell(clients[i]);
+ if (err) {
+ pr_err("[%d] Failed to create a doorbell\n", i);
+ goto out;
+ }
+
+ /* doorbell id shouldn't change, we are holding the mutex */
+ if (db_id != clients[i]->doorbell_id) {
+ pr_err("[%d] doorbell id changed (%d != %d)\n",
+ i, db_id, clients[i]->doorbell_id);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = check_all_doorbells(guc);
+ if (err)
+ goto out;
+ }
+
+out:
+ for (i = 0; i < ATTEMPTS; i++)
+ if (!IS_ERR_OR_NULL(clients[i])) {
+ destroy_doorbell(clients[i]);
+ guc_client_free(clients[i]);
+ }
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return err;
+}
+
+int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_guc_clients),
+ SUBTEST(igt_guc_doorbells),
+ };
+
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return 0;
+
+ return i915_subtests(tests, dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 71ce066..df7898c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -33,6 +33,7 @@ struct hang {
struct drm_i915_private *i915;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
u32 *seqno;
u32 *batch;
};
@@ -45,9 +46,15 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
memset(h, 0, sizeof(*h));
h->i915 = i915;
+ h->ctx = kernel_context(i915);
+ if (IS_ERR(h->ctx))
+ return PTR_ERR(h->ctx);
+
h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(h->hws))
- return PTR_ERR(h->hws);
+ if (IS_ERR(h->hws)) {
+ err = PTR_ERR(h->hws);
+ goto err_ctx;
+ }
h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(h->obj)) {
@@ -79,17 +86,19 @@ err_obj:
i915_gem_object_put(h->obj);
err_hws:
i915_gem_object_put(h->hws);
+err_ctx:
+ kernel_context_close(h->ctx);
return err;
}
static u64 hws_address(const struct i915_vma *hws,
- const struct drm_i915_gem_request *rq)
+ const struct i915_request *rq)
{
return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
}
static int emit_recurse_batch(struct hang *h,
- struct drm_i915_gem_request *rq)
+ struct i915_request *rq)
{
struct drm_i915_private *i915 = h->i915;
struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
@@ -114,14 +123,6 @@ static int emit_recurse_batch(struct hang *h,
if (err)
goto unpin_vma;
- err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- goto unpin_hws;
-
- err = i915_switch_context(rq);
- if (err)
- goto unpin_hws;
-
i915_vma_move_to_active(vma, rq, 0);
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
@@ -140,6 +141,12 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
@@ -148,6 +155,12 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(i915) >= 4) {
@@ -155,12 +168,24 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
*batch++ = MI_STORE_DWORD_IMM;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
*batch++ = lower_32_bits(vma->node.start);
}
@@ -173,19 +198,16 @@ static int emit_recurse_batch(struct hang *h,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
-unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
return err;
}
-static struct drm_i915_gem_request *
-hang_create_request(struct hang *h,
- struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct i915_request *
+hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
int err;
if (i915_gem_object_is_active(h->obj)) {
@@ -210,25 +232,76 @@ hang_create_request(struct hang *h,
h->batch = vaddr;
}
- rq = i915_gem_request_alloc(engine, ctx);
+ rq = i915_request_alloc(engine, h->ctx);
if (IS_ERR(rq))
return rq;
err = emit_recurse_batch(h, rq);
if (err) {
- __i915_add_request(rq, false);
+ __i915_request_add(rq, false);
return ERR_PTR(err);
}
return rq;
}
-static u32 hws_seqno(const struct hang *h,
- const struct drm_i915_gem_request *rq)
+static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
{
return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
}
+struct wedge_me {
+ struct delayed_work work;
+ struct drm_i915_private *i915;
+ const void *symbol;
+};
+
+static void wedge_me(struct work_struct *work)
+{
+ struct wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ w->symbol);
+ i915_gem_set_wedged(w->i915);
+}
+
+static void __init_wedge(struct wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const void *symbol)
+{
+ w->i915 = i915;
+ w->symbol = symbol;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+static void __fini_wedge(struct wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->i915 = NULL;
+}
+
+#define wedge_on_timeout(W, DEV, TIMEOUT) \
+ for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
+ (W)->i915; \
+ __fini_wedge((W)))
+
+static noinline int
+flush_test(struct drm_i915_private *i915, unsigned int flags)
+{
+ struct wedge_me w;
+
+ cond_resched();
+
+ wedge_on_timeout(&w, i915, HZ)
+ i915_gem_wait_for_idle(i915, flags);
+
+ return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+}
+
static void hang_fini(struct hang *h)
{
*h->batch = MI_BATCH_BUFFER_END;
@@ -240,13 +313,25 @@ static void hang_fini(struct hang *h)
i915_gem_object_unpin_map(h->hws);
i915_gem_object_put(h->hws);
- i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
+ kernel_context_close(h->ctx);
+
+ flush_test(h->i915, I915_WAIT_LOCKED);
+}
+
+static bool wait_for_hang(struct hang *h, struct i915_request *rq)
+{
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 1000));
}
static int igt_hang_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
@@ -265,7 +350,7 @@ static int igt_hang_sanitycheck(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- rq = hang_create_request(&h, engine, i915->kernel_context);
+ rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("Failed to create request for %s, err=%d\n",
@@ -273,17 +358,17 @@ static int igt_hang_sanitycheck(void *arg)
goto fini;
}
- i915_gem_request_get(rq);
+ i915_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
- __i915_add_request(rq, true);
+ __i915_request_add(rq, true);
- timeout = i915_wait_request(rq,
+ timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(rq);
+ i915_request_put(rq);
if (timeout < 0) {
err = timeout;
@@ -305,6 +390,9 @@ static void global_reset_lock(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ pr_debug("%s: current gpu_error=%08lx\n",
+ __func__, i915->gpu_error.flags);
+
while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
wait_event(i915->gpu_error.reset_queue,
!test_bit(I915_RESET_BACKOFF,
@@ -362,58 +450,133 @@ static int igt_global_reset(void *arg)
return err;
}
-static int igt_reset_engine(void *arg)
+static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
- struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int reset_count, reset_engine_count;
+ struct hang h;
int err = 0;
- /* Check that we can issue a global GPU and engine reset */
+ /* Check that we can issue an engine reset on an idle engine (no-op) */
if (!intel_has_reset_engine(i915))
return 0;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return err;
+ }
+
for_each_engine(engine, i915, id) {
- set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ unsigned int reset_count, reset_engine_count;
+ IGT_TIMEOUT(end_time);
+
+ if (active && !intel_engine_can_store_dword(engine))
+ continue;
+
reset_count = i915_reset_count(&i915->gpu_error);
reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
engine);
- err = i915_reset_engine(engine, I915_RESET_QUIET);
- if (err) {
- pr_err("i915_reset_engine failed\n");
- break;
- }
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ do {
+ if (active) {
+ struct i915_request *rq;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ mutex_unlock(&i915->drm.struct_mutex);
+ break;
+ }
+
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_for_hang(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ err = -EIO;
+ break;
+ }
+
+ i915_request_put(rq);
+ }
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
- pr_err("Full GPU reset recorded! (engine reset expected)\n");
- err = -EINVAL;
- break;
- }
+ engine->hangcheck.stalled = true;
+ engine->hangcheck.seqno =
+ intel_engine_get_seqno(engine);
+
+ err = i915_reset_engine(engine, I915_RESET_QUIET);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
- if (i915_reset_engine_count(&i915->gpu_error, engine) ==
- reset_engine_count) {
- pr_err("No %s engine reset recorded!\n", engine->name);
- err = -EINVAL;
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ reset_engine_count += active;
+ if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ reset_engine_count) {
+ pr_err("%s engine reset %srecorded!\n",
+ engine->name, active ? "not " : "");
+ err = -EINVAL;
+ break;
+ }
+
+ engine->hangcheck.stalled = false;
+ } while (time_before(jiffies, end_time));
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ if (err)
break;
- }
- clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ err = flush_test(i915, 0);
+ if (err)
+ break;
}
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ hang_fini(&h);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
return err;
}
+static int igt_reset_idle_engine(void *arg)
+{
+ return __igt_reset_engine(arg, false);
+}
+
+static int igt_reset_active_engine(void *arg)
+{
+ return __igt_reset_engine(arg, true);
+}
+
static int active_engine(void *data)
{
struct intel_engine_cs *engine = data;
- struct drm_i915_gem_request *rq[2] = {};
+ struct i915_request *rq[2] = {};
struct i915_gem_context *ctx[2];
struct drm_file *file;
unsigned long count = 0;
@@ -442,40 +605,41 @@ static int active_engine(void *data)
while (!kthread_should_stop()) {
unsigned int idx = count++ & 1;
- struct drm_i915_gem_request *old = rq[idx];
- struct drm_i915_gem_request *new;
+ struct i915_request *old = rq[idx];
+ struct i915_request *new;
mutex_lock(&engine->i915->drm.struct_mutex);
- new = i915_gem_request_alloc(engine, ctx[idx]);
+ new = i915_request_alloc(engine, ctx[idx]);
if (IS_ERR(new)) {
mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
break;
}
- rq[idx] = i915_gem_request_get(new);
- i915_add_request(new);
+ rq[idx] = i915_request_get(new);
+ i915_request_add(new);
mutex_unlock(&engine->i915->drm.struct_mutex);
if (old) {
- i915_wait_request(old, 0, MAX_SCHEDULE_TIMEOUT);
- i915_gem_request_put(old);
+ i915_request_wait(old, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(old);
}
}
for (count = 0; count < ARRAY_SIZE(rq); count++)
- i915_gem_request_put(rq[count]);
+ i915_request_put(rq[count]);
err_file:
mock_file_free(engine->i915, file);
return err;
}
-static int igt_reset_active_engines(void *arg)
+static int __igt_reset_engine_others(struct drm_i915_private *i915,
+ bool active)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine, *active;
+ struct intel_engine_cs *engine, *other;
enum intel_engine_id id, tmp;
+ struct hang h;
int err = 0;
/* Check that issuing a reset on one engine does not interfere
@@ -485,24 +649,36 @@ static int igt_reset_active_engines(void *arg)
if (!intel_has_reset_engine(i915))
return 0;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return err;
+ }
+
for_each_engine(engine, i915, id) {
- struct task_struct *threads[I915_NUM_ENGINES];
+ struct task_struct *threads[I915_NUM_ENGINES] = {};
unsigned long resets[I915_NUM_ENGINES];
unsigned long global = i915_reset_count(&i915->gpu_error);
+ unsigned long count = 0;
IGT_TIMEOUT(end_time);
+ if (active && !intel_engine_can_store_dword(engine))
+ continue;
+
memset(threads, 0, sizeof(threads));
- for_each_engine(active, i915, tmp) {
+ for_each_engine(other, i915, tmp) {
struct task_struct *tsk;
- if (active == engine)
- continue;
-
resets[tmp] = i915_reset_engine_count(&i915->gpu_error,
- active);
+ other);
+
+ if (other == engine)
+ continue;
- tsk = kthread_run(active_engine, active,
- "igt/%s", active->name);
+ tsk = kthread_run(active_engine, other,
+ "igt/%s", other->name);
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
goto unwind;
@@ -512,20 +688,69 @@ static int igt_reset_active_engines(void *arg)
get_task_struct(tsk);
}
- set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
+ if (active) {
+ struct i915_request *rq;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ mutex_unlock(&i915->drm.struct_mutex);
+ break;
+ }
+
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_for_hang(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_request_put(rq);
+ err = -EIO;
+ break;
+ }
+
+ i915_request_put(rq);
+ }
+
+ engine->hangcheck.stalled = true;
+ engine->hangcheck.seqno =
+ intel_engine_get_seqno(engine);
+
err = i915_reset_engine(engine, I915_RESET_QUIET);
if (err) {
- pr_err("i915_reset_engine(%s) failed, err=%d\n",
- engine->name, err);
+ pr_err("i915_reset_engine(%s:%s) failed, err=%d\n",
+ engine->name, active ? "active" : "idle", err);
break;
}
+
+ engine->hangcheck.stalled = false;
+ count++;
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ pr_info("i915_reset_engine(%s:%s): %lu resets\n",
+ engine->name, active ? "active" : "idle", count);
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) -
+ resets[engine->id] != (active ? count : 0)) {
+ pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
+ engine->name, active ? "active" : "idle", count,
+ i915_reset_engine_count(&i915->gpu_error,
+ engine) - resets[engine->id]);
+ if (!err)
+ err = -EINVAL;
+ }
unwind:
- for_each_engine(active, i915, tmp) {
+ for_each_engine(other, i915, tmp) {
int ret;
if (!threads[tmp])
@@ -533,42 +758,62 @@ unwind:
ret = kthread_stop(threads[tmp]);
if (ret) {
- pr_err("kthread for active engine %s failed, err=%d\n",
- active->name, ret);
+ pr_err("kthread for other engine %s failed, err=%d\n",
+ other->name, ret);
if (!err)
err = ret;
}
put_task_struct(threads[tmp]);
if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error,
- active)) {
+ other)) {
pr_err("Innocent engine %s was reset (count=%ld)\n",
- active->name,
+ other->name,
i915_reset_engine_count(&i915->gpu_error,
- active) - resets[tmp]);
- err = -EIO;
+ other) - resets[tmp]);
+ if (!err)
+ err = -EINVAL;
}
}
if (global != i915_reset_count(&i915->gpu_error)) {
pr_err("Global reset (count=%ld)!\n",
i915_reset_count(&i915->gpu_error) - global);
- err = -EIO;
+ if (!err)
+ err = -EINVAL;
}
if (err)
break;
- cond_resched();
+ err = flush_test(i915, 0);
+ if (err)
+ break;
}
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ hang_fini(&h);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
return err;
}
-static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
+static int igt_reset_idle_engine_others(void *arg)
+{
+ return __igt_reset_engine_others(arg, false);
+}
+
+static int igt_reset_active_engine_others(void *arg)
+{
+ return __igt_reset_engine_others(arg, true);
+}
+
+static u32 fake_hangcheck(struct i915_request *rq)
{
u32 reset_count;
@@ -583,20 +828,10 @@ static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
return reset_count;
}
-static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
-{
- return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
- rq->fence.seqno),
- 10) &&
- wait_for(i915_seqno_passed(hws_seqno(h, rq),
- rq->fence.seqno),
- 1000));
-}
-
static int igt_wait_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
@@ -614,21 +849,21 @@ static int igt_wait_reset(void *arg)
if (err)
goto unlock;
- rq = hang_create_request(&h, i915->engine[RCS], i915->kernel_context);
+ rq = hang_create_request(&h, i915->engine[RCS]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto fini;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- rq->fence.seqno, hws_seqno(&h, rq));
- intel_engine_dump(rq->engine, &p);
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
i915_reset(i915, 0);
i915_gem_set_wedged(i915);
@@ -639,9 +874,9 @@ static int igt_wait_reset(void *arg)
reset_count = fake_hangcheck(rq);
- timeout = i915_wait_request(rq, I915_WAIT_LOCKED, 10);
+ timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
if (timeout < 0) {
- pr_err("i915_wait_request failed on a stuck request: err=%ld\n",
+ pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
timeout);
err = timeout;
goto out_rq;
@@ -655,7 +890,7 @@ static int igt_wait_reset(void *arg)
}
out_rq:
- i915_gem_request_put(rq);
+ i915_request_put(rq);
fini:
hang_fini(&h);
unlock:
@@ -686,47 +921,46 @@ static int igt_reset_queue(void *arg)
goto unlock;
for_each_engine(engine, i915, id) {
- struct drm_i915_gem_request *prev;
+ struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
if (!intel_engine_can_store_dword(engine))
continue;
- prev = hang_create_request(&h, engine, i915->kernel_context);
+ prev = hang_create_request(&h, engine);
if (IS_ERR(prev)) {
err = PTR_ERR(prev);
goto fini;
}
- i915_gem_request_get(prev);
- __i915_add_request(prev, true);
+ i915_request_get(prev);
+ __i915_request_add(prev, true);
count = 0;
do {
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
unsigned int reset_count;
- rq = hang_create_request(&h,
- engine,
- i915->kernel_context);
+ rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto fini;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
if (!wait_for_hang(&h, prev)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- prev->fence.seqno, hws_seqno(&h, prev));
- intel_engine_dump(rq->engine, &p);
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, prev->fence.seqno, hws_seqno(&h, prev));
+ intel_engine_dump(prev->engine, &p,
+ "%s\n", prev->engine->name);
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
i915_reset(i915, 0);
i915_gem_set_wedged(i915);
@@ -745,8 +979,8 @@ static int igt_reset_queue(void *arg)
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
prev->fence.error);
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
err = -EINVAL;
goto fini;
}
@@ -754,21 +988,21 @@ static int igt_reset_queue(void *arg)
if (rq->fence.error) {
pr_err("Fence error status not zero [%d] after unrelated reset\n",
rq->fence.error);
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
err = -EINVAL;
goto fini;
}
if (i915_reset_count(&i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
- i915_gem_request_put(rq);
- i915_gem_request_put(prev);
+ i915_request_put(rq);
+ i915_request_put(prev);
err = -EINVAL;
goto fini;
}
- i915_gem_request_put(prev);
+ i915_request_put(prev);
prev = rq;
count++;
} while (time_before(jiffies, end_time));
@@ -777,7 +1011,11 @@ static int igt_reset_queue(void *arg)
*h.batch = MI_BATCH_BUFFER_END;
i915_gem_chipset_flush(i915);
- i915_gem_request_put(prev);
+ i915_request_put(prev);
+
+ err = flush_test(i915, I915_WAIT_LOCKED);
+ if (err)
+ break;
}
fini:
@@ -797,7 +1035,7 @@ static int igt_handle_error(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine = i915->engine[RCS];
struct hang h;
- struct drm_i915_gem_request *rq;
+ struct i915_request *rq;
struct i915_gpu_state *error;
int err;
@@ -815,21 +1053,21 @@ static int igt_handle_error(void *arg)
if (err)
goto err_unlock;
- rq = hang_create_request(&h, engine, i915->kernel_context);
+ rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_fini;
}
- i915_gem_request_get(rq);
- __i915_add_request(rq, true);
+ i915_request_get(rq);
+ __i915_request_add(rq, true);
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- rq->fence.seqno, hws_seqno(&h, rq));
- intel_engine_dump(rq->engine, &p);
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
i915_reset(i915, 0);
i915_gem_set_wedged(i915);
@@ -859,7 +1097,7 @@ static int igt_handle_error(void *arg)
}
err_request:
- i915_gem_request_put(rq);
+ i915_request_put(rq);
err_fini:
hang_fini(&h);
err_unlock:
@@ -872,21 +1110,26 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_hang_sanitycheck),
- SUBTEST(igt_reset_engine),
- SUBTEST(igt_reset_active_engines),
+ SUBTEST(igt_reset_idle_engine),
+ SUBTEST(igt_reset_active_engine),
+ SUBTEST(igt_reset_idle_engine_others),
+ SUBTEST(igt_reset_active_engine_others),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
SUBTEST(igt_handle_error),
};
+ bool saved_hangcheck;
int err;
if (!intel_has_gpu_reset(i915))
return 0;
intel_runtime_pm_get(i915);
+ saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
err = i915_subtests(tests, i915);
+ i915_modparams.enable_hangcheck = saved_hangcheck;
intel_runtime_pm_put(i915);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 3cac22e..f76f259 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -61,20 +61,30 @@ static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
static int intel_shadow_table_check(void)
{
- const i915_reg_t *reg = gen8_shadowed_regs;
- unsigned int i;
+ struct {
+ const i915_reg_t *regs;
+ unsigned int size;
+ } reg_lists[] = {
+ { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
+ { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
+ };
+ const i915_reg_t *reg;
+ unsigned int i, j;
s32 prev;
- for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
- u32 offset = i915_mmio_reg_offset(*reg);
+ for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) {
+ reg = reg_lists[j].regs;
+ for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) {
+ u32 offset = i915_mmio_reg_offset(*reg);
- if (prev >= (s32)offset) {
- pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
- __func__, i, offset, prev);
- return -EINVAL;
- }
+ if (prev >= (s32)offset) {
+ pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
+ __func__, i, offset, prev);
+ return -EINVAL;
+ }
- prev = offset;
+ prev = offset;
+ }
}
return 0;
@@ -90,6 +100,7 @@ int intel_uncore_mock_selftests(void)
{ __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
+ { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
};
int err, i;
@@ -120,10 +131,10 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
!IS_CHERRYVIEW(dev_priv))
return 0;
- if (IS_VALLEYVIEW(dev_priv)) /* XXX system lockup! */
- return 0;
-
- if (IS_BROADWELL(dev_priv)) /* XXX random GPU hang afterwards! */
+ /*
+ * This test may lock up the machine or cause GPU hangs afterwards.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid),
@@ -148,7 +159,10 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
for_each_set_bit(offset, valid, FW_RANGE) {
i915_reg_t reg = { offset };
+ iosf_mbi_punit_acquire();
intel_uncore_forcewake_reset(dev_priv, false);
+ iosf_mbi_punit_release();
+
check_for_unclaimed_mmio(dev_priv);
(void)I915_READ(reg);
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index bbf80d4..501becc 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -92,3 +92,14 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
return i915_gem_create_context(i915, file->driver_priv);
}
+
+struct i915_gem_context *
+kernel_context(struct drm_i915_private *i915)
+{
+ return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL);
+}
+
+void kernel_context_close(struct i915_gem_context *ctx)
+{
+ context_close(ctx);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h
index 2f432c0..29b9d60 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/selftests/mock_context.h
@@ -36,4 +36,7 @@ void mock_context_close(struct i915_gem_context *ctx);
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct drm_file *file);
+struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
+void kernel_context_close(struct i915_gem_context *ctx);
+
#endif /* !__MOCK_CONTEXT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 331c2b0..78a89ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -32,6 +32,13 @@ static struct mock_request *first_request(struct mock_engine *engine)
link);
}
+static void advance(struct mock_engine *engine,
+ struct mock_request *request)
+{
+ list_del_init(&request->link);
+ mock_seqno_advance(&engine->base, request->base.global_seqno);
+}
+
static void hw_delay_complete(struct timer_list *t)
{
struct mock_engine *engine = from_timer(engine, t, hw_delay);
@@ -39,15 +46,23 @@ static void hw_delay_complete(struct timer_list *t)
spin_lock(&engine->hw_lock);
- request = first_request(engine);
- if (request) {
- list_del_init(&request->link);
- mock_seqno_advance(&engine->base, request->base.global_seqno);
- }
-
+ /* Timer fired, first request is complete */
request = first_request(engine);
if (request)
- mod_timer(&engine->hw_delay, jiffies + request->delay);
+ advance(engine, request);
+
+ /*
+ * Also immediately signal any subsequent 0-delay requests, but
+ * requeue the timer for the next delayed request.
+ */
+ while ((request = first_request(engine))) {
+ if (request->delay) {
+ mod_timer(&engine->hw_delay, jiffies + request->delay);
+ break;
+ }
+
+ advance(engine, request);
+ }
spin_unlock(&engine->hw_lock);
}
@@ -66,7 +81,7 @@ static void mock_context_unpin(struct intel_engine_cs *engine,
i915_gem_context_put(ctx);
}
-static int mock_request_alloc(struct drm_i915_gem_request *request)
+static int mock_request_alloc(struct i915_request *request)
{
struct mock_request *mock = container_of(request, typeof(*mock), base);
@@ -76,38 +91,44 @@ static int mock_request_alloc(struct drm_i915_gem_request *request)
return 0;
}
-static int mock_emit_flush(struct drm_i915_gem_request *request,
+static int mock_emit_flush(struct i915_request *request,
unsigned int flags)
{
return 0;
}
-static void mock_emit_breadcrumb(struct drm_i915_gem_request *request,
+static void mock_emit_breadcrumb(struct i915_request *request,
u32 *flags)
{
}
-static void mock_submit_request(struct drm_i915_gem_request *request)
+static void mock_submit_request(struct i915_request *request)
{
struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
- i915_gem_request_submit(request);
+ i915_request_submit(request);
GEM_BUG_ON(!request->global_seqno);
spin_lock_irq(&engine->hw_lock);
list_add_tail(&mock->link, &engine->hw_queue);
- if (mock->link.prev == &engine->hw_queue)
- mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ if (mock->link.prev == &engine->hw_queue) {
+ if (mock->delay)
+ mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ else
+ advance(engine, mock);
+ }
spin_unlock_irq(&engine->hw_lock);
}
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
- const unsigned long sz = roundup_pow_of_two(sizeof(struct intel_ring));
+ const unsigned long sz = PAGE_SIZE / 2;
struct intel_ring *ring;
+ BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
+
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
if (!ring)
return NULL;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 04eb936..e6d4b88 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -43,7 +43,7 @@ void mock_device_flush(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
mock_engine_flush(engine);
- i915_gem_retire_requests(i915);
+ i915_retire_requests(i915);
}
static void mock_device_release(struct drm_device *dev)
@@ -85,6 +85,8 @@ static void mock_device_release(struct drm_device *dev)
i915_gemfs_fini(i915);
+ drm_mode_config_cleanup(&i915->drm);
+
drm_dev_fini(&i915->drm);
put_device(&i915->drm.pdev->dev);
}
@@ -179,20 +181,15 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- spin_lock_init(&i915->mm.object_stat_lock);
mock_uncore_init(i915);
+ i915_gem_init__mm(i915);
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
- goto put_device;
-
- INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
- init_llist_head(&i915->mm.free_list);
- INIT_LIST_HEAD(&i915->mm.unbound_list);
- INIT_LIST_HEAD(&i915->mm.bound_list);
+ goto err_drv;
mock_init_contexts(i915);
@@ -246,16 +243,10 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->kernel_context)
goto err_engine;
- i915->preempt_context = mock_context(i915, NULL);
- if (!i915->preempt_context)
- goto err_kernel_context;
-
WARN_ON(i915_gemfs_init(i915));
return i915;
-err_kernel_context:
- i915_gem_context_put(i915->kernel_context);
err_engine:
for_each_engine(engine, i915, id)
mock_engine_free(engine);
@@ -271,6 +262,9 @@ err_objects:
kmem_cache_destroy(i915->objects);
err_wq:
destroy_workqueue(i915->wq);
+err_drv:
+ drm_mode_config_cleanup(&i915->drm);
+ drm_dev_fini(&i915->drm);
put_device:
put_device(&pdev->dev);
err:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 336e1af..e96873f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -110,8 +110,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
ggtt->base.i915 = i915;
- ggtt->mappable_base = 0;
- ggtt->mappable_end = 2048 * PAGE_SIZE;
+ ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->base.total = 4096 * PAGE_SIZE;
ggtt->base.clear_range = nop_clear_range;
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 8097e36..0dc29e2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -25,16 +25,16 @@
#include "mock_engine.h"
#include "mock_request.h"
-struct drm_i915_gem_request *
+struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
unsigned long delay)
{
- struct drm_i915_gem_request *request;
+ struct i915_request *request;
struct mock_request *mock;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
- request = i915_gem_request_alloc(engine, context);
+ request = i915_request_alloc(engine, context);
if (IS_ERR(request))
return NULL;
@@ -44,7 +44,7 @@ mock_request(struct intel_engine_cs *engine,
return &mock->base;
}
-bool mock_cancel_request(struct drm_i915_gem_request *request)
+bool mock_cancel_request(struct i915_request *request)
{
struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
@@ -57,7 +57,7 @@ bool mock_cancel_request(struct drm_i915_gem_request *request)
spin_unlock_irq(&engine->hw_lock);
if (was_queued)
- i915_gem_request_unsubmit(request);
+ i915_request_unsubmit(request);
return was_queued;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 4dea74c..995fb72 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -27,20 +27,20 @@
#include <linux/list.h>
-#include "../i915_gem_request.h"
+#include "../i915_request.h"
struct mock_request {
- struct drm_i915_gem_request base;
+ struct i915_request base;
struct list_head link;
unsigned long delay;
};
-struct drm_i915_gem_request *
+struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
unsigned long delay);
-bool mock_cancel_request(struct drm_i915_gem_request *request);
+bool mock_cancel_request(struct i915_request *request);
#endif /* !__MOCK_REQUEST__ */
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index b62763a..fe6becd 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -25,6 +25,7 @@
struct imx_hdmi {
struct device *dev;
struct drm_encoder encoder;
+ struct dw_hdmi *hdmi;
struct regmap *regmap;
};
@@ -239,14 +240,18 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- ret = dw_hdmi_bind(pdev, encoder, plat_data);
+ platform_set_drvdata(pdev, hdmi);
+
+ hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
/*
* If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
* which would have called the encoder cleanup. Do it manually.
*/
- if (ret)
+ if (IS_ERR(hdmi->hdmi)) {
+ ret = PTR_ERR(hdmi->hdmi);
drm_encoder_cleanup(encoder);
+ }
return ret;
}
@@ -254,7 +259,9 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
void *data)
{
- return dw_hdmi_unbind(dev);
+ struct imx_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_unbind(hdmi->hdmi);
}
static const struct component_ops dw_hdmi_imx_ops = {
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 17d2f3a..1d053bb 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -38,7 +38,6 @@
struct imx_drm_device {
struct drm_device *drm;
unsigned int pipes;
- struct drm_fbdev_cma *fbhelper;
struct drm_atomic_state *state;
};
@@ -47,13 +46,6 @@ static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
#endif
-static void imx_drm_driver_lastclose(struct drm_device *drm)
-{
- struct imx_drm_device *imxdrm = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(imx_drm_driver_fops);
void imx_drm_connector_destroy(struct drm_connector *connector)
@@ -69,13 +61,6 @@ void imx_drm_encoder_destroy(struct drm_encoder *encoder)
}
EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
-static void imx_drm_output_poll_changed(struct drm_device *drm)
-{
- struct imx_drm_device *imxdrm = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
-}
-
static int imx_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -107,7 +92,7 @@ static int imx_drm_atomic_check(struct drm_device *dev,
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = imx_drm_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = imx_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -186,7 +171,7 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
static struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = imx_drm_driver_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
@@ -272,6 +257,7 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
+ drm->mode_config.allow_fb_modifiers = true;
drm_mode_config_init(drm);
@@ -298,12 +284,9 @@ static int imx_drm_bind(struct device *dev)
dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
legacyfb_depth = 16;
}
- imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, MAX_CRTC);
- if (IS_ERR(imxdrm->fbhelper)) {
- ret = PTR_ERR(imxdrm->fbhelper);
- imxdrm->fbhelper = NULL;
+ ret = drm_fb_cma_fbdev_init(drm, legacyfb_depth, MAX_CRTC);
+ if (ret)
goto err_unbind;
- }
#endif
drm_kms_helper_poll_init(drm);
@@ -317,8 +300,7 @@ static int imx_drm_bind(struct device *dev)
err_fbhelper:
drm_kms_helper_poll_fini(drm);
#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
- if (imxdrm->fbhelper)
- drm_fbdev_cma_fini(imxdrm->fbhelper);
+ drm_fb_cma_fbdev_fini(drm);
err_unbind:
#endif
component_unbind_all(drm->dev, drm);
@@ -333,14 +315,12 @@ err_unref:
static void imx_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct imx_drm_device *imxdrm = drm->dev_private;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
- if (imxdrm->fbhelper)
- drm_fbdev_cma_fini(imxdrm->fbhelper);
+ drm_fb_cma_fbdev_fini(drm);
drm_mode_config_cleanup(drm);
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index f0b7556..15c2bec 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -8,7 +8,6 @@ struct drm_connector;
struct drm_device;
struct drm_display_mode;
struct drm_encoder;
-struct drm_fbdev_cma;
struct drm_framebuffer;
struct drm_plane;
struct imx_drm_crtc;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9a99618..e83af0f 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
drm_crtc_vblank_on(crtc);
+}
+static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc));
@@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
.mode_set_nofb = ipu_crtc_mode_set_nofb,
.atomic_check = ipu_crtc_atomic_check,
.atomic_begin = ipu_crtc_atomic_begin,
+ .atomic_flush = ipu_crtc_atomic_flush,
.atomic_disable = ipu_crtc_atomic_disable,
.atomic_enable = ipu_crtc_atomic_enable,
};
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 247c60e..203f247 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -22,6 +22,7 @@
#include <drm/drm_plane_helper.h>
#include "video/imx-ipu-v3.h"
+#include "imx-drm.h"
#include "ipuv3-plane.h"
struct ipu_plane_state {
@@ -77,6 +78,18 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_BGRX8888_A8,
};
+static const uint64_t ipu_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const uint64_t pre_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_VIVANTE_TILED,
+ DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
+ DRM_FORMAT_MOD_INVALID
+};
+
int ipu_plane_irq(struct ipu_plane *ipu_plane)
{
return ipu_idmac_channel_irq(ipu_plane->ipu, ipu_plane->ipu_ch,
@@ -260,7 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
kfree(ipu_plane);
}
-void ipu_plane_state_reset(struct drm_plane *plane)
+static void ipu_plane_state_reset(struct drm_plane *plane)
{
struct ipu_plane_state *ipu_state;
@@ -280,7 +293,8 @@ void ipu_plane_state_reset(struct drm_plane *plane)
plane->state = &ipu_state->base;
}
-struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *
+ipu_plane_duplicate_state(struct drm_plane *plane)
{
struct ipu_plane_state *state;
@@ -294,8 +308,8 @@ struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
return &state->base;
}
-void ipu_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
+static void ipu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
@@ -303,6 +317,22 @@ void ipu_plane_destroy_state(struct drm_plane *plane,
kfree(ipu_state);
}
+static bool ipu_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format, uint64_t modifier)
+{
+ struct ipu_soc *ipu = to_ipu_plane(plane)->ipu;
+
+ /* linear is supported for all planes and formats */
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ /* without a PRG there are no supported modifiers */
+ if (!ipu_prg_present(ipu))
+ return false;
+
+ return ipu_prg_format_supported(ipu, format, modifier);
+}
+
static const struct drm_plane_funcs ipu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -310,6 +340,7 @@ static const struct drm_plane_funcs ipu_plane_funcs = {
.reset = ipu_plane_state_reset,
.atomic_duplicate_state = ipu_plane_duplicate_state,
.atomic_destroy_state = ipu_plane_destroy_state,
+ .format_mod_supported = ipu_plane_format_mod_supported,
};
static int ipu_plane_atomic_check(struct drm_plane *plane,
@@ -322,7 +353,6 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *old_fb = old_state->fb;
unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba;
bool can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
- struct drm_rect clip;
int hsub, vsub;
int ret;
@@ -338,14 +368,10 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
- clip.x1 = 0;
- clip.y1 = 0;
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
- ret = drm_plane_helper_check_state(state, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- can_position, true);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ can_position, true);
if (ret)
return ret;
@@ -550,8 +576,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
drm_rect_width(&state->src) >> 16,
drm_rect_height(&state->src) >> 16,
- fb->pitches[0],
- fb->format->format, &eba);
+ fb->pitches[0], fb->format->format,
+ fb->modifier, &eba);
}
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -700,18 +726,71 @@ static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state, *crtc_state;
struct drm_plane_state *plane_state;
+ struct ipu_plane_state *ipu_state;
+ struct ipu_plane *ipu_plane;
struct drm_plane *plane;
+ struct drm_crtc *crtc;
int available_pres = ipu_prg_max_active_channels();
- int i;
+ int ret, i;
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * We are going over the planes in 2 passes: first we assign PREs to
+ * planes with a tiling modifier, which need the PREs to resolve into
+ * linear. Any failure to assign a PRE there is fatal. In the second
+ * pass we try to assign PREs to linear FBs, to improve memory access
+ * patterns for them. Failure at this point is non-fatal, as we can
+ * scan out linear FBs without a PRE.
+ */
for_each_new_plane_in_state(state, plane, plane_state, i) {
- struct ipu_plane_state *ipu_state =
- to_ipu_plane_state(plane_state);
- struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ ipu_state = to_ipu_plane_state(plane_state);
+ ipu_plane = to_ipu_plane(plane);
+
+ if (!plane_state->fb) {
+ ipu_state->use_pre = false;
+ continue;
+ }
+
+ if (!(plane_state->fb->flags & DRM_MODE_FB_MODIFIERS) ||
+ plane_state->fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ continue;
+
+ if (!ipu_prg_present(ipu_plane->ipu) || !available_pres)
+ return -EINVAL;
+
+ if (!ipu_prg_format_supported(ipu_plane->ipu,
+ plane_state->fb->format->format,
+ plane_state->fb->modifier))
+ return -EINVAL;
+
+ ipu_state->use_pre = true;
+ available_pres--;
+ }
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ ipu_state = to_ipu_plane_state(plane_state);
+ ipu_plane = to_ipu_plane(plane);
+
+ if (!plane_state->fb) {
+ ipu_state->use_pre = false;
+ continue;
+ }
+
+ if ((plane_state->fb->flags & DRM_MODE_FB_MODIFIERS) &&
+ plane_state->fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ continue;
+
+ /* make sure that modifier is initialized */
+ plane_state->fb->modifier = DRM_FORMAT_MOD_LINEAR;
if (ipu_prg_present(ipu_plane->ipu) && available_pres &&
- plane_state->fb &&
ipu_prg_format_supported(ipu_plane->ipu,
plane_state->fb->format->format,
plane_state->fb->modifier)) {
@@ -731,6 +810,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
enum drm_plane_type type)
{
struct ipu_plane *ipu_plane;
+ const uint64_t *modifiers = ipu_format_modifiers;
int ret;
DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
@@ -746,10 +826,13 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
ipu_plane->dma = dma;
ipu_plane->dp_flow = dp;
+ if (ipu_prg_present(ipu))
+ modifiers = pre_format_modifiers;
+
ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
&ipu_plane_funcs, ipu_plane_formats,
ARRAY_SIZE(ipu_plane_formats),
- NULL, type, NULL);
+ modifiers, type, NULL);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
kfree(ipu_plane);
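
The two-pass PRE assignment added in ipu_planes_assign_pre() above is easiest to see in isolation. Below is a standalone sketch, not driver code, using hypothetical types (struct fb_desc, assign_pres) that only model the decision described in the comment: tiled framebuffers must receive a PRE or the commit fails, linear framebuffers get a leftover PRE on a best-effort basis.

#include <stdbool.h>
#include <stddef.h>

struct fb_desc {
	bool tiled;	/* needs a PRE to resolve into linear */
	bool use_pre;	/* result of the assignment */
};

static int assign_pres(struct fb_desc *fbs, size_t n, int available_pres)
{
	size_t i;

	/* pass 1: tiled FBs must get a PRE, running out is fatal */
	for (i = 0; i < n; i++) {
		if (!fbs[i].tiled)
			continue;
		if (!available_pres)
			return -1;
		fbs[i].use_pre = true;
		available_pres--;
	}

	/* pass 2: hand leftover PREs to linear FBs, best effort only */
	for (i = 0; i < n; i++) {
		if (fbs[i].tiled)
			continue;
		fbs[i].use_pre = available_pres > 0;
		if (fbs[i].use_pre)
			available_pres--;
	}

	return 0;
}
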
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 6f12189..2f4b0ff 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -91,7 +91,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
{
struct drm_framebuffer *fb = state->fb;
struct drm_crtc_state *crtc_state;
- struct drm_rect clip = { 0, };
if (!fb)
return 0;
@@ -108,13 +107,10 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- clip.x2 = crtc_state->mode.hdisplay;
- clip.y2 = crtc_state->mode.vdisplay;
-
- return drm_plane_helper_check_state(state, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
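
This is the same conversion applied to the imx and meson planes elsewhere in the series: drm_atomic_helper_check_plane_state() derives the clip rectangle from the CRTC state internally, so callers no longer build a drm_rect by hand. For reference, the helper's prototype as assumed from the calls above:

int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
					const struct drm_crtc_state *crtc_state,
					int min_scale, int max_scale,
					bool can_position,
					bool can_update_disabled);

Passing DRM_PLANE_HELPER_NO_SCALING for both scale limits keeps the previous no-scaling behaviour.
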
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 3ff5027..59a1102 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1054,7 +1054,8 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
u8 buffer[10];
ssize_t err;
- err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&frame,
+ &hdmi->conn, mode);
if (err) {
dev_err(hdmi->dev,
"Failed to get vendor infoframe from mode: %zd\n", err);
@@ -1222,7 +1223,6 @@ static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
drm_mode_connector_update_edid_property(conn, edid);
ret = drm_add_edid_modes(conn, edid);
- drm_edid_to_eld(conn, edid);
kfree(edid);
return ret;
}
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 5155f01..0552020 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -36,6 +36,7 @@
#include "meson_venc.h"
#include "meson_vpp.h"
#include "meson_viu.h"
+#include "meson_canvas.h"
#include "meson_registers.h"
/* CRTC definition */
@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv)
} else
meson_vpp_disable_interlace_vscaler_osd1(priv);
+ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ priv->viu.osd1_addr, priv->viu.osd1_stride,
+ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR);
+
/* Enable OSD1 */
writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
priv->io_base + _REG(VPP_MISC));
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 3b804fd..32b1a6c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -151,6 +151,14 @@ static struct regmap_config meson_regmap_config = {
.max_register = 0x1000,
};
+static void meson_vpu_init(struct meson_drm *priv)
+{
+ writel_relaxed(0x210000, priv->io_base + _REG(VPU_RDARB_MODE_L1C1));
+ writel_relaxed(0x10000, priv->io_base + _REG(VPU_RDARB_MODE_L1C2));
+ writel_relaxed(0x900000, priv->io_base + _REG(VPU_RDARB_MODE_L2C1));
+ writel_relaxed(0x20000, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
+}
+
static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -181,40 +189,55 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto free_drm;
+ }
priv->io_base = regs;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
+ if (!res)
+ return -EINVAL;
/* Simply ioremap since it may be a shared register zone */
regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs)
- return -EADDRNOTAVAIL;
+ if (!regs) {
+ ret = -EADDRNOTAVAIL;
+ goto free_drm;
+ }
priv->hhi = devm_regmap_init_mmio(dev, regs,
&meson_regmap_config);
if (IS_ERR(priv->hhi)) {
dev_err(&pdev->dev, "Couldn't create the HHI regmap\n");
- return PTR_ERR(priv->hhi);
+ ret = PTR_ERR(priv->hhi);
+ goto free_drm;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
+ if (!res)
+ return -EINVAL;
/* Simply ioremap since it may be a shared register zone */
regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs)
- return -EADDRNOTAVAIL;
+ if (!regs) {
+ ret = -EADDRNOTAVAIL;
+ goto free_drm;
+ }
priv->dmc = devm_regmap_init_mmio(dev, regs,
&meson_regmap_config);
if (IS_ERR(priv->dmc)) {
dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
- return PTR_ERR(priv->dmc);
+ ret = PTR_ERR(priv->dmc);
+ goto free_drm;
}
priv->vsync_irq = platform_get_irq(pdev, 0);
- drm_vblank_init(drm, 1);
+ ret = drm_vblank_init(drm, 1);
+ if (ret)
+ goto free_drm;
+
drm_mode_config_init(drm);
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
@@ -222,6 +245,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
/* Hardware Initialization */
+ meson_vpu_init(priv);
meson_venc_init(priv);
meson_vpp_init(priv);
meson_viu_init(priv);
@@ -272,7 +296,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
return 0;
free_drm:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -291,7 +315,7 @@ static void meson_drv_unbind(struct device *dev)
drm_kms_helper_poll_fini(drm);
drm_fbdev_cma_fini(priv->fbdev);
drm_mode_config_cleanup(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 5e8b392..8450d6ac 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -43,6 +43,9 @@ struct meson_drm {
bool osd1_commit;
uint32_t osd1_ctrl_stat;
uint32_t osd1_blk0_cfg[5];
+ uint32_t osd1_addr;
+ uint32_t osd1_stride;
+ uint32_t osd1_height;
} viu;
struct {
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index cef4144..a393095 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -23,6 +23,7 @@
#include <linux/of_graph.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
@@ -137,7 +138,9 @@ struct meson_dw_hdmi {
struct reset_control *hdmitx_phy;
struct clk *hdmi_pclk;
struct clk *venci_clk;
+ struct regulator *hdmi_supply;
u32 irq_stat;
+ struct dw_hdmi *hdmi;
};
#define encoder_to_meson_dw_hdmi(x) \
container_of(x, struct meson_dw_hdmi, encoder)
@@ -300,7 +303,7 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
}
}
-static inline void dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
+static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
{
struct meson_drm *priv = dw_hdmi->priv;
@@ -407,9 +410,9 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
msleep(100);
/* Reset PHY 3 times in a row */
- dw_hdmi_phy_reset(dw_hdmi);
- dw_hdmi_phy_reset(dw_hdmi);
- dw_hdmi_phy_reset(dw_hdmi);
+ meson_dw_hdmi_phy_reset(dw_hdmi);
+ meson_dw_hdmi_phy_reset(dw_hdmi);
+ meson_dw_hdmi_phy_reset(dw_hdmi);
/* Temporary Disable VENC video stream */
if (priv->venc.hdmi_use_enci)
@@ -535,7 +538,6 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* TOFIX Enable support for non-vic modes */
static enum drm_mode_status
dw_hdmi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
@@ -552,12 +554,12 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal, mode->type, mode->flags);
- /* For now, only accept VIC modes */
- if (!vic)
- return MODE_BAD;
-
- /* For now, filter by supported VIC modes */
- if (!meson_venc_hdmi_supported_vic(vic))
+ /* Check against non-VIC supported modes */
+ if (!vic) {
+ if (!meson_venc_hdmi_supported_mode(mode))
+ return MODE_BAD;
+ /* Check against supported VIC modes */
+ } else if (!meson_venc_hdmi_supported_vic(vic))
return MODE_BAD;
vclk_freq = mode->clock;
@@ -583,9 +585,14 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
/* Finally filter by configurable vclk frequencies */
switch (vclk_freq) {
+ case 25175:
+ case 40000:
case 54000:
+ case 65000:
case 74250:
+ case 108000:
case 148500:
+ case 162000:
case 297000:
case 594000:
return MODE_OK;
@@ -650,10 +657,6 @@ static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
DRM_DEBUG_DRIVER("%d:\"%s\" vic %d\n",
mode->base.id, mode->name, vic);
- /* Should have been filtered */
- if (!vic)
- return;
-
/* VENC + VENC-DVI Mode setup */
meson_venc_hdmi_mode_set(priv, vic, mode);
@@ -751,6 +754,17 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
encoder = &meson_dw_hdmi->encoder;
+ meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi");
+ if (IS_ERR(meson_dw_hdmi->hdmi_supply)) {
+ if (PTR_ERR(meson_dw_hdmi->hdmi_supply) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ meson_dw_hdmi->hdmi_supply = NULL;
+ } else {
+ ret = regulator_enable(meson_dw_hdmi->hdmi_supply);
+ if (ret)
+ return ret;
+ }
+
meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
"hdmitx_apb");
if (IS_ERR(meson_dw_hdmi->hdmitx_apb)) {
@@ -865,9 +879,12 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
- ret = dw_hdmi_bind(pdev, encoder, &meson_dw_hdmi->dw_plat_data);
- if (ret)
- return ret;
+ platform_set_drvdata(pdev, meson_dw_hdmi);
+
+ meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
+ &meson_dw_hdmi->dw_plat_data);
+ if (IS_ERR(meson_dw_hdmi->hdmi))
+ return PTR_ERR(meson_dw_hdmi->hdmi);
DRM_DEBUG_DRIVER("HDMI controller initialized\n");
@@ -877,7 +894,9 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
static void meson_dw_hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
- dw_hdmi_unbind(dev);
+ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_unbind(meson_dw_hdmi->hdmi);
}
static const struct component_ops meson_dw_hdmi_ops = {
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 17e96fa..12c80df 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -49,7 +49,6 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state;
- struct drm_rect clip = { 0, };
if (!state->crtc)
return 0;
@@ -58,13 +57,10 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- clip.x2 = crtc_state->mode.hdisplay;
- clip.y2 = crtc_state->mode.vdisplay;
-
- return drm_plane_helper_check_state(state, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
}
/* Takes a fixed 16.16 number and converts it to integer. */
@@ -164,10 +160,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Update Canvas with buffer address */
gem = drm_fb_cma_get_gem_obj(fb, 0);
- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
- gem->paddr, fb->pitches[0],
- fb->height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR);
+ priv->viu.osd1_addr = gem->paddr;
+ priv->viu.osd1_stride = fb->pitches[0];
+ priv->viu.osd1_height = fb->height;
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 2847381..bca8714 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -1363,6 +1363,10 @@
#define VPU_PROT3_STAT_1 0x277a
#define VPU_PROT3_STAT_2 0x277b
#define VPU_PROT3_REQ_ONOFF 0x277c
+#define VPU_RDARB_MODE_L1C1 0x2790
+#define VPU_RDARB_MODE_L1C2 0x2799
+#define VPU_RDARB_MODE_L2C1 0x279d
+#define VPU_WRARB_MODE_L2C1 0x27a2
/* osd super scale */
#define OSDSR_HV_SIZEIN 0x3130
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 4767704..f051122 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -328,14 +328,24 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
#define MESON_VCLK_HDMI_DDR_54000 2
/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
#define MESON_VCLK_HDMI_DDR_148500 3
+/* 4028 /4 /4 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_25175 4
+/* 3200 /4 /2 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_40000 5
+/* 5200 /4 /2 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_65000 6
/* 2970 /2 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_74250 4
+#define MESON_VCLK_HDMI_74250 7
+/* 4320 /4 /1 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_108000 8
/* 2970 /1 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_148500 5
+#define MESON_VCLK_HDMI_148500 9
+/* 3240 /2 /1 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_162000 10
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_297000 6
+#define MESON_VCLK_HDMI_297000 11
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_594000 7
+#define MESON_VCLK_HDMI_594000 12
struct meson_vclk_params {
unsigned int pll_base_freq;
@@ -401,6 +411,46 @@ struct meson_vclk_params {
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
+ [MESON_VCLK_HDMI_25175] = {
+ .pll_base_freq = 4028000,
+ .pll_od1 = 4,
+ .pll_od2 = 4,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
+ [MESON_VCLK_HDMI_40000] = {
+ .pll_base_freq = 3200000,
+ .pll_od1 = 4,
+ .pll_od2 = 2,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
+ [MESON_VCLK_HDMI_65000] = {
+ .pll_base_freq = 5200000,
+ .pll_od1 = 4,
+ .pll_od2 = 2,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
+ [MESON_VCLK_HDMI_108000] = {
+ .pll_base_freq = 4320000,
+ .pll_od1 = 4,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
+ [MESON_VCLK_HDMI_162000] = {
+ .pll_base_freq = 3240000,
+ .pll_od1 = 2,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
};
static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -451,6 +501,90 @@ void meson_hdmi_pll_set(struct meson_drm *priv,
0xFFFF, 0x4e00);
break;
+ case 3200000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000242);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* div_frac */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4aab);
+ break;
+
+ case 3240000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000243);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* div_frac */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4800);
+ break;
+
+ case 3865000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000250);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* div_frac */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4855);
+ break;
+
+ case 4028000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000253);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* div_frac */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4eab);
+ break;
+
case 4320000:
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800025a);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
@@ -485,6 +619,23 @@ void meson_hdmi_pll_set(struct meson_drm *priv,
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
val, (val & HDMI_PLL_LOCK), 10, 0);
break;
+
+ case 5200000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800026c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+ break;
};
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
@@ -498,6 +649,42 @@ void meson_hdmi_pll_set(struct meson_drm *priv,
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
break;
+ case 3200000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000285);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb155);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
+ case 3240000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000287);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
+ case 3865000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a1);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb02b);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
+ case 4028000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a7);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb355);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
case 4320000:
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002b4);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
@@ -516,6 +703,15 @@ void meson_hdmi_pll_set(struct meson_drm *priv,
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
break;
+ case 5200000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002d8);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb2ab);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
};
/* Reset PLL */
@@ -590,15 +786,30 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
else
freq = MESON_VCLK_HDMI_DDR_54000;
break;
+ case 25175:
+ freq = MESON_VCLK_HDMI_25175;
+ break;
+ case 40000:
+ freq = MESON_VCLK_HDMI_40000;
+ break;
+ case 65000:
+ freq = MESON_VCLK_HDMI_65000;
+ break;
case 74250:
freq = MESON_VCLK_HDMI_74250;
break;
+ case 108000:
+ freq = MESON_VCLK_HDMI_108000;
+ break;
case 148500:
if (dac_freq != 148500)
freq = MESON_VCLK_HDMI_DDR_148500;
else
freq = MESON_VCLK_HDMI_148500;
break;
+ case 162000:
+ freq = MESON_VCLK_HDMI_162000;
+ break;
case 297000:
freq = MESON_VCLK_HDMI_297000;
break;
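
The divider comments above encode how each new pixel clock falls out of its PLL base frequency. As a worked example, with frequencies in kHz and the dividers taken from the new meson_vclk_params entries:

	pixel clock = pll_base_freq / (od1 * od2 * od3 * vid_pll_div * vclk_div)

	25175  = 4028000 / (4 * 4 * 1 * 5 * 2)
	40000  = 3200000 / (4 * 2 * 1 * 5 * 2)
	65000  = 5200000 / (4 * 2 * 1 * 5 * 2)
	108000 = 4320000 / (4 * 1 * 1 * 5 * 2)
	162000 = 3240000 / (2 * 1 * 1 * 5 * 2)
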
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 9509017..6e27013 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -697,6 +697,314 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
},
};
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_640x480_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x31f,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0x90,
+ .havon_end = 0x30f,
+ .vavon_bline = 0x23,
+ .vavon_eline = 0x202,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0x60,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 2,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x20c,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_800x600_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x41f,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0xD8,
+ .havon_end = 0x3f7,
+ .vavon_bline = 0x1b,
+ .vavon_eline = 0x272,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0x80,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 4,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x273,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1024x768_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 1343,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 296,
+ .havon_end = 1319,
+ .vavon_bline = 35,
+ .vavon_eline = 802,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 136,
+ .vso_begin = 30,
+ .vso_end = 50,
+ .vso_bline = 0,
+ .vso_eline = 6,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 805,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1152x864_75 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x63f,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0x180,
+ .havon_end = 0x5ff,
+ .vavon_bline = 0x23,
+ .vavon_eline = 0x382,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0x80,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 3,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x383,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1280x1024_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x697,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0x168,
+ .havon_end = 0x667,
+ .vavon_bline = 0x29,
+ .vavon_eline = 0x428,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0x70,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 3,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x429,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1600x1200_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x86f,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0x1f0,
+ .havon_end = 0x82f,
+ .vavon_bline = 0x31,
+ .vavon_eline = 0x4e0,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0xc0,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 3,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x4e1,
+ },
+};
+
+struct meson_hdmi_venc_dmt_mode {
+ struct drm_display_mode drm_mode;
+ union meson_hdmi_venc_mode *mode;
+} meson_hdmi_venc_dmt_modes[] = {
+ /* 640x480@60Hz */
+ {
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ &meson_hdmi_encp_mode_640x480_60,
+ },
+ /* 800x600@60Hz */
+ {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ &meson_hdmi_encp_mode_800x600_60,
+ },
+ /* 1024x768@60Hz */
+ {
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024,
+ 1048, 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ &meson_hdmi_encp_mode_1024x768_60,
+ },
+ /* 1152x864@75Hz */
+ {
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152,
+ 1216, 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ &meson_hdmi_encp_mode_1152x864_75,
+ },
+ /* 1280x1024@60Hz */
+ {
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280,
+ 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ &meson_hdmi_encp_mode_1280x1024_60,
+ },
+ /* 1600x1200@60Hz */
+ {
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600,
+ 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ &meson_hdmi_encp_mode_1600x1200_60,
+ },
+ /* 1920x1080@60Hz */
+ {
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920,
+ 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ &meson_hdmi_encp_mode_1080p60
+ },
+ { }, /* sentinel */
+};
+
struct meson_hdmi_venc_vic_mode {
unsigned int vic;
union meson_hdmi_venc_mode *mode;
@@ -736,6 +1044,20 @@ static unsigned long modulo(unsigned long a, unsigned long b)
return a;
}
+bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
+{
+ struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
+
+ while (vmode->mode) {
+ if (drm_mode_equal(&vmode->drm_mode, mode))
+ return true;
+ vmode++;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_mode);
+
bool meson_venc_hdmi_supported_vic(int vic)
{
struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
@@ -750,6 +1072,20 @@ bool meson_venc_hdmi_supported_vic(int vic)
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
+static union meson_hdmi_venc_mode
+*meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode)
+{
+ struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
+
+ while (vmode->mode) {
+ if (drm_mode_equal(&vmode->drm_mode, mode))
+ return vmode->mode;
+ vmode++;
+ }
+
+ return NULL;
+}
+
static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic)
{
struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
@@ -811,10 +1147,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int sof_lines;
unsigned int vsync_lines;
- vmode = meson_venc_hdmi_get_vic_vmode(vic);
+ if (meson_venc_hdmi_supported_vic(vic))
+ vmode = meson_venc_hdmi_get_vic_vmode(vic);
+ else
+ vmode = meson_venc_hdmi_get_dmt_vmode(mode);
if (!vmode) {
- dev_err(priv->dev, "%s: Fatal Error, unsupported vic %d\n",
- __func__, vic);
+ dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
+ DRM_MODE_FMT "\n", __func__, DRM_MODE_ARG(mode));
return;
}
@@ -864,7 +1203,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
hsync_pixels_venc *= 2;
/* Disable VDACs */
- writel_bits_relaxed(0x1f, 0x1f,
+ writel_bits_relaxed(0xff, 0xff,
priv->io_base + _REG(VENC_VDAC_SETTING));
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index a1b96e8..7c18a36 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -58,6 +58,7 @@ struct meson_cvbs_enci_mode {
};
/* HDMI Clock parameters */
+bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
bool meson_venc_hdmi_supported_vic(int vic);
bool meson_venc_hdmi_venc_repeat(int vic);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 68e5d9c..fb50a9d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1620,8 +1620,8 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_VIRTUAL_X;
if (mode->vdisplay > 1024)
return MODE_VIRTUAL_Y;
- if (mga_vga_calculate_mode_bandwidth(mode,
- bpp > (31877 * 1024)))
+ if (mga_vga_calculate_mode_bandwidth(mode, bpp) >
+ (31877 * 1024))
return MODE_BANDWIDTH;
} else if (mdev->type == G200_EV &&
(mga_vga_calculate_mode_bandwidth(mode, bpp)
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3e7e1cd..05570f0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -199,9 +199,8 @@ static struct ttm_backend_func mgag200_tt_backend_func = {
};
-static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct ttm_tt *tt;
@@ -209,27 +208,15 @@ static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
if (tt == NULL)
return NULL;
tt->func = &mgag200_tt_backend_func;
- if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_tt_init(tt, bo, page_flags)) {
kfree(tt);
return NULL;
}
return tt;
}
-static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
-{
- return ttm_pool_populate(ttm);
-}
-
-static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate(ttm);
-}
-
struct ttm_bo_driver mgag200_bo_driver = {
.ttm_tt_create = mgag200_ttm_tt_create,
- .ttm_tt_populate = mgag200_ttm_tt_populate,
- .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
.init_mem_type = mgag200_bo_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = mgag200_bo_evict_flags,
@@ -237,7 +224,6 @@ struct ttm_bo_driver mgag200_bo_driver = {
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int mgag200_mm_init(struct mga_device *mdev)
@@ -338,7 +324,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
ttm_bo_type_device, &mgabo->placement,
- align >> PAGE_SHIFT, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, acc_size,
NULL, NULL, mgag200_bo_ttm_destroy);
if (ret)
return ret;
@@ -354,6 +340,7 @@ static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
@@ -366,7 +353,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
mgag200_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
@@ -378,6 +365,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
int mgag200_bo_unpin(struct mgag200_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
@@ -389,11 +377,12 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
}
int mgag200_bo_push_sysram(struct mgag200_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
@@ -410,7 +399,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 99d39b2..38cbde9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -28,6 +28,19 @@ config DRM_MSM_REGISTER_LOGGING
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+config DRM_MSM_GPU_SUDO
+ bool "Enable SUDO flag on submits"
+ depends on DRM_MSM && EXPERT
+ default n
+ help
+ Enable userspace that has CAP_SYS_RAWIO to submit GPU commands
+ that are run from RB instead of IB1. This essentially gives
+ userspace kernel level access, but is useful for firmware
+ debugging.
+
+ Only use this if you are a driver developer. This should *not*
+ be enabled for production kernels. If unsure, say N.
+
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"
depends on DRM_MSM && QCOM_SCM
@@ -81,3 +94,10 @@ config DRM_MSM_DSI_14NM_PHY
default y
help
Choose this option if DSI PHY on 8996 is used on the platform.
+
+config DRM_MSM_DSI_10NM_PHY
+ bool "Enable DSI 10nm PHY driver in MSM DRM (used by SDM845)"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if DSI PHY on SDM845 is used on the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 92b3844..cd40c05 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -25,26 +25,26 @@ msm-y := \
edp/edp_connector.o \
edp/edp_ctrl.o \
edp/edp_phy.o \
- mdp/mdp_format.o \
- mdp/mdp_kms.o \
- mdp/mdp4/mdp4_crtc.o \
- mdp/mdp4/mdp4_dtv_encoder.o \
- mdp/mdp4/mdp4_lcdc_encoder.o \
- mdp/mdp4/mdp4_lvds_connector.o \
- mdp/mdp4/mdp4_irq.o \
- mdp/mdp4/mdp4_kms.o \
- mdp/mdp4/mdp4_plane.o \
- mdp/mdp5/mdp5_cfg.o \
- mdp/mdp5/mdp5_ctl.o \
- mdp/mdp5/mdp5_crtc.o \
- mdp/mdp5/mdp5_encoder.o \
- mdp/mdp5/mdp5_irq.o \
- mdp/mdp5/mdp5_mdss.o \
- mdp/mdp5/mdp5_kms.o \
- mdp/mdp5/mdp5_pipe.o \
- mdp/mdp5/mdp5_mixer.o \
- mdp/mdp5/mdp5_plane.o \
- mdp/mdp5/mdp5_smp.o \
+ disp/mdp_format.o \
+ disp/mdp_kms.o \
+ disp/mdp4/mdp4_crtc.o \
+ disp/mdp4/mdp4_dtv_encoder.o \
+ disp/mdp4/mdp4_lcdc_encoder.o \
+ disp/mdp4/mdp4_lvds_connector.o \
+ disp/mdp4/mdp4_irq.o \
+ disp/mdp4/mdp4_kms.o \
+ disp/mdp4/mdp4_plane.o \
+ disp/mdp5/mdp5_cfg.o \
+ disp/mdp5/mdp5_ctl.o \
+ disp/mdp5/mdp5_crtc.o \
+ disp/mdp5/mdp5_encoder.o \
+ disp/mdp5/mdp5_irq.o \
+ disp/mdp5/mdp5_mdss.o \
+ disp/mdp5/mdp5_kms.o \
+ disp/mdp5/mdp5_pipe.o \
+ disp/mdp5/mdp5_mixer.o \
+ disp/mdp5/mdp5_plane.o \
+ disp/mdp5/mdp5_smp.o \
msm_atomic.o \
msm_debugfs.o \
msm_drv.o \
@@ -62,31 +62,35 @@ msm-y := \
msm_ringbuffer.o \
msm_submitqueue.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
- mdp/mdp4/mdp4_dsi_encoder.o \
+ disp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
dsi/phy/dsi_phy.o \
- mdp/mdp5/mdp5_cmd_encoder.o
+ disp/mdp5/mdp5_cmd_encoder.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 4baef27..3ebbeb3 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -35,6 +35,7 @@
A3XX_INT0_CP_RB_INT | \
A3XX_INT0_CP_REG_PROTECT_FAULT | \
A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_CACHE_FLUSH_TS | \
A3XX_INT0_UCHE_OOB_ACCESS)
extern bool hang_debug;
@@ -256,8 +257,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
*/
/* Load PM4: */
- ptr = (uint32_t *)(adreno_gpu->pm4->data);
- len = adreno_gpu->pm4->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %x", ptr[1]);
gpu_write(gpu, REG_AXXX_CP_DEBUG,
@@ -268,8 +269,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
- ptr = (uint32_t *)(adreno_gpu->pfp->data);
- len = adreno_gpu->pfp->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %x", ptr[5]);
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 8199a4b..16d3d59 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -27,6 +27,7 @@
A4XX_INT0_CP_RB_INT | \
A4XX_INT0_CP_REG_PROTECT_FAULT | \
A4XX_INT0_CP_AHB_ERROR_HALT | \
+ A4XX_INT0_CACHE_FLUSH_TS | \
A4XX_INT0_UCHE_OOB_ACCESS)
extern bool hang_debug;
@@ -274,16 +275,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
return ret;
/* Load PM4: */
- ptr = (uint32_t *)(adreno_gpu->pm4->data);
- len = adreno_gpu->pm4->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
- ptr = (uint32_t *)(adreno_gpu->pfp->data);
- len = adreno_gpu->pfp->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
new file mode 100644
index 0000000..059ec7d
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -0,0 +1,187 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
+#include "a5xx_gpu.h"
+
+static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "PFP state:\n");
+
+ for (i = 0; i < 36; i++) {
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
+ }
+
+ return 0;
+}
+
+static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ME state:\n");
+
+ for (i = 0; i < 29; i++) {
+ gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
+ }
+
+ return 0;
+}
+
+static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "MEQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+
+ for (i = 0; i < 64; i++) {
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+ }
+
+ return 0;
+}
+
+static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ROQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+
+ for (i = 0; i < 512 / 4; i++) {
+ uint32_t val[4];
+ int j;
+ for (j = 0; j < 4; j++)
+ val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA);
+ drm_printf(p, " %02x: %08x %08x %08x %08x\n", i,
+ val[0], val[1], val[2], val[3]);
+ }
+
+ return 0;
+}
+
+static int show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ int (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
+ node->info_ent->data;
+
+ return show(priv->gpu, &p);
+}
+
+#define ENT(n) { .name = #n, .show = show, .data = n ##_print }
+static struct drm_info_list a5xx_debugfs_list[] = {
+ ENT(pfp),
+ ENT(me),
+ ENT(meq),
+ ENT(roq),
+};
+
+/* for debugfs files that can be written to, we can't use drm helper: */
+static int
+reset_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EINVAL;
+
+ /* TODO do we care about trying to make sure the GPU is idle?
+ * Since this is just a debug feature limited to CAP_SYS_ADMIN,
+ * maybe it is fine to let the user keep both pieces if they
+ * try to reset an active GPU.
+ */
+
+ mutex_lock(&dev->struct_mutex);
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
+ adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]);
+ adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
+
+ if (a5xx_gpu->pm4_bo) {
+ if (a5xx_gpu->pm4_iova)
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_unreference(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ if (a5xx_gpu->pfp_iova)
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_unreference(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ }
+
+ gpu->needs_hw_init = true;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ gpu->funcs->recover(gpu);
+
+ pm_runtime_put_sync(&gpu->pdev->dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
+
+
+int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+{
+ struct drm_device *dev;
+ struct dentry *ent;
+ int ret;
+
+ if (!minor)
+ return 0;
+
+ dev = minor->dev;
+
+ ret = drm_debugfs_create_files(a5xx_debugfs_list,
+ ARRAY_SIZE(a5xx_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+ return ret;
+ }
+
+ ent = debugfs_create_file("reset", S_IWUGO,
+ minor->debugfs_root,
+ dev, &reset_fops);
+ if (!ent)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index a1f4eee..a4f68af 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -17,6 +17,8 @@
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/pm_opp.h>
+#include <linux/nvmem-consumer.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
@@ -138,6 +140,65 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
+static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ struct msm_gem_object *obj;
+ uint32_t *ptr, dwords;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
+ case MSM_SUBMIT_CMD_BUF:
+ /* copy commands into RB: */
+ obj = submit->bos[submit->cmd[i].idx].obj;
+ dwords = submit->cmd[i].size;
+
+ ptr = msm_gem_get_vaddr(&obj->base);
+
+ /* _get_vaddr() shouldn't fail at this point,
+ * since we've already mapped it once in
+ * submit_reloc()
+ */
+ if (WARN_ON(!ptr))
+ return;
+
+ for (i = 0; i < dwords; i++) {
+ /* normally the OUT_PKTn() would wait
+ * for space for the packet. But since
+ * we just OUT_RING() the whole thing,
+ * need to call adreno_wait_ring()
+ * ourself:
+ */
+ adreno_wait_ring(ring, 1);
+ OUT_RING(ring, ptr[i]);
+ }
+
+ msm_gem_put_vaddr(&obj->base);
+
+ break;
+ }
+ }
+
+ a5xx_flush(gpu, ring);
+ a5xx_preempt_trigger(gpu);
+
+ /* we might not necessarily have a cmd from userspace to
+ * trigger an event to know that submit has completed, so
+ * do this manually:
+ */
+ a5xx_idle(gpu, ring);
+ ring->memptrs->fence = submit->seqno;
+ msm_gpu_retire(gpu);
+}
+
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
@@ -147,6 +208,12 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
+ priv->lastctx = NULL;
+ a5xx_submit_in_rb(gpu, submit, ctx);
+ return;
+ }
+
OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x02);
@@ -430,25 +497,6 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
-
-static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
- const struct firmware *fw, u64 *iova)
-{
- struct drm_gem_object *bo;
- void *ptr;
-
- ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
- MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
-
- if (IS_ERR(ptr))
- return ERR_CAST(ptr);
-
- memcpy(ptr, &fw->data[4], fw->size - 4);
-
- msm_gem_put_vaddr(bo);
- return bo;
-}
-
static int a5xx_ucode_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -456,8 +504,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
int ret;
if (!a5xx_gpu->pm4_bo) {
- a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
- &a5xx_gpu->pm4_iova);
+ a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
@@ -469,8 +517,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
}
if (!a5xx_gpu->pfp_bo) {
- a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
- &a5xx_gpu->pfp_iova);
+ a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);
if (IS_ERR(a5xx_gpu->pfp_bo)) {
ret = PTR_ERR(a5xx_gpu->pfp_bo);
@@ -595,6 +643,12 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* Turn on performance counters */
gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+
+ /* Select RBBM0 to countable 6 to get the busy status for devfreq */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
/* Increase VFD cache access so LRZ and other data gets evicted less */
gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
@@ -785,19 +839,19 @@ static void a5xx_destroy(struct msm_gpu *gpu)
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+ drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova)
msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+ drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+ drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
}
adreno_gpu_cleanup(adreno_gpu);
@@ -1165,6 +1219,14 @@ static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
return a5xx_gpu->cur_ring;
}
+static int a5xx_gpu_busy(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
+ REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+
+ return 0;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -1179,11 +1241,32 @@ static const struct adreno_gpu_funcs funcs = {
.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
.show = a5xx_show,
+ .debugfs_init = a5xx_debugfs_init,
#endif
+ .gpu_busy = a5xx_gpu_busy,
},
.get_timestamp = a5xx_get_timestamp,
};
+static void check_speed_bin(struct device *dev)
+{
+ struct nvmem_cell *cell;
+ u32 bin, val;
+
+ cell = nvmem_cell_get(dev, "speed_bin");
+
+ /* If a nvmem cell isn't defined, nothing to do */
+ if (IS_ERR(cell))
+ return;
+
+ bin = *((u32 *) nvmem_cell_read(cell, NULL));
+ nvmem_cell_put(cell);
+
+ val = (1 << bin);
+
+ dev_pm_opp_set_supported_hw(dev, &val, 1);
+}
+
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -1210,6 +1293,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
a5xx_gpu->lm_leakage = 0x4E001A;
+ check_speed_bin(&pdev->dev);
+
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
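
check_speed_bin() above feeds the fused speed bin into the OPP core as a single supported-hw mask (val = 1 << bin). Simplified to that one-cell case, the matching rule can be sketched as below; this is a hypothetical helper for illustration, not driver or OPP core code:

	static bool opp_enabled_for_bin(u32 opp_supported_hw, u32 speed_bin)
	{
		/* an OPP stays enabled only if its mask has this bin's bit set */
		return (opp_supported_hw & (1u << speed_bin)) != 0;
	}
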
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6fb8c2f..7d71860 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -49,6 +49,10 @@ struct a5xx_gpu {
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+#ifdef CONFIG_DEBUG_FS
+int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+#endif
+
/*
* In order to do lockless preemption we use a simple state machine to progress
* through the process.
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index e5700bbf..e9c0e56 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -103,10 +103,16 @@ static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct dev_pm_opp *opp;
+ u32 ret = 0;
opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
- return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_get_voltage(opp) / 1000;
+ dev_pm_opp_put(opp);
+ }
+
+ return ret;
}
/* Setup thermal limit management */
@@ -255,7 +261,6 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct drm_device *drm = gpu->dev;
- const struct firmware *fw;
uint32_t dwords = 0, offset = 0, bosize;
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
@@ -263,15 +268,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
if (a5xx_gpu->gpmu_bo)
return;
- /* Get the firmware */
- fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw);
- if (IS_ERR(fw)) {
- DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
- gpu->name);
- return;
- }
-
- data = (unsigned int *) fw->data;
+ data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;
/*
* The first dword is the size of the remaining data in dwords. Use it
@@ -279,12 +276,14 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
* the firmware that we read
*/
- if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
- goto out;
+ if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
+ (data[0] < 2) || (data[0] >=
+ (adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
+ return;
/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
if (data[1] != 2)
- goto out;
+ return;
cmds = data + data[2] + 3;
cmds_size = data[0] - data[2] - 2;
@@ -319,8 +318,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;
- goto out;
-
+ return;
err:
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
@@ -330,8 +328,4 @@ err:
a5xx_gpu->gpmu_bo = NULL;
a5xx_gpu->gpmu_iova = 0;
a5xx_gpu->gpmu_dwords = 0;
-
-out:
- /* No need to keep that firmware laying around anymore */
- release_firmware(fw);
}
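
For reference, the GPMU image layout that the checks above assume, read straight off the parsing code (dword indices into adreno_gpu->fw[ADRENO_FW_GPMU]->data; a restatement for readability, not a formal format description):

	data[0]                 payload size in dwords; must be >= 2 and smaller
	                        than the total image size in dwords
	data[1]                 block ID; must be 2 (GPMU_FIRMWARE_ID)
	data[2]                 header length used to locate the microcode
	data[data[2] + 3] ...   GPMU microcode, data[0] - data[2] - 2 dwords,
	                        which is what ends up in gpmu_bo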
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 05022ea..8e0cb16 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -17,7 +17,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/pm_opp.h>
#include "adreno_gpu.h"
#define ANY_ID 0xff
@@ -31,73 +30,92 @@ static const struct adreno_info gpulist[] = {
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
.revn = 305,
.name = "A305",
- .pm4fw = "a300_pm4.fw",
- .pfpfw = "a300_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
.gmem = SZ_256K,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 0, 6, 0),
.revn = 307, /* because a305c is revn==306 */
.name = "A306",
- .pm4fw = "a300_pm4.fw",
- .pfpfw = "a300_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
.gmem = SZ_128K,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
.revn = 320,
.name = "A320",
- .pm4fw = "a300_pm4.fw",
- .pfpfw = "a300_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
.gmem = SZ_512K,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 3, 0, ANY_ID),
.revn = 330,
.name = "A330",
- .pm4fw = "a330_pm4.fw",
- .pfpfw = "a330_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a330_pm4.fw",
+ [ADRENO_FW_PFP] = "a330_pfp.fw",
+ },
.gmem = SZ_1M,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
.revn = 420,
.name = "A420",
- .pm4fw = "a420_pm4.fw",
- .pfpfw = "a420_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 3, 0, ANY_ID),
.revn = 430,
.name = "A430",
- .pm4fw = "a420_pm4.fw",
- .pfpfw = "a420_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
.name = "A530",
- .pm4fw = "a530_pm4.fw",
- .pfpfw = "a530_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ [ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
+ },
.gmem = SZ_1M,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
- .gpmufw = "a530v3_gpmu.fw2",
.zapfw = "a530_zap.mdt",
},
};
-MODULE_FIRMWARE("a300_pm4.fw");
-MODULE_FIRMWARE("a300_pfp.fw");
-MODULE_FIRMWARE("a330_pm4.fw");
-MODULE_FIRMWARE("a330_pfp.fw");
-MODULE_FIRMWARE("a420_pm4.fw");
-MODULE_FIRMWARE("a420_pfp.fw");
-MODULE_FIRMWARE("a530_fm4.fw");
-MODULE_FIRMWARE("a530_pfp.fw");
+MODULE_FIRMWARE("qcom/a300_pm4.fw");
+MODULE_FIRMWARE("qcom/a300_pfp.fw");
+MODULE_FIRMWARE("qcom/a330_pm4.fw");
+MODULE_FIRMWARE("qcom/a330_pfp.fw");
+MODULE_FIRMWARE("qcom/a420_pm4.fw");
+MODULE_FIRMWARE("qcom/a420_pfp.fw");
+MODULE_FIRMWARE("qcom/a530_pm4.fw");
+MODULE_FIRMWARE("qcom/a530_pfp.fw");
+MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
+MODULE_FIRMWARE("qcom/a530_zap.mdt");
+MODULE_FIRMWARE("qcom/a530_zap.b00");
+MODULE_FIRMWARE("qcom/a530_zap.b01");
+MODULE_FIRMWARE("qcom/a530_zap.b02");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -125,11 +143,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
- struct msm_gpu *gpu = platform_get_drvdata(priv->gpu_pdev);
+ struct msm_gpu *gpu = NULL;
int ret;
+ if (pdev)
+ gpu = platform_get_drvdata(pdev);
+
if (!gpu) {
- dev_err(dev->dev, "no adreno device\n");
+ dev_err_once(dev->dev, "no GPU device was found\n");
return NULL;
}
@@ -143,6 +164,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return NULL;
}
+#ifdef CONFIG_DEBUG_FS
+ if (gpu->funcs->debugfs_init) {
+ gpu->funcs->debugfs_init(gpu, dev->primary);
+ gpu->funcs->debugfs_init(gpu, dev->render);
+ gpu->funcs->debugfs_init(gpu, dev->control);
+ }
+#endif
+
return gpu;
}
@@ -153,101 +182,45 @@ static void set_gpu_pdev(struct drm_device *dev,
priv->gpu_pdev = pdev;
}
-static int find_chipid(struct device *dev, u32 *chipid)
+static int find_chipid(struct device *dev, struct adreno_rev *rev)
{
struct device_node *node = dev->of_node;
const char *compat;
int ret;
+ u32 chipid;
/* first search the compat strings for qcom,adreno-XYZ.W: */
ret = of_property_read_string_index(node, "compatible", 0, &compat);
if (ret == 0) {
- unsigned rev, patch;
+ unsigned int r, patch;
- if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) {
- *chipid = 0;
- *chipid |= (rev / 100) << 24; /* core */
- rev %= 100;
- *chipid |= (rev / 10) << 16; /* major */
- rev %= 10;
- *chipid |= rev << 8; /* minor */
- *chipid |= patch;
+ if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2) {
+ rev->core = r / 100;
+ r %= 100;
+ rev->major = r / 10;
+ r %= 10;
+ rev->minor = r;
+ rev->patchid = patch;
return 0;
}
}
/* and if that fails, fall back to legacy "qcom,chipid" property: */
- ret = of_property_read_u32(node, "qcom,chipid", chipid);
- if (ret)
+ ret = of_property_read_u32(node, "qcom,chipid", &chipid);
+ if (ret) {
+ dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
return ret;
-
- dev_warn(dev, "Using legacy qcom,chipid binding!\n");
- dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
- (*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff,
- (*chipid >> 8) & 0xff, *chipid & 0xff);
-
- return 0;
-}
-
-/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
-static int adreno_get_legacy_pwrlevels(struct device *dev)
-{
- struct device_node *child, *node;
- int ret;
-
- node = of_find_compatible_node(dev->of_node, NULL,
- "qcom,gpu-pwrlevels");
- if (!node) {
- dev_err(dev, "Could not find the GPU powerlevels\n");
- return -ENXIO;
- }
-
- for_each_child_of_node(node, child) {
- unsigned int val;
-
- ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
- if (ret)
- continue;
-
- /*
- * Skip the intentionally bogus clock value found at the bottom
- * of most legacy frequency tables
- */
- if (val != 27000000)
- dev_pm_opp_add(dev, val, 0);
}
- return 0;
-}
-
-static int adreno_get_pwrlevels(struct device *dev,
- struct adreno_platform_config *config)
-{
- unsigned long freq = ULONG_MAX;
- struct dev_pm_opp *opp;
- int ret;
-
- /* You down with OPP? */
- if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
- ret = adreno_get_legacy_pwrlevels(dev);
- else
- ret = dev_pm_opp_of_add_table(dev);
-
- if (ret)
- return ret;
-
- /* Find the fastest defined rate */
- opp = dev_pm_opp_find_freq_floor(dev, &freq);
- if (!IS_ERR(opp))
- config->fast_rate = dev_pm_opp_get_freq(opp);
+ rev->core = (chipid >> 24) & 0xff;
+ rev->major = (chipid >> 16) & 0xff;
+ rev->minor = (chipid >> 8) & 0xff;
+ rev->patchid = (chipid & 0xff);
- if (!config->fast_rate) {
- DRM_DEV_INFO(dev,
- "Could not find clock rate. Using default\n");
- /* Pick a suitably safe clock speed for any target */
- config->fast_rate = 200000000;
- }
+ dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+ dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
+ rev->core, rev->major, rev->minor, rev->patchid);
return 0;
}
@@ -258,22 +231,9 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
const struct adreno_info *info;
struct drm_device *drm = dev_get_drvdata(master);
struct msm_gpu *gpu;
- u32 val;
int ret;
- ret = find_chipid(dev, &val);
- if (ret) {
- dev_err(dev, "could not find chipid: %d\n", ret);
- return ret;
- }
-
- config.rev = ADRENO_REV((val >> 24) & 0xff,
- (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
-
- /* find clock rates: */
- config.fast_rate = 0;
-
- ret = adreno_get_pwrlevels(dev, &config);
+ ret = find_chipid(dev, &config.rev);
if (ret)
return ret;
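
To make the new compatible-string path concrete, here is a standalone demonstration of the arithmetic in find_chipid() (plain userspace C, illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int r, patch, core, major, minor;

	/* same pattern find_chipid() matches against the DT compatible */
	sscanf("qcom,adreno-530.2", "qcom,adreno-%u.%u", &r, &patch);

	core = r / 100;		/* 5 */
	r %= 100;
	major = r / 10;		/* 3 */
	r %= 10;
	minor = r;		/* 0 */

	/* prints "rev 5.3.0 patch 2", matching the A530 gpulist entry */
	printf("rev %u.%u.%u patch %u\n", core, major, minor, patch);
	return 0;
}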
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index e2ffecc..17d0506 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,11 +17,11 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/pm_opp.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
-
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -140,27 +140,47 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
- const struct firmware *fw;
+ int i;
- if (adreno_gpu->pm4)
- return 0;
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
+ const struct firmware *fw;
+
+ if (!adreno_gpu->info->fw[i])
+ continue;
+
+ /* Skip if the firmware has already been loaded */
+ if (adreno_gpu->fw[i])
+ continue;
- fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pm4fw);
- if (IS_ERR(fw))
- return PTR_ERR(fw);
- adreno_gpu->pm4 = fw;
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
- fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pfpfw);
- if (IS_ERR(fw)) {
- release_firmware(adreno_gpu->pm4);
- adreno_gpu->pm4 = NULL;
- return PTR_ERR(fw);
+ adreno_gpu->fw[i] = fw;
}
- adreno_gpu->pfp = fw;
return 0;
}
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
+
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+
+ msm_gem_put_vaddr(bo);
+
+ return bo;
+}
+
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -293,26 +313,12 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_RING(ring, 0x00000000);
}
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
- /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
- OUT_PKT3(ring, CP_INTERRUPT, 1);
- OUT_RING(ring, 0x80000000);
-
- /* Workaround for missing irq issue on 8x16/a306. Unsure if the
- * root cause is a platform issue or some a306 quirk, but this
- * keeps things humming along:
- */
- if (adreno_is_a306(adreno_gpu)) {
- OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
- OUT_RING(ring, 0x00000000);
- OUT_PKT3(ring, CP_INTERRUPT, 1);
- OUT_RING(ring, 0x80000000);
- }
-
#if 0
if (adreno_is_a3xx(adreno_gpu)) {
/* Dummy set-constant to trigger context rollover */
@@ -461,10 +467,80 @@ void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
if (spin_until(ring_freewords(ring) >= ndwords))
DRM_DEV_ERROR(ring->gpu->dev->dev,
- "timeout waiting for space in ringubffer %d\n",
+ "timeout waiting for space in ringbuffer %d\n",
ring->id);
}
+/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
+static int adreno_get_legacy_pwrlevels(struct device *dev)
+{
+ struct device_node *child, *node;
+ int ret;
+
+ node = of_find_compatible_node(dev->of_node, NULL,
+ "qcom,gpu-pwrlevels");
+ if (!node) {
+ dev_err(dev, "Could not find the GPU powerlevels\n");
+ return -ENXIO;
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int val;
+
+ ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
+ if (ret)
+ continue;
+
+ /*
+ * Skip the intentionally bogus clock value found at the bottom
+ * of most legacy frequency tables
+ */
+ if (val != 27000000)
+ dev_pm_opp_add(dev, val, 0);
+ }
+
+ return 0;
+}
+
+static int adreno_get_pwrlevels(struct device *dev,
+ struct msm_gpu *gpu)
+{
+ unsigned long freq = ULONG_MAX;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ gpu->fast_rate = 0;
+
+ /* You down with OPP? */
+ if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
+ ret = adreno_get_legacy_pwrlevels(dev);
+ else {
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret)
+ dev_err(dev, "Unable to set the OPP table\n");
+ }
+
+ if (!ret) {
+ /* Find the fastest defined rate */
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
+ if (!IS_ERR(opp)) {
+ gpu->fast_rate = freq;
+ dev_pm_opp_put(opp);
+ }
+ }
+
+ if (!gpu->fast_rate) {
+ dev_warn(dev,
+ "Could not find a clock rate. Using a reasonable default\n");
+ /* Pick a suitably safe clock speed for any target */
+ gpu->fast_rate = 200000000;
+ }
+
+ DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
+
+ return 0;
+}
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
const struct adreno_gpu_funcs *funcs, int nr_rings)
@@ -479,15 +555,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
- gpu->fast_rate = config->fast_rate;
- gpu->bus_freq = config->bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- gpu->bus_scale_table = config->bus_scale_table;
-#endif
-
- DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
- gpu->fast_rate, gpu->bus_freq);
-
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
adreno_gpu_config.irqname = "kgsl_3d0_irq";
@@ -496,6 +563,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.nr_rings = nr_rings;
+ adreno_get_pwrlevels(&pdev->dev, gpu);
+
pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -506,8 +575,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- release_firmware(adreno_gpu->pm4);
- release_firmware(adreno_gpu->pfp);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
+ release_firmware(adreno_gpu->fw[i]);
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 28e3de6..d6b0e7b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -48,6 +48,13 @@ enum adreno_regs {
REG_ADRENO_REGISTER_MAX,
};
+enum {
+ ADRENO_FW_PM4 = 0,
+ ADRENO_FW_PFP = 1,
+ ADRENO_FW_GPMU = 2,
+ ADRENO_FW_MAX,
+};
+
enum adreno_quirks {
ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
@@ -72,8 +79,7 @@ struct adreno_info {
struct adreno_rev rev;
uint32_t revn;
const char *name;
- const char *pm4fw, *pfpfw;
- const char *gpmufw;
+ const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
enum adreno_quirks quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
@@ -115,7 +121,7 @@ struct adreno_gpu {
} fwloc;
/* firmware: */
- const struct firmware *pm4, *pfp;
+ const struct firmware *fw[ADRENO_FW_MAX];
/*
* Register offsets are different between some GPUs.
@@ -129,10 +135,6 @@ struct adreno_gpu {
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
- uint32_t fast_rate, bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- struct msm_bus_scale_pdata *bus_scale_table;
-#endif
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -204,6 +206,8 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
index 576cea3..576cea3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 14bd3bd..6e5e1aa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -129,7 +129,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct msm_kms *kms = &mdp4_kms->base.base;
msm_gem_put_iova(val, kms->aspace);
- drm_gem_object_unreference_unlocked(val);
+ drm_gem_object_put_unlocked(val);
}
static void mdp4_crtc_destroy(struct drm_crtc *crtc)
@@ -382,7 +382,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
- drm_gem_object_reference(next_bo);
+ drm_gem_object_get(next_bo);
msm_gem_get_iova(next_bo, kms->aspace, &iova);
/* enable cursor: */
@@ -467,7 +467,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail:
- drm_gem_object_unreference_unlocked(cursor_bo);
+ drm_gem_object_put_unlocked(cursor_bo);
return ret;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 6a1ebda..6a1ebda 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index ba8e587..ba8e587 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
index b764d7f..b764d7f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index f7f0874..4b646bf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -164,7 +164,7 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
- drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index 940de51..0c13f86 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -22,7 +22,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
-#include "mdp/mdp_kms.h"
+#include "disp/mdp_kms.h"
#include "mdp4.xml.h"
struct device_node;
@@ -234,10 +234,6 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
#endif
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-static inline int match_dev_name(struct device *dev, void *data)
-{
- return !strcmp(dev_name(dev), data);
-}
/* bus scaling data is associated with extra pointless platform devices,
* "dtv", etc.. this is a bit of a hack, but we need a way for encoders
* to find their pdata to make the bus-scaling stuff work.
@@ -245,8 +241,7 @@ static inline int match_dev_name(struct device *dev, void *data)
static inline void *mdp4_find_pdata(const char *devname)
{
struct device *dev;
- dev = bus_find_device(&platform_bus_type, NULL,
- (void *)devname, match_dev_name);
+ dev = bus_find_device_by_name(&platform_bus_type, NULL, devname);
return dev ? dev->platform_data : NULL;
}
#endif
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 4a64592..4a64592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index e3b1c86..e3b1c86 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index ce42459..ce42459 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 7a1ad3a..7a1ad3a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
index d9c10e0..d9c10e0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 824067d..824067d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
index 75910d0..75910d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index 1abc7f5c..d6f79dc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -159,7 +159,7 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
pingpong_tearcheck_disable(encoder);
mdp5_ctl_set_encoder_state(ctl, pipeline, false);
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
bs_set(mdp5_cmd_enc, 0);
@@ -180,7 +180,7 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
if (pingpong_tearcheck_enable(encoder))
return;
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index e414850..9893e43 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -97,9 +97,13 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ bool start = !mdp5_cstate->defer_start;
+
+ mdp5_cstate->defer_start = false;
DBG("%s: flush=%08x", crtc->name, flush_mask);
- return mdp5_ctl_commit(ctl, pipeline, flush_mask);
+
+ return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}
/*
@@ -170,7 +174,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct msm_kms *kms = &mdp5_kms->base.base;
msm_gem_put_iova(val, kms->aspace);
- drm_gem_object_unreference_unlocked(val);
+ drm_gem_object_put_unlocked(val);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
@@ -947,12 +951,17 @@ mdp5_crtc_atomic_print_state(struct drm_printer *p,
if (WARN_ON(!pipeline))
return;
+ if (mdp5_cstate->ctl)
+ drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));
+
drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
pipeline->mixer->name : "(null)");
if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
pipeline->r_mixer->name : "(null)");
+
+ drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}
static void mdp5_crtc_reset(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 439e0a3..f93d568 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -41,7 +41,9 @@ struct mdp5_ctl {
u32 status;
bool encoder_enabled;
- uint32_t start_mask;
+
+ /* pending flush_mask bits */
+ u32 flush_mask;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
@@ -173,16 +175,8 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
- struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
- struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
struct mdp5_interface *intf = pipeline->intf;
- struct mdp5_hw_mixer *mixer = pipeline->mixer;
- struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
-
- ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
- mdp_ctl_flush_mask_encoder(intf);
- if (r_mixer)
- ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
if (!mdp5_cfg_intf_is_virtual(intf->type))
@@ -198,7 +192,7 @@ static bool start_signal_needed(struct mdp5_ctl *ctl,
{
struct mdp5_interface *intf = pipeline->intf;
- if (!ctl->encoder_enabled || ctl->start_mask != 0)
+ if (!ctl->encoder_enabled)
return false;
switch (intf->type) {
@@ -227,25 +221,6 @@ static void send_start_signal(struct mdp5_ctl *ctl)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-static void refill_start_mask(struct mdp5_ctl *ctl,
- struct mdp5_pipeline *pipeline)
-{
- struct mdp5_interface *intf = pipeline->intf;
- struct mdp5_hw_mixer *mixer = pipeline->mixer;
- struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
-
- ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
- if (r_mixer)
- ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
-
- /*
- * Writeback encoder needs to program & flush
- * address registers for each page flip..
- */
- if (intf->type == INTF_WB)
- ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
-}
-
/**
* mdp5_ctl_set_encoder_state() - set the encoder state
*
@@ -268,7 +243,6 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl, pipeline);
}
return 0;
@@ -494,6 +468,8 @@ u32 mdp_ctl_flush_mask_lm(int lm)
case 0: return MDP5_CTL_FLUSH_LM0;
case 1: return MDP5_CTL_FLUSH_LM1;
case 2: return MDP5_CTL_FLUSH_LM2;
+ case 3: return MDP5_CTL_FLUSH_LM3;
+ case 4: return MDP5_CTL_FLUSH_LM4;
case 5: return MDP5_CTL_FLUSH_LM5;
default: return 0;
}
@@ -557,17 +533,14 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
*/
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
struct mdp5_pipeline *pipeline,
- u32 flush_mask)
+ u32 flush_mask, bool start)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 flush_id = ctl->id;
u32 curr_ctl_flush_mask;
- ctl->start_mask &= ~flush_mask;
-
- VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
- ctl->start_mask, ctl->pending_ctl_trigger);
+ VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);
if (ctl->pending_ctl_trigger & flush_mask) {
flush_mask |= MDP5_CTL_FLUSH_CTL;
@@ -582,6 +555,14 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
fix_for_single_flush(ctl, &flush_mask, &flush_id);
+ if (!start) {
+ ctl->flush_mask |= flush_mask;
+ return curr_ctl_flush_mask;
+ } else {
+ flush_mask |= ctl->flush_mask;
+ ctl->flush_mask = 0;
+ }
+
if (flush_mask) {
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
@@ -590,7 +571,6 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl, pipeline);
}
return curr_ctl_flush_mask;
@@ -711,6 +691,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
struct mdp5_ctl_manager *ctl_mgr;
const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
+ unsigned dsi_cnt = 0;
const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
@@ -760,7 +741,10 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
* only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
* Single FLUSH is supported from hw rev v3.0.
*/
- if (rev >= 3) {
+ for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
+ if (hw_cfg->intf.connect[c] == INTF_DSI)
+ dsi_cnt++;
+ if ((rev >= 3) && (dsi_cnt > 1)) {
ctl_mgr->single_flush_supported = true;
/* Reserve CTL0/1 for INTF1/2 */
ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
index b631203..403b0db 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -78,7 +78,7 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
/* @flush_mask: see CTL flush masks definitions below */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
- u32 flush_mask);
+ u32 flush_mask, bool start);
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 36ad3cb..9af94e3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -228,7 +228,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
/*
* Wait for a vsync so we know the ENABLE=0 latched before
@@ -262,7 +262,7 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
@@ -319,6 +319,7 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
mdp5_cstate->ctl = ctl;
mdp5_cstate->pipeline.intf = intf;
+ mdp5_cstate->defer_start = true;
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
index 280e368..280e368 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 3e9bba4..6d8e3a9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
} else {
dev_info(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
- aspace = NULL;;
+ aspace = NULL;
}
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index 9b3fe01..425a03d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -20,7 +20,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
-#include "mdp/mdp_kms.h"
+#include "disp/mdp_kms.h"
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
#include "mdp5_pipe.h"
@@ -133,6 +133,14 @@ struct mdp5_crtc_state {
u32 pp_done_irqmask;
bool cmd_mode;
+
+ /* should we not write CTL[n].START register on flush? If the
+ * encoder has changed this is set to true, since encoder->enable()
+ * is called after crtc state is committed, but we only want to
+ * write the CTL[n].START register once. This lets us defer
+ * writing CTL[n].START until encoder->enable()
+ */
+ bool defer_start;
};
#define to_mdp5_crtc_state(x) \
container_of(x, struct mdp5_crtc_state, base)
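
The comment above describes the intent; the mechanics live in mdp5_ctl_commit()'s new start parameter: commits with start == false only park their flush bits, and the first commit with start == true flushes everything accumulated since. A minimal standalone sketch of that accumulate-then-flush pattern (fake_ctl, fake_commit and hw_write_flush are made-up names for illustration, not driver code, and the return value is simplified):

#include <stdbool.h>
#include <stdint.h>

struct fake_ctl {
	uint32_t flush_mask;	/* bits parked while start == false */
};

static void hw_write_flush(uint32_t mask)
{
	/* stand-in for ctl_write(ctl, REG_MDP5_CTL_FLUSH(id), mask) */
	(void)mask;
}

static uint32_t fake_commit(struct fake_ctl *ctl, uint32_t mask, bool start)
{
	if (!start) {
		/* defer_start path: remember the bits, touch no hardware */
		ctl->flush_mask |= mask;
		return 0;
	}

	/* start == true: flush everything accumulated since the last start */
	mask |= ctl->flush_mask;
	ctl->flush_mask = 0;
	if (mask)
		hw_write_flush(mask);

	return mask;
}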
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index f2a0db7..f2a0db7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 8a00991..8a00991 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 9be94f5..9be94f5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ff52c49..ff52c49 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index bb2b0ac..bb2b0ac 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index be50445..a9f31da 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -286,7 +286,6 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
uint32_t max_width, max_height;
bool out_of_bounds = false;
uint32_t caps = 0;
- struct drm_rect clip;
int min_scale, max_scale;
int ret;
@@ -320,15 +319,12 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
return -ERANGE;
}
- clip.x1 = 0;
- clip.y1 = 0;
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
- ret = drm_plane_helper_check_state(state, &clip, min_scale,
- max_scale, true, true);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
if (ret)
return ret;
@@ -470,7 +466,6 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
{
struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
struct drm_crtc_state *crtc_state;
- struct drm_rect clip;
int min_scale, max_scale;
int ret;
@@ -498,15 +493,12 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
plane->state->fb != state->fb)
return -EINVAL;
- clip.x1 = 0;
- clip.y1 = 0;
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
- ret = drm_plane_helper_check_state(state, &clip, min_scale,
- max_scale, true, true);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
if (ret)
return ret;
@@ -543,7 +535,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
ctl = mdp5_crtc_get_ctl(new_state->crtc);
- mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
}
*to_mdp5_plane_state(plane->state) =
@@ -964,8 +956,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t src_x, src_y;
uint32_t src_w, src_h;
uint32_t src_img_w, src_img_h;
- uint32_t src_x_r;
- int crtc_x_r;
int ret;
nplanes = fb->format->num_planes;
@@ -1010,9 +1000,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
crtc_w /= 2;
src_w /= 2;
src_img_w /= 2;
-
- crtc_x_r = crtc_x + crtc_w;
- src_x_r = src_x + src_w;
}
ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
@@ -1052,9 +1039,9 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
if (right_hwpipe)
mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
config, hdecm, vdecm, hflip, vflip,
- crtc_x_r, crtc_y, crtc_w, crtc_h,
+ crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
src_img_w, src_img_h,
- src_x_r, src_y, src_w, src_h);
+ src_x + src_w, src_y, src_w, src_h);
plane->fb = fb;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index ae4983d..ae4983d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
index b41d044..b41d044 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
index 1494c40..1494c40 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index b4a8aa4..b4a8aa4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/disp/mdp_kms.c
index 6428730..6428730 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index 1185487..1185487 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 98742d7..b744bcc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -192,13 +192,14 @@ void __exit msm_dsi_unregister(void)
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv;
struct drm_bridge *ext_bridge;
int ret;
- if (WARN_ON(!encoder))
+ if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev))
return -EINVAL;
+ priv = dev->dev_private;
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
@@ -245,19 +246,17 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return 0;
fail:
- if (msm_dsi) {
- /* bridge/connector are normally destroyed by drm: */
- if (msm_dsi->bridge) {
- msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
- msm_dsi->bridge = NULL;
- }
+ /* bridge/connector are normally destroyed by drm: */
+ if (msm_dsi->bridge) {
+ msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+ msm_dsi->bridge = NULL;
+ }
- /* don't destroy connector if we didn't make it */
- if (msm_dsi->connector && !msm_dsi->external_bridge)
- msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+ /* don't destroy connector if we didn't make it */
+ if (msm_dsi->connector && !msm_dsi->external_bridge)
+ msm_dsi->connector->funcs->destroy(msm_dsi->connector);
- msm_dsi->connector = NULL;
- }
+ msm_dsi->connector = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 2302046..70d9a9a 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -36,6 +36,7 @@ enum msm_dsi_phy_type {
MSM_DSI_PHY_20NM,
MSM_DSI_PHY_28NM_8960,
MSM_DSI_PHY_14NM,
+ MSM_DSI_PHY_10NM,
MSM_DSI_PHY_MAX
};
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 479086c..f6a9471 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-01-12 09:09:22)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -1556,5 +1547,175 @@ static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00
#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 0x00000010
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014
+
+#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018
+
+#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c
+
+#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034
+
+#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8
+
+#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8
+
+static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
+
+#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c
+
+#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020
+
+#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024
+
+#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c
+
+#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030
+
+#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054
+
+#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064
+
+#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c
+
+#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080
+
+#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094
+
+#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4
+
+#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8
+
+#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4
+
+#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120
+
+#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144
+
+#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154
+
+#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184
+
+#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c
+
+#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0
+
#endif /* DSI_XML */
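
The per-lane helpers added above all follow the same layout: each physical lane owns a 0x80-byte register block, so a lane register is its base offset plus 0x80 times the lane index. A trivial standalone check of that stride (illustration only):

#include <stdint.h>
#include <stdio.h>

/* mirrors REG_DSI_10nm_PHY_LN_LPRX_CTRL(i0) = 0x28 + 0x80 * i0 */
static uint32_t lprx_ctrl_offset(uint32_t lane)
{
	return 0x00000028 + 0x80 * lane;
}

int main(void)
{
	/* prints 0x28 0xa8 0x128 0x1a8 - one 0x80-byte block per lane */
	for (uint32_t lane = 0; lane < 4; lane++)
		printf("0x%x ", lprx_ctrl_offset(lane));
	printf("\n");
	return 0;
}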
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 65c1dfb..0327bb5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -118,6 +118,24 @@ static const struct msm_dsi_config msm8996_dsi_cfg = {
.num_dsi = 2,
};
+static const char * const dsi_sdm845_bus_clk_names[] = {
+ "iface", "bus",
+};
+
+static const struct msm_dsi_config sdm845_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdda", 21800, 4 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_sdm845_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
+ .io_start = { 0xae94000, 0xae96000 },
+ .num_dsi = 2,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
@@ -131,6 +149,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 00a5da2..9cfdcf1 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -25,6 +25,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
+#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 0f7324a..7a03a94 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -115,6 +115,7 @@ struct msm_dsi_host {
struct clk *pixel_clk;
struct clk *byte_clk_src;
struct clk *pixel_clk_src;
+ struct clk *byte_intf_clk;
u32 byte_clk_rate;
u32 esc_clk_rate;
@@ -214,7 +215,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
goto exit;
}
- ahb_clk = clk_get(dev, "iface_clk");
+ ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
pr_err("%s: cannot get interface clock\n", __func__);
goto put_gdsc;
@@ -225,7 +226,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
- goto put_clk;
+ goto put_gdsc;
}
ret = clk_prepare_enable(ahb_clk);
@@ -249,8 +250,6 @@ disable_clks:
disable_gdsc:
regulator_disable(gdsc_reg);
pm_runtime_put_sync(dev);
-put_clk:
- clk_put(ahb_clk);
put_gdsc:
regulator_put(gdsc_reg);
exit:
@@ -379,6 +378,19 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
+ cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) {
+ msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+ if (IS_ERR(msm_host->byte_intf_clk)) {
+ ret = PTR_ERR(msm_host->byte_intf_clk);
+ pr_err("%s: can't find byte_intf clock. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+ } else {
+ msm_host->byte_intf_clk = NULL;
+ }
+
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
@@ -504,6 +516,16 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto error;
}
+ if (msm_host->byte_intf_clk) {
+ ret = clk_set_rate(msm_host->byte_intf_clk,
+ msm_host->byte_clk_rate / 2);
+ if (ret) {
+ pr_err("%s: Failed to set rate byte intf clk, %d\n",
+ __func__, ret);
+ goto error;
+ }
+ }
+
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
@@ -522,8 +544,19 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto pixel_clk_err;
}
+ if (msm_host->byte_intf_clk) {
+ ret = clk_prepare_enable(msm_host->byte_intf_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable byte intf clk\n",
+ __func__);
+ goto byte_intf_clk_err;
+ }
+ }
+
return 0;
+byte_intf_clk_err:
+ clk_disable_unprepare(msm_host->pixel_clk);
pixel_clk_err:
clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
@@ -617,6 +650,8 @@ static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
clk_disable_unprepare(msm_host->esc_clk);
clk_disable_unprepare(msm_host->pixel_clk);
+ if (msm_host->byte_intf_clk)
+ clk_disable_unprepare(msm_host->byte_intf_clk);
clk_disable_unprepare(msm_host->byte_clk);
} else {
clk_disable_unprepare(msm_host->pixel_clk);
@@ -1028,10 +1063,8 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
if (msm_host->tx_gem_obj) {
msm_gem_put_iova(msm_host->tx_gem_obj, 0);
- mutex_lock(&dev->struct_mutex);
- msm_gem_free_object(msm_host->tx_gem_obj);
+ drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
}
if (msm_host->tx_buf)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 8552481..4cb1cb6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -88,6 +88,8 @@ static int dsi_mgr_setup_components(int id)
msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+ if (IS_ERR(src_pll))
+ return PTR_ERR(src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
} else if (!other_dsi) {
ret = 0;
@@ -116,6 +118,8 @@ static int dsi_mgr_setup_components(int id)
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
MSM_DSI_PHY_SLAVE);
src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
+ if (IS_ERR(src_pll))
+ return PTR_ERR(src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
if (ret)
return ret;
@@ -858,7 +862,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
int id = msm_dsi->id;
int ret;
- if (id > DSI_MAX) {
+ if (id >= DSI_MAX) {
pr_err("%s: invalid id %d\n", __func__, id);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 790ca28..8e9d5c2 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -395,6 +395,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
{ .compatible = "qcom,dsi-phy-14nm",
.data = &dsi_phy_14nm_cfgs },
#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+ { .compatible = "qcom,dsi-phy-10nm",
+ .data = &dsi_phy_10nm_cfgs },
+#endif
{}
};
@@ -503,10 +507,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
- if (!phy->pll)
+ if (IS_ERR_OR_NULL(phy->pll))
dev_info(dev,
- "%s: pll init failed, need separate pll clk driver\n",
- __func__);
+ "%s: pll init failed: %ld, need separate pll clk driver\n",
+ __func__, PTR_ERR(phy->pll));
dsi_phy_disable_resource(phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 1733f66..c56268c 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -48,6 +48,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_pre;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
new file mode 100644
index 0000000..0af951a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -0,0 +1,251 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data = 0;
+
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
+ mb(); /* make sure read happened */
+
+ return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *lane_base = phy->lane_base;
+ int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
+
+ /*
+ * LPRX and CDRX need to be enabled only for the physical data lane
+ * corresponding to the logical data lane 0
+ */
+ if (enable)
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+ else
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
+{
+ int i;
+ u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+ void __iomem *lane_base = phy->lane_base;
+
+ /* Strength ctrl settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
+ 0x55);
+ /*
+ * Disable LPRX and CDRX for all lanes. They are later enabled
+ * only for the physical data lane corresponding to the logical
+ * data lane 0
+ */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
+ 0x88);
+ }
+
+ dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
+
+ /* other settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
+ i == 4 ? 0x80 : 0x0);
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
+ tx_dctrl[i]);
+ }
+
+ /* Toggle BIT 0 to release freeze I/O */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
+}
+
+static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ /*
+ * TODO: These params need to be computed; they're currently hardcoded
+ * for a 1440x2560@60Hz panel with a byte clock of 100.618 MHz and a
+ * default escape clock of 19.2 MHz.
+ */
+
+ timing->hs_halfbyte_en = 0;
+ timing->clk_zero = 0x1c;
+ timing->clk_prepare = 0x07;
+ timing->clk_trail = 0x07;
+ timing->hs_exit = 0x23;
+ timing->hs_zero = 0x21;
+ timing->hs_prepare = 0x07;
+ timing->hs_trail = 0x07;
+ timing->hs_rqst = 0x05;
+ timing->ta_sure = 0x00;
+ timing->ta_go = 0x03;
+ timing->ta_get = 0x04;
+
+ timing->shared_timings.clk_pre = 0x2d;
+ timing->shared_timings.clk_post = 0x0d;
+
+ return 0;
+}
+
+static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ int ret;
+ u32 status;
+ u32 const delay_us = 5;
+ u32 const timeout_us = 1000;
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+ u32 data;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
+ dev_err(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dsi_phy_hw_v3_0_is_pll_on(phy))
+ pr_warn("PLL turned on before configuring PHY\n");
+
+ /* wait for REFGEN READY */
+ ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
+ status, (status & BIT(0)),
+ delay_us, timeout_us);
+ if (ret) {
+ pr_err("Ref gen not ready. Aborting\n");
+ return -EINVAL;
+ }
+
+ /* de-assert digital and pll power down */
+ data = BIT(6) | BIT(5);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+
+ /* Assert PLL core reset */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ /* turn off resync FIFO */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+ /* Select MS1 byte-clk */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);
+
+ /* Enable LDO */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);
+
+ /* Configure PHY lane swap (TODO: we need to calculate this) */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);
+
+ /* DSI PHY timings */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
+ timing->hs_halfbyte_en);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
+ timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
+ timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
+ timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
+ timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
+ timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
+ timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
+ timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
+ timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
+ timing->ta_go | (timing->ta_sure << 3));
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
+ timing->ta_get);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
+ 0x00);
+
+ /* Remove power down from all blocks */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);
+
+ /* power up lanes */
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ /* TODO: only power up lanes that are used */
+ data |= 0x1F;
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);
+
+ /* Select full-rate mode */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);
+
+ ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+ if (ret) {
+ dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* DSI lane settings */
+ dsi_phy_hw_v3_0_lane_settings(phy);
+
+ DBG("DSI%d PHY enabled", phy->id);
+
+ return 0;
+}
+
+static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
+{
+}
+
+static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+
+ phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+ "DSI_PHY_LANE");
+ if (IS_ERR(phy->lane_base)) {
+ dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
+ .type = MSM_DSI_PHY_10NM,
+ .src_pll_truthtable = { {false, false}, {true, false} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdds", 36000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_10nm_phy_enable,
+ .disable = dsi_10nm_phy_disable,
+ .init = dsi_10nm_phy_init,
+ },
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index bc289f5..613e206 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -166,6 +166,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
case MSM_DSI_PHY_14NM:
pll = msm_dsi_pll_14nm_init(pdev, id);
break;
+ case MSM_DSI_PHY_10NM:
+ pll = msm_dsi_pll_10nm_init(pdev, id);
+ break;
default:
pll = ERR_PTR(-ENXIO);
break;
@@ -173,7 +176,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
if (IS_ERR(pll)) {
dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
- return NULL;
+ return pll;
}
pll->type = type;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index f63e7ad..8b32271 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -115,5 +115,14 @@ msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
return ERR_PTR(-ENODEV);
}
#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#endif /* __DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
new file mode 100644
index 0000000..c4c37a7
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -0,0 +1,822 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 10nm - clock diagram (eg: DSI0):
+ *
+ * dsi0_pll_out_div_clk dsi0_pll_bit_clk
+ * | |
+ * | |
+ * +---------+ | +----------+ | +----+
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ * +---------+ | +----------+ | +----+
+ * | |
+ * | | dsi0_pll_by_2_bit_clk
+ * | | |
+ * | | +----+ | |\ dsi0_pclk_mux
+ * | |--| /2 |--o--| \ |
+ * | | +----+ | \ | +---------+
+ * | --------------| |--o--| div_7_4 |-- dsi0pll
+ * |------------------------------| / +---------+
+ * | +-----+ | /
+ * -----------| /4? |--o----------|/
+ * +-----+ | |
+ * | |dsiclk_sel
+ * |
+ * dsi0_pll_post_out_div_clk
+ */
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+#define NUM_PROVIDED_CLKS 2
+
+struct dsi_pll_regs {
+ u32 pll_prop_gain_rate;
+ u32 pll_lockdet_rate;
+ u32 decimal_div_start;
+ u32 frac_div_start_low;
+ u32 frac_div_start_mid;
+ u32 frac_div_start_high;
+ u32 pll_clock_inverters;
+ u32 ssc_stepsize_low;
+ u32 ssc_stepsize_high;
+ u32 ssc_div_per_low;
+ u32 ssc_div_per_high;
+ u32 ssc_adjper_low;
+ u32 ssc_adjper_high;
+ u32 ssc_control;
+};
+
+struct dsi_pll_config {
+ u32 ref_freq;
+ bool div_override;
+ u32 output_div;
+ bool ignore_frac;
+ bool disable_prescaler;
+ bool enable_ssc;
+ bool ssc_center;
+ u32 dec_bits;
+ u32 frac_bits;
+ u32 lock_timer;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+ u32 thresh_cycles;
+ u32 refclk_cycles;
+};
+
+struct pll_10nm_cached_state {
+ unsigned long vco_rate;
+ u8 bit_clk_div;
+ u8 pix_clk_div;
+ u8 pll_out_div;
+ u8 pll_mux;
+};
+
+struct dsi_pll_10nm {
+ struct msm_dsi_pll base;
+
+ int id;
+ struct platform_device *pdev;
+
+ void __iomem *phy_cmn_mmio;
+ void __iomem *mmio;
+
+ u64 vco_ref_clk_rate;
+ u64 vco_current_rate;
+
+ /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ int vco_delay;
+ struct dsi_pll_config pll_configuration;
+ struct dsi_pll_regs reg_setup;
+
+ /* private clocks: */
+ struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
+ u32 num_hws;
+
+ /* clock-provider: */
+ struct clk_hw_onecell_data *hw_data;
+
+ struct pll_10nm_cached_state cached_state;
+
+ enum msm_dsi_phy_usecase uc;
+ struct dsi_pll_10nm *slave;
+};
+
+#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, base)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private
+ * data.
+ */
+static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+
+ config->ref_freq = pll->vco_ref_clk_rate;
+ config->output_div = 1;
+ config->dec_bits = 8;
+ config->frac_bits = 18;
+ config->lock_timer = 64;
+ config->ssc_freq = 31500;
+ config->ssc_offset = 5000;
+ config->ssc_adj_per = 2;
+ config->thresh_cycles = 32;
+ config->refclk_cycles = 256;
+
+ config->div_override = false;
+ config->ignore_frac = false;
+ config->disable_prescaler = false;
+
+ config->enable_ssc = false;
+ config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u64 fref = pll->vco_ref_clk_rate;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+
+ pll_freq = pll->vco_current_rate;
+
+ if (config->disable_prescaler)
+ divider = fref;
+ else
+ divider = fref * 2;
+
+ multiplier = 1 << config->frac_bits;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ div_u64_rem(dec_multiple, multiplier, &frac);
+
+ dec = div_u64(dec_multiple, multiplier);
+
+ if (pll_freq <= 1900000000UL)
+ regs->pll_prop_gain_rate = 8;
+ else if (pll_freq <= 3000000000UL)
+ regs->pll_prop_gain_rate = 10;
+ else
+ regs->pll_prop_gain_rate = 12;
+ if (pll_freq < 1100000000UL)
+ regs->pll_clock_inverters = 8;
+ else
+ regs->pll_clock_inverters = 0;
+
+ regs->pll_lockdet_rate = config->lock_timer;
+ regs->decimal_div_start = dec;
+ regs->frac_div_start_low = (frac & 0xff);
+ regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+ regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
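+
+/*
+ * Worked example of the decimal/fractional split above, assuming the
+ * default 19.2 MHz reference with the prescaler enabled (divider =
+ * 2 * fref = 38.4 MHz), frac_bits = 18 and a requested VCO rate of
+ * 1.5 GHz:
+ *
+ *   dec_multiple = 1500000000 * 2^18 / 38400000 = 10240000
+ *   dec          = 10240000 / 2^18  = 39    (0x27)
+ *   frac         = 10240000 % 2^18  = 16384 (0x4000)
+ *
+ * i.e. frac_div_start_{low,mid,high} = 0x00/0x40/0x00, prop gain rate 8
+ * (rate <= 1.9 GHz) and clock inverters 0 (rate >= 1.1 GHz). The rate is
+ * only an illustration, not a requirement of this driver.
+ */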
+
+#define SSC_CENTER BIT(0)
+#define SSC_EN BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u32 ssc_per;
+ u32 ssc_mod;
+ u64 ssc_step_size;
+ u64 frac;
+
+ if (!config->enable_ssc) {
+ DBG("SSC not enabled\n");
+ return;
+ }
+
+ ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+ ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+ ssc_per -= ssc_mod;
+
+ frac = regs->frac_div_start_low |
+ (regs->frac_div_start_mid << 8) |
+ (regs->frac_div_start_high << 16);
+ ssc_step_size = regs->decimal_div_start;
+ ssc_step_size *= (1 << config->frac_bits);
+ ssc_step_size += frac;
+ ssc_step_size *= config->ssc_offset;
+ ssc_step_size *= (config->ssc_adj_per + 1);
+ ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+ ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+ regs->ssc_div_per_low = ssc_per & 0xFF;
+ regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+ regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+ regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+ regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+ regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+ regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+ pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+ regs->decimal_div_start, frac, config->frac_bits);
+ pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+ ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
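+
+/*
+ * Example of the SSC arithmetic, assuming SSC were enabled with the
+ * defaults from dsi_pll_setup_config() (ref_freq = 19.2 MHz, ssc_freq =
+ * 31500, ssc_offset = 5000, ssc_adj_per = 2) and the 1.5 GHz setting
+ * dec = 39, frac = 16384:
+ *
+ *   ssc_per       = round(19200000 / 31500) / 2 - 1 = 304,
+ *                   minus (305 % 3) = 2            ->  302 (0x012e)
+ *   ssc_step_size = (39 * 2^18 + 16384) * 5000 * 3 / 303 = 506930693,
+ *                   divided by 10^6 with rounding  ->  507 (0x01fb)
+ *
+ * giving DIV_PER {low, high} = {0x2e, 0x01} and STEPSIZE {low, high} =
+ * {0xfb, 0x01}. The numbers are illustrative; SSC stays disabled by
+ * default.
+ */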
+
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+
+ if (pll->pll_configuration.enable_ssc) {
+ pr_debug("SSC is enabled\n");
+
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+ regs->ssc_stepsize_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+ regs->ssc_stepsize_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+ regs->ssc_div_per_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+ regs->ssc_div_per_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
+ regs->ssc_adjper_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
+ regs->ssc_adjper_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
+ SSC_EN | regs->ssc_control);
+ }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->mmio;
+
+ pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
+ 0xba);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
+ 0x4c);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_commit(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ struct dsi_pll_regs *reg = &pll->reg_setup;
+
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
+ reg->decimal_div_start);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+ reg->frac_div_start_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
+ reg->frac_div_start_mid);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+ reg->frac_div_start_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
+ reg->pll_clock_inverters);
+}
+
+static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,
+ parent_rate);
+
+ pll_10nm->vco_current_rate = rate;
+ pll_10nm->vco_ref_clk_rate = parent_rate;
+
+ dsi_pll_setup_config(pll_10nm);
+
+ dsi_pll_calc_dec_frac(pll_10nm);
+
+ dsi_pll_calc_ssc(pll_10nm);
+
+ dsi_pll_commit(pll_10nm);
+
+ dsi_pll_config_hzindep_reg(pll_10nm);
+
+ dsi_pll_ssc_commit(pll_10nm);
+
+ /* flush, ensure all register writes are done */
+ wmb();
+
+ return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
+{
+ int rc;
+ u32 status = 0;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
+ rc = readl_poll_timeout_atomic(pll->mmio +
+ REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->id, status);
+
+ return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data & ~BIT(5));
+ ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data | BIT(5));
+ pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data | BIT(5));
+}
+
+static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ int rc;
+
+ dsi_pll_enable_pll_bias(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_pll_bias(pll_10nm->slave);
+
+ /* Start PLL */
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+ 0x01);
+
+ /*
+ * ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_10nm_lock_status(pll_10nm);
+ if (rc) {
+ pr_err("PLL(%d) lock failed\n", pll_10nm->id);
+ goto error;
+ }
+
+ pll->pll_on = true;
+
+ dsi_pll_enable_global_clk(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_global_clk(pll_10nm->slave);
+
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
+ 0x01);
+ if (pll_10nm->slave)
+ pll_write(pll_10nm->slave->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+ return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
+{
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+ /*
+ * To avoid any stray glitches while abruptly powering down the PLL
+ * make sure to gate the clock using the clock enable bit before
+ * powering down the PLL
+ */
+ dsi_pll_disable_global_clk(pll_10nm);
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(pll_10nm);
+ if (pll_10nm->slave) {
+ dsi_pll_disable_global_clk(pll_10nm->slave);
+ dsi_pll_disable_sub(pll_10nm->slave);
+ }
+ /* flush, ensure all register writes are done */
+ wmb();
+ pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ void __iomem *base = pll_10nm->mmio;
+ u64 ref_clk = pll_10nm->vco_ref_clk_rate;
+ u64 vco_rate = 0x0;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u64 pll_freq, tmp64;
+
+ dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
+ dec &= 0xff;
+
+ frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+ 0xff) << 8);
+ frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) << 16);
+
+ /*
+ * TODO:
+ * 1. Assumes prescaler is disabled
+ * 2. Multiplier is 2^18. It should be 2^(num_of_frac_bits)
+ */
+ multiplier = 1 << 18;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ pll_freq += div_u64(tmp64, multiplier);
+
+ vco_rate = pll_freq;
+
+ DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+ pll_10nm->id, (unsigned long)vco_rate, dec, frac);
+
+ return (unsigned long)vco_rate;
+}
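+
+/*
+ * Continuing the 1.5 GHz example: with dec = 0x27 (39) and frac = 0x4000
+ * (16384) read back from the registers and a 19.2 MHz reference, the
+ * reconstruction above gives
+ *
+ *   pll_freq = 39 * (2 * 19200000) + (2 * 19200000 * 16384) / 2^18
+ *            = 1497600000 + 2400000 = 1500000000 Hz
+ *
+ * which matches the rate programmed by dsi_pll_10nm_vco_set_rate(). The
+ * figures assume the example settings, not any particular panel.
+ */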
+
+static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
+ .round_rate = msm_dsi_pll_helper_clk_round_rate,
+ .set_rate = dsi_pll_10nm_vco_set_rate,
+ .recalc_rate = dsi_pll_10nm_vco_recalc_rate,
+ .prepare = dsi_pll_10nm_vco_prepare,
+ .unprepare = dsi_pll_10nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+ u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+ cached->pll_out_div = pll_read(pll_10nm->mmio +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ cached->pll_out_div &= 0x3;
+
+ cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
+ cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+ cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+ cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+ DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+ pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,
+ cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+ u32 val;
+
+ val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ val &= ~0x3;
+ val |= cached->pll_out_div;
+ pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+ pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+ val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ val &= ~0x3;
+ val |= cached->pll_mux;
+ pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+
+ DBG("DSI PLL%d", pll_10nm->id);
+
+ return 0;
+}
+
+static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,
+ enum msm_dsi_phy_usecase uc)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ void __iomem *base = pll_10nm->phy_cmn_mmio;
+ u32 data = 0x0; /* internal PLL */
+
+ DBG("DSI PLL%d", pll_10nm->id);
+
+ switch (uc) {
+ case MSM_DSI_PHY_STANDALONE:
+ break;
+ case MSM_DSI_PHY_MASTER:
+ pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ data = 0x1; /* external PLL */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set PLL src */
+ pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+ pll_10nm->uc = uc;
+
+ return 0;
+}
+
+static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,
+ struct clk **byte_clk_provider,
+ struct clk **pixel_clk_provider)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;
+
+ DBG("DSI PLL%d", pll_10nm->id);
+
+ if (byte_clk_provider)
+ *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+ if (pixel_clk_provider)
+ *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+ return 0;
+}
+
+static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+ DBG("DSI PLL%d", pll_10nm->id);
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers
+ */
+static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
+{
+ char clk_name[32], parent[32], vco_name[32];
+ char parent2[32], parent3[32], parent4[32];
+ struct clk_init_data vco_init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .name = vco_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_10nm_vco,
+ };
+ struct device *dev = &pll_10nm->pdev->dev;
+ struct clk_hw **hws = pll_10nm->hws;
+ struct clk_hw_onecell_data *hw_data;
+ struct clk_hw *hw;
+ int num = 0;
+ int ret;
+
+ DBG("DSI%d", pll_10nm->id);
+
+ hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+ NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);
+ pll_10nm->base.clk_hw.init = &vco_init;
+
+ ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);
+ if (ret)
+ return ret;
+
+ hws[num++] = &pll_10nm->base.clk_hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
+
+ hw = clk_hw_register_divider(dev, clk_name,
+ parent, CLK_SET_RATE_PARENT,
+ pll_10nm->mmio +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+ /* BIT CLK: DIV_CTRL_3_0 */
+ hw = clk_hw_register_divider(dev, clk_name, parent,
+ CLK_SET_RATE_PARENT,
+ pll_10nm->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED,
+ &pll_10nm->postdiv_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+ /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ CLK_SET_RATE_PARENT, 1, 8);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+ hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ 0, 1, 2);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ 0, 1, 4);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+ snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+ snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+ snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+
+ hw = clk_hw_register_mux(dev, clk_name,
+ (const char *[]){
+ parent, parent2, parent3, parent4
+ }, 4, 0, pll_10nm->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ 0, 2, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
+
+ /* PIX CLK DIV: DIV_CTRL_7_4 */
+ hw = clk_hw_register_divider(dev, clk_name, parent,
+ 0, pll_10nm->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED,
+ &pll_10nm->postdiv_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+ hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+ pll_10nm->num_hws = num;
+
+ hw_data->num = NUM_PROVIDED_CLKS;
+ pll_10nm->hw_data = hw_data;
+
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ pll_10nm->hw_data);
+ if (ret) {
+ dev_err(dev, "failed to register clk provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+{
+ struct dsi_pll_10nm *pll_10nm;
+ struct msm_dsi_pll *pll;
+ int ret;
+
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
+ if (!pll_10nm)
+ return ERR_PTR(-ENOMEM);
+
+ DBG("DSI PLL%d", id);
+
+ pll_10nm->pdev = pdev;
+ pll_10nm->id = id;
+ pll_10nm_list[id] = pll_10nm;
+
+ pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+ if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
+ dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+ if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
+ dev_err(&pdev->dev, "failed to map PLL base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll = &pll_10nm->base;
+ pll->min_rate = 1000000000UL;
+ pll->max_rate = 3500000000UL;
+ pll->get_provider = dsi_pll_10nm_get_provider;
+ pll->destroy = dsi_pll_10nm_destroy;
+ pll->save_state = dsi_pll_10nm_save_state;
+ pll->restore_state = dsi_pll_10nm_restore_state;
+ pll->set_usecase = dsi_pll_10nm_set_usecase;
+
+ pll_10nm->vco_delay = 1;
+
+ ret = pll_10nm_register(pll_10nm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* TODO: Remove this when we have proper display handover support */
+ msm_dsi_pll_save_state(pll);
+
+ return pll;
+}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
index fe15aa6..71fe60e 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
@@ -698,7 +698,7 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
val &= div_mask(width);
return divider_recalc_rate(hw, parent_rate, val, NULL,
- postdiv->flags);
+ postdiv->flags, width);
}
static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 6e76797..3656155 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -769,7 +769,7 @@ static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctr
if (rc) {
pr_err("%s: wait key and an ready failed\n", __func__);
return rc;
- };
+ }
/* Read BCAPS and send to HDCP engine */
rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl);
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1855182c7..ba74cb4 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -161,8 +161,11 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
- if (priv->kms->funcs->debugfs_init)
+ if (priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
+ if (ret)
+ return ret;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0a3ea30..30cd514 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -37,16 +37,9 @@
#define MSM_VERSION_MINOR 3
#define MSM_VERSION_PATCHLEVEL 0
-static void msm_fb_output_poll_changed(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- if (priv->fbdev)
- drm_fb_helper_hotplug_event(priv->fbdev);
-}
-
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
- .output_poll_changed = msm_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = msm_atomic_commit,
.atomic_state_alloc = msm_atomic_state_alloc,
@@ -551,13 +544,6 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
context_close(ctx);
}
-static void msm_lastclose(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- if (priv->fbdev)
- drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
-}
-
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -674,7 +660,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
ret = msm_gem_cpu_prep(obj, args->op, &timeout);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -692,7 +678,7 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
ret = msm_gem_cpu_fini(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -732,7 +718,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
args->offset = msm_gem_mmap_offset(obj);
}
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -797,7 +783,7 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
ret = 0;
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -866,7 +852,7 @@ static struct drm_driver msm_driver = {
DRIVER_MODESET,
.open = msm_open,
.postclose = msm_postclose,
- .lastclose = msm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c646843..48ed5b9 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -51,7 +51,6 @@ struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
-struct msm_fence_cb;
struct msm_gem_address_space;
struct msm_gem_vma;
@@ -303,7 +302,8 @@ int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
-static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
+static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
+ const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index fc175e7..0e0c872 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -53,7 +53,7 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct drm_gem_object *bo = msm_fb->planes[i];
- drm_gem_object_unreference_unlocked(bo);
+ drm_gem_object_put_unlocked(bo);
}
kfree(msm_fb);
@@ -160,7 +160,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
out_unref:
for (i = 0; i < n; i++)
- drm_gem_object_unreference_unlocked(bos[i]);
+ drm_gem_object_put_unlocked(bos[i]);
return ERR_PTR(ret);
}
@@ -274,7 +274,7 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
- drm_gem_object_unreference_unlocked(bo);
+ drm_gem_object_put_unlocked(bo);
return ERR_CAST(fb);
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 1aa6a4c..b9fe059 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -37,8 +37,6 @@ void msm_fence_context_free(struct msm_fence_context *fctx);
int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
ktime_t *timeout, bool interruptible);
-int msm_queue_fence_cb(struct msm_fence_context *fctx,
- struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 81fe6d6..9519647 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -93,14 +93,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
return p;
}
+ msm_obj->pages = p;
+
msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
if (IS_ERR(msm_obj->sgt)) {
+ void *ptr = ERR_CAST(msm_obj->sgt);
+
dev_err(dev->dev, "failed to allocate sgt\n");
- return ERR_CAST(msm_obj->sgt);
+ msm_obj->sgt = NULL;
+ return ptr;
}
- msm_obj->pages = p;
-
/* For non-cached buffers, ensure the new pages are clean
* because display controller, GPU, etc. are not coherent:
*/
@@ -135,7 +138,10 @@ static void put_pages(struct drm_gem_object *obj)
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- sg_free_table(msm_obj->sgt);
+
+ if (msm_obj->sgt)
+ sg_free_table(msm_obj->sgt);
+
kfree(msm_obj->sgt);
if (use_pages(obj))
@@ -464,7 +470,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
*offset = msm_gem_mmap_offset(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
fail:
return ret;
@@ -792,6 +798,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
}
#endif
+/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -848,7 +855,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -968,7 +975,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -1028,7 +1035,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -1046,7 +1053,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
if (iova) {
ret = msm_gem_get_iova(obj, aspace, iova);
if (ret) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
}
@@ -1054,7 +1061,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
msm_gem_put_iova(obj, aspace);
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
return ERR_CAST(vaddr);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9320e18..c5d9bd3 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -146,6 +146,7 @@ struct msm_gem_submit {
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
+ bool in_rb; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b8dc8f9..7bd83e0 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -430,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
+ if (args->flags & MSM_SUBMIT_SUDO) {
+ if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
+ !capable(CAP_SYS_RAWIO))
+ return -EINVAL;
+ }
+
queue = msm_submitqueue_get(ctx, args->queueid);
if (!queue)
return -ENOENT;
@@ -471,6 +477,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
+ if (args->flags & MSM_SUBMIT_SUDO)
+ submit->in_rb = true;
+
ret = submit_lookup_objects(submit, args, file);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index d34e331..ffbec22 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -96,6 +96,8 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
{
struct msm_gem_address_space *aspace;
+ u64 size = domain->geometry.aperture_end -
+ domain->geometry.aperture_start;
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
@@ -106,7 +108,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
aspace->mmu = msm_iommu_new(dev, domain);
drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
- (domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+ size >> PAGE_SHIFT);
kref_init(&aspace->kref);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 2322014..1c09acf 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -21,42 +21,90 @@
#include "msm_fence.h"
#include <linux/string_helpers.h>
+#include <linux/pm_opp.h>
+#include <linux/devfreq.h>
/*
* Power Management:
*/
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-static void bs_init(struct msm_gpu *gpu)
+static int msm_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
{
- if (gpu->bus_scale_table) {
- gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
- DBG("bus scale client: %08x", gpu->bsc);
- }
+ struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+ struct dev_pm_opp *opp;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ clk_set_rate(gpu->core_clk, *freq);
+ dev_pm_opp_put(opp);
+
+ return 0;
}
-static void bs_fini(struct msm_gpu *gpu)
+static int msm_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
{
- if (gpu->bsc) {
- msm_bus_scale_unregister_client(gpu->bsc);
- gpu->bsc = 0;
- }
+ struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+ u64 cycles;
+ u32 freq = ((u32) status->current_frequency) / 1000000;
+ ktime_t time;
+
+ status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
+ gpu->funcs->gpu_busy(gpu, &cycles);
+
+ status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
+
+ gpu->devfreq.busy_cycles = cycles;
+
+ time = ktime_get();
+ status->total_time = ktime_us_delta(time, gpu->devfreq.time);
+ gpu->devfreq.time = time;
+
+ return 0;
+}
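+
+/*
+ * Unit check with sample numbers: devfreq hands in current_frequency in
+ * Hz, so freq above is in MHz and busy_time comes out in microseconds,
+ * e.g. a delta of 5000000 busy cycles at 500 MHz reports 5000000 / 500 =
+ * 10000 us. total_time is also in microseconds via ktime_us_delta(), so
+ * the two are directly comparable. The figures are only an example.
+ */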
+
+static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+
+ *freq = (unsigned long) clk_get_rate(gpu->core_clk);
+
+ return 0;
}
-static void bs_set(struct msm_gpu *gpu, int idx)
+static struct devfreq_dev_profile msm_devfreq_profile = {
+ .polling_ms = 10,
+ .target = msm_devfreq_target,
+ .get_dev_status = msm_devfreq_get_dev_status,
+ .get_cur_freq = msm_devfreq_get_cur_freq,
+};
+
+static void msm_devfreq_init(struct msm_gpu *gpu)
{
- if (gpu->bsc) {
- DBG("set bus scaling: %d", idx);
- msm_bus_scale_client_update_request(gpu->bsc, idx);
+ /* We need target support to do devfreq */
+ if (!gpu->funcs->gpu_busy)
+ return;
+
+ msm_devfreq_profile.initial_freq = gpu->fast_rate;
+
+ /*
+ * Don't set the freq_table or max_state and let devfreq build the table
+ * from OPP
+ */
+
+ gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
+ &msm_devfreq_profile, "simple_ondemand", NULL);
+
+ if (IS_ERR(gpu->devfreq.devfreq)) {
+ dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ gpu->devfreq.devfreq = NULL;
}
}
-#else
-static void bs_init(struct msm_gpu *gpu) {}
-static void bs_fini(struct msm_gpu *gpu) {}
-static void bs_set(struct msm_gpu *gpu, int idx) {}
-#endif
static int enable_pwrrail(struct msm_gpu *gpu)
{
@@ -143,8 +191,6 @@ static int enable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_prepare_enable(gpu->ebi1_clk);
- if (gpu->bus_freq)
- bs_set(gpu, gpu->bus_freq);
return 0;
}
@@ -152,8 +198,6 @@ static int disable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_disable_unprepare(gpu->ebi1_clk);
- if (gpu->bus_freq)
- bs_set(gpu, 0);
return 0;
}
@@ -175,6 +219,13 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ if (gpu->devfreq.devfreq) {
+ gpu->devfreq.busy_cycles = 0;
+ gpu->devfreq.time = ktime_get();
+
+ devfreq_resume_device(gpu->devfreq.devfreq);
+ }
+
gpu->needs_hw_init = true;
return 0;
@@ -186,6 +237,9 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
DBG("%s", gpu->name);
+ if (gpu->devfreq.devfreq)
+ devfreq_suspend_device(gpu->devfreq.devfreq);
+
ret = disable_axi(gpu);
if (ret)
return ret;
@@ -294,6 +348,8 @@ static void recover_worker(struct work_struct *work)
msm_rd_dump_submit(priv->hangrd, submit,
"offending task: %s (%s)", task->comm, cmd);
+
+ kfree(cmd);
} else {
msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
@@ -306,7 +362,7 @@ static void recover_worker(struct work_struct *work)
* needs to happen after msm_rd_dump_submit() to ensure that the
* bo's referenced by the offending submit are still around.
*/
- for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
uint32_t fence = ring->memptrs->fence;
@@ -496,7 +552,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
msm_gem_put_iova(&msm_obj->base, gpu->aspace);
- drm_gem_object_unreference(&msm_obj->base);
+ drm_gem_object_put(&msm_obj->base);
}
pm_runtime_mark_last_busy(&gpu->pdev->dev);
@@ -578,7 +634,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
/* submit takes a reference to the bo and iova until retired: */
- drm_gem_object_reference(&msm_obj->base);
+ drm_gem_object_get(&msm_obj->base);
msm_gem_get_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
@@ -626,8 +682,10 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
GFP_KERNEL);
- if (!gpu->grp_clks)
+ if (!gpu->grp_clks) {
+ gpu->nr_clocks = 0;
return -ENOMEM;
+ }
of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
gpu->grp_clks[i] = get_clock(dev, name);
@@ -753,7 +811,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->pdev = pdev;
platform_set_drvdata(pdev, gpu);
- bs_init(gpu);
+ msm_devfreq_init(gpu);
gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
config->va_start, config->va_end);
@@ -809,7 +867,7 @@ fail:
if (gpu->memptrs_bo) {
msm_gem_put_vaddr(gpu->memptrs_bo);
msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ drm_gem_object_put_unlocked(gpu->memptrs_bo);
}
platform_set_drvdata(pdev, NULL);
@@ -824,8 +882,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
WARN_ON(!list_empty(&gpu->active_list));
- bs_fini(gpu);
-
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
@@ -834,7 +890,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
if (gpu->memptrs_bo) {
msm_gem_put_vaddr(gpu->memptrs_bo);
msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ drm_gem_object_put_unlocked(gpu->memptrs_bo);
}
if (!IS_ERR_OR_NULL(gpu->aspace)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index e113d64..b824117 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -65,7 +65,10 @@ struct msm_gpu_funcs {
#ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+ /* for generation specific debugfs: */
+ int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
+ int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
};
struct msm_gpu {
@@ -108,12 +111,7 @@ struct msm_gpu {
struct clk **grp_clks;
int nr_clocks;
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
- uint32_t fast_rate, bus_freq;
-
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- struct msm_bus_scale_pdata *bus_scale_table;
- uint32_t bsc;
-#endif
+ uint32_t fast_rate;
/* Hang and Inactivity Detection:
*/
@@ -125,6 +123,12 @@ struct msm_gpu {
struct work_struct recover_work;
struct drm_gem_object *memptrs_bo;
+
+ struct {
+ struct devfreq *devfreq;
+ u64 busy_cycles;
+ ktime_t time;
+ } devfreq;
};
/* It turns out that all targets use the same ringbuffer size */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 6ca98da..6f5295b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -76,7 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
if (ring->bo) {
msm_gem_put_iova(ring->bo, ring->gpu->aspace);
msm_gem_put_vaddr(ring->bo);
- drm_gem_object_unreference_unlocked(ring->bo);
+ drm_gem_object_put_unlocked(ring->bo);
}
kfree(ring);
}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 1207ffe..5cae8db 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -131,11 +131,37 @@ static int mxsfb_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
+static int mxsfb_pipe_enable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+
+ /* Clear and enable VBLANK IRQ */
+ mxsfb_enable_axi_clk(mxsfb);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_SET);
+ mxsfb_disable_axi_clk(mxsfb);
+
+ return 0;
+}
+
+static void mxsfb_pipe_disable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+
+ /* Disable and clear VBLANK IRQ */
+ mxsfb_enable_axi_clk(mxsfb);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ mxsfb_disable_axi_clk(mxsfb);
+}
+
static struct drm_simple_display_pipe_funcs mxsfb_funcs = {
.enable = mxsfb_pipe_enable,
.disable = mxsfb_pipe_disable,
.update = mxsfb_pipe_update,
.prepare_fb = mxsfb_pipe_prepare_fb,
+ .enable_vblank = mxsfb_pipe_enable_vblank,
+ .disable_vblank = mxsfb_pipe_disable_vblank,
};
static int mxsfb_load(struct drm_device *drm, unsigned long flags)
@@ -274,33 +300,11 @@ static void mxsfb_lastclose(struct drm_device *drm)
drm_fbdev_cma_restore_mode(mxsfb->fbdev);
}
-static int mxsfb_enable_vblank(struct drm_device *drm, unsigned int crtc)
-{
- struct mxsfb_drm_private *mxsfb = drm->dev_private;
-
- /* Clear and enable VBLANK IRQ */
- mxsfb_enable_axi_clk(mxsfb);
- writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
- writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_SET);
- mxsfb_disable_axi_clk(mxsfb);
-
- return 0;
-}
-
-static void mxsfb_disable_vblank(struct drm_device *drm, unsigned int crtc)
+static void mxsfb_irq_preinstall(struct drm_device *drm)
{
struct mxsfb_drm_private *mxsfb = drm->dev_private;
- /* Disable and clear VBLANK IRQ */
- mxsfb_enable_axi_clk(mxsfb);
- writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
- writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
- mxsfb_disable_axi_clk(mxsfb);
-}
-
-static void mxsfb_irq_preinstall(struct drm_device *drm)
-{
- mxsfb_disable_vblank(drm, 0);
+ mxsfb_pipe_disable_vblank(&mxsfb->pipe);
}
static irqreturn_t mxsfb_irq_handler(int irq, void *data)
@@ -333,8 +337,6 @@ static struct drm_driver mxsfb_driver = {
.irq_handler = mxsfb_irq_handler,
.irq_preinstall = mxsfb_irq_preinstall,
.irq_uninstall = mxsfb_irq_preinstall,
- .enable_vblank = mxsfb_enable_vblank,
- .disable_vblank = mxsfb_disable_vblank,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 90075b6..c79160c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -213,8 +213,10 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
(dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
uint32_t type;
+ int domain = pci_domain_nr(dev->pdev->bus);
- pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
+ pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1),
+ 0x7c, &type);
sim_data.memory_type = (type >> 12) & 1;
sim_data.memory_width = 64;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index b985990..0c9bdf0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -216,12 +216,15 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
{
struct nvkm_pll_vals pllvals;
int ret;
+ int domain;
+
+ domain = pci_domain_nr(dev->pdev->bus);
if (plltype == PLL_MEMORY &&
(dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;
-
- pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+ pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3),
+ 0x6c, &mpllP);
mpllP = (mpllP >> 8) & 0xf;
if (!mpllP)
mpllP = 4;
@@ -232,7 +235,8 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
(dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;
- pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+ pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5),
+ 0x4c, &clock);
return clock / 1000;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index c8c2333..df4358e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -46,7 +46,6 @@ struct nouveau_plane {
struct drm_property *brightness;
struct drm_property *hue;
struct drm_property *saturation;
- struct drm_property *iturbt_709;
} props;
int colorkey;
@@ -54,7 +53,7 @@ struct nouveau_plane {
int brightness;
int hue;
int saturation;
- int iturbt_709;
+ enum drm_color_encoding color_encoding;
void (*set_params)(struct nouveau_plane *);
};
@@ -166,7 +165,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (fb->format->format == DRM_FORMAT_NV12 ||
fb->format->format == DRM_FORMAT_NV21)
format |= NV_PVIDEO_FORMAT_PLANAR;
- if (nv_plane->iturbt_709)
+ if (nv_plane->color_encoding == DRM_COLOR_YCBCR_BT709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (nv_plane->colorkey & (1 << 24))
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
@@ -229,7 +228,7 @@ nv10_set_params(struct nouveau_plane *plane)
nvif_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
if (plane->cur) {
- if (plane->iturbt_709)
+ if (plane->color_encoding == DRM_COLOR_YCBCR_BT709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (plane->colorkey & (1 << 24))
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
@@ -258,8 +257,8 @@ nv_set_property(struct drm_plane *plane,
nv_plane->hue = value;
else if (property == nv_plane->props.saturation)
nv_plane->saturation = value;
- else if (property == nv_plane->props.iturbt_709)
- nv_plane->iturbt_709 = value;
+ else if (property == nv_plane->base.color_encoding_property)
+ nv_plane->color_encoding = value;
else
return -EINVAL;
@@ -313,14 +312,11 @@ nv10_overlay_init(struct drm_device *device)
device, 0, "hue", 0, 359);
plane->props.saturation = drm_property_create_range(
device, 0, "saturation", 0, 8192 - 1);
- plane->props.iturbt_709 = drm_property_create_range(
- device, 0, "iturbt_709", 0, 1);
if (!plane->props.colorkey ||
!plane->props.contrast ||
!plane->props.brightness ||
!plane->props.hue ||
- !plane->props.saturation ||
- !plane->props.iturbt_709)
+ !plane->props.saturation)
goto cleanup;
plane->colorkey = 0;
@@ -343,9 +339,13 @@ nv10_overlay_init(struct drm_device *device)
drm_object_attach_property(&plane->base.base,
plane->props.saturation, plane->saturation);
- plane->iturbt_709 = 0;
- drm_object_attach_property(&plane->base.base,
- plane->props.iturbt_709, plane->iturbt_709);
+ plane->color_encoding = DRM_COLOR_YCBCR_BT601;
+ drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
plane->set_params = nv10_set_params;
nv10_set_params(plane);
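
[Reviewer note] With the driver-private "iturbt_709" property gone, clients select the matrix through the standard per-plane COLOR_ENCODING enum created by drm_plane_create_color_properties() above. A hedged libdrm sketch of the userspace side: find_prop_id() is a hypothetical helper, and the value 1 assumes the enum mirrors DRM_COLOR_YCBCR_BT709 from the kernel header.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical helper: look up a plane property id by name. */
static uint32_t find_prop_id(int fd, uint32_t plane_id, const char *name)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	uint32_t id = 0;

	for (uint32_t i = 0; props && i < props->count_props; i++) {
		drmModePropertyPtr p = drmModeGetProperty(fd, props->props[i]);

		if (p && !strcmp(p->name, name))
			id = p->prop_id;
		drmModeFreeProperty(p);
	}
	drmModeFreeObjectProperties(props);
	return id;
}

static int overlay_use_bt709(int fd, uint32_t plane_id)
{
	uint32_t prop = find_prop_id(fd, plane_id, "COLOR_ENCODING");

	if (!prop)
		return -1;
	/* 1 == ITU-R BT.709 YCbCr in the enum exposed above (assumption). */
	return drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
					prop, 1);
}
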
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index adb78f7..92be0e5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -75,6 +75,7 @@ int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk110_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index 59f3ba5..b57fe4a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -60,6 +60,7 @@ int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp108_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index b1ac47e..9398d9f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -46,6 +46,16 @@ enum nvkm_therm_attr_type {
NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
};
+struct nvkm_therm_clkgate_init {
+ u32 addr;
+ u8 count;
+ u32 data;
+};
+
+struct nvkm_therm_clkgate_pack {
+ const struct nvkm_therm_clkgate_init *init;
+};
+
struct nvkm_therm {
const struct nvkm_therm_func *func;
struct nvkm_subdev subdev;
@@ -85,17 +95,24 @@ struct nvkm_therm {
int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
+
+ bool clkgating_enabled;
};
int nvkm_therm_temp_get(struct nvkm_therm *);
int nvkm_therm_fan_sense(struct nvkm_therm *);
int nvkm_therm_cstate(struct nvkm_therm *, int, int);
+void nvkm_therm_clkgate_init(struct nvkm_therm *,
+ const struct nvkm_therm_clkgate_pack *);
+void nvkm_therm_clkgate_enable(struct nvkm_therm *);
+void nvkm_therm_clkgate_fini(struct nvkm_therm *, bool);
int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gk104_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
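
[Reviewer note] The hunk above introduces the clock-gating table format consumed by the therm subdev and populated by the gk104/gk110 graphics files later in this series: each nvkm_therm_clkgate_init entry names a starting register, an entry count and a value, and a pack is a zero-terminated list of such tables. Below is a stand-alone sketch of how such a pack would be walked; the 4-byte register stride and the stubbed wr32() are assumptions for illustration, not the real therm implementation.

#include <stdint.h>
#include <stdio.h>

struct clkgate_init { uint32_t addr; uint8_t count; uint32_t data; };
struct clkgate_pack { const struct clkgate_init *init; };

static void wr32(uint32_t addr, uint32_t data)
{
	/* stand-in for nvkm_wr32() */
	printf("wr32 0x%06x <- 0x%08x\n", (unsigned)addr, (unsigned)data);
}

static void apply_pack(const struct clkgate_pack *pack)
{
	const struct clkgate_init *init;

	for (; pack->init; pack++)
		for (init = pack->init; init->count; init++)
			for (uint8_t i = 0; i < init->count; i++)
				wr32(init->addr + i * 4, init->data);
}

int main(void)
{
	static const struct clkgate_init main_0[] = {
		{ 0x4041f0, 1, 0x00004046 },
		{}
	};
	static const struct clkgate_pack pack[] = { { main_0 }, {} };

	apply_pack(pack);
	return 0;
}
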
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 380f340..debbbf0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd)
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
- int or = nv_encoder->or;
+ int or = ffs(nv_encoder->dcb->or) - 1;
u32 div = 1025;
u32 val;
@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd)
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
- int or = nv_encoder->or;
+ int or = ffs(nv_encoder->dcb->or) - 1;
u32 div = 1025;
u32 val = (bd->props.brightness * div) / 100;
@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd)
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
- int or = nv_encoder->or;
+ int or = ffs(nv_encoder->dcb->or) - 1;
u32 div, val;
div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd)
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->client.device.object;
- int or = nv_encoder->or;
+ int or = ffs(nv_encoder->dcb->or) - 1;
u32 div, val;
div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector)
return -ENODEV;
}
- if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+ if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
return 0;
if (drm->client.device.info.chipset <= 0xa0 ||
@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
struct nvif_device *device = &drm->client.device;
struct drm_connector *connector;
+ INIT_LIST_HEAD(&drm->bl_connectors);
+
if (apple_gmux_present()) {
NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
return 0;
}
- INIT_LIST_HEAD(&drm->bl_connectors);
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
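
[Reviewer note] The backlight hunks above derive the output-resource index from the DCB 'or' bitmask rather than the cached nv_encoder->or. ffs() returns the 1-based position of the lowest set bit, so a mask with only bit 2 set maps to OR index 2; a tiny stand-alone illustration:

#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int dcb_or = 0x4;	/* bitmask: only OR 2 present */
	int or = ffs(dcb_or) - 1;	/* -> 2 */

	printf("OR index %d\n", or);
	return 0;
}
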
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ef68741..6f402c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -298,7 +298,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
type, &nvbo->placement,
- align >> PAGE_SHIFT, false, NULL, acc_size, sg,
+ align >> PAGE_SHIFT, false, acc_size, sg,
robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -549,10 +549,10 @@ int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
{
+ struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
int ret;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- interruptible, no_wait_gpu);
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
if (ret)
return ret;
@@ -604,19 +604,17 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
}
static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
- uint32_t page_flags, struct page *dummy_read)
+nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
- struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
if (drm->agp.bridge) {
- return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
- page_flags, dummy_read);
+ return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
}
#endif
- return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
+ return nouveau_sgdma_create_ttm(bo, page_flags);
}
static int
@@ -1200,6 +1198,7 @@ static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
+ struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
@@ -1214,11 +1213,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_reg = *new_reg;
tmp_reg.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
if (ret)
return ret;
- ret = ttm_tt_bind(bo->ttm, &tmp_reg);
+ ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
if (ret)
goto out;
@@ -1226,7 +1225,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
+ ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
out:
ttm_bo_mem_put(bo, &tmp_reg);
return ret;
@@ -1236,6 +1235,7 @@ static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
+ struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
@@ -1250,11 +1250,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_reg = *new_reg;
tmp_reg.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
+ ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
if (ret)
goto out;
@@ -1327,8 +1327,9 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
}
static int
-nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1336,7 +1337,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- ret = ttm_bo_wait(bo, intr, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret)
return ret;
@@ -1360,22 +1361,25 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Hardware assisted copy. */
if (drm->ttm.move) {
if (new_reg->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr,
- no_wait_gpu, new_reg);
+ ret = nouveau_bo_move_flipd(bo, evict,
+ ctx->interruptible,
+ ctx->no_wait_gpu, new_reg);
else if (old_reg->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr,
- no_wait_gpu, new_reg);
+ ret = nouveau_bo_move_flips(bo, evict,
+ ctx->interruptible,
+ ctx->no_wait_gpu, new_reg);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr,
- no_wait_gpu, new_reg);
+ ret = nouveau_bo_move_m2mf(bo, evict,
+ ctx->interruptible,
+ ctx->no_wait_gpu, new_reg);
if (!ret)
goto out;
}
/* Fallback to software copy. */
- ret = ttm_bo_wait(bo, intr, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);
+ ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
out:
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1544,7 +1548,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}
static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
@@ -1569,17 +1573,17 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- return ttm_agp_tt_populate(ttm);
+ return ttm_agp_tt_populate(ttm, ctx);
}
#endif
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate((void *)ttm, dev);
+ return ttm_dma_populate((void *)ttm, dev, ctx);
}
#endif
- r = ttm_pool_populate(ttm);
+ r = ttm_pool_populate(ttm, ctx);
if (r) {
return r;
}
@@ -1669,5 +1673,4 @@ struct ttm_bo_driver nouveau_bo_driver = {
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
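
[Reviewer note] Most of the nouveau_bo.c churn above follows one TTM interface change: the separate 'interruptible'/'no_wait_gpu' booleans are bundled into a struct ttm_operation_ctx built once by the caller and passed down. A stand-alone illustration of the calling pattern; struct op_ctx is a simplified stand-in, not the real TTM type.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct ttm_operation_ctx. */
struct op_ctx {
	bool interruptible;
	bool no_wait_gpu;
};

/* New-style callee: one context pointer instead of two bool arguments. */
static int do_validate(const struct op_ctx *ctx)
{
	return ctx->no_wait_gpu ? -16 /* busy */ : 0;
}

int main(void)
{
	struct op_ctx ctx = { .interruptible = true, .no_wait_gpu = false };

	printf("validate: %d\n", do_validate(&ctx));
	return 0;
}
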
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 7b5cc5c..be8e00b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -105,4 +105,32 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
return ioptr;
}
+static inline void
+nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
+{
+ if (*pnvbo) {
+ nouveau_bo_unmap(*pnvbo);
+ nouveau_bo_unpin(*pnvbo);
+ nouveau_bo_ref(NULL, pnvbo);
+ }
+}
+
+static inline int
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+ struct nouveau_bo **pnvbo)
+{
+ int ret = nouveau_bo_new(cli, size, align, flags,
+ 0, 0, NULL, NULL, pnvbo);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(*pnvbo, flags, true);
+ if (ret == 0) {
+ ret = nouveau_bo_map(*pnvbo);
+ if (ret == 0)
+ return ret;
+ nouveau_bo_unpin(*pnvbo);
+ }
+ nouveau_bo_ref(NULL, pnvbo);
+ }
+ return ret;
+}
#endif
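
[Reviewer note] The two inline helpers added above fold the allocate/pin/map ladder and its error unwinding into single calls, which nv50_head_create() later in this series uses for its LUT buffers. A short hedged usage sketch (lut_bo and LUT_SIZE are placeholder names):

#define LUT_SIZE (1025 * 8)	/* placeholder, matches the LUT use below */

static struct nouveau_bo *lut_bo;

static int lut_alloc(struct nouveau_cli *cli)
{
	/* On pin/map failure the helper unwinds and clears *pnvbo. */
	return nouveau_bo_new_pin_map(cli, LUT_SIZE, 0x100,
				      TTM_PL_FLAG_VRAM, &lut_bo);
}

static void lut_free(void)
{
	nouveau_bo_unmap_unpin_unref(&lut_bo);	/* no-op if lut_bo is NULL */
}
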
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 69d6e61..6ed9cb0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
- return conn_status;
+ /* Outputs are only polled while runtime active, so acquiring a
+ * runtime PM ref here is unnecessary (and would deadlock upon
+ * runtime suspend because it waits for polling to finish).
+ */
+ if (!drm_kms_helper_is_poll_worker()) {
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return conn_status;
+ }
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@@ -647,8 +653,10 @@ detect_analog:
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return conn_status;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index b7a18fb..366acb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -60,7 +60,6 @@ struct nouveau_crtc {
} cursor;
struct {
- struct nouveau_bo *nvbo;
int depth;
} lut;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e7785f..0097134 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <nvif/class.h>
@@ -292,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = nouveau_fbcon_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 56fe261..bbbf353 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -510,36 +510,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
return 0;
}
-#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
-
-static void
-nouveau_get_hdmi_dev(struct nouveau_drm *drm)
-{
- struct pci_dev *pdev = drm->dev->pdev;
-
- if (!pdev) {
- NV_DEBUG(drm, "not a PCI device; no HDMI\n");
- drm->hdmi_device = NULL;
- return;
- }
-
- /* subfunction one is a hdmi audio device? */
- drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
- PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
-
- if (!drm->hdmi_device) {
- NV_DEBUG(drm, "hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
- return;
- }
-
- if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
- NV_DEBUG(drm, "possible hdmi device not audio %d\n", drm->hdmi_device->class);
- pci_dev_put(drm->hdmi_device);
- drm->hdmi_device = NULL;
- return;
- }
-}
-
static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
@@ -567,8 +537,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
- nouveau_get_hdmi_dev(drm);
-
/* workaround an odd issue on nvc1 by disabling the device's
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
@@ -654,8 +622,6 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_ttm_fini(drm);
nouveau_vga_fini(drm);
- if (drm->hdmi_device)
- pci_dev_put(drm->hdmi_device);
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
kfree(drm);
@@ -855,7 +821,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
}
drm_kms_helper_poll_disable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
nouveau_switcheroo_optimus_dsm();
ret = nouveau_do_suspend(drm_dev, true);
pci_save_state(pdev);
@@ -890,7 +855,6 @@ nouveau_pmops_runtime_resume(struct device *dev)
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
/* Monitors may have been connected / disconnected during suspend */
@@ -912,15 +876,6 @@ nouveau_pmops_runtime_idle(struct device *dev)
return -EBUSY;
}
- /* if we have a hdmi audio device - make sure it has a driver loaded */
- if (drm->hdmi_device) {
- if (!drm->hdmi_device->driver) {
- DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
- pm_runtime_mark_last_busy(dev);
- return -EBUSY;
- }
- }
-
list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
if (crtc->enabled) {
DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 96f6bd8..881b44b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -208,7 +208,6 @@ struct nouveau_drm {
bool have_disp_power_ref;
struct dev_pm_domain vga_pm_domain;
- struct pci_dev *hdmi_device;
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index be7357b..85c1f10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -56,6 +56,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
+static int nouveau_fbcon_bpp;
+module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
+
static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
@@ -413,14 +417,6 @@ out:
return ret;
}
-void
-nouveau_fbcon_output_poll_changed(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon)
- drm_fb_helper_hotplug_event(&drm->fbcon->helper);
-}
-
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
@@ -496,7 +492,7 @@ nouveau_fbcon_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_fbdev *fbcon;
- int preferred_bpp;
+ int preferred_bpp = nouveau_fbcon_bpp;
int ret;
if (!dev->mode_config.num_crtc ||
@@ -520,13 +516,15 @@ nouveau_fbcon_init(struct drm_device *dev)
if (ret)
goto fini;
- if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
- preferred_bpp = 8;
- else
- if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
- preferred_bpp = 16;
- else
- preferred_bpp = 32;
+ if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
+ if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+ preferred_bpp = 8;
+ else
+ if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
+ preferred_bpp = 16;
+ else
+ preferred_bpp = 32;
+ }
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!drm_drv_uses_atomic_modeset(dev))
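
[Reviewer note] The new fbcon_bpp option overrides the VRAM-size heuristic above; any value other than 8, 16 or 32 falls back to the automatic choice. Typical usage, with the parameter name taken from the module_param_named() call:

modprobe nouveau fbcon_bpp=16
# or via the kernel command line:
nouveau.fbcon_bpp=16
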
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e2bca72..a6f192e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -68,8 +68,6 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
void nouveau_fbcon_accel_restore(struct drm_device *dev);
-void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
-
extern int nouveau_nofbaccel;
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index efc89aa..e72a7e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -354,7 +354,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
- ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
+ ttm_bo_unreserve(&nvbo->bo);
drm_gem_object_unreference_unlocked(&nvbo->gem);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 11f6ca8..8ebdc74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -82,11 +82,9 @@ static struct ttm_backend_func nv50_sgdma_backend = {
};
struct ttm_tt *
-nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
- struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -98,7 +96,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
- if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
+ if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
/*
* A failing ttm_dma_tt_init() will call ttm_tt_destroy()
* and thus our nouveau_sgdma_destroy() hook, so we don't need
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index 96082b6..89929ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -12,9 +12,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
extern const struct ttm_mem_type_manager_func nv04_gart_manager;
-struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
- unsigned long size, u32 page_flags,
- struct page *dummy_read_page);
+struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo,
+ u32 page_flags);
int nouveau_ttm_init(struct nouveau_drm *drm);
void nouveau_ttm_fini(struct nouveau_drm *drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 52e52a3..3da5a43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -4,6 +4,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include "nouveau_drv.h"
#include "nouveau_acpi.h"
@@ -61,7 +62,7 @@ static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- nouveau_fbcon_output_poll_changed(dev);
+ drm_fb_helper_output_poll_changed(dev);
}
static bool
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 584466e..8bd739c 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -137,8 +137,10 @@ struct nv50_head_atom {
} mode;
struct {
+ bool visible;
u32 handle;
u64 offset:40;
+ u8 mode:4;
} lut;
struct {
@@ -192,6 +194,7 @@ struct nv50_head_atom {
union {
struct {
+ bool ilut:1;
bool core:1;
bool curs:1;
};
@@ -200,6 +203,7 @@ struct nv50_head_atom {
union {
struct {
+ bool ilut:1;
bool core:1;
bool curs:1;
bool view:1;
@@ -228,8 +232,6 @@ struct nv50_wndw_atom {
struct drm_plane_state state;
u8 interval;
- struct drm_rect clip;
-
struct {
u32 handle;
u16 offset:12;
@@ -660,6 +662,10 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
struct nv50_head {
struct nouveau_crtc base;
+ struct {
+ struct nouveau_bo *nvbo[2];
+ int next;
+ } lut;
struct nv50_ovly ovly;
struct nv50_oimm oimm;
};
@@ -840,10 +846,6 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
int ret;
NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
- asyw->clip.x1 = 0;
- asyw->clip.y1 = 0;
- asyw->clip.x2 = asyh->state.mode.hdisplay;
- asyw->clip.y2 = asyh->state.mode.vdisplay;
asyw->image.w = fb->base.width;
asyw->image.h = fb->base.height;
@@ -1143,10 +1145,10 @@ nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
{
int ret;
- ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
asyh->curs.visible = asyw->state.visible;
if (ret || !asyh->curs.visible)
return ret;
@@ -1432,10 +1434,10 @@ nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
if (!fb->format->depth)
return -EINVAL;
- ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
if (ret)
return ret;
@@ -1793,6 +1795,54 @@ nv50_head_lut_clr(struct nv50_head *head)
}
static void
+nv50_head_lut_load(struct drm_property_blob *blob, int mode,
+ struct nouveau_bo *nvbo)
+{
+ struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
+ void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
+ const int size = blob->length / sizeof(*in);
+ int bits, shift, i;
+ u16 zero, r, g, b;
+
+ /* This can't happen.. But it shuts the compiler up. */
+ if (WARN_ON(size != 256))
+ return;
+
+ switch (mode) {
+ case 0: /* LORES. */
+ case 1: /* HIRES. */
+ bits = 11;
+ shift = 3;
+ zero = 0x0000;
+ break;
+ case 7: /* INTERPOLATE_257_UNITY_RANGE. */
+ bits = 14;
+ shift = 0;
+ zero = 0x6000;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ for (i = 0; i < size; i++) {
+ r = (drm_color_lut_extract(in[i]. red, bits) + zero) << shift;
+ g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
+ b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
+ writew(r, lut + (i * 0x08) + 0);
+ writew(g, lut + (i * 0x08) + 2);
+ writew(b, lut + (i * 0x08) + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(r, lut + (i * 0x08) + 0);
+ writew(g, lut + (i * 0x08) + 2);
+ writew(b, lut + (i * 0x08) + 4);
+}
+
+static void
nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
@@ -1800,18 +1850,18 @@ nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
if ((push = evo_wait(core, 7))) {
if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
- evo_data(push, 0xc0000000);
+ evo_data(push, 0x80000000 | asyh->lut.mode << 30);
evo_data(push, asyh->lut.offset >> 8);
} else
if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
- evo_data(push, 0xc0000000);
+ evo_data(push, 0x80000000 | asyh->lut.mode << 30);
evo_data(push, asyh->lut.offset >> 8);
evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
evo_data(push, asyh->lut.handle);
} else {
evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
- evo_data(push, 0x83000000);
+ evo_data(push, 0x80000000 | asyh->lut.mode << 24);
evo_data(push, asyh->lut.offset >> 8);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
@@ -1894,7 +1944,7 @@ nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
static void
nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
{
- if (asyh->clr.core && (!asyh->set.core || y))
+ if (asyh->clr.ilut && (!asyh->set.ilut || y))
nv50_head_lut_clr(head);
if (asyh->clr.core && (!asyh->set.core || y))
nv50_head_core_clr(head);
@@ -1907,7 +1957,15 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
if (asyh->set.view ) nv50_head_view (head, asyh);
if (asyh->set.mode ) nv50_head_mode (head, asyh);
- if (asyh->set.core ) nv50_head_lut_set (head, asyh);
+ if (asyh->set.ilut ) {
+ struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
+ struct drm_property_blob *blob = asyh->state.gamma_lut;
+ if (blob)
+ nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
+ asyh->lut.offset = nvbo->bo.offset;
+ head->lut.next ^= 1;
+ nv50_head_lut_set(head, asyh);
+ }
if (asyh->set.core ) nv50_head_core_set(head, asyh);
if (asyh->set.curs ) nv50_head_curs_set(head, asyh);
if (asyh->set.base ) nv50_head_base (head, asyh);
@@ -2042,6 +2100,37 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh,
}
static void
+nv50_head_atomic_check_lut(struct nv50_head *head,
+ struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh)
+{
+ struct nv50_disp *disp = nv50_disp(head->base.base.dev);
+
+ /* An I8 surface without an input LUT makes no sense, and
+ * EVO will throw an error if you try.
+ *
+ * Legacy clients actually cause this due to the order in
+ * which they call ioctls, so we will enable the LUT with
+ * whatever contents the buffer already contains to avoid
+ * triggering the error check.
+ */
+ if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
+ asyh->lut.handle = 0;
+ asyh->clr.ilut = armh->lut.visible;
+ return;
+ }
+
+ if (disp->disp->oclass < GF110_DISP) {
+ asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
+ asyh->set.ilut = true;
+ } else {
+ asyh->lut.mode = 7;
+ asyh->set.ilut = asyh->state.color_mgmt_changed;
+ }
+ asyh->lut.handle = disp->mast.base.vram.handle;
+}
+
+static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
@@ -2126,6 +2215,11 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
if (asyh->state.mode_changed)
nv50_head_atomic_check_mode(head, asyh);
+ if (asyh->state.color_mgmt_changed ||
+ asyh->base.cpp != armh->base.cpp)
+ nv50_head_atomic_check_lut(head, armh, asyh);
+ asyh->lut.visible = asyh->lut.handle != 0;
+
if (asyc) {
if (asyc->set.scaler)
nv50_head_atomic_check_view(armh, asyh, asyc);
@@ -2141,7 +2235,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->core.w = asyh->base.w;
asyh->core.h = asyh->base.h;
} else
- if ((asyh->core.visible = asyh->curs.visible)) {
+ if ((asyh->core.visible = asyh->curs.visible) ||
+ (asyh->core.visible = asyh->lut.visible)) {
/*XXX: We need to either find some way of having the
* primary base layer appear black, while still
* being able to display the other layers, or we
@@ -2159,11 +2254,10 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->core.layout = 1;
asyh->core.block = 0;
asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
- asyh->lut.handle = disp->mast.base.vram.handle;
- asyh->lut.offset = head->base.lut.nvbo->bo.offset;
asyh->set.base = armh->base.cpp != asyh->base.cpp;
asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
} else {
+ asyh->lut.visible = false;
asyh->core.visible = false;
asyh->curs.visible = false;
asyh->base.cpp = 0;
@@ -2187,8 +2281,10 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->clr.curs = true;
}
} else {
+ asyh->clr.ilut = armh->lut.visible;
asyh->clr.core = armh->core.visible;
asyh->clr.curs = armh->curs.visible;
+ asyh->set.ilut = asyh->lut.visible;
asyh->set.core = asyh->core.visible;
asyh->set.curs = asyh->curs.visible;
}
@@ -2198,47 +2294,11 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
return 0;
}
-static void
-nv50_head_lut_load(struct drm_crtc *crtc)
-{
- struct nv50_disp *disp = nv50_disp(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- u16 *r, *g, *b;
- int i;
-
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
-
- for (i = 0; i < 256; i++) {
- if (disp->disp->oclass < GF110_DISP) {
- writew((*r++ >> 2) + 0x0000, lut + (i * 0x08) + 0);
- writew((*g++ >> 2) + 0x0000, lut + (i * 0x08) + 2);
- writew((*b++ >> 2) + 0x0000, lut + (i * 0x08) + 4);
- } else {
- /* 0x6000 interferes with the 14-bit color??? */
- writew((*r++ >> 2) + 0x6000, lut + (i * 0x20) + 0);
- writew((*g++ >> 2) + 0x6000, lut + (i * 0x20) + 2);
- writew((*b++ >> 2) + 0x6000, lut + (i * 0x20) + 4);
- }
- }
-}
-
static const struct drm_crtc_helper_funcs
nv50_head_help = {
.atomic_check = nv50_head_atomic_check,
};
-static int
-nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- nv50_head_lut_load(crtc);
- return 0;
-}
-
static void
nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
@@ -2294,17 +2354,15 @@ nv50_head_reset(struct drm_crtc *crtc)
static void
nv50_head_destroy(struct drm_crtc *crtc)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
+ int i;
nv50_dmac_destroy(&head->ovly.base, disp->disp);
nv50_pioc_destroy(&head->oimm.base);
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- if (nv_crtc->lut.nvbo)
- nouveau_bo_unpin(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
+ nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
drm_crtc_cleanup(crtc);
kfree(crtc);
@@ -2313,7 +2371,7 @@ nv50_head_destroy(struct drm_crtc *crtc)
static const struct drm_crtc_funcs
nv50_head_func = {
.reset = nv50_head_reset,
- .gamma_set = nv50_head_gamma_set,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -2331,7 +2389,7 @@ nv50_head_create(struct drm_device *dev, int index)
struct nv50_base *base;
struct nv50_curs *curs;
struct drm_crtc *crtc;
- int ret;
+ int ret, i;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
@@ -2353,22 +2411,14 @@ nv50_head_create(struct drm_device *dev, int index)
drm_crtc_helper_add(crtc, &nv50_head_help);
drm_mode_crtc_set_gamma_size(crtc, 256);
- ret = nouveau_bo_new(&drm->client, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
- if (!ret) {
- ret = nouveau_bo_map(head->base.lut.nvbo);
- if (ret)
- nouveau_bo_unpin(head->base.lut.nvbo);
- }
+ for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
+ ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
+ TTM_PL_FLAG_VRAM,
+ &head->lut.nvbo[i]);
if (ret)
- nouveau_bo_ref(NULL, &head->base.lut.nvbo);
+ goto out;
}
- if (ret)
- goto out;
-
/* allocate overlay resources */
ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
if (ret)
@@ -2688,7 +2738,6 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
if (!drm_detect_monitor_audio(nv_connector->edid))
return;
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
nvif_mthd(disp->disp, 0, &args,
@@ -2755,7 +2804,8 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
}
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi, mode);
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
+ &nv_connector->base, mode);
if (!ret) {
/* We have a Vendor InfoFrame, populate it to the display */
args.pwr.vendor_infoframe_length
@@ -3064,10 +3114,8 @@ nv50_mstc_get_modes(struct drm_connector *connector)
mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
- if (mstc->edid) {
+ if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
- drm_edid_to_eld(&mstc->connector, mstc->edid);
- }
if (!mstc->connector.display_info.bpc)
mstc->connector.display_info.bpc = 8;
@@ -4311,7 +4359,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = nouveau_fbcon_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
@@ -4350,7 +4398,6 @@ nv50_display_init(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_plane *plane;
- struct drm_crtc *crtc;
u32 *push;
push = evo_wait(nv50_mast(dev), 32);
@@ -4369,10 +4416,6 @@ nv50_display_init(struct drm_device *dev)
}
}
- drm_for_each_crtc(crtc, dev) {
- nv50_head_lut_load(crtc);
- }
-
drm_for_each_plane(plane, dev) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (plane->funcs != &nv50_wndw)
@@ -4426,6 +4469,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_display(dev)->fini = nv50_display_fini;
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
+ dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
if (nouveau_atomic)
dev->driver->driver_features |= DRIVER_ATOMIC;
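
[Reviewer note] nv50_head_lut_load() above packs each drm_color_lut entry into the hardware format selected by asyh->lut.mode: the legacy LORES/HIRES modes keep 11 significant bits shifted left by 3, while mode 7 (INTERPOLATE_257_UNITY_RANGE) keeps 14 bits offset by 0x6000. A stand-alone sketch of the per-component math; extract() approximates drm_color_lut_extract() (rounded reduction of a 16-bit component), and the exact rounding is an assumption for illustration.

#include <stdio.h>

static unsigned extract(unsigned in, unsigned bits)
{
	unsigned max = 0xffffu >> (16 - bits);
	unsigned val = in + (1u << (16 - bits - 1));	/* round */

	if (val > 0xffff)
		val = 0xffff;
	val >>= 16 - bits;
	return val < max ? val : max;
}

int main(void)
{
	unsigned in = 0xffff;	/* full-scale component from the gamma blob */

	printf("LORES/HIRES word: 0x%04x\n", (extract(in, 11) + 0x0000) << 3);
	printf("mode 7 word:      0x%04x\n", extract(in, 14) + 0x6000);
	return 0;
}
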
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 08e77cd..05cd674 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -28,6 +28,7 @@
#include <core/option.h>
#include <subdev/bios.h>
+#include <subdev/therm.h>
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
@@ -1682,7 +1683,7 @@ nve4_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1721,7 +1722,7 @@ nve6_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1760,7 +1761,7 @@ nve7_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1811,7 +1812,7 @@ nvf0_chipset = {
.bus = gf100_bus_new,
.clk = gk104_clk_new,
.devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
+ .fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
@@ -1824,7 +1825,7 @@ nvf0_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1849,7 +1850,7 @@ nvf1_chipset = {
.bus = gf100_bus_new,
.clk = gk104_clk_new,
.devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
+ .fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
@@ -1862,7 +1863,7 @@ nvf1_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1887,7 +1888,7 @@ nv106_chipset = {
.bus = gf100_bus_new,
.clk = gk104_clk_new,
.devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
+ .fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
@@ -1900,7 +1901,7 @@ nv106_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk208_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1925,7 +1926,7 @@ nv108_chipset = {
.bus = gf100_bus_new,
.clk = gk104_clk_new,
.devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
+ .fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
@@ -1938,7 +1939,7 @@ nv108_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk208_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2345,6 +2346,7 @@ nv138_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
+ .secboot = gp108_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2356,6 +2358,10 @@ nv138_chipset = {
.disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
+ .gr = gp107_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
+ .sw = gf100_sw_new,
};
static const struct nvkm_device_chip
@@ -2508,6 +2514,7 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
}
}
+ nvkm_therm_clkgate_fini(device->therm, suspend);
if (device->func->fini)
device->func->fini(device, suspend);
@@ -2597,6 +2604,7 @@ nvkm_device_init(struct nvkm_device *device)
}
nvkm_acpi_init(device);
+ nvkm_therm_clkgate_enable(device->therm);
time = ktime_to_us(ktime_get()) - time;
nvdev_trace(device, "init completed in %lldus\n", time);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index d7c2adb..c8ec3fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -137,6 +137,7 @@ struct gf100_gr_func {
int (*rops)(struct gf100_gr *);
int ppc_nr;
const struct gf100_grctx_func *grctx;
+ const struct nvkm_therm_clkgate_pack *clkgate_pack;
struct nvkm_sclass sclass[];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 5e82f94..1b52fcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "gf100.h"
+#include "gk104.h"
#include "ctxgf100.h"
#include <nvif/class.h>
@@ -173,6 +174,208 @@ gk104_gr_pack_mmio[] = {
{}
};
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_main_0[] = {
+ { 0x4041f0, 1, 0x00004046 },
+ { 0x409890, 1, 0x00000045 },
+ { 0x4098b0, 1, 0x0000007f },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_rstr2d_0[] = {
+ { 0x4078c0, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_unk_0[] = {
+ { 0x406000, 1, 0x00004044 },
+ { 0x405860, 1, 0x00004042 },
+ { 0x40590c, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gcc_0[] = {
+ { 0x408040, 1, 0x00004044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_sked_0[] = {
+ { 0x407000, 1, 0x00004044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_unk_1[] = {
+ { 0x405bf0, 1, 0x00004044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_ctxctl_0[] = {
+ { 0x41a890, 1, 0x00000042 },
+ { 0x41a8b0, 1, 0x0000007f },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_unk_0[] = {
+ { 0x418500, 1, 0x00004042 },
+ { 0x418608, 1, 0x00004042 },
+ { 0x418688, 1, 0x00004042 },
+ { 0x418718, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_esetup_0[] = {
+ { 0x418828, 1, 0x00000044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_tpbus_0[] = {
+ { 0x418bbc, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_zcull_0[] = {
+ { 0x418970, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_tpconf_0[] = {
+ { 0x418c70, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_unk_1[] = {
+ { 0x418cf0, 1, 0x00004042 },
+ { 0x418d70, 1, 0x00004042 },
+ { 0x418f0c, 1, 0x00004042 },
+ { 0x418e0c, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_gcc_0[] = {
+ { 0x419020, 1, 0x00004042 },
+ { 0x419038, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_ffb_0[] = {
+ { 0x418898, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_tex_0[] = {
+ { 0x419a40, 9, 0x00004042 },
+ { 0x419acc, 1, 0x00004047 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_poly_0[] = {
+ { 0x419868, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_l1c_0[] = {
+ { 0x419ccc, 3, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_unk_2[] = {
+ { 0x419c70, 1, 0x00004045 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_mp_0[] = {
+ { 0x419fd0, 1, 0x00004043 },
+ { 0x419fd8, 1, 0x00004049 },
+ { 0x419fe0, 2, 0x00004042 },
+ { 0x419ff0, 1, 0x00004046 },
+ { 0x419ff8, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_ppc_0[] = {
+ { 0x41be28, 1, 0x00000042 },
+ { 0x41bfe8, 1, 0x00004042 },
+ { 0x41bed0, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_rop_zrop_0[] = {
+ { 0x408810, 2, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_rop_0[] = {
+ { 0x408a80, 6, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_rop_crop_0[] = {
+ { 0x4089a8, 1, 0x00004042 },
+ { 0x4089b0, 1, 0x00000042 },
+ { 0x4089b8, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_pxbar_0[] = {
+ { 0x13c820, 1, 0x0001007f },
+ { 0x13cbe0, 1, 0x00000042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_pack
+gk104_clkgate_pack[] = {
+ { gk104_clkgate_blcg_init_main_0 },
+ { gk104_clkgate_blcg_init_rstr2d_0 },
+ { gk104_clkgate_blcg_init_unk_0 },
+ { gk104_clkgate_blcg_init_gcc_0 },
+ { gk104_clkgate_blcg_init_sked_0 },
+ { gk104_clkgate_blcg_init_unk_1 },
+ { gk104_clkgate_blcg_init_gpc_ctxctl_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_0 },
+ { gk104_clkgate_blcg_init_gpc_esetup_0 },
+ { gk104_clkgate_blcg_init_gpc_tpbus_0 },
+ { gk104_clkgate_blcg_init_gpc_zcull_0 },
+ { gk104_clkgate_blcg_init_gpc_tpconf_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_1 },
+ { gk104_clkgate_blcg_init_gpc_gcc_0 },
+ { gk104_clkgate_blcg_init_gpc_ffb_0 },
+ { gk104_clkgate_blcg_init_gpc_tex_0 },
+ { gk104_clkgate_blcg_init_gpc_poly_0 },
+ { gk104_clkgate_blcg_init_gpc_l1c_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_2 },
+ { gk104_clkgate_blcg_init_gpc_mp_0 },
+ { gk104_clkgate_blcg_init_gpc_ppc_0 },
+ { gk104_clkgate_blcg_init_rop_zrop_0 },
+ { gk104_clkgate_blcg_init_rop_0 },
+ { gk104_clkgate_blcg_init_rop_crop_0 },
+ { gk104_clkgate_blcg_init_pxbar_0 },
+ {}
+};
+
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
@@ -214,6 +417,9 @@ gk104_gr_init(struct gf100_gr *gr)
gr->func->init_gpc_mmu(gr);
gf100_gr_mmio(gr, gr->func->mmio);
+ if (gr->func->clkgate_pack)
+ nvkm_therm_clkgate_init(gr->base.engine.subdev.device->therm,
+ gr->func->clkgate_pack);
nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
@@ -338,6 +544,7 @@ gk104_gr = {
.rops = gf100_gr_rops,
.ppc_nr = 1,
.grctx = &gk104_grctx,
+ .clkgate_pack = gk104_clkgate_pack,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h
new file mode 100644
index 0000000..a24c177
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul <lyude@redhat.com>
+ */
+#ifndef __GK104_GR_H__
+#define __GK104_GR_H__
+
+#include <subdev/therm.h>
+
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_main_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rstr2d_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gcc_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_sked_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_1[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ctxctl_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_esetup_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpbus_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_zcull_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpconf_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_1[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_gcc_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ffb_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tex_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_poly_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_l1c_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_2[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_mp_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ppc_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_zrop_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_crop_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_pxbar_0[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index a38e19b..4da916a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "gf100.h"
+#include "gk104.h"
#include "ctxgf100.h"
#include <subdev/timer.h>
@@ -156,6 +157,159 @@ gk110_gr_pack_mmio[] = {
{}
};
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_blcg_init_sked_0[] = {
+ { 0x407000, 1, 0x00004041 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_blcg_init_gpc_gcc_0[] = {
+ { 0x419020, 1, 0x00000042 },
+ { 0x419038, 1, 0x00000042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_blcg_init_gpc_l1c_0[] = {
+ { 0x419cd4, 2, 0x00004042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_blcg_init_gpc_mp_0[] = {
+ { 0x419fd0, 1, 0x00004043 },
+ { 0x419fd8, 1, 0x00004049 },
+ { 0x419fe0, 2, 0x00004042 },
+ { 0x419ff0, 1, 0x00000046 },
+ { 0x419ff8, 1, 0x00004042 },
+ { 0x419f90, 1, 0x00004042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_main_0[] = {
+ { 0x4041f4, 1, 0x00000000 },
+ { 0x409894, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_unk_0[] = {
+ { 0x406004, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_sked_0[] = {
+ { 0x407004, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_ctxctl_0[] = {
+ { 0x41a894, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_unk_0[] = {
+ { 0x418504, 1, 0x00000000 },
+ { 0x41860c, 1, 0x00000000 },
+ { 0x41868c, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_esetup_0[] = {
+ { 0x41882c, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_zcull_0[] = {
+ { 0x418974, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_l1c_0[] = {
+ { 0x419cd8, 2, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_unk_1[] = {
+ { 0x419c74, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_mp_0[] = {
+ { 0x419fd4, 1, 0x00004a4a },
+ { 0x419fdc, 1, 0x00000014 },
+ { 0x419fe4, 1, 0x00000000 },
+ { 0x419ff4, 1, 0x00001724 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_ppc_0[] = {
+ { 0x41be2c, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_pcounter_0[] = {
+ { 0x1be018, 1, 0x000001ff },
+ { 0x1bc018, 1, 0x000001ff },
+ { 0x1b8018, 1, 0x000001ff },
+ { 0x1b4124, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_pack
+gk110_clkgate_pack[] = {
+ { gk104_clkgate_blcg_init_main_0 },
+ { gk104_clkgate_blcg_init_rstr2d_0 },
+ { gk104_clkgate_blcg_init_unk_0 },
+ { gk104_clkgate_blcg_init_gcc_0 },
+ { gk110_clkgate_blcg_init_sked_0 },
+ { gk104_clkgate_blcg_init_unk_1 },
+ { gk104_clkgate_blcg_init_gpc_ctxctl_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_0 },
+ { gk104_clkgate_blcg_init_gpc_esetup_0 },
+ { gk104_clkgate_blcg_init_gpc_tpbus_0 },
+ { gk104_clkgate_blcg_init_gpc_zcull_0 },
+ { gk104_clkgate_blcg_init_gpc_tpconf_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_1 },
+ { gk110_clkgate_blcg_init_gpc_gcc_0 },
+ { gk104_clkgate_blcg_init_gpc_ffb_0 },
+ { gk104_clkgate_blcg_init_gpc_tex_0 },
+ { gk104_clkgate_blcg_init_gpc_poly_0 },
+ { gk110_clkgate_blcg_init_gpc_l1c_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_2 },
+ { gk110_clkgate_blcg_init_gpc_mp_0 },
+ { gk104_clkgate_blcg_init_gpc_ppc_0 },
+ { gk104_clkgate_blcg_init_rop_zrop_0 },
+ { gk104_clkgate_blcg_init_rop_0 },
+ { gk104_clkgate_blcg_init_rop_crop_0 },
+ { gk104_clkgate_blcg_init_pxbar_0 },
+ { gk110_clkgate_slcg_init_main_0 },
+ { gk110_clkgate_slcg_init_unk_0 },
+ { gk110_clkgate_slcg_init_sked_0 },
+ { gk110_clkgate_slcg_init_gpc_ctxctl_0 },
+ { gk110_clkgate_slcg_init_gpc_unk_0 },
+ { gk110_clkgate_slcg_init_gpc_esetup_0 },
+ { gk110_clkgate_slcg_init_gpc_zcull_0 },
+ { gk110_clkgate_slcg_init_gpc_l1c_0 },
+ { gk110_clkgate_slcg_init_gpc_unk_1 },
+ { gk110_clkgate_slcg_init_gpc_mp_0 },
+ { gk110_clkgate_slcg_init_gpc_ppc_0 },
+ { gk110_clkgate_slcg_init_pcounter_0 },
+ {}
+};
+
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
@@ -192,6 +346,7 @@ gk110_gr = {
.rops = gf100_gr_rops,
.ppc_nr = 2,
.grctx = &gk110_grctx,
+ .clkgate_pack = gk110_clkgate_pack,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index dde89a4..53859b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -462,7 +462,7 @@ nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
args->v0.id = di;
args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
- strncpy(args->v0.name, dom->name, sizeof(args->v0.name));
+ strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);
/* Currently only global counters (PCOUNTER) are implemented
* but this will be different for local counters (MP). */
@@ -514,7 +514,7 @@ nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
"/%s/%02x", dom->name, si);
} else {
strncpy(args->v0.name, sig->name,
- sizeof(args->v0.name));
+ sizeof(args->v0.name) - 1);
}
args->v0.signal = si;
@@ -572,7 +572,7 @@ nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
args->v0.source = sig->source[si];
args->v0.mask = src->mask;
- strncpy(args->v0.name, src->name, sizeof(args->v0.name));
+ strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
}
if (++si < source_nr) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 77273b5..58a59b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -505,6 +505,7 @@ nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
ret = msgqueue_0137bca5_new(falcon, sb, queue);
break;
case 0x0148cdec:
+ case 0x015ccf3e:
ret = msgqueue_0148cdec_new(falcon, sb, queue);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 96e0941..f0a2688 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -110,6 +110,7 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
struct nvkm_device *device = clk->base.subdev.device;
u32 ctrl = nvkm_rd32(device, pll + 0);
u32 sclk = 0, P = 1, N = 1, M = 1;
+ u32 MP;
if (!(ctrl & 0x00000008)) {
if (ctrl & 0x00000001) {
@@ -130,10 +131,12 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
sclk = read_clk(clk, 0x10 + idx, false);
}
- if (M * P)
- return sclk * N / (M * P);
+ MP = M * P;
- return 0;
+ if (!MP)
+ return 0;
+
+ return sclk * N / MP;
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 2571530..b4f22cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -22,6 +22,7 @@ nvkm-y += nvkm/subdev/fb/mcp89.o
nvkm-y += nvkm/subdev/fb/gf100.o
nvkm-y += nvkm/subdev/fb/gf108.o
nvkm-y += nvkm/subdev/fb/gk104.o
+nvkm-y += nvkm/subdev/fb/gk110.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 47d28c2..cdc4e0a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -26,6 +26,7 @@
#include <core/memory.h>
#include <core/option.h>
+#include <subdev/therm.h>
void
gf100_fb_intr(struct nvkm_fb *base)
@@ -92,6 +93,11 @@ gf100_fb_init(struct nvkm_fb *base)
if (fb->r100c10_page)
nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+ if (base->func->clkgate_pack) {
+ nvkm_therm_clkgate_init(device->therm,
+ base->func->clkgate_pack);
+ }
}
void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 0a6e8ea..48fd98e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -20,10 +20,56 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
+ * Lyude Paul
*/
+#include "gk104.h"
#include "gf100.h"
#include "ram.h"
+/*
+ *******************************************************************************
+ * PFB registers for clockgating
+ *******************************************************************************
+ */
+const struct nvkm_therm_clkgate_init
+gk104_fb_clkgate_blcg_init_unk_0[] = {
+ { 0x100d10, 1, 0x0000c244 },
+ { 0x100d30, 1, 0x0000c242 },
+ { 0x100d3c, 1, 0x00000242 },
+ { 0x100d48, 1, 0x00000242 },
+ { 0x100d1c, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_fb_clkgate_blcg_init_vm_0[] = {
+ { 0x100c98, 1, 0x00000242 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_fb_clkgate_blcg_init_main_0[] = {
+ { 0x10f000, 1, 0x00000042 },
+ { 0x17e030, 1, 0x00000044 },
+ { 0x17e040, 1, 0x00000044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_fb_clkgate_blcg_init_bcast_0[] = {
+ { 0x17ea60, 4, 0x00000044 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_pack
+gk104_fb_clkgate_pack[] = {
+ { gk104_fb_clkgate_blcg_init_unk_0 },
+ { gk104_fb_clkgate_blcg_init_vm_0 },
+ { gk104_fb_clkgate_blcg_init_main_0 },
+ { gk104_fb_clkgate_blcg_init_bcast_0 },
+ {}
+};
+
static const struct nvkm_fb_func
gk104_fb = {
.dtor = gf100_fb_dtor,
@@ -33,6 +79,7 @@ gk104_fb = {
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
.default_bigpage = 17,
+ .clkgate_pack = gk104_fb_clkgate_pack,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h
new file mode 100644
index 0000000..b3c78e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+
+#ifndef __GK104_FB_H__
+#define __GK104_FB_H__
+
+#include <subdev/therm.h>
+
+extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_unk_0[];
+extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_vm_0[];
+extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_main_0[];
+extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_bcast_0[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
new file mode 100644
index 0000000..0695e5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+#include "gf100.h"
+#include "gk104.h"
+#include "ram.h"
+#include <subdev/therm.h>
+#include <subdev/fb.h>
+
+/*
+ *******************************************************************************
+ * PFB registers for clockgating
+ *******************************************************************************
+ */
+
+static const struct nvkm_therm_clkgate_init
+gk110_fb_clkgate_blcg_init_unk_0[] = {
+ { 0x100d10, 1, 0x0000c242 },
+ { 0x100d30, 1, 0x0000c242 },
+ { 0x100d3c, 1, 0x00000242 },
+ { 0x100d48, 1, 0x0000c242 },
+ { 0x100d1c, 1, 0x00000042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_pack
+gk110_fb_clkgate_pack[] = {
+ { gk110_fb_clkgate_blcg_init_unk_0 },
+ { gk104_fb_clkgate_blcg_init_vm_0 },
+ { gk104_fb_clkgate_blcg_init_main_0 },
+ { gk104_fb_clkgate_blcg_init_bcast_0 },
+ {}
+};
+
+static const struct nvkm_fb_func
+gk110_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
+ .intr = gf100_fb_intr,
+ .ram_new = gk104_ram_new,
+ .default_bigpage = 17,
+ .clkgate_pack = gk110_fb_clkgate_pack,
+};
+
+int
+gk110_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gk110_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 9351188..414a423 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -3,6 +3,7 @@
#define __NVKM_FB_PRIV_H__
#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
#include <subdev/fb.h>
+#include <subdev/therm.h>
struct nvkm_bios;
struct nvkm_fb_func {
@@ -27,6 +28,7 @@ struct nvkm_fb_func {
int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
u8 default_bigpage;
+ const struct nvkm_therm_clkgate_pack *clkgate_pack;
};
void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index 4c07d10..18241c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -28,8 +28,16 @@ nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct pci_dev *bridge;
u32 mem, mib;
+ int domain = 0;
+ struct pci_dev *pdev = NULL;
- bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (dev_is_pci(fb->subdev.device->dev))
+ pdev = to_pci_dev(fb->subdev.device->dev);
+
+ if (pdev)
+ domain = pci_domain_nr(pdev->bus);
+
+ bridge = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 1));
if (!bridge) {
nvkm_error(&fb->subdev, "no bridge device\n");
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index fa81d0c..37b201b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -106,7 +106,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- if (IS_ERR((memory = nvkm_umem_search(client, handle)))) {
+ memory = nvkm_umem_search(client, handle);
+ if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index e35d3e1..1c12e58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -642,7 +642,7 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
else
block = (size >> page[i].shift) << page[i].shift;
} else {
- block = (size >> page[i].shift) << page[i].shift;;
+ block = (size >> page[i].shift) << page[i].shift;
}
/* Perform operation. */
@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
tail = this->addr + this->size;
if (vmm->func->page_block && next && next->page != p)
- tail = ALIGN_DOWN(addr, vmm->func->page_block);
+ tail = ALIGN_DOWN(tail, vmm->func->page_block);
if (addr <= tail && tail - addr >= size) {
rb_erase(&this->tree, &vmm->free);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 53d01fb..1dbe593 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -47,8 +47,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x00000756,
- 0x00000748,
+ 0x00000754,
+ 0x00000746,
0x00000000,
0x00000000,
0x00000000,
@@ -69,8 +69,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x0000075a,
0x00000758,
+ 0x00000756,
0x00000000,
0x00000000,
0x00000000,
@@ -91,8 +91,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000b8a,
- 0x00000a2d,
+ 0x00000b88,
+ 0x00000a2b,
0x00000000,
0x00000000,
0x00000000,
@@ -113,8 +113,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000bb3,
- 0x00000b8c,
+ 0x00000bb1,
+ 0x00000b8a,
0x00000000,
0x00000000,
0x00000000,
@@ -135,8 +135,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000bbf,
0x00000bbd,
+ 0x00000bbb,
0x00000000,
0x00000000,
0x00000000,
@@ -237,19 +237,19 @@ static uint32_t gf100_pmu_data[] = {
0x000005d3,
0x00000003,
0x00000002,
- 0x0000069d,
+ 0x0000069b,
0x00040004,
0x00000000,
- 0x000006b9,
+ 0x000006b7,
0x00010005,
0x00000000,
- 0x000006d6,
+ 0x000006d4,
0x00010006,
0x00000000,
0x0000065b,
0x00000007,
0x00000000,
- 0x000006e1,
+ 0x000006df,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -1373,432 +1373,432 @@ static uint32_t gf100_pmu_code[] = {
/* 0x065b: memx_func_wait_vblank */
0x9800f840,
0x66b00016,
- 0x130bf400,
+ 0x120bf400,
0xf40166b0,
0x0ef4060b,
/* 0x066d: memx_func_wait_vblank_head1 */
- 0x2077f12e,
- 0x070ef400,
-/* 0x0674: memx_func_wait_vblank_head0 */
- 0x000877f1,
-/* 0x0678: memx_func_wait_vblank_0 */
- 0x07c467f1,
- 0xcf0664b6,
- 0x67fd0066,
- 0xf31bf404,
-/* 0x0688: memx_func_wait_vblank_1 */
- 0x07c467f1,
- 0xcf0664b6,
- 0x67fd0066,
- 0xf30bf404,
-/* 0x0698: memx_func_wait_vblank_fini */
- 0xf80410b6,
-/* 0x069d: memx_func_wr32 */
- 0x00169800,
- 0xb6011598,
- 0x60f90810,
- 0xd0fc50f9,
- 0x21f4e0fc,
- 0x0242b640,
- 0xf8e91bf4,
-/* 0x06b9: memx_func_wait */
- 0x2c87f000,
- 0xcf0684b6,
- 0x1e980088,
- 0x011d9800,
- 0x98021c98,
- 0x10b6031b,
- 0xa321f410,
-/* 0x06d6: memx_func_delay */
- 0x1e9800f8,
- 0x0410b600,
- 0xf87e21f4,
-/* 0x06e1: memx_func_train */
-/* 0x06e3: memx_exec */
- 0xf900f800,
- 0xb9d0f9e0,
- 0xb2b902c1,
-/* 0x06ed: memx_exec_next */
- 0x00139802,
- 0xe70410b6,
- 0xe701f034,
- 0xb601e033,
- 0x30f00132,
- 0xde35980c,
- 0x12b855f9,
- 0xe41ef406,
- 0x98f10b98,
- 0xcbbbf20c,
- 0xc4b7f102,
- 0x06b4b607,
- 0xfc00bbcf,
- 0xf5e0fcd0,
- 0xf8033621,
-/* 0x0729: memx_info */
- 0x01c67000,
-/* 0x072f: memx_info_data */
- 0xf10e0bf4,
- 0xf103ccc7,
- 0xf40800b7,
-/* 0x073a: memx_info_train */
- 0xc7f10b0e,
- 0xb7f10bcc,
-/* 0x0742: memx_info_send */
- 0x21f50100,
- 0x00f80336,
-/* 0x0748: memx_recv */
- 0xf401d6b0,
- 0xd6b0980b,
- 0xd80bf400,
-/* 0x0756: memx_init */
- 0x00f800f8,
-/* 0x0758: perf_recv */
-/* 0x075a: perf_init */
+ 0x2077f02c,
+/* 0x0673: memx_func_wait_vblank_head0 */
+ 0xf0060ef4,
+/* 0x0676: memx_func_wait_vblank_0 */
+ 0x67f10877,
+ 0x64b607c4,
+ 0x0066cf06,
+ 0xf40467fd,
+/* 0x0686: memx_func_wait_vblank_1 */
+ 0x67f1f31b,
+ 0x64b607c4,
+ 0x0066cf06,
+ 0xf40467fd,
+/* 0x0696: memx_func_wait_vblank_fini */
+ 0x10b6f30b,
+/* 0x069b: memx_func_wr32 */
+ 0x9800f804,
+ 0x15980016,
+ 0x0810b601,
+ 0x50f960f9,
+ 0xe0fcd0fc,
+ 0xb64021f4,
+ 0x1bf40242,
+/* 0x06b7: memx_func_wait */
+ 0xf000f8e9,
+ 0x84b62c87,
+ 0x0088cf06,
+ 0x98001e98,
+ 0x1c98011d,
+ 0x031b9802,
+ 0xf41010b6,
+ 0x00f8a321,
+/* 0x06d4: memx_func_delay */
+ 0xb6001e98,
+ 0x21f40410,
+/* 0x06df: memx_func_train */
+ 0xf800f87e,
+/* 0x06e1: memx_exec */
+ 0xf9e0f900,
+ 0x02c1b9d0,
+/* 0x06eb: memx_exec_next */
+ 0x9802b2b9,
+ 0x10b60013,
+ 0xf034e704,
+ 0xe033e701,
+ 0x0132b601,
+ 0x980c30f0,
+ 0x55f9de35,
+ 0xf40612b8,
+ 0x0b98e41e,
+ 0xf20c98f1,
+ 0xf102cbbb,
+ 0xb607c4b7,
+ 0xbbcf06b4,
+ 0xfcd0fc00,
+ 0x3621f5e0,
+/* 0x0727: memx_info */
+ 0x7000f803,
+ 0x0bf401c6,
+/* 0x072d: memx_info_data */
+ 0xccc7f10e,
+ 0x00b7f103,
+ 0x0b0ef408,
+/* 0x0738: memx_info_train */
+ 0x0bccc7f1,
+ 0x0100b7f1,
+/* 0x0740: memx_info_send */
+ 0x033621f5,
+/* 0x0746: memx_recv */
+ 0xd6b000f8,
+ 0x980bf401,
+ 0xf400d6b0,
+ 0x00f8d80b,
+/* 0x0754: memx_init */
+/* 0x0756: perf_recv */
0x00f800f8,
-/* 0x075c: i2c_drive_scl */
- 0xf40036b0,
- 0x07f1110b,
- 0x04b607e0,
- 0x0001d006,
- 0x00f804bd,
-/* 0x0770: i2c_drive_scl_lo */
- 0x07e407f1,
- 0xd00604b6,
- 0x04bd0001,
-/* 0x077e: i2c_drive_sda */
+/* 0x0758: perf_init */
+/* 0x075a: i2c_drive_scl */
0x36b000f8,
0x110bf400,
0x07e007f1,
0xd00604b6,
- 0x04bd0002,
-/* 0x0792: i2c_drive_sda_lo */
+ 0x04bd0001,
+/* 0x076e: i2c_drive_scl_lo */
0x07f100f8,
0x04b607e4,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x077c: i2c_drive_sda */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
0x0002d006,
0x00f804bd,
-/* 0x07a0: i2c_sense_scl */
- 0xf10132f4,
- 0xb607c437,
- 0x33cf0634,
- 0x0431fd00,
- 0xf4060bf4,
-/* 0x07b6: i2c_sense_scl_done */
- 0x00f80131,
-/* 0x07b8: i2c_sense_sda */
- 0xf10132f4,
- 0xb607c437,
- 0x33cf0634,
- 0x0432fd00,
- 0xf4060bf4,
-/* 0x07ce: i2c_sense_sda_done */
- 0x00f80131,
-/* 0x07d0: i2c_raise_scl */
- 0x47f140f9,
- 0x37f00898,
- 0x5c21f501,
-/* 0x07dd: i2c_raise_scl_wait */
- 0xe8e7f107,
- 0x7e21f403,
- 0x07a021f5,
- 0xb60901f4,
- 0x1bf40142,
-/* 0x07f1: i2c_raise_scl_done */
- 0xf840fcef,
-/* 0x07f5: i2c_start */
- 0xa021f500,
- 0x0d11f407,
- 0x07b821f5,
- 0xf40611f4,
-/* 0x0806: i2c_start_rep */
- 0x37f0300e,
- 0x5c21f500,
- 0x0137f007,
- 0x077e21f5,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xd021f550,
- 0x0464b607,
-/* 0x0833: i2c_start_send */
- 0xf01f11f4,
+/* 0x0790: i2c_drive_sda_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x079e: i2c_sense_scl */
+ 0x32f400f8,
+ 0xc437f101,
+ 0x0634b607,
+ 0xfd0033cf,
+ 0x0bf40431,
+ 0x0131f406,
+/* 0x07b4: i2c_sense_scl_done */
+/* 0x07b6: i2c_sense_sda */
+ 0x32f400f8,
+ 0xc437f101,
+ 0x0634b607,
+ 0xfd0033cf,
+ 0x0bf40432,
+ 0x0131f406,
+/* 0x07cc: i2c_sense_sda_done */
+/* 0x07ce: i2c_raise_scl */
+ 0x40f900f8,
+ 0x089847f1,
+ 0xf50137f0,
+/* 0x07db: i2c_raise_scl_wait */
+ 0xf1075a21,
+ 0xf403e8e7,
+ 0x21f57e21,
+ 0x01f4079e,
+ 0x0142b609,
+/* 0x07ef: i2c_raise_scl_done */
+ 0xfcef1bf4,
+/* 0x07f3: i2c_start */
+ 0xf500f840,
+ 0xf4079e21,
+ 0x21f50d11,
+ 0x11f407b6,
+ 0x300ef406,
+/* 0x0804: i2c_start_rep */
+ 0xf50037f0,
+ 0xf0075a21,
+ 0x21f50137,
+ 0x76bb077c,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb607ce21,
+ 0x11f40464,
+/* 0x0831: i2c_start_send */
+ 0x0037f01f,
+ 0x077c21f5,
+ 0x1388e7f1,
+ 0xf07e21f4,
0x21f50037,
- 0xe7f1077e,
+ 0xe7f1075a,
0x21f41388,
- 0x0037f07e,
- 0x075c21f5,
- 0x1388e7f1,
-/* 0x084f: i2c_start_out */
- 0xf87e21f4,
-/* 0x0851: i2c_stop */
- 0x0037f000,
- 0x075c21f5,
- 0xf50037f0,
- 0xf1077e21,
- 0xf403e8e7,
- 0x37f07e21,
- 0x5c21f501,
- 0x88e7f107,
- 0x7e21f413,
+/* 0x084d: i2c_start_out */
+/* 0x084f: i2c_stop */
+ 0xf000f87e,
+ 0x21f50037,
+ 0x37f0075a,
+ 0x7c21f500,
+ 0xe8e7f107,
+ 0x7e21f403,
0xf50137f0,
- 0xf1077e21,
+ 0xf1075a21,
0xf41388e7,
- 0x00f87e21,
-/* 0x0884: i2c_bitw */
- 0x077e21f5,
- 0x03e8e7f1,
- 0xbb7e21f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x07d021f5,
- 0xf40464b6,
- 0xe7f11811,
- 0x21f41388,
- 0x0037f07e,
- 0x075c21f5,
- 0x1388e7f1,
-/* 0x08c3: i2c_bitw_out */
- 0xf87e21f4,
-/* 0x08c5: i2c_bitr */
- 0x0137f000,
- 0x077e21f5,
- 0x03e8e7f1,
- 0xbb7e21f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x07d021f5,
- 0xf40464b6,
- 0x21f51b11,
- 0x37f007b8,
- 0x5c21f500,
+ 0x37f07e21,
+ 0x7c21f501,
0x88e7f107,
0x7e21f413,
- 0xf4013cf0,
-/* 0x090a: i2c_bitr_done */
- 0x00f80131,
-/* 0x090c: i2c_get_byte */
- 0xf00057f0,
-/* 0x0912: i2c_get_byte_next */
- 0x54b60847,
- 0x0076bb01,
+/* 0x0882: i2c_bitw */
+ 0x21f500f8,
+ 0xe7f1077c,
+ 0x21f403e8,
+ 0x0076bb7e,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b608c5,
- 0x2b11f404,
- 0xb60553fd,
- 0x1bf40142,
- 0x0137f0d8,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x8421f550,
- 0x0464b608,
-/* 0x095c: i2c_get_byte_done */
-/* 0x095e: i2c_put_byte */
- 0x47f000f8,
-/* 0x0961: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
+ 0x64b607ce,
+ 0x1811f404,
+ 0x1388e7f1,
+ 0xf07e21f4,
+ 0x21f50037,
+ 0xe7f1075a,
+ 0x21f41388,
+/* 0x08c1: i2c_bitw_out */
+/* 0x08c3: i2c_bitr */
+ 0xf000f87e,
+ 0x21f50137,
+ 0xe7f1077c,
+ 0x21f403e8,
+ 0x0076bb7e,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b607ce,
+ 0x1b11f404,
+ 0x07b621f5,
+ 0xf50037f0,
+ 0xf1075a21,
+ 0xf41388e7,
+ 0x3cf07e21,
+ 0x0131f401,
+/* 0x0908: i2c_bitr_done */
+/* 0x090a: i2c_get_byte */
+ 0x57f000f8,
+ 0x0847f000,
+/* 0x0910: i2c_get_byte_next */
+ 0xbb0154b6,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x088421f5,
+ 0x08c321f5,
0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xc521f550,
- 0x0464b608,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x09b7: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x09b9: i2c_addr */
- 0x0076bb00,
+ 0x53fd2b11,
+ 0x0142b605,
+ 0xf0d81bf4,
+ 0x76bb0137,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6088221,
+/* 0x095a: i2c_get_byte_done */
+ 0x00f80464,
+/* 0x095c: i2c_put_byte */
+/* 0x095f: i2c_put_byte_next */
+ 0xb60847f0,
+ 0x54ff0142,
+ 0x0076bb38,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b607f5,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
+ 0x64b60882,
+ 0x3411f404,
+ 0xf40046b0,
+ 0x76bbd81b,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6095e21,
-/* 0x09fe: i2c_addr_done */
- 0x00f80464,
-/* 0x0a00: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b702e4,
- 0xee980d1c,
-/* 0x0a0f: i2c_acquire */
- 0xf500f800,
- 0xf40a0021,
- 0xd9f00421,
- 0x4021f403,
-/* 0x0a1e: i2c_release */
- 0x21f500f8,
- 0x21f40a00,
- 0x03daf004,
- 0xf84021f4,
-/* 0x0a2d: i2c_recv */
- 0x0132f400,
- 0xb6f8c1c7,
- 0x16b00214,
- 0x3a1ff528,
- 0xf413a001,
- 0x0032980c,
- 0x0ccc13a0,
- 0xf4003198,
- 0xd0f90231,
- 0xd0f9e0f9,
- 0x000067f1,
- 0x100063f1,
- 0xbb016792,
+ 0xb608c321,
+ 0x11f40464,
+ 0x0076bb0f,
+ 0xf40136b0,
+ 0x32f4061b,
+/* 0x09b5: i2c_put_byte_done */
+/* 0x09b7: i2c_addr */
+ 0xbb00f801,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0a0f21f5,
- 0xfc0464b6,
- 0x00d6b0d0,
- 0x00b31bf5,
- 0xbb0057f0,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x09b921f5,
- 0xf50464b6,
- 0xc700d011,
- 0x76bbe0c5,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6095e21,
- 0x11f50464,
- 0x57f000ad,
+ 0x07f321f5,
+ 0xf40464b6,
+ 0xc3e72911,
+ 0x34b6012e,
+ 0x0553fd01,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x5c21f550,
+ 0x0464b609,
+/* 0x09fc: i2c_addr_done */
+/* 0x09fe: i2c_acquire_addr */
+ 0xcec700f8,
+ 0x02e4b6f8,
+ 0x0d1ce0b7,
+ 0xf800ee98,
+/* 0x0a0d: i2c_acquire */
+ 0xfe21f500,
+ 0x0421f409,
+ 0xf403d9f0,
+ 0x00f84021,
+/* 0x0a1c: i2c_release */
+ 0x09fe21f5,
+ 0xf00421f4,
+ 0x21f403da,
+/* 0x0a2b: i2c_recv */
+ 0xf400f840,
+ 0xc1c70132,
+ 0x0214b6f8,
+ 0xf52816b0,
+ 0xa0013a1f,
+ 0x980cf413,
+ 0x13a00032,
+ 0x31980ccc,
+ 0x0231f400,
+ 0xe0f9d0f9,
+ 0x67f1d0f9,
+ 0x63f10000,
+ 0x67921000,
0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b609b9,
- 0x8a11f504,
+ 0x64b60a0d,
+ 0xb0d0fc04,
+ 0x1bf500d6,
+ 0x57f000b3,
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b6090c,
- 0x6a11f404,
- 0xbbe05bcb,
+ 0x64b609b7,
+ 0xd011f504,
+ 0xe0c5c700,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x5c21f550,
+ 0x0464b609,
+ 0x00ad11f5,
+ 0xbb0157f0,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x085121f5,
- 0xb90464b6,
- 0x74bd025b,
-/* 0x0b33: i2c_recv_not_rd08 */
- 0xb0430ef4,
- 0x1bf401d6,
- 0x0057f03d,
- 0x09b921f5,
- 0xc73311f4,
- 0x21f5e0c5,
- 0x11f4095e,
- 0x0057f029,
- 0x09b921f5,
- 0xc71f11f4,
- 0x21f5e0b5,
- 0x11f4095e,
- 0x5121f515,
- 0xc774bd08,
- 0x1bf408c5,
- 0x0232f409,
-/* 0x0b73: i2c_recv_not_wr08 */
-/* 0x0b73: i2c_recv_done */
- 0xc7030ef4,
- 0x21f5f8ce,
- 0xe0fc0a1e,
- 0x12f4d0fc,
- 0x027cb90a,
- 0x033621f5,
-/* 0x0b88: i2c_recv_exit */
-/* 0x0b8a: i2c_init */
- 0x00f800f8,
-/* 0x0b8c: test_recv */
- 0x05d817f1,
+ 0x09b721f5,
+ 0xf50464b6,
+ 0xbb008a11,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x090a21f5,
+ 0xf40464b6,
+ 0x5bcb6a11,
+ 0x0076bbe0,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b6084f,
+ 0x025bb904,
+ 0x0ef474bd,
+/* 0x0b31: i2c_recv_not_rd08 */
+ 0x01d6b043,
+ 0xf03d1bf4,
+ 0x21f50057,
+ 0x11f409b7,
+ 0xe0c5c733,
+ 0x095c21f5,
+ 0xf02911f4,
+ 0x21f50057,
+ 0x11f409b7,
+ 0xe0b5c71f,
+ 0x095c21f5,
+ 0xf51511f4,
+ 0xbd084f21,
+ 0x08c5c774,
+ 0xf4091bf4,
+ 0x0ef40232,
+/* 0x0b71: i2c_recv_not_wr08 */
+/* 0x0b71: i2c_recv_done */
+ 0xf8cec703,
+ 0x0a1c21f5,
+ 0xd0fce0fc,
+ 0xb90a12f4,
+ 0x21f5027c,
+/* 0x0b86: i2c_recv_exit */
+ 0x00f80336,
+/* 0x0b88: i2c_init */
+/* 0x0b8a: test_recv */
+ 0x17f100f8,
+ 0x14b605d8,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d807,
+ 0x01d00604,
+ 0xf104bd00,
+ 0xf1d900e7,
+ 0xf5134fe3,
+ 0xf8025621,
+/* 0x0bb1: test_init */
+ 0x00e7f100,
+ 0x5621f508,
+/* 0x0bbb: idle_recv */
+ 0xf800f802,
+/* 0x0bbd: idle */
+ 0x0031f400,
+ 0x05d417f1,
0xcf0614b6,
0x10b60011,
- 0xd807f101,
+ 0xd407f101,
0x0604b605,
0xbd0001d0,
- 0x00e7f104,
- 0x4fe3f1d9,
- 0x5621f513,
-/* 0x0bb3: test_init */
- 0xf100f802,
- 0xf50800e7,
- 0xf8025621,
-/* 0x0bbd: idle_recv */
-/* 0x0bbf: idle */
- 0xf400f800,
- 0x17f10031,
- 0x14b605d4,
- 0x0011cf06,
- 0xf10110b6,
- 0xb605d407,
- 0x01d00604,
-/* 0x0bdb: idle_loop */
- 0xf004bd00,
- 0x32f45817,
-/* 0x0be1: idle_proc */
-/* 0x0be1: idle_proc_exec */
- 0xb910f902,
- 0x21f5021e,
- 0x10fc033f,
- 0xf40911f4,
- 0x0ef40231,
-/* 0x0bf5: idle_proc_next */
- 0x5810b6ef,
- 0xf4061fb8,
- 0x02f4e61b,
- 0x0028f4dd,
- 0x00bb0ef4,
+/* 0x0bd9: idle_loop */
+ 0x5817f004,
+/* 0x0bdf: idle_proc */
+/* 0x0bdf: idle_proc_exec */
+ 0xf90232f4,
+ 0x021eb910,
+ 0x033f21f5,
+ 0x11f410fc,
+ 0x0231f409,
+/* 0x0bf3: idle_proc_next */
+ 0xb6ef0ef4,
+ 0x1fb85810,
+ 0xe61bf406,
+ 0xf4dd02f4,
+ 0x0ef40028,
+ 0x000000bb,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index c4edbc7..e0222cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -47,8 +47,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x000005f3,
- 0x000005e5,
+ 0x000005ee,
+ 0x000005e0,
0x00000000,
0x00000000,
0x00000000,
@@ -69,8 +69,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x000005f7,
- 0x000005f5,
+ 0x000005f2,
+ 0x000005f0,
0x00000000,
0x00000000,
0x00000000,
@@ -91,8 +91,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x000009f8,
- 0x000008a2,
+ 0x000009f3,
+ 0x0000089d,
0x00000000,
0x00000000,
0x00000000,
@@ -113,8 +113,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000a16,
- 0x000009fa,
+ 0x00000a11,
+ 0x000009f5,
0x00000000,
0x00000000,
0x00000000,
@@ -135,8 +135,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000a21,
- 0x00000a1f,
+ 0x00000a1c,
+ 0x00000a1a,
0x00000000,
0x00000000,
0x00000000,
@@ -234,22 +234,22 @@ static uint32_t gk208_pmu_data[] = {
/* 0x037c: memx_func_next */
0x00000002,
0x00000000,
- 0x000004cf,
+ 0x000004cc,
0x00000003,
0x00000002,
- 0x00000546,
+ 0x00000541,
0x00040004,
0x00000000,
- 0x00000563,
+ 0x0000055e,
0x00010005,
0x00000000,
- 0x0000057d,
+ 0x00000578,
0x00010006,
0x00000000,
- 0x00000541,
+ 0x0000053c,
0x00000007,
0x00000000,
- 0x00000589,
+ 0x00000584,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -1239,454 +1239,454 @@ static uint32_t gk208_pmu_code[] = {
0x0001f604,
0x00f804bd,
/* 0x045c: memx_func_enter */
- 0x162067f1,
- 0xf55d77f1,
- 0x047e6eb2,
- 0xd8b20000,
- 0xf90487fd,
- 0xfc80f960,
- 0x7ee0fcd0,
- 0x0700002d,
- 0x7e6eb2fe,
+ 0x47162046,
+ 0x6eb2f55d,
+ 0x0000047e,
+ 0x87fdd8b2,
+ 0xf960f904,
+ 0xfcd0fc80,
+ 0x002d7ee0,
+ 0xb2fe0700,
+ 0x00047e6e,
+ 0xfdd8b200,
+ 0x60f90487,
+ 0xd0fc80f9,
+ 0x2d7ee0fc,
+ 0xf0460000,
+ 0x7e6eb226,
0xb2000004,
0x0487fdd8,
0x80f960f9,
0xe0fcd0fc,
0x00002d7e,
- 0x26f067f1,
- 0x047e6eb2,
- 0xd8b20000,
- 0xf90487fd,
- 0xfc80f960,
- 0x7ee0fcd0,
- 0x0600002d,
- 0x07e04004,
- 0xbd0006f6,
-/* 0x04b9: memx_func_enter_wait */
- 0x07c04604,
- 0xf00066cf,
- 0x0bf40464,
- 0xcf2c06f7,
- 0x06b50066,
-/* 0x04cf: memx_func_leave */
- 0x0600f8f1,
- 0x0066cf2c,
- 0x06f206b5,
- 0x07e44004,
- 0xbd0006f6,
-/* 0x04e1: memx_func_leave_wait */
- 0x07c04604,
- 0xf00066cf,
- 0x1bf40464,
- 0xf067f1f7,
+ 0xe0400406,
+ 0x0006f607,
+/* 0x04b6: memx_func_enter_wait */
+ 0xc04604bd,
+ 0x0066cf07,
+ 0xf40464f0,
+ 0x2c06f70b,
+ 0xb50066cf,
+ 0x00f8f106,
+/* 0x04cc: memx_func_leave */
+ 0x66cf2c06,
+ 0xf206b500,
+ 0xe4400406,
+ 0x0006f607,
+/* 0x04de: memx_func_leave_wait */
+ 0xc04604bd,
+ 0x0066cf07,
+ 0xf40464f0,
+ 0xf046f71b,
0xb2010726,
0x00047e6e,
0xfdd8b200,
0x60f90587,
0xd0fc80f9,
0x2d7ee0fc,
- 0x67f10000,
- 0x6eb21620,
- 0x0000047e,
- 0x87fdd8b2,
- 0xf960f905,
- 0xfcd0fc80,
- 0x002d7ee0,
- 0x0aa24700,
- 0x047e6eb2,
- 0xd8b20000,
- 0xf90587fd,
- 0xfc80f960,
- 0x7ee0fcd0,
- 0xf800002d,
-/* 0x0541: memx_func_wait_vblank */
+ 0x20460000,
+ 0x7e6eb216,
+ 0xb2000004,
+ 0x0587fdd8,
+ 0x80f960f9,
+ 0xe0fcd0fc,
+ 0x00002d7e,
+ 0xb20aa247,
+ 0x00047e6e,
+ 0xfdd8b200,
+ 0x60f90587,
+ 0xd0fc80f9,
+ 0x2d7ee0fc,
+ 0x00f80000,
+/* 0x053c: memx_func_wait_vblank */
+ 0xf80410b6,
+/* 0x0541: memx_func_wr32 */
+ 0x00169800,
+ 0xb6011598,
+ 0x60f90810,
+ 0xd0fc50f9,
+ 0x2d7ee0fc,
+ 0x42b60000,
+ 0xe81bf402,
+/* 0x055e: memx_func_wait */
+ 0x2c0800f8,
+ 0x980088cf,
+ 0x1d98001e,
+ 0x021c9801,
+ 0xb6031b98,
+ 0x747e1010,
+ 0x00f80000,
+/* 0x0578: memx_func_delay */
+ 0xb6001e98,
+ 0x587e0410,
+ 0x00f80000,
+/* 0x0584: memx_func_train */
+/* 0x0586: memx_exec */
+ 0xe0f900f8,
+ 0xc1b2d0f9,
+/* 0x058e: memx_exec_next */
+ 0x1398b2b2,
0x0410b600,
-/* 0x0546: memx_func_wr32 */
- 0x169800f8,
- 0x01159800,
- 0xf90810b6,
- 0xfc50f960,
+ 0x01f034e7,
+ 0x01e033e7,
+ 0xf00132b6,
+ 0x35980c30,
+ 0xa655f9de,
+ 0xe51ef412,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0x07c44b02,
+ 0xfc00bbcf,
0x7ee0fcd0,
- 0xb600002d,
- 0x1bf40242,
-/* 0x0563: memx_func_wait */
- 0x0800f8e8,
- 0x0088cf2c,
- 0x98001e98,
- 0x1c98011d,
- 0x031b9802,
- 0x7e1010b6,
- 0xf8000074,
-/* 0x057d: memx_func_delay */
- 0x001e9800,
- 0x7e0410b6,
- 0xf8000058,
-/* 0x0589: memx_func_train */
-/* 0x058b: memx_exec */
- 0xf900f800,
- 0xb2d0f9e0,
-/* 0x0593: memx_exec_next */
- 0x98b2b2c1,
- 0x10b60013,
- 0xf034e704,
- 0xe033e701,
- 0x0132b601,
- 0x980c30f0,
- 0x55f9de35,
- 0x1ef412a6,
- 0xf10b98e5,
- 0xbbf20c98,
- 0xc44b02cb,
- 0x00bbcf07,
- 0xe0fcd0fc,
- 0x00029f7e,
-/* 0x05ca: memx_info */
- 0xc67000f8,
- 0x0c0bf401,
-/* 0x05d0: memx_info_data */
- 0x4b03cc4c,
- 0x0ef40800,
-/* 0x05d9: memx_info_train */
- 0x0bcc4c09,
-/* 0x05df: memx_info_send */
- 0x7e01004b,
0xf800029f,
-/* 0x05e5: memx_recv */
- 0x01d6b000,
- 0xb0a30bf4,
- 0x0bf400d6,
-/* 0x05f3: memx_init */
- 0xf800f8dc,
-/* 0x05f5: perf_recv */
-/* 0x05f7: perf_init */
- 0xf800f800,
-/* 0x05f9: i2c_drive_scl */
- 0x0036b000,
- 0x400d0bf4,
- 0x01f607e0,
- 0xf804bd00,
-/* 0x0609: i2c_drive_scl_lo */
- 0x07e44000,
- 0xbd0001f6,
-/* 0x0613: i2c_drive_sda */
- 0xb000f804,
- 0x0bf40036,
- 0x07e0400d,
- 0xbd0002f6,
-/* 0x0623: i2c_drive_sda_lo */
- 0x4000f804,
- 0x02f607e4,
- 0xf804bd00,
-/* 0x062d: i2c_sense_scl */
- 0x0132f400,
- 0xcf07c443,
- 0x31fd0033,
- 0x060bf404,
-/* 0x063f: i2c_sense_scl_done */
- 0xf80131f4,
-/* 0x0641: i2c_sense_sda */
- 0x0132f400,
- 0xcf07c443,
- 0x32fd0033,
- 0x060bf404,
-/* 0x0653: i2c_sense_sda_done */
- 0xf80131f4,
-/* 0x0655: i2c_raise_scl */
- 0x4440f900,
- 0x01030898,
- 0x0005f97e,
-/* 0x0660: i2c_raise_scl_wait */
- 0x7e03e84e,
- 0x7e000058,
- 0xf400062d,
- 0x42b60901,
- 0xef1bf401,
-/* 0x0674: i2c_raise_scl_done */
- 0x00f840fc,
-/* 0x0678: i2c_start */
- 0x00062d7e,
- 0x7e0d11f4,
- 0xf4000641,
- 0x0ef40611,
-/* 0x0689: i2c_start_rep */
- 0x7e00032e,
- 0x030005f9,
- 0x06137e01,
+/* 0x05c5: memx_info */
+ 0x01c67000,
+/* 0x05cb: memx_info_data */
+ 0x4c0c0bf4,
+ 0x004b03cc,
+ 0x090ef408,
+/* 0x05d4: memx_info_train */
+ 0x4b0bcc4c,
+/* 0x05da: memx_info_send */
+ 0x9f7e0100,
+ 0x00f80002,
+/* 0x05e0: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0a30b,
+ 0xdc0bf400,
+/* 0x05ee: memx_init */
+ 0x00f800f8,
+/* 0x05f0: perf_recv */
+/* 0x05f2: perf_init */
+ 0x00f800f8,
+/* 0x05f4: i2c_drive_scl */
+ 0xf40036b0,
+ 0xe0400d0b,
+ 0x0001f607,
+ 0x00f804bd,
+/* 0x0604: i2c_drive_scl_lo */
+ 0xf607e440,
+ 0x04bd0001,
+/* 0x060e: i2c_drive_sda */
+ 0x36b000f8,
+ 0x0d0bf400,
+ 0xf607e040,
+ 0x04bd0002,
+/* 0x061e: i2c_drive_sda_lo */
+ 0xe44000f8,
+ 0x0002f607,
+ 0x00f804bd,
+/* 0x0628: i2c_sense_scl */
+ 0x430132f4,
+ 0x33cf07c4,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x063a: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x063c: i2c_sense_sda */
+ 0x430132f4,
+ 0x33cf07c4,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x064e: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x0650: i2c_raise_scl */
+ 0x984440f9,
+ 0x7e010308,
+/* 0x065b: i2c_raise_scl_wait */
+ 0x4e0005f4,
+ 0x587e03e8,
+ 0x287e0000,
+ 0x01f40006,
+ 0x0142b609,
+/* 0x066f: i2c_raise_scl_done */
+ 0xfcef1bf4,
+/* 0x0673: i2c_start */
+ 0x7e00f840,
+ 0xf4000628,
+ 0x3c7e0d11,
+ 0x11f40006,
+ 0x2e0ef406,
+/* 0x0684: i2c_start_rep */
+ 0xf47e0003,
+ 0x01030005,
+ 0x00060e7e,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x06507e50,
+ 0x0464b600,
+/* 0x06af: i2c_start_send */
+ 0x031d11f4,
+ 0x060e7e00,
+ 0x13884e00,
+ 0x0000587e,
+ 0xf47e0003,
+ 0x884e0005,
+ 0x00587e13,
+/* 0x06c9: i2c_start_out */
+/* 0x06cb: i2c_stop */
+ 0x0300f800,
+ 0x05f47e00,
+ 0x7e000300,
+ 0x4e00060e,
+ 0x587e03e8,
+ 0x01030000,
+ 0x0005f47e,
+ 0x7e13884e,
+ 0x03000058,
+ 0x060e7e01,
+ 0x13884e00,
+ 0x0000587e,
+/* 0x06fa: i2c_bitw */
+ 0x0e7e00f8,
+ 0xe84e0006,
+ 0x00587e03,
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0x557e50fc,
+ 0x507e50fc,
0x64b60006,
- 0x1d11f404,
-/* 0x06b4: i2c_start_send */
- 0x137e0003,
- 0x884e0006,
- 0x00587e13,
- 0x7e000300,
- 0x4e0005f9,
- 0x587e1388,
-/* 0x06ce: i2c_start_out */
- 0x00f80000,
-/* 0x06d0: i2c_stop */
- 0xf97e0003,
- 0x00030005,
- 0x0006137e,
- 0x7e03e84e,
+ 0x1711f404,
+ 0x7e13884e,
0x03000058,
- 0x05f97e01,
+ 0x05f47e00,
0x13884e00,
0x0000587e,
- 0x137e0103,
- 0x884e0006,
- 0x00587e13,
-/* 0x06ff: i2c_bitw */
- 0x7e00f800,
- 0x4e000613,
- 0x587e03e8,
- 0x76bb0000,
+/* 0x0738: i2c_bitw_out */
+/* 0x073a: i2c_bitr */
+ 0x010300f8,
+ 0x00060e7e,
+ 0x7e03e84e,
+ 0xbb000058,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0006507e,
+ 0xf40464b6,
+ 0x3c7e1a11,
+ 0x00030006,
+ 0x0005f47e,
+ 0x7e13884e,
+ 0xf0000058,
+ 0x31f4013c,
+/* 0x077d: i2c_bitr_done */
+/* 0x077f: i2c_get_byte */
+ 0x0500f801,
+/* 0x0783: i2c_get_byte_next */
+ 0xb6080400,
+ 0x76bb0154,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000655,
+ 0xb600073a,
0x11f40464,
- 0x13884e17,
- 0x0000587e,
- 0xf97e0003,
- 0x884e0005,
- 0x00587e13,
-/* 0x073d: i2c_bitw_out */
-/* 0x073f: i2c_bitr */
- 0x0300f800,
- 0x06137e01,
- 0x03e84e00,
- 0x0000587e,
+ 0x0553fd2a,
+ 0xf40142b6,
+ 0x0103d81b,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x06557e50,
+ 0x06fa7e50,
0x0464b600,
- 0x7e1a11f4,
- 0x03000641,
- 0x05f97e00,
- 0x13884e00,
- 0x0000587e,
- 0xf4013cf0,
-/* 0x0782: i2c_bitr_done */
- 0x00f80131,
-/* 0x0784: i2c_get_byte */
- 0x08040005,
-/* 0x0788: i2c_get_byte_next */
- 0xbb0154b6,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x00073f7e,
- 0xf40464b6,
- 0x53fd2a11,
- 0x0142b605,
- 0x03d81bf4,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xff7e50fc,
- 0x64b60006,
-/* 0x07d1: i2c_get_byte_done */
-/* 0x07d3: i2c_put_byte */
- 0x0400f804,
-/* 0x07d5: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
+/* 0x07cc: i2c_get_byte_done */
+/* 0x07ce: i2c_put_byte */
+ 0x080400f8,
+/* 0x07d0: i2c_put_byte_next */
+ 0xff0142b6,
+ 0x76bb3854,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb60006fa,
+ 0x11f40464,
+ 0x0046b034,
+ 0xbbd81bf4,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0006ff7e,
+ 0x00073a7e,
0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
+ 0x76bb0f11,
+ 0x0136b000,
+ 0xf4061bf4,
+/* 0x0826: i2c_put_byte_done */
+ 0x00f80132,
+/* 0x0828: i2c_addr */
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x073f7e50,
+ 0x06737e50,
0x0464b600,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x082b: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x082d: i2c_addr */
- 0x0076bb00,
+ 0xe72911f4,
+ 0xb6012ec3,
+ 0x53fd0134,
+ 0x0076bb05,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0x787e50fc,
- 0x64b60006,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0x7e50fc04,
- 0xb60007d3,
-/* 0x0872: i2c_addr_done */
- 0x00f80464,
-/* 0x0874: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b705e4,
- 0x00f8d014,
-/* 0x0880: i2c_acquire */
- 0x0008747e,
+ 0xce7e50fc,
+ 0x64b60007,
+/* 0x086d: i2c_addr_done */
+/* 0x086f: i2c_acquire_addr */
+ 0xc700f804,
+ 0xe4b6f8ce,
+ 0x14e0b705,
+/* 0x087b: i2c_acquire */
+ 0x7e00f8d0,
+ 0x7e00086f,
+ 0xf0000004,
+ 0x2d7e03d9,
+ 0x00f80000,
+/* 0x088c: i2c_release */
+ 0x00086f7e,
0x0000047e,
- 0x7e03d9f0,
+ 0x7e03daf0,
0xf800002d,
-/* 0x0891: i2c_release */
- 0x08747e00,
- 0x00047e00,
- 0x03daf000,
- 0x00002d7e,
-/* 0x08a2: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13b80134,
- 0x98000cf4,
- 0x13b80032,
- 0x98000ccc,
- 0x31f40031,
- 0xf9d0f902,
- 0xd6d0f9e0,
- 0x10000000,
- 0xbb016792,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x0008807e,
- 0xfc0464b6,
- 0x00d6b0d0,
- 0x00b01bf5,
- 0x76bb0005,
+/* 0x089d: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x341ff528,
+ 0xf413b801,
+ 0x3298000c,
+ 0xcc13b800,
+ 0x3198000c,
+ 0x0231f400,
+ 0xe0f9d0f9,
+ 0x00d6d0f9,
+ 0x92100000,
+ 0x76bb0167,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb600082d,
- 0x11f50464,
- 0xc5c700cc,
- 0x0076bbe0,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xd37e50fc,
- 0x64b60007,
- 0xa911f504,
- 0xbb010500,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x00082d7e,
- 0xf50464b6,
- 0xbb008711,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x0007847e,
- 0xf40464b6,
- 0x5bcb6711,
- 0x0076bbe0,
+ 0xb600087b,
+ 0xd0fc0464,
+ 0xf500d6b0,
+ 0x0500b01b,
+ 0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0xd07e50fc,
- 0x64b60006,
- 0xbd5bb204,
- 0x410ef474,
-/* 0x09a4: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x00053b1b,
- 0x00082d7e,
- 0xc73211f4,
- 0xd37ee0c5,
- 0x11f40007,
- 0x7e000528,
- 0xf400082d,
- 0xb5c71f11,
- 0x07d37ee0,
- 0x1511f400,
- 0x0006d07e,
- 0xc5c774bd,
- 0x091bf408,
- 0xf40232f4,
-/* 0x09e2: i2c_recv_not_wr08 */
-/* 0x09e2: i2c_recv_done */
- 0xcec7030e,
- 0x08917ef8,
- 0xfce0fc00,
- 0x0912f4d0,
- 0x9f7e7cb2,
-/* 0x09f6: i2c_recv_exit */
- 0x00f80002,
-/* 0x09f8: i2c_init */
-/* 0x09fa: test_recv */
- 0x584100f8,
- 0x0011cf04,
- 0x400110b6,
- 0x01f60458,
- 0xde04bd00,
- 0x134fd900,
- 0x0001de7e,
-/* 0x0a16: test_init */
- 0x004e00f8,
- 0x01de7e08,
-/* 0x0a1f: idle_recv */
+ 0x287e50fc,
+ 0x64b60008,
+ 0xcc11f504,
+ 0xe0c5c700,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x07ce7e50,
+ 0x0464b600,
+ 0x00a911f5,
+ 0x76bb0105,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb6000828,
+ 0x11f50464,
+ 0x76bb0087,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb600077f,
+ 0x11f40464,
+ 0xe05bcb67,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x06cb7e50,
+ 0x0464b600,
+ 0x74bd5bb2,
+/* 0x099f: i2c_recv_not_rd08 */
+ 0xb0410ef4,
+ 0x1bf401d6,
+ 0x7e00053b,
+ 0xf4000828,
+ 0xc5c73211,
+ 0x07ce7ee0,
+ 0x2811f400,
+ 0x287e0005,
+ 0x11f40008,
+ 0xe0b5c71f,
+ 0x0007ce7e,
+ 0x7e1511f4,
+ 0xbd0006cb,
+ 0x08c5c774,
+ 0xf4091bf4,
+ 0x0ef40232,
+/* 0x09dd: i2c_recv_not_wr08 */
+/* 0x09dd: i2c_recv_done */
+ 0xf8cec703,
+ 0x00088c7e,
+ 0xd0fce0fc,
+ 0xb20912f4,
+ 0x029f7e7c,
+/* 0x09f1: i2c_recv_exit */
+/* 0x09f3: i2c_init */
0xf800f800,
-/* 0x0a21: idle */
- 0x0031f400,
- 0xcf045441,
- 0x10b60011,
- 0x04544001,
- 0xbd0001f6,
-/* 0x0a35: idle_loop */
- 0xf4580104,
-/* 0x0a3a: idle_proc */
-/* 0x0a3a: idle_proc_exec */
- 0x10f90232,
- 0xa87e1eb2,
- 0x10fc0002,
- 0xf40911f4,
- 0x0ef40231,
-/* 0x0a4d: idle_proc_next */
- 0x5810b6f0,
- 0x1bf41fa6,
- 0xe002f4e8,
- 0xf40028f4,
- 0x0000c60e,
+/* 0x09f5: test_recv */
+ 0x04584100,
+ 0xb60011cf,
+ 0x58400110,
+ 0x0001f604,
+ 0x00de04bd,
+ 0x7e134fd9,
+ 0xf80001de,
+/* 0x0a11: test_init */
+ 0x08004e00,
+ 0x0001de7e,
+/* 0x0a1a: idle_recv */
+ 0x00f800f8,
+/* 0x0a1c: idle */
+ 0x410031f4,
+ 0x11cf0454,
+ 0x0110b600,
+ 0xf6045440,
+ 0x04bd0001,
+/* 0x0a30: idle_loop */
+ 0x32f45801,
+/* 0x0a35: idle_proc */
+/* 0x0a35: idle_proc_exec */
+ 0xb210f902,
+ 0x02a87e1e,
+ 0xf410fc00,
+ 0x31f40911,
+ 0xf00ef402,
+/* 0x0a48: idle_proc_next */
+ 0xa65810b6,
+ 0xe81bf41f,
+ 0xf4e002f4,
+ 0x0ef40028,
+ 0x000000c6,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index 6a2572e..defddf59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -47,8 +47,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000083a,
- 0x0000082c,
+ 0x00000833,
+ 0x00000825,
0x00000000,
0x00000000,
0x00000000,
@@ -69,8 +69,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x0000083e,
- 0x0000083c,
+ 0x00000837,
+ 0x00000835,
0x00000000,
0x00000000,
0x00000000,
@@ -91,8 +91,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000c6e,
- 0x00000b11,
+ 0x00000c67,
+ 0x00000b0a,
0x00000000,
0x00000000,
0x00000000,
@@ -113,8 +113,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000c97,
- 0x00000c70,
+ 0x00000c90,
+ 0x00000c69,
0x00000000,
0x00000000,
0x00000000,
@@ -135,8 +135,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000ca3,
- 0x00000ca1,
+ 0x00000c9c,
+ 0x00000c9a,
0x00000000,
0x00000000,
0x00000000,
@@ -234,22 +234,22 @@ static uint32_t gt215_pmu_data[] = {
/* 0x037c: memx_func_next */
0x00000002,
0x00000000,
- 0x000005a0,
+ 0x0000059f,
0x00000003,
0x00000002,
- 0x00000632,
+ 0x0000062f,
0x00040004,
0x00000000,
- 0x0000064e,
+ 0x0000064b,
0x00010005,
0x00000000,
- 0x0000066b,
+ 0x00000668,
0x00010006,
0x00000000,
- 0x000005f0,
+ 0x000005ef,
0x00000007,
0x00000000,
- 0x00000676,
+ 0x00000673,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -1305,560 +1305,560 @@ static uint32_t gt215_pmu_code[] = {
0x67f102d7,
0x63f1fffc,
0x76fdffff,
- 0x0267f104,
- 0x0576fd00,
- 0x70f980f9,
- 0xe0fcd0fc,
- 0xf04021f4,
+ 0x0267f004,
+ 0xf90576fd,
+ 0xfc70f980,
+ 0xf4e0fcd0,
+ 0x67f04021,
+ 0xe007f104,
+ 0x0604b607,
+ 0xbd0006d0,
+/* 0x0581: memx_func_enter_wait */
+ 0xc067f104,
+ 0x0664b607,
+ 0xf00066cf,
+ 0x0bf40464,
+ 0x2c67f0f3,
+ 0xcf0664b6,
+ 0x06800066,
+/* 0x059f: memx_func_leave */
+ 0xf000f8f1,
+ 0x64b62c67,
+ 0x0066cf06,
+ 0xf0f20680,
0x07f10467,
- 0x04b607e0,
+ 0x04b607e4,
0x0006d006,
-/* 0x0582: memx_func_enter_wait */
+/* 0x05ba: memx_func_leave_wait */
0x67f104bd,
0x64b607c0,
0x0066cf06,
0xf40464f0,
- 0x67f0f30b,
- 0x0664b62c,
- 0x800066cf,
- 0x00f8f106,
-/* 0x05a0: memx_func_leave */
- 0xb62c67f0,
- 0x66cf0664,
- 0xf2068000,
- 0xf10467f0,
- 0xb607e407,
- 0x06d00604,
-/* 0x05bb: memx_func_leave_wait */
- 0xf104bd00,
- 0xb607c067,
- 0x66cf0664,
- 0x0464f000,
- 0xf1f31bf4,
- 0xb9161087,
- 0x21f4028e,
- 0x02d7b904,
- 0xffcc67f1,
- 0xffff63f1,
- 0xf90476fd,
- 0xfc70f980,
- 0xf4e0fcd0,
- 0x00f84021,
-/* 0x05f0: memx_func_wait_vblank */
- 0xb0001698,
- 0x0bf40066,
- 0x0166b013,
- 0xf4060bf4,
-/* 0x0602: memx_func_wait_vblank_head1 */
- 0x77f12e0e,
- 0x0ef40020,
-/* 0x0609: memx_func_wait_vblank_head0 */
- 0x0877f107,
-/* 0x060d: memx_func_wait_vblank_0 */
- 0xc467f100,
- 0x0664b607,
- 0xfd0066cf,
- 0x1bf40467,
-/* 0x061d: memx_func_wait_vblank_1 */
- 0xc467f1f3,
- 0x0664b607,
- 0xfd0066cf,
- 0x0bf40467,
-/* 0x062d: memx_func_wait_vblank_fini */
- 0x0410b6f3,
-/* 0x0632: memx_func_wr32 */
- 0x169800f8,
- 0x01159800,
- 0xf90810b6,
- 0xfc50f960,
- 0xf4e0fcd0,
- 0x42b64021,
- 0xe91bf402,
-/* 0x064e: memx_func_wait */
- 0x87f000f8,
- 0x0684b62c,
- 0x980088cf,
- 0x1d98001e,
- 0x021c9801,
- 0xb6031b98,
- 0x21f41010,
-/* 0x066b: memx_func_delay */
- 0x9800f8a3,
- 0x10b6001e,
- 0x7e21f404,
-/* 0x0676: memx_func_train */
- 0x57f100f8,
- 0x77f10003,
- 0x97f10000,
- 0x93f00000,
- 0x029eb970,
- 0xb90421f4,
- 0xe7f102d8,
- 0x21f42710,
-/* 0x0695: memx_func_train_loop_outer */
- 0x0158e07e,
- 0x0083f101,
- 0xe097f102,
- 0x1193f011,
- 0x80f990f9,
+ 0x87f1f31b,
+ 0x8eb91610,
+ 0x0421f402,
+ 0xf102d7b9,
+ 0xf1ffcc67,
+ 0xfdffff63,
+ 0x80f90476,
+ 0xd0fc70f9,
+ 0x21f4e0fc,
+/* 0x05ef: memx_func_wait_vblank */
+ 0x9800f840,
+ 0x66b00016,
+ 0x120bf400,
+ 0xf40166b0,
+ 0x0ef4060b,
+/* 0x0601: memx_func_wait_vblank_head1 */
+ 0x2077f02c,
+/* 0x0607: memx_func_wait_vblank_head0 */
+ 0xf0060ef4,
+/* 0x060a: memx_func_wait_vblank_0 */
+ 0x67f10877,
+ 0x64b607c4,
+ 0x0066cf06,
+ 0xf40467fd,
+/* 0x061a: memx_func_wait_vblank_1 */
+ 0x67f1f31b,
+ 0x64b607c4,
+ 0x0066cf06,
+ 0xf40467fd,
+/* 0x062a: memx_func_wait_vblank_fini */
+ 0x10b6f30b,
+/* 0x062f: memx_func_wr32 */
+ 0x9800f804,
+ 0x15980016,
+ 0x0810b601,
+ 0x50f960f9,
0xe0fcd0fc,
- 0xf94021f4,
- 0x0067f150,
-/* 0x06b5: memx_func_train_loop_inner */
- 0x1187f100,
- 0x9068ff11,
- 0xfd109894,
- 0x97f10589,
- 0x93f00720,
- 0xf990f910,
- 0xfcd0fc80,
- 0x4021f4e0,
- 0x008097f1,
- 0xb91093f0,
- 0x21f4029e,
- 0x02d8b904,
- 0xf92088c5,
+ 0xb64021f4,
+ 0x1bf40242,
+/* 0x064b: memx_func_wait */
+ 0xf000f8e9,
+ 0x84b62c87,
+ 0x0088cf06,
+ 0x98001e98,
+ 0x1c98011d,
+ 0x031b9802,
+ 0xf41010b6,
+ 0x00f8a321,
+/* 0x0668: memx_func_delay */
+ 0xb6001e98,
+ 0x21f40410,
+/* 0x0673: memx_func_train */
+ 0xf000f87e,
+ 0x77f00357,
+ 0x0097f100,
+ 0x7093f000,
+ 0xf4029eb9,
+ 0xd8b90421,
+ 0x10e7f102,
+ 0x7e21f427,
+/* 0x0690: memx_func_train_loop_outer */
+ 0x010158e0,
+ 0x020083f1,
+ 0x11e097f1,
+ 0xf91193f0,
+ 0xfc80f990,
+ 0xf4e0fcd0,
+ 0x50f94021,
+/* 0x06af: memx_func_train_loop_inner */
+ 0xf10067f0,
+ 0xff111187,
+ 0x98949068,
+ 0x0589fd10,
+ 0x072097f1,
+ 0xf91093f0,
0xfc80f990,
0xf4e0fcd0,
0x97f14021,
- 0x93f0053c,
- 0x0287f110,
- 0x0083f130,
- 0xf990f980,
+ 0x93f00080,
+ 0x029eb910,
+ 0xb90421f4,
+ 0x88c502d8,
+ 0xf990f920,
0xfcd0fc80,
0x4021f4e0,
- 0x0560e7f1,
- 0xf110e3f0,
- 0xf10000d7,
- 0x908000d3,
- 0xb7f100dc,
- 0xb3f08480,
- 0xa321f41e,
- 0x000057f1,
- 0xffff97f1,
- 0x830093f1,
-/* 0x0734: memx_func_train_loop_4x */
- 0x0080a7f1,
- 0xb910a3f0,
- 0x21f402ae,
- 0x02d8b904,
- 0xffdfb7f1,
- 0xffffb3f1,
- 0xf9048bfd,
- 0xfc80f9a0,
+ 0x053c97f1,
+ 0xf11093f0,
+ 0xf1300287,
+ 0xf9800083,
+ 0xfc80f990,
0xf4e0fcd0,
- 0xa7f14021,
- 0xa3f0053c,
- 0x0287f110,
- 0x0083f130,
- 0xf9a0f980,
- 0xfcd0fc80,
- 0x4021f4e0,
- 0x0560e7f1,
- 0xf110e3f0,
- 0xf10000d7,
- 0xb98000d3,
- 0xb7f102dc,
- 0xb3f02710,
- 0xa321f400,
- 0xf402eeb9,
- 0xddb90421,
- 0x949dff02,
+ 0xe7f14021,
+ 0xe3f00560,
+ 0x00d7f110,
+ 0x00d3f100,
+ 0x00dc9080,
+ 0x8480b7f1,
+ 0xf41eb3f0,
+ 0x57f0a321,
+ 0xff97f100,
+ 0x0093f1ff,
+/* 0x072d: memx_func_train_loop_4x */
+ 0x80a7f183,
+ 0x10a3f000,
+ 0xf402aeb9,
+ 0xd8b90421,
+ 0xdfb7f102,
+ 0xffb3f1ff,
+ 0x048bfdff,
+ 0x80f9a0f9,
+ 0xe0fcd0fc,
+ 0xf14021f4,
+ 0xf0053ca7,
+ 0x87f110a3,
+ 0x83f13002,
+ 0xa0f98000,
+ 0xd0fc80f9,
+ 0x21f4e0fc,
+ 0x60e7f140,
+ 0x10e3f005,
+ 0x0000d7f1,
+ 0x8000d3f1,
+ 0xf102dcb9,
+ 0xf02710b7,
+ 0x21f400b3,
+ 0x02eeb9a3,
+ 0xb90421f4,
+ 0x9dff02dd,
+ 0x0150b694,
+ 0xf4045670,
+ 0x7aa0921e,
+ 0xa9800bcc,
+ 0x0160b600,
+ 0x700470b6,
+ 0x1ef51066,
+ 0x50fcff01,
0x700150b6,
- 0x1ef40456,
- 0xcc7aa092,
- 0x00a9800b,
- 0xb60160b6,
- 0x66700470,
- 0x001ef510,
- 0xb650fcff,
- 0x56700150,
- 0xd41ef507,
-/* 0x07c7: memx_exec */
- 0xf900f8fe,
- 0xb9d0f9e0,
- 0xb2b902c1,
-/* 0x07d1: memx_exec_next */
- 0x00139802,
- 0xe70410b6,
- 0xe701f034,
- 0xb601e033,
- 0x30f00132,
- 0xde35980c,
- 0x12b855f9,
- 0xe41ef406,
- 0x98f10b98,
- 0xcbbbf20c,
- 0xc4b7f102,
- 0x06b4b607,
- 0xfc00bbcf,
- 0xf5e0fcd0,
+ 0x1ef50756,
+ 0x00f8fed6,
+/* 0x07c0: memx_exec */
+ 0xd0f9e0f9,
+ 0xb902c1b9,
+/* 0x07ca: memx_exec_next */
+ 0x139802b2,
+ 0x0410b600,
+ 0x01f034e7,
+ 0x01e033e7,
+ 0xf00132b6,
+ 0x35980c30,
+ 0xb855f9de,
+ 0x1ef40612,
+ 0xf10b98e4,
+ 0xbbf20c98,
+ 0xb7f102cb,
+ 0xb4b607c4,
+ 0x00bbcf06,
+ 0xe0fcd0fc,
+ 0x033621f5,
+/* 0x0806: memx_info */
+ 0xc67000f8,
+ 0x0e0bf401,
+/* 0x080c: memx_info_data */
+ 0x03ccc7f1,
+ 0x0800b7f1,
+/* 0x0817: memx_info_train */
+ 0xf10b0ef4,
+ 0xf10bccc7,
+/* 0x081f: memx_info_send */
+ 0xf50100b7,
0xf8033621,
-/* 0x080d: memx_info */
- 0x01c67000,
-/* 0x0813: memx_info_data */
- 0xf10e0bf4,
- 0xf103ccc7,
- 0xf40800b7,
-/* 0x081e: memx_info_train */
- 0xc7f10b0e,
- 0xb7f10bcc,
-/* 0x0826: memx_info_send */
- 0x21f50100,
- 0x00f80336,
-/* 0x082c: memx_recv */
- 0xf401d6b0,
- 0xd6b0980b,
- 0xd80bf400,
-/* 0x083a: memx_init */
- 0x00f800f8,
-/* 0x083c: perf_recv */
-/* 0x083e: perf_init */
- 0x00f800f8,
-/* 0x0840: i2c_drive_scl */
- 0xf40036b0,
- 0x07f1110b,
- 0x04b607e0,
- 0x0001d006,
- 0x00f804bd,
-/* 0x0854: i2c_drive_scl_lo */
- 0x07e407f1,
- 0xd00604b6,
- 0x04bd0001,
-/* 0x0862: i2c_drive_sda */
- 0x36b000f8,
- 0x110bf400,
- 0x07e007f1,
- 0xd00604b6,
- 0x04bd0002,
-/* 0x0876: i2c_drive_sda_lo */
- 0x07f100f8,
- 0x04b607e4,
- 0x0002d006,
- 0x00f804bd,
-/* 0x0884: i2c_sense_scl */
- 0xf10132f4,
- 0xb607c437,
- 0x33cf0634,
- 0x0431fd00,
- 0xf4060bf4,
-/* 0x089a: i2c_sense_scl_done */
- 0x00f80131,
-/* 0x089c: i2c_sense_sda */
- 0xf10132f4,
- 0xb607c437,
- 0x33cf0634,
- 0x0432fd00,
- 0xf4060bf4,
-/* 0x08b2: i2c_sense_sda_done */
- 0x00f80131,
-/* 0x08b4: i2c_raise_scl */
- 0x47f140f9,
- 0x37f00898,
- 0x4021f501,
-/* 0x08c1: i2c_raise_scl_wait */
+/* 0x0825: memx_recv */
+ 0x01d6b000,
+ 0xb0980bf4,
+ 0x0bf400d6,
+/* 0x0833: memx_init */
+ 0xf800f8d8,
+/* 0x0835: perf_recv */
+/* 0x0837: perf_init */
+ 0xf800f800,
+/* 0x0839: i2c_drive_scl */
+ 0x0036b000,
+ 0xf1110bf4,
+ 0xb607e007,
+ 0x01d00604,
+ 0xf804bd00,
+/* 0x084d: i2c_drive_scl_lo */
+ 0xe407f100,
+ 0x0604b607,
+ 0xbd0001d0,
+/* 0x085b: i2c_drive_sda */
+ 0xb000f804,
+ 0x0bf40036,
+ 0xe007f111,
+ 0x0604b607,
+ 0xbd0002d0,
+/* 0x086f: i2c_drive_sda_lo */
+ 0xf100f804,
+ 0xb607e407,
+ 0x02d00604,
+ 0xf804bd00,
+/* 0x087d: i2c_sense_scl */
+ 0x0132f400,
+ 0x07c437f1,
+ 0xcf0634b6,
+ 0x31fd0033,
+ 0x060bf404,
+/* 0x0893: i2c_sense_scl_done */
+ 0xf80131f4,
+/* 0x0895: i2c_sense_sda */
+ 0x0132f400,
+ 0x07c437f1,
+ 0xcf0634b6,
+ 0x32fd0033,
+ 0x060bf404,
+/* 0x08ab: i2c_sense_sda_done */
+ 0xf80131f4,
+/* 0x08ad: i2c_raise_scl */
+ 0xf140f900,
+ 0xf0089847,
+ 0x21f50137,
+/* 0x08ba: i2c_raise_scl_wait */
+ 0xe7f10839,
+ 0x21f403e8,
+ 0x7d21f57e,
+ 0x0901f408,
+ 0xf40142b6,
+/* 0x08ce: i2c_raise_scl_done */
+ 0x40fcef1b,
+/* 0x08d2: i2c_start */
+ 0x21f500f8,
+ 0x11f4087d,
+ 0x9521f50d,
+ 0x0611f408,
+/* 0x08e3: i2c_start_rep */
+ 0xf0300ef4,
+ 0x21f50037,
+ 0x37f00839,
+ 0x5b21f501,
+ 0x0076bb08,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b608ad,
+ 0x1f11f404,
+/* 0x0910: i2c_start_send */
+ 0xf50037f0,
+ 0xf1085b21,
+ 0xf41388e7,
+ 0x37f07e21,
+ 0x3921f500,
+ 0x88e7f108,
+ 0x7e21f413,
+/* 0x092c: i2c_start_out */
+/* 0x092e: i2c_stop */
+ 0x37f000f8,
+ 0x3921f500,
+ 0x0037f008,
+ 0x085b21f5,
+ 0x03e8e7f1,
+ 0xf07e21f4,
+ 0x21f50137,
+ 0xe7f10839,
+ 0x21f41388,
+ 0x0137f07e,
+ 0x085b21f5,
+ 0x1388e7f1,
+ 0xf87e21f4,
+/* 0x0961: i2c_bitw */
+ 0x5b21f500,
0xe8e7f108,
0x7e21f403,
- 0x088421f5,
- 0xb60901f4,
- 0x1bf40142,
-/* 0x08d5: i2c_raise_scl_done */
- 0xf840fcef,
-/* 0x08d9: i2c_start */
- 0x8421f500,
- 0x0d11f408,
- 0x089c21f5,
- 0xf40611f4,
-/* 0x08ea: i2c_start_rep */
- 0x37f0300e,
- 0x4021f500,
- 0x0137f008,
- 0x086221f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xb421f550,
+ 0xad21f550,
0x0464b608,
-/* 0x0917: i2c_start_send */
- 0xf01f11f4,
- 0x21f50037,
- 0xe7f10862,
- 0x21f41388,
- 0x0037f07e,
- 0x084021f5,
- 0x1388e7f1,
-/* 0x0933: i2c_start_out */
- 0xf87e21f4,
-/* 0x0935: i2c_stop */
- 0x0037f000,
- 0x084021f5,
- 0xf50037f0,
- 0xf1086221,
- 0xf403e8e7,
+ 0xf11811f4,
+ 0xf41388e7,
0x37f07e21,
- 0x4021f501,
+ 0x3921f500,
0x88e7f108,
0x7e21f413,
- 0xf50137f0,
- 0xf1086221,
- 0xf41388e7,
- 0x00f87e21,
-/* 0x0968: i2c_bitw */
- 0x086221f5,
- 0x03e8e7f1,
- 0xbb7e21f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x08b421f5,
- 0xf40464b6,
- 0xe7f11811,
+/* 0x09a0: i2c_bitw_out */
+/* 0x09a2: i2c_bitr */
+ 0x37f000f8,
+ 0x5b21f501,
+ 0xe8e7f108,
+ 0x7e21f403,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xad21f550,
+ 0x0464b608,
+ 0xf51b11f4,
+ 0xf0089521,
+ 0x21f50037,
+ 0xe7f10839,
0x21f41388,
- 0x0037f07e,
- 0x084021f5,
- 0x1388e7f1,
-/* 0x09a7: i2c_bitw_out */
- 0xf87e21f4,
-/* 0x09a9: i2c_bitr */
- 0x0137f000,
- 0x086221f5,
- 0x03e8e7f1,
- 0xbb7e21f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x08b421f5,
- 0xf40464b6,
- 0x21f51b11,
- 0x37f0089c,
- 0x4021f500,
- 0x88e7f108,
- 0x7e21f413,
- 0xf4013cf0,
-/* 0x09ee: i2c_bitr_done */
- 0x00f80131,
-/* 0x09f0: i2c_get_byte */
- 0xf00057f0,
-/* 0x09f6: i2c_get_byte_next */
- 0x54b60847,
+ 0x013cf07e,
+/* 0x09e7: i2c_bitr_done */
+ 0xf80131f4,
+/* 0x09e9: i2c_get_byte */
+ 0x0057f000,
+/* 0x09ef: i2c_get_byte_next */
+ 0xb60847f0,
+ 0x76bb0154,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb609a221,
+ 0x11f40464,
+ 0x0553fd2b,
+ 0xf40142b6,
+ 0x37f0d81b,
0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b609a9,
- 0x2b11f404,
- 0xb60553fd,
- 0x1bf40142,
- 0x0137f0d8,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x6821f550,
- 0x0464b609,
-/* 0x0a40: i2c_get_byte_done */
-/* 0x0a42: i2c_put_byte */
- 0x47f000f8,
-/* 0x0a45: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x096821f5,
- 0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
+ 0x64b60961,
+/* 0x0a39: i2c_get_byte_done */
+/* 0x0a3b: i2c_put_byte */
+ 0xf000f804,
+/* 0x0a3e: i2c_put_byte_next */
+ 0x42b60847,
+ 0x3854ff01,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xa921f550,
+ 0x6121f550,
0x0464b609,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x0a9b: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x0a9d: i2c_addr */
- 0x0076bb00,
+ 0xb03411f4,
+ 0x1bf40046,
+ 0x0076bbd8,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b608d9,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
+ 0x64b609a2,
+ 0x0f11f404,
+ 0xb00076bb,
+ 0x1bf40136,
+ 0x0132f406,
+/* 0x0a94: i2c_put_byte_done */
+/* 0x0a96: i2c_addr */
+ 0x76bb00f8,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb60a4221,
-/* 0x0ae2: i2c_addr_done */
- 0x00f80464,
-/* 0x0ae4: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b702e4,
- 0xee980d1c,
-/* 0x0af3: i2c_acquire */
- 0xf500f800,
- 0xf40ae421,
- 0xd9f00421,
- 0x4021f403,
-/* 0x0b02: i2c_release */
- 0x21f500f8,
- 0x21f40ae4,
- 0x03daf004,
- 0xf84021f4,
-/* 0x0b11: i2c_recv */
- 0x0132f400,
- 0xb6f8c1c7,
- 0x16b00214,
- 0x3a1ff528,
- 0xf413a001,
- 0x0032980c,
- 0x0ccc13a0,
- 0xf4003198,
- 0xd0f90231,
- 0xd0f9e0f9,
- 0x000067f1,
- 0x100063f1,
- 0xbb016792,
+ 0xb608d221,
+ 0x11f40464,
+ 0x2ec3e729,
+ 0x0134b601,
+ 0xbb0553fd,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0af321f5,
- 0xfc0464b6,
- 0x00d6b0d0,
- 0x00b31bf5,
- 0xbb0057f0,
+ 0x0a3b21f5,
+/* 0x0adb: i2c_addr_done */
+ 0xf80464b6,
+/* 0x0add: i2c_acquire_addr */
+ 0xf8cec700,
+ 0xb702e4b6,
+ 0x980d1ce0,
+ 0x00f800ee,
+/* 0x0aec: i2c_acquire */
+ 0x0add21f5,
+ 0xf00421f4,
+ 0x21f403d9,
+/* 0x0afb: i2c_release */
+ 0xf500f840,
+ 0xf40add21,
+ 0xdaf00421,
+ 0x4021f403,
+/* 0x0b0a: i2c_recv */
+ 0x32f400f8,
+ 0xf8c1c701,
+ 0xb00214b6,
+ 0x1ff52816,
+ 0x13a0013a,
+ 0x32980cf4,
+ 0xcc13a000,
+ 0x0031980c,
+ 0xf90231f4,
+ 0xf9e0f9d0,
+ 0x0067f1d0,
+ 0x0063f100,
+ 0x01679210,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xec21f550,
+ 0x0464b60a,
+ 0xd6b0d0fc,
+ 0xb31bf500,
+ 0x0057f000,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x9621f550,
+ 0x0464b60a,
+ 0x00d011f5,
+ 0xbbe0c5c7,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0a9d21f5,
+ 0x0a3b21f5,
0xf50464b6,
- 0xc700d011,
- 0x76bbe0c5,
+ 0xf000ad11,
+ 0x76bb0157,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb60a4221,
+ 0xb60a9621,
0x11f50464,
- 0x57f000ad,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b60a9d,
- 0x8a11f504,
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b609f0,
- 0x6a11f404,
- 0xbbe05bcb,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x093521f5,
- 0xb90464b6,
- 0x74bd025b,
-/* 0x0c17: i2c_recv_not_rd08 */
- 0xb0430ef4,
- 0x1bf401d6,
- 0x0057f03d,
- 0x0a9d21f5,
- 0xc73311f4,
- 0x21f5e0c5,
- 0x11f40a42,
- 0x0057f029,
- 0x0a9d21f5,
- 0xc71f11f4,
- 0x21f5e0b5,
- 0x11f40a42,
- 0x3521f515,
- 0xc774bd09,
- 0x1bf408c5,
- 0x0232f409,
-/* 0x0c57: i2c_recv_not_wr08 */
-/* 0x0c57: i2c_recv_done */
- 0xc7030ef4,
- 0x21f5f8ce,
- 0xe0fc0b02,
- 0x12f4d0fc,
- 0x027cb90a,
- 0x033621f5,
-/* 0x0c6c: i2c_recv_exit */
-/* 0x0c6e: i2c_init */
+ 0x76bb008a,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb609e921,
+ 0x11f40464,
+ 0xe05bcb6a,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x2e21f550,
+ 0x0464b609,
+ 0xbd025bb9,
+ 0x430ef474,
+/* 0x0c10: i2c_recv_not_rd08 */
+ 0xf401d6b0,
+ 0x57f03d1b,
+ 0x9621f500,
+ 0x3311f40a,
+ 0xf5e0c5c7,
+ 0xf40a3b21,
+ 0x57f02911,
+ 0x9621f500,
+ 0x1f11f40a,
+ 0xf5e0b5c7,
+ 0xf40a3b21,
+ 0x21f51511,
+ 0x74bd092e,
+ 0xf408c5c7,
+ 0x32f4091b,
+ 0x030ef402,
+/* 0x0c50: i2c_recv_not_wr08 */
+/* 0x0c50: i2c_recv_done */
+ 0xf5f8cec7,
+ 0xfc0afb21,
+ 0xf4d0fce0,
+ 0x7cb90a12,
+ 0x3621f502,
+/* 0x0c65: i2c_recv_exit */
+/* 0x0c67: i2c_init */
+ 0xf800f803,
+/* 0x0c69: test_recv */
+ 0xd817f100,
+ 0x0614b605,
+ 0xb60011cf,
+ 0x07f10110,
+ 0x04b605d8,
+ 0x0001d006,
+ 0xe7f104bd,
+ 0xe3f1d900,
+ 0x21f5134f,
+ 0x00f80256,
+/* 0x0c90: test_init */
+ 0x0800e7f1,
+ 0x025621f5,
+/* 0x0c9a: idle_recv */
0x00f800f8,
-/* 0x0c70: test_recv */
- 0x05d817f1,
- 0xcf0614b6,
- 0x10b60011,
- 0xd807f101,
- 0x0604b605,
- 0xbd0001d0,
- 0x00e7f104,
- 0x4fe3f1d9,
- 0x5621f513,
-/* 0x0c97: test_init */
- 0xf100f802,
- 0xf50800e7,
- 0xf8025621,
-/* 0x0ca1: idle_recv */
-/* 0x0ca3: idle */
- 0xf400f800,
- 0x17f10031,
- 0x14b605d4,
- 0x0011cf06,
- 0xf10110b6,
- 0xb605d407,
- 0x01d00604,
-/* 0x0cbf: idle_loop */
- 0xf004bd00,
- 0x32f45817,
-/* 0x0cc5: idle_proc */
-/* 0x0cc5: idle_proc_exec */
- 0xb910f902,
- 0x21f5021e,
- 0x10fc033f,
- 0xf40911f4,
- 0x0ef40231,
-/* 0x0cd9: idle_proc_next */
- 0x5810b6ef,
- 0xf4061fb8,
- 0x02f4e61b,
- 0x0028f4dd,
- 0x00bb0ef4,
+/* 0x0c9c: idle */
+ 0xf10031f4,
+ 0xb605d417,
+ 0x11cf0614,
+ 0x0110b600,
+ 0x05d407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x0cb8: idle_loop */
+ 0xf45817f0,
+/* 0x0cbe: idle_proc */
+/* 0x0cbe: idle_proc_exec */
+ 0x10f90232,
+ 0xf5021eb9,
+ 0xfc033f21,
+ 0x0911f410,
+ 0xf40231f4,
+/* 0x0cd2: idle_proc_next */
+ 0x10b6ef0e,
+ 0x061fb858,
+ 0xf4e61bf4,
+ 0x28f4dd02,
+ 0xbb0ef400,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
index ec03f9a..1663bf9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
@@ -82,15 +82,15 @@ memx_train_tail:
// $r0 - zero
memx_func_enter:
#if NVKM_PPWR_CHIPSET == GT215
- movw $r8 0x1610
+ mov $r8 0x1610
nv_rd32($r7, $r8)
imm32($r6, 0xfffffffc)
and $r7 $r6
- movw $r6 0x2
+ mov $r6 0x2
or $r7 $r6
nv_wr32($r8, $r7)
#else
- movw $r6 0x001620
+ mov $r6 0x001620
imm32($r7, ~0x00000aa2);
nv_rd32($r8, $r6)
and $r8 $r7
@@ -101,7 +101,7 @@ memx_func_enter:
and $r8 $r7
nv_wr32($r6, $r8)
- movw $r6 0x0026f0
+ mov $r6 0x0026f0
nv_rd32($r8, $r6)
and $r8 $r7
nv_wr32($r6, $r8)
@@ -136,19 +136,19 @@ memx_func_leave:
bra nz #memx_func_leave_wait
#if NVKM_PPWR_CHIPSET == GT215
- movw $r8 0x1610
+ mov $r8 0x1610
nv_rd32($r7, $r8)
imm32($r6, 0xffffffcc)
and $r7 $r6
nv_wr32($r8, $r7)
#else
- movw $r6 0x0026f0
+ mov $r6 0x0026f0
imm32($r7, 0x00000001)
nv_rd32($r8, $r6)
or $r8 $r7
nv_wr32($r6, $r8)
- movw $r6 0x001620
+ mov $r6 0x001620
nv_rd32($r8, $r6)
or $r8 $r7
nv_wr32($r6, $r8)
@@ -177,11 +177,11 @@ memx_func_wait_vblank:
bra #memx_func_wait_vblank_fini
memx_func_wait_vblank_head1:
- movw $r7 0x20
+ mov $r7 0x20
bra #memx_func_wait_vblank_0
memx_func_wait_vblank_head0:
- movw $r7 0x8
+ mov $r7 0x8
memx_func_wait_vblank_0:
nv_iord($r6, NV_PPWR_INPUT)
@@ -273,13 +273,13 @@ memx_func_train:
// $r5 - outer loop counter
// $r6 - inner loop counter
// $r7 - entry counter (#memx_train_head + $r7)
- movw $r5 0x3
- movw $r7 0x0
+ mov $r5 0x3
+ mov $r7 0x0
// Read random memory to wake up... things
imm32($r9, 0x700000)
nv_rd32($r8,$r9)
- movw $r14 0x2710
+ mov $r14 0x2710
call(nsec)
memx_func_train_loop_outer:
@@ -289,9 +289,9 @@ memx_func_train:
nv_wr32($r9, $r8)
push $r5
- movw $r6 0x0
+ mov $r6 0x0
memx_func_train_loop_inner:
- movw $r8 0x1111
+ mov $r8 0x1111
mulu $r9 $r6 $r8
shl b32 $r8 $r9 0x10
or $r8 $r9
@@ -315,7 +315,7 @@ memx_func_train:
// $r5 - inner inner loop counter
// $r9 - result
- movw $r5 0
+ mov $r5 0
imm32($r9, 0x8300ffff)
memx_func_train_loop_4x:
imm32($r10, 0x100080)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index e698f48..ed08120 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -7,8 +7,10 @@ nvkm-y += nvkm/subdev/secboot/acr_r352.o
nvkm-y += nvkm/subdev/secboot/acr_r361.o
nvkm-y += nvkm/subdev/secboot/acr_r364.o
nvkm-y += nvkm/subdev/secboot/acr_r367.o
+nvkm-y += nvkm/subdev/secboot/acr_r370.o
nvkm-y += nvkm/subdev/secboot/acr_r375.o
nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o
nvkm-y += nvkm/subdev/secboot/gp102.o
+nvkm-y += nvkm/subdev/secboot/gp108.o
nvkm-y += nvkm/subdev/secboot/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
index b615fc8..73a2ac8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
@@ -64,6 +64,7 @@ struct nvkm_acr *acr_r352_new(unsigned long);
struct nvkm_acr *acr_r361_new(unsigned long);
struct nvkm_acr *acr_r364_new(unsigned long);
struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
+struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
new file mode 100644
index 0000000..2f890df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr_r370.h"
+#include "acr_r367.h"
+
+#include <core/msgqueue.h>
+#include <engine/falcon.h>
+#include <engine/sec2.h>
+
+static void
+acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
+ void *_desc)
+{
+ struct acr_r370_flcn_bl_desc *desc = _desc;
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ u64 base, addr_code, addr_data;
+
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
+ addr_code = base + pdesc->app_resident_code_offset;
+ addr_data = base + pdesc->app_resident_data_offset;
+
+ desc->ctx_dma = FALCON_DMAIDX_UCODE;
+ desc->code_dma_base = u64_to_flcn64(addr_code);
+ desc->non_sec_code_off = pdesc->app_resident_code_offset;
+ desc->non_sec_code_size = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = u64_to_flcn64(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+}
+
+const struct acr_r352_ls_func
+acr_r370_ls_fecs_func = {
+ .load = acr_ls_ucode_load_fecs,
+ .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
+const struct acr_r352_ls_func
+acr_r370_ls_gpccs_func = {
+ .load = acr_ls_ucode_load_gpccs,
+ .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+};
+
+static void
+acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
+ void *_desc)
+{
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
+ struct acr_r370_flcn_bl_desc *desc = _desc;
+ u64 base, addr_code, addr_data;
+ u32 addr_args;
+
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
+ /* For some reason we should not add app_resident_code_offset here */
+ addr_code = base;
+ addr_data = base + pdesc->app_resident_data_offset;
+ addr_args = sec->falcon->data.limit;
+ addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
+
+ desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
+ desc->code_dma_base = u64_to_flcn64(addr_code);
+ desc->non_sec_code_off = pdesc->app_resident_code_offset;
+ desc->non_sec_code_size = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = u64_to_flcn64(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+ desc->argc = 1;
+ /* args are stored at the beginning of EMEM */
+ desc->argv = 0x01000000;
+}
+
+const struct acr_r352_ls_func
+acr_r370_ls_sec2_func = {
+ .load = acr_ls_ucode_load_sec2,
+ .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+ .post_run = acr_ls_sec2_post_run,
+};
+
+void
+acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
+ u64 offset)
+{
+ struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;
+
+ bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
+ bl_desc->non_sec_code_off = hdr->non_sec_code_off;
+ bl_desc->non_sec_code_size = hdr->non_sec_code_size;
+ bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
+ bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
+ bl_desc->code_entry_point = 0;
+ bl_desc->code_dma_base = u64_to_flcn64(offset);
+ bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
+ bl_desc->data_size = hdr->data_size;
+}
+
+const struct acr_r352_func
+acr_r370_func = {
+ .fixup_hs_desc = acr_r367_fixup_hs_desc,
+ .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
+ .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+ .shadow_blob = true,
+ .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
+ .ls_fill_headers = acr_r367_ls_fill_headers,
+ .ls_write_wpr = acr_r367_ls_write_wpr,
+ .ls_func = {
+ [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
+ [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
+ [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
+ },
+};
+
+struct nvkm_acr *
+acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
+ unsigned long managed_falcons)
+{
+ return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
new file mode 100644
index 0000000..3426f86
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECBOOT_ACR_R370_H__
+#define __NVKM_SECBOOT_ACR_R370_H__
+
+#include "priv.h"
+struct hsf_load_header;
+
+/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
+struct acr_r370_flcn_bl_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ struct flcn_u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ struct flcn_u64 data_dma_base;
+ u32 data_size;
+ u32 argc;
+ u32 argv;
+};
+
+void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
+extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
+extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
+#endif
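
The bootloader descriptors above carry their DMA addresses as struct flcn_u64 values produced by u64_to_flcn64(); that helper is not part of this hunk. A minimal sketch of the conversion, assuming the conventional lower/upper 32-bit split into the flcn_u64 layout declared in secboot/priv.h further down in this patch (the example_ name is invented for illustration):

#include <linux/types.h>

struct flcn_u64 { u32 lo; u32 hi; };	/* mirrors the declaration in secboot/priv.h */

/* Illustrative only -- assumes u64_to_flcn64() performs the usual split. */
static inline struct flcn_u64 example_u64_to_flcn64(u64 addr)
{
	struct flcn_u64 ret = {
		.lo = (u32)addr,		/* low 32 bits */
		.hi = (u32)(addr >> 32),	/* high 32 bits */
	};
	return ret;
}
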
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
index ddb795b..7bdef93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
@@ -20,90 +20,12 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "acr_r370.h"
#include "acr_r367.h"
-#include <engine/falcon.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>
-/*
- * r375 ACR: similar to r367, but with a unified bootloader descriptor
- * structure for GR and PMU falcons.
- */
-
-/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
-struct acr_r375_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
- u32 argc;
- u32 argv;
-};
-
-static void
-acr_r375_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r375_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-static void
-acr_r375_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r375_flcn_bl_desc *bl_desc = _bl_desc;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->code_dma_base = u64_to_flcn64(offset);
- bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
- bl_desc->data_size = hdr->data_size;
-}
-
-const struct acr_r352_ls_func
-acr_r375_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r375_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-
static void
acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
@@ -111,7 +33,7 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r375_flcn_bl_desc *desc = _desc;
+ struct acr_r370_flcn_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
@@ -136,23 +58,22 @@ const struct acr_r352_ls_func
acr_r375_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
.generate_bl_desc = acr_r375_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.post_run = acr_ls_pmu_post_run,
};
-
const struct acr_r352_func
acr_r375_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r375_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
+ .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
+ .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.shadow_blob = true,
.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
.ls_fill_headers = acr_r367_ls_fill_headers,
.ls_write_wpr = acr_r367_ls_write_wpr,
.ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r375_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r375_ls_gpccs_func,
+ [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
+ [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
index f3b3c66..1f7a3c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -133,7 +133,7 @@ gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
return gm200_secboot_run_blob(sb, blob, falcon);
}
-static const struct nvkm_secboot_func
+const struct nvkm_secboot_func
gp102_secboot = {
.dtor = gm200_secboot_dtor,
.oneinit = gm200_secboot_oneinit,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
new file mode 100644
index 0000000..e8c27ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gm200.h"
+#include "acr.h"
+
+int
+gp108_secboot_new(struct nvkm_device *device, int index,
+ struct nvkm_secboot **psb)
+{
+ struct gm200_secboot *gsb;
+ struct nvkm_acr *acr;
+
+ acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
+ BIT(NVKM_SECBOOT_FALCON_FECS) |
+ BIT(NVKM_SECBOOT_FALCON_GPCCS) |
+ BIT(NVKM_SECBOOT_FALCON_SEC2));
+ if (IS_ERR(acr))
+ return PTR_ERR(acr);
+
+ if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
+ acr->func->dtor(acr);
+ return -ENOMEM;
+ }
+ *psb = &gsb->base;
+
+ return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
+}
+
+MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index d9091f0..959a7b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -40,6 +40,8 @@ int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
int nvkm_secboot_falcon_run(struct nvkm_secboot *);
+extern const struct nvkm_secboot_func gp102_secboot;
+
struct flcn_u64 {
u32 lo;
u32 hi;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 7ba56b1..550702e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -9,7 +9,9 @@ nvkm-y += nvkm/subdev/therm/nv40.o
nvkm-y += nvkm/subdev/therm/nv50.o
nvkm-y += nvkm/subdev/therm/g84.o
nvkm-y += nvkm/subdev/therm/gt215.o
+nvkm-y += nvkm/subdev/therm/gf100.o
nvkm-y += nvkm/subdev/therm/gf119.o
+nvkm-y += nvkm/subdev/therm/gk104.o
nvkm-y += nvkm/subdev/therm/gm107.o
nvkm-y += nvkm/subdev/therm/gm200.o
nvkm-y += nvkm/subdev/therm/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index f27fc6d..3695cde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -21,6 +21,7 @@
*
* Authors: Martin Peres
*/
+#include <nvkm/core/option.h>
#include "priv.h"
int
@@ -297,6 +298,38 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
return -EINVAL;
}
+void
+nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
+{
+ if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
+ return;
+
+ nvkm_debug(&therm->subdev,
+ "Enabling clockgating\n");
+ therm->func->clkgate_enable(therm);
+}
+
+void
+nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
+{
+ if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)
+ return;
+
+ nvkm_debug(&therm->subdev,
+ "Preparing clockgating for %s\n",
+ suspend ? "suspend" : "fini");
+ therm->func->clkgate_fini(therm, suspend);
+}
+
+static void
+nvkm_therm_clkgate_oneinit(struct nvkm_therm *therm)
+{
+ if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
+ return;
+
+ nvkm_info(&therm->subdev, "Clockgating enabled\n");
+}
+
static void
nvkm_therm_intr(struct nvkm_subdev *subdev)
{
@@ -333,6 +366,7 @@ nvkm_therm_oneinit(struct nvkm_subdev *subdev)
nvkm_therm_fan_ctor(therm);
nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
nvkm_therm_sensor_preinit(therm);
+ nvkm_therm_clkgate_oneinit(therm);
return 0;
}
@@ -357,6 +391,16 @@ nvkm_therm_init(struct nvkm_subdev *subdev)
return 0;
}
+void
+nvkm_therm_clkgate_init(struct nvkm_therm *therm,
+ const struct nvkm_therm_clkgate_pack *p)
+{
+ if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled)
+ return;
+
+ therm->func->clkgate_init(therm, p);
+}
+
static void *
nvkm_therm_dtor(struct nvkm_subdev *subdev)
{
@@ -374,15 +418,10 @@ nvkm_therm = {
.intr = nvkm_therm_intr,
};
-int
-nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
- int index, struct nvkm_therm **ptherm)
+void
+nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
+ int index, const struct nvkm_therm_func *func)
{
- struct nvkm_therm *therm;
-
- if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
- return -ENOMEM;
-
nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev);
therm->func = func;
@@ -395,5 +434,20 @@ nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
therm->attr_get = nvkm_therm_attr_get;
therm->attr_set = nvkm_therm_attr_set;
therm->mode = therm->suspend = -1; /* undefined */
+
+ therm->clkgating_enabled = nvkm_boolopt(device->cfgopt,
+ "NvPmEnableGating", false);
+}
+
+int
+nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
+ int index, struct nvkm_therm **ptherm)
+{
+ struct nvkm_therm *therm;
+
+ if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_therm_ctor(therm, device, index, func);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c
new file mode 100644
index 0000000..5ae6913
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+#include <core/device.h>
+
+#include "priv.h"
+
+#define pack_for_each_init(init, pack, head) \
+ for (pack = head; pack && pack->init; pack++) \
+ for (init = pack->init; init && init->count; init++)
+void
+gf100_clkgate_init(struct nvkm_therm *therm,
+ const struct nvkm_therm_clkgate_pack *p)
+{
+ struct nvkm_device *device = therm->subdev.device;
+ const struct nvkm_therm_clkgate_pack *pack;
+ const struct nvkm_therm_clkgate_init *init;
+ u32 next, addr;
+
+ pack_for_each_init(init, pack, p) {
+ next = init->addr + init->count * 8;
+ addr = init->addr;
+
+ nvkm_trace(&therm->subdev, "{ 0x%06x, %d, 0x%08x }\n",
+ init->addr, init->count, init->data);
+ while (addr < next) {
+ nvkm_trace(&therm->subdev, "\t0x%06x = 0x%08x\n",
+ addr, init->data);
+ nvkm_wr32(device, addr, init->data);
+ addr += 8;
+ }
+ }
+}
+
+/*
+ * TODO: Fermi clockgating isn't understood fully yet, so we don't specify any
+ * clockgate functions to use
+ */
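
gf100_clkgate_init() consumes a two-level, sentinel-terminated table: each nvkm_therm_clkgate_pack wraps an array of nvkm_therm_clkgate_init entries ending with count == 0, and the pack array itself ends with a NULL init pointer, exactly as the pack_for_each_init() loop conditions imply. Each entry writes init->data to init->count registers spaced 8 bytes apart starting at init->addr. A hedged sketch of such a table follows; the struct definitions live in the therm headers rather than this hunk, field names are taken from the loop above, and the register offsets and data values are invented for illustration:

/* Hypothetical table shape for gf100_clkgate_init(); addresses/data made up. */
static const struct nvkm_therm_clkgate_init example_clkgate_init[] = {
	{ .addr = 0x404000, .count = 4, .data = 0x00000002 },	/* 0x404000/08/10/18 */
	{ .addr = 0x405000, .count = 1, .data = 0x00000002 },
	{}	/* count == 0 ends this list */
};

static const struct nvkm_therm_clkgate_pack example_clkgate_pack[] = {
	{ .init = example_clkgate_init },
	{}	/* NULL init ends the pack list */
};

A chip-specific therm implementation would hand such a pack array to nvkm_therm_clkgate_init(), which forwards it to this function through func->clkgate_init, as the base.c hunk below shows.
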
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h
index c0c4bfd..cfb25af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,15 +19,17 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
+ * Authors: Lyude Paul
*/
-#ifndef __AMDGPU_POWERPLAY_H__
-#define __AMDGPU_POWERPLAY_H__
+#ifndef __GF100_THERM_H__
+#define __GF100_THERM_H__
-#include "amd_shared.h"
+#include <core/device.h>
-extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block;
+struct gf100_idle_filter {
+ u32 fecs;
+ u32 hubmmu;
+};
-#endif /* __AMDGPU_POWERPLAY_H__ */
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
index 06dcfd6..0981b02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -49,7 +49,7 @@ pwm_info(struct nvkm_therm *therm, int line)
return -ENODEV;
}
-static int
+int
gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
struct nvkm_device *device = therm->subdev.device;
@@ -63,7 +63,7 @@ gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
return 0;
}
-static int
+int
gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
struct nvkm_device *device = therm->subdev.device;
@@ -85,7 +85,7 @@ gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
return -EINVAL;
}
-static int
+int
gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
struct nvkm_device *device = therm->subdev.device;
@@ -102,7 +102,7 @@ gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
return 0;
}
-static int
+int
gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
struct nvkm_device *device = therm->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
new file mode 100644
index 0000000..4e03971
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+#include <core/device.h>
+
+#include "priv.h"
+#include "gk104.h"
+
+void
+gk104_clkgate_enable(struct nvkm_therm *base)
+{
+ struct gk104_therm *therm = gk104_therm(base);
+ struct nvkm_device *dev = therm->base.subdev.device;
+ const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
+ int i;
+
+ /* Program ENG_MANT, ENG_FILTER */
+ for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].engine))
+ continue;
+
+ nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
+ }
+
+ /* magic */
+ nvkm_wr32(dev, 0x020288, therm->idle_filter->fecs);
+ nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);
+
+ /* Enable clockgating (ENG_CLK = RUN->AUTO) */
+ for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].engine))
+ continue;
+
+ nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
+ }
+}
+
+void
+gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
+{
+ struct gk104_therm *therm = gk104_therm(base);
+ struct nvkm_device *dev = therm->base.subdev.device;
+ const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
+ int i;
+
+ /* ENG_CLK = AUTO->RUN, ENG_PWR = RUN->AUTO */
+ for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].engine))
+ continue;
+
+ nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54);
+ }
+}
+
+const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[] = {
+ { NVKM_ENGINE_GR, 0x00 },
+ { NVKM_ENGINE_MSPDEC, 0x04 },
+ { NVKM_ENGINE_MSPPP, 0x08 },
+ { NVKM_ENGINE_MSVLD, 0x0c },
+ { NVKM_ENGINE_CE0, 0x10 },
+ { NVKM_ENGINE_CE1, 0x14 },
+ { NVKM_ENGINE_MSENC, 0x18 },
+ { NVKM_ENGINE_CE2, 0x1c },
+ { NVKM_SUBDEV_NR, 0 },
+};
+
+const struct gf100_idle_filter gk104_idle_filter = {
+ .fecs = 0x00001000,
+ .hubmmu = 0x00001000,
+};
+
+static const struct nvkm_therm_func
+gk104_therm_func = {
+ .init = gf119_therm_init,
+ .fini = g84_therm_fini,
+ .pwm_ctrl = gf119_fan_pwm_ctrl,
+ .pwm_get = gf119_fan_pwm_get,
+ .pwm_set = gf119_fan_pwm_set,
+ .pwm_clock = gf119_fan_pwm_clock,
+ .temp_get = g84_temp_get,
+ .fan_sense = gt215_therm_fan_sense,
+ .program_alarms = nvkm_therm_program_alarms_polling,
+ .clkgate_init = gf100_clkgate_init,
+ .clkgate_enable = gk104_clkgate_enable,
+ .clkgate_fini = gk104_clkgate_fini,
+};
+
+static int
+gk104_therm_new_(const struct nvkm_therm_func *func,
+ struct nvkm_device *device,
+ int index,
+ const struct gk104_clkgate_engine_info *clkgate_order,
+ const struct gf100_idle_filter *idle_filter,
+ struct nvkm_therm **ptherm)
+{
+ struct gk104_therm *therm = kzalloc(sizeof(*therm), GFP_KERNEL);
+
+ if (!therm)
+ return -ENOMEM;
+
+ nvkm_therm_ctor(&therm->base, device, index, func);
+ *ptherm = &therm->base;
+ therm->clkgate_order = clkgate_order;
+ therm->idle_filter = idle_filter;
+
+ return 0;
+}
+
+int
+gk104_therm_new(struct nvkm_device *device,
+ int index, struct nvkm_therm **ptherm)
+{
+ return gk104_therm_new_(&gk104_therm_func, device, index,
+ gk104_clkgate_engine_info, &gk104_idle_filter,
+ ptherm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
new file mode 100644
index 0000000..293e774
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+
+#ifndef __GK104_THERM_H__
+#define __GK104_THERM_H__
+#define gk104_therm(p) (container_of((p), struct gk104_therm, base))
+
+#include <subdev/therm.h>
+#include "priv.h"
+#include "gf100.h"
+
+struct gk104_clkgate_engine_info {
+ enum nvkm_devidx engine;
+ u8 offset;
+};
+
+struct gk104_therm {
+ struct nvkm_therm base;
+
+ const struct gk104_clkgate_engine_info *clkgate_order;
+ const struct gf100_idle_filter *idle_filter;
+};
+
+extern const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[];
+extern const struct gf100_idle_filter gk104_idle_filter;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index c08097f..4caf401 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -36,7 +36,7 @@ gt215_therm_fan_sense(struct nvkm_therm *therm)
return -ENODEV;
}
-static void
+void
gt215_therm_init(struct nvkm_therm *therm)
{
struct nvkm_device *device = therm->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 1f46e37..21659da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -32,6 +32,8 @@
int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
int index, struct nvkm_therm **);
+void nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
+ int index, const struct nvkm_therm_func *func);
struct nvkm_fan {
struct nvkm_therm *parent;
@@ -66,8 +68,6 @@ int nvkm_therm_fan_set(struct nvkm_therm *, bool now, int percent);
int nvkm_therm_fan_user_get(struct nvkm_therm *);
int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);
-int nvkm_therm_preinit(struct nvkm_therm *);
-
int nvkm_therm_sensor_init(struct nvkm_therm *);
int nvkm_therm_sensor_fini(struct nvkm_therm *, bool suspend);
void nvkm_therm_sensor_preinit(struct nvkm_therm *);
@@ -96,6 +96,11 @@ struct nvkm_therm_func {
int (*fan_sense)(struct nvkm_therm *);
void (*program_alarms)(struct nvkm_therm *);
+
+ void (*clkgate_init)(struct nvkm_therm *,
+ const struct nvkm_therm_clkgate_pack *);
+ void (*clkgate_enable)(struct nvkm_therm *);
+ void (*clkgate_fini)(struct nvkm_therm *, bool);
};
void nv40_therm_intr(struct nvkm_therm *);
@@ -111,9 +116,21 @@ void g84_therm_fini(struct nvkm_therm *);
int gt215_therm_fan_sense(struct nvkm_therm *);
+void gf100_clkgate_init(struct nvkm_therm *,
+ const struct nvkm_therm_clkgate_pack *);
+
void g84_therm_init(struct nvkm_therm *);
+
+int gf119_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
+int gf119_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
+int gf119_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
+int gf119_fan_pwm_clock(struct nvkm_therm *, int);
void gf119_therm_init(struct nvkm_therm *);
+void gk104_therm_init(struct nvkm_therm *);
+void gk104_clkgate_enable(struct nvkm_therm *);
+void gk104_clkgate_fini(struct nvkm_therm *, bool);
+
int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
int nvkm_fannil_create(struct nvkm_therm *);
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 542a765..9eabd72 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -1,7 +1,7 @@
/*
* Analog TV Connector driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -40,14 +40,12 @@ static const struct videomode tvc_pal_vm = {
DISPLAY_FLAGS_VSYNC_LOW,
};
-static const struct of_device_id tvc_of_match[];
-
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
dev_dbg(ddata->dev, "connect\n");
@@ -55,10 +53,19 @@ static int tvc_connect(struct omap_dss_device *dssdev)
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(ddata->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.atv->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -73,6 +80,9 @@ static void tvc_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.atv->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int tvc_enable(struct omap_dss_device *dssdev)
@@ -175,32 +185,12 @@ static struct omap_dss_driver tvc_driver = {
.set_wss = tvc_set_wss,
};
-static int tvc_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
-
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
- return 0;
-}
-
static int tvc_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
int r;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -208,10 +198,6 @@ static int tvc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- r = tvc_probe_of(pdev);
- if (r)
- return r;
-
ddata->vm = tvc_pal_vm;
dssdev = &ddata->dssdev;
@@ -224,28 +210,22 @@ static int tvc_probe(struct platform_device *pdev)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit tvc_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(&ddata->dssdev);
tvc_disable(dssdev);
tvc_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 05fa24a..6d8cbd9 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -1,7 +1,7 @@
/*
* Generic DVI Connector driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -9,6 +9,7 @@
* the Free Software Foundation.
*/
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -44,6 +45,14 @@ struct panel_drv_data {
struct videomode vm;
struct i2c_adapter *i2c_adapter;
+
+ struct gpio_desc *hpd_gpio;
+
+ void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
+ void *hpd_cb_data;
+ bool hpd_enabled;
+ /* mutex for hpd fields above */
+ struct mutex hpd_lock;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -51,16 +60,25 @@ struct panel_drv_data {
static int dvic_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dvi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -73,6 +91,9 @@ static void dvic_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dvi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int dvic_enable(struct omap_dss_device *dssdev)
@@ -177,6 +198,9 @@ static int dvic_read_edid(struct omap_dss_device *dssdev,
struct panel_drv_data *ddata = to_panel_data(dssdev);
int r, l, bytes_read;
+ if (ddata->hpd_gpio && !gpiod_get_value_cansleep(ddata->hpd_gpio))
+ return -ENODEV;
+
if (!ddata->i2c_adapter)
return -ENODEV;
@@ -208,6 +232,9 @@ static bool dvic_detect(struct omap_dss_device *dssdev)
unsigned char out;
int r;
+ if (ddata->hpd_gpio)
+ return gpiod_get_value_cansleep(ddata->hpd_gpio);
+
if (!ddata->i2c_adapter)
return true;
@@ -216,6 +243,60 @@ static bool dvic_detect(struct omap_dss_device *dssdev)
return r == 0;
}
+static int dvic_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ if (!ddata->hpd_gpio)
+ return -ENOTSUPP;
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = cb;
+ ddata->hpd_cb_data = cb_data;
+ mutex_unlock(&ddata->hpd_lock);
+ return 0;
+}
+
+static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ if (!ddata->hpd_gpio)
+ return;
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = NULL;
+ ddata->hpd_cb_data = NULL;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
+static void dvic_enable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ if (!ddata->hpd_gpio)
+ return;
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = true;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
+static void dvic_disable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ if (!ddata->hpd_gpio)
+ return;
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = false;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
static struct omap_dss_driver dvic_driver = {
.connect = dvic_connect,
.disconnect = dvic_disconnect,
@@ -229,23 +310,60 @@ static struct omap_dss_driver dvic_driver = {
.read_edid = dvic_read_edid,
.detect = dvic_detect,
+
+ .register_hpd_cb = dvic_register_hpd_cb,
+ .unregister_hpd_cb = dvic_unregister_hpd_cb,
+ .enable_hpd = dvic_enable_hpd,
+ .disable_hpd = dvic_disable_hpd,
};
+static irqreturn_t dvic_hpd_isr(int irq, void *data)
+{
+ struct panel_drv_data *ddata = data;
+
+ mutex_lock(&ddata->hpd_lock);
+ if (ddata->hpd_enabled && ddata->hpd_cb) {
+ enum drm_connector_status status;
+
+ if (dvic_detect(&ddata->dssdev))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ ddata->hpd_cb(ddata->hpd_cb_data, status);
+ }
+ mutex_unlock(&ddata->hpd_lock);
+
+ return IRQ_HANDLED;
+}
+
static int dvic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
struct device_node *adapter_node;
struct i2c_adapter *adapter;
+ struct gpio_desc *gpio;
+ int r;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to parse HPD gpio\n");
+ return PTR_ERR(gpio);
}
- ddata->in = in;
+ ddata->hpd_gpio = gpio;
+
+ mutex_init(&ddata->hpd_lock);
+
+ if (ddata->hpd_gpio) {
+ r = devm_request_threaded_irq(&pdev->dev,
+ gpiod_to_irq(ddata->hpd_gpio), NULL, dvic_hpd_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "DVI HPD", ddata);
+ if (r)
+ return r;
+ }
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
@@ -253,7 +371,6 @@ static int dvic_probe_of(struct platform_device *pdev)
of_node_put(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
- omap_dss_put_device(ddata->in);
return -EPROBE_DEFER;
}
@@ -275,9 +392,6 @@ static int dvic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (!pdev->dev.of_node)
- return -ENODEV;
-
r = dvic_probe_of(pdev);
if (r)
return r;
@@ -300,9 +414,8 @@ static int dvic_probe(struct platform_device *pdev)
return 0;
err_reg:
- omap_dss_put_device(ddata->in);
-
i2c_put_adapter(ddata->i2c_adapter);
+ mutex_destroy(&ddata->hpd_lock);
return r;
}
@@ -311,17 +424,16 @@ static int __exit dvic_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(&ddata->dssdev);
dvic_disable(dssdev);
dvic_disconnect(dssdev);
- omap_dss_put_device(in);
-
i2c_put_adapter(ddata->i2c_adapter);
+ mutex_destroy(&ddata->hpd_lock);
+
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 4600d38..ca30ed9 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -1,7 +1,7 @@
/*
* HDMI Connector driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@ struct panel_drv_data {
static int hdmic_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
dev_dbg(ddata->dev, "connect\n");
@@ -63,10 +63,19 @@ static int hdmic_connect(struct omap_dss_device *dssdev)
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(ddata->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.hdmi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -81,6 +90,9 @@ static void hdmic_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.hdmi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int hdmic_enable(struct omap_dss_device *dssdev)
@@ -302,7 +314,6 @@ static int hdmic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
int gpio;
/* HPD GPIO */
@@ -312,14 +323,6 @@ static int hdmic_probe_of(struct platform_device *pdev)
else
ddata->hpd_gpio = -ENODEV;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -336,9 +339,6 @@ static int hdmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
r = hdmic_probe_of(pdev);
if (r)
return r;
@@ -349,7 +349,7 @@ static int hdmic_probe(struct platform_device *pdev)
r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
GPIOF_DIR_IN, "hdmi_hpd");
if (r)
- goto err_reg;
+ return r;
r = devm_request_threaded_irq(&pdev->dev,
gpio_to_irq(ddata->hpd_gpio),
@@ -358,7 +358,7 @@ static int hdmic_probe(struct platform_device *pdev)
IRQF_ONESHOT,
"hdmic hpd", ddata);
if (r)
- goto err_reg;
+ return r;
}
ddata->vm = hdmic_default_vm;
@@ -373,28 +373,22 @@ static int hdmic_probe(struct platform_device *pdev)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit hdmic_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(&ddata->dssdev);
hdmic_disable(dssdev);
hdmic_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index b1f6aa0..afee1b8 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -6,7 +6,7 @@
*
* based on encoder-tfp410
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@ static int opa362_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
dev_dbg(dssdev->dev, "connect\n");
@@ -44,13 +44,22 @@ static int opa362_connect(struct omap_dss_device *dssdev,
if (omapdss_device_is_connected(dssdev))
return -EBUSY;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.atv->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
dst->src = dssdev;
dssdev->dst = dst;
+ ddata->in = in;
return 0;
}
@@ -74,6 +83,9 @@ static void opa362_disconnect(struct omap_dss_device *dssdev,
dssdev->dst = NULL;
in->ops.atv->disconnect(in, &ddata->dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int opa362_enable(struct omap_dss_device *dssdev)
@@ -171,19 +183,13 @@ static const struct omapdss_atv_ops opa362_atv_ops = {
static int opa362_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev, *in;
+ struct omap_dss_device *dssdev;
struct gpio_desc *gpio;
int r;
dev_dbg(&pdev->dev, "probe\n");
- if (node == NULL) {
- dev_err(&pdev->dev, "Unable to find device tree\n");
- return -EINVAL;
- }
-
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -196,14 +202,6 @@ static int opa362_probe(struct platform_device *pdev)
ddata->enable_gpio = gpio;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
dssdev = &ddata->dssdev;
dssdev->ops.atv = &opa362_atv_ops;
dssdev->dev = &pdev->dev;
@@ -214,20 +212,16 @@ static int opa362_probe(struct platform_device *pdev)
r = omapdss_register_output(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register output\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit opa362_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_output(&ddata->dssdev);
@@ -239,8 +233,6 @@ static int __exit opa362_remove(struct platform_device *pdev)
if (omapdss_device_is_connected(dssdev))
opa362_disconnect(dssdev, dssdev->dst);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 947295f9..ed7ae38 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -1,7 +1,7 @@
/*
* TFP410 DPI-to-DVI encoder driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -32,19 +32,28 @@ static int tfp410_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return -EBUSY;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
dst->src = dssdev;
dssdev->dst = dst;
+ ddata->in = in;
return 0;
}
@@ -66,6 +75,9 @@ static void tfp410_disconnect(struct omap_dss_device *dssdev,
dssdev->dst = NULL;
in->ops.dpi->disconnect(in, &ddata->dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int tfp410_enable(struct omap_dss_device *dssdev)
@@ -165,7 +177,6 @@ static int tfp410_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
int gpio;
gpio = of_get_named_gpio(node, "powerdown-gpios", 0);
@@ -173,18 +184,11 @@ static int tfp410_probe_of(struct platform_device *pdev)
if (gpio_is_valid(gpio) || gpio == -ENOENT) {
ddata->pd_gpio = gpio;
} else {
- dev_err(&pdev->dev, "failed to parse PD gpio\n");
+ if (gpio != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to parse PD gpio\n");
return gpio;
}
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -200,9 +204,6 @@ static int tfp410_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (!pdev->dev.of_node)
- return -ENODEV;
-
r = tfp410_probe_of(pdev);
if (r)
return r;
@@ -213,7 +214,7 @@ static int tfp410_probe(struct platform_device *pdev)
if (r) {
dev_err(&pdev->dev, "Failed to request PD GPIO %d\n",
ddata->pd_gpio);
- goto err_gpio;
+ return r;
}
}
@@ -228,21 +229,16 @@ static int tfp410_probe(struct platform_device *pdev)
r = omapdss_register_output(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register output\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit tfp410_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_output(&ddata->dssdev);
@@ -254,8 +250,6 @@ static int __exit tfp410_remove(struct platform_device *pdev)
if (omapdss_device_is_connected(dssdev))
tfp410_disconnect(dssdev, dssdev->dst);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index e3d98d7..d275bf1 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -1,7 +1,7 @@
/*
* TPD12S015 HDMI ESD protection & level shifter chip driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -40,12 +40,20 @@ static int tpd_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.hdmi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
dst->src = dssdev;
dssdev->dst = dst;
@@ -56,6 +64,7 @@ static int tpd_connect(struct omap_dss_device *dssdev,
/* DC-DC converter needs at max 300us to get to 90% of 5V */
udelay(300);
+ ddata->in = in;
return 0;
}
@@ -77,6 +86,9 @@ static void tpd_disconnect(struct omap_dss_device *dssdev,
dssdev->dst = NULL;
in->ops.hdmi->disconnect(in, &ddata->dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int tpd_enable(struct omap_dss_device *dssdev)
@@ -269,23 +281,6 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int tpd_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
-
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
- return 0;
-}
-
static int tpd_probe(struct platform_device *pdev)
{
struct omap_dss_device *in, *dssdev;
@@ -299,37 +294,24 @@ static int tpd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (!pdev->dev.of_node)
- return -ENODEV;
-
- r = tpd_probe_of(pdev);
- if (r)
- return r;
-
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- goto err_gpio;
- }
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
ddata->ct_cp_hpd_gpio = gpio;
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- goto err_gpio;
- }
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
ddata->ls_oe_gpio = gpio;
gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
GPIOD_IN);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- goto err_gpio;
- }
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
ddata->hpd_gpio = gpio;
@@ -340,7 +322,7 @@ static int tpd_probe(struct platform_device *pdev)
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"tpd12s015 hpd", ddata);
if (r)
- goto err_gpio;
+ return r;
dssdev = &ddata->dssdev;
dssdev->ops.hdmi = &tpd_hdmi_ops;
@@ -355,21 +337,16 @@ static int tpd_probe(struct platform_device *pdev)
r = omapdss_register_output(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register output\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit tpd_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_output(&ddata->dssdev);
@@ -381,8 +358,6 @@ static int __exit tpd_remove(struct platform_device *pdev)
if (omapdss_device_is_connected(dssdev))
tpd_disconnect(dssdev, dssdev->dst);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index e065f7e..6cbf570 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -1,7 +1,7 @@
/*
* Generic MIPI DPI Panel Driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -38,16 +38,25 @@ struct panel_drv_data {
static int panel_dpi_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -60,6 +69,9 @@ static void panel_dpi_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int panel_dpi_enable(struct omap_dss_device *dssdev)
@@ -87,11 +99,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
}
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-
- if (ddata->backlight) {
- ddata->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(ddata->backlight);
- }
+ backlight_enable(ddata->backlight);
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
@@ -106,10 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
if (!omapdss_device_is_enabled(dssdev))
return;
- if (ddata->backlight) {
- ddata->backlight->props.power = FB_BLANK_POWERDOWN;
- backlight_update_status(ddata->backlight);
- }
+ backlight_disable(ddata->backlight);
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
regulator_disable(ddata->vcc_supply);
@@ -164,8 +169,6 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
- struct device_node *bl_node;
- struct omap_dss_device *in;
int r;
struct display_timing timing;
struct gpio_desc *gpio;
@@ -190,39 +193,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
if (IS_ERR(ddata->vcc_supply))
return PTR_ERR(ddata->vcc_supply);
- bl_node = of_parse_phandle(node, "backlight", 0);
- if (bl_node) {
- ddata->backlight = of_find_backlight_by_node(bl_node);
- of_node_put(bl_node);
+ ddata->backlight = devm_of_find_backlight(&pdev->dev);
- if (!ddata->backlight)
- return -EPROBE_DEFER;
- }
+ if (IS_ERR(ddata->backlight))
+ return PTR_ERR(ddata->backlight);
r = of_get_display_timing(node, "panel-timing", &timing);
if (r) {
dev_err(&pdev->dev, "failed to get video timing\n");
- goto error_free_backlight;
+ return r;
}
videomode_from_timing(&timing, &ddata->vm);
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- r = PTR_ERR(in);
- goto error_free_backlight;
- }
-
- ddata->in = in;
-
return 0;
-
-error_free_backlight:
- if (ddata->backlight)
- put_device(&ddata->backlight->dev);
-
- return r;
}
static int panel_dpi_probe(struct platform_device *pdev)
@@ -231,9 +215,6 @@ static int panel_dpi_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (ddata == NULL)
return -ENOMEM;
@@ -254,32 +235,22 @@ static int panel_dpi_probe(struct platform_device *pdev)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit panel_dpi_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(dssdev);
panel_dpi_disable(dssdev);
panel_dpi_disconnect(dssdev);
- omap_dss_put_device(in);
-
- if (ddata->backlight)
- put_device(&ddata->backlight->dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 92c556a..428de90 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -1,7 +1,7 @@
/*
* Generic DSI Command Mode panel driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -22,9 +22,10 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
+#include <video/of_display_timing.h>
#include "../dss/omapdss.h"
@@ -49,6 +50,7 @@ struct panel_drv_data {
struct mutex lock;
struct backlight_device *bldev;
+ struct backlight_device *extbldev;
unsigned long hw_guard_end; /* next value of jiffies when we can
* issue the next sleep in/out command
@@ -56,11 +58,17 @@ struct panel_drv_data {
unsigned long hw_guard_wait; /* max guard time in jiffies */
/* panel HW configuration from DT or platform data */
- int reset_gpio;
- int ext_te_gpio;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *ext_te_gpio;
+
+ struct regulator *vpnl;
+ struct regulator *vddi;
bool use_dsi_backlight;
+ int width_mm;
+ int height_mm;
+
struct omap_dsi_pin_config pin_config;
/* runtime variables */
@@ -78,7 +86,7 @@ struct panel_drv_data {
struct workqueue_struct *workqueue;
bool ulps_enabled;
- unsigned ulps_timeout;
+ unsigned int ulps_timeout;
struct delayed_work ulps_work;
};
@@ -92,6 +100,30 @@ static int dsicm_panel_reset(struct panel_drv_data *ddata);
static void dsicm_ulps_work(struct work_struct *work);
+static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
+{
+ struct backlight_device *backlight;
+
+ if (ddata->bldev)
+ backlight = ddata->bldev;
+ else if (ddata->extbldev)
+ backlight = ddata->extbldev;
+ else
+ return;
+
+ if (enable) {
+ backlight->props.fb_blank = FB_BLANK_UNBLANK;
+ backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
+ backlight->props.power = FB_BLANK_UNBLANK;
+ } else {
+ backlight->props.fb_blank = FB_BLANK_NORMAL;
+ backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
+ }
+
+ backlight_update_status(backlight);
+}
+
static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
{
ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
@@ -255,8 +287,8 @@ static int dsicm_enter_ulps(struct panel_drv_data *ddata)
if (r)
goto err;
- if (gpio_is_valid(ddata->ext_te_gpio))
- disable_irq(gpio_to_irq(ddata->ext_te_gpio));
+ if (ddata->ext_te_gpio)
+ disable_irq(gpiod_to_irq(ddata->ext_te_gpio));
in->ops.dsi->disable(in, false, true);
@@ -297,8 +329,8 @@ static int dsicm_exit_ulps(struct panel_drv_data *ddata)
goto err2;
}
- if (gpio_is_valid(ddata->ext_te_gpio))
- enable_irq(gpio_to_irq(ddata->ext_te_gpio));
+ if (ddata->ext_te_gpio)
+ enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
dsicm_queue_ulps_work(ddata);
@@ -311,8 +343,8 @@ err2:
r = dsicm_panel_reset(ddata);
if (!r) {
- if (gpio_is_valid(ddata->ext_te_gpio))
- enable_irq(gpio_to_irq(ddata->ext_te_gpio));
+ if (ddata->ext_te_gpio)
+ enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
ddata->ulps_enabled = false;
}
err1:
@@ -335,7 +367,7 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
struct omap_dss_device *in = ddata->in;
- int r;
+ int r = 0;
int level;
if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
@@ -356,8 +388,6 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);
in->ops.dsi->bus_unlock(in);
- } else {
- r = 0;
}
mutex_unlock(&ddata->lock);
@@ -483,7 +513,7 @@ static ssize_t dsicm_show_ulps(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- unsigned t;
+ unsigned int t;
mutex_lock(&ddata->lock);
t = ddata->ulps_enabled;
@@ -530,7 +560,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- unsigned t;
+ unsigned int t;
mutex_lock(&ddata->lock);
t = ddata->ulps_timeout;
@@ -560,16 +590,13 @@ static const struct attribute_group dsicm_attr_group = {
static void dsicm_hw_reset(struct panel_drv_data *ddata)
{
- if (!gpio_is_valid(ddata->reset_gpio))
- return;
-
- gpio_set_value(ddata->reset_gpio, 1);
+ gpiod_set_value(ddata->reset_gpio, 1);
udelay(10);
/* reset the panel */
- gpio_set_value(ddata->reset_gpio, 0);
+ gpiod_set_value(ddata->reset_gpio, 0);
/* assert reset */
udelay(10);
- gpio_set_value(ddata->reset_gpio, 1);
+ gpiod_set_value(ddata->reset_gpio, 1);
/* wait after releasing reset */
usleep_range(5000, 10000);
}
@@ -589,25 +616,43 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
.lp_clk_max = 10000000,
};
+ if (ddata->vpnl) {
+ r = regulator_enable(ddata->vpnl);
+ if (r) {
+ dev_err(&ddata->pdev->dev,
+ "failed to enable VPNL: %d\n", r);
+ return r;
+ }
+ }
+
+ if (ddata->vddi) {
+ r = regulator_enable(ddata->vddi);
+ if (r) {
+ dev_err(&ddata->pdev->dev,
+ "failed to enable VDDI: %d\n", r);
+ goto err_vpnl;
+ }
+ }
+
if (ddata->pin_config.num_pins > 0) {
r = in->ops.dsi->configure_pins(in, &ddata->pin_config);
if (r) {
dev_err(&ddata->pdev->dev,
"failed to configure DSI pins\n");
- goto err0;
+ goto err_vddi;
}
}
r = in->ops.dsi->set_config(in, &dsi_config);
if (r) {
dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
- goto err0;
+ goto err_vddi;
}
r = in->ops.dsi->enable(in);
if (r) {
dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
- goto err0;
+ goto err_vddi;
}
dsicm_hw_reset(ddata);
@@ -665,7 +710,13 @@ err:
dsicm_hw_reset(ddata);
in->ops.dsi->disable(in, true, false);
-err0:
+err_vddi:
+ if (ddata->vddi)
+ regulator_disable(ddata->vddi);
+err_vpnl:
+ if (ddata->vpnl)
+ regulator_disable(ddata->vpnl);
+
return r;
}
@@ -688,6 +739,11 @@ static void dsicm_power_off(struct panel_drv_data *ddata)
in->ops.dsi->disable(in, true, false);
+ if (ddata->vddi)
+ regulator_disable(ddata->vddi);
+ if (ddata->vpnl)
+ regulator_disable(ddata->vpnl);
+
ddata->enabled = 0;
}
@@ -703,37 +759,46 @@ static int dsicm_panel_reset(struct panel_drv_data *ddata)
static int dsicm_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
struct device *dev = &ddata->pdev->dev;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dsi->connect(in, dssdev);
if (r) {
dev_err(dev, "Failed to connect to video source\n");
- return r;
+ goto err_connect;
}
- r = in->ops.dsi->request_vc(ddata->in, &ddata->channel);
+ r = in->ops.dsi->request_vc(in, &ddata->channel);
if (r) {
dev_err(dev, "failed to get virtual channel\n");
goto err_req_vc;
}
- r = in->ops.dsi->set_vc_id(ddata->in, ddata->channel, TCH);
+ r = in->ops.dsi->set_vc_id(in, ddata->channel, TCH);
if (r) {
dev_err(dev, "failed to set VC_ID\n");
goto err_vc_id;
}
+ ddata->in = in;
return 0;
err_vc_id:
- in->ops.dsi->release_vc(ddata->in, ddata->channel);
+ in->ops.dsi->release_vc(in, ddata->channel);
err_req_vc:
in->ops.dsi->disconnect(in, dssdev);
+err_connect:
+ omap_dss_put_device(in);
return r;
}
@@ -747,6 +812,9 @@ static void dsicm_disconnect(struct omap_dss_device *dssdev)
in->ops.dsi->release_vc(in, ddata->channel);
in->ops.dsi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int dsicm_enable(struct omap_dss_device *dssdev)
@@ -782,6 +850,8 @@ static int dsicm_enable(struct omap_dss_device *dssdev)
mutex_unlock(&ddata->lock);
+ dsicm_bl_power(ddata, true);
+
return 0;
err:
dev_dbg(&ddata->pdev->dev, "enable failed\n");
@@ -797,6 +867,8 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
dev_dbg(&ddata->pdev->dev, "disable\n");
+ dsicm_bl_power(ddata, false);
+
mutex_lock(&ddata->lock);
dsicm_cancel_ulps_work(ddata);
@@ -890,7 +962,7 @@ static int dsicm_update(struct omap_dss_device *dssdev,
if (r)
goto err;
- if (ddata->te_enabled && gpio_is_valid(ddata->ext_te_gpio)) {
+ if (ddata->te_enabled && ddata->ext_te_gpio) {
schedule_delayed_work(&ddata->te_timeout_work,
msecs_to_jiffies(250));
atomic_set(&ddata->do_update, 1);
@@ -937,7 +1009,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
else
r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);
- if (!gpio_is_valid(ddata->ext_te_gpio))
+ if (!ddata->ext_te_gpio)
in->ops.dsi->enable_te(in, enable);
/* possible panel bug */
@@ -1004,7 +1076,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
int r;
int first = 1;
int plen;
- unsigned buf_used = 0;
+ unsigned int buf_used = 0;
if (size < w * h * 3)
return -ENOMEM;
@@ -1099,6 +1171,45 @@ static void dsicm_ulps_work(struct work_struct *work)
mutex_unlock(&ddata->lock);
}
+static void dsicm_get_timings(struct omap_dss_device *dssdev,
+ struct videomode *vm)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *vm = ddata->vm;
+}
+
+static int dsicm_check_timings(struct omap_dss_device *dssdev,
+ struct videomode *vm)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ int ret = 0;
+
+ if (vm->hactive != ddata->vm.hactive)
+ ret = -EINVAL;
+
+ if (vm->vactive != ddata->vm.vactive)
+ ret = -EINVAL;
+
+ if (ret) {
+ dev_warn(dssdev->dev, "wrong resolution: %d x %d",
+ vm->hactive, vm->vactive);
+ dev_warn(dssdev->dev, "panel resolution: %d x %d",
+ ddata->vm.hactive, ddata->vm.vactive);
+ }
+
+ return ret;
+}
+
+static void dsicm_get_size(struct omap_dss_device *dssdev,
+ unsigned int *width, unsigned int *height)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *width = ddata->width_mm;
+ *height = ddata->height_mm;
+}
+
static struct omap_dss_driver dsicm_ops = {
.connect = dsicm_connect,
.disconnect = dsicm_disconnect,
@@ -1109,6 +1220,10 @@ static struct omap_dss_driver dsicm_ops = {
.update = dsicm_update,
.sync = dsicm_sync,
+ .get_timings = dsicm_get_timings,
+ .check_timings = dsicm_check_timings,
+ .get_size = dsicm_get_size,
+
.enable_te = dsicm_enable_te,
.get_te = dsicm_get_te,
@@ -1118,41 +1233,78 @@ static struct omap_dss_driver dsicm_ops = {
static int dsicm_probe_of(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ struct device_node *backlight;
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in;
- int gpio;
+ struct display_timing timing;
+ int err;
+
+ ddata->reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ddata->reset_gpio)) {
+ err = PTR_ERR(ddata->reset_gpio);
+ dev_err(&pdev->dev, "reset gpio request failed: %d", err);
+ return err;
+ }
- gpio = of_get_named_gpio(node, "reset-gpios", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(&pdev->dev, "failed to parse reset gpio\n");
- return gpio;
+ ddata->ext_te_gpio = devm_gpiod_get_optional(&pdev->dev, "te",
+ GPIOD_IN);
+ if (IS_ERR(ddata->ext_te_gpio)) {
+ err = PTR_ERR(ddata->ext_te_gpio);
+ dev_err(&pdev->dev, "TE gpio request failed: %d", err);
+ return err;
}
- ddata->reset_gpio = gpio;
- gpio = of_get_named_gpio(node, "te-gpios", 0);
- if (gpio_is_valid(gpio) || gpio == -ENOENT) {
- ddata->ext_te_gpio = gpio;
+ err = of_get_display_timing(node, "panel-timing", &timing);
+ if (!err) {
+ videomode_from_timing(&timing, &ddata->vm);
+ if (!ddata->vm.pixelclock)
+ ddata->vm.pixelclock =
+ ddata->vm.hactive * ddata->vm.vactive * 60;
} else {
- dev_err(&pdev->dev, "failed to parse TE gpio\n");
- return gpio;
+ dev_warn(&pdev->dev,
+ "failed to get video timing, using defaults\n");
}
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
+ ddata->width_mm = 0;
+ of_property_read_u32(node, "width-mm", &ddata->width_mm);
+
+ ddata->height_mm = 0;
+ of_property_read_u32(node, "height-mm", &ddata->height_mm);
+
+ ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl");
+ if (IS_ERR(ddata->vpnl)) {
+ err = PTR_ERR(ddata->vpnl);
+ if (err == -EPROBE_DEFER)
+ return err;
+ ddata->vpnl = NULL;
}
- ddata->in = in;
+ ddata->vddi = devm_regulator_get_optional(&pdev->dev, "vddi");
+ if (IS_ERR(ddata->vddi)) {
+ err = PTR_ERR(ddata->vddi);
+ if (err == -EPROBE_DEFER)
+ return err;
+ ddata->vddi = NULL;
+ }
- /* TODO: ulps, backlight */
+ backlight = of_parse_phandle(node, "backlight", 0);
+ if (backlight) {
+ ddata->extbldev = of_find_backlight_by_node(backlight);
+ of_node_put(backlight);
+
+ if (!ddata->extbldev)
+ return -EPROBE_DEFER;
+ } else {
+ /* assume native backlight support */
+ ddata->use_dsi_backlight = true;
+ }
+
+ /* TODO: ulps */
return 0;
}
static int dsicm_probe(struct platform_device *pdev)
{
- struct backlight_properties props;
struct panel_drv_data *ddata;
struct backlight_device *bldev = NULL;
struct device *dev = &pdev->dev;
@@ -1168,17 +1320,14 @@ static int dsicm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->pdev = pdev;
- if (!pdev->dev.of_node)
- return -ENODEV;
+ ddata->vm.hactive = 864;
+ ddata->vm.vactive = 480;
+ ddata->vm.pixelclock = 864 * 480 * 60;
r = dsicm_probe_of(pdev);
if (r)
return r;
- ddata->vm.hactive = 864;
- ddata->vm.vactive = 480;
- ddata->vm.pixelclock = 864 * 480 * 60;
-
dssdev = &ddata->dssdev;
dssdev->dev = dev;
dssdev->driver = &dsicm_ops;
@@ -1200,31 +1349,15 @@ static int dsicm_probe(struct platform_device *pdev)
atomic_set(&ddata->do_update, 0);
- if (gpio_is_valid(ddata->reset_gpio)) {
- r = devm_gpio_request_one(dev, ddata->reset_gpio,
- GPIOF_OUT_INIT_LOW, "taal rst");
- if (r) {
- dev_err(dev, "failed to request reset gpio\n");
- return r;
- }
- }
-
- if (gpio_is_valid(ddata->ext_te_gpio)) {
- r = devm_gpio_request_one(dev, ddata->ext_te_gpio,
- GPIOF_IN, "taal irq");
- if (r) {
- dev_err(dev, "GPIO request failed\n");
- return r;
- }
-
- r = devm_request_irq(dev, gpio_to_irq(ddata->ext_te_gpio),
+ if (ddata->ext_te_gpio) {
+ r = devm_request_irq(dev, gpiod_to_irq(ddata->ext_te_gpio),
dsicm_te_isr,
IRQF_TRIGGER_RISING,
"taal vsync", ddata);
if (r) {
dev_err(dev, "IRQ request failed\n");
- return r;
+ goto err_reg;
}
INIT_DEFERRABLE_WORK(&ddata->te_timeout_work,
@@ -1234,48 +1367,43 @@ static int dsicm_probe(struct platform_device *pdev)
}
ddata->workqueue = create_singlethread_workqueue("dsicm_wq");
- if (ddata->workqueue == NULL) {
- dev_err(dev, "can't create workqueue\n");
- return -ENOMEM;
+ if (!ddata->workqueue) {
+ r = -ENOMEM;
+ goto err_reg;
}
INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work);
dsicm_hw_reset(ddata);
if (ddata->use_dsi_backlight) {
- memset(&props, 0, sizeof(props));
+ struct backlight_properties props = { 0 };
props.max_brightness = 255;
-
props.type = BACKLIGHT_RAW;
- bldev = backlight_device_register(dev_name(dev),
- dev, ddata, &dsicm_bl_ops, &props);
+
+ bldev = devm_backlight_device_register(dev, dev_name(dev),
+ dev, ddata, &dsicm_bl_ops, &props);
if (IS_ERR(bldev)) {
r = PTR_ERR(bldev);
goto err_bl;
}
ddata->bldev = bldev;
-
- bldev->props.fb_blank = FB_BLANK_UNBLANK;
- bldev->props.power = FB_BLANK_UNBLANK;
- bldev->props.brightness = 255;
-
- dsicm_bl_update_status(bldev);
}
r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
if (r) {
dev_err(dev, "failed to create sysfs files\n");
- goto err_sysfs_create;
+ goto err_bl;
}
return 0;
-err_sysfs_create:
- backlight_device_unregister(bldev);
err_bl:
destroy_workqueue(ddata->workqueue);
err_reg:
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
+
return r;
}
@@ -1283,7 +1411,6 @@ static int __exit dsicm_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct backlight_device *bldev;
dev_dbg(&pdev->dev, "remove\n");
@@ -1294,14 +1421,8 @@ static int __exit dsicm_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
- bldev = ddata->bldev;
- if (bldev != NULL) {
- bldev->props.power = FB_BLANK_POWERDOWN;
- dsicm_bl_update_status(bldev);
- backlight_device_unregister(bldev);
- }
-
- omap_dss_put_device(ddata->in);
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
dsicm_cancel_ulps_work(ddata);
destroy_workqueue(ddata->workqueue);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 74d1396..7541970 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -1,7 +1,7 @@
/*
* LG.Philips LB035Q02 LCD Panel driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
* Based on a driver by: Steve Sakoman <steve@sakoman.com>
*
@@ -119,18 +119,27 @@ static void init_lb035q02_panel(struct spi_device *spi)
static int lb035q02_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
init_lb035q02_panel(ddata->spi);
+ ddata->in = in;
return 0;
}
@@ -143,6 +152,9 @@ static void lb035q02_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int lb035q02_enable(struct omap_dss_device *dssdev)
@@ -230,9 +242,7 @@ static struct omap_dss_driver lb035q02_ops = {
static int lb035q02_probe_of(struct spi_device *spi)
{
- struct device_node *node = spi->dev.of_node;
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *in;
struct gpio_desc *gpio;
gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
@@ -243,14 +253,6 @@ static int lb035q02_probe_of(struct spi_device *spi)
ddata->enable_gpio = gpio;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -268,9 +270,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
ddata->spi = spi;
- if (!spi->dev.of_node)
- return -ENODEV;
-
r = lb035q02_probe_of(spi);
if (r)
return r;
@@ -287,29 +286,22 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int lb035q02_panel_spi_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(dssdev);
lb035q02_disable(dssdev);
lb035q02_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index df8132d..9a3b27f 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -1,7 +1,7 @@
/*
* NEC NL8048HL11 Panel driver
*
- * Copyright (C) 2010 Texas Instruments Inc.
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
* Author: Erik Gilling <konkers@android.com>
* Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
@@ -115,16 +115,25 @@ static int init_nec_8048_wvga_lcd(struct spi_device *spi)
static int nec_8048_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -137,6 +146,9 @@ static void nec_8048_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int nec_8048_enable(struct omap_dss_device *dssdev)
@@ -226,7 +238,6 @@ static int nec_8048_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *in;
int gpio;
gpio = of_get_named_gpio(node, "reset-gpios", 0);
@@ -239,14 +250,6 @@ static int nec_8048_probe_of(struct spi_device *spi)
/* XXX the panel spec doesn't mention any QVGA pin?? */
ddata->qvga_gpio = -ENOENT;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -277,9 +280,6 @@ static int nec_8048_probe(struct spi_device *spi)
ddata->spi = spi;
- if (!spi->dev.of_node)
- return -ENODEV;
-
r = nec_8048_probe_of(spi);
if (r)
return r;
@@ -288,14 +288,14 @@ static int nec_8048_probe(struct spi_device *spi)
r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
GPIOF_OUT_INIT_HIGH, "lcd QVGA");
if (r)
- goto err_gpio;
+ return r;
}
if (gpio_is_valid(ddata->res_gpio)) {
r = devm_gpio_request_one(&spi->dev, ddata->res_gpio,
GPIOF_OUT_INIT_LOW, "lcd RES");
if (r)
- goto err_gpio;
+ return r;
}
ddata->vm = nec_8048_panel_vm;
@@ -310,22 +310,16 @@ static int nec_8048_probe(struct spi_device *spi)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
}
static int nec_8048_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
@@ -334,8 +328,6 @@ static int nec_8048_remove(struct spi_device *spi)
nec_8048_disable(dssdev);
nec_8048_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 98d170a..bb5b680 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -1,7 +1,7 @@
/*
* LCD panel driver for Sharp LS037V7DW01
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -61,16 +61,25 @@ static const struct videomode sharp_ls_vm = {
static int sharp_ls_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -83,6 +92,9 @@ static void sharp_ls_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int sharp_ls_enable(struct omap_dss_device *dssdev)
@@ -210,8 +222,6 @@ static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
static int sharp_ls_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
int r;
ddata->vcc = devm_regulator_get(&pdev->dev, "envdd");
@@ -245,14 +255,6 @@ static int sharp_ls_probe_of(struct platform_device *pdev)
if (r)
return r;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -268,9 +270,6 @@ static int sharp_ls_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (!pdev->dev.of_node)
- return -ENODEV;
-
r = sharp_ls_probe_of(pdev);
if (r)
return r;
@@ -287,29 +286,22 @@ static int sharp_ls_probe(struct platform_device *pdev)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit sharp_ls_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(dssdev);
sharp_ls_disable(dssdev);
sharp_ls_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 8e5bff4..92fe125 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -216,12 +216,12 @@ static void set_display_state(struct panel_drv_data *ddata, int enabled)
static int panel_enabled(struct panel_drv_data *ddata)
{
+ __be32 v;
u32 disp_status;
int enabled;
- acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS,
- (u8 *)&disp_status, 4);
- disp_status = __be32_to_cpu(disp_status);
+ acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS, (u8 *)&v, 4);
+ disp_status = __be32_to_cpu(v);
enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
dev_dbg(&ddata->spi->dev,
"LCD panel %senabled by bootloader (status 0x%04x)\n",
@@ -289,7 +289,7 @@ static void enable_backlight_ctrl(struct panel_drv_data *ddata, int enable)
acx565akm_write(ddata, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
}
-static void set_cabc_mode(struct panel_drv_data *ddata, unsigned mode)
+static void set_cabc_mode(struct panel_drv_data *ddata, unsigned int mode)
{
u16 cabc_ctrl;
@@ -303,12 +303,12 @@ static void set_cabc_mode(struct panel_drv_data *ddata, unsigned mode)
acx565akm_write(ddata, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
}
-static unsigned get_cabc_mode(struct panel_drv_data *ddata)
+static unsigned int get_cabc_mode(struct panel_drv_data *ddata)
{
return ddata->cabc_mode;
}
-static unsigned get_hw_cabc_mode(struct panel_drv_data *ddata)
+static unsigned int get_hw_cabc_mode(struct panel_drv_data *ddata)
{
u8 cabc_ctrl;
@@ -510,16 +510,25 @@ static const struct attribute_group bldev_attr_group = {
static int acx565akm_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.sdi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -532,6 +541,9 @@ static void acx565akm_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.sdi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
@@ -700,12 +712,6 @@ static int acx565akm_probe_of(struct spi_device *spi)
ddata->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
- ddata->in = omapdss_of_find_source_for_first_ep(np);
- if (IS_ERR(ddata->in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(ddata->in);
- }
-
return 0;
}
@@ -720,9 +726,6 @@ static int acx565akm_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
- if (!spi->dev.of_node)
- return -ENODEV;
-
spi->mode = SPI_MODE_3;
ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
@@ -826,7 +829,6 @@ err_sysfs:
err_reg_bl:
err_detect:
err_gpio:
- omap_dss_put_device(ddata->in);
return r;
}
@@ -834,7 +836,6 @@ static int acx565akm_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
@@ -846,8 +847,6 @@ static int acx565akm_remove(struct spi_device *spi)
acx565akm_disable(dssdev);
acx565akm_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index 0a38a0e..b5d8a00 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -169,16 +169,25 @@ enum jbt_register {
static int td028ttec1_panel_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -191,6 +200,9 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
@@ -362,23 +374,6 @@ static struct omap_dss_driver td028ttec1_ops = {
.check_timings = td028ttec1_panel_check_timings,
};
-static int td028ttec1_probe_of(struct spi_device *spi)
-{
- struct device_node *node = spi->dev.of_node;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *in;
-
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
- return 0;
-}
-
static int td028ttec1_panel_probe(struct spi_device *spi)
{
struct panel_drv_data *ddata;
@@ -404,13 +399,6 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
ddata->spi_dev = spi;
- if (!spi->dev.of_node)
- return -ENODEV;
-
- r = td028ttec1_probe_of(spi);
- if (r)
- return r;
-
ddata->vm = td028ttec1_panel_vm;
dssdev = &ddata->dssdev;
@@ -423,21 +411,16 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int td028ttec1_panel_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
@@ -446,21 +429,31 @@ static int td028ttec1_panel_remove(struct spi_device *spi)
td028ttec1_panel_disable(dssdev);
td028ttec1_panel_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
static const struct of_device_id td028ttec1_of_match[] = {
+ { .compatible = "omapdss,tpo,td028ttec1", },
+ /* keep to not break older DTB */
{ .compatible = "omapdss,toppoly,td028ttec1", },
{},
};
MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
+static const struct spi_device_id td028ttec1_ids[] = {
+ { "toppoly,td028ttec1", 0 },
+ { "tpo,td028ttec1", 0},
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td028ttec1_ids);
+
+
static struct spi_driver td028ttec1_spi_driver = {
.probe = td028ttec1_panel_probe,
.remove = td028ttec1_panel_remove,
+ .id_table = td028ttec1_ids,
.driver = {
.name = "panel-tpo-td028ttec1",
@@ -471,7 +464,6 @@ static struct spi_driver td028ttec1_spi_driver = {
module_spi_driver(td028ttec1_spi_driver);
-MODULE_ALIAS("spi:toppoly,td028ttec1");
MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index ac4a6d4..c08e22b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -340,16 +340,25 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata)
static int tpo_td043_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -362,6 +371,9 @@ static void tpo_td043_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int tpo_td043_enable(struct omap_dss_device *dssdev)
@@ -463,7 +475,6 @@ static int tpo_td043_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *in;
int gpio;
gpio = of_get_named_gpio(node, "reset-gpios", 0);
@@ -473,14 +484,6 @@ static int tpo_td043_probe_of(struct spi_device *spi)
}
ddata->nreset_gpio = gpio;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -509,9 +512,6 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->spi = spi;
- if (!spi->dev.of_node)
- return -ENODEV;
-
r = tpo_td043_probe_of(spi);
if (r)
return r;
@@ -564,7 +564,6 @@ err_reg:
err_sysfs:
err_gpio_req:
err_regulator:
- omap_dss_put_device(ddata->in);
return r;
}
@@ -572,7 +571,6 @@ static int tpo_td043_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
@@ -581,8 +579,6 @@ static int tpo_td043_remove(struct spi_device *spi)
tpo_td043_disable(dssdev);
tpo_td043_disconnect(dssdev);
- omap_dss_put_device(in);
-
sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 13e91fa..99e8cb8 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -1,12 +1,28 @@
+/*
+ * OMAP Display Subsystem Base
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/list.h>
+
+#include "dss.h"
#include "omapdss.h"
-static bool dss_initialized;
-static const struct dispc_ops *ops;
+static struct dss_device *dss_device;
static struct list_head omapdss_comp_list;
@@ -16,27 +32,27 @@ struct omapdss_comp_node {
bool dss_core_component;
};
-void omapdss_set_is_initialized(bool set)
+struct dss_device *omapdss_get_dss(void)
{
- dss_initialized = set;
+ return dss_device;
}
-EXPORT_SYMBOL(omapdss_set_is_initialized);
+EXPORT_SYMBOL(omapdss_get_dss);
-bool omapdss_is_initialized(void)
+void omapdss_set_dss(struct dss_device *dss)
{
- return dss_initialized;
+ dss_device = dss;
}
-EXPORT_SYMBOL(omapdss_is_initialized);
+EXPORT_SYMBOL(omapdss_set_dss);
-void dispc_set_ops(const struct dispc_ops *o)
+struct dispc_device *dispc_get_dispc(struct dss_device *dss)
{
- ops = o;
+ return dss->dispc;
}
-EXPORT_SYMBOL(dispc_set_ops);
+EXPORT_SYMBOL(dispc_get_dispc);
-const struct dispc_ops *dispc_get_ops(void)
+const struct dispc_ops *dispc_get_ops(struct dss_device *dss)
{
- return ops;
+ return dss->dispc_ops;
}
EXPORT_SYMBOL(dispc_get_ops);
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 197ddbc..acef7ec 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/core.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -30,38 +28,21 @@
#include "dss.h"
/* INIT */
-static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
- dss_init_platform_driver,
- dispc_init_platform_driver,
+static struct platform_driver * const omap_dss_drivers[] = {
+ &omap_dsshw_driver,
+ &omap_dispchw_driver,
#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_init_platform_driver,
+ &omap_dsihw_driver,
#endif
#ifdef CONFIG_OMAP2_DSS_VENC
- venc_init_platform_driver,
+ &omap_venchw_driver,
#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
- hdmi4_init_platform_driver,
+ &omapdss_hdmi4hw_driver,
#endif
#ifdef CONFIG_OMAP5_DSS_HDMI
- hdmi5_init_platform_driver,
-#endif
-};
-
-static void (*dss_output_drv_unreg_funcs[])(void) = {
-#ifdef CONFIG_OMAP5_DSS_HDMI
- hdmi5_uninit_platform_driver,
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
- hdmi4_uninit_platform_driver,
+ &omapdss_hdmi5hw_driver,
#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- venc_uninit_platform_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_uninit_platform_driver,
-#endif
- dispc_uninit_platform_driver,
- dss_uninit_platform_driver,
};
static struct platform_device *omap_drm_device;
@@ -69,13 +50,11 @@ static struct platform_device *omap_drm_device;
static int __init omap_dss_init(void)
{
int r;
- int i;
- for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
- r = dss_output_drv_reg_funcs[i]();
- if (r)
- goto err_reg;
- }
+ r = platform_register_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+ if (r)
+ goto err_reg;
omap_drm_device = platform_device_register_simple("omapdrm", 0, NULL, 0);
if (IS_ERR(omap_drm_device)) {
@@ -86,22 +65,18 @@ static int __init omap_dss_init(void)
return 0;
err_reg:
- for (i = ARRAY_SIZE(dss_output_drv_reg_funcs) - i;
- i < ARRAY_SIZE(dss_output_drv_reg_funcs);
- ++i)
- dss_output_drv_unreg_funcs[i]();
+ platform_unregister_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
return r;
}
static void __exit omap_dss_exit(void)
{
- int i;
-
platform_device_unregister(omap_drm_device);
- for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i)
- dss_output_drv_unreg_funcs[i]();
+ platform_unregister_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
}
module_init(omap_dss_init);
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 0f4fdb2..5e2e65e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dispc.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -49,6 +47,8 @@
#include "dss.h"
#include "dispc.h"
+struct dispc_device;
+
/* DISPC */
#define DISPC_SZ_REGS SZ_4K
@@ -58,11 +58,12 @@ enum omap_burst_size {
BURST_SIZE_X8 = 2,
};
-#define REG_GET(idx, start, end) \
- FLD_GET(dispc_read_reg(idx), start, end)
+#define REG_GET(dispc, idx, start, end) \
+ FLD_GET(dispc_read_reg(dispc, idx), start, end)
-#define REG_FLD_MOD(idx, val, start, end) \
- dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
+#define REG_FLD_MOD(dispc, idx, val, start, end) \
+ dispc_write_reg(dispc, idx, \
+ FLD_MOD(dispc_read_reg(dispc, idx), val, start, end))
/* DISPC has feature id */
enum dispc_feature_id {
@@ -107,7 +108,8 @@ struct dispc_features {
unsigned int max_downscale;
unsigned int max_line_width;
unsigned int min_pcd;
- int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
+ int (*calc_scaling)(struct dispc_device *dispc,
+ unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
u32 fourcc, bool *five_taps,
@@ -164,9 +166,12 @@ struct dispc_features {
#define DISPC_MAX_NR_FIFOS 5
#define DISPC_MAX_CHANNEL_GAMMA 4
-static struct {
+struct dispc_device {
struct platform_device *pdev;
void __iomem *base;
+ struct dss_device *dss;
+
+ struct dss_debugfs_entry *debugfs;
int irq;
irq_handler_t user_handler;
@@ -193,7 +198,7 @@ static struct {
/* DISPC_CONTROL & DISPC_CONFIG lock*/
spinlock_t control_lock;
-} dispc;
+};
enum omap_color_component {
/* used for all color formats for OMAP3 and earlier
@@ -347,313 +352,315 @@ static const struct {
},
};
-struct color_conv_coef {
- int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
- int full_range;
-};
-
-static unsigned long dispc_fclk_rate(void);
-static unsigned long dispc_core_clk_rate(void);
-static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
-static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
+static unsigned long dispc_fclk_rate(struct dispc_device *dispc);
+static unsigned long dispc_core_clk_rate(struct dispc_device *dispc);
+static unsigned long dispc_mgr_lclk_rate(struct dispc_device *dispc,
+ enum omap_channel channel);
+static unsigned long dispc_mgr_pclk_rate(struct dispc_device *dispc,
+ enum omap_channel channel);
-static unsigned long dispc_plane_pclk_rate(enum omap_plane_id plane);
-static unsigned long dispc_plane_lclk_rate(enum omap_plane_id plane);
+static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
+ enum omap_plane_id plane);
+static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
+ enum omap_plane_id plane);
-static void dispc_clear_irqstatus(u32 mask);
-static bool dispc_mgr_is_enabled(enum omap_channel channel);
-static void dispc_clear_irqstatus(u32 mask);
+static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
-static inline void dispc_write_reg(const u16 idx, u32 val)
+static inline void dispc_write_reg(struct dispc_device *dispc, u16 idx, u32 val)
{
- __raw_writel(val, dispc.base + idx);
+ __raw_writel(val, dispc->base + idx);
}
-static inline u32 dispc_read_reg(const u16 idx)
+static inline u32 dispc_read_reg(struct dispc_device *dispc, u16 idx)
{
- return __raw_readl(dispc.base + idx);
+ return __raw_readl(dispc->base + idx);
}
-static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld)
+static u32 mgr_fld_read(struct dispc_device *dispc, enum omap_channel channel,
+ enum mgr_reg_fields regfld)
{
const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
- return REG_GET(rfld.reg, rfld.high, rfld.low);
+
+ return REG_GET(dispc, rfld.reg, rfld.high, rfld.low);
}
-static void mgr_fld_write(enum omap_channel channel,
- enum mgr_reg_fields regfld, int val) {
+static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
+ enum mgr_reg_fields regfld, int val)
+{
const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
const bool need_lock = rfld.reg == DISPC_CONTROL || rfld.reg == DISPC_CONFIG;
unsigned long flags;
- if (need_lock)
- spin_lock_irqsave(&dispc.control_lock, flags);
-
- REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low);
-
- if (need_lock)
- spin_unlock_irqrestore(&dispc.control_lock, flags);
+ if (need_lock) {
+ spin_lock_irqsave(&dispc->control_lock, flags);
+ REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
+ spin_unlock_irqrestore(&dispc->control_lock, flags);
+ } else {
+ REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
+ }
}
-static int dispc_get_num_ovls(void)
+static int dispc_get_num_ovls(struct dispc_device *dispc)
{
- return dispc.feat->num_ovls;
+ return dispc->feat->num_ovls;
}
-static int dispc_get_num_mgrs(void)
+static int dispc_get_num_mgrs(struct dispc_device *dispc)
{
- return dispc.feat->num_mgrs;
+ return dispc->feat->num_mgrs;
}
-static void dispc_get_reg_field(enum dispc_feat_reg_field id,
+static void dispc_get_reg_field(struct dispc_device *dispc,
+ enum dispc_feat_reg_field id,
u8 *start, u8 *end)
{
- if (id >= dispc.feat->num_reg_fields)
+ if (id >= dispc->feat->num_reg_fields)
BUG();
- *start = dispc.feat->reg_fields[id].start;
- *end = dispc.feat->reg_fields[id].end;
+ *start = dispc->feat->reg_fields[id].start;
+ *end = dispc->feat->reg_fields[id].end;
}
-static bool dispc_has_feature(enum dispc_feature_id id)
+static bool dispc_has_feature(struct dispc_device *dispc,
+ enum dispc_feature_id id)
{
unsigned int i;
- for (i = 0; i < dispc.feat->num_features; i++) {
- if (dispc.feat->features[i] == id)
+ for (i = 0; i < dispc->feat->num_features; i++) {
+ if (dispc->feat->features[i] == id)
return true;
}
return false;
}
-#define SR(reg) \
- dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
-#define RR(reg) \
- dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])
+#define SR(dispc, reg) \
+ dispc->ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(dispc, DISPC_##reg)
+#define RR(dispc, reg) \
+ dispc_write_reg(dispc, DISPC_##reg, dispc->ctx[DISPC_##reg / sizeof(u32)])
-static void dispc_save_context(void)
+static void dispc_save_context(struct dispc_device *dispc)
{
int i, j;
DSSDBG("dispc_save_context\n");
- SR(IRQENABLE);
- SR(CONTROL);
- SR(CONFIG);
- SR(LINE_NUMBER);
- if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
- SR(GLOBAL_ALPHA);
- if (dispc_has_feature(FEAT_MGR_LCD2)) {
- SR(CONTROL2);
- SR(CONFIG2);
+ SR(dispc, IRQENABLE);
+ SR(dispc, CONTROL);
+ SR(dispc, CONFIG);
+ SR(dispc, LINE_NUMBER);
+ if (dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
+ SR(dispc, GLOBAL_ALPHA);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2)) {
+ SR(dispc, CONTROL2);
+ SR(dispc, CONFIG2);
}
- if (dispc_has_feature(FEAT_MGR_LCD3)) {
- SR(CONTROL3);
- SR(CONFIG3);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3)) {
+ SR(dispc, CONTROL3);
+ SR(dispc, CONFIG3);
}
- for (i = 0; i < dispc_get_num_mgrs(); i++) {
- SR(DEFAULT_COLOR(i));
- SR(TRANS_COLOR(i));
- SR(SIZE_MGR(i));
+ for (i = 0; i < dispc_get_num_mgrs(dispc); i++) {
+ SR(dispc, DEFAULT_COLOR(i));
+ SR(dispc, TRANS_COLOR(i));
+ SR(dispc, SIZE_MGR(i));
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
- SR(TIMING_H(i));
- SR(TIMING_V(i));
- SR(POL_FREQ(i));
- SR(DIVISORo(i));
-
- SR(DATA_CYCLE1(i));
- SR(DATA_CYCLE2(i));
- SR(DATA_CYCLE3(i));
-
- if (dispc_has_feature(FEAT_CPR)) {
- SR(CPR_COEF_R(i));
- SR(CPR_COEF_G(i));
- SR(CPR_COEF_B(i));
+ SR(dispc, TIMING_H(i));
+ SR(dispc, TIMING_V(i));
+ SR(dispc, POL_FREQ(i));
+ SR(dispc, DIVISORo(i));
+
+ SR(dispc, DATA_CYCLE1(i));
+ SR(dispc, DATA_CYCLE2(i));
+ SR(dispc, DATA_CYCLE3(i));
+
+ if (dispc_has_feature(dispc, FEAT_CPR)) {
+ SR(dispc, CPR_COEF_R(i));
+ SR(dispc, CPR_COEF_G(i));
+ SR(dispc, CPR_COEF_B(i));
}
}
- for (i = 0; i < dispc_get_num_ovls(); i++) {
- SR(OVL_BA0(i));
- SR(OVL_BA1(i));
- SR(OVL_POSITION(i));
- SR(OVL_SIZE(i));
- SR(OVL_ATTRIBUTES(i));
- SR(OVL_FIFO_THRESHOLD(i));
- SR(OVL_ROW_INC(i));
- SR(OVL_PIXEL_INC(i));
- if (dispc_has_feature(FEAT_PRELOAD))
- SR(OVL_PRELOAD(i));
+ for (i = 0; i < dispc_get_num_ovls(dispc); i++) {
+ SR(dispc, OVL_BA0(i));
+ SR(dispc, OVL_BA1(i));
+ SR(dispc, OVL_POSITION(i));
+ SR(dispc, OVL_SIZE(i));
+ SR(dispc, OVL_ATTRIBUTES(i));
+ SR(dispc, OVL_FIFO_THRESHOLD(i));
+ SR(dispc, OVL_ROW_INC(i));
+ SR(dispc, OVL_PIXEL_INC(i));
+ if (dispc_has_feature(dispc, FEAT_PRELOAD))
+ SR(dispc, OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
- SR(OVL_WINDOW_SKIP(i));
- SR(OVL_TABLE_BA(i));
+ SR(dispc, OVL_WINDOW_SKIP(i));
+ SR(dispc, OVL_TABLE_BA(i));
continue;
}
- SR(OVL_FIR(i));
- SR(OVL_PICTURE_SIZE(i));
- SR(OVL_ACCU0(i));
- SR(OVL_ACCU1(i));
+ SR(dispc, OVL_FIR(i));
+ SR(dispc, OVL_PICTURE_SIZE(i));
+ SR(dispc, OVL_ACCU0(i));
+ SR(dispc, OVL_ACCU1(i));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_H(i, j));
+ SR(dispc, OVL_FIR_COEF_H(i, j));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_HV(i, j));
+ SR(dispc, OVL_FIR_COEF_HV(i, j));
for (j = 0; j < 5; j++)
- SR(OVL_CONV_COEF(i, j));
+ SR(dispc, OVL_CONV_COEF(i, j));
- if (dispc_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(dispc, FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_V(i, j));
+ SR(dispc, OVL_FIR_COEF_V(i, j));
}
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- SR(OVL_BA0_UV(i));
- SR(OVL_BA1_UV(i));
- SR(OVL_FIR2(i));
- SR(OVL_ACCU2_0(i));
- SR(OVL_ACCU2_1(i));
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
+ SR(dispc, OVL_BA0_UV(i));
+ SR(dispc, OVL_BA1_UV(i));
+ SR(dispc, OVL_FIR2(i));
+ SR(dispc, OVL_ACCU2_0(i));
+ SR(dispc, OVL_ACCU2_1(i));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_H2(i, j));
+ SR(dispc, OVL_FIR_COEF_H2(i, j));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_HV2(i, j));
+ SR(dispc, OVL_FIR_COEF_HV2(i, j));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_V2(i, j));
+ SR(dispc, OVL_FIR_COEF_V2(i, j));
}
- if (dispc_has_feature(FEAT_ATTR2))
- SR(OVL_ATTRIBUTES2(i));
+ if (dispc_has_feature(dispc, FEAT_ATTR2))
+ SR(dispc, OVL_ATTRIBUTES2(i));
}
- if (dispc_has_feature(FEAT_CORE_CLK_DIV))
- SR(DIVISOR);
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV))
+ SR(dispc, DIVISOR);
- dispc.ctx_valid = true;
+ dispc->ctx_valid = true;
DSSDBG("context saved\n");
}
-static void dispc_restore_context(void)
+static void dispc_restore_context(struct dispc_device *dispc)
{
int i, j;
DSSDBG("dispc_restore_context\n");
- if (!dispc.ctx_valid)
+ if (!dispc->ctx_valid)
return;
- /*RR(IRQENABLE);*/
- /*RR(CONTROL);*/
- RR(CONFIG);
- RR(LINE_NUMBER);
- if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
- RR(GLOBAL_ALPHA);
- if (dispc_has_feature(FEAT_MGR_LCD2))
- RR(CONFIG2);
- if (dispc_has_feature(FEAT_MGR_LCD3))
- RR(CONFIG3);
-
- for (i = 0; i < dispc_get_num_mgrs(); i++) {
- RR(DEFAULT_COLOR(i));
- RR(TRANS_COLOR(i));
- RR(SIZE_MGR(i));
+ /*RR(dispc, IRQENABLE);*/
+ /*RR(dispc, CONTROL);*/
+ RR(dispc, CONFIG);
+ RR(dispc, LINE_NUMBER);
+ if (dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
+ RR(dispc, GLOBAL_ALPHA);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
+ RR(dispc, CONFIG2);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
+ RR(dispc, CONFIG3);
+
+ for (i = 0; i < dispc_get_num_mgrs(dispc); i++) {
+ RR(dispc, DEFAULT_COLOR(i));
+ RR(dispc, TRANS_COLOR(i));
+ RR(dispc, SIZE_MGR(i));
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
- RR(TIMING_H(i));
- RR(TIMING_V(i));
- RR(POL_FREQ(i));
- RR(DIVISORo(i));
-
- RR(DATA_CYCLE1(i));
- RR(DATA_CYCLE2(i));
- RR(DATA_CYCLE3(i));
-
- if (dispc_has_feature(FEAT_CPR)) {
- RR(CPR_COEF_R(i));
- RR(CPR_COEF_G(i));
- RR(CPR_COEF_B(i));
+ RR(dispc, TIMING_H(i));
+ RR(dispc, TIMING_V(i));
+ RR(dispc, POL_FREQ(i));
+ RR(dispc, DIVISORo(i));
+
+ RR(dispc, DATA_CYCLE1(i));
+ RR(dispc, DATA_CYCLE2(i));
+ RR(dispc, DATA_CYCLE3(i));
+
+ if (dispc_has_feature(dispc, FEAT_CPR)) {
+ RR(dispc, CPR_COEF_R(i));
+ RR(dispc, CPR_COEF_G(i));
+ RR(dispc, CPR_COEF_B(i));
}
}
- for (i = 0; i < dispc_get_num_ovls(); i++) {
- RR(OVL_BA0(i));
- RR(OVL_BA1(i));
- RR(OVL_POSITION(i));
- RR(OVL_SIZE(i));
- RR(OVL_ATTRIBUTES(i));
- RR(OVL_FIFO_THRESHOLD(i));
- RR(OVL_ROW_INC(i));
- RR(OVL_PIXEL_INC(i));
- if (dispc_has_feature(FEAT_PRELOAD))
- RR(OVL_PRELOAD(i));
+ for (i = 0; i < dispc_get_num_ovls(dispc); i++) {
+ RR(dispc, OVL_BA0(i));
+ RR(dispc, OVL_BA1(i));
+ RR(dispc, OVL_POSITION(i));
+ RR(dispc, OVL_SIZE(i));
+ RR(dispc, OVL_ATTRIBUTES(i));
+ RR(dispc, OVL_FIFO_THRESHOLD(i));
+ RR(dispc, OVL_ROW_INC(i));
+ RR(dispc, OVL_PIXEL_INC(i));
+ if (dispc_has_feature(dispc, FEAT_PRELOAD))
+ RR(dispc, OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
- RR(OVL_WINDOW_SKIP(i));
- RR(OVL_TABLE_BA(i));
+ RR(dispc, OVL_WINDOW_SKIP(i));
+ RR(dispc, OVL_TABLE_BA(i));
continue;
}
- RR(OVL_FIR(i));
- RR(OVL_PICTURE_SIZE(i));
- RR(OVL_ACCU0(i));
- RR(OVL_ACCU1(i));
+ RR(dispc, OVL_FIR(i));
+ RR(dispc, OVL_PICTURE_SIZE(i));
+ RR(dispc, OVL_ACCU0(i));
+ RR(dispc, OVL_ACCU1(i));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_H(i, j));
+ RR(dispc, OVL_FIR_COEF_H(i, j));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_HV(i, j));
+ RR(dispc, OVL_FIR_COEF_HV(i, j));
for (j = 0; j < 5; j++)
- RR(OVL_CONV_COEF(i, j));
+ RR(dispc, OVL_CONV_COEF(i, j));
- if (dispc_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(dispc, FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_V(i, j));
+ RR(dispc, OVL_FIR_COEF_V(i, j));
}
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- RR(OVL_BA0_UV(i));
- RR(OVL_BA1_UV(i));
- RR(OVL_FIR2(i));
- RR(OVL_ACCU2_0(i));
- RR(OVL_ACCU2_1(i));
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
+ RR(dispc, OVL_BA0_UV(i));
+ RR(dispc, OVL_BA1_UV(i));
+ RR(dispc, OVL_FIR2(i));
+ RR(dispc, OVL_ACCU2_0(i));
+ RR(dispc, OVL_ACCU2_1(i));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_H2(i, j));
+ RR(dispc, OVL_FIR_COEF_H2(i, j));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_HV2(i, j));
+ RR(dispc, OVL_FIR_COEF_HV2(i, j));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_V2(i, j));
+ RR(dispc, OVL_FIR_COEF_V2(i, j));
}
- if (dispc_has_feature(FEAT_ATTR2))
- RR(OVL_ATTRIBUTES2(i));
+ if (dispc_has_feature(dispc, FEAT_ATTR2))
+ RR(dispc, OVL_ATTRIBUTES2(i));
}
- if (dispc_has_feature(FEAT_CORE_CLK_DIV))
- RR(DIVISOR);
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV))
+ RR(dispc, DIVISOR);
/* enable last, because LCD & DIGIT enable are here */
- RR(CONTROL);
- if (dispc_has_feature(FEAT_MGR_LCD2))
- RR(CONTROL2);
- if (dispc_has_feature(FEAT_MGR_LCD3))
- RR(CONTROL3);
+ RR(dispc, CONTROL);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
+ RR(dispc, CONTROL2);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
+ RR(dispc, CONTROL3);
/* clear spurious SYNC_LOST_DIGIT interrupts */
- dispc_clear_irqstatus(DISPC_IRQ_SYNC_LOST_DIGIT);
+ dispc_clear_irqstatus(dispc, DISPC_IRQ_SYNC_LOST_DIGIT);
/*
* enable last so IRQs won't trigger before
* the context is fully restored
*/
- RR(IRQENABLE);
+ RR(dispc, IRQENABLE);
DSSDBG("context restored\n");
}
@@ -661,146 +668,159 @@ static void dispc_restore_context(void)
#undef SR
#undef RR
-int dispc_runtime_get(void)
+int dispc_runtime_get(struct dispc_device *dispc)
{
int r;
DSSDBG("dispc_runtime_get\n");
- r = pm_runtime_get_sync(&dispc.pdev->dev);
+ r = pm_runtime_get_sync(&dispc->pdev->dev);
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
-void dispc_runtime_put(void)
+void dispc_runtime_put(struct dispc_device *dispc)
{
int r;
DSSDBG("dispc_runtime_put\n");
- r = pm_runtime_put_sync(&dispc.pdev->dev);
+ r = pm_runtime_put_sync(&dispc->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
-static u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
+static u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+ enum omap_channel channel)
{
return mgr_desc[channel].vsync_irq;
}
-static u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
+static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc.feat->no_framedone_tv)
+ if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc->feat->no_framedone_tv)
return 0;
return mgr_desc[channel].framedone_irq;
}
-static u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel)
+static u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+ enum omap_channel channel)
{
return mgr_desc[channel].sync_lost_irq;
}
-u32 dispc_wb_get_framedone_irq(void)
+static u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
{
return DISPC_IRQ_FRAMEDONEWB;
}
-static void dispc_mgr_enable(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable)
{
- mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_ENABLE, enable);
/* flush posted write */
- mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
+ mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
}
-static bool dispc_mgr_is_enabled(enum omap_channel channel)
+static bool dispc_mgr_is_enabled(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
+ return !!mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
}
-static bool dispc_mgr_go_busy(enum omap_channel channel)
+static bool dispc_mgr_go_busy(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
+ return mgr_fld_read(dispc, channel, DISPC_MGR_FLD_GO) == 1;
}
-static void dispc_mgr_go(enum omap_channel channel)
+static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
{
- WARN_ON(!dispc_mgr_is_enabled(channel));
- WARN_ON(dispc_mgr_go_busy(channel));
+ WARN_ON(!dispc_mgr_is_enabled(dispc, channel));
+ WARN_ON(dispc_mgr_go_busy(dispc, channel));
DSSDBG("GO %s\n", mgr_desc[channel].name);
- mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1);
}
-bool dispc_wb_go_busy(void)
+static bool dispc_wb_go_busy(struct dispc_device *dispc)
{
- return REG_GET(DISPC_CONTROL2, 6, 6) == 1;
+ return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
}
-void dispc_wb_go(void)
+static void dispc_wb_go(struct dispc_device *dispc)
{
enum omap_plane_id plane = OMAP_DSS_WB;
bool enable, go;
- enable = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
+ enable = REG_GET(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
if (!enable)
return;
- go = REG_GET(DISPC_CONTROL2, 6, 6) == 1;
+ go = REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
if (go) {
DSSERR("GO bit not down for WB\n");
return;
}
- REG_FLD_MOD(DISPC_CONTROL2, 1, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_CONTROL2, 1, 6, 6);
}
-static void dispc_ovl_write_firh_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firh_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
- dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_H(plane, reg), value);
}
-static void dispc_ovl_write_firhv_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firhv_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
- dispc_write_reg(DISPC_OVL_FIR_COEF_HV(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_HV(plane, reg), value);
}
-static void dispc_ovl_write_firv_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firv_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
- dispc_write_reg(DISPC_OVL_FIR_COEF_V(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_V(plane, reg), value);
}
-static void dispc_ovl_write_firh2_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firh2_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_write_reg(DISPC_OVL_FIR_COEF_H2(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_H2(plane, reg), value);
}
-static void dispc_ovl_write_firhv2_reg(enum omap_plane_id plane, int reg,
- u32 value)
+static void dispc_ovl_write_firhv2_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
+ u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_write_reg(DISPC_OVL_FIR_COEF_HV2(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_HV2(plane, reg), value);
}
-static void dispc_ovl_write_firv2_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firv2_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_V2(plane, reg), value);
}
-static void dispc_ovl_set_scale_coef(enum omap_plane_id plane, int fir_hinc,
- int fir_vinc, int five_taps,
- enum omap_color_component color_comp)
+static void dispc_ovl_set_scale_coef(struct dispc_device *dispc,
+ enum omap_plane_id plane, int fir_hinc,
+ int fir_vinc, int five_taps,
+ enum omap_color_component color_comp)
{
const struct dispc_coef *h_coef, *v_coef;
int i;
@@ -821,11 +841,11 @@ static void dispc_ovl_set_scale_coef(enum omap_plane_id plane, int fir_hinc,
| FLD_VAL(v_coef[i].hc3_vc2, 31, 24);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
- dispc_ovl_write_firh_reg(plane, i, h);
- dispc_ovl_write_firhv_reg(plane, i, hv);
+ dispc_ovl_write_firh_reg(dispc, plane, i, h);
+ dispc_ovl_write_firhv_reg(dispc, plane, i, hv);
} else {
- dispc_ovl_write_firh2_reg(plane, i, h);
- dispc_ovl_write_firhv2_reg(plane, i, hv);
+ dispc_ovl_write_firh2_reg(dispc, plane, i, h);
+ dispc_ovl_write_firhv2_reg(dispc, plane, i, hv);
}
}
@@ -836,72 +856,113 @@ static void dispc_ovl_set_scale_coef(enum omap_plane_id plane, int fir_hinc,
v = FLD_VAL(v_coef[i].hc0_vc00, 7, 0)
| FLD_VAL(v_coef[i].hc4_vc22, 15, 8);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
- dispc_ovl_write_firv_reg(plane, i, v);
+ dispc_ovl_write_firv_reg(dispc, plane, i, v);
else
- dispc_ovl_write_firv2_reg(plane, i, v);
+ dispc_ovl_write_firv2_reg(dispc, plane, i, v);
}
}
}
+struct csc_coef_yuv2rgb {
+ int ry, rcb, rcr, gy, gcb, gcr, by, bcb, bcr;
+ bool full_range;
+};
-static void dispc_ovl_write_color_conv_coef(enum omap_plane_id plane,
- const struct color_conv_coef *ct)
+struct csc_coef_rgb2yuv {
+ int yr, yg, yb, cbr, cbg, cbb, crr, crg, crb;
+ bool full_range;
+};
+
+static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ const struct csc_coef_yuv2rgb *ct)
{
#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry));
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb));
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr));
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by));
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb));
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
#undef CVAL
}
-static void dispc_setup_color_conv_coef(void)
+static void dispc_wb_write_color_conv_coef(struct dispc_device *dispc,
+ const struct csc_coef_rgb2yuv *ct)
+{
+ const enum omap_plane_id plane = OMAP_DSS_WB;
+
+#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
+
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->yg, ct->yr));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->crr, ct->yb));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->crb, ct->crg));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->cbg, ct->cbr));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->cbb));
+
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
+
+#undef CVAL
+}
+
+static void dispc_setup_color_conv_coef(struct dispc_device *dispc)
{
int i;
- int num_ovl = dispc_get_num_ovls();
- const struct color_conv_coef ctbl_bt601_5_ovl = {
- /* YUV -> RGB */
- 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
+ int num_ovl = dispc_get_num_ovls(dispc);
+
+ /* YUV -> RGB, ITU-R BT.601, limited range */
+ const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
+ 298, 0, 409, /* ry, rcb, rcr */
+ 298, -100, -208, /* gy, gcb, gcr */
+ 298, 516, 0, /* by, bcb, bcr */
+ false, /* limited range */
};
- const struct color_conv_coef ctbl_bt601_5_wb = {
- /* RGB -> YUV */
- 66, 129, 25, 112, -94, -18, -38, -74, 112, 0,
+
+ /* RGB -> YUV, ITU-R BT.601, limited range */
+ const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_lim = {
+ 66, 129, 25, /* yr, yg, yb */
+ -38, -74, 112, /* cbr, cbg, cbb */
+ 112, -94, -18, /* crr, crg, crb */
+ false, /* limited range */
};
for (i = 1; i < num_ovl; i++)
- dispc_ovl_write_color_conv_coef(i, &ctbl_bt601_5_ovl);
+ dispc_ovl_write_color_conv_coef(dispc, i, &coefs_yuv2rgb_bt601_lim);
- if (dispc.feat->has_writeback)
- dispc_ovl_write_color_conv_coef(OMAP_DSS_WB, &ctbl_bt601_5_wb);
+ if (dispc->feat->has_writeback)
+ dispc_wb_write_color_conv_coef(dispc, &coefs_rgb2yuv_bt601_lim);
}
-static void dispc_ovl_set_ba0(enum omap_plane_id plane, u32 paddr)
+static void dispc_ovl_set_ba0(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 paddr)
{
- dispc_write_reg(DISPC_OVL_BA0(plane), paddr);
+ dispc_write_reg(dispc, DISPC_OVL_BA0(plane), paddr);
}
-static void dispc_ovl_set_ba1(enum omap_plane_id plane, u32 paddr)
+static void dispc_ovl_set_ba1(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 paddr)
{
- dispc_write_reg(DISPC_OVL_BA1(plane), paddr);
+ dispc_write_reg(dispc, DISPC_OVL_BA1(plane), paddr);
}
-static void dispc_ovl_set_ba0_uv(enum omap_plane_id plane, u32 paddr)
+static void dispc_ovl_set_ba0_uv(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 paddr)
{
- dispc_write_reg(DISPC_OVL_BA0_UV(plane), paddr);
+ dispc_write_reg(dispc, DISPC_OVL_BA0_UV(plane), paddr);
}
-static void dispc_ovl_set_ba1_uv(enum omap_plane_id plane, u32 paddr)
+static void dispc_ovl_set_ba1_uv(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 paddr)
{
- dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr);
+ dispc_write_reg(dispc, DISPC_OVL_BA1_UV(plane), paddr);
}
-static void dispc_ovl_set_pos(enum omap_plane_id plane,
- enum omap_overlay_caps caps, int x, int y)
+static void dispc_ovl_set_pos(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps, int x, int y)
{
u32 val;
@@ -910,22 +971,24 @@ static void dispc_ovl_set_pos(enum omap_plane_id plane,
val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
- dispc_write_reg(DISPC_OVL_POSITION(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_POSITION(plane), val);
}
-static void dispc_ovl_set_input_size(enum omap_plane_id plane, int width,
- int height)
+static void dispc_ovl_set_input_size(struct dispc_device *dispc,
+ enum omap_plane_id plane, int width,
+ int height)
{
u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
if (plane == OMAP_DSS_GFX || plane == OMAP_DSS_WB)
- dispc_write_reg(DISPC_OVL_SIZE(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_SIZE(plane), val);
else
- dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_PICTURE_SIZE(plane), val);
}
-static void dispc_ovl_set_output_size(enum omap_plane_id plane, int width,
- int height)
+static void dispc_ovl_set_output_size(struct dispc_device *dispc,
+ enum omap_plane_id plane, int width,
+ int height)
{
u32 val;
@@ -934,64 +997,72 @@ static void dispc_ovl_set_output_size(enum omap_plane_id plane, int width,
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
if (plane == OMAP_DSS_WB)
- dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_PICTURE_SIZE(plane), val);
else
- dispc_write_reg(DISPC_OVL_SIZE(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_SIZE(plane), val);
}
-static void dispc_ovl_set_zorder(enum omap_plane_id plane,
- enum omap_overlay_caps caps, u8 zorder)
+static void dispc_ovl_set_zorder(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps, u8 zorder)
{
if ((caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
return;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26);
}
-static void dispc_ovl_enable_zorder_planes(void)
+static void dispc_ovl_enable_zorder_planes(struct dispc_device *dispc)
{
int i;
- if (!dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (!dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
return;
- for (i = 0; i < dispc_get_num_ovls(); i++)
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
+ for (i = 0; i < dispc_get_num_ovls(dispc); i++)
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
}
-static void dispc_ovl_set_pre_mult_alpha(enum omap_plane_id plane,
- enum omap_overlay_caps caps, bool enable)
+static void dispc_ovl_set_pre_mult_alpha(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps,
+ bool enable)
{
if ((caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
return;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
}
-static void dispc_ovl_setup_global_alpha(enum omap_plane_id plane,
- enum omap_overlay_caps caps, u8 global_alpha)
+static void dispc_ovl_setup_global_alpha(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps,
+ u8 global_alpha)
{
- static const unsigned shifts[] = { 0, 8, 16, 24, };
+ static const unsigned int shifts[] = { 0, 8, 16, 24, };
int shift;
if ((caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
return;
shift = shifts[plane];
- REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, shift + 7, shift);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_ALPHA, global_alpha, shift + 7, shift);
}
-static void dispc_ovl_set_pix_inc(enum omap_plane_id plane, s32 inc)
+static void dispc_ovl_set_pix_inc(struct dispc_device *dispc,
+ enum omap_plane_id plane, s32 inc)
{
- dispc_write_reg(DISPC_OVL_PIXEL_INC(plane), inc);
+ dispc_write_reg(dispc, DISPC_OVL_PIXEL_INC(plane), inc);
}
-static void dispc_ovl_set_row_inc(enum omap_plane_id plane, s32 inc)
+static void dispc_ovl_set_row_inc(struct dispc_device *dispc,
+ enum omap_plane_id plane, s32 inc)
{
- dispc_write_reg(DISPC_OVL_ROW_INC(plane), inc);
+ dispc_write_reg(dispc, DISPC_OVL_ROW_INC(plane), inc);
}
-static void dispc_ovl_set_color_mode(enum omap_plane_id plane, u32 fourcc)
+static void dispc_ovl_set_color_mode(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 fourcc)
{
u32 m = 0;
if (plane != OMAP_DSS_GFX) {
@@ -1060,7 +1131,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane_id plane, u32 fourcc)
}
}
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
static bool format_is_yuv(u32 fourcc)
@@ -1075,19 +1146,21 @@ static bool format_is_yuv(u32 fourcc)
}
}
-static void dispc_ovl_configure_burst_type(enum omap_plane_id plane,
- enum omap_dss_rotation_type rotation_type)
+static void dispc_ovl_configure_burst_type(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_dss_rotation_type rotation)
{
- if (dispc_has_feature(FEAT_BURST_2D) == 0)
+ if (dispc_has_feature(dispc, FEAT_BURST_2D) == 0)
return;
- if (rotation_type == OMAP_DSS_ROT_TILER)
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
+ if (rotation == OMAP_DSS_ROT_TILER)
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
else
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
}
-static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
+static void dispc_ovl_set_channel_out(struct dispc_device *dispc,
+ enum omap_plane_id plane,
enum omap_channel channel)
{
int shift;
@@ -1108,8 +1181,8 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
return;
}
- val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
- if (dispc_has_feature(FEAT_MGR_LCD2)) {
+ val = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2)) {
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
chan = 0;
@@ -1124,7 +1197,7 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
chan2 = 1;
break;
case OMAP_DSS_CHANNEL_LCD3:
- if (dispc_has_feature(FEAT_MGR_LCD3)) {
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3)) {
chan = 0;
chan2 = 2;
} else {
@@ -1146,10 +1219,11 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
} else {
val = FLD_MOD(val, channel, shift, shift);
}
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), val);
}
-static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane_id plane)
+static enum omap_channel dispc_ovl_get_channel_out(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
int shift;
u32 val;
@@ -1168,12 +1242,12 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane_id plane)
return 0;
}
- val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ val = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
if (FLD_GET(val, shift, shift) == 1)
return OMAP_DSS_CHANNEL_DIGIT;
- if (!dispc_has_feature(FEAT_MGR_LCD2))
+ if (!dispc_has_feature(dispc, FEAT_MGR_LCD2))
return OMAP_DSS_CHANNEL_LCD;
switch (FLD_GET(val, 31, 30)) {
@@ -1189,47 +1263,44 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane_id plane)
}
}
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel)
-{
- enum omap_plane_id plane = OMAP_DSS_WB;
-
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), channel, 18, 16);
-}
-
-static void dispc_ovl_set_burst_size(enum omap_plane_id plane,
- enum omap_burst_size burst_size)
+static void dispc_ovl_set_burst_size(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_burst_size burst_size)
{
- static const unsigned shifts[] = { 6, 14, 14, 14, 14, };
+ static const unsigned int shifts[] = { 6, 14, 14, 14, 14, };
int shift;
shift = shifts[plane];
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), burst_size,
+ shift + 1, shift);
}
-static void dispc_configure_burst_sizes(void)
+static void dispc_configure_burst_sizes(struct dispc_device *dispc)
{
int i;
const int burst_size = BURST_SIZE_X8;
/* Configure burst size always to maximum size */
- for (i = 0; i < dispc_get_num_ovls(); ++i)
- dispc_ovl_set_burst_size(i, burst_size);
- if (dispc.feat->has_writeback)
- dispc_ovl_set_burst_size(OMAP_DSS_WB, burst_size);
+ for (i = 0; i < dispc_get_num_ovls(dispc); ++i)
+ dispc_ovl_set_burst_size(dispc, i, burst_size);
+ if (dispc->feat->has_writeback)
+ dispc_ovl_set_burst_size(dispc, OMAP_DSS_WB, burst_size);
}
-static u32 dispc_ovl_get_burst_size(enum omap_plane_id plane)
+static u32 dispc_ovl_get_burst_size(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
/* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
- return dispc.feat->burst_size_unit * 8;
+ return dispc->feat->burst_size_unit * 8;
}
-static bool dispc_ovl_color_mode_supported(enum omap_plane_id plane, u32 fourcc)
+static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 fourcc)
{
const u32 *modes;
unsigned int i;
- modes = dispc.feat->supported_color_modes[plane];
+ modes = dispc->feat->supported_color_modes[plane];
for (i = 0; modes[i]; ++i) {
if (modes[i] == fourcc)
@@ -1239,21 +1310,24 @@ static bool dispc_ovl_color_mode_supported(enum omap_plane_id plane, u32 fourcc)
return false;
}
-static const u32 *dispc_ovl_get_color_modes(enum omap_plane_id plane)
+static const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
- return dispc.feat->supported_color_modes[plane];
+ return dispc->feat->supported_color_modes[plane];
}
-static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_cpr(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable)
{
if (channel == OMAP_DSS_CHANNEL_DIGIT)
return;
- mgr_fld_write(channel, DISPC_MGR_FLD_CPR, enable);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_CPR, enable);
}
-static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
- const struct omap_dss_cpr_coefs *coefs)
+static void dispc_mgr_set_cpr_coef(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct omap_dss_cpr_coefs *coefs)
{
u32 coef_r, coef_g, coef_b;
@@ -1267,48 +1341,50 @@ static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) |
FLD_VAL(coefs->bb, 9, 0);
- dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r);
- dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g);
- dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b);
+ dispc_write_reg(dispc, DISPC_CPR_COEF_R(channel), coef_r);
+ dispc_write_reg(dispc, DISPC_CPR_COEF_G(channel), coef_g);
+ dispc_write_reg(dispc, DISPC_CPR_COEF_B(channel), coef_b);
}
-static void dispc_ovl_set_vid_color_conv(enum omap_plane_id plane,
- bool enable)
+static void dispc_ovl_set_vid_color_conv(struct dispc_device *dispc,
+ enum omap_plane_id plane, bool enable)
{
u32 val;
BUG_ON(plane == OMAP_DSS_GFX);
- val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ val = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
val = FLD_MOD(val, enable, 9, 9);
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), val);
}
-static void dispc_ovl_enable_replication(enum omap_plane_id plane,
- enum omap_overlay_caps caps, bool enable)
+static void dispc_ovl_enable_replication(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps,
+ bool enable)
{
- static const unsigned shifts[] = { 5, 10, 10, 10 };
+ static const unsigned int shifts[] = { 5, 10, 10, 10 };
int shift;
if ((caps & OMAP_DSS_OVL_CAP_REPLICATION) == 0)
return;
shift = shifts[plane];
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
}
-static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
- u16 height)
+static void dispc_mgr_set_size(struct dispc_device *dispc,
+ enum omap_channel channel, u16 width, u16 height)
{
u32 val;
- val = FLD_VAL(height - 1, dispc.feat->mgr_height_start, 16) |
- FLD_VAL(width - 1, dispc.feat->mgr_width_start, 0);
+ val = FLD_VAL(height - 1, dispc->feat->mgr_height_start, 16) |
+ FLD_VAL(width - 1, dispc->feat->mgr_width_start, 0);
- dispc_write_reg(DISPC_SIZE_MGR(channel), val);
+ dispc_write_reg(dispc, DISPC_SIZE_MGR(channel), val);
}
-static void dispc_init_fifos(void)
+static void dispc_init_fifos(struct dispc_device *dispc)
{
u32 size;
int fifo;
@@ -1316,20 +1392,21 @@ static void dispc_init_fifos(void)
u32 unit;
int i;
- unit = dispc.feat->buffer_size_unit;
+ unit = dispc->feat->buffer_size_unit;
- dispc_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIFOSIZE, &start, &end);
- for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) {
- size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(fifo), start, end);
+ for (fifo = 0; fifo < dispc->feat->num_fifos; ++fifo) {
+ size = REG_GET(dispc, DISPC_OVL_FIFO_SIZE_STATUS(fifo),
+ start, end);
size *= unit;
- dispc.fifo_size[fifo] = size;
+ dispc->fifo_size[fifo] = size;
/*
* By default fifos are mapped directly to overlays, fifo 0 to
* ovl 0, fifo 1 to ovl 1, etc.
*/
- dispc.fifo_assignment[fifo] = fifo;
+ dispc->fifo_assignment[fifo] = fifo;
}
/*
@@ -1339,68 +1416,71 @@ static void dispc_init_fifos(void)
	 * giving GFX plane a larger fifo. WB should work fine with a
* smaller fifo.
*/
- if (dispc.feat->gfx_fifo_workaround) {
+ if (dispc->feat->gfx_fifo_workaround) {
u32 v;
- v = dispc_read_reg(DISPC_GLOBAL_BUFFER);
+ v = dispc_read_reg(dispc, DISPC_GLOBAL_BUFFER);
v = FLD_MOD(v, 4, 2, 0); /* GFX BUF top to WB */
v = FLD_MOD(v, 4, 5, 3); /* GFX BUF bottom to WB */
v = FLD_MOD(v, 0, 26, 24); /* WB BUF top to GFX */
v = FLD_MOD(v, 0, 29, 27); /* WB BUF bottom to GFX */
- dispc_write_reg(DISPC_GLOBAL_BUFFER, v);
+ dispc_write_reg(dispc, DISPC_GLOBAL_BUFFER, v);
- dispc.fifo_assignment[OMAP_DSS_GFX] = OMAP_DSS_WB;
- dispc.fifo_assignment[OMAP_DSS_WB] = OMAP_DSS_GFX;
+ dispc->fifo_assignment[OMAP_DSS_GFX] = OMAP_DSS_WB;
+ dispc->fifo_assignment[OMAP_DSS_WB] = OMAP_DSS_GFX;
}
/*
* Setup default fifo thresholds.
*/
- for (i = 0; i < dispc_get_num_ovls(); ++i) {
+ for (i = 0; i < dispc_get_num_ovls(dispc); ++i) {
u32 low, high;
const bool use_fifomerge = false;
const bool manual_update = false;
- dispc_ovl_compute_fifo_thresholds(i, &low, &high,
- use_fifomerge, manual_update);
+ dispc_ovl_compute_fifo_thresholds(dispc, i, &low, &high,
+ use_fifomerge, manual_update);
- dispc_ovl_set_fifo_threshold(i, low, high);
+ dispc_ovl_set_fifo_threshold(dispc, i, low, high);
}
- if (dispc.feat->has_writeback) {
+ if (dispc->feat->has_writeback) {
u32 low, high;
const bool use_fifomerge = false;
const bool manual_update = false;
- dispc_ovl_compute_fifo_thresholds(OMAP_DSS_WB, &low, &high,
- use_fifomerge, manual_update);
+ dispc_ovl_compute_fifo_thresholds(dispc, OMAP_DSS_WB,
+ &low, &high, use_fifomerge,
+ manual_update);
- dispc_ovl_set_fifo_threshold(OMAP_DSS_WB, low, high);
+ dispc_ovl_set_fifo_threshold(dispc, OMAP_DSS_WB, low, high);
}
}
-static u32 dispc_ovl_get_fifo_size(enum omap_plane_id plane)
+static u32 dispc_ovl_get_fifo_size(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
int fifo;
u32 size = 0;
- for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) {
- if (dispc.fifo_assignment[fifo] == plane)
- size += dispc.fifo_size[fifo];
+ for (fifo = 0; fifo < dispc->feat->num_fifos; ++fifo) {
+ if (dispc->fifo_assignment[fifo] == plane)
+ size += dispc->fifo_size[fifo];
}
return size;
}
-void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
- u32 high)
+void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u32 low, u32 high)
{
u8 hi_start, hi_end, lo_start, lo_end;
u32 unit;
- unit = dispc.feat->buffer_size_unit;
+ unit = dispc->feat->buffer_size_unit;
WARN_ON(low % unit != 0);
WARN_ON(high % unit != 0);
@@ -1408,18 +1488,20 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
low /= unit;
high /= unit;
- dispc_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
- dispc_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIFOHIGHTHRESHOLD,
+ &hi_start, &hi_end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIFOLOWTHRESHOLD,
+ &lo_start, &lo_end);
DSSDBG("fifo(%d) threshold (bytes), old %u/%u, new %u/%u\n",
plane,
- REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
+ REG_GET(dispc, DISPC_OVL_FIFO_THRESHOLD(plane),
lo_start, lo_end) * unit,
- REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
+ REG_GET(dispc, DISPC_OVL_FIFO_THRESHOLD(plane),
hi_start, hi_end) * unit,
low * unit, high * unit);
- dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
+ dispc_write_reg(dispc, DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
@@ -1428,42 +1510,43 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
* large for the preload field, set the threshold to the maximum value
* that can be held by the preload register
*/
- if (dispc_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload &&
- plane != OMAP_DSS_WB)
- dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu));
+ if (dispc_has_feature(dispc, FEAT_PRELOAD) &&
+ dispc->feat->set_max_preload && plane != OMAP_DSS_WB)
+ dispc_write_reg(dispc, DISPC_OVL_PRELOAD(plane),
+ min(high, 0xfffu));
}
-void dispc_enable_fifomerge(bool enable)
+void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable)
{
- if (!dispc_has_feature(FEAT_FIFO_MERGE)) {
+ if (!dispc_has_feature(dispc, FEAT_FIFO_MERGE)) {
WARN_ON(enable);
return;
}
DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
- REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, enable ? 1 : 0, 14, 14);
}
-void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
- u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
- bool manual_update)
+void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u32 *fifo_low, u32 *fifo_high,
+ bool use_fifomerge, bool manual_update)
{
/*
* All sizes are in bytes. Both the buffer and burst are made of
* buffer_units, and the fifo thresholds must be buffer_unit aligned.
*/
-
- unsigned buf_unit = dispc.feat->buffer_size_unit;
- unsigned ovl_fifo_size, total_fifo_size, burst_size;
+ unsigned int buf_unit = dispc->feat->buffer_size_unit;
+ unsigned int ovl_fifo_size, total_fifo_size, burst_size;
int i;
- burst_size = dispc_ovl_get_burst_size(plane);
- ovl_fifo_size = dispc_ovl_get_fifo_size(plane);
+ burst_size = dispc_ovl_get_burst_size(dispc, plane);
+ ovl_fifo_size = dispc_ovl_get_fifo_size(dispc, plane);
if (use_fifomerge) {
total_fifo_size = 0;
- for (i = 0; i < dispc_get_num_ovls(); ++i)
- total_fifo_size += dispc_ovl_get_fifo_size(i);
+ for (i = 0; i < dispc_get_num_ovls(dispc); ++i)
+ total_fifo_size += dispc_ovl_get_fifo_size(dispc, i);
} else {
total_fifo_size = ovl_fifo_size;
}
@@ -1474,7 +1557,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
* combined fifo size
*/
- if (manual_update && dispc_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
+ if (manual_update && dispc_has_feature(dispc, FEAT_OMAP3_DSI_FIFO_BUG)) {
*fifo_low = ovl_fifo_size - burst_size * 2;
*fifo_high = total_fifo_size - burst_size;
} else if (plane == OMAP_DSS_WB) {
@@ -1491,7 +1574,8 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
}
}
-static void dispc_ovl_set_mflag(enum omap_plane_id plane, bool enable)
+static void dispc_ovl_set_mflag(struct dispc_device *dispc,
+ enum omap_plane_id plane, bool enable)
{
int bit;
@@ -1500,17 +1584,18 @@ static void dispc_ovl_set_mflag(enum omap_plane_id plane, bool enable)
else
bit = 23;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
}
-static void dispc_ovl_set_mflag_threshold(enum omap_plane_id plane,
- int low, int high)
+static void dispc_ovl_set_mflag_threshold(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ int low, int high)
{
- dispc_write_reg(DISPC_OVL_MFLAG_THRESHOLD(plane),
+ dispc_write_reg(dispc, DISPC_OVL_MFLAG_THRESHOLD(plane),
FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
}
-static void dispc_init_mflag(void)
+static void dispc_init_mflag(struct dispc_device *dispc)
{
int i;
@@ -1524,16 +1609,16 @@ static void dispc_init_mflag(void)
*
* As a work-around, set force MFLAG to always on.
*/
- dispc_write_reg(DISPC_GLOBAL_MFLAG_ATTRIBUTE,
+ dispc_write_reg(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE,
(1 << 0) | /* MFLAG_CTRL = force always on */
(0 << 2)); /* MFLAG_START = disable */
- for (i = 0; i < dispc_get_num_ovls(); ++i) {
- u32 size = dispc_ovl_get_fifo_size(i);
- u32 unit = dispc.feat->buffer_size_unit;
+ for (i = 0; i < dispc_get_num_ovls(dispc); ++i) {
+ u32 size = dispc_ovl_get_fifo_size(dispc, i);
+ u32 unit = dispc->feat->buffer_size_unit;
u32 low, high;
- dispc_ovl_set_mflag(i, true);
+ dispc_ovl_set_mflag(dispc, i, true);
/*
		 * Simulation team suggests below thresholds:
@@ -1544,15 +1629,15 @@ static void dispc_init_mflag(void)
low = size * 4 / 8 / unit;
high = size * 5 / 8 / unit;
- dispc_ovl_set_mflag_threshold(i, low, high);
+ dispc_ovl_set_mflag_threshold(dispc, i, low, high);
}
- if (dispc.feat->has_writeback) {
- u32 size = dispc_ovl_get_fifo_size(OMAP_DSS_WB);
- u32 unit = dispc.feat->buffer_size_unit;
+ if (dispc->feat->has_writeback) {
+ u32 size = dispc_ovl_get_fifo_size(dispc, OMAP_DSS_WB);
+ u32 unit = dispc->feat->buffer_size_unit;
u32 low, high;
- dispc_ovl_set_mflag(OMAP_DSS_WB, true);
+ dispc_ovl_set_mflag(dispc, OMAP_DSS_WB, true);
/*
		 * Simulation team suggests below thresholds:
@@ -1563,98 +1648,112 @@ static void dispc_init_mflag(void)
low = size * 4 / 8 / unit;
high = size * 5 / 8 / unit;
- dispc_ovl_set_mflag_threshold(OMAP_DSS_WB, low, high);
+ dispc_ovl_set_mflag_threshold(dispc, OMAP_DSS_WB, low, high);
}
}
-static void dispc_ovl_set_fir(enum omap_plane_id plane,
- int hinc, int vinc,
- enum omap_color_component color_comp)
+static void dispc_ovl_set_fir(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ int hinc, int vinc,
+ enum omap_color_component color_comp)
{
u32 val;
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
u8 hinc_start, hinc_end, vinc_start, vinc_end;
- dispc_get_reg_field(FEAT_REG_FIRHINC, &hinc_start, &hinc_end);
- dispc_get_reg_field(FEAT_REG_FIRVINC, &vinc_start, &vinc_end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIRHINC,
+ &hinc_start, &hinc_end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIRVINC,
+ &vinc_start, &vinc_end);
val = FLD_VAL(vinc, vinc_start, vinc_end) |
FLD_VAL(hinc, hinc_start, hinc_end);
- dispc_write_reg(DISPC_OVL_FIR(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_FIR(plane), val);
} else {
val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
- dispc_write_reg(DISPC_OVL_FIR2(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_FIR2(plane), val);
}
}
-static void dispc_ovl_set_vid_accu0(enum omap_plane_id plane, int haccu,
+static void dispc_ovl_set_vid_accu0(struct dispc_device *dispc,
+ enum omap_plane_id plane, int haccu,
int vaccu)
{
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
- dispc_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
- dispc_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+ dispc_get_reg_field(dispc, FEAT_REG_HORIZONTALACCU,
+ &hor_start, &hor_end);
+ dispc_get_reg_field(dispc, FEAT_REG_VERTICALACCU,
+ &vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
- dispc_write_reg(DISPC_OVL_ACCU0(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ACCU0(plane), val);
}
-static void dispc_ovl_set_vid_accu1(enum omap_plane_id plane, int haccu,
+static void dispc_ovl_set_vid_accu1(struct dispc_device *dispc,
+ enum omap_plane_id plane, int haccu,
int vaccu)
{
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
- dispc_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
- dispc_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+ dispc_get_reg_field(dispc, FEAT_REG_HORIZONTALACCU,
+ &hor_start, &hor_end);
+ dispc_get_reg_field(dispc, FEAT_REG_VERTICALACCU,
+ &vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
- dispc_write_reg(DISPC_OVL_ACCU1(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ACCU1(plane), val);
}
-static void dispc_ovl_set_vid_accu2_0(enum omap_plane_id plane, int haccu,
- int vaccu)
+static void dispc_ovl_set_vid_accu2_0(struct dispc_device *dispc,
+ enum omap_plane_id plane, int haccu,
+ int vaccu)
{
u32 val;
val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
- dispc_write_reg(DISPC_OVL_ACCU2_0(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ACCU2_0(plane), val);
}
-static void dispc_ovl_set_vid_accu2_1(enum omap_plane_id plane, int haccu,
- int vaccu)
+static void dispc_ovl_set_vid_accu2_1(struct dispc_device *dispc,
+ enum omap_plane_id plane, int haccu,
+ int vaccu)
{
u32 val;
val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
- dispc_write_reg(DISPC_OVL_ACCU2_1(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ACCU2_1(plane), val);
}
-static void dispc_ovl_set_scale_param(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height,
- u16 out_width, u16 out_height,
- bool five_taps, u8 rotation,
- enum omap_color_component color_comp)
+static void dispc_ovl_set_scale_param(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool five_taps, u8 rotation,
+ enum omap_color_component color_comp)
{
int fir_hinc, fir_vinc;
fir_hinc = 1024 * orig_width / out_width;
fir_vinc = 1024 * orig_height / out_height;
- dispc_ovl_set_scale_coef(plane, fir_hinc, fir_vinc, five_taps,
- color_comp);
- dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
+ dispc_ovl_set_scale_coef(dispc, plane, fir_hinc, fir_vinc, five_taps,
+ color_comp);
+ dispc_ovl_set_fir(dispc, plane, fir_hinc, fir_vinc, color_comp);
}
-static void dispc_ovl_set_accu_uv(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
- bool ilace, u32 fourcc, u8 rotation)
+static void dispc_ovl_set_accu_uv(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, u32 fourcc, u8 rotation)
{
int h_accu2_0, h_accu2_1;
int v_accu2_0, v_accu2_1;
@@ -1735,25 +1834,26 @@ static void dispc_ovl_set_accu_uv(enum omap_plane_id plane,
v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
- dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0);
- dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1);
+ dispc_ovl_set_vid_accu2_0(dispc, plane, h_accu2_0, v_accu2_0);
+ dispc_ovl_set_vid_accu2_1(dispc, plane, h_accu2_1, v_accu2_1);
}
-static void dispc_ovl_set_scaling_common(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height,
- u16 out_width, u16 out_height,
- bool ilace, bool five_taps,
- bool fieldmode, u32 fourcc,
- u8 rotation)
+static void dispc_ovl_set_scaling_common(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, bool five_taps,
+ bool fieldmode, u32 fourcc,
+ u8 rotation)
{
int accu0 = 0;
int accu1 = 0;
u32 l;
- dispc_ovl_set_scale_param(plane, orig_width, orig_height,
- out_width, out_height, five_taps,
- rotation, DISPC_COLOR_COMPONENT_RGB_Y);
- l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ dispc_ovl_set_scale_param(dispc, plane, orig_width, orig_height,
+ out_width, out_height, five_taps,
+ rotation, DISPC_COLOR_COMPONENT_RGB_Y);
+ l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
/* RESIZEENABLE and VERTICALTAPS */
l &= ~((0x3 << 5) | (0x1 << 21));
@@ -1762,19 +1862,19 @@ static void dispc_ovl_set_scaling_common(enum omap_plane_id plane,
l |= five_taps ? (1 << 21) : 0;
/* VRESIZECONF and HRESIZECONF */
- if (dispc_has_feature(FEAT_RESIZECONF)) {
+ if (dispc_has_feature(dispc, FEAT_RESIZECONF)) {
l &= ~(0x3 << 7);
l |= (orig_width <= out_width) ? 0 : (1 << 7);
l |= (orig_height <= out_height) ? 0 : (1 << 8);
}
/* LINEBUFFERSPLIT */
- if (dispc_has_feature(FEAT_LINEBUFFERSPLIT)) {
+ if (dispc_has_feature(dispc, FEAT_LINEBUFFERSPLIT)) {
l &= ~(0x1 << 22);
l |= five_taps ? (1 << 22) : 0;
}
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
+ dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l);
/*
* field 0 = even field = bottom field
@@ -1789,33 +1889,35 @@ static void dispc_ovl_set_scaling_common(enum omap_plane_id plane,
}
}
- dispc_ovl_set_vid_accu0(plane, 0, accu0);
- dispc_ovl_set_vid_accu1(plane, 0, accu1);
+ dispc_ovl_set_vid_accu0(dispc, plane, 0, accu0);
+ dispc_ovl_set_vid_accu1(dispc, plane, 0, accu1);
}
-static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height,
- u16 out_width, u16 out_height,
- bool ilace, bool five_taps,
- bool fieldmode, u32 fourcc,
- u8 rotation)
+static void dispc_ovl_set_scaling_uv(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, bool five_taps,
+ bool fieldmode, u32 fourcc,
+ u8 rotation)
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
bool chroma_upscale = plane != OMAP_DSS_WB;
- if (!dispc_has_feature(FEAT_HANDLE_UV_SEPARATE))
+ if (!dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE))
return;
if (!format_is_yuv(fourcc)) {
/* reset chroma resampling for RGB formats */
if (plane != OMAP_DSS_WB)
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane),
+ 0, 8, 8);
return;
}
- dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
- out_height, ilace, fourcc, rotation);
+ dispc_ovl_set_accu_uv(dispc, plane, orig_width, orig_height, out_width,
+ out_height, ilace, fourcc, rotation);
switch (fourcc) {
case DRM_FORMAT_NV12:
@@ -1857,46 +1959,43 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
if (out_height != orig_height)
scale_y = true;
- dispc_ovl_set_scale_param(plane, orig_width, orig_height,
- out_width, out_height, five_taps,
- rotation, DISPC_COLOR_COMPONENT_UV);
+ dispc_ovl_set_scale_param(dispc, plane, orig_width, orig_height,
+ out_width, out_height, five_taps,
+ rotation, DISPC_COLOR_COMPONENT_UV);
if (plane != OMAP_DSS_WB)
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane),
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane),
(scale_x || scale_y) ? 1 : 0, 8, 8);
/* set H scaling */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
/* set V scaling */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
}
-static void dispc_ovl_set_scaling(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height,
- u16 out_width, u16 out_height,
- bool ilace, bool five_taps,
- bool fieldmode, u32 fourcc,
- u8 rotation)
+static void dispc_ovl_set_scaling(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, bool five_taps,
+ bool fieldmode, u32 fourcc,
+ u8 rotation)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_ovl_set_scaling_common(plane,
- orig_width, orig_height,
- out_width, out_height,
- ilace, five_taps,
- fieldmode, fourcc,
- rotation);
+ dispc_ovl_set_scaling_common(dispc, plane, orig_width, orig_height,
+ out_width, out_height, ilace, five_taps,
+ fieldmode, fourcc, rotation);
- dispc_ovl_set_scaling_uv(plane,
- orig_width, orig_height,
- out_width, out_height,
- ilace, five_taps,
- fieldmode, fourcc,
- rotation);
+ dispc_ovl_set_scaling_uv(dispc, plane, orig_width, orig_height,
+ out_width, out_height, ilace, five_taps,
+ fieldmode, fourcc, rotation);
}
-static void dispc_ovl_set_rotation_attrs(enum omap_plane_id plane, u8 rotation,
- enum omap_dss_rotation_type rotation_type, u32 fourcc)
+static void dispc_ovl_set_rotation_attrs(struct dispc_device *dispc,
+ enum omap_plane_id plane, u8 rotation,
+ enum omap_dss_rotation_type rotation_type,
+ u32 fourcc)
{
bool row_repeat = false;
int vidrot = 0;
@@ -1950,19 +2049,20 @@ static void dispc_ovl_set_rotation_attrs(enum omap_plane_id plane, u8 rotation,
if (fourcc == DRM_FORMAT_NV12 && rotation_type != OMAP_DSS_ROT_TILER)
vidrot = 1;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
- if (dispc_has_feature(FEAT_ROWREPEATENABLE))
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
+ if (dispc_has_feature(dispc, FEAT_ROWREPEATENABLE))
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane),
row_repeat ? 1 : 0, 18, 18);
- if (dispc_ovl_color_mode_supported(plane, DRM_FORMAT_NV12)) {
+ if (dispc_ovl_color_mode_supported(dispc, plane, DRM_FORMAT_NV12)) {
bool doublestride =
fourcc == DRM_FORMAT_NV12 &&
rotation_type == OMAP_DSS_ROT_TILER &&
!drm_rotation_90_or_270(rotation);
/* DOUBLESTRIDE */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), doublestride, 22, 22);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane),
+ doublestride, 22, 22);
}
}
@@ -2008,8 +2108,8 @@ static s32 pixinc(int pixels, u8 ps)
}
static void calc_offset(u16 screen_width, u16 width,
- u32 fourcc, bool fieldmode,
- unsigned int field_offset, unsigned *offset0, unsigned *offset1,
+ u32 fourcc, bool fieldmode, unsigned int field_offset,
+ unsigned int *offset0, unsigned int *offset1,
s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim,
enum omap_dss_rotation_type rotation_type, u8 rotation)
{
@@ -2199,27 +2299,31 @@ static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
return pclk;
}
-static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
- const struct videomode *vm,
- u16 width, u16 height, u16 out_width, u16 out_height,
- u32 fourcc, bool *five_taps,
- int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
- u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
+static int dispc_ovl_calc_scaling_24xx(struct dispc_device *dispc,
+ unsigned long pclk, unsigned long lclk,
+ const struct videomode *vm,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, bool *five_taps,
+ int *x_predecim, int *y_predecim,
+ int *decim_x, int *decim_y,
+ u16 pos_x, unsigned long *core_clk,
+ bool mem_to_mem)
{
int error;
u16 in_width, in_height;
int min_factor = min(*decim_x, *decim_y);
- const int maxsinglelinewidth = dispc.feat->max_line_width;
+ const int maxsinglelinewidth = dispc->feat->max_line_width;
*five_taps = false;
do {
in_height = height / *decim_y;
in_width = width / *decim_x;
- *core_clk = dispc.feat->calc_core_clk(pclk, in_width,
+ *core_clk = dispc->feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
- *core_clk > dispc_core_clk_rate());
+ *core_clk > dispc_core_clk_rate(dispc));
if (error) {
if (*decim_x == *decim_y) {
*decim_x = min_factor;
@@ -2244,16 +2348,20 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
return 0;
}
-static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
- const struct videomode *vm,
- u16 width, u16 height, u16 out_width, u16 out_height,
- u32 fourcc, bool *five_taps,
- int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
- u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
+static int dispc_ovl_calc_scaling_34xx(struct dispc_device *dispc,
+ unsigned long pclk, unsigned long lclk,
+ const struct videomode *vm,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, bool *five_taps,
+ int *x_predecim, int *y_predecim,
+ int *decim_x, int *decim_y,
+ u16 pos_x, unsigned long *core_clk,
+ bool mem_to_mem)
{
int error;
u16 in_width, in_height;
- const int maxsinglelinewidth = dispc.feat->max_line_width;
+ const int maxsinglelinewidth = dispc->feat->max_line_width;
do {
in_height = height / *decim_y;
@@ -2270,7 +2378,7 @@ again:
in_width, in_height, out_width,
out_height, fourcc);
else
- *core_clk = dispc.feat->calc_core_clk(pclk, in_width,
+ *core_clk = dispc->feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height,
mem_to_mem);
@@ -2284,7 +2392,7 @@ again:
error = (error || in_width > maxsinglelinewidth * 2 ||
(in_width > maxsinglelinewidth && *five_taps) ||
- !*core_clk || *core_clk > dispc_core_clk_rate());
+ !*core_clk || *core_clk > dispc_core_clk_rate(dispc));
if (!error) {
/* verify that we're inside the limits of scaler */
@@ -2328,24 +2436,28 @@ again:
return 0;
}
-static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
- const struct videomode *vm,
- u16 width, u16 height, u16 out_width, u16 out_height,
- u32 fourcc, bool *five_taps,
- int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
- u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
+static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
+ unsigned long pclk, unsigned long lclk,
+ const struct videomode *vm,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, bool *five_taps,
+ int *x_predecim, int *y_predecim,
+ int *decim_x, int *decim_y,
+ u16 pos_x, unsigned long *core_clk,
+ bool mem_to_mem)
{
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
u16 in_height = height / *decim_y;
- const int maxsinglelinewidth = dispc.feat->max_line_width;
- const int maxdownscale = dispc.feat->max_downscale;
+ const int maxsinglelinewidth = dispc->feat->max_line_width;
+ const int maxdownscale = dispc->feat->max_downscale;
if (mem_to_mem) {
in_width_max = out_width * maxdownscale;
} else {
- in_width_max = dispc_core_clk_rate() /
- DIV_ROUND_UP(pclk, out_width);
+ in_width_max = dispc_core_clk_rate(dispc)
+ / DIV_ROUND_UP(pclk, out_width);
}
*decim_x = DIV_ROUND_UP(width, in_width_max);
@@ -2383,7 +2495,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
}
- *core_clk = dispc.feat->calc_core_clk(pclk, in_width, in_height,
+ *core_clk = dispc->feat->calc_core_clk(pclk, in_width, in_height,
out_width, out_height, mem_to_mem);
return 0;
}
@@ -2391,15 +2503,20 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
#define DIV_FRAC(dividend, divisor) \
((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100))
-static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
- enum omap_overlay_caps caps,
- const struct videomode *vm,
- u16 width, u16 height, u16 out_width, u16 out_height,
- u32 fourcc, bool *five_taps,
- int *x_predecim, int *y_predecim, u16 pos_x,
- enum omap_dss_rotation_type rotation_type, bool mem_to_mem)
-{
- const int maxdownscale = dispc.feat->max_downscale;
+static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ unsigned long pclk, unsigned long lclk,
+ enum omap_overlay_caps caps,
+ const struct videomode *vm,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, bool *five_taps,
+ int *x_predecim, int *y_predecim, u16 pos_x,
+ enum omap_dss_rotation_type rotation_type,
+ bool mem_to_mem)
+{
+ int maxhdownscale = dispc->feat->max_downscale;
+ int maxvdownscale = dispc->feat->max_downscale;
const int max_decim_limit = 16;
unsigned long core_clk = 0;
int decim_x, decim_y, ret;
@@ -2407,6 +2524,20 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (width == out_width && height == out_height)
return 0;
+ if (plane == OMAP_DSS_WB) {
+ switch (fourcc) {
+ case DRM_FORMAT_NV12:
+ maxhdownscale = maxvdownscale = 2;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ maxhdownscale = 2;
+ maxvdownscale = 4;
+ break;
+ default:
+ break;
+ }
+ }
if (!mem_to_mem && (pclk == 0 || vm->pixelclock == 0)) {
DSSERR("cannot calculate scaling settings: pclk is zero\n");
return -EINVAL;
@@ -2420,12 +2551,12 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
} else {
*x_predecim = max_decim_limit;
*y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
- dispc_has_feature(FEAT_BURST_2D)) ?
+ dispc_has_feature(dispc, FEAT_BURST_2D)) ?
2 : max_decim_limit;
}
- decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
- decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
+ decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxhdownscale);
+ decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxvdownscale);
if (decim_x > *x_predecim || out_width > width * 8)
return -EINVAL;
@@ -2433,10 +2564,11 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
- ret = dispc.feat->calc_scaling(pclk, lclk, vm, width, height,
- out_width, out_height, fourcc, five_taps,
- x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
- mem_to_mem);
+ ret = dispc->feat->calc_scaling(dispc, pclk, lclk, vm, width, height,
+ out_width, out_height, fourcc,
+ five_taps, x_predecim, y_predecim,
+ &decim_x, &decim_y, pos_x, &core_clk,
+ mem_to_mem);
if (ret)
return ret;
@@ -2452,13 +2584,13 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
out_height / (height / decim_y), DIV_FRAC(out_height, height / decim_y),
*five_taps ? 5 : 3,
- core_clk, dispc_core_clk_rate());
+ core_clk, dispc_core_clk_rate(dispc));
- if (!core_clk || core_clk > dispc_core_clk_rate()) {
+ if (!core_clk || core_clk > dispc_core_clk_rate(dispc)) {
DSSERR("failed to set up scaling, "
"required core clk rate = %lu Hz, "
"current core clk rate = %lu Hz\n",
- core_clk, dispc_core_clk_rate());
+ core_clk, dispc_core_clk_rate(dispc));
return -EINVAL;
}
@@ -2467,19 +2599,23 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
return 0;
}
-static int dispc_ovl_setup_common(enum omap_plane_id plane,
- enum omap_overlay_caps caps, u32 paddr, u32 p_uv_addr,
- u16 screen_width, int pos_x, int pos_y, u16 width, u16 height,
- u16 out_width, u16 out_height, u32 fourcc,
- u8 rotation, u8 zorder, u8 pre_mult_alpha,
- u8 global_alpha, enum omap_dss_rotation_type rotation_type,
- bool replication, const struct videomode *vm,
- bool mem_to_mem)
+static int dispc_ovl_setup_common(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps,
+ u32 paddr, u32 p_uv_addr,
+ u16 screen_width, int pos_x, int pos_y,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, u8 rotation, u8 zorder,
+ u8 pre_mult_alpha, u8 global_alpha,
+ enum omap_dss_rotation_type rotation_type,
+ bool replication, const struct videomode *vm,
+ bool mem_to_mem)
{
bool five_taps = true;
bool fieldmode = false;
int r, cconv = 0;
- unsigned offset0, offset1;
+ unsigned int offset0, offset1;
s32 row_inc;
s32 pix_inc;
u16 frame_width, frame_height;
@@ -2488,8 +2624,12 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
- unsigned long pclk = dispc_plane_pclk_rate(plane);
- unsigned long lclk = dispc_plane_lclk_rate(plane);
+ unsigned long pclk = dispc_plane_pclk_rate(dispc, plane);
+ unsigned long lclk = dispc_plane_lclk_rate(dispc, plane);
+
+ /* when setting up WB, dispc_plane_pclk_rate() returns 0 */
+ if (plane == OMAP_DSS_WB)
+ pclk = vm->pixelclock;
if (paddr == 0 && rotation_type != OMAP_DSS_ROT_TILER)
return -EINVAL;
@@ -2502,27 +2642,28 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
out_width = out_width == 0 ? width : out_width;
out_height = out_height == 0 ? height : out_height;
- if (ilace && height == out_height)
- fieldmode = true;
+ if (plane != OMAP_DSS_WB) {
+ if (ilace && height == out_height)
+ fieldmode = true;
- if (ilace) {
- if (fieldmode)
- in_height /= 2;
- pos_y /= 2;
- out_height /= 2;
+ if (ilace) {
+ if (fieldmode)
+ in_height /= 2;
+ pos_y /= 2;
+ out_height /= 2;
- DSSDBG("adjusting for ilace: height %d, pos_y %d, "
- "out_height %d\n", in_height, pos_y,
- out_height);
+ DSSDBG("adjusting for ilace: height %d, pos_y %d, out_height %d\n",
+ in_height, pos_y, out_height);
+ }
}
- if (!dispc_ovl_color_mode_supported(plane, fourcc))
+ if (!dispc_ovl_color_mode_supported(dispc, plane, fourcc))
return -EINVAL;
- r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
- in_height, out_width, out_height, fourcc,
- &five_taps, &x_predecim, &y_predecim, pos_x,
- rotation_type, mem_to_mem);
+ r = dispc_ovl_calc_scaling(dispc, plane, pclk, lclk, caps, vm, in_width,
+ in_height, out_width, out_height, fourcc,
+ &five_taps, &x_predecim, &y_predecim, pos_x,
+ rotation_type, mem_to_mem);
if (r)
return r;
@@ -2584,60 +2725,62 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
offset0, offset1, row_inc, pix_inc);
- dispc_ovl_set_color_mode(plane, fourcc);
+ dispc_ovl_set_color_mode(dispc, plane, fourcc);
- dispc_ovl_configure_burst_type(plane, rotation_type);
+ dispc_ovl_configure_burst_type(dispc, plane, rotation_type);
- if (dispc.feat->reverse_ilace_field_order)
+ if (dispc->feat->reverse_ilace_field_order)
swap(offset0, offset1);
- dispc_ovl_set_ba0(plane, paddr + offset0);
- dispc_ovl_set_ba1(plane, paddr + offset1);
+ dispc_ovl_set_ba0(dispc, plane, paddr + offset0);
+ dispc_ovl_set_ba1(dispc, plane, paddr + offset1);
if (fourcc == DRM_FORMAT_NV12) {
- dispc_ovl_set_ba0_uv(plane, p_uv_addr + offset0);
- dispc_ovl_set_ba1_uv(plane, p_uv_addr + offset1);
+ dispc_ovl_set_ba0_uv(dispc, plane, p_uv_addr + offset0);
+ dispc_ovl_set_ba1_uv(dispc, plane, p_uv_addr + offset1);
}
- if (dispc.feat->last_pixel_inc_missing)
+ if (dispc->feat->last_pixel_inc_missing)
row_inc += pix_inc - 1;
- dispc_ovl_set_row_inc(plane, row_inc);
- dispc_ovl_set_pix_inc(plane, pix_inc);
+ dispc_ovl_set_row_inc(dispc, plane, row_inc);
+ dispc_ovl_set_pix_inc(dispc, plane, pix_inc);
DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, in_width,
in_height, out_width, out_height);
- dispc_ovl_set_pos(plane, caps, pos_x, pos_y);
+ dispc_ovl_set_pos(dispc, plane, caps, pos_x, pos_y);
- dispc_ovl_set_input_size(plane, in_width, in_height);
+ dispc_ovl_set_input_size(dispc, plane, in_width, in_height);
if (caps & OMAP_DSS_OVL_CAP_SCALE) {
- dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
- out_height, ilace, five_taps, fieldmode,
- fourcc, rotation);
- dispc_ovl_set_output_size(plane, out_width, out_height);
- dispc_ovl_set_vid_color_conv(plane, cconv);
+ dispc_ovl_set_scaling(dispc, plane, in_width, in_height,
+ out_width, out_height, ilace, five_taps,
+ fieldmode, fourcc, rotation);
+ dispc_ovl_set_output_size(dispc, plane, out_width, out_height);
+ dispc_ovl_set_vid_color_conv(dispc, plane, cconv);
}
- dispc_ovl_set_rotation_attrs(plane, rotation, rotation_type, fourcc);
+ dispc_ovl_set_rotation_attrs(dispc, plane, rotation, rotation_type,
+ fourcc);
- dispc_ovl_set_zorder(plane, caps, zorder);
- dispc_ovl_set_pre_mult_alpha(plane, caps, pre_mult_alpha);
- dispc_ovl_setup_global_alpha(plane, caps, global_alpha);
+ dispc_ovl_set_zorder(dispc, plane, caps, zorder);
+ dispc_ovl_set_pre_mult_alpha(dispc, plane, caps, pre_mult_alpha);
+ dispc_ovl_setup_global_alpha(dispc, plane, caps, global_alpha);
- dispc_ovl_enable_replication(plane, caps, replication);
+ dispc_ovl_enable_replication(dispc, plane, caps, replication);
return 0;
}
-static int dispc_ovl_setup(enum omap_plane_id plane,
- const struct omap_overlay_info *oi,
- const struct videomode *vm, bool mem_to_mem,
- enum omap_channel channel)
+static int dispc_ovl_setup(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ const struct omap_overlay_info *oi,
+ const struct videomode *vm, bool mem_to_mem,
+ enum omap_channel channel)
{
int r;
- enum omap_overlay_caps caps = dispc.feat->overlay_caps[plane];
+ enum omap_overlay_caps caps = dispc->feat->overlay_caps[plane];
const bool replication = true;
DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->"
@@ -2646,9 +2789,9 @@ static int dispc_ovl_setup(enum omap_plane_id plane,
oi->pos_y, oi->width, oi->height, oi->out_width, oi->out_height,
oi->fourcc, oi->rotation, channel, replication);
- dispc_ovl_set_channel_out(plane, channel);
+ dispc_ovl_set_channel_out(dispc, plane, channel);
- r = dispc_ovl_setup_common(plane, caps, oi->paddr, oi->p_uv_addr,
+ r = dispc_ovl_setup_common(dispc, plane, caps, oi->paddr, oi->p_uv_addr,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->fourcc, oi->rotation,
oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
@@ -2657,8 +2800,10 @@ static int dispc_ovl_setup(enum omap_plane_id plane,
return r;
}
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct videomode *vm)
+static int dispc_wb_setup(struct dispc_device *dispc,
+ const struct omap_dss_writeback_info *wi,
+ bool mem_to_mem, const struct videomode *vm,
+ enum dss_writeback_channel channel_in)
{
int r;
u32 l;
@@ -2672,15 +2817,20 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
enum omap_overlay_caps caps =
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ in_height /= 2;
+
DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
"rot %d\n", wi->paddr, wi->p_uv_addr, in_width,
in_height, wi->width, wi->height, wi->fourcc, wi->rotation);
- r = dispc_ovl_setup_common(plane, caps, wi->paddr, wi->p_uv_addr,
+ r = dispc_ovl_setup_common(dispc, plane, caps, wi->paddr, wi->p_uv_addr,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->fourcc, wi->rotation, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
replication, vm, mem_to_mem);
+ if (r)
+ return r;
switch (wi->fourcc) {
case DRM_FORMAT_RGB565:
@@ -2699,132 +2849,162 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
}
/* setup extra DISPC_WB_ATTRIBUTES */
- l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */
+ l = FLD_MOD(l, channel_in, 18, 16); /* CHANNELIN */
l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */
if (mem_to_mem)
l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */
else
l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
+ dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l);
if (mem_to_mem) {
/* WBDELAYCOUNT */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
} else {
- int wbdelay;
+ u32 wbdelay;
+
+ if (channel_in == DSS_WB_TV_MGR)
+ wbdelay = vm->vsync_len + vm->vback_porch;
+ else
+ wbdelay = vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
- wbdelay = min(vm->vfront_porch +
- vm->vsync_len + vm->vback_porch, (u32)255);
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ wbdelay /= 2;
+
+ wbdelay = min(wbdelay, 255u);
/* WBDELAYCOUNT */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
}
- return r;
+ return 0;
}
-static int dispc_ovl_enable(enum omap_plane_id plane, bool enable)
+static bool dispc_has_writeback(struct dispc_device *dispc)
+{
+ return dispc->feat->has_writeback;
+}
+
+static int dispc_ovl_enable(struct dispc_device *dispc,
+ enum omap_plane_id plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
return 0;
}
-static enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel)
+static enum omap_dss_output_id
+dispc_mgr_get_supported_outputs(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- return dss_get_supported_outputs(channel);
+ return dss_get_supported_outputs(dispc->dss, channel);
}
-static void dispc_lcd_enable_signal_polarity(bool act_high)
+static void dispc_lcd_enable_signal_polarity(struct dispc_device *dispc,
+ bool act_high)
{
- if (!dispc_has_feature(FEAT_LCDENABLEPOL))
+ if (!dispc_has_feature(dispc, FEAT_LCDENABLEPOL))
return;
- REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
+ REG_FLD_MOD(dispc, DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
}
-void dispc_lcd_enable_signal(bool enable)
+void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable)
{
- if (!dispc_has_feature(FEAT_LCDENABLESIGNAL))
+ if (!dispc_has_feature(dispc, FEAT_LCDENABLESIGNAL))
return;
- REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
+ REG_FLD_MOD(dispc, DISPC_CONTROL, enable ? 1 : 0, 28, 28);
}
-void dispc_pck_free_enable(bool enable)
+void dispc_pck_free_enable(struct dispc_device *dispc, bool enable)
{
- if (!dispc_has_feature(FEAT_PCKFREEENABLE))
+ if (!dispc_has_feature(dispc, FEAT_PCKFREEENABLE))
return;
- REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
+ REG_FLD_MOD(dispc, DISPC_CONTROL, enable ? 1 : 0, 27, 27);
}
-static void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_fifohandcheck(struct dispc_device *dispc,
+ enum omap_channel channel,
+ bool enable)
{
- mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
}
-static void dispc_mgr_set_lcd_type_tft(enum omap_channel channel)
+static void dispc_mgr_set_lcd_type_tft(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- mgr_fld_write(channel, DISPC_MGR_FLD_STNTFT, 1);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STNTFT, 1);
}
-static void dispc_set_loadmode(enum omap_dss_load_mode mode)
+static void dispc_set_loadmode(struct dispc_device *dispc,
+ enum omap_dss_load_mode mode)
{
- REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, mode, 2, 1);
}
-static void dispc_mgr_set_default_color(enum omap_channel channel, u32 color)
+static void dispc_mgr_set_default_color(struct dispc_device *dispc,
+ enum omap_channel channel, u32 color)
{
- dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
+ dispc_write_reg(dispc, DISPC_DEFAULT_COLOR(channel), color);
}
-static void dispc_mgr_set_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type type,
- u32 trans_key)
+static void dispc_mgr_set_trans_key(struct dispc_device *dispc,
+ enum omap_channel ch,
+ enum omap_dss_trans_key_type type,
+ u32 trans_key)
{
- mgr_fld_write(ch, DISPC_MGR_FLD_TCKSELECTION, type);
+ mgr_fld_write(dispc, ch, DISPC_MGR_FLD_TCKSELECTION, type);
- dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
+ dispc_write_reg(dispc, DISPC_TRANS_COLOR(ch), trans_key);
}
-static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
+static void dispc_mgr_enable_trans_key(struct dispc_device *dispc,
+ enum omap_channel ch, bool enable)
{
- mgr_fld_write(ch, DISPC_MGR_FLD_TCKENABLE, enable);
+ mgr_fld_write(dispc, ch, DISPC_MGR_FLD_TCKENABLE, enable);
}
-static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
- bool enable)
+static void dispc_mgr_enable_alpha_fixed_zorder(struct dispc_device *dispc,
+ enum omap_channel ch,
+ bool enable)
{
- if (!dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER))
+ if (!dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER))
return;
if (ch == OMAP_DSS_CHANNEL_LCD)
- REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 19, 19);
}
-static void dispc_mgr_setup(enum omap_channel channel,
- const struct omap_overlay_manager_info *info)
+static void dispc_mgr_setup(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct omap_overlay_manager_info *info)
{
- dispc_mgr_set_default_color(channel, info->default_color);
- dispc_mgr_set_trans_key(channel, info->trans_key_type, info->trans_key);
- dispc_mgr_enable_trans_key(channel, info->trans_enabled);
- dispc_mgr_enable_alpha_fixed_zorder(channel,
+ dispc_mgr_set_default_color(dispc, channel, info->default_color);
+ dispc_mgr_set_trans_key(dispc, channel, info->trans_key_type,
+ info->trans_key);
+ dispc_mgr_enable_trans_key(dispc, channel, info->trans_enabled);
+ dispc_mgr_enable_alpha_fixed_zorder(dispc, channel,
info->partial_alpha_enabled);
- if (dispc_has_feature(FEAT_CPR)) {
- dispc_mgr_enable_cpr(channel, info->cpr_enable);
- dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs);
+ if (dispc_has_feature(dispc, FEAT_CPR)) {
+ dispc_mgr_enable_cpr(dispc, channel, info->cpr_enable);
+ dispc_mgr_set_cpr_coef(dispc, channel, &info->cpr_coefs);
}
}
-static void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
+static void dispc_mgr_set_tft_data_lines(struct dispc_device *dispc,
+ enum omap_channel channel,
+ u8 data_lines)
{
int code;
@@ -2846,10 +3026,11 @@ static void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_line
return;
}
- mgr_fld_write(channel, DISPC_MGR_FLD_TFTDATALINES, code);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_TFTDATALINES, code);
}
-static void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
+static void dispc_mgr_set_io_pad_mode(struct dispc_device *dispc,
+ enum dss_io_pad_mode mode)
{
u32 l;
int gpout0, gpout1;
@@ -2872,68 +3053,74 @@ static void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
return;
}
- l = dispc_read_reg(DISPC_CONTROL);
+ l = dispc_read_reg(dispc, DISPC_CONTROL);
l = FLD_MOD(l, gpout0, 15, 15);
l = FLD_MOD(l, gpout1, 16, 16);
- dispc_write_reg(DISPC_CONTROL, l);
+ dispc_write_reg(dispc, DISPC_CONTROL, l);
}
-static void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_stallmode(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable)
{
- mgr_fld_write(channel, DISPC_MGR_FLD_STALLMODE, enable);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STALLMODE, enable);
}
-static void dispc_mgr_set_lcd_config(enum omap_channel channel,
- const struct dss_lcd_mgr_config *config)
+static void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config)
{
- dispc_mgr_set_io_pad_mode(config->io_pad_mode);
+ dispc_mgr_set_io_pad_mode(dispc, config->io_pad_mode);
- dispc_mgr_enable_stallmode(channel, config->stallmode);
- dispc_mgr_enable_fifohandcheck(channel, config->fifohandcheck);
+ dispc_mgr_enable_stallmode(dispc, channel, config->stallmode);
+ dispc_mgr_enable_fifohandcheck(dispc, channel, config->fifohandcheck);
- dispc_mgr_set_clock_div(channel, &config->clock_info);
+ dispc_mgr_set_clock_div(dispc, channel, &config->clock_info);
- dispc_mgr_set_tft_data_lines(channel, config->video_port_width);
+ dispc_mgr_set_tft_data_lines(dispc, channel, config->video_port_width);
- dispc_lcd_enable_signal_polarity(config->lcden_sig_polarity);
+ dispc_lcd_enable_signal_polarity(dispc, config->lcden_sig_polarity);
- dispc_mgr_set_lcd_type_tft(channel);
+ dispc_mgr_set_lcd_type_tft(dispc, channel);
}
-static bool _dispc_mgr_size_ok(u16 width, u16 height)
+static bool _dispc_mgr_size_ok(struct dispc_device *dispc,
+ u16 width, u16 height)
{
- return width <= dispc.feat->mgr_width_max &&
- height <= dispc.feat->mgr_height_max;
+ return width <= dispc->feat->mgr_width_max &&
+ height <= dispc->feat->mgr_height_max;
}
-static bool _dispc_lcd_timings_ok(int hsync_len, int hfp, int hbp,
- int vsw, int vfp, int vbp)
+static bool _dispc_lcd_timings_ok(struct dispc_device *dispc,
+ int hsync_len, int hfp, int hbp,
+ int vsw, int vfp, int vbp)
{
- if (hsync_len < 1 || hsync_len > dispc.feat->sw_max ||
- hfp < 1 || hfp > dispc.feat->hp_max ||
- hbp < 1 || hbp > dispc.feat->hp_max ||
- vsw < 1 || vsw > dispc.feat->sw_max ||
- vfp < 0 || vfp > dispc.feat->vp_max ||
- vbp < 0 || vbp > dispc.feat->vp_max)
+ if (hsync_len < 1 || hsync_len > dispc->feat->sw_max ||
+ hfp < 1 || hfp > dispc->feat->hp_max ||
+ hbp < 1 || hbp > dispc->feat->hp_max ||
+ vsw < 1 || vsw > dispc->feat->sw_max ||
+ vfp < 0 || vfp > dispc->feat->vp_max ||
+ vbp < 0 || vbp > dispc->feat->vp_max)
return false;
return true;
}
-static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
- unsigned long pclk)
+static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc,
+ enum omap_channel channel,
+ unsigned long pclk)
{
if (dss_mgr_is_lcd(channel))
- return pclk <= dispc.feat->max_lcd_pclk;
+ return pclk <= dispc->feat->max_lcd_pclk;
else
- return pclk <= dispc.feat->max_tv_pclk;
+ return pclk <= dispc->feat->max_tv_pclk;
}
-bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
+bool dispc_mgr_timings_ok(struct dispc_device *dispc, enum omap_channel channel,
+ const struct videomode *vm)
{
- if (!_dispc_mgr_size_ok(vm->hactive, vm->vactive))
+ if (!_dispc_mgr_size_ok(dispc, vm->hactive, vm->vactive))
return false;
- if (!_dispc_mgr_pclk_ok(channel, vm->pixelclock))
+ if (!_dispc_mgr_pclk_ok(dispc, channel, vm->pixelclock))
return false;
if (dss_mgr_is_lcd(channel)) {
@@ -2941,7 +3128,7 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
return false;
- if (!_dispc_lcd_timings_ok(vm->hsync_len,
+ if (!_dispc_lcd_timings_ok(dispc, vm->hsync_len,
vm->hfront_porch, vm->hback_porch,
vm->vsync_len, vm->vfront_porch,
vm->vback_porch))
@@ -2951,21 +3138,22 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
return true;
}
-static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
+static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
const struct videomode *vm)
{
u32 timing_h, timing_v, l;
bool onoff, rf, ipc, vs, hs, de;
- timing_h = FLD_VAL(vm->hsync_len - 1, dispc.feat->sw_start, 0) |
- FLD_VAL(vm->hfront_porch - 1, dispc.feat->fp_start, 8) |
- FLD_VAL(vm->hback_porch - 1, dispc.feat->bp_start, 20);
- timing_v = FLD_VAL(vm->vsync_len - 1, dispc.feat->sw_start, 0) |
- FLD_VAL(vm->vfront_porch, dispc.feat->fp_start, 8) |
- FLD_VAL(vm->vback_porch, dispc.feat->bp_start, 20);
+ timing_h = FLD_VAL(vm->hsync_len - 1, dispc->feat->sw_start, 0) |
+ FLD_VAL(vm->hfront_porch - 1, dispc->feat->fp_start, 8) |
+ FLD_VAL(vm->hback_porch - 1, dispc->feat->bp_start, 20);
+ timing_v = FLD_VAL(vm->vsync_len - 1, dispc->feat->sw_start, 0) |
+ FLD_VAL(vm->vfront_porch, dispc->feat->fp_start, 8) |
+ FLD_VAL(vm->vback_porch, dispc->feat->bp_start, 20);
- dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
- dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
+ dispc_write_reg(dispc, DISPC_TIMING_H(channel), timing_h);
+ dispc_write_reg(dispc, DISPC_TIMING_V(channel), timing_v);
if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
vs = false;
@@ -3003,12 +3191,12 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
FLD_VAL(vs, 12, 12);
/* always set ALIGN bit when available */
- if (dispc.feat->supports_sync_align)
+ if (dispc->feat->supports_sync_align)
l |= (1 << 18);
- dispc_write_reg(DISPC_POL_FREQ(channel), l);
+ dispc_write_reg(dispc, DISPC_POL_FREQ(channel), l);
- if (dispc.syscon_pol) {
+ if (dispc->syscon_pol) {
const int shifts[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
[OMAP_DSS_CHANNEL_LCD2] = 1,
@@ -3023,8 +3211,8 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
mask <<= 16 + shifts[channel];
val <<= 16 + shifts[channel];
- regmap_update_bits(dispc.syscon_pol, dispc.syscon_pol_offset,
- mask, val);
+ regmap_update_bits(dispc->syscon_pol, dispc->syscon_pol_offset,
+ mask, val);
}
}
@@ -3039,22 +3227,23 @@ static int vm_flag_to_int(enum display_flags flags, enum display_flags high,
}
/* change name to mode? */
-static void dispc_mgr_set_timings(enum omap_channel channel,
- const struct videomode *vm)
+static void dispc_mgr_set_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm)
{
- unsigned xtot, ytot;
+ unsigned int xtot, ytot;
unsigned long ht, vt;
struct videomode t = *vm;
DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
- if (!dispc_mgr_timings_ok(channel, &t)) {
+ if (!dispc_mgr_timings_ok(dispc, channel, &t)) {
BUG();
return;
}
if (dss_mgr_is_lcd(channel)) {
- _dispc_mgr_set_lcd_timings(channel, &t);
+ _dispc_mgr_set_lcd_timings(dispc, channel, &t);
xtot = t.hactive + t.hfront_porch + t.hsync_len + t.hback_porch;
ytot = t.vactive + t.vfront_porch + t.vsync_len + t.vback_porch;
@@ -3078,52 +3267,54 @@ static void dispc_mgr_set_timings(enum omap_channel channel,
if (t.flags & DISPLAY_FLAGS_INTERLACED)
t.vactive /= 2;
- if (dispc.feat->supports_double_pixel)
- REG_FLD_MOD(DISPC_CONTROL,
+ if (dispc->feat->supports_double_pixel)
+ REG_FLD_MOD(dispc, DISPC_CONTROL,
!!(t.flags & DISPLAY_FLAGS_DOUBLECLK),
19, 17);
}
- dispc_mgr_set_size(channel, t.hactive, t.vactive);
+ dispc_mgr_set_size(dispc, channel, t.hactive, t.vactive);
}
-static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
- u16 pck_div)
+static void dispc_mgr_set_lcd_divisor(struct dispc_device *dispc,
+ enum omap_channel channel, u16 lck_div,
+ u16 pck_div)
{
BUG_ON(lck_div < 1);
BUG_ON(pck_div < 1);
- dispc_write_reg(DISPC_DIVISORo(channel),
+ dispc_write_reg(dispc, DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
- if (!dispc_has_feature(FEAT_CORE_CLK_DIV) &&
+ if (!dispc_has_feature(dispc, FEAT_CORE_CLK_DIV) &&
channel == OMAP_DSS_CHANNEL_LCD)
- dispc.core_clk_rate = dispc_fclk_rate() / lck_div;
+ dispc->core_clk_rate = dispc_fclk_rate(dispc) / lck_div;
}
-static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
- int *pck_div)
+static void dispc_mgr_get_lcd_divisor(struct dispc_device *dispc,
+ enum omap_channel channel, int *lck_div,
+ int *pck_div)
{
u32 l;
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ l = dispc_read_reg(dispc, DISPC_DIVISORo(channel));
*lck_div = FLD_GET(l, 23, 16);
*pck_div = FLD_GET(l, 7, 0);
}
-static unsigned long dispc_fclk_rate(void)
+static unsigned long dispc_fclk_rate(struct dispc_device *dispc)
{
unsigned long r;
enum dss_clk_source src;
- src = dss_get_dispc_clk_source();
+ src = dss_get_dispc_clk_source(dispc->dss);
if (src == DSS_CLK_SRC_FCK) {
- r = dss_get_dispc_clk_rate();
+ r = dss_get_dispc_clk_rate(dispc->dss);
} else {
struct dss_pll *pll;
- unsigned clkout_idx;
+ unsigned int clkout_idx;
- pll = dss_pll_find_by_src(src);
+ pll = dss_pll_find_by_src(dispc->dss, src);
clkout_idx = dss_pll_get_clkout_idx_for_src(src);
r = pll->cinfo.clkout[clkout_idx];
@@ -3132,7 +3323,8 @@ static unsigned long dispc_fclk_rate(void)
return r;
}
-static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
+static unsigned long dispc_mgr_lclk_rate(struct dispc_device *dispc,
+ enum omap_channel channel)
{
int lcd;
unsigned long r;
@@ -3140,28 +3332,29 @@ static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
/* for TV, LCLK rate is the FCLK rate */
if (!dss_mgr_is_lcd(channel))
- return dispc_fclk_rate();
+ return dispc_fclk_rate(dispc);
- src = dss_get_lcd_clk_source(channel);
+ src = dss_get_lcd_clk_source(dispc->dss, channel);
if (src == DSS_CLK_SRC_FCK) {
- r = dss_get_dispc_clk_rate();
+ r = dss_get_dispc_clk_rate(dispc->dss);
} else {
struct dss_pll *pll;
- unsigned clkout_idx;
+ unsigned int clkout_idx;
- pll = dss_pll_find_by_src(src);
+ pll = dss_pll_find_by_src(dispc->dss, src);
clkout_idx = dss_pll_get_clkout_idx_for_src(src);
r = pll->cinfo.clkout[clkout_idx];
}
- lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16);
+ lcd = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16);
return r / lcd;
}
-static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
+static unsigned long dispc_mgr_pclk_rate(struct dispc_device *dispc,
+ enum omap_channel channel)
{
unsigned long r;
@@ -3169,109 +3362,115 @@ static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
int pcd;
u32 l;
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ l = dispc_read_reg(dispc, DISPC_DIVISORo(channel));
pcd = FLD_GET(l, 7, 0);
- r = dispc_mgr_lclk_rate(channel);
+ r = dispc_mgr_lclk_rate(dispc, channel);
return r / pcd;
} else {
- return dispc.tv_pclk_rate;
+ return dispc->tv_pclk_rate;
}
}
-void dispc_set_tv_pclk(unsigned long pclk)
+void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk)
{
- dispc.tv_pclk_rate = pclk;
+ dispc->tv_pclk_rate = pclk;
}
-static unsigned long dispc_core_clk_rate(void)
+static unsigned long dispc_core_clk_rate(struct dispc_device *dispc)
{
- return dispc.core_clk_rate;
+ return dispc->core_clk_rate;
}
-static unsigned long dispc_plane_pclk_rate(enum omap_plane_id plane)
+static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
enum omap_channel channel;
if (plane == OMAP_DSS_WB)
return 0;
- channel = dispc_ovl_get_channel_out(plane);
+ channel = dispc_ovl_get_channel_out(dispc, plane);
- return dispc_mgr_pclk_rate(channel);
+ return dispc_mgr_pclk_rate(dispc, channel);
}
-static unsigned long dispc_plane_lclk_rate(enum omap_plane_id plane)
+static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
enum omap_channel channel;
if (plane == OMAP_DSS_WB)
return 0;
- channel = dispc_ovl_get_channel_out(plane);
+ channel = dispc_ovl_get_channel_out(dispc, plane);
- return dispc_mgr_lclk_rate(channel);
+ return dispc_mgr_lclk_rate(dispc, channel);
}
-static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
+static void dispc_dump_clocks_channel(struct dispc_device *dispc,
+ struct seq_file *s,
+ enum omap_channel channel)
{
int lcd, pcd;
enum dss_clk_source lcd_clk_src;
seq_printf(s, "- %s -\n", mgr_desc[channel].name);
- lcd_clk_src = dss_get_lcd_clk_source(channel);
+ lcd_clk_src = dss_get_lcd_clk_source(dispc->dss, channel);
seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name,
dss_get_clk_source_name(lcd_clk_src));
- dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd);
+ dispc_mgr_get_lcd_divisor(dispc, channel, &lcd, &pcd);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
- dispc_mgr_lclk_rate(channel), lcd);
+ dispc_mgr_lclk_rate(dispc, channel), lcd);
seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
- dispc_mgr_pclk_rate(channel), pcd);
+ dispc_mgr_pclk_rate(dispc, channel), pcd);
}
-void dispc_dump_clocks(struct seq_file *s)
+void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s)
{
+ enum dss_clk_source dispc_clk_src;
int lcd;
u32 l;
- enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
- if (dispc_runtime_get())
+ if (dispc_runtime_get(dispc))
return;
seq_printf(s, "- DISPC -\n");
+ dispc_clk_src = dss_get_dispc_clk_source(dispc->dss);
seq_printf(s, "dispc fclk source = %s\n",
dss_get_clk_source_name(dispc_clk_src));
- seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
+ seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate(dispc));
- if (dispc_has_feature(FEAT_CORE_CLK_DIV)) {
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV)) {
seq_printf(s, "- DISPC-CORE-CLK -\n");
- l = dispc_read_reg(DISPC_DIVISOR);
+ l = dispc_read_reg(dispc, DISPC_DIVISOR);
lcd = FLD_GET(l, 23, 16);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
- (dispc_fclk_rate()/lcd), lcd);
+ (dispc_fclk_rate(dispc)/lcd), lcd);
}
- dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD);
+ dispc_dump_clocks_channel(dispc, s, OMAP_DSS_CHANNEL_LCD);
- if (dispc_has_feature(FEAT_MGR_LCD2))
- dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD2);
- if (dispc_has_feature(FEAT_MGR_LCD3))
- dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD3);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
+ dispc_dump_clocks_channel(dispc, s, OMAP_DSS_CHANNEL_LCD2);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
+ dispc_dump_clocks_channel(dispc, s, OMAP_DSS_CHANNEL_LCD3);
- dispc_runtime_put();
+ dispc_runtime_put(dispc);
}
-static void dispc_dump_regs(struct seq_file *s)
+static int dispc_dump_regs(struct seq_file *s, void *p)
{
+ struct dispc_device *dispc = s->private;
int i, j;
const char *mgr_names[] = {
[OMAP_DSS_CHANNEL_LCD] = "LCD",
@@ -3288,186 +3487,190 @@ static void dispc_dump_regs(struct seq_file *s)
};
const char **p_names;
-#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
+#define DUMPREG(dispc, r) \
+ seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(dispc, r))
- if (dispc_runtime_get())
- return;
+ if (dispc_runtime_get(dispc))
+ return 0;
/* DISPC common registers */
- DUMPREG(DISPC_REVISION);
- DUMPREG(DISPC_SYSCONFIG);
- DUMPREG(DISPC_SYSSTATUS);
- DUMPREG(DISPC_IRQSTATUS);
- DUMPREG(DISPC_IRQENABLE);
- DUMPREG(DISPC_CONTROL);
- DUMPREG(DISPC_CONFIG);
- DUMPREG(DISPC_CAPABLE);
- DUMPREG(DISPC_LINE_STATUS);
- DUMPREG(DISPC_LINE_NUMBER);
- if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
- DUMPREG(DISPC_GLOBAL_ALPHA);
- if (dispc_has_feature(FEAT_MGR_LCD2)) {
- DUMPREG(DISPC_CONTROL2);
- DUMPREG(DISPC_CONFIG2);
+ DUMPREG(dispc, DISPC_REVISION);
+ DUMPREG(dispc, DISPC_SYSCONFIG);
+ DUMPREG(dispc, DISPC_SYSSTATUS);
+ DUMPREG(dispc, DISPC_IRQSTATUS);
+ DUMPREG(dispc, DISPC_IRQENABLE);
+ DUMPREG(dispc, DISPC_CONTROL);
+ DUMPREG(dispc, DISPC_CONFIG);
+ DUMPREG(dispc, DISPC_CAPABLE);
+ DUMPREG(dispc, DISPC_LINE_STATUS);
+ DUMPREG(dispc, DISPC_LINE_NUMBER);
+ if (dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
+ DUMPREG(dispc, DISPC_GLOBAL_ALPHA);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2)) {
+ DUMPREG(dispc, DISPC_CONTROL2);
+ DUMPREG(dispc, DISPC_CONFIG2);
}
- if (dispc_has_feature(FEAT_MGR_LCD3)) {
- DUMPREG(DISPC_CONTROL3);
- DUMPREG(DISPC_CONFIG3);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3)) {
+ DUMPREG(dispc, DISPC_CONTROL3);
+ DUMPREG(dispc, DISPC_CONFIG3);
}
- if (dispc_has_feature(FEAT_MFLAG))
- DUMPREG(DISPC_GLOBAL_MFLAG_ATTRIBUTE);
+ if (dispc_has_feature(dispc, FEAT_MFLAG))
+ DUMPREG(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE);
#undef DUMPREG
#define DISPC_REG(i, name) name(i)
-#define DUMPREG(i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \
+#define DUMPREG(dispc, i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \
(int)(48 - strlen(#r) - strlen(p_names[i])), " ", \
- dispc_read_reg(DISPC_REG(i, r)))
+ dispc_read_reg(dispc, DISPC_REG(i, r)))
p_names = mgr_names;
/* DISPC channel specific registers */
- for (i = 0; i < dispc_get_num_mgrs(); i++) {
- DUMPREG(i, DISPC_DEFAULT_COLOR);
- DUMPREG(i, DISPC_TRANS_COLOR);
- DUMPREG(i, DISPC_SIZE_MGR);
+ for (i = 0; i < dispc_get_num_mgrs(dispc); i++) {
+ DUMPREG(dispc, i, DISPC_DEFAULT_COLOR);
+ DUMPREG(dispc, i, DISPC_TRANS_COLOR);
+ DUMPREG(dispc, i, DISPC_SIZE_MGR);
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
- DUMPREG(i, DISPC_TIMING_H);
- DUMPREG(i, DISPC_TIMING_V);
- DUMPREG(i, DISPC_POL_FREQ);
- DUMPREG(i, DISPC_DIVISORo);
+ DUMPREG(dispc, i, DISPC_TIMING_H);
+ DUMPREG(dispc, i, DISPC_TIMING_V);
+ DUMPREG(dispc, i, DISPC_POL_FREQ);
+ DUMPREG(dispc, i, DISPC_DIVISORo);
- DUMPREG(i, DISPC_DATA_CYCLE1);
- DUMPREG(i, DISPC_DATA_CYCLE2);
- DUMPREG(i, DISPC_DATA_CYCLE3);
+ DUMPREG(dispc, i, DISPC_DATA_CYCLE1);
+ DUMPREG(dispc, i, DISPC_DATA_CYCLE2);
+ DUMPREG(dispc, i, DISPC_DATA_CYCLE3);
- if (dispc_has_feature(FEAT_CPR)) {
- DUMPREG(i, DISPC_CPR_COEF_R);
- DUMPREG(i, DISPC_CPR_COEF_G);
- DUMPREG(i, DISPC_CPR_COEF_B);
+ if (dispc_has_feature(dispc, FEAT_CPR)) {
+ DUMPREG(dispc, i, DISPC_CPR_COEF_R);
+ DUMPREG(dispc, i, DISPC_CPR_COEF_G);
+ DUMPREG(dispc, i, DISPC_CPR_COEF_B);
}
}
p_names = ovl_names;
- for (i = 0; i < dispc_get_num_ovls(); i++) {
- DUMPREG(i, DISPC_OVL_BA0);
- DUMPREG(i, DISPC_OVL_BA1);
- DUMPREG(i, DISPC_OVL_POSITION);
- DUMPREG(i, DISPC_OVL_SIZE);
- DUMPREG(i, DISPC_OVL_ATTRIBUTES);
- DUMPREG(i, DISPC_OVL_FIFO_THRESHOLD);
- DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS);
- DUMPREG(i, DISPC_OVL_ROW_INC);
- DUMPREG(i, DISPC_OVL_PIXEL_INC);
-
- if (dispc_has_feature(FEAT_PRELOAD))
- DUMPREG(i, DISPC_OVL_PRELOAD);
- if (dispc_has_feature(FEAT_MFLAG))
- DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
+ for (i = 0; i < dispc_get_num_ovls(dispc); i++) {
+ DUMPREG(dispc, i, DISPC_OVL_BA0);
+ DUMPREG(dispc, i, DISPC_OVL_BA1);
+ DUMPREG(dispc, i, DISPC_OVL_POSITION);
+ DUMPREG(dispc, i, DISPC_OVL_SIZE);
+ DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES);
+ DUMPREG(dispc, i, DISPC_OVL_FIFO_THRESHOLD);
+ DUMPREG(dispc, i, DISPC_OVL_FIFO_SIZE_STATUS);
+ DUMPREG(dispc, i, DISPC_OVL_ROW_INC);
+ DUMPREG(dispc, i, DISPC_OVL_PIXEL_INC);
+
+ if (dispc_has_feature(dispc, FEAT_PRELOAD))
+ DUMPREG(dispc, i, DISPC_OVL_PRELOAD);
+ if (dispc_has_feature(dispc, FEAT_MFLAG))
+ DUMPREG(dispc, i, DISPC_OVL_MFLAG_THRESHOLD);
if (i == OMAP_DSS_GFX) {
- DUMPREG(i, DISPC_OVL_WINDOW_SKIP);
- DUMPREG(i, DISPC_OVL_TABLE_BA);
+ DUMPREG(dispc, i, DISPC_OVL_WINDOW_SKIP);
+ DUMPREG(dispc, i, DISPC_OVL_TABLE_BA);
continue;
}
- DUMPREG(i, DISPC_OVL_FIR);
- DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
- DUMPREG(i, DISPC_OVL_ACCU0);
- DUMPREG(i, DISPC_OVL_ACCU1);
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- DUMPREG(i, DISPC_OVL_BA0_UV);
- DUMPREG(i, DISPC_OVL_BA1_UV);
- DUMPREG(i, DISPC_OVL_FIR2);
- DUMPREG(i, DISPC_OVL_ACCU2_0);
- DUMPREG(i, DISPC_OVL_ACCU2_1);
+ DUMPREG(dispc, i, DISPC_OVL_FIR);
+ DUMPREG(dispc, i, DISPC_OVL_PICTURE_SIZE);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU0);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU1);
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
+ DUMPREG(dispc, i, DISPC_OVL_BA0_UV);
+ DUMPREG(dispc, i, DISPC_OVL_BA1_UV);
+ DUMPREG(dispc, i, DISPC_OVL_FIR2);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU2_0);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU2_1);
}
- if (dispc_has_feature(FEAT_ATTR2))
- DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
+ if (dispc_has_feature(dispc, FEAT_ATTR2))
+ DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES2);
}
- if (dispc.feat->has_writeback) {
+ if (dispc->feat->has_writeback) {
i = OMAP_DSS_WB;
- DUMPREG(i, DISPC_OVL_BA0);
- DUMPREG(i, DISPC_OVL_BA1);
- DUMPREG(i, DISPC_OVL_SIZE);
- DUMPREG(i, DISPC_OVL_ATTRIBUTES);
- DUMPREG(i, DISPC_OVL_FIFO_THRESHOLD);
- DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS);
- DUMPREG(i, DISPC_OVL_ROW_INC);
- DUMPREG(i, DISPC_OVL_PIXEL_INC);
-
- if (dispc_has_feature(FEAT_MFLAG))
- DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
-
- DUMPREG(i, DISPC_OVL_FIR);
- DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
- DUMPREG(i, DISPC_OVL_ACCU0);
- DUMPREG(i, DISPC_OVL_ACCU1);
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- DUMPREG(i, DISPC_OVL_BA0_UV);
- DUMPREG(i, DISPC_OVL_BA1_UV);
- DUMPREG(i, DISPC_OVL_FIR2);
- DUMPREG(i, DISPC_OVL_ACCU2_0);
- DUMPREG(i, DISPC_OVL_ACCU2_1);
+ DUMPREG(dispc, i, DISPC_OVL_BA0);
+ DUMPREG(dispc, i, DISPC_OVL_BA1);
+ DUMPREG(dispc, i, DISPC_OVL_SIZE);
+ DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES);
+ DUMPREG(dispc, i, DISPC_OVL_FIFO_THRESHOLD);
+ DUMPREG(dispc, i, DISPC_OVL_FIFO_SIZE_STATUS);
+ DUMPREG(dispc, i, DISPC_OVL_ROW_INC);
+ DUMPREG(dispc, i, DISPC_OVL_PIXEL_INC);
+
+ if (dispc_has_feature(dispc, FEAT_MFLAG))
+ DUMPREG(dispc, i, DISPC_OVL_MFLAG_THRESHOLD);
+
+ DUMPREG(dispc, i, DISPC_OVL_FIR);
+ DUMPREG(dispc, i, DISPC_OVL_PICTURE_SIZE);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU0);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU1);
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
+ DUMPREG(dispc, i, DISPC_OVL_BA0_UV);
+ DUMPREG(dispc, i, DISPC_OVL_BA1_UV);
+ DUMPREG(dispc, i, DISPC_OVL_FIR2);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU2_0);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU2_1);
}
- if (dispc_has_feature(FEAT_ATTR2))
- DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
+ if (dispc_has_feature(dispc, FEAT_ATTR2))
+ DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES2);
}
#undef DISPC_REG
#undef DUMPREG
#define DISPC_REG(plane, name, i) name(plane, i)
-#define DUMPREG(plane, name, i) \
+#define DUMPREG(dispc, plane, name, i) \
seq_printf(s, "%s_%d(%s)%*s %08x\n", #name, i, p_names[plane], \
(int)(46 - strlen(#name) - strlen(p_names[plane])), " ", \
- dispc_read_reg(DISPC_REG(plane, name, i)))
+ dispc_read_reg(dispc, DISPC_REG(plane, name, i)))
/* Video pipeline coefficient registers */
/* start from OMAP_DSS_VIDEO1 */
- for (i = 1; i < dispc_get_num_ovls(); i++) {
+ for (i = 1; i < dispc_get_num_ovls(dispc); i++) {
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_H, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_H, j);
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_HV, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_HV, j);
for (j = 0; j < 5; j++)
- DUMPREG(i, DISPC_OVL_CONV_COEF, j);
+ DUMPREG(dispc, i, DISPC_OVL_CONV_COEF, j);
- if (dispc_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(dispc, FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_V, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_V, j);
}
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_H2, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_H2, j);
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_HV2, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_HV2, j);
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_V2, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_V2, j);
}
}
- dispc_runtime_put();
+ dispc_runtime_put(dispc);
#undef DISPC_REG
#undef DUMPREG
+
+ return 0;
}
/* calculate clock rates using dividers in cinfo */
-int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
- struct dispc_clock_info *cinfo)
+int dispc_calc_clock_rates(struct dispc_device *dispc,
+ unsigned long dispc_fclk_rate,
+ struct dispc_clock_info *cinfo)
{
if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
return -EINVAL;
@@ -3480,16 +3683,16 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
return 0;
}
-bool dispc_div_calc(unsigned long dispc_freq,
- unsigned long pck_min, unsigned long pck_max,
- dispc_div_calc_func func, void *data)
+bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq,
+ unsigned long pck_min, unsigned long pck_max,
+ dispc_div_calc_func func, void *data)
{
int lckd, lckd_start, lckd_stop;
int pckd, pckd_start, pckd_stop;
unsigned long pck, lck;
unsigned long lck_max;
unsigned long pckd_hw_min, pckd_hw_max;
- unsigned min_fck_per_pck;
+ unsigned int min_fck_per_pck;
unsigned long fck;
#ifdef CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK
@@ -3498,10 +3701,10 @@ bool dispc_div_calc(unsigned long dispc_freq,
min_fck_per_pck = 0;
#endif
- pckd_hw_min = dispc.feat->min_pcd;
+ pckd_hw_min = dispc->feat->min_pcd;
pckd_hw_max = 255;
- lck_max = dss_get_max_fck_rate();
+ lck_max = dss_get_max_fck_rate(dispc->dss);
pck_min = pck_min ? pck_min : 1;
pck_max = pck_max ? pck_max : ULONG_MAX;
@@ -3524,8 +3727,8 @@ bool dispc_div_calc(unsigned long dispc_freq,
* also. Thus we need to use the calculated lck. For
* OMAP4+ the DISPC fclk is a separate clock.
*/
- if (dispc_has_feature(FEAT_CORE_CLK_DIV))
- fck = dispc_core_clk_rate();
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV))
+ fck = dispc_core_clk_rate(dispc);
else
fck = lck;
@@ -3540,24 +3743,27 @@ bool dispc_div_calc(unsigned long dispc_freq,
return false;
}
-void dispc_mgr_set_clock_div(enum omap_channel channel,
- const struct dispc_clock_info *cinfo)
+void dispc_mgr_set_clock_div(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dispc_clock_info *cinfo)
{
DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
- dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div);
+ dispc_mgr_set_lcd_divisor(dispc, channel, cinfo->lck_div,
+ cinfo->pck_div);
}
-int dispc_mgr_get_clock_div(enum omap_channel channel,
- struct dispc_clock_info *cinfo)
+int dispc_mgr_get_clock_div(struct dispc_device *dispc,
+ enum omap_channel channel,
+ struct dispc_clock_info *cinfo)
{
unsigned long fck;
- fck = dispc_fclk_rate();
+ fck = dispc_fclk_rate(dispc);
- cinfo->lck_div = REG_GET(DISPC_DIVISORo(channel), 23, 16);
- cinfo->pck_div = REG_GET(DISPC_DIVISORo(channel), 7, 0);
+ cinfo->lck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16);
+ cinfo->pck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 7, 0);
cinfo->lck = fck / cinfo->lck_div;
cinfo->pck = cinfo->lck / cinfo->pck_div;
@@ -3565,53 +3771,56 @@ int dispc_mgr_get_clock_div(enum omap_channel channel,
return 0;
}
-static u32 dispc_read_irqstatus(void)
+static u32 dispc_read_irqstatus(struct dispc_device *dispc)
{
- return dispc_read_reg(DISPC_IRQSTATUS);
+ return dispc_read_reg(dispc, DISPC_IRQSTATUS);
}
-static void dispc_clear_irqstatus(u32 mask)
+static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
{
- dispc_write_reg(DISPC_IRQSTATUS, mask);
+ dispc_write_reg(dispc, DISPC_IRQSTATUS, mask);
}
-static void dispc_write_irqenable(u32 mask)
+static void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
{
- u32 old_mask = dispc_read_reg(DISPC_IRQENABLE);
+ u32 old_mask = dispc_read_reg(dispc, DISPC_IRQENABLE);
/* clear the irqstatus for newly enabled irqs */
- dispc_clear_irqstatus((mask ^ old_mask) & mask);
+ dispc_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
- dispc_write_reg(DISPC_IRQENABLE, mask);
+ dispc_write_reg(dispc, DISPC_IRQENABLE, mask);
/* flush posted write */
- dispc_read_reg(DISPC_IRQENABLE);
+ dispc_read_reg(dispc, DISPC_IRQENABLE);
}
-void dispc_enable_sidle(void)
+void dispc_enable_sidle(struct dispc_device *dispc)
{
- REG_FLD_MOD(DISPC_SYSCONFIG, 2, 4, 3); /* SIDLEMODE: smart idle */
+ /* SIDLEMODE: smart idle */
+ REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 2, 4, 3);
}
-void dispc_disable_sidle(void)
+void dispc_disable_sidle(struct dispc_device *dispc)
{
- REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
+ REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
}
-static u32 dispc_mgr_gamma_size(enum omap_channel channel)
+static u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+ enum omap_channel channel)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
- if (!dispc.feat->has_gamma_table)
+ if (!dispc->feat->has_gamma_table)
return 0;
return gdesc->len;
}
-static void dispc_mgr_write_gamma_table(enum omap_channel channel)
+static void dispc_mgr_write_gamma_table(struct dispc_device *dispc,
+ enum omap_channel channel)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
- u32 *table = dispc.gamma_table[channel];
+ u32 *table = dispc->gamma_table[channel];
unsigned int i;
DSSDBG("%s: channel %d\n", __func__, channel);
@@ -3624,26 +3833,26 @@ static void dispc_mgr_write_gamma_table(enum omap_channel channel)
else if (i == 0)
v |= 1 << 31;
- dispc_write_reg(gdesc->reg, v);
+ dispc_write_reg(dispc, gdesc->reg, v);
}
}
-static void dispc_restore_gamma_tables(void)
+static void dispc_restore_gamma_tables(struct dispc_device *dispc)
{
DSSDBG("%s()\n", __func__);
- if (!dispc.feat->has_gamma_table)
+ if (!dispc->feat->has_gamma_table)
return;
- dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD);
+ dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_LCD);
- dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
+ dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_DIGIT);
- if (dispc_has_feature(FEAT_MGR_LCD2))
- dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
+ dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_LCD2);
- if (dispc_has_feature(FEAT_MGR_LCD3))
- dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
+ dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_LCD3);
}
static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
@@ -3651,18 +3860,19 @@ static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
{ .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
};
-static void dispc_mgr_set_gamma(enum omap_channel channel,
- const struct drm_color_lut *lut,
- unsigned int length)
+static void dispc_mgr_set_gamma(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
- u32 *table = dispc.gamma_table[channel];
+ u32 *table = dispc->gamma_table[channel];
uint i;
DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__,
channel, length, gdesc->len);
- if (!dispc.feat->has_gamma_table)
+ if (!dispc->feat->has_gamma_table)
return;
if (lut == NULL || length < 2) {
@@ -3694,82 +3904,83 @@ static void dispc_mgr_set_gamma(enum omap_channel channel,
}
}
- if (dispc.is_enabled)
- dispc_mgr_write_gamma_table(channel);
+ if (dispc->is_enabled)
+ dispc_mgr_write_gamma_table(dispc, channel);
}
-static int dispc_init_gamma_tables(void)
+static int dispc_init_gamma_tables(struct dispc_device *dispc)
{
int channel;
- if (!dispc.feat->has_gamma_table)
+ if (!dispc->feat->has_gamma_table)
return 0;
- for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) {
+ for (channel = 0; channel < ARRAY_SIZE(dispc->gamma_table); channel++) {
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
u32 *gt;
if (channel == OMAP_DSS_CHANNEL_LCD2 &&
- !dispc_has_feature(FEAT_MGR_LCD2))
+ !dispc_has_feature(dispc, FEAT_MGR_LCD2))
continue;
if (channel == OMAP_DSS_CHANNEL_LCD3 &&
- !dispc_has_feature(FEAT_MGR_LCD3))
+ !dispc_has_feature(dispc, FEAT_MGR_LCD3))
continue;
- gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
- sizeof(u32), GFP_KERNEL);
+ gt = devm_kmalloc_array(&dispc->pdev->dev, gdesc->len,
+ sizeof(u32), GFP_KERNEL);
if (!gt)
return -ENOMEM;
- dispc.gamma_table[channel] = gt;
+ dispc->gamma_table[channel] = gt;
- dispc_mgr_set_gamma(channel, NULL, 0);
+ dispc_mgr_set_gamma(dispc, channel, NULL, 0);
}
return 0;
}
-static void _omap_dispc_initial_config(void)
+static void _omap_dispc_initial_config(struct dispc_device *dispc)
{
u32 l;
/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
- if (dispc_has_feature(FEAT_CORE_CLK_DIV)) {
- l = dispc_read_reg(DISPC_DIVISOR);
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV)) {
+ l = dispc_read_reg(dispc, DISPC_DIVISOR);
/* Use DISPC_DIVISOR.LCD, instead of DISPC_DIVISOR1.LCD */
l = FLD_MOD(l, 1, 0, 0);
l = FLD_MOD(l, 1, 23, 16);
- dispc_write_reg(DISPC_DIVISOR, l);
+ dispc_write_reg(dispc, DISPC_DIVISOR, l);
- dispc.core_clk_rate = dispc_fclk_rate();
+ dispc->core_clk_rate = dispc_fclk_rate(dispc);
}
/* Use gamma table mode, instead of palette mode */
- if (dispc.feat->has_gamma_table)
- REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3);
+ if (dispc->feat->has_gamma_table)
+ REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 3, 3);
/* For older DSS versions (FEAT_FUNCGATED) this enables
* func-clock auto-gating. For newer versions
- * (dispc.feat->has_gamma_table) this enables tv-out gamma tables.
+ * (dispc->feat->has_gamma_table) this enables tv-out gamma tables.
*/
- if (dispc_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
- REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+ if (dispc_has_feature(dispc, FEAT_FUNCGATED) ||
+ dispc->feat->has_gamma_table)
+ REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 9, 9);
- dispc_setup_color_conv_coef();
+ dispc_setup_color_conv_coef(dispc);
- dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
+ dispc_set_loadmode(dispc, OMAP_DSS_LOAD_FRAME_ONLY);
- dispc_init_fifos();
+ dispc_init_fifos(dispc);
- dispc_configure_burst_sizes();
+ dispc_configure_burst_sizes(dispc);
- dispc_ovl_enable_zorder_planes();
+ dispc_ovl_enable_zorder_planes(dispc);
- if (dispc.feat->mstandby_workaround)
- REG_FLD_MOD(DISPC_MSTANDBY_CTRL, 1, 0, 0);
+ if (dispc->feat->mstandby_workaround)
+ REG_FLD_MOD(dispc, DISPC_MSTANDBY_CTRL, 1, 0, 0);
- if (dispc_has_feature(FEAT_MFLAG))
- dispc_init_mflag();
+ if (dispc_has_feature(dispc, FEAT_MFLAG))
+ dispc_init_mflag(dispc);
}
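
Nearly every line in these hunks goes through REG_GET()/REG_FLD_MOD(), thin read-modify-write wrappers over the FLD_* bit-field macros. Below is a standalone, userspace-compilable sketch of that machinery; the macro bodies follow the usual omapdss definitions, but the register index and helper names here are illustrative, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

/* Bit-field helpers in the style of the omapdss headers (spelling illustrative). */
#define FLD_MASK(start, end)	(((1u << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

/* Fake register file standing in for the ioremapped DISPC window. */
static uint32_t regs[0x1000 / 4];

static uint32_t read_reg(unsigned int idx)          { return regs[idx / 4]; }
static void write_reg(unsigned int idx, uint32_t v) { regs[idx / 4] = v; }

/* REG_FLD_MOD(dispc, idx, val, start, end) expands to exactly this pattern. */
static void reg_fld_mod(unsigned int idx, uint32_t val, int start, int end)
{
	write_reg(idx, FLD_MOD(read_reg(idx), val, start, end));
}

int main(void)
{
	/* set a single-bit field without disturbing the rest of the register */
	write_reg(0x44, 0xdeadbe00);
	reg_fld_mod(0x44, 1, 3, 3);
	printf("reg 0x44 = 0x%08x, bit 3 = %u\n",
	       read_reg(0x44), FLD_GET(read_reg(0x44), 3, 3));
	return 0;
}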
static const enum dispc_feature_id omap2_dispc_features_list[] = {
@@ -4288,41 +4499,55 @@ static const struct dispc_features omap54xx_dispc_feats = {
static irqreturn_t dispc_irq_handler(int irq, void *arg)
{
- if (!dispc.is_enabled)
+ struct dispc_device *dispc = arg;
+
+ if (!dispc->is_enabled)
return IRQ_NONE;
- return dispc.user_handler(irq, dispc.user_data);
+ return dispc->user_handler(irq, dispc->user_data);
}
-static int dispc_request_irq(irq_handler_t handler, void *dev_id)
+static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+ void *dev_id)
{
int r;
- if (dispc.user_handler != NULL)
+ if (dispc->user_handler != NULL)
return -EBUSY;
- dispc.user_handler = handler;
- dispc.user_data = dev_id;
+ dispc->user_handler = handler;
+ dispc->user_data = dev_id;
/* ensure the dispc_irq_handler sees the values above */
smp_wmb();
- r = devm_request_irq(&dispc.pdev->dev, dispc.irq, dispc_irq_handler,
- IRQF_SHARED, "OMAP DISPC", &dispc);
+ r = devm_request_irq(&dispc->pdev->dev, dispc->irq, dispc_irq_handler,
+ IRQF_SHARED, "OMAP DISPC", dispc);
if (r) {
- dispc.user_handler = NULL;
- dispc.user_data = NULL;
+ dispc->user_handler = NULL;
+ dispc->user_data = NULL;
}
return r;
}
-static void dispc_free_irq(void *dev_id)
+static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
+{
+ devm_free_irq(&dispc->pdev->dev, dispc->irq, dispc);
+
+ dispc->user_handler = NULL;
+ dispc->user_data = NULL;
+}
+
+static u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
{
- devm_free_irq(&dispc.pdev->dev, dispc.irq, &dispc);
+ u32 limit = 0;
- dispc.user_handler = NULL;
- dispc.user_data = NULL;
+ /* Optional maximum memory bandwidth */
+ of_property_read_u32(dispc->pdev->dev.of_node, "max-memory-bandwidth",
+ &limit);
+
+ return limit;
}
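
The new get_memory_bandwidth_limit hook above leans on a property of of_property_read_u32(): when the property is absent the output variable is left untouched, so pre-initialising it is all that is needed for a default. A minimal kernel-style sketch of that pattern; the helper name is made up.

#include <linux/of.h>
#include <linux/types.h>

static u32 read_optional_u32(const struct device_node *np, const char *prop,
			     u32 def)
{
	u32 val = def;

	/* return value deliberately ignored: a missing property keeps the default */
	of_property_read_u32(np, prop, &val);

	return val;
}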
/*
@@ -4396,18 +4621,19 @@ static struct i734_buf {
void *vaddr;
} i734_buf;
-static int dispc_errata_i734_wa_init(void)
+static int dispc_errata_i734_wa_init(struct dispc_device *dispc)
{
- if (!dispc.feat->has_gamma_i734_bug)
+ if (!dispc->feat->has_gamma_i734_bug)
return 0;
i734_buf.size = i734.ovli.width * i734.ovli.height *
color_mode_to_bpp(i734.ovli.fourcc) / 8;
- i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size,
- &i734_buf.paddr, GFP_KERNEL);
+ i734_buf.vaddr = dma_alloc_writecombine(&dispc->pdev->dev,
+ i734_buf.size, &i734_buf.paddr,
+ GFP_KERNEL);
if (!i734_buf.vaddr) {
- dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed",
+ dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed",
__func__);
return -ENOMEM;
}
@@ -4415,72 +4641,73 @@ static int dispc_errata_i734_wa_init(void)
return 0;
}
-static void dispc_errata_i734_wa_fini(void)
+static void dispc_errata_i734_wa_fini(struct dispc_device *dispc)
{
- if (!dispc.feat->has_gamma_i734_bug)
+ if (!dispc->feat->has_gamma_i734_bug)
return;
- dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr,
+ dma_free_writecombine(&dispc->pdev->dev, i734_buf.size, i734_buf.vaddr,
i734_buf.paddr);
}
-static void dispc_errata_i734_wa(void)
+static void dispc_errata_i734_wa(struct dispc_device *dispc)
{
- u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD);
+ u32 framedone_irq = dispc_mgr_get_framedone_irq(dispc,
+ OMAP_DSS_CHANNEL_LCD);
struct omap_overlay_info ovli;
struct dss_lcd_mgr_config lcd_conf;
u32 gatestate;
unsigned int count;
- if (!dispc.feat->has_gamma_i734_bug)
+ if (!dispc->feat->has_gamma_i734_bug)
return;
- gatestate = REG_GET(DISPC_CONFIG, 8, 4);
+ gatestate = REG_GET(dispc, DISPC_CONFIG, 8, 4);
ovli = i734.ovli;
ovli.paddr = i734_buf.paddr;
lcd_conf = i734.lcd_conf;
/* Gate all LCD1 outputs */
- REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, 0x1f, 8, 4);
/* Setup and enable GFX plane */
- dispc_ovl_setup(OMAP_DSS_GFX, &ovli, &i734.vm, false,
- OMAP_DSS_CHANNEL_LCD);
- dispc_ovl_enable(OMAP_DSS_GFX, true);
+ dispc_ovl_setup(dispc, OMAP_DSS_GFX, &ovli, &i734.vm, false,
+ OMAP_DSS_CHANNEL_LCD);
+ dispc_ovl_enable(dispc, OMAP_DSS_GFX, true);
/* Set up and enable display manager for LCD1 */
- dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri);
- dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
+ dispc_mgr_setup(dispc, OMAP_DSS_CHANNEL_LCD, &i734.mgri);
+ dispc_calc_clock_rates(dispc, dss_get_dispc_clk_rate(dispc->dss),
&lcd_conf.clock_info);
- dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
- dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.vm);
+ dispc_mgr_set_lcd_config(dispc, OMAP_DSS_CHANNEL_LCD, &lcd_conf);
+ dispc_mgr_set_timings(dispc, OMAP_DSS_CHANNEL_LCD, &i734.vm);
- dispc_clear_irqstatus(framedone_irq);
+ dispc_clear_irqstatus(dispc, framedone_irq);
/* Enable and shut the channel to produce just one frame */
- dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true);
- dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false);
+ dispc_mgr_enable(dispc, OMAP_DSS_CHANNEL_LCD, true);
+ dispc_mgr_enable(dispc, OMAP_DSS_CHANNEL_LCD, false);
/* Busy wait for framedone. We can't fiddle with irq handlers
* in PM resume. Typically the loop runs less than 5 times and
 * waits less than a microsecond.
*/
count = 0;
- while (!(dispc_read_irqstatus() & framedone_irq)) {
+ while (!(dispc_read_irqstatus(dispc) & framedone_irq)) {
if (count++ > 10000) {
- dev_err(&dispc.pdev->dev, "%s: framedone timeout\n",
+ dev_err(&dispc->pdev->dev, "%s: framedone timeout\n",
__func__);
break;
}
}
- dispc_ovl_enable(OMAP_DSS_GFX, false);
+ dispc_ovl_enable(dispc, OMAP_DSS_GFX, false);
/* Clear all irq bits before continuing */
- dispc_clear_irqstatus(0xffffffff);
+ dispc_clear_irqstatus(dispc, 0xffffffff);
/* Restore the original state to LCD1 output gates */
- REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, gatestate, 8, 4);
}
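
The workaround above runs in runtime-PM resume, so it cannot sleep or rely on the interrupt path; instead it polls the IRQ status register with a hard iteration cap. A compilable userspace analogue of that bounded busy-wait follows; the stubbed register read stands in for dispc_read_irqstatus().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_irqstatus;

static uint32_t read_irqstatus(void)
{
	/* pretend the FRAMEDONE bit appears after a few polls */
	static int polls;

	if (++polls == 4)
		fake_irqstatus |= 1u << 0;
	return fake_irqstatus;
}

static bool wait_framedone(uint32_t framedone_mask, unsigned int max_polls)
{
	unsigned int count = 0;

	while (!(read_irqstatus() & framedone_mask)) {
		if (++count > max_polls)
			return false;	/* caller logs "framedone timeout" */
	}
	return true;
}

int main(void)
{
	printf("framedone seen: %s\n",
	       wait_framedone(1u << 0, 10000) ? "yes" : "timeout");
	return 0;
}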
static const struct dispc_ops dispc_ops = {
@@ -4497,6 +4724,8 @@ static const struct dispc_ops dispc_ops = {
.get_num_ovls = dispc_get_num_ovls,
.get_num_mgrs = dispc_get_num_mgrs,
+ .get_memory_bandwidth_limit = dispc_get_memory_bandwidth_limit,
+
.mgr_enable = dispc_mgr_enable,
.mgr_is_enabled = dispc_mgr_is_enabled,
.mgr_get_vsync_irq = dispc_mgr_get_vsync_irq,
@@ -4514,6 +4743,12 @@ static const struct dispc_ops dispc_ops = {
.ovl_enable = dispc_ovl_enable,
.ovl_setup = dispc_ovl_setup,
.ovl_get_color_modes = dispc_ovl_get_color_modes,
+
+ .wb_get_framedone_irq = dispc_wb_get_framedone_irq,
+ .wb_setup = dispc_wb_setup,
+ .has_writeback = dispc_has_writeback,
+ .wb_go_busy = dispc_wb_go_busy,
+ .wb_go = dispc_wb_go,
};
/* DISPC HW IP initialisation */
@@ -4539,14 +4774,22 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
const struct soc_device_attribute *soc;
+ struct dss_device *dss = dss_get_device(master);
+ struct dispc_device *dispc;
u32 rev;
int r = 0;
struct resource *dispc_mem;
struct device_node *np = pdev->dev.of_node;
- dispc.pdev = pdev;
+ dispc = kzalloc(sizeof(*dispc), GFP_KERNEL);
+ if (!dispc)
+ return -ENOMEM;
- spin_lock_init(&dispc.control_lock);
+ dispc->pdev = pdev;
+ platform_set_drvdata(pdev, dispc);
+ dispc->dss = dss;
+
+ spin_lock_init(&dispc->control_lock);
/*
* The OMAP3-based models can't be told apart using the compatible
@@ -4554,76 +4797,92 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
*/
soc = soc_device_match(dispc_soc_devices);
if (soc)
- dispc.feat = soc->data;
+ dispc->feat = soc->data;
else
- dispc.feat = of_match_device(dispc_of_match, &pdev->dev)->data;
+ dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data;
- r = dispc_errata_i734_wa_init();
+ r = dispc_errata_i734_wa_init(dispc);
if (r)
- return r;
+ goto err_free;
- dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
- dispc.base = devm_ioremap_resource(&pdev->dev, dispc_mem);
- if (IS_ERR(dispc.base))
- return PTR_ERR(dispc.base);
+ dispc_mem = platform_get_resource(dispc->pdev, IORESOURCE_MEM, 0);
+ dispc->base = devm_ioremap_resource(&pdev->dev, dispc_mem);
+ if (IS_ERR(dispc->base)) {
+ r = PTR_ERR(dispc->base);
+ goto err_free;
+ }
- dispc.irq = platform_get_irq(dispc.pdev, 0);
- if (dispc.irq < 0) {
+ dispc->irq = platform_get_irq(dispc->pdev, 0);
+ if (dispc->irq < 0) {
DSSERR("platform_get_irq failed\n");
- return -ENODEV;
+ r = -ENODEV;
+ goto err_free;
}
if (np && of_property_read_bool(np, "syscon-pol")) {
- dispc.syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
- if (IS_ERR(dispc.syscon_pol)) {
+ dispc->syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
+ if (IS_ERR(dispc->syscon_pol)) {
dev_err(&pdev->dev, "failed to get syscon-pol regmap\n");
- return PTR_ERR(dispc.syscon_pol);
+ r = PTR_ERR(dispc->syscon_pol);
+ goto err_free;
}
if (of_property_read_u32_index(np, "syscon-pol", 1,
- &dispc.syscon_pol_offset)) {
+ &dispc->syscon_pol_offset)) {
dev_err(&pdev->dev, "failed to get syscon-pol offset\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_free;
}
}
- r = dispc_init_gamma_tables();
+ r = dispc_init_gamma_tables(dispc);
if (r)
- return r;
+ goto err_free;
pm_runtime_enable(&pdev->dev);
- r = dispc_runtime_get();
+ r = dispc_runtime_get(dispc);
if (r)
goto err_runtime_get;
- _omap_dispc_initial_config();
+ _omap_dispc_initial_config(dispc);
- rev = dispc_read_reg(DISPC_REVISION);
+ rev = dispc_read_reg(dispc, DISPC_REVISION);
dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- dispc_runtime_put();
+ dispc_runtime_put(dispc);
- dispc_set_ops(&dispc_ops);
+ dss->dispc = dispc;
+ dss->dispc_ops = &dispc_ops;
- dss_debugfs_create_file("dispc", dispc_dump_regs);
+ dispc->debugfs = dss_debugfs_create_file(dss, "dispc", dispc_dump_regs,
+ dispc);
return 0;
err_runtime_get:
pm_runtime_disable(&pdev->dev);
+err_free:
+ kfree(dispc);
return r;
}
-static void dispc_unbind(struct device *dev, struct device *master,
- void *data)
+static void dispc_unbind(struct device *dev, struct device *master, void *data)
{
- dispc_set_ops(NULL);
+ struct dispc_device *dispc = dev_get_drvdata(dev);
+ struct dss_device *dss = dispc->dss;
+
+ dss_debugfs_remove_file(dispc->debugfs);
+
+ dss->dispc = NULL;
+ dss->dispc_ops = NULL;
pm_runtime_disable(dev);
- dispc_errata_i734_wa_fini();
+ dispc_errata_i734_wa_fini(dispc);
+
+ kfree(dispc);
}
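
dispc_bind()/dispc_unbind() now own the lifetime of the dispc_device: allocate, publish it through drvdata so dev_get_drvdata() works from the runtime-PM and IRQ paths, and tear down in reverse. A stripped-down sketch of that pattern, with hypothetical foo_* names rather than the real omapdrm symbols.

#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_device {
	struct platform_device *pdev;
	bool is_enabled;
};

static int foo_bind(struct device *dev, struct device *master, void *data)
{
	struct foo_device *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->pdev = to_platform_device(dev);
	platform_set_drvdata(foo->pdev, foo);	/* visible to PM and IRQ handlers */

	return 0;
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	kfree(foo);
}

static const struct component_ops foo_component_ops = {
	.bind = foo_bind,
	.unbind = foo_unbind,
};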
static const struct component_ops dispc_component_ops = {
@@ -4644,36 +4903,40 @@ static int dispc_remove(struct platform_device *pdev)
static int dispc_runtime_suspend(struct device *dev)
{
- dispc.is_enabled = false;
+ struct dispc_device *dispc = dev_get_drvdata(dev);
+
+ dispc->is_enabled = false;
/* ensure the dispc_irq_handler sees the is_enabled value */
smp_wmb();
/* wait for current handler to finish before turning the DISPC off */
- synchronize_irq(dispc.irq);
+ synchronize_irq(dispc->irq);
- dispc_save_context();
+ dispc_save_context(dispc);
return 0;
}
static int dispc_runtime_resume(struct device *dev)
{
+ struct dispc_device *dispc = dev_get_drvdata(dev);
+
/*
* The reset value for load mode is 0 (OMAP_DSS_LOAD_CLUT_AND_FRAME)
* but we always initialize it to 2 (OMAP_DSS_LOAD_FRAME_ONLY) in
* _omap_dispc_initial_config(). We can thus use it to detect if
* we have lost register context.
*/
- if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
- _omap_dispc_initial_config();
+ if (REG_GET(dispc, DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
+ _omap_dispc_initial_config(dispc);
- dispc_errata_i734_wa();
+ dispc_errata_i734_wa(dispc);
- dispc_restore_context();
+ dispc_restore_context(dispc);
- dispc_restore_gamma_tables();
+ dispc_restore_gamma_tables(dispc);
}
- dispc.is_enabled = true;
+ dispc->is_enabled = true;
/* ensure the dispc_irq_handler sees the is_enabled value */
smp_wmb();
@@ -4685,7 +4948,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
.runtime_resume = dispc_runtime_resume,
};
-static struct platform_driver omap_dispchw_driver = {
+struct platform_driver omap_dispchw_driver = {
.probe = dispc_probe,
.remove = dispc_remove,
.driver = {
@@ -4695,13 +4958,3 @@ static struct platform_driver omap_dispchw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init dispc_init_platform_driver(void)
-{
- return platform_driver_register(&omap_dispchw_driver);
-}
-
-void dispc_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_dispchw_driver);
-}
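
Dropping the static from omap_dispchw_driver and deleting the init/uninit wrappers only makes sense if something else registers the driver; presumably the core omapdss module now does so for all sub-drivers in one place. A sketch of how that can look with platform_register_drivers(); the array contents and the omap_dsshw_driver name are assumptions, not part of this hunk.

#include <linux/module.h>
#include <linux/platform_device.h>

extern struct platform_driver omap_dispchw_driver;
extern struct platform_driver omap_dsshw_driver;	/* assumed peer driver */

static struct platform_driver * const dss_drivers[] = {
	&omap_dsshw_driver,
	&omap_dispchw_driver,
};

static int __init omapdss_drivers_init(void)
{
	/* registers each driver in order, unwinding automatically on failure */
	return platform_register_drivers(dss_drivers, ARRAY_SIZE(dss_drivers));
}
module_init(omapdss_drivers_init);

static void __exit omapdss_drivers_exit(void)
{
	platform_unregister_drivers(dss_drivers, ARRAY_SIZE(dss_drivers));
}
module_exit(omapdss_drivers_exit);

MODULE_LICENSE("GPL v2");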
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h
index 003adce..e901dd1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.h
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.h
@@ -1,10 +1,7 @@
/*
- * linux/drivers/video/omap2/dss/dispc.h
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Archit Taneja <archit@ti.com>
*
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
index 34fad23..44804c8 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
@@ -1,7 +1,5 @@
/*
- * linux/drivers/video/omap2/dss/dispc_coefs.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Chandrabhanu Mahapatra <cmahapatra@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 4227993..4241431 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/display.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -30,12 +28,11 @@
#include "omapdss.h"
-void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+ struct videomode *vm)
{
*vm = dssdev->panel.vm;
}
-EXPORT_SYMBOL(omapdss_default_get_timings);
static LIST_HEAD(panel_list);
static DEFINE_MUTEX(panel_list_mutex);
@@ -175,17 +172,3 @@ out:
return dssdev;
}
EXPORT_SYMBOL(omap_dss_get_next_device);
-
-struct omap_dss_device *omap_dss_find_device(void *data,
- int (*match)(struct omap_dss_device *dssdev, void *data))
-{
- struct omap_dss_device *dssdev = NULL;
-
- while ((dssdev = omap_dss_get_next_device(dssdev)) != NULL) {
- if (match(dssdev, data))
- return dssdev;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(omap_dss_find_device);
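
With omap_dss_find_device() removed, callers that still need to locate a display device are left with the exported iterator. A sketch of open-coding the old match loop on top of omap_dss_get_next_device(); matching by name is just an example predicate.

#include <linux/string.h>
#include "omapdss.h"

static struct omap_dss_device *find_dssdev_by_name(const char *name)
{
	struct omap_dss_device *dssdev = NULL;

	while ((dssdev = omap_dss_get_next_device(dssdev)) != NULL) {
		if (!strcmp(dssdev->name, name))
			return dssdev;
	}

	return NULL;
}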
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index ca1e3b48..fb1c27f 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dpi.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -40,6 +38,7 @@
struct dpi_data {
struct platform_device *pdev;
enum dss_model dss_model;
+ struct dss_device *dss;
struct regulator *vdds_dsi_reg;
enum dss_clk_source clk_src;
@@ -52,8 +51,6 @@ struct dpi_data {
int data_lines;
struct omap_dss_device output;
-
- bool port_initialized;
};
static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
@@ -61,7 +58,8 @@ static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
return container_of(dssdev, struct dpi_data, output);
}
-static enum dss_clk_source dpi_get_clk_src_dra7xx(enum omap_channel channel)
+static enum dss_clk_source dpi_get_clk_src_dra7xx(struct dpi_data *dpi,
+ enum omap_channel channel)
{
/*
* Possible clock sources:
@@ -73,23 +71,23 @@ static enum dss_clk_source dpi_get_clk_src_dra7xx(enum omap_channel channel)
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
{
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL1_1))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL1_1))
return DSS_CLK_SRC_PLL1_1;
break;
}
case OMAP_DSS_CHANNEL_LCD2:
{
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL1_3))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL1_3))
return DSS_CLK_SRC_PLL1_3;
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL2_3))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL2_3))
return DSS_CLK_SRC_PLL2_3;
break;
}
case OMAP_DSS_CHANNEL_LCD3:
{
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL2_1))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL2_1))
return DSS_CLK_SRC_PLL2_1;
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL1_3))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL1_3))
return DSS_CLK_SRC_PLL1_3;
break;
}
@@ -136,7 +134,7 @@ static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
}
case DSS_MODEL_DRA7:
- return dpi_get_clk_src_dra7xx(channel);
+ return dpi_get_clk_src_dra7xx(dpi, channel);
default:
return DSS_CLK_SRC_FCK;
@@ -145,7 +143,7 @@ static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
struct dpi_clk_calc_ctx {
struct dss_pll *pll;
- unsigned clkout_idx;
+ unsigned int clkout_idx;
/* inputs */
@@ -193,8 +191,9 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
- return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
- dpi_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->pll->dss->dispc, dispc,
+ ctx->pck_min, ctx->pck_max,
+ dpi_calc_dispc_cb, ctx);
}
@@ -210,7 +209,7 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
ctx->pll_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
- ctx->pck_min, dss_get_max_fck_rate(),
+ ctx->pck_min, dss_get_max_fck_rate(ctx->pll->dss),
dpi_calc_hsdiv_cb, ctx);
}
@@ -220,8 +219,9 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
ctx->fck = fck;
- return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
- dpi_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->pll->dss->dispc, fck,
+ ctx->pck_min, ctx->pck_max,
+ dpi_calc_dispc_cb, ctx);
}
static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
@@ -259,7 +259,8 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
}
}
-static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
+static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
+ struct dpi_clk_calc_ctx *ctx)
{
int i;
@@ -280,7 +281,8 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
ctx->pck_min = 0;
ctx->pck_max = pck + 1000 * i * i * i;
- ok = dss_div_calc(pck, ctx->pck_min, dpi_calc_dss_cb, ctx);
+ ok = dss_div_calc(dpi->dss, pck, ctx->pck_min,
+ dpi_calc_dss_cb, ctx);
if (ok)
return ok;
}
@@ -306,7 +308,7 @@ static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
if (r)
return r;
- dss_select_lcd_clk_source(channel, dpi->clk_src);
+ dss_select_lcd_clk_source(dpi->dss, channel, dpi->clk_src);
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
@@ -324,11 +326,11 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
int r;
bool ok;
- ok = dpi_dss_clk_calc(pck_req, &ctx);
+ ok = dpi_dss_clk_calc(dpi, pck_req, &ctx);
if (!ok)
return -EINVAL;
- r = dss_set_fck_rate(ctx.fck);
+ r = dss_set_fck_rate(dpi->dss, ctx.fck);
if (r)
return r;
@@ -343,8 +345,6 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
static int dpi_set_mode(struct dpi_data *dpi)
{
- struct omap_dss_device *out = &dpi->output;
- enum omap_channel channel = out->dispc_channel;
struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
@@ -352,8 +352,8 @@ static int dpi_set_mode(struct dpi_data *dpi)
int r = 0;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, channel, vm->pixelclock, &fck,
- &lck_div, &pck_div);
+ r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
+ vm->pixelclock, &fck, &lck_div, &pck_div);
else
r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
&lck_div, &pck_div);
@@ -369,16 +369,13 @@ static int dpi_set_mode(struct dpi_data *dpi)
vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, vm);
+ dss_mgr_set_timings(&dpi->output, vm);
return 0;
}
static void dpi_config_lcd_manager(struct dpi_data *dpi)
{
- struct omap_dss_device *out = &dpi->output;
- enum omap_channel channel = out->dispc_channel;
-
dpi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
dpi->mgr_config.stallmode = false;
@@ -388,14 +385,13 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dpi->mgr_config.lcden_sig_polarity = 0;
- dss_mgr_set_lcd_config(channel, &dpi->mgr_config);
+ dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
static int dpi_display_enable(struct omap_dss_device *dssdev)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
struct omap_dss_device *out = &dpi->output;
- enum omap_channel channel = out->dispc_channel;
int r;
mutex_lock(&dpi->lock);
@@ -412,11 +408,11 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
goto err_reg_enable;
}
- r = dispc_runtime_get();
+ r = dispc_runtime_get(dpi->dss->dispc);
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(out->port_num, channel);
+ r = dss_dpi_select_source(dpi->dss, out->port_num, out->dispc_channel);
if (r)
goto err_src_sel;
@@ -434,7 +430,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mdelay(2);
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&dpi->output);
if (r)
goto err_mgr_enable;
@@ -448,7 +444,7 @@ err_set_mode:
dss_pll_disable(dpi->pll);
err_pll_init:
err_src_sel:
- dispc_runtime_put();
+ dispc_runtime_put(dpi->dss->dispc);
err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
@@ -461,18 +457,18 @@ err_no_out_mgr:
static void dpi_display_disable(struct omap_dss_device *dssdev)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- enum omap_channel channel = dpi->output.dispc_channel;
mutex_lock(&dpi->lock);
- dss_mgr_disable(channel);
+ dss_mgr_disable(&dpi->output);
if (dpi->pll) {
- dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
+ DSS_CLK_SRC_FCK);
dss_pll_disable(dpi->pll);
}
- dispc_runtime_put();
+ dispc_runtime_put(dpi->dss->dispc);
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
@@ -520,7 +516,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
if (vm->hactive % 8 != 0)
return -EINVAL;
- if (!dispc_mgr_timings_ok(channel, vm))
+ if (!dispc_mgr_timings_ok(dpi->dss->dispc, channel, vm))
return -EINVAL;
if (vm->pixelclock == 0)
@@ -533,7 +529,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(vm->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(dpi, vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
@@ -606,7 +602,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
dpi->clk_src = dpi_get_clk_src(dpi);
- pll = dss_pll_find_by_src(dpi->clk_src);
+ pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
if (!pll)
return;
@@ -658,7 +654,6 @@ static int dpi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- enum omap_channel channel = dpi->output.dispc_channel;
int r;
r = dpi_init_regulator(dpi);
@@ -667,7 +662,7 @@ static int dpi_connect(struct omap_dss_device *dssdev,
dpi_init_pll(dpi);
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&dpi->output, dssdev);
if (r)
return r;
@@ -675,7 +670,7 @@ static int dpi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&dpi->output, dssdev);
return r;
}
@@ -686,7 +681,6 @@ static void dpi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- enum omap_channel channel = dpi->output.dispc_channel;
WARN_ON(dst != dssdev->dst);
@@ -695,7 +689,7 @@ static void dpi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&dpi->output, dssdev);
}
static const struct omapdss_dpi_ops dpi_ops = {
@@ -752,8 +746,8 @@ static void dpi_uninit_output_port(struct device_node *port)
omapdss_unregister_output(out);
}
-int dpi_init_port(struct platform_device *pdev, struct device_node *port,
- enum dss_model dss_model)
+int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
+ struct device_node *port, enum dss_model dss_model)
{
struct dpi_data *dpi;
struct device_node *ep;
@@ -780,14 +774,13 @@ int dpi_init_port(struct platform_device *pdev, struct device_node *port,
dpi->pdev = pdev;
dpi->dss_model = dss_model;
+ dpi->dss = dss;
port->data = dpi;
mutex_init(&dpi->lock);
dpi_init_output_port(dpi, port);
- dpi->port_initialized = true;
-
return 0;
err_datalines:
@@ -800,7 +793,7 @@ void dpi_uninit_port(struct device_node *port)
{
struct dpi_data *dpi = port->data;
- if (!dpi->port_initialized)
+ if (!dpi)
return;
dpi_uninit_output_port(port);
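
dpi_dss_clk_calc() (reworked above to take the dpi pointer) retries the divider search with a progressively wider pixel-clock window: pck_min stays 0 while pck_max grows by 1000*i^3 Hz on each pass. A standalone illustration of that back-off, with the real dss_div_calc()/dispc_div_calc() search replaced by a stub and the pass count chosen arbitrarily.

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the real divider search: succeeds once the window is wide enough */
static bool div_calc_stub(unsigned long pck_target, unsigned long pck_max)
{
	unsigned long achievable = pck_target + 40000;	/* pretend HW overshoots by 40 kHz */

	return achievable <= pck_max;
}

static bool dpi_clk_calc_sketch(unsigned long pck)
{
	int i;

	for (i = 0; i < 7; ++i) {
		unsigned long pck_max = pck + 1000ul * i * i * i;

		if (div_calc_stub(pck, pck_max)) {
			printf("found dividers on pass %d (window +%lu Hz)\n",
			       i, pck_max - pck);
			return true;
		}
	}

	return false;
}

int main(void)
{
	return dpi_clk_calc_sketch(74250000) ? 0 : 1;
}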
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index c2cf6d9..d4a6806 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dsi.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -121,11 +119,11 @@ struct dsi_reg { u16 module; u16 idx; };
#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
-#define REG_GET(dsidev, idx, start, end) \
- FLD_GET(dsi_read_reg(dsidev, idx), start, end)
+#define REG_GET(dsi, idx, start, end) \
+ FLD_GET(dsi_read_reg(dsi, idx), start, end)
-#define REG_FLD_MOD(dsidev, idx, val, start, end) \
- dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
+#define REG_FLD_MOD(dsi, idx, val, start, end) \
+ dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end))
/* Global interrupts */
#define DSI_IRQ_VC0 (1 << 0)
@@ -215,13 +213,12 @@ struct dsi_reg { u16 module; u16 idx; };
DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+struct dsi_data;
-static int dsi_display_init_dispc(struct platform_device *dsidev,
- enum omap_channel channel);
-static void dsi_display_uninit_dispc(struct platform_device *dsidev,
- enum omap_channel channel);
+static int dsi_display_init_dispc(struct dsi_data *dsi);
+static void dsi_display_uninit_dispc(struct dsi_data *dsi);
-static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
+static int dsi_vc_send_null(struct dsi_data *dsi, int channel);
/* DSI PLL HSDIV indices */
#define HSDIV_DISPC 0
@@ -271,10 +268,10 @@ enum dsi_vc_source {
struct dsi_irq_stats {
unsigned long last_reset;
- unsigned irq_count;
- unsigned dsi_irqs[32];
- unsigned vc_irqs[4][32];
- unsigned cio_irqs[32];
+ unsigned int irq_count;
+ unsigned int dsi_irqs[32];
+ unsigned int vc_irqs[4][32];
+ unsigned int cio_irqs[32];
};
struct dsi_isr_tables {
@@ -284,7 +281,7 @@ struct dsi_isr_tables {
};
struct dsi_clk_calc_ctx {
- struct platform_device *dsidev;
+ struct dsi_data *dsi;
struct dss_pll *pll;
/* inputs */
@@ -331,7 +328,7 @@ struct dsi_of_data {
};
struct dsi_data {
- struct platform_device *pdev;
+ struct device *dev;
void __iomem *proto_base;
void __iomem *phy_base;
void __iomem *pll_base;
@@ -345,6 +342,7 @@ struct dsi_data {
struct clk *dss_clk;
struct regmap *syscon;
+ struct dss_device *dss;
struct dispc_clock_info user_dispc_cinfo;
struct dss_pll_clock_info user_dsi_cinfo;
@@ -375,7 +373,7 @@ struct dsi_data {
int update_channel;
#ifdef DSI_PERF_MEASURE
- unsigned update_bytes;
+ unsigned int update_bytes;
#endif
bool te_enabled;
@@ -402,19 +400,23 @@ struct dsi_data {
#endif
int debug_read;
int debug_write;
+ struct {
+ struct dss_debugfs_entry *irqs;
+ struct dss_debugfs_entry *regs;
+ } debugfs;
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spinlock_t irq_stats_lock;
struct dsi_irq_stats irq_stats;
#endif
- unsigned num_lanes_supported;
- unsigned line_buffer_size;
+ unsigned int num_lanes_supported;
+ unsigned int line_buffer_size;
struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
- unsigned num_lanes_used;
+ unsigned int num_lanes_used;
- unsigned scp_clk_refcount;
+ unsigned int scp_clk_refcount;
struct dss_lcd_mgr_config mgr_config;
struct videomode vm;
@@ -426,7 +428,7 @@ struct dsi_data {
};
struct dsi_packet_sent_handler_data {
- struct platform_device *dsidev;
+ struct dsi_data *dsi;
struct completion *completion;
};
@@ -435,17 +437,12 @@ static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif
-static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
+static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
{
- return dev_get_drvdata(&dsidev->dev);
+ return dev_get_drvdata(dssdev->dev);
}
-static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
-{
- return to_platform_device(dssdev->dev);
-}
-
-static struct platform_device *dsi_get_dsidev_from_id(int module)
+static struct dsi_data *dsi_get_dsi_from_id(int module)
{
struct omap_dss_device *out;
enum omap_dss_output_id id;
@@ -463,13 +460,12 @@ static struct platform_device *dsi_get_dsidev_from_id(int module)
out = omap_dss_get_output(id);
- return out ? to_platform_device(out->dev) : NULL;
+ return out ? to_dsi_data(out) : NULL;
}
-static inline void dsi_write_reg(struct platform_device *dsidev,
- const struct dsi_reg idx, u32 val)
+static inline void dsi_write_reg(struct dsi_data *dsi,
+ const struct dsi_reg idx, u32 val)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
void __iomem *base;
switch(idx.module) {
@@ -482,10 +478,8 @@ static inline void dsi_write_reg(struct platform_device *dsidev,
__raw_writel(val, base + idx.idx);
}
-static inline u32 dsi_read_reg(struct platform_device *dsidev,
- const struct dsi_reg idx)
+static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
void __iomem *base;
switch(idx.module) {
@@ -500,24 +494,20 @@ static inline u32 dsi_read_reg(struct platform_device *dsidev,
static void dsi_bus_lock(struct omap_dss_device *dssdev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
down(&dsi->bus_lock);
}
static void dsi_bus_unlock(struct omap_dss_device *dssdev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
up(&dsi->bus_lock);
}
-static bool dsi_bus_is_locked(struct platform_device *dsidev)
+static bool dsi_bus_is_locked(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
return dsi->bus_lock.count == 0;
}
@@ -526,8 +516,9 @@ static void dsi_completion_handler(void *data, u32 mask)
complete((struct completion *)data);
}
-static inline int wait_for_bit_change(struct platform_device *dsidev,
- const struct dsi_reg idx, int bitnum, int value)
+static inline bool wait_for_bit_change(struct dsi_data *dsi,
+ const struct dsi_reg idx,
+ int bitnum, int value)
{
unsigned long timeout;
ktime_t wait;
@@ -536,22 +527,22 @@ static inline int wait_for_bit_change(struct platform_device *dsidev,
/* first busyloop to see if the bit changes right away */
t = 100;
while (t-- > 0) {
- if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
- return value;
+ if (REG_GET(dsi, idx, bitnum, bitnum) == value)
+ return true;
}
/* then loop for 500ms, sleeping for 1ms in between */
timeout = jiffies + msecs_to_jiffies(500);
while (time_before(jiffies, timeout)) {
- if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
- return value;
+ if (REG_GET(dsi, idx, bitnum, bitnum) == value)
+ return true;
wait = ns_to_ktime(1000 * 1000);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
}
- return !value;
+ return false;
}
static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
@@ -571,21 +562,18 @@ static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
}
#ifdef DSI_PERF_MEASURE
-static void dsi_perf_mark_setup(struct platform_device *dsidev)
+static void dsi_perf_mark_setup(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
dsi->perf_setup_time = ktime_get();
}
-static void dsi_perf_mark_start(struct platform_device *dsidev)
+static void dsi_perf_mark_start(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
dsi->perf_start_time = ktime_get();
}
-static void dsi_perf_show(struct platform_device *dsidev, const char *name)
+static void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
ktime_t t, setup_time, trans_time;
u32 total_bytes;
u32 setup_us, trans_us, total_us;
@@ -619,16 +607,15 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
total_bytes * 1000 / total_us);
}
#else
-static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
+static inline void dsi_perf_mark_setup(struct dsi_data *dsi)
{
}
-static inline void dsi_perf_mark_start(struct platform_device *dsidev)
+static inline void dsi_perf_mark_start(struct dsi_data *dsi)
{
}
-static inline void dsi_perf_show(struct platform_device *dsidev,
- const char *name)
+static inline void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
}
#endif
@@ -725,10 +712,9 @@ static void print_irq_status_cio(u32 status)
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
- u32 *vcstatus, u32 ciostatus)
+static void dsi_collect_irq_stats(struct dsi_data *dsi, u32 irqstatus,
+ u32 *vcstatus, u32 ciostatus)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
spin_lock(&dsi->irq_stats_lock);
@@ -744,15 +730,14 @@ static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
spin_unlock(&dsi->irq_stats_lock);
}
#else
-#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
+#define dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus)
#endif
static int debug_irq;
-static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
- u32 *vcstatus, u32 ciostatus)
+static void dsi_handle_irq_errors(struct dsi_data *dsi, u32 irqstatus,
+ u32 *vcstatus, u32 ciostatus)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
if (irqstatus & DSI_IRQ_ERROR_MASK) {
@@ -784,7 +769,7 @@ static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
}
static void dsi_call_isrs(struct dsi_isr_data *isr_array,
- unsigned isr_array_size, u32 irqstatus)
+ unsigned int isr_array_size, u32 irqstatus)
{
struct dsi_isr_data *isr_data;
int i;
@@ -821,20 +806,16 @@ static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
- struct platform_device *dsidev;
- struct dsi_data *dsi;
+ struct dsi_data *dsi = arg;
u32 irqstatus, vcstatus[4], ciostatus;
int i;
- dsidev = (struct platform_device *) arg;
- dsi = dsi_get_dsidrv_data(dsidev);
-
if (!dsi->is_enabled)
return IRQ_NONE;
spin_lock(&dsi->irq_lock);
- irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
+ irqstatus = dsi_read_reg(dsi, DSI_IRQSTATUS);
/* IRQ is not for us */
if (!irqstatus) {
@@ -842,9 +823,9 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
- dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
+ dsi_write_reg(dsi, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
/* flush posted write */
- dsi_read_reg(dsidev, DSI_IRQSTATUS);
+ dsi_read_reg(dsi, DSI_IRQSTATUS);
for (i = 0; i < 4; ++i) {
if ((irqstatus & (1 << i)) == 0) {
@@ -852,19 +833,19 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
continue;
}
- vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
+ vcstatus[i] = dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));
- dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
+ dsi_write_reg(dsi, DSI_VC_IRQSTATUS(i), vcstatus[i]);
/* flush posted write */
- dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
+ dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));
}
if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
- ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
+ ciostatus = dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);
- dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
+ dsi_write_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
/* flush posted write */
- dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
+ dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);
} else {
ciostatus = 0;
}
@@ -883,19 +864,20 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
- dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
+ dsi_handle_irq_errors(dsi, irqstatus, vcstatus, ciostatus);
- dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
+ dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus);
return IRQ_HANDLED;
}
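
Note the idiom the handler keeps after the conversion: each status register is acked by writing the set bits back, then read once more so the posted write actually reaches the device before the handler moves on. A generic sketch of that pattern; the foo_* names and the 0x18 offset are illustrative only.

#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_IRQSTATUS	0x18

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	void __iomem *base = arg;
	u32 status = readl(base + FOO_IRQSTATUS);

	if (!status)
		return IRQ_NONE;		/* shared line, not for us */

	writel(status, base + FOO_IRQSTATUS);	/* ack by writing the bits back */
	readl(base + FOO_IRQSTATUS);		/* flush the posted write */

	/* ... dispatch per-bit work here ... */

	return IRQ_HANDLED;
}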
/* dsi->irq_lock has to be locked by the caller */
-static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
- struct dsi_isr_data *isr_array,
- unsigned isr_array_size, u32 default_mask,
- const struct dsi_reg enable_reg,
- const struct dsi_reg status_reg)
+static void _omap_dsi_configure_irqs(struct dsi_data *dsi,
+ struct dsi_isr_data *isr_array,
+ unsigned int isr_array_size,
+ u32 default_mask,
+ const struct dsi_reg enable_reg,
+ const struct dsi_reg status_reg)
{
struct dsi_isr_data *isr_data;
u32 mask;
@@ -913,54 +895,48 @@ static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
mask |= isr_data->mask;
}
- old_mask = dsi_read_reg(dsidev, enable_reg);
+ old_mask = dsi_read_reg(dsi, enable_reg);
/* clear the irqstatus for newly enabled irqs */
- dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
- dsi_write_reg(dsidev, enable_reg, mask);
+ dsi_write_reg(dsi, status_reg, (mask ^ old_mask) & mask);
+ dsi_write_reg(dsi, enable_reg, mask);
/* flush posted writes */
- dsi_read_reg(dsidev, enable_reg);
- dsi_read_reg(dsidev, status_reg);
+ dsi_read_reg(dsi, enable_reg);
+ dsi_read_reg(dsi, status_reg);
}
/* dsi->irq_lock has to be locked by the caller */
-static void _omap_dsi_set_irqs(struct platform_device *dsidev)
+static void _omap_dsi_set_irqs(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
mask |= DSI_IRQ_TE_TRIGGER;
#endif
- _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
+ _omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table,
ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
DSI_IRQENABLE, DSI_IRQSTATUS);
}
/* dsi->irq_lock has to be locked by the caller */
-static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
+static void _omap_dsi_set_irqs_vc(struct dsi_data *dsi, int vc)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
+ _omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_vc[vc],
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
DSI_VC_IRQ_ERROR_MASK,
DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}
/* dsi->irq_lock has to be locked by the caller */
-static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
+static void _omap_dsi_set_irqs_cio(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
+ _omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_cio,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
DSI_CIO_IRQ_ERROR_MASK,
DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}
-static void _dsi_initialize_irq(struct platform_device *dsidev)
+static void _dsi_initialize_irq(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int vc;
@@ -968,16 +944,16 @@ static void _dsi_initialize_irq(struct platform_device *dsidev)
memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
- _omap_dsi_set_irqs(dsidev);
+ _omap_dsi_set_irqs(dsi);
for (vc = 0; vc < 4; ++vc)
- _omap_dsi_set_irqs_vc(dsidev, vc);
- _omap_dsi_set_irqs_cio(dsidev);
+ _omap_dsi_set_irqs_vc(dsi, vc);
+ _omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
}
static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
- struct dsi_isr_data *isr_array, unsigned isr_array_size)
+ struct dsi_isr_data *isr_array, unsigned int isr_array_size)
{
struct dsi_isr_data *isr_data;
int free_idx;
@@ -1011,7 +987,7 @@ static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
}
static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
- struct dsi_isr_data *isr_array, unsigned isr_array_size)
+ struct dsi_isr_data *isr_array, unsigned int isr_array_size)
{
struct dsi_isr_data *isr_data;
int i;
@@ -1032,10 +1008,9 @@ static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
return -EINVAL;
}
-static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
- void *arg, u32 mask)
+static int dsi_register_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
+ void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1045,17 +1020,16 @@ static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
ARRAY_SIZE(dsi->isr_tables.isr_table));
if (r == 0)
- _omap_dsi_set_irqs(dsidev);
+ _omap_dsi_set_irqs(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_unregister_isr(struct platform_device *dsidev,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
+ void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1065,17 +1039,16 @@ static int dsi_unregister_isr(struct platform_device *dsidev,
ARRAY_SIZE(dsi->isr_tables.isr_table));
if (r == 0)
- _omap_dsi_set_irqs(dsidev);
+ _omap_dsi_set_irqs(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
+ omap_dsi_isr_t isr, void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1086,17 +1059,16 @@ static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsidev, channel);
+ _omap_dsi_set_irqs_vc(dsi, channel);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
+ omap_dsi_isr_t isr, void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1107,17 +1079,16 @@ static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsidev, channel);
+ _omap_dsi_set_irqs_vc(dsi, channel);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_register_isr_cio(struct platform_device *dsidev,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_register_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
+ void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1127,17 +1098,16 @@ static int dsi_register_isr_cio(struct platform_device *dsidev,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
if (r == 0)
- _omap_dsi_set_irqs_cio(dsidev);
+ _omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_unregister_isr_cio(struct platform_device *dsidev,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_unregister_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
+ void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1147,18 +1117,18 @@ static int dsi_unregister_isr_cio(struct platform_device *dsidev,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
if (r == 0)
- _omap_dsi_set_irqs_cio(dsidev);
+ _omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static u32 dsi_get_errors(struct platform_device *dsidev)
+static u32 dsi_get_errors(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
u32 e;
+
spin_lock_irqsave(&dsi->errors_lock, flags);
e = dsi->errors;
dsi->errors = 0;
@@ -1166,38 +1136,35 @@ static u32 dsi_get_errors(struct platform_device *dsidev)
return e;
}
-static int dsi_runtime_get(struct platform_device *dsidev)
+static int dsi_runtime_get(struct dsi_data *dsi)
{
int r;
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DSSDBG("dsi_runtime_get\n");
- r = pm_runtime_get_sync(&dsi->pdev->dev);
+ r = pm_runtime_get_sync(dsi->dev);
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
-static void dsi_runtime_put(struct platform_device *dsidev)
+static void dsi_runtime_put(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
DSSDBG("dsi_runtime_put\n");
- r = pm_runtime_put_sync(&dsi->pdev->dev);
+ r = pm_runtime_put_sync(dsi->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
-static int dsi_regulator_init(struct platform_device *dsidev)
+static int dsi_regulator_init(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct regulator *vdds_dsi;
if (dsi->vdds_dsi_reg != NULL)
return 0;
- vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdd");
+ vdds_dsi = devm_regulator_get(dsi->dev, "vdd");
if (IS_ERR(vdds_dsi)) {
if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
@@ -1210,16 +1177,15 @@ static int dsi_regulator_init(struct platform_device *dsidev)
return 0;
}
-static void _dsi_print_reset_status(struct platform_device *dsidev)
+static void _dsi_print_reset_status(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 l;
int b0, b1, b2;
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
- l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
+ l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) {
b0 = 28;
@@ -1232,7 +1198,7 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
}
#define DSI_FLD_GET(fld, start, end)\
- FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)
+ FLD_GET(dsi_read_reg(dsi, DSI_##fld), start, end)
pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
DSI_FLD_GET(PLL_STATUS, 0, 0),
@@ -1247,53 +1213,48 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
#undef DSI_FLD_GET
}
-static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
+static inline int dsi_if_enable(struct dsi_data *dsi, bool enable)
{
DSSDBG("dsi_if_enable(%d)\n", enable);
enable = enable ? 1 : 0;
- REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
+ REG_FLD_MOD(dsi, DSI_CTRL, enable, 0, 0); /* IF_EN */
- if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
- DSSERR("Failed to set dsi_if_enable to %d\n", enable);
- return -EIO;
+ if (!wait_for_bit_change(dsi, DSI_CTRL, 0, enable)) {
+ DSSERR("Failed to set dsi_if_enable to %d\n", enable);
+ return -EIO;
}
return 0;
}
-static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
+static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
return dsi->pll.cinfo.clkout[HSDIV_DISPC];
}
-static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
+static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
return dsi->pll.cinfo.clkout[HSDIV_DSI];
}
-static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
+static unsigned long dsi_get_txbyteclkhs(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
return dsi->pll.cinfo.clkdco / 16;
}
-static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
+static unsigned long dsi_fclk_rate(struct dsi_data *dsi)
{
unsigned long r;
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ enum dss_clk_source source;
- if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) {
+ source = dss_get_dsi_clk_source(dsi->dss, dsi->module_id);
+ if (source == DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
r = clk_get_rate(dsi->dss_clk);
} else {
/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
- r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
+ r = dsi_get_pll_hsdiv_dsi_rate(dsi);
}
return r;
@@ -1303,7 +1264,7 @@ static int dsi_lp_clock_calc(unsigned long dsi_fclk,
unsigned long lp_clk_min, unsigned long lp_clk_max,
struct dsi_lp_clock_info *lp_cinfo)
{
- unsigned lp_clk_div;
+ unsigned int lp_clk_div;
unsigned long lp_clk;
lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
@@ -1318,13 +1279,12 @@ static int dsi_lp_clock_calc(unsigned long dsi_fclk,
return 0;
}
-static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
+static int dsi_set_lp_clk_divisor(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long dsi_fclk;
- unsigned lp_clk_div;
+ unsigned int lp_clk_div;
unsigned long lp_clk;
- unsigned lpdiv_max = dsi->data->max_pll_lpdiv;
+ unsigned int lpdiv_max = dsi->data->max_pll_lpdiv;
lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;
@@ -1332,7 +1292,7 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
return -EINVAL;
- dsi_fclk = dsi_fclk_rate(dsidev);
+ dsi_fclk = dsi_fclk_rate(dsi);
lp_clk = dsi_fclk / 2 / lp_clk_div;
@@ -1341,29 +1301,25 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;
/* LP_CLK_DIVISOR */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, lp_clk_div, 12, 0);
/* LP_RX_SYNCHRO_ENABLE */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
return 0;
}
-static void dsi_enable_scp_clk(struct platform_device *dsidev)
+static void dsi_enable_scp_clk(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
if (dsi->scp_clk_refcount++ == 0)
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
}
-static void dsi_disable_scp_clk(struct platform_device *dsidev)
+static void dsi_disable_scp_clk(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
WARN_ON(dsi->scp_clk_refcount == 0);
if (--dsi->scp_clk_refcount == 0)
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
}
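
dsi_enable_scp_clk()/dsi_disable_scp_clk() above gate the SCP clock with a plain reference count: the CIO_CLK_ICG bit is only touched on the 0->1 and 1->0 transitions. A standalone sketch of that refcounted gate.

#include <assert.h>
#include <stdio.h>

struct gate {
	unsigned int refcount;
	int hw_enabled;		/* stands in for the CIO_CLK_ICG register bit */
};

static void gate_get(struct gate *g)
{
	if (g->refcount++ == 0)
		g->hw_enabled = 1;	/* first user switches the clock on */
}

static void gate_put(struct gate *g)
{
	assert(g->refcount > 0);
	if (--g->refcount == 0)
		g->hw_enabled = 0;	/* last user switches it off */
}

int main(void)
{
	struct gate g = { 0 };

	gate_get(&g);
	gate_get(&g);
	gate_put(&g);
	printf("enabled after 2 get / 1 put: %d\n", g.hw_enabled);	/* 1 */
	gate_put(&g);
	printf("enabled after final put: %d\n", g.hw_enabled);		/* 0 */
	return 0;
}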
enum dsi_pll_power_state {
@@ -1373,10 +1329,8 @@ enum dsi_pll_power_state {
DSI_PLL_POWER_ON_DIV = 0x3,
};
-static int dsi_pll_power(struct platform_device *dsidev,
- enum dsi_pll_power_state state)
+static int dsi_pll_power(struct dsi_data *dsi, enum dsi_pll_power_state state)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int t = 0;
/* DSI-PLL power command 0x3 is not working */
@@ -1385,10 +1339,10 @@ static int dsi_pll_power(struct platform_device *dsidev,
state = DSI_PLL_POWER_ON_ALL;
/* PLL_PWR_CMD */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, state, 31, 30);
/* PLL_PWR_STATUS */
- while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
+ while (FLD_GET(dsi_read_reg(dsi, DSI_CLK_CTRL), 29, 28) != state) {
if (++t > 1000) {
DSSERR("Failed to set DSI PLL power mode to %d\n",
state);
@@ -1415,23 +1369,22 @@ static void dsi_pll_calc_dsi_fck(struct dsi_data *dsi,
static int dsi_pll_enable(struct dss_pll *pll)
{
struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
- struct platform_device *dsidev = dsi->pdev;
int r = 0;
DSSDBG("PLL init\n");
- r = dsi_regulator_init(dsidev);
+ r = dsi_regulator_init(dsi);
if (r)
return r;
- r = dsi_runtime_get(dsidev);
+ r = dsi_runtime_get(dsi);
if (r)
return r;
/*
* Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
*/
- dsi_enable_scp_clk(dsidev);
+ dsi_enable_scp_clk(dsi);
if (!dsi->vdds_dsi_enabled) {
r = regulator_enable(dsi->vdds_dsi_reg);
@@ -1441,20 +1394,20 @@ static int dsi_pll_enable(struct dss_pll *pll)
}
/* XXX PLL does not come out of reset without this... */
- dispc_pck_free_enable(1);
+ dispc_pck_free_enable(dsi->dss->dispc, 1);
- if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
+ if (!wait_for_bit_change(dsi, DSI_PLL_STATUS, 0, 1)) {
DSSERR("PLL not coming out of reset.\n");
r = -ENODEV;
- dispc_pck_free_enable(0);
+ dispc_pck_free_enable(dsi->dss->dispc, 0);
goto err1;
}
/* XXX ... but if left on, we get problems when planes do not
* fill the whole display. No idea about this */
- dispc_pck_free_enable(0);
+ dispc_pck_free_enable(dsi->dss->dispc, 0);
- r = dsi_pll_power(dsidev, DSI_PLL_POWER_ON_ALL);
+ r = dsi_pll_power(dsi, DSI_PLL_POWER_ON_ALL);
if (r)
goto err1;
@@ -1468,24 +1421,22 @@ err1:
dsi->vdds_dsi_enabled = false;
}
err0:
- dsi_disable_scp_clk(dsidev);
- dsi_runtime_put(dsidev);
+ dsi_disable_scp_clk(dsi);
+ dsi_runtime_put(dsi);
return r;
}
-static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
+static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
+ dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
if (disconnect_lanes) {
WARN_ON(!dsi->vdds_dsi_enabled);
regulator_disable(dsi->vdds_dsi_reg);
dsi->vdds_dsi_enabled = false;
}
- dsi_disable_scp_clk(dsidev);
- dsi_runtime_put(dsidev);
+ dsi_disable_scp_clk(dsi);
+ dsi_runtime_put(dsi);
DSSDBG("PLL uninit done\n");
}
@@ -1493,24 +1444,21 @@ static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes
static void dsi_pll_disable(struct dss_pll *pll)
{
struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
- struct platform_device *dsidev = dsi->pdev;
- dsi_pll_uninit(dsidev, true);
+ dsi_pll_uninit(dsi, true);
}
-static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
- struct seq_file *s)
+static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
struct dss_pll *pll = &dsi->pll;
- dispc_clk_src = dss_get_dispc_clk_source();
- dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
+ dispc_clk_src = dss_get_dispc_clk_source(dsi->dss);
+ dsi_clk_src = dss_get_dsi_clk_source(dsi->dss, dsi_module);
- if (dsi_runtime_get(dsidev))
+ if (dsi_runtime_get(dsi))
return;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
@@ -1545,35 +1493,33 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "dsi fclk source = %s\n",
dss_get_clk_source_name(dsi_clk_src));
- seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
+ seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsi));
seq_printf(s, "DDR_CLK\t\t%lu\n",
cinfo->clkdco / 4);
- seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
+ seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsi));
seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);
- dsi_runtime_put(dsidev);
+ dsi_runtime_put(dsi);
}
void dsi_dump_clocks(struct seq_file *s)
{
- struct platform_device *dsidev;
+ struct dsi_data *dsi;
int i;
for (i = 0; i < MAX_NUM_DSI; i++) {
- dsidev = dsi_get_dsidev_from_id(i);
- if (dsidev)
- dsi_dump_dsidev_clocks(dsidev, s);
+ dsi = dsi_get_dsi_from_id(i);
+ if (dsi)
+ dsi_dump_dsi_clocks(dsi, s);
}
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
- struct seq_file *s)
+static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
struct dsi_irq_stats stats;
@@ -1659,29 +1605,30 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
#undef PIS
}
-static void dsi1_dump_irqs(struct seq_file *s)
+static int dsi1_dump_irqs(struct seq_file *s, void *p)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
+ struct dsi_data *dsi = dsi_get_dsi_from_id(0);
- dsi_dump_dsidev_irqs(dsidev, s);
+ dsi_dump_dsi_irqs(dsi, s);
+ return 0;
}
-static void dsi2_dump_irqs(struct seq_file *s)
+static int dsi2_dump_irqs(struct seq_file *s, void *p)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
+ struct dsi_data *dsi = dsi_get_dsi_from_id(1);
- dsi_dump_dsidev_irqs(dsidev, s);
+ dsi_dump_dsi_irqs(dsi, s);
+ return 0;
}
#endif
-static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
- struct seq_file *s)
+static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s)
{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r))
- if (dsi_runtime_get(dsidev))
+ if (dsi_runtime_get(dsi))
return;
- dsi_enable_scp_clk(dsidev);
+ dsi_enable_scp_clk(dsi);
DUMPREG(DSI_REVISION);
DUMPREG(DSI_SYSCONFIG);
@@ -1753,23 +1700,25 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
DUMPREG(DSI_PLL_CONFIGURATION1);
DUMPREG(DSI_PLL_CONFIGURATION2);
- dsi_disable_scp_clk(dsidev);
- dsi_runtime_put(dsidev);
+ dsi_disable_scp_clk(dsi);
+ dsi_runtime_put(dsi);
#undef DUMPREG
}
-static void dsi1_dump_regs(struct seq_file *s)
+static int dsi1_dump_regs(struct seq_file *s, void *p)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
+ struct dsi_data *dsi = dsi_get_dsi_from_id(0);
- dsi_dump_dsidev_regs(dsidev, s);
+ dsi_dump_dsi_regs(dsi, s);
+ return 0;
}
-static void dsi2_dump_regs(struct seq_file *s)
+static int dsi2_dump_regs(struct seq_file *s, void *p)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
+ struct dsi_data *dsi = dsi_get_dsi_from_id(1);
- dsi_dump_dsidev_regs(dsidev, s);
+ dsi_dump_dsi_regs(dsi, s);
+ return 0;
}
enum dsi_cio_power_state {
@@ -1778,16 +1727,15 @@ enum dsi_cio_power_state {
DSI_COMPLEXIO_POWER_ULPS = 0x2,
};
-static int dsi_cio_power(struct platform_device *dsidev,
- enum dsi_cio_power_state state)
+static int dsi_cio_power(struct dsi_data *dsi, enum dsi_cio_power_state state)
{
int t = 0;
/* PWR_CMD */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
+ REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG1, state, 28, 27);
/* PWR_STATUS */
- while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
+ while (FLD_GET(dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1),
26, 25) != state) {
if (++t > 1000) {
DSSERR("failed to set complexio power state to "
@@ -1800,9 +1748,8 @@ static int dsi_cio_power(struct platform_device *dsidev,
return 0;
}
-static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
+static unsigned int dsi_get_line_buf_size(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int val;
/* line buffer on OMAP3 is 1024 x 24bits */
@@ -1812,7 +1759,7 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
if (!(dsi->data->quirks & DSI_QUIRK_GNQ))
return 1023 * 3;
- val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
+ val = REG_GET(dsi, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
switch (val) {
case 1:
@@ -1835,9 +1782,8 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
}
}
-static int dsi_set_lane_config(struct platform_device *dsidev)
+static int dsi_set_lane_config(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
static const u8 offsets[] = { 0, 4, 8, 12, 16 };
static const enum dsi_lane_function functions[] = {
DSI_LANE_CLK,
@@ -1849,12 +1795,12 @@ static int dsi_set_lane_config(struct platform_device *dsidev)
u32 r;
int i;
- r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
+ r = dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1);
for (i = 0; i < dsi->num_lanes_used; ++i) {
- unsigned offset = offsets[i];
- unsigned polarity, lane_number;
- unsigned t;
+ unsigned int offset = offsets[i];
+ unsigned int polarity, lane_number;
+ unsigned int t;
for (t = 0; t < dsi->num_lanes_supported; ++t)
if (dsi->lanes[t].function == functions[i])
@@ -1872,37 +1818,34 @@ static int dsi_set_lane_config(struct platform_device *dsidev)
/* clear the unused lanes */
for (; i < dsi->num_lanes_supported; ++i) {
- unsigned offset = offsets[i];
+ unsigned int offset = offsets[i];
r = FLD_MOD(r, 0, offset + 2, offset);
r = FLD_MOD(r, 0, offset + 3, offset + 3);
}
- dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
+ dsi_write_reg(dsi, DSI_COMPLEXIO_CFG1, r);
return 0;
}
-static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
+static inline unsigned int ns2ddr(struct dsi_data *dsi, unsigned int ns)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
/* convert time in ns to ddr ticks, rounding up */
unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
+
return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
}
-static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
+static inline unsigned int ddr2ns(struct dsi_data *dsi, unsigned int ddr)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
+
return ddr * 1000 * 1000 / (ddr_clk / 1000);
}
-static void dsi_cio_timings(struct platform_device *dsidev)
+static void dsi_cio_timings(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
u32 tlpx_half, tclk_trail, tclk_zero;
@@ -1913,54 +1856,54 @@ static void dsi_cio_timings(struct platform_device *dsidev)
/* 1 * DDR_CLK = 2 * UI */
/* min 40ns + 4*UI max 85ns + 6*UI */
- ths_prepare = ns2ddr(dsidev, 70) + 2;
+ ths_prepare = ns2ddr(dsi, 70) + 2;
/* min 145ns + 10*UI */
- ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
+ ths_prepare_ths_zero = ns2ddr(dsi, 175) + 2;
/* min max(8*UI, 60ns+4*UI) */
- ths_trail = ns2ddr(dsidev, 60) + 5;
+ ths_trail = ns2ddr(dsi, 60) + 5;
/* min 100ns */
- ths_exit = ns2ddr(dsidev, 145);
+ ths_exit = ns2ddr(dsi, 145);
/* tlpx min 50n */
- tlpx_half = ns2ddr(dsidev, 25);
+ tlpx_half = ns2ddr(dsi, 25);
/* min 60ns */
- tclk_trail = ns2ddr(dsidev, 60) + 2;
+ tclk_trail = ns2ddr(dsi, 60) + 2;
/* min 38ns, max 95ns */
- tclk_prepare = ns2ddr(dsidev, 65);
+ tclk_prepare = ns2ddr(dsi, 65);
/* min tclk-prepare + tclk-zero = 300ns */
- tclk_zero = ns2ddr(dsidev, 260);
+ tclk_zero = ns2ddr(dsi, 260);
DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
- ths_prepare, ddr2ns(dsidev, ths_prepare),
- ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
+ ths_prepare, ddr2ns(dsi, ths_prepare),
+ ths_prepare_ths_zero, ddr2ns(dsi, ths_prepare_ths_zero));
DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
- ths_trail, ddr2ns(dsidev, ths_trail),
- ths_exit, ddr2ns(dsidev, ths_exit));
+ ths_trail, ddr2ns(dsi, ths_trail),
+ ths_exit, ddr2ns(dsi, ths_exit));
DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
"tclk_zero %u (%uns)\n",
- tlpx_half, ddr2ns(dsidev, tlpx_half),
- tclk_trail, ddr2ns(dsidev, tclk_trail),
- tclk_zero, ddr2ns(dsidev, tclk_zero));
+ tlpx_half, ddr2ns(dsi, tlpx_half),
+ tclk_trail, ddr2ns(dsi, tclk_trail),
+ tclk_zero, ddr2ns(dsi, tclk_zero));
DSSDBG("tclk_prepare %u (%uns)\n",
- tclk_prepare, ddr2ns(dsidev, tclk_prepare));
+ tclk_prepare, ddr2ns(dsi, tclk_prepare));
/* program timings */
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
r = FLD_MOD(r, ths_prepare, 31, 24);
r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
r = FLD_MOD(r, ths_trail, 15, 8);
r = FLD_MOD(r, ths_exit, 7, 0);
- dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
+ dsi_write_reg(dsi, DSI_DSIPHY_CFG0, r);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
r = FLD_MOD(r, tlpx_half, 20, 16);
r = FLD_MOD(r, tclk_trail, 15, 8);
r = FLD_MOD(r, tclk_zero, 7, 0);
@@ -1971,18 +1914,18 @@ static void dsi_cio_timings(struct platform_device *dsidev)
r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */
}
- dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
+ dsi_write_reg(dsi, DSI_DSIPHY_CFG1, r);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
r = FLD_MOD(r, tclk_prepare, 7, 0);
- dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
+ dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r);
}
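/*
 * Note (illustrative; the field helpers are defined elsewhere in the
 * driver): the read-modify-write sequences above use the omapdss
 * bit-field macros, which boil down to something like
 *
 *	#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
 *	#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
 *	#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
 *	#define FLD_MOD(orig, val, start, end) \
 *		(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
 *
 * so FLD_MOD(r, ths_prepare, 31, 24) replaces bits 31..24 of r with
 * ths_prepare, and REG_FLD_MOD(dsi, reg, val, start, end) wraps the same
 * operation around a dsi_read_reg()/dsi_write_reg() pair.
 */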
/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
-static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
- unsigned mask_p, unsigned mask_n)
+static void dsi_cio_enable_lane_override(struct dsi_data *dsi,
+ unsigned int mask_p,
+ unsigned int mask_n)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
u32 l;
u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
@@ -1990,7 +1933,7 @@ static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
l = 0;
for (i = 0; i < dsi->num_lanes_supported; ++i) {
- unsigned p = dsi->lanes[i].polarity;
+ unsigned int p = dsi->lanes[i].polarity;
if (mask_p & (1 << i))
l |= 1 << (i * 2 + (p ? 0 : 1));
@@ -2011,26 +1954,25 @@ static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
/* Set the lane override configuration */
/* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
+ REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
/* Enable lane override */
/* ENLPTXSCPDAT */
- REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
+ REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 1, 27, 27);
}
-static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
+static void dsi_cio_disable_lane_override(struct dsi_data *dsi)
{
/* Disable lane override */
- REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
+ REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
/* Reset the lane override configuration */
/* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
+ REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 22, 17);
}
-static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
+static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int t, i;
bool in_use[DSI_MAX_NR_LANES];
static const u8 offsets_old[] = { 28, 27, 26 };
@@ -2050,7 +1992,7 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
u32 l;
int ok;
- l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
+ l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
ok = 0;
for (i = 0; i < dsi->num_lanes_supported; ++i) {
@@ -2077,10 +2019,9 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
}
/* return bitmask of enabled lanes, lane0 being the lsb */
-static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
+static unsigned int dsi_get_lane_mask(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- unsigned mask = 0;
+ unsigned int mask = 0;
int i;
for (i = 0; i < dsi->num_lanes_supported; ++i) {
@@ -2168,45 +2109,44 @@ static void dsi_disable_pads(struct dsi_data *dsi)
dsi_omap5_mux_pads(dsi, 0);
}
-static int dsi_cio_init(struct platform_device *dsidev)
+static int dsi_cio_init(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
u32 l;
DSSDBG("DSI CIO init starts");
- r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsidev));
+ r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsi));
if (r)
return r;
- dsi_enable_scp_clk(dsidev);
+ dsi_enable_scp_clk(dsi);
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
- dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
+ dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
- if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
+ if (!wait_for_bit_change(dsi, DSI_DSIPHY_CFG5, 30, 1)) {
DSSERR("CIO SCP Clock domain not coming out of reset.\n");
r = -EIO;
goto err_scp_clk_dom;
}
- r = dsi_set_lane_config(dsidev);
+ r = dsi_set_lane_config(dsi);
if (r)
goto err_scp_clk_dom;
/* set TX STOP MODE timer to maximum for this operation */
- l = dsi_read_reg(dsidev, DSI_TIMING1);
+ l = dsi_read_reg(dsi, DSI_TIMING1);
l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
- dsi_write_reg(dsidev, DSI_TIMING1, l);
+ dsi_write_reg(dsi, DSI_TIMING1, l);
if (dsi->ulps_enabled) {
- unsigned mask_p;
+ unsigned int mask_p;
int i;
DSSDBG("manual ulps exit\n");
@@ -2228,24 +2168,24 @@ static int dsi_cio_init(struct platform_device *dsidev)
mask_p |= 1 << i;
}
- dsi_cio_enable_lane_override(dsidev, mask_p, 0);
+ dsi_cio_enable_lane_override(dsi, mask_p, 0);
}
- r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
+ r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON);
if (r)
goto err_cio_pwr;
- if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
+ if (!wait_for_bit_change(dsi, DSI_COMPLEXIO_CFG1, 29, 1)) {
DSSERR("CIO PWR clock domain not coming out of reset.\n");
r = -ENODEV;
goto err_cio_pwr_dom;
}
- dsi_if_enable(dsidev, true);
- dsi_if_enable(dsidev, false);
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
+ dsi_if_enable(dsi, true);
+ dsi_if_enable(dsi, false);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
- r = dsi_cio_wait_tx_clk_esc_reset(dsidev);
+ r = dsi_cio_wait_tx_clk_esc_reset(dsi);
if (r)
goto err_tx_clk_esc_rst;
@@ -2257,17 +2197,17 @@ static int dsi_cio_init(struct platform_device *dsidev)
/* Disable the override. The lanes should be set to Mark-11
* state by the HW */
- dsi_cio_disable_lane_override(dsidev);
+ dsi_cio_disable_lane_override(dsi);
}
/* FORCE_TX_STOP_MODE_IO */
- REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
+ REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15);
- dsi_cio_timings(dsidev);
+ dsi_cio_timings(dsi);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
/* DDR_CLK_ALWAYS_ON */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL,
dsi->vm_timings.ddr_clk_always_on, 13, 13);
}
@@ -2278,35 +2218,32 @@ static int dsi_cio_init(struct platform_device *dsidev)
return 0;
err_tx_clk_esc_rst:
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
err_cio_pwr_dom:
- dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
+ dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
if (dsi->ulps_enabled)
- dsi_cio_disable_lane_override(dsidev);
+ dsi_cio_disable_lane_override(dsi);
err_scp_clk_dom:
- dsi_disable_scp_clk(dsidev);
+ dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
return r;
}
-static void dsi_cio_uninit(struct platform_device *dsidev)
+static void dsi_cio_uninit(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
/* DDR_CLK_ALWAYS_ON */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
- dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
- dsi_disable_scp_clk(dsidev);
+ dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
+ dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
}
-static void dsi_config_tx_fifo(struct platform_device *dsidev,
- enum fifo_size size1, enum fifo_size size2,
- enum fifo_size size3, enum fifo_size size4)
+static void dsi_config_tx_fifo(struct dsi_data *dsi,
+ enum fifo_size size1, enum fifo_size size2,
+ enum fifo_size size3, enum fifo_size size4)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r = 0;
int add = 0;
int i;
@@ -2332,14 +2269,13 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
add += size;
}
- dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
+ dsi_write_reg(dsi, DSI_TX_FIFO_VC_SIZE, r);
}
-static void dsi_config_rx_fifo(struct platform_device *dsidev,
+static void dsi_config_rx_fifo(struct dsi_data *dsi,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r = 0;
int add = 0;
int i;
@@ -2365,18 +2301,18 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
add += size;
}
- dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
+ dsi_write_reg(dsi, DSI_RX_FIFO_VC_SIZE, r);
}
-static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
+static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi)
{
u32 r;
- r = dsi_read_reg(dsidev, DSI_TIMING1);
+ r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
- dsi_write_reg(dsidev, DSI_TIMING1, r);
+ dsi_write_reg(dsi, DSI_TIMING1, r);
- if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
+ if (!wait_for_bit_change(dsi, DSI_TIMING1, 15, 0)) {
DSSERR("TX_STOP bit not going down\n");
return -EIO;
}
@@ -2384,29 +2320,28 @@ static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
return 0;
}
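/*
 * Note (an assumption -- the helper's new body is outside these hunks):
 * the converted call sites treat wait_for_bit_change() as returning bool,
 * true once the bit has reached the requested value and false on timeout,
 * which is why the old "!= 1" / "!= 0" comparisons collapse to a plain "!".
 * One shape consistent with these callers:
 *
 *	static bool wait_for_bit_change(struct dsi_data *dsi,
 *					const struct dsi_reg idx,
 *					int bitnum, int value)
 *	{
 *		int t = 100 * 1000;
 *
 *		while (REG_GET(dsi, idx, bitnum, bitnum) != value) {
 *			if (--t == 0)
 *				return false;
 *			udelay(1);
 *		}
 *
 *		return true;
 *	}
 */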
-static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
+static bool dsi_vc_is_enabled(struct dsi_data *dsi, int channel)
{
- return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
+ return REG_GET(dsi, DSI_VC_CTRL(channel), 0, 0);
}
static void dsi_packet_sent_handler_vp(void *data, u32 mask)
{
struct dsi_packet_sent_handler_data *vp_data =
(struct dsi_packet_sent_handler_data *) data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
+ struct dsi_data *dsi = vp_data->dsi;
const int channel = dsi->update_channel;
u8 bit = dsi->te_enabled ? 30 : 31;
- if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
+ if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit) == 0)
complete(vp_data->completion);
}
-static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
+static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data vp_data = {
- .dsidev = dsidev,
+ .dsi = dsi,
.completion = &completion
};
int r = 0;
@@ -2414,13 +2349,13 @@ static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
bit = dsi->te_enabled ? 30 : 31;
- r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
+ r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TE_EN/TE_START is still set */
- if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
+ if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous frame transfer\n");
@@ -2429,12 +2364,12 @@ static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
}
}
- dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
@@ -2444,29 +2379,29 @@ static void dsi_packet_sent_handler_l4(void *data, u32 mask)
{
struct dsi_packet_sent_handler_data *l4_data =
(struct dsi_packet_sent_handler_data *) data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
+ struct dsi_data *dsi = l4_data->dsi;
const int channel = dsi->update_channel;
- if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5) == 0)
complete(l4_data->completion);
}
-static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
+static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data l4_data = {
- .dsidev = dsidev,
+ .dsi = dsi,
.completion = &completion
};
int r = 0;
- r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
+ r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
- if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous l4 transfer\n");
@@ -2475,66 +2410,61 @@ static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
}
}
- dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
}
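/*
 * Note on the synchronisation pattern shared by dsi_sync_vc_vp() and
 * dsi_sync_vc_l4() above (a restated sketch, not new behaviour):
 *
 *	DECLARE_COMPLETION_ONSTACK(completion);
 *
 *	r = dsi_register_isr_vc(dsi, channel, handler, &data,
 *				DSI_VC_IRQ_PACKET_SENT);	// 1) arm the ISR first
 *	if (r)
 *		goto err0;
 *
 *	if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5)) {		// 2) then re-check the flag
 *		if (!wait_for_completion_timeout(&completion,
 *						 msecs_to_jiffies(10)))
 *			goto err1;				// 3) bounded wait
 *	}
 *
 *	dsi_unregister_isr_vc(dsi, channel, handler, &data,
 *			      DSI_VC_IRQ_PACKET_SENT);		// 4) always unregister
 *
 * Registering the handler before re-reading the hardware flag closes the
 * window where the packet-sent interrupt could fire between the check and
 * the wait, which would otherwise leave the completion unsignalled.
 */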
-static int dsi_sync_vc(struct platform_device *dsidev, int channel)
+static int dsi_sync_vc(struct dsi_data *dsi, int channel)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(in_interrupt());
- if (!dsi_vc_is_enabled(dsidev, channel))
+ if (!dsi_vc_is_enabled(dsi, channel))
return 0;
switch (dsi->vc[channel].source) {
case DSI_VC_SOURCE_VP:
- return dsi_sync_vc_vp(dsidev, channel);
+ return dsi_sync_vc_vp(dsi, channel);
case DSI_VC_SOURCE_L4:
- return dsi_sync_vc_l4(dsidev, channel);
+ return dsi_sync_vc_l4(dsi, channel);
default:
BUG();
return -EINVAL;
}
}
-static int dsi_vc_enable(struct platform_device *dsidev, int channel,
- bool enable)
+static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
{
DSSDBG("dsi_vc_enable channel %d, enable %d\n",
channel, enable);
enable = enable ? 1 : 0;
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 0, 0);
- if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
- 0, enable) != enable) {
- DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
- return -EIO;
+ if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 0, enable)) {
+ DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
+ return -EIO;
}
return 0;
}
-static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
+static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
DSSDBG("Initial config of virtual channel %d", channel);
- r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
+ r = dsi_read_reg(dsi, DSI_VC_CTRL(channel));
if (FLD_GET(r, 15, 15)) /* VC_BUSY */
DSSERR("VC(%d) busy when trying to configure it!\n",
@@ -2553,41 +2483,39 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
- dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
+ dsi_write_reg(dsi, DSI_VC_CTRL(channel), r);
dsi->vc[channel].source = DSI_VC_SOURCE_L4;
}
-static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
- enum dsi_vc_source source)
+static int dsi_vc_config_source(struct dsi_data *dsi, int channel,
+ enum dsi_vc_source source)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
if (dsi->vc[channel].source == source)
return 0;
DSSDBG("Source config of virtual channel %d", channel);
- dsi_sync_vc(dsidev, channel);
+ dsi_sync_vc(dsi, channel);
- dsi_vc_enable(dsidev, channel, 0);
+ dsi_vc_enable(dsi, channel, 0);
/* VC_BUSY */
- if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
+ if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 15, 0)) {
DSSERR("vc(%d) busy when trying to config for VP\n", channel);
return -EIO;
}
/* SOURCE, 0 = L4, 1 = video port */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), source, 1, 1);
/* DCS_CMD_ENABLE */
if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
bool enable = source == DSI_VC_SOURCE_VP;
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 30, 30);
}
- dsi_vc_enable(dsidev, channel, 1);
+ dsi_vc_enable(dsi, channel, 1);
dsi->vc[channel].source = source;
@@ -2597,33 +2525,32 @@ static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
bool enable)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
- dsi_vc_enable(dsidev, channel, 0);
- dsi_if_enable(dsidev, 0);
+ dsi_vc_enable(dsi, channel, 0);
+ dsi_if_enable(dsi, 0);
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 9, 9);
- dsi_vc_enable(dsidev, channel, 1);
- dsi_if_enable(dsidev, 1);
+ dsi_vc_enable(dsi, channel, 1);
+ dsi_if_enable(dsi, 1);
- dsi_force_tx_stop_mode_io(dsidev);
+ dsi_force_tx_stop_mode_io(dsi);
/* start the DDR clock by sending a NULL packet */
if (dsi->vm_timings.ddr_clk_always_on && enable)
- dsi_vc_send_null(dssdev, channel);
+ dsi_vc_send_null(dsi, channel);
}
-static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
+static void dsi_vc_flush_long_data(struct dsi_data *dsi, int channel)
{
- while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
u32 val;
- val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
@@ -2669,14 +2596,13 @@ static void dsi_show_rx_ack_with_err(u16 err)
DSSERR("\t\tDSI Protocol Violation\n");
}
-static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
- int channel)
+static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
{
/* RX_FIFO_NOT_EMPTY */
- while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
u32 val;
u8 dt;
- val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
DSSERR("\trawval %#08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
@@ -2691,7 +2617,7 @@ static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
- dsi_vc_flush_long_data(dsidev, channel);
+ dsi_vc_flush_long_data(dsi, channel);
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
}
@@ -2699,47 +2625,45 @@ static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
return 0;
}
-static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
+static int dsi_vc_send_bta(struct dsi_data *dsi, int channel)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
if (dsi->debug_write || dsi->debug_read)
DSSDBG("dsi_vc_send_bta %d\n", channel);
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
- dsi_vc_flush_receive_data(dsidev, channel);
+ dsi_vc_flush_receive_data(dsi, channel);
}
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
/* flush posted write */
- dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
+ dsi_read_reg(dsi, DSI_VC_CTRL(channel));
return 0;
}
static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
DECLARE_COMPLETION_ONSTACK(completion);
int r = 0;
u32 err;
- r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
+ r = dsi_register_isr_vc(dsi, channel, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
if (r)
goto err0;
- r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
+ r = dsi_register_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
if (r)
goto err1;
- r = dsi_vc_send_bta(dsidev, channel);
+ r = dsi_vc_send_bta(dsi, channel);
if (r)
goto err2;
@@ -2750,41 +2674,40 @@ static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
goto err2;
}
- err = dsi_get_errors(dsidev);
+ err = dsi_get_errors(dsi);
if (err) {
DSSERR("Error while sending BTA: %x\n", err);
r = -EIO;
goto err2;
}
err2:
- dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
+ dsi_unregister_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
err1:
- dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
+ dsi_unregister_isr_vc(dsi, channel, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
err0:
return r;
}
-static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
- int channel, u8 data_type, u16 len, u8 ecc)
+static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int channel,
+ u8 data_type, u16 len, u8 ecc)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 val;
u8 data_id;
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
data_id = data_type | dsi->vc[channel].vc_id << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
- dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(channel), val);
}
-static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
- int channel, u8 b1, u8 b2, u8 b3, u8 b4)
+static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
+ u8 b1, u8 b2, u8 b3, u8 b4)
{
u32 val;
@@ -2793,14 +2716,13 @@ static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
b1, b2, b3, b4, val); */
- dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
}
-static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
- u8 data_type, u8 *data, u16 len, u8 ecc)
+static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
+ u8 *data, u16 len, u8 ecc)
{
/*u32 val; */
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
u8 *p;
int r = 0;
@@ -2815,9 +2737,9 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
return -EINVAL;
}
- dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
+ dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
- dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
+ dsi_vc_write_long_header(dsi, channel, data_type, len, ecc);
p = data;
for (i = 0; i < len >> 2; i++) {
@@ -2829,7 +2751,7 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
b3 = *p++;
b4 = *p++;
- dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
+ dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, b4);
}
i = len % 4;
@@ -2854,29 +2776,28 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
break;
}
- dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
+ dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, 0);
}
return r;
}
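/*
 * Note (restating the loop above, packing order assumed from
 * dsi_vc_write_long_payload()): long packets go to the TX FIFO 32 bits at
 * a time, first payload byte in the lowest byte lane:
 *
 *	val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
 *	dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
 *
 * A trailing 1-3 byte remainder is sent as one final word with the unused
 * high bytes cleared, as the switch on len % 4 above shows (b4 forced to 0).
 */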
-static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
- u8 data_type, u16 data, u8 ecc)
+static int dsi_vc_send_short(struct dsi_data *dsi, int channel, u8 data_type,
+ u16 data, u8 ecc)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
u8 data_id;
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
if (dsi->debug_write)
DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
channel,
data_type, data & 0xff, (data >> 8) & 0xff);
- dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
+ dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
- if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
+ if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(channel)), 16, 16)) {
DSSERR("ERROR FIFO FULL, aborting transfer\n");
return -EINVAL;
}
@@ -2885,41 +2806,39 @@ static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
r = (data_id << 0) | (data << 8) | (ecc << 24);
- dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
+ dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel), r);
return 0;
}
-static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
+static int dsi_vc_send_null(struct dsi_data *dsi, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-
- return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
- 0, 0);
+ return dsi_vc_send_long(dsi, channel, MIPI_DSI_NULL_PACKET, NULL, 0, 0);
}
-static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
- int channel, u8 *data, int len, enum dss_dsi_content_type type)
+static int dsi_vc_write_nosync_common(struct dsi_data *dsi, int channel,
+ u8 *data, int len,
+ enum dss_dsi_content_type type)
{
int r;
if (len == 0) {
BUG_ON(type == DSS_DSI_CONTENT_DCS);
- r = dsi_vc_send_short(dsidev, channel,
+ r = dsi_vc_send_short(dsi, channel,
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
} else if (len == 1) {
- r = dsi_vc_send_short(dsidev, channel,
+ r = dsi_vc_send_short(dsi, channel,
type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
} else if (len == 2) {
- r = dsi_vc_send_short(dsidev, channel,
+ r = dsi_vc_send_short(dsi, channel,
type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
MIPI_DSI_DCS_SHORT_WRITE_PARAM,
data[0] | (data[1] << 8), 0);
} else {
- r = dsi_vc_send_long(dsidev, channel,
+ r = dsi_vc_send_long(dsi, channel,
type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_GENERIC_LONG_WRITE :
MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
@@ -2931,28 +2850,29 @@ static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
u8 *data, int len)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
- return dsi_vc_write_nosync_common(dsidev, channel, data, len,
+ return dsi_vc_write_nosync_common(dsi, channel, data, len,
DSS_DSI_CONTENT_DCS);
}
static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
u8 *data, int len)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
- return dsi_vc_write_nosync_common(dsidev, channel, data, len,
+ return dsi_vc_write_nosync_common(dsi, channel, data, len,
DSS_DSI_CONTENT_GENERIC);
}
-static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len, enum dss_dsi_content_type type)
+static int dsi_vc_write_common(struct omap_dss_device *dssdev,
+ int channel, u8 *data, int len,
+ enum dss_dsi_content_type type)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type);
+ r = dsi_vc_write_nosync_common(dsi, channel, data, len, type);
if (r)
goto err;
@@ -2961,9 +2881,9 @@ static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
goto err;
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
DSSERR("rx fifo not empty after write, dumping data:\n");
- dsi_vc_flush_receive_data(dsidev, channel);
+ dsi_vc_flush_receive_data(dsi, channel);
r = -EIO;
goto err;
}
@@ -2989,17 +2909,16 @@ static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8
DSS_DSI_CONTENT_GENERIC);
}
-static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
- int channel, u8 dcs_cmd)
+static int dsi_vc_dcs_send_read_request(struct dsi_data *dsi, int channel,
+ u8 dcs_cmd)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
if (dsi->debug_read)
DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
channel, dcs_cmd);
- r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
+ r = dsi_vc_send_short(dsi, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
if (r) {
DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
" failed\n", channel, dcs_cmd);
@@ -3009,10 +2928,9 @@ static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
return 0;
}
-static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
- int channel, u8 *reqdata, int reqlen)
+static int dsi_vc_generic_send_read_request(struct dsi_data *dsi, int channel,
+ u8 *reqdata, int reqlen)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u16 data;
u8 data_type;
int r;
@@ -3035,7 +2953,7 @@ static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
return -EINVAL;
}
- r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
+ r = dsi_vc_send_short(dsi, channel, data_type, data, 0);
if (r) {
DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
" failed\n", channel, reqlen);
@@ -3045,22 +2963,21 @@ static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
return 0;
}
-static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
- u8 *buf, int buflen, enum dss_dsi_content_type type)
+static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
+ int buflen, enum dss_dsi_content_type type)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 val;
u8 dt;
int r;
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20) == 0) {
DSSERR("RX fifo empty when trying to read.\n");
r = -EIO;
goto err;
}
- val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
if (dsi->debug_read)
DSSDBG("\theader: %08x\n", val);
dt = FLD_GET(val, 5, 0);
@@ -3123,7 +3040,7 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
/* two byte checksum ends the packet, not included in len */
for (w = 0; w < len + 2;) {
int b;
- val = dsi_read_reg(dsidev,
+ val = dsi_read_reg(dsi,
DSI_VC_SHORT_PACKET_HEADER(channel));
if (dsi->debug_read)
DSSDBG("\t\t%02x %02x %02x %02x\n",
@@ -3157,10 +3074,10 @@ err:
static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
u8 *buf, int buflen)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd);
+ r = dsi_vc_dcs_send_read_request(dsi, channel, dcs_cmd);
if (r)
goto err;
@@ -3168,7 +3085,7 @@ static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_c
if (r)
goto err;
- r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
DSS_DSI_CONTENT_DCS);
if (r < 0)
goto err;
@@ -3187,10 +3104,10 @@ err:
static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen);
+ r = dsi_vc_generic_send_read_request(dsi, channel, reqdata, reqlen);
if (r)
return r;
@@ -3198,7 +3115,7 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
if (r)
return r;
- r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
DSS_DSI_CONTENT_GENERIC);
if (r < 0)
return r;
@@ -3214,22 +3131,21 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
u16 len)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
- return dsi_vc_send_short(dsidev, channel,
+ return dsi_vc_send_short(dsi, channel,
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
}
-static int dsi_enter_ulps(struct platform_device *dsidev)
+static int dsi_enter_ulps(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DECLARE_COMPLETION_ONSTACK(completion);
int r, i;
- unsigned mask;
+ unsigned int mask;
DSSDBG("Entering ULPS");
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(dsi->ulps_enabled);
@@ -3237,35 +3153,35 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
return 0;
/* DDR_CLK_ALWAYS_ON */
- if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
- dsi_if_enable(dsidev, 0);
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
- dsi_if_enable(dsidev, 1);
+ if (REG_GET(dsi, DSI_CLK_CTRL, 13, 13)) {
+ dsi_if_enable(dsi, 0);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
+ dsi_if_enable(dsi, 1);
}
- dsi_sync_vc(dsidev, 0);
- dsi_sync_vc(dsidev, 1);
- dsi_sync_vc(dsidev, 2);
- dsi_sync_vc(dsidev, 3);
+ dsi_sync_vc(dsi, 0);
+ dsi_sync_vc(dsi, 1);
+ dsi_sync_vc(dsi, 2);
+ dsi_sync_vc(dsi, 3);
- dsi_force_tx_stop_mode_io(dsidev);
+ dsi_force_tx_stop_mode_io(dsi);
- dsi_vc_enable(dsidev, 0, false);
- dsi_vc_enable(dsidev, 1, false);
- dsi_vc_enable(dsidev, 2, false);
- dsi_vc_enable(dsidev, 3, false);
+ dsi_vc_enable(dsi, 0, false);
+ dsi_vc_enable(dsi, 1, false);
+ dsi_vc_enable(dsi, 2, false);
+ dsi_vc_enable(dsi, 3, false);
- if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
+ if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
DSSERR("HS busy when enabling ULPS\n");
return -EIO;
}
- if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
+ if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
DSSERR("LP busy when enabling ULPS\n");
return -EIO;
}
- r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
+ r = dsi_register_isr_cio(dsi, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
if (r)
return r;
@@ -3279,10 +3195,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
}
/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
/* LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);
+ REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, mask, 9, 5);
/* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
+ dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(1000)) == 0) {
@@ -3291,31 +3207,31 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
goto err;
}
- dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
+ dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
/* Reset LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);
+ REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, 0, 9, 5);
/* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
+ dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
- dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
+ dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ULPS);
- dsi_if_enable(dsidev, false);
+ dsi_if_enable(dsi, false);
dsi->ulps_enabled = true;
return 0;
err:
- dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
+ dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
return r;
}
-static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
- unsigned ticks, bool x4, bool x16)
+static void dsi_set_lp_rx_timeout(struct dsi_data *dsi, unsigned int ticks,
+ bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3324,14 +3240,14 @@ static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
- fck = dsi_fclk_rate(dsidev);
+ fck = dsi_fclk_rate(dsi);
- r = dsi_read_reg(dsidev, DSI_TIMING2);
+ r = dsi_read_reg(dsi, DSI_TIMING2);
r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
- dsi_write_reg(dsidev, DSI_TIMING2, r);
+ dsi_write_reg(dsi, DSI_TIMING2, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
@@ -3341,8 +3257,8 @@ static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
-static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
- bool x8, bool x16)
+static void dsi_set_ta_timeout(struct dsi_data *dsi, unsigned int ticks,
+ bool x8, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3351,14 +3267,14 @@ static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
- fck = dsi_fclk_rate(dsidev);
+ fck = dsi_fclk_rate(dsi);
- r = dsi_read_reg(dsidev, DSI_TIMING1);
+ r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
- dsi_write_reg(dsidev, DSI_TIMING1, r);
+ dsi_write_reg(dsi, DSI_TIMING1, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
@@ -3368,8 +3284,8 @@ static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
-static void dsi_set_stop_state_counter(struct platform_device *dsidev,
- unsigned ticks, bool x4, bool x16)
+static void dsi_set_stop_state_counter(struct dsi_data *dsi, unsigned int ticks,
+ bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3378,14 +3294,14 @@ static void dsi_set_stop_state_counter(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
- fck = dsi_fclk_rate(dsidev);
+ fck = dsi_fclk_rate(dsi);
- r = dsi_read_reg(dsidev, DSI_TIMING1);
+ r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
- dsi_write_reg(dsidev, DSI_TIMING1, r);
+ dsi_write_reg(dsi, DSI_TIMING1, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
@@ -3395,8 +3311,8 @@ static void dsi_set_stop_state_counter(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
-static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
- unsigned ticks, bool x4, bool x16)
+static void dsi_set_hs_tx_timeout(struct dsi_data *dsi, unsigned int ticks,
+ bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3405,14 +3321,14 @@ static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in TxByteClkHS */
- fck = dsi_get_txbyteclkhs(dsidev);
+ fck = dsi_get_txbyteclkhs(dsi);
- r = dsi_read_reg(dsidev, DSI_TIMING2);
+ r = dsi_read_reg(dsi, DSI_TIMING2);
r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
- dsi_write_reg(dsidev, DSI_TIMING2, r);
+ dsi_write_reg(dsi, DSI_TIMING2, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
@@ -3422,9 +3338,8 @@ static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
-static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
+static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int num_line_buffers;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
@@ -3444,12 +3359,11 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
}
/* LINE_BUFFER */
- REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
+ REG_FLD_MOD(dsi, DSI_CTRL, num_line_buffers, 13, 12);
}
-static void dsi_config_vp_sync_events(struct platform_device *dsidev)
+static void dsi_config_vp_sync_events(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
bool sync_end;
u32 r;
@@ -3458,7 +3372,7 @@ static void dsi_config_vp_sync_events(struct platform_device *dsidev)
else
sync_end = false;
- r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */
r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */
r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */
@@ -3466,12 +3380,11 @@ static void dsi_config_vp_sync_events(struct platform_device *dsidev)
r = FLD_MOD(r, sync_end, 16, 16); /* VP_VSYNC_END */
r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
r = FLD_MOD(r, sync_end, 18, 18); /* VP_HSYNC_END */
- dsi_write_reg(dsidev, DSI_CTRL, r);
+ dsi_write_reg(dsi, DSI_CTRL, r);
}
-static void dsi_config_blanking_modes(struct platform_device *dsidev)
+static void dsi_config_blanking_modes(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int blanking_mode = dsi->vm_timings.blanking_mode;
int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
@@ -3482,12 +3395,12 @@ static void dsi_config_blanking_modes(struct platform_device *dsidev)
* 0 = TX FIFO packets sent or LPS in corresponding blanking periods
* 1 = Long blanking packets are sent in corresponding blanking periods
*/
- r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, blanking_mode, 20, 20); /* BLANKING_MODE */
r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */
r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */
r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */
- dsi_write_reg(dsidev, DSI_CTRL, r);
+ dsi_write_reg(dsi, DSI_CTRL, r);
}
/*
@@ -3552,9 +3465,8 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
return max(lp_inter, 0);
}
-static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
+static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int blanking_mode;
int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
@@ -3571,33 +3483,33 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
int bl_interleave_hs = 0, bl_interleave_lp = 0;
u32 r;
- r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = dsi_read_reg(dsi, DSI_CTRL);
blanking_mode = FLD_GET(r, 20, 20);
hfp_blanking_mode = FLD_GET(r, 21, 21);
hbp_blanking_mode = FLD_GET(r, 22, 22);
hsa_blanking_mode = FLD_GET(r, 23, 23);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING1);
hbp = FLD_GET(r, 11, 0);
hfp = FLD_GET(r, 23, 12);
hsa = FLD_GET(r, 31, 24);
- r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
+ r = dsi_read_reg(dsi, DSI_CLK_TIMING);
ddr_clk_post = FLD_GET(r, 7, 0);
ddr_clk_pre = FLD_GET(r, 15, 8);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING7);
exit_hs_mode_lat = FLD_GET(r, 15, 0);
enter_hs_mode_lat = FLD_GET(r, 31, 16);
- r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
+ r = dsi_read_reg(dsi, DSI_CLK_CTRL);
lp_clk_div = FLD_GET(r, 12, 0);
ddr_alwon = FLD_GET(r, 13, 13);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
ths_exit = FLD_GET(r, 7, 0);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tclk_trail = FLD_GET(r, 15, 8);
exiths_clk = ths_exit + tclk_trail;
@@ -3651,45 +3563,44 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
bl_interleave_lp);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING4);
r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
- dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING4, r);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING5);
r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
- dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING5, r);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING6);
r = FLD_MOD(r, bl_interleave_hs, 31, 15);
r = FLD_MOD(r, bl_interleave_lp, 16, 0);
- dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING6, r);
}
-static int dsi_proto_config(struct platform_device *dsidev)
+static int dsi_proto_config(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
int buswidth = 0;
- dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
+ dsi_config_tx_fifo(dsi, DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32);
- dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
+ dsi_config_rx_fifo(dsi, DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32);
/* XXX what values for the timeouts? */
- dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
- dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
- dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
- dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
+ dsi_set_stop_state_counter(dsi, 0x1000, false, false);
+ dsi_set_ta_timeout(dsi, 0x1fff, true, true);
+ dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true);
+ dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true);
switch (dsi_get_pixel_size(dsi->pix_fmt)) {
case 16:
@@ -3706,7 +3617,7 @@ static int dsi_proto_config(struct platform_device *dsidev)
return -EINVAL;
}
- r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
@@ -3721,56 +3632,55 @@ static int dsi_proto_config(struct platform_device *dsidev)
r = FLD_MOD(r, 0, 25, 25);
}
- dsi_write_reg(dsidev, DSI_CTRL, r);
+ dsi_write_reg(dsi, DSI_CTRL, r);
- dsi_config_vp_num_line_buffers(dsidev);
+ dsi_config_vp_num_line_buffers(dsi);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_config_vp_sync_events(dsidev);
- dsi_config_blanking_modes(dsidev);
- dsi_config_cmd_mode_interleaving(dsidev);
+ dsi_config_vp_sync_events(dsi);
+ dsi_config_blanking_modes(dsi);
+ dsi_config_cmd_mode_interleaving(dsi);
}
- dsi_vc_initial_config(dsidev, 0);
- dsi_vc_initial_config(dsidev, 1);
- dsi_vc_initial_config(dsidev, 2);
- dsi_vc_initial_config(dsidev, 3);
+ dsi_vc_initial_config(dsi, 0);
+ dsi_vc_initial_config(dsi, 1);
+ dsi_vc_initial_config(dsi, 2);
+ dsi_vc_initial_config(dsi, 3);
return 0;
}
-static void dsi_proto_timings(struct platform_device *dsidev)
+static void dsi_proto_timings(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
- unsigned tclk_pre, tclk_post;
- unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
- unsigned ths_trail, ths_exit;
- unsigned ddr_clk_pre, ddr_clk_post;
- unsigned enter_hs_mode_lat, exit_hs_mode_lat;
- unsigned ths_eot;
+ unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned int tclk_pre, tclk_post;
+ unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero;
+ unsigned int ths_trail, ths_exit;
+ unsigned int ddr_clk_pre, ddr_clk_post;
+ unsigned int enter_hs_mode_lat, exit_hs_mode_lat;
+ unsigned int ths_eot;
int ndl = dsi->num_lanes_used - 1;
u32 r;
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
ths_prepare = FLD_GET(r, 31, 24);
ths_prepare_ths_zero = FLD_GET(r, 23, 16);
ths_zero = ths_prepare_ths_zero - ths_prepare;
ths_trail = FLD_GET(r, 15, 8);
ths_exit = FLD_GET(r, 7, 0);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
tclk_prepare = FLD_GET(r, 7, 0);
/* min 8*UI */
tclk_pre = 20;
/* min 60ns + 52*UI */
- tclk_post = ns2ddr(dsidev, 60) + 26;
+ tclk_post = ns2ddr(dsi, 60) + 26;
ths_eot = DIV_ROUND_UP(4, ndl);
@@ -3781,10 +3691,10 @@ static void dsi_proto_timings(struct platform_device *dsidev)
BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
- r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
+ r = dsi_read_reg(dsi, DSI_CLK_TIMING);
r = FLD_MOD(r, ddr_clk_pre, 15, 8);
r = FLD_MOD(r, ddr_clk_post, 7, 0);
- dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
+ dsi_write_reg(dsi, DSI_CLK_TIMING, r);
DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
ddr_clk_pre,
@@ -3798,7 +3708,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
FLD_VAL(exit_hs_mode_lat, 15, 0);
- dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING7, r);
DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
enter_hs_mode_lat, exit_hs_mode_lat);
@@ -3832,31 +3742,30 @@ static void dsi_proto_timings(struct platform_device *dsidev)
DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
vsa, vm->vactive);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING1);
r = FLD_MOD(r, hbp, 11, 0); /* HBP */
r = FLD_MOD(r, hfp, 23, 12); /* HFP */
r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24); /* HSA */
- dsi_write_reg(dsidev, DSI_VM_TIMING1, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING1, r);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING2);
r = FLD_MOD(r, vbp, 7, 0); /* VBP */
r = FLD_MOD(r, vfp, 15, 8); /* VFP */
r = FLD_MOD(r, vsa, 23, 16); /* VSA */
r = FLD_MOD(r, window_sync, 27, 24); /* WINDOW_SYNC */
- dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING2, r);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING3);
r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */
r = FLD_MOD(r, tl, 31, 16); /* TL */
- dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING3, r);
}
}
static int dsi_configure_pins(struct omap_dss_device *dssdev,
const struct omap_dsi_pin_config *pin_cfg)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int num_pins;
const int *pins;
struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
@@ -3921,9 +3830,7 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- enum omap_channel dispc_channel = dssdev->dispc_channel;
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
struct omap_dss_device *out = &dsi->output;
u8 data_type;
@@ -3935,7 +3842,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
return -ENODEV;
}
- r = dsi_display_init_dispc(dsidev, dispc_channel);
+ r = dsi_display_init_dispc(dsi);
if (r)
goto err_init_dispc;
@@ -3958,22 +3865,22 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
goto err_pix_fmt;
}
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, channel, false);
/* MODE, 1 = video mode */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 4, 4);
word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
- dsi_vc_write_long_header(dsidev, channel, data_type,
+ dsi_vc_write_long_header(dsi, channel, data_type,
word_count, 0);
- dsi_vc_enable(dsidev, channel, true);
- dsi_if_enable(dsidev, true);
+ dsi_vc_enable(dsi, channel, true);
+ dsi_if_enable(dsi, true);
}
- r = dss_mgr_enable(dispc_channel);
+ r = dss_mgr_enable(&dsi->output);
if (r)
goto err_mgr_enable;
@@ -3981,57 +3888,53 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
err_mgr_enable:
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, channel, false);
}
err_pix_fmt:
- dsi_display_uninit_dispc(dsidev, dispc_channel);
+ dsi_display_uninit_dispc(dsi);
err_init_dispc:
return r;
}
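
dsi_enable_video_output() above unwinds with the usual goto-to-label idiom: each completed step has a matching error label, so a later failure tears down only what was actually set up. A minimal, self-contained sketch of the idiom follows; step_a()/undo_a()/step_b() are invented stand-ins, not functions from this driver.

```c
/* Standalone sketch of the goto-unwind idiom used in
 * dsi_enable_video_output(); all names below are made up.
 */
#include <stdio.h>

static int step_a(void) { puts("a up"); return 0; }
static void undo_a(void) { puts("a down"); }
static int step_b(void) { puts("b failed"); return -1; }

static int enable_output(void)
{
	int r;

	r = step_a();
	if (r)
		goto err_a;

	r = step_b();
	if (r)
		goto err_b;	/* only undoes what step_a set up */

	return 0;

err_b:
	undo_a();
err_a:
	return r;
}

int main(void)
{
	return enable_output() ? 1 : 0;
}
```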
static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- enum omap_channel dispc_channel = dssdev->dispc_channel;
+ struct dsi_data *dsi = to_dsi_data(dssdev);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, channel, false);
/* MODE, 0 = command mode */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 0, 4, 4);
- dsi_vc_enable(dsidev, channel, true);
- dsi_if_enable(dsidev, true);
+ dsi_vc_enable(dsi, channel, true);
+ dsi_if_enable(dsi, true);
}
- dss_mgr_disable(dispc_channel);
+ dss_mgr_disable(&dsi->output);
- dsi_display_uninit_dispc(dsidev, dispc_channel);
+ dsi_display_uninit_dispc(dsi);
}
-static void dsi_update_screen_dispc(struct platform_device *dsidev)
+static void dsi_update_screen_dispc(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- enum omap_channel dispc_channel = dsi->output.dispc_channel;
- unsigned bytespp;
- unsigned bytespl;
- unsigned bytespf;
- unsigned total_len;
- unsigned packet_payload;
- unsigned packet_len;
+ unsigned int bytespp;
+ unsigned int bytespl;
+ unsigned int bytespf;
+ unsigned int total_len;
+ unsigned int packet_payload;
+ unsigned int packet_len;
u32 l;
int r;
const unsigned channel = dsi->update_channel;
- const unsigned line_buf_size = dsi->line_buffer_size;
+ const unsigned int line_buf_size = dsi->line_buffer_size;
u16 w = dsi->vm.hactive;
u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
- dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
+ dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_VP);
bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
bytespl = w * bytespp;
@@ -4052,16 +3955,16 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
total_len += (bytespf % packet_payload) + 1;
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
- dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(channel), l);
- dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
+ dsi_vc_write_long_header(dsi, channel, MIPI_DSI_DCS_LONG_WRITE,
packet_len, 0);
if (dsi->te_enabled)
l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
else
l = FLD_MOD(l, 1, 31, 31); /* TE_START */
- dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(channel), l);
/* We put SIDLEMODE to no-idle for the duration of the transfer,
* because DSS interrupts are not capable of waking up the CPU and the
@@ -4069,24 +3972,24 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
* the same goes for any DSS interrupts, but for some reason I have not
* seen the problem anywhere else than here.
*/
- dispc_disable_sidle();
+ dispc_disable_sidle(dsi->dss->dispc);
- dsi_perf_mark_start(dsidev);
+ dsi_perf_mark_start(dsi);
r = schedule_delayed_work(&dsi->framedone_timeout_work,
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(dispc_channel, &dsi->vm);
+ dss_mgr_set_timings(&dsi->output, &dsi->vm);
- dss_mgr_start_update(dispc_channel);
+ dss_mgr_start_update(&dsi->output);
if (dsi->te_enabled) {
/* disable LP_RX_TO, so that we can receive TE. Time to wait
* for TE is longer than the timer allows */
- REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
+ REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
- dsi_vc_send_bta(dsidev, channel);
+ dsi_vc_send_bta(dsi, channel);
#ifdef DSI_CATCH_MISSING_TE
mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
@@ -4101,22 +4004,20 @@ static void dsi_te_timeout(struct timer_list *unused)
}
#endif
-static void dsi_handle_framedone(struct platform_device *dsidev, int error)
+static void dsi_handle_framedone(struct dsi_data *dsi, int error)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
/* SIDLEMODE back to smart-idle */
- dispc_enable_sidle();
+ dispc_enable_sidle(dsi->dss->dispc);
if (dsi->te_enabled) {
/* enable LP_RX_TO again after the TE */
- REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
+ REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
dsi->framedone_callback(error, dsi->framedone_data);
if (!error)
- dsi_perf_show(dsidev, "DISPC");
+ dsi_perf_show(dsi, "DISPC");
}
static void dsi_framedone_timeout_work_callback(struct work_struct *work)
@@ -4132,13 +4033,12 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
DSSERR("Framedone not received for 250ms!\n");
- dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
+ dsi_handle_framedone(dsi, -ETIMEDOUT);
}
static void dsi_framedone_irq_callback(void *data)
{
- struct platform_device *dsidev = (struct platform_device *) data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = data;
/* Note: We get FRAMEDONE when DISPC has finished sending pixels and
* turns itself off. However, DSI still has the pixels in its buffers,
@@ -4147,17 +4047,16 @@ static void dsi_framedone_irq_callback(void *data)
cancel_delayed_work(&dsi->framedone_timeout_work);
- dsi_handle_framedone(dsidev, 0);
+ dsi_handle_framedone(dsi, 0);
}
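
With this change the FRAMEDONE handler receives the struct dsi_data pointer directly as its opaque callback argument, instead of a platform_device that then needs a drvdata lookup. The sketch below shows the plain void * cookie pattern; register_cb()/fire_cb() and struct dsi_like are illustrative names only, not omapdss API.

```c
/* Standalone sketch of the void * "cookie" pattern: the private data
 * registered with the callback is handed straight back to it, just as
 * dsi_framedone_irq_callback() now receives struct dsi_data * directly.
 */
#include <stdio.h>

struct dsi_like {
	int frames_done;
};

struct cb_slot {
	void (*fn)(void *data);
	void *data;
};

static void register_cb(struct cb_slot *slot, void (*fn)(void *), void *data)
{
	slot->fn = fn;
	slot->data = data;
}

static void fire_cb(struct cb_slot *slot)
{
	slot->fn(slot->data);
}

static void framedone_cb(void *data)
{
	struct dsi_like *dsi = data;	/* no container lookup needed */

	dsi->frames_done++;
	printf("frames done: %d\n", dsi->frames_done);
}

int main(void)
{
	struct dsi_like dsi = { 0 };
	struct cb_slot slot;

	register_cb(&slot, framedone_cb, &dsi);
	fire_cb(&slot);
	return 0;
}
```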
static int dsi_update(struct omap_dss_device *dssdev, int channel,
void (*callback)(int, void *), void *data)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
u16 dw, dh;
- dsi_perf_mark_setup(dsidev);
+ dsi_perf_mark_setup(dsi);
dsi->update_channel = channel;
@@ -4171,26 +4070,25 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->update_bytes = dw * dh *
dsi_get_pixel_size(dsi->pix_fmt) / 8;
#endif
- dsi_update_screen_dispc(dsidev);
+ dsi_update_screen_dispc(dsi);
return 0;
}
/* Display funcs */
-static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
+static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dispc_clock_info dispc_cinfo;
int r;
unsigned long fck;
- fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ fck = dsi_get_pll_hsdiv_dispc_rate(dsi);
dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;
- r = dispc_calc_clock_rates(fck, &dispc_cinfo);
+ r = dispc_calc_clock_rates(dsi->dss->dispc, fck, &dispc_cinfo);
if (r) {
DSSERR("Failed to calc dispc clocks\n");
return r;
@@ -4201,19 +4099,18 @@ static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
return 0;
}
-static int dsi_display_init_dispc(struct platform_device *dsidev,
- enum omap_channel channel)
+static int dsi_display_init_dispc(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ enum omap_channel channel = dsi->output.dispc_channel;
int r;
- dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
+ dss_select_lcd_clk_source(dsi->dss, channel, dsi->module_id == 0 ?
DSS_CLK_SRC_PLL1_1 :
DSS_CLK_SRC_PLL2_1);
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
- r = dss_mgr_register_framedone_handler(channel,
- dsi_framedone_irq_callback, dsidev);
+ r = dss_mgr_register_framedone_handler(&dsi->output,
+ dsi_framedone_irq_callback, dsi);
if (r) {
DSSERR("can't register FRAMEDONE handler\n");
goto err;
@@ -4242,9 +4139,9 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
- dss_mgr_set_timings(channel, &dsi->vm);
+ dss_mgr_set_timings(&dsi->output, &dsi->vm);
- r = dsi_configure_dispc_clocks(dsidev);
+ r = dsi_configure_dispc_clocks(dsi);
if (r)
goto err1;
@@ -4253,33 +4150,31 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
dsi_get_pixel_size(dsi->pix_fmt);
dsi->mgr_config.lcden_sig_polarity = 0;
- dss_mgr_set_lcd_config(channel, &dsi->mgr_config);
+ dss_mgr_set_lcd_config(&dsi->output, &dsi->mgr_config);
return 0;
err1:
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
- dss_mgr_unregister_framedone_handler(channel,
- dsi_framedone_irq_callback, dsidev);
+ dss_mgr_unregister_framedone_handler(&dsi->output,
+ dsi_framedone_irq_callback, dsi);
err:
- dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
return r;
}
-static void dsi_display_uninit_dispc(struct platform_device *dsidev,
- enum omap_channel channel)
+static void dsi_display_uninit_dispc(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ enum omap_channel channel = dsi->output.dispc_channel;
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
- dss_mgr_unregister_framedone_handler(channel,
- dsi_framedone_irq_callback, dsidev);
+ dss_mgr_unregister_framedone_handler(&dsi->output,
+ dsi_framedone_irq_callback, dsi);
- dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
}
-static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
+static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll_clock_info cinfo;
int r;
@@ -4294,99 +4189,95 @@ static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
return 0;
}
-static int dsi_display_init_dsi(struct platform_device *dsidev)
+static int dsi_display_init_dsi(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
r = dss_pll_enable(&dsi->pll);
if (r)
goto err0;
- r = dsi_configure_dsi_clocks(dsidev);
+ r = dsi_configure_dsi_clocks(dsi);
if (r)
goto err1;
- dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
- DSS_CLK_SRC_PLL1_2 :
- DSS_CLK_SRC_PLL2_2);
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
+ dsi->module_id == 0 ?
+ DSS_CLK_SRC_PLL1_2 : DSS_CLK_SRC_PLL2_2);
DSSDBG("PLL OK\n");
- r = dsi_cio_init(dsidev);
+ r = dsi_cio_init(dsi);
if (r)
goto err2;
- _dsi_print_reset_status(dsidev);
+ _dsi_print_reset_status(dsi);
- dsi_proto_timings(dsidev);
- dsi_set_lp_clk_divisor(dsidev);
+ dsi_proto_timings(dsi);
+ dsi_set_lp_clk_divisor(dsi);
if (1)
- _dsi_print_reset_status(dsidev);
+ _dsi_print_reset_status(dsi);
- r = dsi_proto_config(dsidev);
+ r = dsi_proto_config(dsi);
if (r)
goto err3;
/* enable interface */
- dsi_vc_enable(dsidev, 0, 1);
- dsi_vc_enable(dsidev, 1, 1);
- dsi_vc_enable(dsidev, 2, 1);
- dsi_vc_enable(dsidev, 3, 1);
- dsi_if_enable(dsidev, 1);
- dsi_force_tx_stop_mode_io(dsidev);
+ dsi_vc_enable(dsi, 0, 1);
+ dsi_vc_enable(dsi, 1, 1);
+ dsi_vc_enable(dsi, 2, 1);
+ dsi_vc_enable(dsi, 3, 1);
+ dsi_if_enable(dsi, 1);
+ dsi_force_tx_stop_mode_io(dsi);
return 0;
err3:
- dsi_cio_uninit(dsidev);
+ dsi_cio_uninit(dsi);
err2:
- dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
err1:
dss_pll_disable(&dsi->pll);
err0:
return r;
}
-static void dsi_display_uninit_dsi(struct platform_device *dsidev,
- bool disconnect_lanes, bool enter_ulps)
+static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
+ bool enter_ulps)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
if (enter_ulps && !dsi->ulps_enabled)
- dsi_enter_ulps(dsidev);
+ dsi_enter_ulps(dsi);
/* disable interface */
- dsi_if_enable(dsidev, 0);
- dsi_vc_enable(dsidev, 0, 0);
- dsi_vc_enable(dsidev, 1, 0);
- dsi_vc_enable(dsidev, 2, 0);
- dsi_vc_enable(dsidev, 3, 0);
+ dsi_if_enable(dsi, 0);
+ dsi_vc_enable(dsi, 0, 0);
+ dsi_vc_enable(dsi, 1, 0);
+ dsi_vc_enable(dsi, 2, 0);
+ dsi_vc_enable(dsi, 3, 0);
- dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
- dsi_cio_uninit(dsidev);
- dsi_pll_uninit(dsidev, disconnect_lanes);
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
+ dsi_cio_uninit(dsi);
+ dsi_pll_uninit(dsi, disconnect_lanes);
}
static int dsi_display_enable(struct omap_dss_device *dssdev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r = 0;
DSSDBG("dsi_display_enable\n");
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
mutex_lock(&dsi->lock);
- r = dsi_runtime_get(dsidev);
+ r = dsi_runtime_get(dsi);
if (r)
goto err_get_dsi;
- _dsi_initialize_irq(dsidev);
+ _dsi_initialize_irq(dsi);
- r = dsi_display_init_dsi(dsidev);
+ r = dsi_display_init_dsi(dsi);
if (r)
goto err_init_dsi;
@@ -4395,7 +4286,7 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
return 0;
err_init_dsi:
- dsi_runtime_put(dsidev);
+ dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
@@ -4405,31 +4296,29 @@ err_get_dsi:
static void dsi_display_disable(struct omap_dss_device *dssdev,
bool disconnect_lanes, bool enter_ulps)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
DSSDBG("dsi_display_disable\n");
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
mutex_lock(&dsi->lock);
- dsi_sync_vc(dsidev, 0);
- dsi_sync_vc(dsidev, 1);
- dsi_sync_vc(dsidev, 2);
- dsi_sync_vc(dsidev, 3);
+ dsi_sync_vc(dsi, 0);
+ dsi_sync_vc(dsi, 1);
+ dsi_sync_vc(dsi, 2);
+ dsi_sync_vc(dsi, 3);
- dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
+ dsi_display_uninit_dsi(dsi, disconnect_lanes, enter_ulps);
- dsi_runtime_put(dsidev);
+ dsi_runtime_put(dsi);
mutex_unlock(&dsi->lock);
}
static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
dsi->te_enabled = enable;
return 0;
@@ -4550,15 +4439,16 @@ static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
- return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
- dsi_cm_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->dsi->dss->dispc, dispc,
+ ctx->req_pck_min, ctx->req_pck_max,
+ dsi_cm_calc_dispc_cb, ctx);
}
static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
+ struct dsi_data *dsi = ctx->dsi;
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
@@ -4594,7 +4484,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
txbyteclk = pck * bitspp / 8 / ndl;
memset(ctx, 0, sizeof(*ctx));
- ctx->dsidev = dsi->pdev;
+ ctx->dsi = dsi;
ctx->pll = &dsi->pll;
ctx->config = cfg;
ctx->req_pck_min = pck;
@@ -4611,7 +4501,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
+ struct dsi_data *dsi = ctx->dsi;
const struct omap_dss_dsi_config *cfg = ctx->config;
int bitspp = dsi_get_pixel_size(cfg->pixel_format);
int ndl = dsi->num_lanes_used - 1;
@@ -4850,15 +4740,16 @@ static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
else
pck_max = ctx->req_pck_max;
- return dispc_div_calc(dispc, ctx->req_pck_min, pck_max,
- dsi_vm_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->dsi->dss->dispc, dispc,
+ ctx->req_pck_min, pck_max,
+ dsi_vm_calc_dispc_cb, ctx);
}
static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
+ struct dsi_data *dsi = ctx->dsi;
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
@@ -4885,7 +4776,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
clkin = clk_get_rate(dsi->pll.clkin);
memset(ctx, 0, sizeof(*ctx));
- ctx->dsidev = dsi->pdev;
+ ctx->dsi = dsi;
ctx->pll = &dsi->pll;
ctx->config = cfg;
@@ -4915,8 +4806,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
static int dsi_set_config(struct omap_dss_device *dssdev,
const struct omap_dss_dsi_config *config)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
struct dsi_clk_calc_ctx ctx;
bool ok;
int r;
@@ -5003,8 +4893,7 @@ static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int i;
for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
@@ -5021,8 +4910,7 @@ static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
if (vc_id < 0 || vc_id > 3) {
DSSERR("VC ID out of range\n");
@@ -5047,8 +4935,7 @@ static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
if ((channel >= 0 && channel <= 3) &&
dsi->vc[channel].dssdev == dssdev) {
@@ -5058,12 +4945,11 @@ static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
}
-static int dsi_get_clocks(struct platform_device *dsidev)
+static int dsi_get_clocks(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct clk *clk;
- clk = devm_clk_get(&dsidev->dev, "fck");
+ clk = devm_clk_get(dsi->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get fck\n");
return PTR_ERR(clk);
@@ -5077,15 +4963,14 @@ static int dsi_get_clocks(struct platform_device *dsidev)
static int dsi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- enum omap_channel dispc_channel = dssdev->dispc_channel;
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_regulator_init(dsidev);
+ r = dsi_regulator_init(dsi);
if (r)
return r;
- r = dss_mgr_connect(dispc_channel, dssdev);
+ r = dss_mgr_connect(&dsi->output, dssdev);
if (r)
return r;
@@ -5093,7 +4978,7 @@ static int dsi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dssdev->name);
- dss_mgr_disconnect(dispc_channel, dssdev);
+ dss_mgr_disconnect(&dsi->output, dssdev);
return r;
}
@@ -5103,7 +4988,7 @@ static int dsi_connect(struct omap_dss_device *dssdev,
static void dsi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel dispc_channel = dssdev->dispc_channel;
+ struct dsi_data *dsi = to_dsi_data(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -5112,7 +4997,7 @@ static void dsi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(dispc_channel, dssdev);
+ dss_mgr_disconnect(&dsi->output, dssdev);
}
static const struct omapdss_dsi_ops dsi_ops = {
@@ -5154,12 +5039,11 @@ static const struct omapdss_dsi_ops dsi_ops = {
.set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
};
-static void dsi_init_output(struct platform_device *dsidev)
+static void dsi_init_output(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_device *out = &dsi->output;
- out->dev = &dsidev->dev;
+ out->dev = dsi->dev;
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
@@ -5172,18 +5056,16 @@ static void dsi_init_output(struct platform_device *dsidev)
omapdss_register_output(out);
}
-static void dsi_uninit_output(struct platform_device *dsidev)
+static void dsi_uninit_output(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_device *out = &dsi->output;
omapdss_unregister_output(out);
}
-static int dsi_probe_of(struct platform_device *pdev)
+static int dsi_probe_of(struct dsi_data *dsi)
{
- struct device_node *node = pdev->dev.of_node;
- struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
+ struct device_node *node = dsi->dev->of_node;
struct property *prop;
u32 lane_arr[10];
int len, num_pins;
@@ -5197,7 +5079,7 @@ static int dsi_probe_of(struct platform_device *pdev)
prop = of_find_property(ep, "lanes", &len);
if (prop == NULL) {
- dev_err(&pdev->dev, "failed to find lane data\n");
+ dev_err(dsi->dev, "failed to find lane data\n");
r = -EINVAL;
goto err;
}
@@ -5206,14 +5088,14 @@ static int dsi_probe_of(struct platform_device *pdev)
if (num_pins < 4 || num_pins % 2 != 0 ||
num_pins > dsi->num_lanes_supported * 2) {
- dev_err(&pdev->dev, "bad number of lanes\n");
+ dev_err(dsi->dev, "bad number of lanes\n");
r = -EINVAL;
goto err;
}
r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
if (r) {
- dev_err(&pdev->dev, "failed to read lane data\n");
+ dev_err(dsi->dev, "failed to read lane data\n");
goto err;
}
@@ -5223,7 +5105,7 @@ static int dsi_probe_of(struct platform_device *pdev)
r = dsi_configure_pins(&dsi->output, &pin_cfg);
if (r) {
- dev_err(&pdev->dev, "failed to configure pins");
+ dev_err(dsi->dev, "failed to configure pins");
goto err;
}
@@ -5323,14 +5205,13 @@ static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
.has_refsel = true,
};
-static int dsi_init_pll_data(struct platform_device *dsidev)
+static int dsi_init_pll_data(struct dss_device *dss, struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll *pll = &dsi->pll;
struct clk *clk;
int r;
- clk = devm_clk_get(&dsidev->dev, "sys_clk");
+ clk = devm_clk_get(dsi->dev, "sys_clk");
if (IS_ERR(clk)) {
DSSERR("can't get sys_clk\n");
return PTR_ERR(clk);
@@ -5343,7 +5224,7 @@ static int dsi_init_pll_data(struct platform_device *dsidev)
pll->hw = dsi->data->pll_hw;
pll->ops = &dsi_pll_ops;
- r = dss_pll_register(pll);
+ r = dss_pll_register(dss, pll);
if (r)
return r;
@@ -5415,9 +5296,11 @@ static const struct soc_device_attribute dsi_soc_devices[] = {
{ .machine = "AM35*", .data = &dsi_of_data_omap34xx },
{ /* sentinel */ }
};
+
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *dsidev = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dss_device *dss = dss_get_device(master);
const struct soc_device_attribute *soc;
const struct dsi_module_id_data *d;
u32 rev;
@@ -5426,12 +5309,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
struct resource *dsi_mem;
struct resource *res;
- dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
- dsi->pdev = dsidev;
- dev_set_drvdata(&dsidev->dev, dsi);
+ dsi->dss = dss;
+ dsi->dev = dev;
+ dev_set_drvdata(dev, dsi);
spin_lock_init(&dsi->irq_lock);
spin_lock_init(&dsi->errors_lock);
@@ -5452,29 +5336,29 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
- dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
- dsi->proto_base = devm_ioremap_resource(&dsidev->dev, dsi_mem);
+ dsi_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proto");
+ dsi->proto_base = devm_ioremap_resource(dev, dsi_mem);
if (IS_ERR(dsi->proto_base))
return PTR_ERR(dsi->proto_base);
- res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy");
- dsi->phy_base = devm_ioremap_resource(&dsidev->dev, res);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ dsi->phy_base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->phy_base))
return PTR_ERR(dsi->phy_base);
- res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll");
- dsi->pll_base = devm_ioremap_resource(&dsidev->dev, res);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
+ dsi->pll_base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->pll_base))
return PTR_ERR(dsi->pll_base);
- dsi->irq = platform_get_irq(dsi->pdev, 0);
+ dsi->irq = platform_get_irq(pdev, 0);
if (dsi->irq < 0) {
DSSERR("platform_get_irq failed\n");
return -ENODEV;
}
- r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler,
- IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev);
+ r = devm_request_irq(dev, dsi->irq, omap_dsi_irq_handler,
+ IRQF_SHARED, dev_name(dev), dsi);
if (r < 0) {
DSSERR("request_irq failed\n");
return r;
@@ -5522,83 +5406,92 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->vc[i].vc_id = 0;
}
- r = dsi_get_clocks(dsidev);
+ r = dsi_get_clocks(dsi);
if (r)
return r;
- dsi_init_pll_data(dsidev);
+ dsi_init_pll_data(dss, dsi);
- pm_runtime_enable(&dsidev->dev);
+ pm_runtime_enable(dev);
- r = dsi_runtime_get(dsidev);
+ r = dsi_runtime_get(dsi);
if (r)
goto err_runtime_get;
- rev = dsi_read_reg(dsidev, DSI_REVISION);
- dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
+ rev = dsi_read_reg(dsi, DSI_REVISION);
+ dev_dbg(dev, "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
* of data to 3 by default */
if (dsi->data->quirks & DSI_QUIRK_GNQ)
/* NB_DATA_LANES */
- dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
+ dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
else
dsi->num_lanes_supported = 3;
- dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);
+ dsi->line_buffer_size = dsi_get_line_buf_size(dsi);
- dsi_init_output(dsidev);
+ dsi_init_output(dsi);
- r = dsi_probe_of(dsidev);
+ r = dsi_probe_of(dsi);
if (r) {
DSSERR("Invalid DSI DT data\n");
goto err_probe_of;
}
- r = of_platform_populate(dsidev->dev.of_node, NULL, NULL, &dsidev->dev);
+ r = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (r)
DSSERR("Failed to populate DSI child devices: %d\n", r);
- dsi_runtime_put(dsidev);
+ dsi_runtime_put(dsi);
if (dsi->module_id == 0)
- dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
- else if (dsi->module_id == 1)
- dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
-
+ dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi1_regs",
+ dsi1_dump_regs,
+ &dsi);
+ else
+ dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi2_regs",
+ dsi2_dump_regs,
+ &dsi);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
if (dsi->module_id == 0)
- dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
- else if (dsi->module_id == 1)
- dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
+ dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi1_irqs",
+ dsi1_dump_irqs,
+ &dsi);
+ else
+ dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi2_irqs",
+ dsi2_dump_irqs,
+ &dsi);
#endif
return 0;
err_probe_of:
- dsi_uninit_output(dsidev);
- dsi_runtime_put(dsidev);
+ dsi_uninit_output(dsi);
+ dsi_runtime_put(dsi);
err_runtime_get:
- pm_runtime_disable(&dsidev->dev);
+ pm_runtime_disable(dev);
return r;
}
static void dsi_unbind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *dsidev = to_platform_device(dev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(dsi->debugfs.irqs);
+ dss_debugfs_remove_file(dsi->debugfs.regs);
- of_platform_depopulate(&dsidev->dev);
+ of_platform_depopulate(dev);
WARN_ON(dsi->scp_clk_refcount > 0);
dss_pll_unregister(&dsi->pll);
- dsi_uninit_output(dsidev);
+ dsi_uninit_output(dsi);
- pm_runtime_disable(&dsidev->dev);
+ pm_runtime_disable(dev);
if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
@@ -5624,8 +5517,7 @@ static int dsi_remove(struct platform_device *pdev)
static int dsi_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
+ struct dsi_data *dsi = dev_get_drvdata(dev);
dsi->is_enabled = false;
/* ensure the irq handler sees the is_enabled value */
@@ -5633,18 +5525,17 @@ static int dsi_runtime_suspend(struct device *dev)
/* wait for current handler to finish before turning the DSI off */
synchronize_irq(dsi->irq);
- dispc_runtime_put();
+ dispc_runtime_put(dsi->dss->dispc);
return 0;
}
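
The bind and runtime PM paths now rely on the standard drvdata round trip: the instance is stored once with dev_set_drvdata() and fetched back with dev_get_drvdata(), replacing the to_platform_device() plus private-lookup dance. The sketch below mocks the device structure so it builds standalone; the mock helpers merely stand in for the real <linux/device.h> accessors.

```c
/* Standalone sketch of the drvdata round trip used by dsi_bind() and the
 * runtime PM callbacks. struct mock_device and its accessors stand in
 * for struct device / dev_set_drvdata() / dev_get_drvdata().
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_device {
	void *driver_data;
};

static void mock_set_drvdata(struct mock_device *dev, void *data)
{
	dev->driver_data = data;
}

static void *mock_get_drvdata(struct mock_device *dev)
{
	return dev->driver_data;
}

struct dsi_state {
	bool is_enabled;
};

static int bind(struct mock_device *dev, struct dsi_state *dsi)
{
	mock_set_drvdata(dev, dsi);	/* like dev_set_drvdata(dev, dsi) */
	return 0;
}

static int runtime_suspend(struct mock_device *dev)
{
	struct dsi_state *dsi = mock_get_drvdata(dev);

	dsi->is_enabled = false;	/* mirrors dsi_runtime_suspend() */
	return 0;
}

int main(void)
{
	struct mock_device dev = { 0 };
	struct dsi_state dsi = { .is_enabled = true };

	bind(&dev, &dsi);
	runtime_suspend(&dev);
	printf("enabled after suspend: %d\n", dsi.is_enabled);
	return 0;
}
```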
static int dsi_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
+ struct dsi_data *dsi = dev_get_drvdata(dev);
int r;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(dsi->dss->dispc);
if (r)
return r;
@@ -5660,7 +5551,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
.runtime_resume = dsi_runtime_resume,
};
-static struct platform_driver omap_dsihw_driver = {
+struct platform_driver omap_dsihw_driver = {
.probe = dsi_probe,
.remove = dsi_remove,
.driver = {
@@ -5670,13 +5561,3 @@ static struct platform_driver omap_dsihw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init dsi_init_platform_driver(void)
-{
- return platform_driver_register(&omap_dsihw_driver);
-}
-
-void dsi_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_dsihw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index c6b86f3..4602a79 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -44,7 +44,6 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
return NULL;
}
-EXPORT_SYMBOL_GPL(dss_of_port_get_parent_device);
u32 dss_of_port_get_port_number(struct device_node *port)
{
@@ -57,7 +56,6 @@ u32 dss_of_port_get_port_number(struct device_node *port)
return reg;
}
-EXPORT_SYMBOL_GPL(dss_of_port_get_port_number);
struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index d1755f1..0b908e9 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dss.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -23,6 +21,7 @@
#define DSS_SUBSYS_NAME "DSS"
#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -49,8 +48,6 @@
#include "omapdss.h"
#include "dss.h"
-#define DSS_SZ_REGS SZ_512
-
struct dss_reg {
u16 idx;
};
@@ -65,16 +62,19 @@ struct dss_reg {
#define DSS_PLL_CONTROL DSS_REG(0x0048)
#define DSS_SDI_STATUS DSS_REG(0x005C)
-#define REG_GET(idx, start, end) \
- FLD_GET(dss_read_reg(idx), start, end)
+#define REG_GET(dss, idx, start, end) \
+ FLD_GET(dss_read_reg(dss, idx), start, end)
-#define REG_FLD_MOD(idx, val, start, end) \
- dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
+#define REG_FLD_MOD(dss, idx, val, start, end) \
+ dss_write_reg(dss, idx, \
+ FLD_MOD(dss_read_reg(dss, idx), val, start, end))
struct dss_ops {
- int (*dpi_select_source)(int port, enum omap_channel channel);
- int (*select_lcd_source)(enum omap_channel channel,
- enum dss_clk_source clk_src);
+ int (*dpi_select_source)(struct dss_device *dss, int port,
+ enum omap_channel channel);
+ int (*select_lcd_source)(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src);
};
struct dss_features {
@@ -91,33 +91,6 @@ struct dss_features {
bool has_lcd_clk_src;
};
-static struct {
- struct platform_device *pdev;
- void __iomem *base;
- struct regmap *syscon_pll_ctrl;
- u32 syscon_pll_ctrl_offset;
-
- struct clk *parent_clk;
- struct clk *dss_clk;
- unsigned long dss_clk_rate;
-
- unsigned long cache_req_pck;
- unsigned long cache_prate;
- struct dispc_clock_info cache_dispc_cinfo;
-
- enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
- enum dss_clk_source dispc_clk_source;
- enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
-
- bool ctx_valid;
- u32 ctx[DSS_SZ_REGS / sizeof(u32)];
-
- const struct dss_features *feat;
-
- struct dss_pll *video1_pll;
- struct dss_pll *video2_pll;
-} dss;
-
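
The hunk above removes the file-scope dss singleton; from here on every helper takes an explicit struct dss_device * argument. The toy sketch below shows the same refactoring in isolation, with two instances coexisting once the state is passed explicitly; all names in it are invented for illustration.

```c
/* Standalone sketch of the "drop the file-scope singleton, pass the
 * instance explicitly" refactoring applied throughout dss.c. The toy
 * struct and helpers are invented for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_dss {
	uint32_t regs[4];	/* stands in for the ioremapped register base */
	unsigned long fck_rate;
};

/* after the refactoring: every helper takes the instance it operates on */
static uint32_t toy_read_reg(struct toy_dss *dss, unsigned int idx)
{
	return dss->regs[idx];
}

static void toy_write_reg(struct toy_dss *dss, unsigned int idx, uint32_t val)
{
	dss->regs[idx] = val;
}

int main(void)
{
	/* two independent instances can now coexist */
	struct toy_dss a = { .fck_rate = 192000000 };
	struct toy_dss b = { .fck_rate = 153600000 };

	toy_write_reg(&a, 0, 0x1);
	toy_write_reg(&b, 0, 0x2);
	printf("a=%u b=%u\n", (unsigned int)toy_read_reg(&a, 0),
	       (unsigned int)toy_read_reg(&b, 0));
	return 0;
}
```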
static const char * const dss_generic_clk_source_names[] = {
[DSS_CLK_SRC_FCK] = "FCK",
[DSS_CLK_SRC_PLL1_1] = "PLL1:1",
@@ -129,49 +102,50 @@ static const char * const dss_generic_clk_source_names[] = {
[DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL",
};
-static inline void dss_write_reg(const struct dss_reg idx, u32 val)
+static inline void dss_write_reg(struct dss_device *dss,
+ const struct dss_reg idx, u32 val)
{
- __raw_writel(val, dss.base + idx.idx);
+ __raw_writel(val, dss->base + idx.idx);
}
-static inline u32 dss_read_reg(const struct dss_reg idx)
+static inline u32 dss_read_reg(struct dss_device *dss, const struct dss_reg idx)
{
- return __raw_readl(dss.base + idx.idx);
+ return __raw_readl(dss->base + idx.idx);
}
-#define SR(reg) \
- dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
-#define RR(reg) \
- dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
+#define SR(dss, reg) \
+ dss->ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(dss, DSS_##reg)
+#define RR(dss, reg) \
+ dss_write_reg(dss, DSS_##reg, dss->ctx[(DSS_##reg).idx / sizeof(u32)])
-static void dss_save_context(void)
+static void dss_save_context(struct dss_device *dss)
{
DSSDBG("dss_save_context\n");
- SR(CONTROL);
+ SR(dss, CONTROL);
- if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
- SR(SDI_CONTROL);
- SR(PLL_CONTROL);
+ if (dss->feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
+ SR(dss, SDI_CONTROL);
+ SR(dss, PLL_CONTROL);
}
- dss.ctx_valid = true;
+ dss->ctx_valid = true;
DSSDBG("context saved\n");
}
-static void dss_restore_context(void)
+static void dss_restore_context(struct dss_device *dss)
{
DSSDBG("dss_restore_context\n");
- if (!dss.ctx_valid)
+ if (!dss->ctx_valid)
return;
- RR(CONTROL);
+ RR(dss, CONTROL);
- if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
- RR(SDI_CONTROL);
- RR(PLL_CONTROL);
+ if (dss->feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
+ RR(dss, SDI_CONTROL);
+ RR(dss, PLL_CONTROL);
}
DSSDBG("context restored\n");
@@ -180,17 +154,17 @@ static void dss_restore_context(void)
#undef SR
#undef RR
-void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
+void dss_ctrl_pll_enable(struct dss_pll *pll, bool enable)
{
- unsigned shift;
- unsigned val;
+ unsigned int shift;
+ unsigned int val;
- if (!dss.syscon_pll_ctrl)
+ if (!pll->dss->syscon_pll_ctrl)
return;
val = !enable;
- switch (pll_id) {
+ switch (pll->id) {
case DSS_PLL_VIDEO1:
shift = 0;
break;
@@ -201,20 +175,22 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
shift = 2;
break;
default:
- DSSERR("illegal DSS PLL ID %d\n", pll_id);
+ DSSERR("illegal DSS PLL ID %d\n", pll->id);
return;
}
- regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
- 1 << shift, val << shift);
+ regmap_update_bits(pll->dss->syscon_pll_ctrl,
+ pll->dss->syscon_pll_ctrl_offset,
+ 1 << shift, val << shift);
}
-static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
- enum omap_channel channel)
+static int dss_ctrl_pll_set_control_mux(struct dss_device *dss,
+ enum dss_clk_source clk_src,
+ enum omap_channel channel)
{
- unsigned shift, val;
+ unsigned int shift, val;
- if (!dss.syscon_pll_ctrl)
+ if (!dss->syscon_pll_ctrl)
return -EINVAL;
switch (channel) {
@@ -269,47 +245,47 @@ static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
return -EINVAL;
}
- regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
+ regmap_update_bits(dss->syscon_pll_ctrl, dss->syscon_pll_ctrl_offset,
0x3 << shift, val << shift);
return 0;
}
-void dss_sdi_init(int datapairs)
+void dss_sdi_init(struct dss_device *dss, int datapairs)
{
u32 l;
BUG_ON(datapairs > 3 || datapairs < 1);
- l = dss_read_reg(DSS_SDI_CONTROL);
+ l = dss_read_reg(dss, DSS_SDI_CONTROL);
l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */
l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */
l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */
- dss_write_reg(DSS_SDI_CONTROL, l);
+ dss_write_reg(dss, DSS_SDI_CONTROL, l);
- l = dss_read_reg(DSS_PLL_CONTROL);
+ l = dss_read_reg(dss, DSS_PLL_CONTROL);
l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */
l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */
l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */
- dss_write_reg(DSS_PLL_CONTROL, l);
+ dss_write_reg(dss, DSS_PLL_CONTROL, l);
}
-int dss_sdi_enable(void)
+int dss_sdi_enable(struct dss_device *dss)
{
unsigned long timeout;
- dispc_pck_free_enable(1);
+ dispc_pck_free_enable(dss->dispc, 1);
/* Reset SDI PLL */
- REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
udelay(1); /* wait 2x PCLK */
/* Lock SDI PLL */
- REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
/* Waiting for PLL lock request to complete */
timeout = jiffies + msecs_to_jiffies(500);
- while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) {
+ while (dss_read_reg(dss, DSS_SDI_STATUS) & (1 << 6)) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("PLL lock request timed out\n");
goto err1;
@@ -317,22 +293,22 @@ int dss_sdi_enable(void)
}
/* Clearing PLL_GO bit */
- REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28);
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 0, 28, 28);
/* Waiting for PLL to lock */
timeout = jiffies + msecs_to_jiffies(500);
- while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) {
+ while (!(dss_read_reg(dss, DSS_SDI_STATUS) & (1 << 5))) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("PLL lock timed out\n");
goto err1;
}
}
- dispc_lcd_enable_signal(1);
+ dispc_lcd_enable_signal(dss->dispc, 1);
/* Waiting for SDI reset to complete */
timeout = jiffies + msecs_to_jiffies(500);
- while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) {
+ while (!(dss_read_reg(dss, DSS_SDI_STATUS) & (1 << 2))) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("SDI reset timed out\n");
goto err2;
@@ -342,24 +318,24 @@ int dss_sdi_enable(void)
return 0;
err2:
- dispc_lcd_enable_signal(0);
+ dispc_lcd_enable_signal(dss->dispc, 0);
err1:
/* Reset SDI PLL */
- REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
- dispc_pck_free_enable(0);
+ dispc_pck_free_enable(dss->dispc, 0);
return -ETIMEDOUT;
}
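
dss_sdi_enable() above polls several status bits with jiffies-based timeout loops. The sketch below reproduces that poll-until-timeout shape in portable C, using a simple iteration budget in place of jiffies, msecs_to_jiffies() and time_after_eq(), which are kernel-only primitives.

```c
/* Standalone sketch of the poll-with-timeout loops in dss_sdi_enable().
 * An iteration budget stands in for the jiffies-based deadline; the
 * fake status word stands in for DSS_SDI_STATUS.
 */
#include <stdbool.h>
#include <stdio.h>

static int fake_status;

static bool pll_lock_pending(void)
{
	return (fake_status & (1 << 6)) != 0;
}

static int wait_for_pll_lock_request(unsigned int budget)
{
	while (pll_lock_pending()) {
		if (budget-- == 0) {
			fprintf(stderr, "PLL lock request timed out\n");
			return -1;	/* the kernel code returns -ETIMEDOUT */
		}
		fake_status &= ~(1 << 6);	/* pretend hardware makes progress */
	}
	return 0;
}

int main(void)
{
	fake_status = 1 << 6;		/* request initially pending */
	return wait_for_pll_lock_request(500) ? 1 : 0;
}
```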
-void dss_sdi_disable(void)
+void dss_sdi_disable(struct dss_device *dss)
{
- dispc_lcd_enable_signal(0);
+ dispc_lcd_enable_signal(dss->dispc, 0);
- dispc_pck_free_enable(0);
+ dispc_pck_free_enable(dss->dispc, 0);
/* Reset SDI PLL */
- REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}
const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
@@ -367,46 +343,61 @@ const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
return dss_generic_clk_source_names[clk_src];
}
-void dss_dump_clocks(struct seq_file *s)
+static void dss_dump_clocks(struct dss_device *dss, struct seq_file *s)
{
const char *fclk_name;
unsigned long fclk_rate;
- if (dss_runtime_get())
+ if (dss_runtime_get(dss))
return;
seq_printf(s, "- DSS -\n");
fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK);
- fclk_rate = clk_get_rate(dss.dss_clk);
+ fclk_rate = clk_get_rate(dss->dss_clk);
seq_printf(s, "%s = %lu\n",
fclk_name,
fclk_rate);
- dss_runtime_put();
+ dss_runtime_put(dss);
}
-static void dss_dump_regs(struct seq_file *s)
+static int dss_dump_regs(struct seq_file *s, void *p)
{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
+ struct dss_device *dss = s->private;
- if (dss_runtime_get())
- return;
+#define DUMPREG(dss, r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(dss, r))
- DUMPREG(DSS_REVISION);
- DUMPREG(DSS_SYSCONFIG);
- DUMPREG(DSS_SYSSTATUS);
- DUMPREG(DSS_CONTROL);
+ if (dss_runtime_get(dss))
+ return 0;
+
+ DUMPREG(dss, DSS_REVISION);
+ DUMPREG(dss, DSS_SYSCONFIG);
+ DUMPREG(dss, DSS_SYSSTATUS);
+ DUMPREG(dss, DSS_CONTROL);
- if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
- DUMPREG(DSS_SDI_CONTROL);
- DUMPREG(DSS_PLL_CONTROL);
- DUMPREG(DSS_SDI_STATUS);
+ if (dss->feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
+ DUMPREG(dss, DSS_SDI_CONTROL);
+ DUMPREG(dss, DSS_PLL_CONTROL);
+ DUMPREG(dss, DSS_SDI_STATUS);
}
- dss_runtime_put();
+ dss_runtime_put(dss);
#undef DUMPREG
+ return 0;
+}
+
+static int dss_debug_dump_clocks(struct seq_file *s, void *p)
+{
+ struct dss_device *dss = s->private;
+
+ dss_dump_clocks(dss, s);
+ dispc_dump_clocks(dss->dispc, s);
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_dump_clocks(s);
+#endif
+ return 0;
}
static int dss_get_channel_index(enum omap_channel channel)
@@ -424,7 +415,8 @@ static int dss_get_channel_index(enum omap_channel channel)
}
}
-static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
+static void dss_select_dispc_clk_source(struct dss_device *dss,
+ enum dss_clk_source clk_src)
{
int b;
@@ -432,7 +424,7 @@ static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
* We always use PRCM clock as the DISPC func clock, except on DSS3,
* where we don't have separate DISPC and LCD clock sources.
*/
- if (WARN_ON(dss.feat->has_lcd_clk_src && clk_src != DSS_CLK_SRC_FCK))
+ if (WARN_ON(dss->feat->has_lcd_clk_src && clk_src != DSS_CLK_SRC_FCK))
return;
switch (clk_src) {
@@ -450,15 +442,15 @@ static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
return;
}
- REG_FLD_MOD(DSS_CONTROL, b, /* DISPC_CLK_SWITCH */
- dss.feat->dispc_clk_switch.start,
- dss.feat->dispc_clk_switch.end);
+ REG_FLD_MOD(dss, DSS_CONTROL, b, /* DISPC_CLK_SWITCH */
+ dss->feat->dispc_clk_switch.start,
+ dss->feat->dispc_clk_switch.end);
- dss.dispc_clk_source = clk_src;
+ dss->dispc_clk_source = clk_src;
}
-void dss_select_dsi_clk_source(int dsi_module,
- enum dss_clk_source clk_src)
+void dss_select_dsi_clk_source(struct dss_device *dss, int dsi_module,
+ enum dss_clk_source clk_src)
{
int b, pos;
@@ -480,13 +472,14 @@ void dss_select_dsi_clk_source(int dsi_module,
}
pos = dsi_module == 0 ? 1 : 10;
- REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */
+ REG_FLD_MOD(dss, DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */
- dss.dsi_clk_source[dsi_module] = clk_src;
+ dss->dsi_clk_source[dsi_module] = clk_src;
}
-static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
- enum dss_clk_source clk_src)
+static int dss_lcd_clk_mux_dra7(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src)
{
const u8 ctrl_bits[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
@@ -499,21 +492,22 @@ static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
if (clk_src == DSS_CLK_SRC_FCK) {
/* LCDx_CLK_SWITCH */
- REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
return -EINVAL;
}
- r = dss_ctrl_pll_set_control_mux(clk_src, channel);
+ r = dss_ctrl_pll_set_control_mux(dss, clk_src, channel);
if (r)
return r;
- REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
return 0;
}
-static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
- enum dss_clk_source clk_src)
+static int dss_lcd_clk_mux_omap5(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src)
{
const u8 ctrl_bits[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
@@ -530,20 +524,21 @@ static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
if (clk_src == DSS_CLK_SRC_FCK) {
/* LCDx_CLK_SWITCH */
- REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
return -EINVAL;
}
if (WARN_ON(allowed_plls[channel] != clk_src))
return -EINVAL;
- REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
return 0;
}
-static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
- enum dss_clk_source clk_src)
+static int dss_lcd_clk_mux_omap4(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src)
{
const u8 ctrl_bits[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
@@ -558,87 +553,90 @@ static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
if (clk_src == DSS_CLK_SRC_FCK) {
/* LCDx_CLK_SWITCH */
- REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
return 0;
}
if (WARN_ON(allowed_plls[channel] != clk_src))
return -EINVAL;
- REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
return 0;
}
-void dss_select_lcd_clk_source(enum omap_channel channel,
- enum dss_clk_source clk_src)
+void dss_select_lcd_clk_source(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src)
{
int idx = dss_get_channel_index(channel);
int r;
- if (!dss.feat->has_lcd_clk_src) {
- dss_select_dispc_clk_source(clk_src);
- dss.lcd_clk_source[idx] = clk_src;
+ if (!dss->feat->has_lcd_clk_src) {
+ dss_select_dispc_clk_source(dss, clk_src);
+ dss->lcd_clk_source[idx] = clk_src;
return;
}
- r = dss.feat->ops->select_lcd_source(channel, clk_src);
+ r = dss->feat->ops->select_lcd_source(dss, channel, clk_src);
if (r)
return;
- dss.lcd_clk_source[idx] = clk_src;
+ dss->lcd_clk_source[idx] = clk_src;
}
-enum dss_clk_source dss_get_dispc_clk_source(void)
+enum dss_clk_source dss_get_dispc_clk_source(struct dss_device *dss)
{
- return dss.dispc_clk_source;
+ return dss->dispc_clk_source;
}
-enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
+enum dss_clk_source dss_get_dsi_clk_source(struct dss_device *dss,
+ int dsi_module)
{
- return dss.dsi_clk_source[dsi_module];
+ return dss->dsi_clk_source[dsi_module];
}
-enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
+enum dss_clk_source dss_get_lcd_clk_source(struct dss_device *dss,
+ enum omap_channel channel)
{
- if (dss.feat->has_lcd_clk_src) {
+ if (dss->feat->has_lcd_clk_src) {
int idx = dss_get_channel_index(channel);
- return dss.lcd_clk_source[idx];
+ return dss->lcd_clk_source[idx];
} else {
/* LCD_CLK source is the same as DISPC_FCLK source for
* OMAP2 and OMAP3 */
- return dss.dispc_clk_source;
+ return dss->dispc_clk_source;
}
}
-bool dss_div_calc(unsigned long pck, unsigned long fck_min,
- dss_div_calc_func func, void *data)
+bool dss_div_calc(struct dss_device *dss, unsigned long pck,
+ unsigned long fck_min, dss_div_calc_func func, void *data)
{
int fckd, fckd_start, fckd_stop;
unsigned long fck;
unsigned long fck_hw_max;
unsigned long fckd_hw_max;
unsigned long prate;
- unsigned m;
+ unsigned int m;
- fck_hw_max = dss.feat->fck_freq_max;
+ fck_hw_max = dss->feat->fck_freq_max;
- if (dss.parent_clk == NULL) {
- unsigned pckd;
+ if (dss->parent_clk == NULL) {
+ unsigned int pckd;
pckd = fck_hw_max / pck;
fck = pck * pckd;
- fck = clk_round_rate(dss.dss_clk, fck);
+ fck = clk_round_rate(dss->dss_clk, fck);
return func(fck, data);
}
- fckd_hw_max = dss.feat->fck_div_max;
+ fckd_hw_max = dss->feat->fck_div_max;
- m = dss.feat->dss_fck_multiplier;
- prate = clk_get_rate(dss.parent_clk);
+ m = dss->feat->dss_fck_multiplier;
+ prate = clk_get_rate(dss->parent_clk);
fck_min = fck_min ? fck_min : 1;
@@ -655,67 +653,68 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
return false;
}
-int dss_set_fck_rate(unsigned long rate)
+int dss_set_fck_rate(struct dss_device *dss, unsigned long rate)
{
int r;
DSSDBG("set fck to %lu\n", rate);
- r = clk_set_rate(dss.dss_clk, rate);
+ r = clk_set_rate(dss->dss_clk, rate);
if (r)
return r;
- dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
+ dss->dss_clk_rate = clk_get_rate(dss->dss_clk);
- WARN_ONCE(dss.dss_clk_rate != rate,
- "clk rate mismatch: %lu != %lu", dss.dss_clk_rate,
- rate);
+ WARN_ONCE(dss->dss_clk_rate != rate, "clk rate mismatch: %lu != %lu",
+ dss->dss_clk_rate, rate);
return 0;
}
-unsigned long dss_get_dispc_clk_rate(void)
+unsigned long dss_get_dispc_clk_rate(struct dss_device *dss)
{
- return dss.dss_clk_rate;
+ return dss->dss_clk_rate;
}
-unsigned long dss_get_max_fck_rate(void)
+unsigned long dss_get_max_fck_rate(struct dss_device *dss)
{
- return dss.feat->fck_freq_max;
+ return dss->feat->fck_freq_max;
}
-enum omap_dss_output_id dss_get_supported_outputs(enum omap_channel channel)
+enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss,
+ enum omap_channel channel)
{
- return dss.feat->outputs[channel];
+ return dss->feat->outputs[channel];
}
-static int dss_setup_default_clock(void)
+static int dss_setup_default_clock(struct dss_device *dss)
{
unsigned long max_dss_fck, prate;
unsigned long fck;
- unsigned fck_div;
+ unsigned int fck_div;
int r;
- max_dss_fck = dss.feat->fck_freq_max;
+ max_dss_fck = dss->feat->fck_freq_max;
- if (dss.parent_clk == NULL) {
- fck = clk_round_rate(dss.dss_clk, max_dss_fck);
+ if (dss->parent_clk == NULL) {
+ fck = clk_round_rate(dss->dss_clk, max_dss_fck);
} else {
- prate = clk_get_rate(dss.parent_clk);
+ prate = clk_get_rate(dss->parent_clk);
- fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
+ fck_div = DIV_ROUND_UP(prate * dss->feat->dss_fck_multiplier,
max_dss_fck);
- fck = DIV_ROUND_UP(prate, fck_div) * dss.feat->dss_fck_multiplier;
+ fck = DIV_ROUND_UP(prate, fck_div)
+ * dss->feat->dss_fck_multiplier;
}
- r = dss_set_fck_rate(fck);
+ r = dss_set_fck_rate(dss, fck);
if (r)
return r;
return 0;
}
-void dss_set_venc_output(enum omap_dss_venc_type type)
+void dss_set_venc_output(struct dss_device *dss, enum omap_dss_venc_type type)
{
int l = 0;
@@ -727,19 +726,21 @@ void dss_set_venc_output(enum omap_dss_venc_type type)
BUG();
/* venc out selection. 0 = comp, 1 = svideo */
- REG_FLD_MOD(DSS_CONTROL, l, 6, 6);
+ REG_FLD_MOD(dss, DSS_CONTROL, l, 6, 6);
}
-void dss_set_dac_pwrdn_bgz(bool enable)
+void dss_set_dac_pwrdn_bgz(struct dss_device *dss, bool enable)
{
- REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
+ /* DAC Power-Down Control */
+ REG_FLD_MOD(dss, DSS_CONTROL, enable, 5, 5);
}
-void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select src)
+void dss_select_hdmi_venc_clk_source(struct dss_device *dss,
+ enum dss_hdmi_venc_clk_source_select src)
{
enum omap_dss_output_id outputs;
- outputs = dss.feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
+ outputs = dss->feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
/* Complain about invalid selections */
WARN_ON((src == DSS_VENC_TV_CLK) && !(outputs & OMAP_DSS_OUTPUT_VENC));
@@ -748,24 +749,12 @@ void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select src)
/* Select only if we have options */
if ((outputs & OMAP_DSS_OUTPUT_VENC) &&
(outputs & OMAP_DSS_OUTPUT_HDMI))
- REG_FLD_MOD(DSS_CONTROL, src, 15, 15); /* VENC_HDMI_SWITCH */
-}
-
-enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
-{
- enum omap_dss_output_id outputs;
-
- outputs = dss.feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
- if ((outputs & OMAP_DSS_OUTPUT_HDMI) == 0)
- return DSS_VENC_TV_CLK;
-
- if ((outputs & OMAP_DSS_OUTPUT_VENC) == 0)
- return DSS_HDMI_M_PCLK;
-
- return REG_GET(DSS_CONTROL, 15, 15);
+ /* VENC_HDMI_SWITCH */
+ REG_FLD_MOD(dss, DSS_CONTROL, src, 15, 15);
}
-static int dss_dpi_select_source_omap2_omap3(int port, enum omap_channel channel)
+static int dss_dpi_select_source_omap2_omap3(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
if (channel != OMAP_DSS_CHANNEL_LCD)
return -EINVAL;
@@ -773,7 +762,8 @@ static int dss_dpi_select_source_omap2_omap3(int port, enum omap_channel channel
return 0;
}
-static int dss_dpi_select_source_omap4(int port, enum omap_channel channel)
+static int dss_dpi_select_source_omap4(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
int val;
@@ -788,12 +778,13 @@ static int dss_dpi_select_source_omap4(int port, enum omap_channel channel)
return -EINVAL;
}
- REG_FLD_MOD(DSS_CONTROL, val, 17, 17);
+ REG_FLD_MOD(dss, DSS_CONTROL, val, 17, 17);
return 0;
}
-static int dss_dpi_select_source_omap5(int port, enum omap_channel channel)
+static int dss_dpi_select_source_omap5(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
int val;
@@ -814,16 +805,17 @@ static int dss_dpi_select_source_omap5(int port, enum omap_channel channel)
return -EINVAL;
}
- REG_FLD_MOD(DSS_CONTROL, val, 17, 16);
+ REG_FLD_MOD(dss, DSS_CONTROL, val, 17, 16);
return 0;
}
-static int dss_dpi_select_source_dra7xx(int port, enum omap_channel channel)
+static int dss_dpi_select_source_dra7xx(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
switch (port) {
case 0:
- return dss_dpi_select_source_omap5(port, channel);
+ return dss_dpi_select_source_omap5(dss, port, channel);
case 1:
if (channel != OMAP_DSS_CHANNEL_LCD2)
return -EINVAL;
@@ -839,135 +831,153 @@ static int dss_dpi_select_source_dra7xx(int port, enum omap_channel channel)
return 0;
}
-int dss_dpi_select_source(int port, enum omap_channel channel)
+int dss_dpi_select_source(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
- return dss.feat->ops->dpi_select_source(port, channel);
+ return dss->feat->ops->dpi_select_source(dss, port, channel);
}
-static int dss_get_clocks(void)
+static int dss_get_clocks(struct dss_device *dss)
{
struct clk *clk;
- clk = devm_clk_get(&dss.pdev->dev, "fck");
+ clk = devm_clk_get(&dss->pdev->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get clock fck\n");
return PTR_ERR(clk);
}
- dss.dss_clk = clk;
+ dss->dss_clk = clk;
- if (dss.feat->parent_clk_name) {
- clk = clk_get(NULL, dss.feat->parent_clk_name);
+ if (dss->feat->parent_clk_name) {
+ clk = clk_get(NULL, dss->feat->parent_clk_name);
if (IS_ERR(clk)) {
- DSSERR("Failed to get %s\n", dss.feat->parent_clk_name);
+ DSSERR("Failed to get %s\n",
+ dss->feat->parent_clk_name);
return PTR_ERR(clk);
}
} else {
clk = NULL;
}
- dss.parent_clk = clk;
+ dss->parent_clk = clk;
return 0;
}
-static void dss_put_clocks(void)
+static void dss_put_clocks(struct dss_device *dss)
{
- if (dss.parent_clk)
- clk_put(dss.parent_clk);
+ if (dss->parent_clk)
+ clk_put(dss->parent_clk);
}
-int dss_runtime_get(void)
+int dss_runtime_get(struct dss_device *dss)
{
int r;
DSSDBG("dss_runtime_get\n");
- r = pm_runtime_get_sync(&dss.pdev->dev);
+ r = pm_runtime_get_sync(&dss->pdev->dev);
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
-void dss_runtime_put(void)
+void dss_runtime_put(struct dss_device *dss)
{
int r;
DSSDBG("dss_runtime_put\n");
- r = pm_runtime_put_sync(&dss.pdev->dev);
+ r = pm_runtime_put_sync(&dss->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
}
-/* DEBUGFS */
-#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-static void dss_debug_dump_clocks(struct seq_file *s)
+struct dss_device *dss_get_device(struct device *dev)
{
- dss_dump_clocks(s);
- dispc_dump_clocks(s);
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_dump_clocks(s);
-#endif
+ return dev_get_drvdata(dev);
}
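dss_get_device() is the hook through which component sub-drivers recover this per-device state from the master device that called platform_set_drvdata() in dss_probe(). A minimal bind-callback sketch (the sub-driver names are placeholders; it mirrors what hdmi4_bind() does further down in this patch):

/* Sketch only: a component sub-driver picking up the dss_device at bind time. */
static int my_sub_bind(struct device *dev, struct device *master, void *data)
{
	struct dss_device *dss = dss_get_device(master);

	/* ... allocate the sub-driver state and keep the dss pointer in it ... */
	return 0;
}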
-static int dss_debug_show(struct seq_file *s, void *unused)
+/* DEBUGFS */
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
+static int dss_initialize_debugfs(struct dss_device *dss)
{
- void (*func)(struct seq_file *) = s->private;
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("omapdss", NULL);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ dss->debugfs.root = dir;
- func(s);
return 0;
}
+static void dss_uninitialize_debugfs(struct dss_device *dss)
+{
+ debugfs_remove_recursive(dss->debugfs.root);
+}
+
+struct dss_debugfs_entry {
+ struct dentry *dentry;
+ int (*show_fn)(struct seq_file *s, void *data);
+ void *data;
+};
+
static int dss_debug_open(struct inode *inode, struct file *file)
{
- return single_open(file, dss_debug_show, inode->i_private);
+ struct dss_debugfs_entry *entry = inode->i_private;
+
+ return single_open(file, entry->show_fn, entry->data);
}
static const struct file_operations dss_debug_fops = {
- .open = dss_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .open = dss_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-static struct dentry *dss_debugfs_dir;
-
-static int dss_initialize_debugfs(void)
+struct dss_debugfs_entry *
+dss_debugfs_create_file(struct dss_device *dss, const char *name,
+ int (*show_fn)(struct seq_file *s, void *data),
+ void *data)
{
- dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
- if (IS_ERR(dss_debugfs_dir)) {
- int err = PTR_ERR(dss_debugfs_dir);
+ struct dss_debugfs_entry *entry;
+ struct dentry *d;
- dss_debugfs_dir = NULL;
- return err;
- }
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
- debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
- &dss_debug_dump_clocks, &dss_debug_fops);
+ entry->show_fn = show_fn;
+ entry->data = data;
- return 0;
-}
+ d = debugfs_create_file(name, 0444, dss->debugfs.root, entry,
+ &dss_debug_fops);
+ if (IS_ERR(d)) {
+ kfree(entry);
+ return ERR_PTR(PTR_ERR(d));
+ }
-static void dss_uninitialize_debugfs(void)
-{
- if (dss_debugfs_dir)
- debugfs_remove_recursive(dss_debugfs_dir);
+ entry->dentry = d;
+ return entry;
}
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+void dss_debugfs_remove_file(struct dss_debugfs_entry *entry)
{
- struct dentry *d;
-
- d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
- write, &dss_debug_fops);
+ if (IS_ERR_OR_NULL(entry))
+ return;
- return PTR_ERR_OR_ZERO(d);
+ debugfs_remove(entry->dentry);
+ kfree(entry);
}
+
#else /* CONFIG_OMAP2_DSS_DEBUGFS */
-static inline int dss_initialize_debugfs(void)
+static inline int dss_initialize_debugfs(struct dss_device *dss)
{
return 0;
}
-static inline void dss_uninitialize_debugfs(void)
+static inline void dss_uninitialize_debugfs(struct dss_device *dss)
{
}
#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
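The debugfs interface changes from a void (*)(struct seq_file *) dump callback to a seq_file show handler plus an opaque data pointer, returned as a heap-allocated entry handle. A minimal consumer sketch under the new API (the names below are illustrative, not part of this patch):

/*
 * Usage sketch: single_open() stores the registered data pointer in
 * s->private, so the show handler reads it back from there.
 */
static int my_dump_regs(struct seq_file *s, void *unused)
{
	struct my_device *md = s->private;

	seq_printf(s, "revision: %08x\n", md->rev);
	return 0;
}

	/* At init time, keep the returned handle so the file can be removed later. */
	md->debugfs_entry = dss_debugfs_create_file(dss, "my_regs", my_dump_regs, md);

	/* At teardown; dss_debugfs_remove_file() tolerates ERR/NULL handles. */
	dss_debugfs_remove_file(md->debugfs_entry);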
@@ -1168,23 +1178,24 @@ static const struct dss_features dra7xx_dss_feats = {
.has_lcd_clk_src = true,
};
-static int dss_init_ports(struct platform_device *pdev)
+static int dss_init_ports(struct dss_device *dss)
{
+ struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
int i;
- for (i = 0; i < dss.feat->num_ports; i++) {
+ for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
- switch (dss.feat->ports[i]) {
+ switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_init_port(pdev, port, dss.feat->model);
+ dpi_init_port(dss, pdev, port, dss->feat->model);
break;
case OMAP_DISPLAY_TYPE_SDI:
- sdi_init_port(pdev, port);
+ sdi_init_port(dss, pdev, port);
break;
default:
break;
@@ -1194,18 +1205,19 @@ static int dss_init_ports(struct platform_device *pdev)
return 0;
}
-static void dss_uninit_ports(struct platform_device *pdev)
+static void dss_uninit_ports(struct dss_device *dss)
{
+ struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
int i;
- for (i = 0; i < dss.feat->num_ports; i++) {
+ for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
- switch (dss.feat->ports[i]) {
+ switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
dpi_uninit_port(port);
break;
@@ -1218,8 +1230,9 @@ static void dss_uninit_ports(struct platform_device *pdev)
}
}
-static int dss_video_pll_probe(struct platform_device *pdev)
+static int dss_video_pll_probe(struct dss_device *dss)
{
+ struct platform_device *pdev = dss->pdev;
struct device_node *np = pdev->dev.of_node;
struct regulator *pll_regulator;
int r;
@@ -1228,16 +1241,16 @@ static int dss_video_pll_probe(struct platform_device *pdev)
return 0;
if (of_property_read_bool(np, "syscon-pll-ctrl")) {
- dss.syscon_pll_ctrl = syscon_regmap_lookup_by_phandle(np,
+ dss->syscon_pll_ctrl = syscon_regmap_lookup_by_phandle(np,
"syscon-pll-ctrl");
- if (IS_ERR(dss.syscon_pll_ctrl)) {
+ if (IS_ERR(dss->syscon_pll_ctrl)) {
dev_err(&pdev->dev,
"failed to get syscon-pll-ctrl regmap\n");
- return PTR_ERR(dss.syscon_pll_ctrl);
+ return PTR_ERR(dss->syscon_pll_ctrl);
}
if (of_property_read_u32_index(np, "syscon-pll-ctrl", 1,
- &dss.syscon_pll_ctrl_offset)) {
+ &dss->syscon_pll_ctrl_offset)) {
dev_err(&pdev->dev,
"failed to get syscon-pll-ctrl offset\n");
return -EINVAL;
@@ -1263,16 +1276,18 @@ static int dss_video_pll_probe(struct platform_device *pdev)
}
if (of_property_match_string(np, "reg-names", "pll1") >= 0) {
- dss.video1_pll = dss_video_pll_init(pdev, 0, pll_regulator);
- if (IS_ERR(dss.video1_pll))
- return PTR_ERR(dss.video1_pll);
+ dss->video1_pll = dss_video_pll_init(dss, pdev, 0,
+ pll_regulator);
+ if (IS_ERR(dss->video1_pll))
+ return PTR_ERR(dss->video1_pll);
}
if (of_property_match_string(np, "reg-names", "pll2") >= 0) {
- dss.video2_pll = dss_video_pll_init(pdev, 1, pll_regulator);
- if (IS_ERR(dss.video2_pll)) {
- dss_video_pll_uninit(dss.video1_pll);
- return PTR_ERR(dss.video2_pll);
+ dss->video2_pll = dss_video_pll_init(dss, pdev, 1,
+ pll_regulator);
+ if (IS_ERR(dss->video2_pll)) {
+ dss_video_pll_uninit(dss->video1_pll);
+ return PTR_ERR(dss->video2_pll);
}
}
@@ -1299,109 +1314,26 @@ static const struct soc_device_attribute dss_soc_devices[] = {
static int dss_bind(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *dss_mem;
- u32 rev;
+ struct dss_device *dss = dev_get_drvdata(dev);
int r;
- dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
- dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
- if (IS_ERR(dss.base))
- return PTR_ERR(dss.base);
-
- r = dss_get_clocks();
+ r = component_bind_all(dev, NULL);
if (r)
return r;
- r = dss_setup_default_clock();
- if (r)
- goto err_setup_clocks;
-
- r = dss_video_pll_probe(pdev);
- if (r)
- goto err_pll_init;
-
- r = dss_init_ports(pdev);
- if (r)
- goto err_init_ports;
-
- pm_runtime_enable(&pdev->dev);
-
- r = dss_runtime_get();
- if (r)
- goto err_runtime_get;
-
- dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
-
- /* Select DPLL */
- REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
-
- dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
-
-#ifdef CONFIG_OMAP2_DSS_VENC
- REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
- REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
- REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
-#endif
- dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
- dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
- dss.dispc_clk_source = DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
-
- rev = dss_read_reg(DSS_REVISION);
- pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- dss_runtime_put();
-
- r = component_bind_all(&pdev->dev, NULL);
- if (r)
- goto err_component;
-
- dss_debugfs_create_file("dss", dss_dump_regs);
-
pm_set_vt_switch(0);
omapdss_gather_components(dev);
- omapdss_set_is_initialized(true);
+ omapdss_set_dss(dss);
return 0;
-
-err_component:
-err_runtime_get:
- pm_runtime_disable(&pdev->dev);
- dss_uninit_ports(pdev);
-err_init_ports:
- if (dss.video1_pll)
- dss_video_pll_uninit(dss.video1_pll);
-
- if (dss.video2_pll)
- dss_video_pll_uninit(dss.video2_pll);
-err_pll_init:
-err_setup_clocks:
- dss_put_clocks();
- return r;
}
static void dss_unbind(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
-
- omapdss_set_is_initialized(false);
-
- component_unbind_all(&pdev->dev, NULL);
-
- if (dss.video1_pll)
- dss_video_pll_uninit(dss.video1_pll);
-
- if (dss.video2_pll)
- dss_video_pll_uninit(dss.video2_pll);
-
- dss_uninit_ports(pdev);
-
- pm_runtime_disable(&pdev->dev);
+ omapdss_set_dss(NULL);
- dss_put_clocks();
+ component_unbind_all(dev, NULL);
}
static const struct component_master_ops dss_component_ops = {
@@ -1433,13 +1365,61 @@ static int dss_add_child_component(struct device *dev, void *data)
return 0;
}
+static int dss_probe_hardware(struct dss_device *dss)
+{
+ u32 rev;
+ int r;
+
+ r = dss_runtime_get(dss);
+ if (r)
+ return r;
+
+ dss->dss_clk_rate = clk_get_rate(dss->dss_clk);
+
+ /* Select DPLL */
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, 0, 0);
+
+ dss_select_dispc_clk_source(dss, DSS_CLK_SRC_FCK);
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, 4, 4); /* venc dac demen */
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
+#endif
+ dss->dsi_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss->dsi_clk_source[1] = DSS_CLK_SRC_FCK;
+ dss->dispc_clk_source = DSS_CLK_SRC_FCK;
+ dss->lcd_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss->lcd_clk_source[1] = DSS_CLK_SRC_FCK;
+
+ rev = dss_read_reg(dss, DSS_REVISION);
+ pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ dss_runtime_put(dss);
+
+ return 0;
+}
+
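For context, the register accessors used above (dss_read_reg(), REG_FLD_MOD()) are now keyed off the instance's mapped base instead of a file-scope global. A rough sketch of that shape, assuming plain u32 register offsets for brevity (the real helper bodies are not part of this hunk):

/* Sketch only: per-instance MMIO accessors derived from dss->base. */
static inline void dss_write_reg(struct dss_device *dss, u32 idx, u32 val)
{
	__raw_writel(val, dss->base + idx);
}

static inline u32 dss_read_reg(struct dss_device *dss, u32 idx)
{
	return __raw_readl(dss->base + idx);
}

#define REG_FLD_MOD(dss, idx, val, start, end) \
	dss_write_reg(dss, idx, \
		      FLD_MOD(dss_read_reg(dss, idx), val, start, end))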
static int dss_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
struct component_match *match = NULL;
+ struct resource *dss_mem;
+ struct dss_device *dss;
int r;
- dss.pdev = pdev;
+ dss = kzalloc(sizeof(*dss), GFP_KERNEL);
+ if (!dss)
+ return -ENOMEM;
+
+ dss->pdev = pdev;
+ platform_set_drvdata(pdev, dss);
+
+ r = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (r) {
+ dev_err(&pdev->dev, "Failed to set the DMA mask\n");
+ goto err_free_dss;
+ }
/*
* The various OMAP3-based SoCs can't be told apart using the compatible
@@ -1447,31 +1427,108 @@ static int dss_probe(struct platform_device *pdev)
*/
soc = soc_device_match(dss_soc_devices);
if (soc)
- dss.feat = soc->data;
+ dss->feat = soc->data;
else
- dss.feat = of_match_device(dss_of_match, &pdev->dev)->data;
+ dss->feat = of_match_device(dss_of_match, &pdev->dev)->data;
+
+ /* Map I/O registers, get and setup clocks. */
+ dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dss->base = devm_ioremap_resource(&pdev->dev, dss_mem);
+ if (IS_ERR(dss->base)) {
+ r = PTR_ERR(dss->base);
+ goto err_free_dss;
+ }
- r = dss_initialize_debugfs();
+ r = dss_get_clocks(dss);
if (r)
- return r;
+ goto err_free_dss;
+
+ r = dss_setup_default_clock(dss);
+ if (r)
+ goto err_put_clocks;
+
+ /* Setup the video PLLs and the DPI and SDI ports. */
+ r = dss_video_pll_probe(dss);
+ if (r)
+ goto err_put_clocks;
+
+ r = dss_init_ports(dss);
+ if (r)
+ goto err_uninit_plls;
+
+ /* Enable runtime PM and probe the hardware. */
+ pm_runtime_enable(&pdev->dev);
+
+ r = dss_probe_hardware(dss);
+ if (r)
+ goto err_pm_runtime_disable;
+
+ /* Initialize debugfs. */
+ r = dss_initialize_debugfs(dss);
+ if (r)
+ goto err_pm_runtime_disable;
+
+ dss->debugfs.clk = dss_debugfs_create_file(dss, "clk",
+ dss_debug_dump_clocks, dss);
+ dss->debugfs.dss = dss_debugfs_create_file(dss, "dss", dss_dump_regs,
+ dss);
- /* add all the child devices as components */
+ /* Add all the child devices as components. */
device_for_each_child(&pdev->dev, &match, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
- if (r) {
- dss_uninitialize_debugfs();
- return r;
- }
+ if (r)
+ goto err_uninit_debugfs;
return 0;
+
+err_uninit_debugfs:
+ dss_debugfs_remove_file(dss->debugfs.clk);
+ dss_debugfs_remove_file(dss->debugfs.dss);
+ dss_uninitialize_debugfs(dss);
+
+err_pm_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ dss_uninit_ports(dss);
+
+err_uninit_plls:
+ if (dss->video1_pll)
+ dss_video_pll_uninit(dss->video1_pll);
+ if (dss->video2_pll)
+ dss_video_pll_uninit(dss->video2_pll);
+
+err_put_clocks:
+ dss_put_clocks(dss);
+
+err_free_dss:
+ kfree(dss);
+
+ return r;
}
static int dss_remove(struct platform_device *pdev)
{
+ struct dss_device *dss = platform_get_drvdata(pdev);
+
component_master_del(&pdev->dev, &dss_component_ops);
- dss_uninitialize_debugfs();
+ dss_debugfs_remove_file(dss->debugfs.clk);
+ dss_debugfs_remove_file(dss->debugfs.dss);
+ dss_uninitialize_debugfs(dss);
+
+ pm_runtime_disable(&pdev->dev);
+
+ dss_uninit_ports(dss);
+
+ if (dss->video1_pll)
+ dss_video_pll_uninit(dss->video1_pll);
+
+ if (dss->video2_pll)
+ dss_video_pll_uninit(dss->video2_pll);
+
+ dss_put_clocks(dss);
+
+ kfree(dss);
return 0;
}
@@ -1493,7 +1550,9 @@ static void dss_shutdown(struct platform_device *pdev)
static int dss_runtime_suspend(struct device *dev)
{
- dss_save_context();
+ struct dss_device *dss = dev_get_drvdata(dev);
+
+ dss_save_context(dss);
dss_set_min_bus_tput(dev, 0);
pinctrl_pm_select_sleep_state(dev);
@@ -1503,6 +1562,7 @@ static int dss_runtime_suspend(struct device *dev)
static int dss_runtime_resume(struct device *dev)
{
+ struct dss_device *dss = dev_get_drvdata(dev);
int r;
pinctrl_pm_select_default_state(dev);
@@ -1518,7 +1578,7 @@ static int dss_runtime_resume(struct device *dev)
if (r)
return r;
- dss_restore_context();
+ dss_restore_context(dss);
return 0;
}
@@ -1527,7 +1587,7 @@ static const struct dev_pm_ops dss_pm_ops = {
.runtime_resume = dss_runtime_resume,
};
-static struct platform_driver omap_dsshw_driver = {
+struct platform_driver omap_dsshw_driver = {
.probe = dss_probe,
.remove = dss_remove,
.shutdown = dss_shutdown,
@@ -1538,13 +1598,3 @@ static struct platform_driver omap_dsshw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init dss_init_platform_driver(void)
-{
- return platform_driver_register(&omap_dsshw_driver);
-}
-
-void dss_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_dsshw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index ed46557..847c78a 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dss.h
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -27,6 +25,11 @@
#include "omapdss.h"
+struct dispc_device;
+struct dss_debugfs_entry;
+struct platform_device;
+struct seq_file;
+
#define MAX_DSS_LCD_MANAGERS 3
#define MAX_NUM_DSI 2
@@ -99,17 +102,6 @@ enum dss_dsi_content_type {
DSS_DSI_CONTENT_GENERIC,
};
-enum dss_writeback_channel {
- DSS_WB_LCD1_MGR = 0,
- DSS_WB_LCD2_MGR = 1,
- DSS_WB_TV_MGR = 2,
- DSS_WB_OVL0 = 3,
- DSS_WB_OVL1 = 4,
- DSS_WB_OVL2 = 5,
- DSS_WB_OVL3 = 6,
- DSS_WB_LCD3_MGR = 7,
-};
-
enum dss_clk_source {
DSS_CLK_SRC_FCK = 0,
@@ -169,10 +161,10 @@ struct dss_pll_ops {
struct dss_pll_hw {
enum dss_pll_type type;
- unsigned n_max;
- unsigned m_min;
- unsigned m_max;
- unsigned mX_max;
+ unsigned int n_max;
+ unsigned int m_min;
+ unsigned int m_max;
+ unsigned int mX_max;
unsigned long fint_min, fint_max;
unsigned long clkdco_min, clkdco_low, clkdco_max;
@@ -193,6 +185,7 @@ struct dss_pll_hw {
struct dss_pll {
const char *name;
enum dss_pll_id id;
+ struct dss_device *dss;
struct clk *clkin;
struct regulator *regulator;
@@ -234,8 +227,44 @@ struct dss_lcd_mgr_config {
int lcden_sig_polarity;
};
-struct seq_file;
-struct platform_device;
+#define DSS_SZ_REGS SZ_512
+
+struct dss_device {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct regmap *syscon_pll_ctrl;
+ u32 syscon_pll_ctrl_offset;
+
+ struct clk *parent_clk;
+ struct clk *dss_clk;
+ unsigned long dss_clk_rate;
+
+ unsigned long cache_req_pck;
+ unsigned long cache_prate;
+ struct dispc_clock_info cache_dispc_cinfo;
+
+ enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
+ enum dss_clk_source dispc_clk_source;
+ enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
+
+ bool ctx_valid;
+ u32 ctx[DSS_SZ_REGS / sizeof(u32)];
+
+ const struct dss_features *feat;
+
+ struct {
+ struct dentry *root;
+ struct dss_debugfs_entry *clk;
+ struct dss_debugfs_entry *dss;
+ } debugfs;
+
+ struct dss_pll *plls[4];
+ struct dss_pll *video1_pll;
+ struct dss_pll *video2_pll;
+
+ struct dispc_device *dispc;
+ const struct dispc_ops *dispc_ops;
+};
/* core */
static inline int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
@@ -255,65 +284,81 @@ static inline bool dss_mgr_is_lcd(enum omap_channel id)
/* DSS */
#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+struct dss_debugfs_entry *
+dss_debugfs_create_file(struct dss_device *dss, const char *name,
+ int (*show_fn)(struct seq_file *s, void *data),
+ void *data);
+void dss_debugfs_remove_file(struct dss_debugfs_entry *entry);
#else
-static inline int dss_debugfs_create_file(const char *name,
- void (*write)(struct seq_file *))
+static inline struct dss_debugfs_entry *
+dss_debugfs_create_file(struct dss_device *dss, const char *name,
+ int (*show_fn)(struct seq_file *s, void *data),
+ void *data)
+{
+ return NULL;
+}
+
+static inline void dss_debugfs_remove_file(struct dss_debugfs_entry *entry)
{
- return 0;
}
#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
-int dss_init_platform_driver(void) __init;
-void dss_uninit_platform_driver(void);
+struct dss_device *dss_get_device(struct device *dev);
-int dss_runtime_get(void);
-void dss_runtime_put(void);
+int dss_runtime_get(struct dss_device *dss);
+void dss_runtime_put(struct dss_device *dss);
-unsigned long dss_get_dispc_clk_rate(void);
-unsigned long dss_get_max_fck_rate(void);
-enum omap_dss_output_id dss_get_supported_outputs(enum omap_channel channel);
-int dss_dpi_select_source(int port, enum omap_channel channel);
-void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
-enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
+unsigned long dss_get_dispc_clk_rate(struct dss_device *dss);
+unsigned long dss_get_max_fck_rate(struct dss_device *dss);
+enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss,
+ enum omap_channel channel);
+int dss_dpi_select_source(struct dss_device *dss, int port,
+ enum omap_channel channel);
+void dss_select_hdmi_venc_clk_source(struct dss_device *dss,
+ enum dss_hdmi_venc_clk_source_select src);
const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
-void dss_dump_clocks(struct seq_file *s);
/* DSS VIDEO PLL */
-struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
- struct regulator *regulator);
+struct dss_pll *dss_video_pll_init(struct dss_device *dss,
+ struct platform_device *pdev, int id,
+ struct regulator *regulator);
void dss_video_pll_uninit(struct dss_pll *pll);
-void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
+void dss_ctrl_pll_enable(struct dss_pll *pll, bool enable);
-void dss_sdi_init(int datapairs);
-int dss_sdi_enable(void);
-void dss_sdi_disable(void);
+void dss_sdi_init(struct dss_device *dss, int datapairs);
+int dss_sdi_enable(struct dss_device *dss);
+void dss_sdi_disable(struct dss_device *dss);
-void dss_select_dsi_clk_source(int dsi_module,
- enum dss_clk_source clk_src);
-void dss_select_lcd_clk_source(enum omap_channel channel,
- enum dss_clk_source clk_src);
-enum dss_clk_source dss_get_dispc_clk_source(void);
-enum dss_clk_source dss_get_dsi_clk_source(int dsi_module);
-enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
+void dss_select_dsi_clk_source(struct dss_device *dss, int dsi_module,
+ enum dss_clk_source clk_src);
+void dss_select_lcd_clk_source(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src);
+enum dss_clk_source dss_get_dispc_clk_source(struct dss_device *dss);
+enum dss_clk_source dss_get_dsi_clk_source(struct dss_device *dss,
+ int dsi_module);
+enum dss_clk_source dss_get_lcd_clk_source(struct dss_device *dss,
+ enum omap_channel channel);
-void dss_set_venc_output(enum omap_dss_venc_type type);
-void dss_set_dac_pwrdn_bgz(bool enable);
+void dss_set_venc_output(struct dss_device *dss, enum omap_dss_venc_type type);
+void dss_set_dac_pwrdn_bgz(struct dss_device *dss, bool enable);
-int dss_set_fck_rate(unsigned long rate);
+int dss_set_fck_rate(struct dss_device *dss, unsigned long rate);
typedef bool (*dss_div_calc_func)(unsigned long fck, void *data);
-bool dss_div_calc(unsigned long pck, unsigned long fck_min,
- dss_div_calc_func func, void *data);
+bool dss_div_calc(struct dss_device *dss, unsigned long pck,
+ unsigned long fck_min, dss_div_calc_func func, void *data);
/* SDI */
#ifdef CONFIG_OMAP2_DSS_SDI
-int sdi_init_port(struct platform_device *pdev, struct device_node *port);
+int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
+ struct device_node *port);
void sdi_uninit_port(struct device_node *port);
#else
-static inline int sdi_init_port(struct platform_device *pdev,
- struct device_node *port)
+static inline int sdi_init_port(struct dss_device *dss,
+ struct platform_device *pdev,
+ struct device_node *port)
{
return 0;
}
@@ -326,12 +371,6 @@ static inline void sdi_uninit_port(struct device_node *port)
#ifdef CONFIG_OMAP2_DSS_DSI
-struct dentry;
-struct file_operations;
-
-int dsi_init_platform_driver(void) __init;
-void dsi_uninit_platform_driver(void);
-
void dsi_dump_clocks(struct seq_file *s);
void dsi_irq_handler(void);
@@ -340,12 +379,14 @@ void dsi_irq_handler(void);
/* DPI */
#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init_port(struct platform_device *pdev, struct device_node *port,
- enum dss_model dss_model);
+int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
+ struct device_node *port, enum dss_model dss_model);
void dpi_uninit_port(struct device_node *port);
#else
-static inline int dpi_init_port(struct platform_device *pdev,
- struct device_node *port, enum dss_model dss_model)
+static inline int dpi_init_port(struct dss_device *dss,
+ struct platform_device *pdev,
+ struct device_node *port,
+ enum dss_model dss_model)
{
return 0;
}
@@ -355,65 +396,49 @@ static inline void dpi_uninit_port(struct device_node *port)
#endif
/* DISPC */
-int dispc_init_platform_driver(void) __init;
-void dispc_uninit_platform_driver(void);
-void dispc_dump_clocks(struct seq_file *s);
+void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s);
-int dispc_runtime_get(void);
-void dispc_runtime_put(void);
+int dispc_runtime_get(struct dispc_device *dispc);
+void dispc_runtime_put(struct dispc_device *dispc);
-void dispc_enable_sidle(void);
-void dispc_disable_sidle(void);
+void dispc_enable_sidle(struct dispc_device *dispc);
+void dispc_disable_sidle(struct dispc_device *dispc);
-void dispc_lcd_enable_signal(bool enable);
-void dispc_pck_free_enable(bool enable);
-void dispc_enable_fifomerge(bool enable);
-void dispc_enable_gamma_table(bool enable);
+void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable);
+void dispc_pck_free_enable(struct dispc_device *dispc, bool enable);
+void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable);
typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data);
-bool dispc_div_calc(unsigned long dispc,
- unsigned long pck_min, unsigned long pck_max,
- dispc_div_calc_func func, void *data);
-
-bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm);
-int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
- struct dispc_clock_info *cinfo);
-
-
-void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
- u32 high);
-void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
- u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
- bool manual_update);
-
-void dispc_mgr_set_clock_div(enum omap_channel channel,
- const struct dispc_clock_info *cinfo);
-int dispc_mgr_get_clock_div(enum omap_channel channel,
- struct dispc_clock_info *cinfo);
-void dispc_set_tv_pclk(unsigned long pclk);
-
-u32 dispc_wb_get_framedone_irq(void);
-bool dispc_wb_go_busy(void);
-void dispc_wb_go(void);
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct videomode *vm);
-
-/* VENC */
-int venc_init_platform_driver(void) __init;
-void venc_uninit_platform_driver(void);
-
-/* HDMI */
-int hdmi4_init_platform_driver(void) __init;
-void hdmi4_uninit_platform_driver(void);
-
-int hdmi5_init_platform_driver(void) __init;
-void hdmi5_uninit_platform_driver(void);
-
+bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq,
+ unsigned long pck_min, unsigned long pck_max,
+ dispc_div_calc_func func, void *data);
+
+bool dispc_mgr_timings_ok(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+int dispc_calc_clock_rates(struct dispc_device *dispc,
+ unsigned long dispc_fclk_rate,
+ struct dispc_clock_info *cinfo);
+
+
+void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 low, u32 high);
+void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u32 *fifo_low, u32 *fifo_high,
+ bool use_fifomerge, bool manual_update);
+
+void dispc_mgr_set_clock_div(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dispc_clock_info *cinfo);
+int dispc_mgr_get_clock_div(struct dispc_device *dispc,
+ enum omap_channel channel,
+ struct dispc_clock_info *cinfo);
+void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
+static inline void dss_collect_irq_stats(u32 irqstatus, unsigned int *irq_arr)
{
int b;
for (b = 0; b < 32; ++b) {
@@ -429,11 +454,12 @@ typedef bool (*dss_pll_calc_func)(int n, int m, unsigned long fint,
typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
void *data);
-int dss_pll_register(struct dss_pll *pll);
+int dss_pll_register(struct dss_device *dss, struct dss_pll *pll);
void dss_pll_unregister(struct dss_pll *pll);
-struct dss_pll *dss_pll_find(const char *name);
-struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src);
-unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
+struct dss_pll *dss_pll_find(struct dss_device *dss, const char *name);
+struct dss_pll *dss_pll_find_by_src(struct dss_device *dss,
+ enum dss_clk_source src);
+unsigned int dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
int dss_pll_enable(struct dss_pll *pll);
void dss_pll_disable(struct dss_pll *pll);
int dss_pll_set_config(struct dss_pll *pll,
@@ -455,4 +481,19 @@ int dss_pll_write_config_type_b(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
int dss_pll_wait_reset_done(struct dss_pll *pll);
+extern struct platform_driver omap_dsshw_driver;
+extern struct platform_driver omap_dispchw_driver;
+#ifdef CONFIG_OMAP2_DSS_DSI
+extern struct platform_driver omap_dsihw_driver;
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+extern struct platform_driver omap_venchw_driver;
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+extern struct platform_driver omapdss_hdmi4hw_driver;
+#endif
+#ifdef CONFIG_OMAP5_DSS_HDMI
+extern struct platform_driver omapdss_hdmi5hw_driver;
+#endif
+
#endif
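With the per-IP __init/uninit wrappers gone, the individual platform_driver structures are exported here so the top-level omapdrm module can presumably register them in one pass; a sketch of what that registration could look like (the array and its location are assumptions, not shown in this hunk):

/* Sketch (assumption): drivers registered together from the core module. */
static struct platform_driver * const omapdss_drivers[] = {
	&omap_dsshw_driver,
	&omap_dispchw_driver,
#ifdef CONFIG_OMAP2_DSS_DSI
	&omap_dsihw_driver,
#endif
#ifdef CONFIG_OMAP2_DSS_VENC
	&omap_venchw_driver,
#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
	&omapdss_hdmi4hw_driver,
#endif
#ifdef CONFIG_OMAP5_DSS_HDMI
	&omapdss_hdmi5hw_driver,
#endif
};

static int __init omapdss_init(void)
{
	return platform_register_drivers(omapdss_drivers,
					 ARRAY_SIZE(omapdss_drivers));
}

static void __exit omapdss_exit(void)
{
	platform_unregister_drivers(omapdss_drivers,
				    ARRAY_SIZE(omapdss_drivers));
}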
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index c2609c44..3aeb4ca 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -29,6 +29,8 @@
#include "omapdss.h"
#include "dss.h"
+struct dss_device;
+
/* HDMI Wrapper */
#define HDMI_WP_REVISION 0x0
@@ -324,8 +326,8 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
/* HDMI PLL funcs */
void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
-int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
- struct hdmi_wp_data *wp);
+int hdmi_pll_init(struct dss_device *dss, struct platform_device *pdev,
+ struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
/* HDMI PHY funcs */
@@ -357,6 +359,9 @@ static inline bool hdmi_mode_has_audio(struct hdmi_config *cfg)
struct omap_hdmi {
struct mutex lock;
struct platform_device *pdev;
+ struct dss_device *dss;
+
+ struct dss_debugfs_entry *debugfs;
struct hdmi_wp_data wp;
struct hdmi_pll_data pll;
@@ -384,4 +389,6 @@ struct omap_hdmi {
bool display_enabled;
};
+#define dssdev_to_hdmi(dssdev) container_of(dssdev, struct omap_hdmi, output)
+
#endif
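dssdev_to_hdmi() replaces the file-scope hdmi singleton: omap_dss_device callbacks receive a pointer to the embedded output member and recover the enclosing omap_hdmi with container_of(). A minimal sketch of the pattern (the callback name is illustrative):

/* Sketch: recovering per-device state from the embedded output device. */
static void my_hdmi_op(struct omap_dss_device *dssdev)
{
	/* expands to container_of(dssdev, struct omap_hdmi, output) */
	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);

	mutex_lock(&hdmi->lock);
	/* ... operate on hdmi->wp, hdmi->core, hdmi->cfg ... */
	mutex_unlock(&hdmi->lock);
}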
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index a598dfd..97c8886 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -1,5 +1,6 @@
/*
* HDMI interface DSS driver for TI's OMAP4 family of SoCs.
+ *
* Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
* Authors: Yong Zhi
* Mythri pk <mythripk@ti.com>
@@ -44,15 +45,13 @@
#include "dss.h"
#include "hdmi.h"
-static struct omap_hdmi hdmi;
-
-static int hdmi_runtime_get(void)
+static int hdmi_runtime_get(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_get\n");
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ r = pm_runtime_get_sync(&hdmi->pdev->dev);
WARN_ON(r < 0);
if (r < 0)
return r;
@@ -60,13 +59,13 @@ static int hdmi_runtime_get(void)
return 0;
}
-static void hdmi_runtime_put(void)
+static void hdmi_runtime_put(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_put\n");
- r = pm_runtime_put_sync(&hdmi.pdev->dev);
+ r = pm_runtime_put_sync(&hdmi->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
@@ -109,14 +108,14 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmi_init_regulator(void)
+static int hdmi_init_regulator(struct omap_hdmi *hdmi)
{
struct regulator *reg;
- if (hdmi.vdda_reg != NULL)
+ if (hdmi->vdda_reg != NULL)
return 0;
- reg = devm_regulator_get(&hdmi.pdev->dev, "vdda");
+ reg = devm_regulator_get(&hdmi->pdev->dev, "vdda");
if (IS_ERR(reg)) {
if (PTR_ERR(reg) != -EPROBE_DEFER)
@@ -124,64 +123,63 @@ static int hdmi_init_regulator(void)
return PTR_ERR(reg);
}
- hdmi.vdda_reg = reg;
+ hdmi->vdda_reg = reg;
return 0;
}
-static int hdmi_power_on_core(struct omap_dss_device *dssdev)
+static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
- if (hdmi.core.core_pwr_cnt++)
+ if (hdmi->core.core_pwr_cnt++)
return 0;
- r = regulator_enable(hdmi.vdda_reg);
+ r = regulator_enable(hdmi->vdda_reg);
if (r)
goto err_reg_enable;
- r = hdmi_runtime_get();
+ r = hdmi_runtime_get(hdmi);
if (r)
goto err_runtime_get;
- hdmi4_core_powerdown_disable(&hdmi.core);
+ hdmi4_core_powerdown_disable(&hdmi->core);
/* Make selection of HDMI in DSS */
- dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+ dss_select_hdmi_venc_clk_source(hdmi->dss, DSS_HDMI_M_PCLK);
- hdmi.core_enabled = true;
+ hdmi->core_enabled = true;
return 0;
err_runtime_get:
- regulator_disable(hdmi.vdda_reg);
+ regulator_disable(hdmi->vdda_reg);
err_reg_enable:
- hdmi.core.core_pwr_cnt--;
+ hdmi->core.core_pwr_cnt--;
return r;
}
-static void hdmi_power_off_core(struct omap_dss_device *dssdev)
+static void hdmi_power_off_core(struct omap_hdmi *hdmi)
{
- if (--hdmi.core.core_pwr_cnt)
+ if (--hdmi->core.core_pwr_cnt)
return;
- hdmi.core_enabled = false;
+ hdmi->core_enabled = false;
- hdmi_runtime_put();
- regulator_disable(hdmi.vdda_reg);
+ hdmi_runtime_put(hdmi);
+ regulator_disable(hdmi->vdda_reg);
}
-static int hdmi_power_on_full(struct omap_dss_device *dssdev)
+static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
struct videomode *vm;
- enum omap_channel channel = dssdev->dispc_channel;
- struct hdmi_wp_data *wp = &hdmi.wp;
+ struct hdmi_wp_data *wp = &hdmi->wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
- unsigned pc;
+ unsigned int pc;
- r = hdmi_power_on_core(dssdev);
+ r = hdmi_power_on_core(hdmi);
if (r)
return r;
@@ -189,7 +187,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi_wp_clear_irqenable(wp, ~HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(wp, ~HDMI_IRQ_CORE);
- vm = &hdmi.cfg.vm;
+ vm = &hdmi->cfg.vm;
DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
vm->vactive);
@@ -201,22 +199,22 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
/* DSS_HDMI_TCLK is bitclk / 10 */
pc *= 10;
- dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+ dss_pll_calc_b(&hdmi->pll.pll, clk_get_rate(hdmi->pll.pll.clkin),
pc, &hdmi_cinfo);
- r = dss_pll_enable(&hdmi.pll.pll);
+ r = dss_pll_enable(&hdmi->pll.pll);
if (r) {
DSSERR("Failed to enable PLL\n");
goto err_pll_enable;
}
- r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo);
+ r = dss_pll_set_config(&hdmi->pll.pll, &hdmi_cinfo);
if (r) {
DSSERR("Failed to configure PLL\n");
goto err_pll_cfg;
}
- r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco,
+ r = hdmi_phy_configure(&hdmi->phy, hdmi_cinfo.clkdco,
hdmi_cinfo.clkout[0]);
if (r) {
DSSDBG("Failed to configure PHY\n");
@@ -227,16 +225,16 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (r)
goto err_phy_pwr;
- hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
+ hdmi4_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
/* tv size */
- dss_mgr_set_timings(channel, vm);
+ dss_mgr_set_timings(&hdmi->output, vm);
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
- r = hdmi_wp_video_start(&hdmi.wp);
+ r = hdmi_wp_video_start(&hdmi->wp);
if (r)
goto err_vid_enable;
@@ -246,39 +244,39 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
return 0;
err_vid_enable:
- dss_mgr_disable(channel);
+ dss_mgr_disable(&hdmi->output);
err_mgr_enable:
- hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
+ hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
err_phy_cfg:
err_pll_cfg:
- dss_pll_disable(&hdmi.pll.pll);
+ dss_pll_disable(&hdmi->pll.pll);
err_pll_enable:
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
return -EIO;
}
-static void hdmi_power_off_full(struct omap_dss_device *dssdev)
+static void hdmi_power_off_full(struct omap_hdmi *hdmi)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ hdmi_wp_clear_irqenable(&hdmi->wp, ~HDMI_IRQ_CORE);
- hdmi_wp_clear_irqenable(&hdmi.wp, ~HDMI_IRQ_CORE);
+ hdmi_wp_video_stop(&hdmi->wp);
- hdmi_wp_video_stop(&hdmi.wp);
+ dss_mgr_disable(&hdmi->output);
- dss_mgr_disable(channel);
+ hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
- hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
+ dss_pll_disable(&hdmi->pll.pll);
- dss_pll_disable(&hdmi.pll.pll);
-
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
@@ -287,52 +285,59 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- mutex_lock(&hdmi.lock);
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ mutex_lock(&hdmi->lock);
- hdmi.cfg.vm = *vm;
+ hdmi->cfg.vm = *vm;
- dispc_set_tv_pclk(vm->pixelclock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- *vm = hdmi.cfg.vm;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ *vm = hdmi->cfg.vm;
}
-static void hdmi_dump_regs(struct seq_file *s)
+static int hdmi_dump_regs(struct seq_file *s, void *p)
{
- mutex_lock(&hdmi.lock);
+ struct omap_hdmi *hdmi = s->private;
- if (hdmi_runtime_get()) {
- mutex_unlock(&hdmi.lock);
- return;
+ mutex_lock(&hdmi->lock);
+
+ if (hdmi_runtime_get(hdmi)) {
+ mutex_unlock(&hdmi->lock);
+ return 0;
}
- hdmi_wp_dump(&hdmi.wp, s);
- hdmi_pll_dump(&hdmi.pll, s);
- hdmi_phy_dump(&hdmi.phy, s);
- hdmi4_core_dump(&hdmi.core, s);
+ hdmi_wp_dump(&hdmi->wp, s);
+ hdmi_pll_dump(&hdmi->pll, s);
+ hdmi_phy_dump(&hdmi->phy, s);
+ hdmi4_core_dump(&hdmi->core, s);
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
+ return 0;
}
-static int read_edid(u8 *buf, int len)
+static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
{
int r;
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- r = hdmi_runtime_get();
+ r = hdmi_runtime_get(hdmi);
BUG_ON(r);
- r = hdmi4_read_edid(&hdmi.core, buf, len);
+ r = hdmi4_read_edid(&hdmi->core, buf, len);
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
return r;
}
@@ -351,112 +356,117 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
static int hdmi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
int r = 0;
DSSDBG("ENTER hdmi_display_enable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- if (!out->dispc_channel_connected) {
+ if (!dssdev->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
r = -ENODEV;
goto err0;
}
- r = hdmi_power_on_full(dssdev);
+ r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
- if (hdmi.audio_configured) {
- r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.vm.pixelclock);
+ if (hdmi->audio_configured) {
+ r = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
- hdmi.audio_abort_cb(&hdmi.pdev->dev);
- hdmi.audio_configured = false;
+ hdmi->audio_abort_cb(&hdmi->pdev->dev);
+ hdmi->audio_configured = false;
}
}
- spin_lock_irqsave(&hdmi.audio_playing_lock, flags);
- if (hdmi.audio_configured && hdmi.audio_playing)
- hdmi_start_audio_stream(&hdmi);
- hdmi.display_enabled = true;
- spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags);
+ spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+ if (hdmi->audio_configured && hdmi->audio_playing)
+ hdmi_start_audio_stream(hdmi);
+ hdmi->display_enabled = true;
+ spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return 0;
err0:
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
{
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
DSSDBG("Enter hdmi_display_disable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- spin_lock_irqsave(&hdmi.audio_playing_lock, flags);
- hdmi_stop_audio_stream(&hdmi);
- hdmi.display_enabled = false;
- spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags);
+ spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+ hdmi_stop_audio_stream(hdmi);
+ hdmi->display_enabled = false;
+ spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
- hdmi_power_off_full(dssdev);
+ hdmi_power_off_full(hdmi);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
-int hdmi4_core_enable(struct omap_dss_device *dssdev)
+int hdmi4_core_enable(struct hdmi_core_data *core)
{
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
int r = 0;
DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- r = hdmi_power_on_core(dssdev);
+ r = hdmi_power_on_core(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return 0;
err0:
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return r;
}
-void hdmi4_core_disable(struct omap_dss_device *dssdev)
+void hdmi4_core_disable(struct hdmi_core_data *core)
{
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+
DSSDBG("Enter omapdss_hdmi4_core_disable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
static int hdmi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
int r;
- r = hdmi_init_regulator();
+ r = hdmi_init_regulator(hdmi);
if (r)
return r;
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&hdmi->output, dssdev);
if (r)
return r;
@@ -464,7 +474,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&hdmi->output, dssdev);
return r;
}
@@ -474,7 +484,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
static void hdmi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -483,51 +493,58 @@ static void hdmi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&hdmi->output, dssdev);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
u8 *edid, int len)
{
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
bool need_enable;
int r;
- need_enable = hdmi.core_enabled == false;
+ need_enable = hdmi->core_enabled == false;
if (need_enable) {
- r = hdmi4_core_enable(dssdev);
+ r = hdmi4_core_enable(&hdmi->core);
if (r)
return r;
}
- r = read_edid(edid, len);
+ r = read_edid(hdmi, edid, len);
if (r >= 256)
- hdmi4_cec_set_phys_addr(&hdmi.core,
+ hdmi4_cec_set_phys_addr(&hdmi->core,
cec_get_edid_phys_addr(edid, r, NULL));
else
- hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
+ hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
if (need_enable)
- hdmi4_core_disable(dssdev);
+ hdmi4_core_disable(&hdmi->core);
return r;
}
static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
{
- hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
}
static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
- hdmi.cfg.infoframe = *avi;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi->cfg.infoframe = *avi;
return 0;
}
static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
bool hdmi_mode)
{
- hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
return 0;
}
@@ -548,11 +565,11 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.set_hdmi_mode = hdmi_set_hdmi_mode,
};
-static void hdmi_init_output(struct platform_device *pdev)
+static void hdmi_init_output(struct omap_hdmi *hdmi)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi->output;
- out->dev = &pdev->dev;
+ out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->output_type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
@@ -563,15 +580,16 @@ static void hdmi_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void hdmi_uninit_output(struct platform_device *pdev)
+static void hdmi_uninit_output(struct omap_hdmi *hdmi)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi->output;
omapdss_unregister_output(out);
}
-static int hdmi_probe_of(struct platform_device *pdev)
+static int hdmi_probe_of(struct omap_hdmi *hdmi)
{
+ struct platform_device *pdev = hdmi->pdev;
struct device_node *node = pdev->dev.of_node;
struct device_node *ep;
int r;
@@ -580,7 +598,7 @@ static int hdmi_probe_of(struct platform_device *pdev)
if (!ep)
return 0;
- r = hdmi_parse_lanes_of(pdev, ep, &hdmi.phy);
+ r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
if (r)
goto err;
@@ -597,21 +615,16 @@ static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
- int ret = 0;
mutex_lock(&hd->lock);
- if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
- ret = -EPERM;
- goto out;
- }
+ WARN_ON(hd->audio_abort_cb != NULL);
hd->audio_abort_cb = abort_cb;
-out:
mutex_unlock(&hd->lock);
- return ret;
+ return 0;
}
static int hdmi_audio_shutdown(struct device *dev)
@@ -632,12 +645,14 @@ static int hdmi_audio_start(struct device *dev)
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
- WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
-
spin_lock_irqsave(&hd->audio_playing_lock, flags);
- if (hd->display_enabled)
+ if (hd->display_enabled) {
+ if (!hdmi_mode_has_audio(&hd->cfg))
+ DSSERR("%s: Video mode does not support audio\n",
+ __func__);
hdmi_start_audio_stream(hd);
+ }
hd->audio_playing = true;
spin_unlock_irqrestore(&hd->audio_playing_lock, flags);
@@ -668,17 +683,15 @@ static int hdmi_audio_config(struct device *dev,
mutex_lock(&hd->lock);
- if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
- ret = -EPERM;
- goto out;
+ if (hd->display_enabled) {
+ ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
+ hd->cfg.vm.pixelclock);
+ if (ret)
+ goto out;
}
- ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.vm.pixelclock);
- if (!ret) {
- hd->audio_configured = true;
- hd->audio_config = *dss_audio;
- }
+ hd->audio_configured = true;
+ hd->audio_config = *dss_audio;
out:
mutex_unlock(&hd->lock);
@@ -693,21 +706,21 @@ static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
.audio_config = hdmi_audio_config,
};
-static int hdmi_audio_register(struct device *dev)
+static int hdmi_audio_register(struct omap_hdmi *hdmi)
{
struct omap_hdmi_audio_pdata pdata = {
- .dev = dev,
+ .dev = &hdmi->pdev->dev,
.version = 4,
- .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
+ .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi->wp),
.ops = &hdmi_audio_ops,
};
- hdmi.audio_pdev = platform_device_register_data(
- dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
+ hdmi->audio_pdev = platform_device_register_data(
+ &hdmi->pdev->dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
&pdata, sizeof(pdata));
- if (IS_ERR(hdmi.audio_pdev))
- return PTR_ERR(hdmi.audio_pdev);
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
return 0;
}
@@ -716,88 +729,103 @@ static int hdmi_audio_register(struct device *dev)
static int hdmi4_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct dss_device *dss = dss_get_device(master);
+ struct omap_hdmi *hdmi;
int r;
int irq;
- hdmi.pdev = pdev;
- dev_set_drvdata(&pdev->dev, &hdmi);
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->pdev = pdev;
+ hdmi->dss = dss;
+ dev_set_drvdata(&pdev->dev, hdmi);
- mutex_init(&hdmi.lock);
- spin_lock_init(&hdmi.audio_playing_lock);
+ mutex_init(&hdmi->lock);
+ spin_lock_init(&hdmi->audio_playing_lock);
- r = hdmi_probe_of(pdev);
+ r = hdmi_probe_of(hdmi);
if (r)
- return r;
+ goto err_free;
- r = hdmi_wp_init(pdev, &hdmi.wp, 4);
+ r = hdmi_wp_init(pdev, &hdmi->wp, 4);
if (r)
- return r;
+ goto err_free;
- r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp);
+ r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp);
if (r)
- return r;
+ goto err_free;
- r = hdmi_phy_init(pdev, &hdmi.phy, 4);
+ r = hdmi_phy_init(pdev, &hdmi->phy, 4);
if (r)
- goto err;
+ goto err_pll;
- r = hdmi4_core_init(pdev, &hdmi.core);
+ r = hdmi4_core_init(pdev, &hdmi->core);
if (r)
- goto err;
+ goto err_pll;
- r = hdmi4_cec_init(pdev, &hdmi.core, &hdmi.wp);
+ r = hdmi4_cec_init(pdev, &hdmi->core, &hdmi->wp);
if (r)
- goto err;
+ goto err_pll;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err;
+ goto err_pll;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
NULL, hdmi_irq_handler,
- IRQF_ONESHOT, "OMAP HDMI", &hdmi);
+ IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err;
+ goto err_pll;
}
pm_runtime_enable(&pdev->dev);
- hdmi_init_output(pdev);
+ hdmi_init_output(hdmi);
- r = hdmi_audio_register(&pdev->dev);
+ r = hdmi_audio_register(hdmi);
if (r) {
DSSERR("Registering HDMI audio failed\n");
- hdmi_uninit_output(pdev);
+ hdmi_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
return r;
}
- dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+ hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
+ hdmi);
return 0;
-err:
- hdmi_pll_uninit(&hdmi.pll);
+
+err_pll:
+ hdmi_pll_uninit(&hdmi->pll);
+err_free:
+ kfree(hdmi);
return r;
}
static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(hdmi->debugfs);
- if (hdmi.audio_pdev)
- platform_device_unregister(hdmi.audio_pdev);
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
- hdmi_uninit_output(pdev);
+ hdmi_uninit_output(hdmi);
- hdmi4_cec_uninit(&hdmi.core);
+ hdmi4_cec_uninit(&hdmi->core);
- hdmi_pll_uninit(&hdmi.pll);
+ hdmi_pll_uninit(&hdmi->pll);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
+
+ kfree(hdmi);
}
static const struct component_ops hdmi4_component_ops = {
@@ -818,16 +846,19 @@ static int hdmi4_remove(struct platform_device *pdev)
static int hdmi_runtime_suspend(struct device *dev)
{
- dispc_runtime_put();
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dispc_runtime_put(hdmi->dss->dispc);
return 0;
}
static int hdmi_runtime_resume(struct device *dev)
{
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
int r;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(hdmi->dss->dispc);
if (r < 0)
return r;
@@ -844,7 +875,7 @@ static const struct of_device_id hdmi_of_match[] = {
{},
};
-static struct platform_driver omapdss_hdmihw_driver = {
+struct platform_driver omapdss_hdmi4hw_driver = {
.probe = hdmi4_probe,
.remove = hdmi4_remove,
.driver = {
@@ -854,13 +885,3 @@ static struct platform_driver omapdss_hdmihw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init hdmi4_init_platform_driver(void)
-{
- return platform_driver_register(&omapdss_hdmihw_driver);
-}
-
-void hdmi4_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omapdss_hdmihw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index 23db74a..3403831 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -175,10 +175,10 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
- hdmi4_core_disable(NULL);
+ hdmi4_core_disable(core);
return 0;
}
- err = hdmi4_core_enable(NULL);
+ err = hdmi4_core_enable(core);
if (err)
return err;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index b06f995..35ed2ad 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -1,7 +1,6 @@
/*
- * ti_hdmi_4xxx_ip.c
- *
* HDMI TI81xx, TI38xx, TI OMAP4 etc IP driver Library
+ *
* Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
* Authors: Yong Zhi
* Mythri pk <mythripk@ti.com>
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index b6ab579..337a317 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -266,8 +266,8 @@ void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
-int hdmi4_core_enable(struct omap_dss_device *dssdev);
-void hdmi4_core_disable(struct omap_dss_device *dssdev);
+int hdmi4_core_enable(struct hdmi_core_data *core);
+void hdmi4_core_disable(struct hdmi_core_data *core);
void hdmi4_core_powerdown_disable(struct hdmi_core_data *core);
int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index b3221ca..d28da9a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -1,7 +1,7 @@
/*
* HDMI driver for OMAP5
*
- * Copyright (C) 2014 Texas Instruments Incorporated
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
*
* Authors:
* Yong Zhi
@@ -46,15 +46,13 @@
#include "hdmi5_core.h"
#include "dss.h"
-static struct omap_hdmi hdmi;
-
-static int hdmi_runtime_get(void)
+static int hdmi_runtime_get(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_get\n");
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ r = pm_runtime_get_sync(&hdmi->pdev->dev);
WARN_ON(r < 0);
if (r < 0)
return r;
@@ -62,19 +60,20 @@ static int hdmi_runtime_get(void)
return 0;
}
-static void hdmi_runtime_put(void)
+static void hdmi_runtime_put(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_put\n");
- r = pm_runtime_put_sync(&hdmi.pdev->dev);
+ r = pm_runtime_put_sync(&hdmi->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
static irqreturn_t hdmi_irq_handler(int irq, void *data)
{
- struct hdmi_wp_data *wp = data;
+ struct omap_hdmi *hdmi = data;
+ struct hdmi_wp_data *wp = &hdmi->wp;
u32 irqstatus;
irqstatus = hdmi_wp_get_irqstatus(wp);
@@ -97,17 +96,17 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
* setting the PHY to LDOON. To ignore those, we force the RXDET
* line to 0 until the PHY power state has been changed.
*/
- v = hdmi_read_reg(hdmi.phy.base, HDMI_TXPHY_PAD_CFG_CTRL);
+ v = hdmi_read_reg(hdmi->phy.base, HDMI_TXPHY_PAD_CFG_CTRL);
v = FLD_MOD(v, 1, 15, 15); /* FORCE_RXDET_HIGH */
v = FLD_MOD(v, 0, 14, 7); /* RXDET_LINE */
- hdmi_write_reg(hdmi.phy.base, HDMI_TXPHY_PAD_CFG_CTRL, v);
+ hdmi_write_reg(hdmi->phy.base, HDMI_TXPHY_PAD_CFG_CTRL, v);
hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT |
HDMI_IRQ_LINK_DISCONNECT);
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
- REG_FLD_MOD(hdmi.phy.base, HDMI_TXPHY_PAD_CFG_CTRL, 0, 15, 15);
+ REG_FLD_MOD(hdmi->phy.base, HDMI_TXPHY_PAD_CFG_CTRL, 0, 15, 15);
} else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON);
@@ -118,70 +117,69 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmi_init_regulator(void)
+static int hdmi_init_regulator(struct omap_hdmi *hdmi)
{
struct regulator *reg;
- if (hdmi.vdda_reg != NULL)
+ if (hdmi->vdda_reg != NULL)
return 0;
- reg = devm_regulator_get(&hdmi.pdev->dev, "vdda");
+ reg = devm_regulator_get(&hdmi->pdev->dev, "vdda");
if (IS_ERR(reg)) {
DSSERR("can't get VDDA regulator\n");
return PTR_ERR(reg);
}
- hdmi.vdda_reg = reg;
+ hdmi->vdda_reg = reg;
return 0;
}
-static int hdmi_power_on_core(struct omap_dss_device *dssdev)
+static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
- r = regulator_enable(hdmi.vdda_reg);
+ r = regulator_enable(hdmi->vdda_reg);
if (r)
return r;
- r = hdmi_runtime_get();
+ r = hdmi_runtime_get(hdmi);
if (r)
goto err_runtime_get;
/* Make selection of HDMI in DSS */
- dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+ dss_select_hdmi_venc_clk_source(hdmi->dss, DSS_HDMI_M_PCLK);
- hdmi.core_enabled = true;
+ hdmi->core_enabled = true;
return 0;
err_runtime_get:
- regulator_disable(hdmi.vdda_reg);
+ regulator_disable(hdmi->vdda_reg);
return r;
}
-static void hdmi_power_off_core(struct omap_dss_device *dssdev)
+static void hdmi_power_off_core(struct omap_hdmi *hdmi)
{
- hdmi.core_enabled = false;
+ hdmi->core_enabled = false;
- hdmi_runtime_put();
- regulator_disable(hdmi.vdda_reg);
+ hdmi_runtime_put(hdmi);
+ regulator_disable(hdmi->vdda_reg);
}
-static int hdmi_power_on_full(struct omap_dss_device *dssdev)
+static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
struct videomode *vm;
- enum omap_channel channel = dssdev->dispc_channel;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
- unsigned pc;
+ unsigned int pc;
- r = hdmi_power_on_core(dssdev);
+ r = hdmi_power_on_core(hdmi);
if (r)
return r;
- vm = &hdmi.cfg.vm;
+ vm = &hdmi->cfg.vm;
DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
vm->vactive);
@@ -193,89 +191,89 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
/* DSS_HDMI_TCLK is bitclk / 10 */
pc *= 10;
- dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+ dss_pll_calc_b(&hdmi->pll.pll, clk_get_rate(hdmi->pll.pll.clkin),
pc, &hdmi_cinfo);
/* disable and clear irqs */
- hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
- hdmi_wp_set_irqstatus(&hdmi.wp,
- hdmi_wp_get_irqstatus(&hdmi.wp));
+ hdmi_wp_clear_irqenable(&hdmi->wp, 0xffffffff);
+ hdmi_wp_set_irqstatus(&hdmi->wp,
+ hdmi_wp_get_irqstatus(&hdmi->wp));
- r = dss_pll_enable(&hdmi.pll.pll);
+ r = dss_pll_enable(&hdmi->pll.pll);
if (r) {
DSSERR("Failed to enable PLL\n");
goto err_pll_enable;
}
- r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo);
+ r = dss_pll_set_config(&hdmi->pll.pll, &hdmi_cinfo);
if (r) {
DSSERR("Failed to configure PLL\n");
goto err_pll_cfg;
}
- r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco,
+ r = hdmi_phy_configure(&hdmi->phy, hdmi_cinfo.clkdco,
hdmi_cinfo.clkout[0]);
if (r) {
DSSDBG("Failed to start PHY\n");
goto err_phy_cfg;
}
- r = hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_LDOON);
+ r = hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_LDOON);
if (r)
goto err_phy_pwr;
- hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
+ hdmi5_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
/* tv size */
- dss_mgr_set_timings(channel, vm);
+ dss_mgr_set_timings(&hdmi->output, vm);
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
- r = hdmi_wp_video_start(&hdmi.wp);
+ r = hdmi_wp_video_start(&hdmi->wp);
if (r)
goto err_vid_enable;
- hdmi_wp_set_irqenable(&hdmi.wp,
+ hdmi_wp_set_irqenable(&hdmi->wp,
HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
return 0;
err_vid_enable:
- dss_mgr_disable(channel);
+ dss_mgr_disable(&hdmi->output);
err_mgr_enable:
- hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
+ hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
err_phy_cfg:
err_pll_cfg:
- dss_pll_disable(&hdmi.pll.pll);
+ dss_pll_disable(&hdmi->pll.pll);
err_pll_enable:
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
return -EIO;
}
-static void hdmi_power_off_full(struct omap_dss_device *dssdev)
+static void hdmi_power_off_full(struct omap_hdmi *hdmi)
{
- enum omap_channel channel = dssdev->dispc_channel;
-
- hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
+ hdmi_wp_clear_irqenable(&hdmi->wp, 0xffffffff);
- hdmi_wp_video_stop(&hdmi.wp);
+ hdmi_wp_video_stop(&hdmi->wp);
- dss_mgr_disable(channel);
+ dss_mgr_disable(&hdmi->output);
- hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
+ hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
- dss_pll_disable(&hdmi.pll.pll);
+ dss_pll_disable(&hdmi->pll.pll);
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
@@ -284,66 +282,73 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- mutex_lock(&hdmi.lock);
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- hdmi.cfg.vm = *vm;
+ mutex_lock(&hdmi->lock);
- dispc_set_tv_pclk(vm->pixelclock);
+ hdmi->cfg.vm = *vm;
- mutex_unlock(&hdmi.lock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
+
+ mutex_unlock(&hdmi->lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- *vm = hdmi.cfg.vm;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ *vm = hdmi->cfg.vm;
}
-static void hdmi_dump_regs(struct seq_file *s)
+static int hdmi_dump_regs(struct seq_file *s, void *p)
{
- mutex_lock(&hdmi.lock);
+ struct omap_hdmi *hdmi = s->private;
- if (hdmi_runtime_get()) {
- mutex_unlock(&hdmi.lock);
- return;
+ mutex_lock(&hdmi->lock);
+
+ if (hdmi_runtime_get(hdmi)) {
+ mutex_unlock(&hdmi->lock);
+ return 0;
}
- hdmi_wp_dump(&hdmi.wp, s);
- hdmi_pll_dump(&hdmi.pll, s);
- hdmi_phy_dump(&hdmi.phy, s);
- hdmi5_core_dump(&hdmi.core, s);
+ hdmi_wp_dump(&hdmi->wp, s);
+ hdmi_pll_dump(&hdmi->pll, s);
+ hdmi_phy_dump(&hdmi->phy, s);
+ hdmi5_core_dump(&hdmi->core, s);
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
+ return 0;
}
-static int read_edid(u8 *buf, int len)
+static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
{
int r;
int idlemode;
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- r = hdmi_runtime_get();
+ r = hdmi_runtime_get(hdmi);
BUG_ON(r);
- idlemode = REG_GET(hdmi.wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+ idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
/* No-idle mode */
- REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
- r = hdmi5_read_edid(&hdmi.core, buf, len);
+ r = hdmi5_read_edid(&hdmi->core, buf, len);
- REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
return r;
}
static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
hdmi_wp_audio_enable(&hd->wp, true);
hdmi_wp_audio_core_req_enable(&hd->wp, true);
}
@@ -357,112 +362,114 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
static int hdmi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
int r = 0;
DSSDBG("ENTER hdmi_display_enable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- if (!out->dispc_channel_connected) {
+ if (!dssdev->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
r = -ENODEV;
goto err0;
}
- r = hdmi_power_on_full(dssdev);
+ r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
- if (hdmi.audio_configured) {
- r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.vm.pixelclock);
+ if (hdmi->audio_configured) {
+ r = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
- hdmi.audio_abort_cb(&hdmi.pdev->dev);
- hdmi.audio_configured = false;
+ hdmi->audio_abort_cb(&hdmi->pdev->dev);
+ hdmi->audio_configured = false;
}
}
- spin_lock_irqsave(&hdmi.audio_playing_lock, flags);
- if (hdmi.audio_configured && hdmi.audio_playing)
- hdmi_start_audio_stream(&hdmi);
- hdmi.display_enabled = true;
- spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags);
+ spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+ if (hdmi->audio_configured && hdmi->audio_playing)
+ hdmi_start_audio_stream(hdmi);
+ hdmi->display_enabled = true;
+ spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return 0;
err0:
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
{
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
DSSDBG("Enter hdmi_display_disable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- spin_lock_irqsave(&hdmi.audio_playing_lock, flags);
- hdmi_stop_audio_stream(&hdmi);
- hdmi.display_enabled = false;
- spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags);
+ spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+ hdmi_stop_audio_stream(hdmi);
+ hdmi->display_enabled = false;
+ spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
- hdmi_power_off_full(dssdev);
+ hdmi_power_off_full(hdmi);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
-static int hdmi_core_enable(struct omap_dss_device *dssdev)
+static int hdmi_core_enable(struct omap_hdmi *hdmi)
{
int r = 0;
DSSDBG("ENTER omapdss_hdmi_core_enable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- r = hdmi_power_on_core(dssdev);
+ r = hdmi_power_on_core(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return 0;
err0:
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_core_disable(struct omap_dss_device *dssdev)
+static void hdmi_core_disable(struct omap_hdmi *hdmi)
{
DSSDBG("Enter omapdss_hdmi_core_disable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
static int hdmi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
int r;
- r = hdmi_init_regulator();
+ r = hdmi_init_regulator(hdmi);
if (r)
return r;
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&hdmi->output, dssdev);
if (r)
return r;
@@ -470,7 +477,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&hdmi->output, dssdev);
return r;
}
@@ -480,7 +487,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
static void hdmi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -489,27 +496,28 @@ static void hdmi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&hdmi->output, dssdev);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
u8 *edid, int len)
{
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
bool need_enable;
int r;
- need_enable = hdmi.core_enabled == false;
+ need_enable = hdmi->core_enabled == false;
if (need_enable) {
- r = hdmi_core_enable(dssdev);
+ r = hdmi_core_enable(hdmi);
if (r)
return r;
}
- r = read_edid(edid, len);
+ r = read_edid(hdmi, edid, len);
if (need_enable)
- hdmi_core_disable(dssdev);
+ hdmi_core_disable(hdmi);
return r;
}
@@ -517,14 +525,18 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
- hdmi.cfg.infoframe = *avi;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi->cfg.infoframe = *avi;
return 0;
}
static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
bool hdmi_mode)
{
- hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
return 0;
}
@@ -544,11 +556,11 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.set_hdmi_mode = hdmi_set_hdmi_mode,
};
-static void hdmi_init_output(struct platform_device *pdev)
+static void hdmi_init_output(struct omap_hdmi *hdmi)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi->output;
- out->dev = &pdev->dev;
+ out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->output_type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
@@ -559,15 +571,16 @@ static void hdmi_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void hdmi_uninit_output(struct platform_device *pdev)
+static void hdmi_uninit_output(struct omap_hdmi *hdmi)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi->output;
omapdss_unregister_output(out);
}
-static int hdmi_probe_of(struct platform_device *pdev)
+static int hdmi_probe_of(struct omap_hdmi *hdmi)
{
+ struct platform_device *pdev = hdmi->pdev;
struct device_node *node = pdev->dev.of_node;
struct device_node *ep;
int r;
@@ -576,7 +589,7 @@ static int hdmi_probe_of(struct platform_device *pdev)
if (!ep)
return 0;
- r = hdmi_parse_lanes_of(pdev, ep, &hdmi.phy);
+ r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
if (r)
goto err;
@@ -593,21 +606,16 @@ static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
- int ret = 0;
mutex_lock(&hd->lock);
- if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
- ret = -EPERM;
- goto out;
- }
+ WARN_ON(hd->audio_abort_cb != NULL);
hd->audio_abort_cb = abort_cb;
-out:
mutex_unlock(&hd->lock);
- return ret;
+ return 0;
}
static int hdmi_audio_shutdown(struct device *dev)
@@ -628,12 +636,14 @@ static int hdmi_audio_start(struct device *dev)
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
- WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
-
spin_lock_irqsave(&hd->audio_playing_lock, flags);
- if (hd->display_enabled)
+ if (hd->display_enabled) {
+ if (!hdmi_mode_has_audio(&hd->cfg))
+ DSSERR("%s: Video mode does not support audio\n",
+ __func__);
hdmi_start_audio_stream(hd);
+ }
hd->audio_playing = true;
spin_unlock_irqrestore(&hd->audio_playing_lock, flags);
@@ -645,7 +655,8 @@ static void hdmi_audio_stop(struct device *dev)
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
- WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
+ if (!hdmi_mode_has_audio(&hd->cfg))
+ DSSERR("%s: Video mode does not support audio\n", __func__);
spin_lock_irqsave(&hd->audio_playing_lock, flags);
@@ -664,18 +675,15 @@ static int hdmi_audio_config(struct device *dev,
mutex_lock(&hd->lock);
- if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
- ret = -EPERM;
- goto out;
+ if (hd->display_enabled) {
+ ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
+ hd->cfg.vm.pixelclock);
+ if (ret)
+ goto out;
}
- ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.vm.pixelclock);
-
- if (!ret) {
- hd->audio_configured = true;
- hd->audio_config = *dss_audio;
- }
+ hd->audio_configured = true;
+ hd->audio_config = *dss_audio;
out:
mutex_unlock(&hd->lock);
@@ -690,26 +698,26 @@ static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
.audio_config = hdmi_audio_config,
};
-static int hdmi_audio_register(struct device *dev)
+static int hdmi_audio_register(struct omap_hdmi *hdmi)
{
struct omap_hdmi_audio_pdata pdata = {
- .dev = dev,
+ .dev = &hdmi->pdev->dev,
.version = 5,
- .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
+ .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi->wp),
.ops = &hdmi_audio_ops,
};
- hdmi.audio_pdev = platform_device_register_data(
- dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
+ hdmi->audio_pdev = platform_device_register_data(
+ &hdmi->pdev->dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
&pdata, sizeof(pdata));
- if (IS_ERR(hdmi.audio_pdev))
- return PTR_ERR(hdmi.audio_pdev);
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
- hdmi_runtime_get();
- hdmi.wp_idlemode =
- REG_GET(hdmi.wp.base, HDMI_WP_SYSCONFIG, 3, 2);
- hdmi_runtime_put();
+ hdmi_runtime_get(hdmi);
+ hdmi->wp_idlemode =
+ REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+ hdmi_runtime_put(hdmi);
return 0;
}
@@ -718,82 +726,97 @@ static int hdmi_audio_register(struct device *dev)
static int hdmi5_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct dss_device *dss = dss_get_device(master);
+ struct omap_hdmi *hdmi;
int r;
int irq;
- hdmi.pdev = pdev;
- dev_set_drvdata(&pdev->dev, &hdmi);
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
- mutex_init(&hdmi.lock);
- spin_lock_init(&hdmi.audio_playing_lock);
+ hdmi->pdev = pdev;
+ hdmi->dss = dss;
+ dev_set_drvdata(&pdev->dev, hdmi);
- r = hdmi_probe_of(pdev);
+ mutex_init(&hdmi->lock);
+ spin_lock_init(&hdmi->audio_playing_lock);
+
+ r = hdmi_probe_of(hdmi);
if (r)
- return r;
+ goto err_free;
- r = hdmi_wp_init(pdev, &hdmi.wp, 5);
+ r = hdmi_wp_init(pdev, &hdmi->wp, 5);
if (r)
- return r;
+ goto err_free;
- r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp);
+ r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp);
if (r)
- return r;
+ goto err_free;
- r = hdmi_phy_init(pdev, &hdmi.phy, 5);
+ r = hdmi_phy_init(pdev, &hdmi->phy, 5);
if (r)
- goto err;
+ goto err_pll;
- r = hdmi5_core_init(pdev, &hdmi.core);
+ r = hdmi5_core_init(pdev, &hdmi->core);
if (r)
- goto err;
+ goto err_pll;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err;
+ goto err_pll;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
NULL, hdmi_irq_handler,
- IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp);
+ IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err;
+ goto err_pll;
}
pm_runtime_enable(&pdev->dev);
- hdmi_init_output(pdev);
+ hdmi_init_output(hdmi);
- r = hdmi_audio_register(&pdev->dev);
+ r = hdmi_audio_register(hdmi);
if (r) {
DSSERR("Registering HDMI audio failed %d\n", r);
- hdmi_uninit_output(pdev);
+ hdmi_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
return r;
}
- dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+ hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
+ hdmi);
return 0;
-err:
- hdmi_pll_uninit(&hdmi.pll);
+
+err_pll:
+ hdmi_pll_uninit(&hdmi->pll);
+err_free:
+ kfree(hdmi);
return r;
}
static void hdmi5_unbind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(hdmi->debugfs);
+
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
- if (hdmi.audio_pdev)
- platform_device_unregister(hdmi.audio_pdev);
+ hdmi_uninit_output(hdmi);
- hdmi_uninit_output(pdev);
+ hdmi_pll_uninit(&hdmi->pll);
- hdmi_pll_uninit(&hdmi.pll);
+ pm_runtime_disable(dev);
- pm_runtime_disable(&pdev->dev);
+ kfree(hdmi);
}
static const struct component_ops hdmi5_component_ops = {
@@ -814,16 +837,19 @@ static int hdmi5_remove(struct platform_device *pdev)
static int hdmi_runtime_suspend(struct device *dev)
{
- dispc_runtime_put();
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dispc_runtime_put(hdmi->dss->dispc);
return 0;
}
static int hdmi_runtime_resume(struct device *dev)
{
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
int r;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(hdmi->dss->dispc);
if (r < 0)
return r;
@@ -841,7 +867,7 @@ static const struct of_device_id hdmi_of_match[] = {
{},
};
-static struct platform_driver omapdss_hdmihw_driver = {
+struct platform_driver omapdss_hdmi5hw_driver = {
.probe = hdmi5_probe,
.remove = hdmi5_remove,
.driver = {
@@ -851,13 +877,3 @@ static struct platform_driver omapdss_hdmihw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init hdmi5_init_platform_driver(void)
-{
- return platform_driver_register(&omapdss_hdmihw_driver);
-}
-
-void hdmi5_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omapdss_hdmihw_driver);
-}
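
The hdmi4/hdmi5 callbacks above recover the driver instance with a dssdev_to_hdmi() helper whose definition is outside this diff (it lives in the shared hdmi header). A minimal sketch, assuming the same container_of() pattern that dssdev_to_sdi() and dssdev_to_venc() use later in this series; the struct layout is abbreviated and not part of the patch:

/* Sketch only: struct omap_hdmi embeds its output device, so device
 * callbacks map the omap_dss_device pointer back to the driver data. */
#define dssdev_to_hdmi(dssdev) container_of(dssdev, struct omap_hdmi, output)

static inline struct omap_hdmi *example_lookup(struct omap_dss_device *dssdev)
{
	return dssdev_to_hdmi(dssdev);	/* no global "hdmi" singleton needed */
}
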
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index ab179ec..2282e48 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -1,8 +1,7 @@
/*
* OMAP5 HDMI CORE IP driver library
*
- * Copyright (C) 2014 Texas Instruments Incorporated
- *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
* Authors:
* Yong Zhi
* Mythri pk
@@ -51,14 +50,14 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned ss_scl_high = 4600; /* ns */
- const unsigned ss_scl_low = 5400; /* ns */
- const unsigned fs_scl_high = 600; /* ns */
- const unsigned fs_scl_low = 1300; /* ns */
- const unsigned sda_hold = 1000; /* ns */
- const unsigned sfr_div = 10;
+ const unsigned int ss_scl_high = 4600; /* ns */
+ const unsigned int ss_scl_low = 5400; /* ns */
+ const unsigned int fs_scl_high = 600; /* ns */
+ const unsigned int fs_scl_low = 1300; /* ns */
+ const unsigned int sda_hold = 1000; /* ns */
+ const unsigned int sfr_div = 10;
unsigned long long sfr;
- unsigned v;
+ unsigned int v;
sfr = iclk / sfr_div; /* SFR_DIV */
sfr /= 1000; /* SFR clock in kHz */
@@ -431,11 +430,11 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
void __iomem *base = core->base;
u8 data[HDMI_INFOFRAME_SIZE(AVI)];
u8 *ptr;
- unsigned y, a, b, s;
- unsigned c, m, r;
- unsigned itc, ec, q, sc;
- unsigned vic;
- unsigned yq, cn, pr;
+ unsigned int y, a, b, s;
+ unsigned int c, m, r;
+ unsigned int itc, ec, q, sc;
+ unsigned int vic;
+ unsigned int yq, cn, pr;
hdmi_avi_infoframe_pack(frame, data, sizeof(data));
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index a156292..9915923 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -1,7 +1,7 @@
/*
* HDMI PHY
*
- * Copyright (C) 2013 Texas Instruments Incorporated
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -99,7 +99,7 @@ static void hdmi_phy_configure_lanes(struct hdmi_phy_data *phy)
u16 lane_cfg = 0;
int i;
- unsigned lane_cfg_val;
+ unsigned int lane_cfg_val;
u16 pol_val = 0;
for (i = 0; i < 4; ++i)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 55bee81..e7be370 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -1,7 +1,7 @@
/*
* HDMI PLL
*
- * Copyright (C) 2013 Texas Instruments Incorporated
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -48,7 +48,7 @@ static int hdmi_pll_enable(struct dss_pll *dsspll)
r = pm_runtime_get_sync(&pll->pdev->dev);
WARN_ON(r < 0);
- dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
+ dss_ctrl_pll_enable(dsspll, true);
r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
if (r)
@@ -65,7 +65,7 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
- dss_ctrl_pll_enable(DSS_PLL_HDMI, false);
+ dss_ctrl_pll_enable(dsspll, false);
r = pm_runtime_put_sync(&pll->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
@@ -128,7 +128,8 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
.has_refsel = true,
};
-static int hdmi_init_pll_data(struct platform_device *pdev,
+static int hdmi_init_pll_data(struct dss_device *dss,
+ struct platform_device *pdev,
struct hdmi_pll_data *hpll)
{
struct dss_pll *pll = &hpll->pll;
@@ -153,15 +154,15 @@ static int hdmi_init_pll_data(struct platform_device *pdev,
pll->ops = &hdmi_pll_ops;
- r = dss_pll_register(pll);
+ r = dss_pll_register(dss, pll);
if (r)
return r;
return 0;
}
-int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
- struct hdmi_wp_data *wp)
+int hdmi_pll_init(struct dss_device *dss, struct platform_device *pdev,
+ struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
{
int r;
struct resource *res;
@@ -174,7 +175,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
if (IS_ERR(pll->base))
return PTR_ERR(pll->base);
- r = hdmi_init_pll_data(pdev, pll);
+ r = hdmi_init_pll_data(dss, pdev, pll);
if (r) {
DSSERR("failed to init HDMI PLL\n");
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 88034fb..53bc5f7 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -1,7 +1,7 @@
/*
* HDMI wrapper
*
- * Copyright (C) 2013 Texas Instruments Incorporated
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -168,7 +168,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
{
u32 timing_h = 0;
u32 timing_v = 0;
- unsigned hsync_len_offset = 1;
+ unsigned int hsync_len_offset = 1;
DSSDBG("Enter hdmi_wp_video_config_timing\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index bf626ac..3bfb95d 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Texas Instruments
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 990422b..14d74ad 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Texas Instruments
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -59,7 +59,11 @@
#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29)
#define DISPC_IRQ_FRAMEDONE3 (1 << 30)
+struct dss_device;
+struct omap_drm_private;
struct omap_dss_device;
+struct dispc_device;
+struct dss_device;
struct dss_lcd_mgr_config;
struct snd_aes_iec958;
struct snd_cea_861_aud_if;
@@ -159,21 +163,6 @@ enum omap_overlay_caps {
OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
};
-enum omap_dss_clk_source {
- OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
- * OMAP4: DSS_FCLK */
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
- * OMAP4: PLL1_CLK1 */
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
- * OMAP4: PLL1_CLK2 */
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
-};
-
-enum omap_hdmi_flags {
- OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
-};
-
enum omap_dss_output_id {
OMAP_DSS_OUTPUT_DPI = 1 << 0,
OMAP_DSS_OUTPUT_DBI = 1 << 1,
@@ -198,8 +187,8 @@ enum omap_dss_dsi_trans_mode {
struct omap_dss_dsi_videomode_timings {
unsigned long hsclk;
- unsigned ndl;
- unsigned bitspp;
+ unsigned int ndl;
+ unsigned int bitspp;
/* pixels */
u16 hact;
@@ -563,6 +552,8 @@ struct omap_dss_driver {
struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
struct videomode *vm);
+ void (*get_size)(struct omap_dss_device *dssdev,
+ unsigned int *width, unsigned int *height);
int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
u32 (*get_wss)(struct omap_dss_device *dssdev);
@@ -583,10 +574,12 @@ struct omap_dss_driver {
const struct hdmi_avi_infoframe *avi);
};
-bool omapdss_is_initialized(void);
-
-int omap_dss_register_driver(struct omap_dss_driver *);
-void omap_dss_unregister_driver(struct omap_dss_driver *);
+struct dss_device *omapdss_get_dss(void);
+void omapdss_set_dss(struct dss_device *dss);
+static inline bool omapdss_is_initialized(void)
+{
+ return !!omapdss_get_dss();
+}
int omapdss_register_display(struct omap_dss_device *dssdev);
void omapdss_unregister_display(struct omap_dss_device *dssdev);
@@ -595,9 +588,6 @@ struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
void omap_dss_put_device(struct omap_dss_device *dssdev);
#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
-struct omap_dss_device *omap_dss_find_device(void *data,
- int (*match)(struct omap_dss_device *dssdev, void *data));
-
int omap_dss_get_num_overlay_managers(void);
@@ -613,9 +603,6 @@ int omapdss_output_unset_device(struct omap_dss_device *out);
struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
-void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm);
-
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -636,95 +623,139 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node);
-void omapdss_set_is_initialized(bool set);
-
struct device_node *dss_of_port_get_parent_device(struct device_node *port);
u32 dss_of_port_get_port_number(struct device_node *port);
+enum dss_writeback_channel {
+ DSS_WB_LCD1_MGR = 0,
+ DSS_WB_LCD2_MGR = 1,
+ DSS_WB_TV_MGR = 2,
+ DSS_WB_OVL0 = 3,
+ DSS_WB_OVL1 = 4,
+ DSS_WB_OVL2 = 5,
+ DSS_WB_OVL3 = 6,
+ DSS_WB_LCD3_MGR = 7,
+};
+
struct dss_mgr_ops {
- int (*connect)(enum omap_channel channel,
- struct omap_dss_device *dst);
- void (*disconnect)(enum omap_channel channel,
- struct omap_dss_device *dst);
-
- void (*start_update)(enum omap_channel channel);
- int (*enable)(enum omap_channel channel);
- void (*disable)(enum omap_channel channel);
- void (*set_timings)(enum omap_channel channel,
- const struct videomode *vm);
- void (*set_lcd_config)(enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- int (*register_framedone_handler)(enum omap_channel channel,
+ int (*connect)(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ struct omap_dss_device *dst);
+
+ void (*start_update)(struct omap_drm_private *priv,
+ enum omap_channel channel);
+ int (*enable)(struct omap_drm_private *priv,
+ enum omap_channel channel);
+ void (*disable)(struct omap_drm_private *priv,
+ enum omap_channel channel);
+ void (*set_timings)(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct videomode *vm);
+ void (*set_lcd_config)(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+ int (*register_framedone_handler)(struct omap_drm_private *priv,
+ enum omap_channel channel,
void (*handler)(void *), void *data);
- void (*unregister_framedone_handler)(enum omap_channel channel,
+ void (*unregister_framedone_handler)(struct omap_drm_private *priv,
+ enum omap_channel channel,
void (*handler)(void *), void *data);
};
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops);
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops,
+ struct omap_drm_private *priv);
void dss_uninstall_mgr_ops(void);
-int dss_mgr_connect(enum omap_channel channel,
- struct omap_dss_device *dst);
-void dss_mgr_disconnect(enum omap_channel channel,
- struct omap_dss_device *dst);
-void dss_mgr_set_timings(enum omap_channel channel,
+int dss_mgr_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+void dss_mgr_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm);
-void dss_mgr_set_lcd_config(enum omap_channel channel,
+void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config);
-int dss_mgr_enable(enum omap_channel channel);
-void dss_mgr_disable(enum omap_channel channel);
-void dss_mgr_start_update(enum omap_channel channel);
-int dss_mgr_register_framedone_handler(enum omap_channel channel,
+int dss_mgr_enable(struct omap_dss_device *dssdev);
+void dss_mgr_disable(struct omap_dss_device *dssdev);
+void dss_mgr_start_update(struct omap_dss_device *dssdev);
+int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data);
-void dss_mgr_unregister_framedone_handler(enum omap_channel channel,
+void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data);
/* dispc ops */
struct dispc_ops {
- u32 (*read_irqstatus)(void);
- void (*clear_irqstatus)(u32 mask);
- void (*write_irqenable)(u32 mask);
-
- int (*request_irq)(irq_handler_t handler, void *dev_id);
- void (*free_irq)(void *dev_id);
-
- int (*runtime_get)(void);
- void (*runtime_put)(void);
-
- int (*get_num_ovls)(void);
- int (*get_num_mgrs)(void);
-
- void (*mgr_enable)(enum omap_channel channel, bool enable);
- bool (*mgr_is_enabled)(enum omap_channel channel);
- u32 (*mgr_get_vsync_irq)(enum omap_channel channel);
- u32 (*mgr_get_framedone_irq)(enum omap_channel channel);
- u32 (*mgr_get_sync_lost_irq)(enum omap_channel channel);
- bool (*mgr_go_busy)(enum omap_channel channel);
- void (*mgr_go)(enum omap_channel channel);
- void (*mgr_set_lcd_config)(enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- void (*mgr_set_timings)(enum omap_channel channel,
- const struct videomode *vm);
- void (*mgr_setup)(enum omap_channel channel,
- const struct omap_overlay_manager_info *info);
- enum omap_dss_output_id (*mgr_get_supported_outputs)(enum omap_channel channel);
- u32 (*mgr_gamma_size)(enum omap_channel channel);
- void (*mgr_set_gamma)(enum omap_channel channel,
- const struct drm_color_lut *lut,
- unsigned int length);
-
- int (*ovl_enable)(enum omap_plane_id plane, bool enable);
- int (*ovl_setup)(enum omap_plane_id plane,
+ u32 (*read_irqstatus)(struct dispc_device *dispc);
+ void (*clear_irqstatus)(struct dispc_device *dispc, u32 mask);
+ void (*write_irqenable)(struct dispc_device *dispc, u32 mask);
+
+ int (*request_irq)(struct dispc_device *dispc, irq_handler_t handler,
+ void *dev_id);
+ void (*free_irq)(struct dispc_device *dispc, void *dev_id);
+
+ int (*runtime_get)(struct dispc_device *dispc);
+ void (*runtime_put)(struct dispc_device *dispc);
+
+ int (*get_num_ovls)(struct dispc_device *dispc);
+ int (*get_num_mgrs)(struct dispc_device *dispc);
+
+ u32 (*get_memory_bandwidth_limit)(struct dispc_device *dispc);
+
+ void (*mgr_enable)(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable);
+ bool (*mgr_is_enabled)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ u32 (*mgr_get_vsync_irq)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ u32 (*mgr_get_framedone_irq)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ u32 (*mgr_get_sync_lost_irq)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ bool (*mgr_go_busy)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ void (*mgr_go)(struct dispc_device *dispc, enum omap_channel channel);
+ void (*mgr_set_lcd_config)(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+ void (*mgr_set_timings)(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+ void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel,
+ const struct omap_overlay_manager_info *info);
+ enum omap_dss_output_id (*mgr_get_supported_outputs)(
+ struct dispc_device *dispc, enum omap_channel channel);
+ u32 (*mgr_gamma_size)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ void (*mgr_set_gamma)(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length);
+
+ int (*ovl_enable)(struct dispc_device *dispc, enum omap_plane_id plane,
+ bool enable);
+ int (*ovl_setup)(struct dispc_device *dispc, enum omap_plane_id plane,
const struct omap_overlay_info *oi,
- const struct videomode *vm, bool mem_to_mem,
- enum omap_channel channel);
+ const struct videomode *vm, bool mem_to_mem,
+ enum omap_channel channel);
+
+ const u32 *(*ovl_get_color_modes)(struct dispc_device *dispc,
+ enum omap_plane_id plane);
- const u32 *(*ovl_get_color_modes)(enum omap_plane_id plane);
+ u32 (*wb_get_framedone_irq)(struct dispc_device *dispc);
+ int (*wb_setup)(struct dispc_device *dispc,
+ const struct omap_dss_writeback_info *wi,
+ bool mem_to_mem, const struct videomode *vm,
+ enum dss_writeback_channel channel_in);
+ bool (*has_writeback)(struct dispc_device *dispc);
+ bool (*wb_go_busy)(struct dispc_device *dispc);
+ void (*wb_go)(struct dispc_device *dispc);
};
-void dispc_set_ops(const struct dispc_ops *o);
-const struct dispc_ops *dispc_get_ops(void);
+struct dispc_device *dispc_get_dispc(struct dss_device *dss);
+const struct dispc_ops *dispc_get_ops(struct dss_device *dss);
bool omapdss_component_is_display(struct device_node *node);
bool omapdss_component_is_output(struct device_node *node);
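
With dispc_set_ops()/dispc_get_ops() replaced by per-device accessors, a consumer now fetches both the dispc handle and its ops from a dss_device rather than calling globals. A minimal usage sketch based only on the prototypes declared above; the caller context and variable names are illustrative, not from the patch:

/* Hypothetical caller, assuming a valid struct dss_device *dss: */
struct dispc_device *dispc = dispc_get_dispc(dss);
const struct dispc_ops *ops = dispc_get_ops(dss);
u32 stat = ops->read_irqstatus(dispc);	/* every op takes the dispc instance */
ops->clear_irqstatus(dispc, stat);
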
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 3c572b6..96b9d4c 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Texas Instruments Ltd
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
* Author: Archit Taneja <archit@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -156,7 +156,6 @@ struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *po
return NULL;
}
-EXPORT_SYMBOL(omap_dss_find_output_by_port_node);
struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev)
{
@@ -171,13 +170,16 @@ struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device
EXPORT_SYMBOL(omapdss_find_output_from_display);
static const struct dss_mgr_ops *dss_mgr_ops;
+static struct omap_drm_private *dss_mgr_ops_priv;
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops)
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops,
+ struct omap_drm_private *priv)
{
if (dss_mgr_ops)
return -EBUSY;
dss_mgr_ops = mgr_ops;
+ dss_mgr_ops_priv = priv;
return 0;
}
@@ -186,64 +188,71 @@ EXPORT_SYMBOL(dss_install_mgr_ops);
void dss_uninstall_mgr_ops(void)
{
dss_mgr_ops = NULL;
+ dss_mgr_ops_priv = NULL;
}
EXPORT_SYMBOL(dss_uninstall_mgr_ops);
-int dss_mgr_connect(enum omap_channel channel,
- struct omap_dss_device *dst)
+int dss_mgr_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst)
{
- return dss_mgr_ops->connect(channel, dst);
+ return dss_mgr_ops->connect(dss_mgr_ops_priv,
+ dssdev->dispc_channel, dst);
}
EXPORT_SYMBOL(dss_mgr_connect);
-void dss_mgr_disconnect(enum omap_channel channel,
- struct omap_dss_device *dst)
+void dss_mgr_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
{
- dss_mgr_ops->disconnect(channel, dst);
+ dss_mgr_ops->disconnect(dss_mgr_ops_priv, dssdev->dispc_channel, dst);
}
EXPORT_SYMBOL(dss_mgr_disconnect);
-void dss_mgr_set_timings(enum omap_channel channel, const struct videomode *vm)
+void dss_mgr_set_timings(struct omap_dss_device *dssdev,
+ const struct videomode *vm)
{
- dss_mgr_ops->set_timings(channel, vm);
+ dss_mgr_ops->set_timings(dss_mgr_ops_priv, dssdev->dispc_channel, vm);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
-void dss_mgr_set_lcd_config(enum omap_channel channel,
+void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config)
{
- dss_mgr_ops->set_lcd_config(channel, config);
+ dss_mgr_ops->set_lcd_config(dss_mgr_ops_priv,
+ dssdev->dispc_channel, config);
}
EXPORT_SYMBOL(dss_mgr_set_lcd_config);
-int dss_mgr_enable(enum omap_channel channel)
+int dss_mgr_enable(struct omap_dss_device *dssdev)
{
- return dss_mgr_ops->enable(channel);
+ return dss_mgr_ops->enable(dss_mgr_ops_priv, dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_enable);
-void dss_mgr_disable(enum omap_channel channel)
+void dss_mgr_disable(struct omap_dss_device *dssdev)
{
- dss_mgr_ops->disable(channel);
+ dss_mgr_ops->disable(dss_mgr_ops_priv, dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_disable);
-void dss_mgr_start_update(enum omap_channel channel)
+void dss_mgr_start_update(struct omap_dss_device *dssdev)
{
- dss_mgr_ops->start_update(channel);
+ dss_mgr_ops->start_update(dss_mgr_ops_priv, dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_start_update);
-int dss_mgr_register_framedone_handler(enum omap_channel channel,
+int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
- return dss_mgr_ops->register_framedone_handler(channel, handler, data);
+ return dss_mgr_ops->register_framedone_handler(dss_mgr_ops_priv,
+ dssdev->dispc_channel,
+ handler, data);
}
EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
-void dss_mgr_unregister_framedone_handler(enum omap_channel channel,
+void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
- dss_mgr_ops->unregister_framedone_handler(channel, handler, data);
+ dss_mgr_ops->unregister_framedone_handler(dss_mgr_ops_priv,
+ dssdev->dispc_channel,
+ handler, data);
}
EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 9d9d9d4..078b0e8 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Texas Instruments Incorporated
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -35,15 +35,14 @@
#define PLL_SSC_CONFIGURATION2 0x001C
#define PLL_CONFIGURATION4 0x0020
-static struct dss_pll *dss_plls[4];
-
-int dss_pll_register(struct dss_pll *pll)
+int dss_pll_register(struct dss_device *dss, struct dss_pll *pll)
{
int i;
- for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
- if (!dss_plls[i]) {
- dss_plls[i] = pll;
+ for (i = 0; i < ARRAY_SIZE(dss->plls); ++i) {
+ if (!dss->plls[i]) {
+ dss->plls[i] = pll;
+ pll->dss = dss;
return 0;
}
}
@@ -53,29 +52,32 @@ int dss_pll_register(struct dss_pll *pll)
void dss_pll_unregister(struct dss_pll *pll)
{
+ struct dss_device *dss = pll->dss;
int i;
- for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
- if (dss_plls[i] == pll) {
- dss_plls[i] = NULL;
+ for (i = 0; i < ARRAY_SIZE(dss->plls); ++i) {
+ if (dss->plls[i] == pll) {
+ dss->plls[i] = NULL;
+ pll->dss = NULL;
return;
}
}
}
-struct dss_pll *dss_pll_find(const char *name)
+struct dss_pll *dss_pll_find(struct dss_device *dss, const char *name)
{
int i;
- for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
- if (dss_plls[i] && strcmp(dss_plls[i]->name, name) == 0)
- return dss_plls[i];
+ for (i = 0; i < ARRAY_SIZE(dss->plls); ++i) {
+ if (dss->plls[i] && strcmp(dss->plls[i]->name, name) == 0)
+ return dss->plls[i];
}
return NULL;
}
-struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
+struct dss_pll *dss_pll_find_by_src(struct dss_device *dss,
+ enum dss_clk_source src)
{
struct dss_pll *pll;
@@ -85,27 +87,27 @@ struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
return NULL;
case DSS_CLK_SRC_HDMI_PLL:
- return dss_pll_find("hdmi");
+ return dss_pll_find(dss, "hdmi");
case DSS_CLK_SRC_PLL1_1:
case DSS_CLK_SRC_PLL1_2:
case DSS_CLK_SRC_PLL1_3:
- pll = dss_pll_find("dsi0");
+ pll = dss_pll_find(dss, "dsi0");
if (!pll)
- pll = dss_pll_find("video0");
+ pll = dss_pll_find(dss, "video0");
return pll;
case DSS_CLK_SRC_PLL2_1:
case DSS_CLK_SRC_PLL2_2:
case DSS_CLK_SRC_PLL2_3:
- pll = dss_pll_find("dsi1");
+ pll = dss_pll_find(dss, "dsi1");
if (!pll)
- pll = dss_pll_find("video1");
+ pll = dss_pll_find(dss, "video1");
return pll;
}
}
-unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
+unsigned int dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
{
switch (src) {
case DSS_CLK_SRC_HDMI_PLL:
@@ -277,7 +279,7 @@ bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
unsigned long fint, clkdco, clkout;
unsigned long target_clkdco;
unsigned long min_dco;
- unsigned n, m, mf, m2, sd;
+ unsigned int n, m, mf, m2, sd;
const struct dss_pll_hw *hw = pll->hw;
DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout);
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index d18ad58..68a40ae 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/sdi.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -31,8 +29,9 @@
#include "omapdss.h"
#include "dss.h"
-static struct {
+struct sdi_device {
struct platform_device *pdev;
+ struct dss_device *dss;
bool update_enabled;
struct regulator *vdds_sdi_reg;
@@ -42,11 +41,12 @@ static struct {
int datapairs;
struct omap_dss_device output;
+};
- bool port_initialized;
-} sdi;
+#define dssdev_to_sdi(dssdev) container_of(dssdev, struct sdi_device, output)
struct sdi_clk_calc_ctx {
+ struct sdi_device *sdi;
unsigned long pck_min, pck_max;
unsigned long fck;
@@ -72,16 +72,17 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
ctx->fck = fck;
- return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
- dpi_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->sdi->dss->dispc, fck,
+ ctx->pck_min, ctx->pck_max,
+ dpi_calc_dispc_cb, ctx);
}
-static int sdi_calc_clock_div(unsigned long pclk,
- unsigned long *fck,
- struct dispc_clock_info *dispc_cinfo)
+static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
+ unsigned long *fck,
+ struct dispc_clock_info *dispc_cinfo)
{
int i;
- struct sdi_clk_calc_ctx ctx;
+ struct sdi_clk_calc_ctx ctx = { .sdi = sdi };
/*
* DSS fclk gives us very few possibilities, so finding a good pixel
@@ -100,7 +101,8 @@ static int sdi_calc_clock_div(unsigned long pclk,
ctx.pck_min = 0;
ctx.pck_max = pclk + 1000 * i * i * i;
- ok = dss_div_calc(pclk, ctx.pck_min, dpi_calc_dss_cb, &ctx);
+ ok = dss_div_calc(sdi->dss, pclk, ctx.pck_min,
+ dpi_calc_dss_cb, &ctx);
if (ok) {
*fck = ctx.fck;
*dispc_cinfo = ctx.dispc_cinfo;
@@ -111,52 +113,49 @@ static int sdi_calc_clock_div(unsigned long pclk,
return -EINVAL;
}
-static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
+static void sdi_config_lcd_manager(struct sdi_device *sdi)
{
- enum omap_channel channel = dssdev->dispc_channel;
-
- sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
+ sdi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
- sdi.mgr_config.stallmode = false;
- sdi.mgr_config.fifohandcheck = false;
+ sdi->mgr_config.stallmode = false;
+ sdi->mgr_config.fifohandcheck = false;
- sdi.mgr_config.video_port_width = 24;
- sdi.mgr_config.lcden_sig_polarity = 1;
+ sdi->mgr_config.video_port_width = 24;
+ sdi->mgr_config.lcden_sig_polarity = 1;
- dss_mgr_set_lcd_config(channel, &sdi.mgr_config);
+ dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
static int sdi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *out = &sdi.output;
- enum omap_channel channel = dssdev->dispc_channel;
- struct videomode *vm = &sdi.vm;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct videomode *vm = &sdi->vm;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
int r;
- if (!out->dispc_channel_connected) {
+ if (!sdi->output.dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
return -ENODEV;
}
- r = regulator_enable(sdi.vdds_sdi_reg);
+ r = regulator_enable(sdi->vdds_sdi_reg);
if (r)
goto err_reg_enable;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(sdi->dss->dispc);
if (r)
goto err_get_dispc;
/* 15.5.9.1.2 */
vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE;
- r = sdi_calc_clock_div(vm->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
- sdi.mgr_config.clock_info = dispc_cinfo;
+ sdi->mgr_config.clock_info = dispc_cinfo;
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
@@ -168,13 +167,13 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
}
- dss_mgr_set_timings(channel, vm);
+ dss_mgr_set_timings(&sdi->output, vm);
- r = dss_set_fck_rate(fck);
+ r = dss_set_fck_rate(sdi->dss, fck);
if (r)
goto err_set_dss_clock_div;
- sdi_config_lcd_manager(dssdev);
+ sdi_config_lcd_manager(sdi);
/*
* LCLK and PCLK divisors are located in shadow registers, and we
@@ -187,63 +186,69 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
* need to care about the shadow register mechanism for pck-free. The
* exact reason for this is unknown.
*/
- dispc_mgr_set_clock_div(channel, &sdi.mgr_config.clock_info);
+ dispc_mgr_set_clock_div(sdi->dss->dispc, sdi->output.dispc_channel,
+ &sdi->mgr_config.clock_info);
- dss_sdi_init(sdi.datapairs);
- r = dss_sdi_enable();
+ dss_sdi_init(sdi->dss, sdi->datapairs);
+ r = dss_sdi_enable(sdi->dss);
if (r)
goto err_sdi_enable;
mdelay(2);
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&sdi->output);
if (r)
goto err_mgr_enable;
return 0;
err_mgr_enable:
- dss_sdi_disable();
+ dss_sdi_disable(sdi->dss);
err_sdi_enable:
err_set_dss_clock_div:
err_calc_clock_div:
- dispc_runtime_put();
+ dispc_runtime_put(sdi->dss->dispc);
err_get_dispc:
- regulator_disable(sdi.vdds_sdi_reg);
+ regulator_disable(sdi->vdds_sdi_reg);
err_reg_enable:
return r;
}
static void sdi_display_disable(struct omap_dss_device *dssdev)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- dss_mgr_disable(channel);
+ dss_mgr_disable(&sdi->output);
- dss_sdi_disable();
+ dss_sdi_disable(sdi->dss);
- dispc_runtime_put();
+ dispc_runtime_put(sdi->dss->dispc);
- regulator_disable(sdi.vdds_sdi_reg);
+ regulator_disable(sdi->vdds_sdi_reg);
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- sdi.vm = *vm;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+
+ sdi->vm = *vm;
}
static void sdi_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- *vm = sdi.vm;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+
+ *vm = sdi->vm;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
enum omap_channel channel = dssdev->dispc_channel;
- if (!dispc_mgr_timings_ok(channel, vm))
+ if (!dispc_mgr_timings_ok(sdi->dss->dispc, channel, vm))
return -EINVAL;
if (vm->pixelclock == 0)
@@ -252,21 +257,21 @@ static int sdi_check_timings(struct omap_dss_device *dssdev,
return 0;
}
-static int sdi_init_regulator(void)
+static int sdi_init_regulator(struct sdi_device *sdi)
{
struct regulator *vdds_sdi;
- if (sdi.vdds_sdi_reg)
+ if (sdi->vdds_sdi_reg)
return 0;
- vdds_sdi = devm_regulator_get(&sdi.pdev->dev, "vdds_sdi");
+ vdds_sdi = devm_regulator_get(&sdi->pdev->dev, "vdds_sdi");
if (IS_ERR(vdds_sdi)) {
if (PTR_ERR(vdds_sdi) != -EPROBE_DEFER)
DSSERR("can't get VDDS_SDI regulator\n");
return PTR_ERR(vdds_sdi);
}
- sdi.vdds_sdi_reg = vdds_sdi;
+ sdi->vdds_sdi_reg = vdds_sdi;
return 0;
}
@@ -274,14 +279,14 @@ static int sdi_init_regulator(void)
static int sdi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
int r;
- r = sdi_init_regulator();
+ r = sdi_init_regulator(sdi);
if (r)
return r;
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&sdi->output, dssdev);
if (r)
return r;
@@ -289,7 +294,7 @@ static int sdi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&sdi->output, dssdev);
return r;
}
@@ -299,7 +304,7 @@ static int sdi_connect(struct omap_dss_device *dssdev,
static void sdi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -308,7 +313,7 @@ static void sdi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&sdi->output, dssdev);
}
static const struct omapdss_sdi_ops sdi_ops = {
@@ -323,11 +328,11 @@ static const struct omapdss_sdi_ops sdi_ops = {
.get_timings = sdi_get_timings,
};
-static void sdi_init_output(struct platform_device *pdev)
+static void sdi_init_output(struct sdi_device *sdi)
{
- struct omap_dss_device *out = &sdi.output;
+ struct omap_dss_device *out = &sdi->output;
- out->dev = &pdev->dev;
+ out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
out->output_type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
@@ -340,22 +345,28 @@ static void sdi_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void sdi_uninit_output(struct platform_device *pdev)
+static void sdi_uninit_output(struct sdi_device *sdi)
{
- struct omap_dss_device *out = &sdi.output;
-
- omapdss_unregister_output(out);
+ omapdss_unregister_output(&sdi->output);
}
-int sdi_init_port(struct platform_device *pdev, struct device_node *port)
+int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
+ struct device_node *port)
{
+ struct sdi_device *sdi;
struct device_node *ep;
u32 datapairs;
int r;
+ sdi = kzalloc(sizeof(*sdi), GFP_KERNEL);
+ if (!sdi)
+ return -ENOMEM;
+
ep = of_get_next_child(port, NULL);
- if (!ep)
- return 0;
+ if (!ep) {
+ r = 0;
+ goto err_free;
+ }
r = of_property_read_u32(ep, "datapairs", &datapairs);
if (r) {
@@ -363,28 +374,33 @@ int sdi_init_port(struct platform_device *pdev, struct device_node *port)
goto err_datapairs;
}
- sdi.datapairs = datapairs;
+ sdi->datapairs = datapairs;
+ sdi->dss = dss;
of_node_put(ep);
- sdi.pdev = pdev;
+ sdi->pdev = pdev;
+ port->data = sdi;
- sdi_init_output(pdev);
-
- sdi.port_initialized = true;
+ sdi_init_output(sdi);
return 0;
err_datapairs:
of_node_put(ep);
+err_free:
+ kfree(sdi);
return r;
}
void sdi_uninit_port(struct device_node *port)
{
- if (!sdi.port_initialized)
+ struct sdi_device *sdi = port->data;
+
+ if (!sdi)
return;
- sdi_uninit_output(sdi.pdev);
+ sdi_uninit_output(sdi);
+ kfree(sdi);
}
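
Reviewer note: the sdi.c conversion above follows a simple allocate, stash on the port, look up, free lifecycle (kzalloc, port->data = sdi, port->data lookup, kfree). A minimal userspace sketch of that lifecycle; fake_port and demo_device are made-up stand-ins for device_node and struct sdi_device, not kernel names:

	/* illustrative userspace sketch only */
	#include <stdio.h>
	#include <stdlib.h>

	struct fake_port {
		void *data;              /* plays the role of device_node::data */
	};

	struct demo_device {
		int datapairs;
	};

	static int demo_init_port(struct fake_port *port, int datapairs)
	{
		struct demo_device *d = calloc(1, sizeof(*d));  /* kzalloc() stand-in */

		if (!d)
			return -1;

		d->datapairs = datapairs;
		port->data = d;          /* stash the instance on the port */
		return 0;
	}

	static void demo_uninit_port(struct fake_port *port)
	{
		struct demo_device *d = port->data;  /* recover the instance */

		if (!d)
			return;          /* port was never initialised */

		free(d);
		port->data = NULL;
	}

	int main(void)
	{
		struct fake_port port = { 0 };

		if (demo_init_port(&port, 2))
			return 1;
		printf("datapairs = %d\n", ((struct demo_device *)port.data)->datapairs);
		demo_uninit_port(&port);
		return 0;
	}
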
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index d58da6f..24d1ced 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/venc.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -321,12 +319,15 @@ static enum venc_videomode venc_get_videomode(const struct videomode *vm)
return VENC_MODE_UNKNOWN;
}
-static struct {
+struct venc_device {
struct platform_device *pdev;
void __iomem *base;
struct mutex venc_lock;
u32 wss_data;
struct regulator *vdda_dac_reg;
+ struct dss_device *dss;
+
+ struct dss_debugfs_entry *debugfs;
struct clk *tv_dac_clk;
@@ -336,81 +337,87 @@ static struct {
bool requires_tv_dac_clk;
struct omap_dss_device output;
-} venc;
+};
+
+#define dssdev_to_venc(dssdev) container_of(dssdev, struct venc_device, output)
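
Reviewer note: dssdev_to_venc() recovers the enclosing per-instance structure from a pointer to its embedded output member. A minimal userspace sketch of the container_of() arithmetic, using a simplified macro and illustrative struct names (the kernel version adds type checking on top of the same offsetof trick):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct output { int id; };

	struct device_instance {
		int private_state;
		struct output output;   /* embedded member, as in struct venc_device */
	};

	int main(void)
	{
		struct device_instance dev = { .private_state = 42 };
		struct output *out = &dev.output;   /* what a callback receives */
		struct device_instance *back =
			container_of(out, struct device_instance, output);

		printf("%d\n", back->private_state); /* prints 42 */
		return 0;
	}
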
-static inline void venc_write_reg(int idx, u32 val)
+static inline void venc_write_reg(struct venc_device *venc, int idx, u32 val)
{
- __raw_writel(val, venc.base + idx);
+ __raw_writel(val, venc->base + idx);
}
-static inline u32 venc_read_reg(int idx)
+static inline u32 venc_read_reg(struct venc_device *venc, int idx)
{
- u32 l = __raw_readl(venc.base + idx);
+ u32 l = __raw_readl(venc->base + idx);
return l;
}
-static void venc_write_config(const struct venc_config *config)
+static void venc_write_config(struct venc_device *venc,
+ const struct venc_config *config)
{
DSSDBG("write venc conf\n");
- venc_write_reg(VENC_LLEN, config->llen);
- venc_write_reg(VENC_FLENS, config->flens);
- venc_write_reg(VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr);
- venc_write_reg(VENC_C_PHASE, config->c_phase);
- venc_write_reg(VENC_GAIN_U, config->gain_u);
- venc_write_reg(VENC_GAIN_V, config->gain_v);
- venc_write_reg(VENC_GAIN_Y, config->gain_y);
- venc_write_reg(VENC_BLACK_LEVEL, config->black_level);
- venc_write_reg(VENC_BLANK_LEVEL, config->blank_level);
- venc_write_reg(VENC_M_CONTROL, config->m_control);
- venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
- venc.wss_data);
- venc_write_reg(VENC_S_CARR, config->s_carr);
- venc_write_reg(VENC_L21__WC_CTL, config->l21__wc_ctl);
- venc_write_reg(VENC_SAVID__EAVID, config->savid__eavid);
- venc_write_reg(VENC_FLEN__FAL, config->flen__fal);
- venc_write_reg(VENC_LAL__PHASE_RESET, config->lal__phase_reset);
- venc_write_reg(VENC_HS_INT_START_STOP_X, config->hs_int_start_stop_x);
- venc_write_reg(VENC_HS_EXT_START_STOP_X, config->hs_ext_start_stop_x);
- venc_write_reg(VENC_VS_INT_START_X, config->vs_int_start_x);
- venc_write_reg(VENC_VS_INT_STOP_X__VS_INT_START_Y,
+ venc_write_reg(venc, VENC_LLEN, config->llen);
+ venc_write_reg(venc, VENC_FLENS, config->flens);
+ venc_write_reg(venc, VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr);
+ venc_write_reg(venc, VENC_C_PHASE, config->c_phase);
+ venc_write_reg(venc, VENC_GAIN_U, config->gain_u);
+ venc_write_reg(venc, VENC_GAIN_V, config->gain_v);
+ venc_write_reg(venc, VENC_GAIN_Y, config->gain_y);
+ venc_write_reg(venc, VENC_BLACK_LEVEL, config->black_level);
+ venc_write_reg(venc, VENC_BLANK_LEVEL, config->blank_level);
+ venc_write_reg(venc, VENC_M_CONTROL, config->m_control);
+ venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
+ venc->wss_data);
+ venc_write_reg(venc, VENC_S_CARR, config->s_carr);
+ venc_write_reg(venc, VENC_L21__WC_CTL, config->l21__wc_ctl);
+ venc_write_reg(venc, VENC_SAVID__EAVID, config->savid__eavid);
+ venc_write_reg(venc, VENC_FLEN__FAL, config->flen__fal);
+ venc_write_reg(venc, VENC_LAL__PHASE_RESET, config->lal__phase_reset);
+ venc_write_reg(venc, VENC_HS_INT_START_STOP_X,
+ config->hs_int_start_stop_x);
+ venc_write_reg(venc, VENC_HS_EXT_START_STOP_X,
+ config->hs_ext_start_stop_x);
+ venc_write_reg(venc, VENC_VS_INT_START_X, config->vs_int_start_x);
+ venc_write_reg(venc, VENC_VS_INT_STOP_X__VS_INT_START_Y,
config->vs_int_stop_x__vs_int_start_y);
- venc_write_reg(VENC_VS_INT_STOP_Y__VS_EXT_START_X,
+ venc_write_reg(venc, VENC_VS_INT_STOP_Y__VS_EXT_START_X,
config->vs_int_stop_y__vs_ext_start_x);
- venc_write_reg(VENC_VS_EXT_STOP_X__VS_EXT_START_Y,
+ venc_write_reg(venc, VENC_VS_EXT_STOP_X__VS_EXT_START_Y,
config->vs_ext_stop_x__vs_ext_start_y);
- venc_write_reg(VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y);
- venc_write_reg(VENC_AVID_START_STOP_X, config->avid_start_stop_x);
- venc_write_reg(VENC_AVID_START_STOP_Y, config->avid_start_stop_y);
- venc_write_reg(VENC_FID_INT_START_X__FID_INT_START_Y,
+ venc_write_reg(venc, VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y);
+ venc_write_reg(venc, VENC_AVID_START_STOP_X, config->avid_start_stop_x);
+ venc_write_reg(venc, VENC_AVID_START_STOP_Y, config->avid_start_stop_y);
+ venc_write_reg(venc, VENC_FID_INT_START_X__FID_INT_START_Y,
config->fid_int_start_x__fid_int_start_y);
- venc_write_reg(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X,
+ venc_write_reg(venc, VENC_FID_INT_OFFSET_Y__FID_EXT_START_X,
config->fid_int_offset_y__fid_ext_start_x);
- venc_write_reg(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y,
+ venc_write_reg(venc, VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y,
config->fid_ext_start_y__fid_ext_offset_y);
- venc_write_reg(VENC_DAC_B__DAC_C, venc_read_reg(VENC_DAC_B__DAC_C));
- venc_write_reg(VENC_VIDOUT_CTRL, config->vidout_ctrl);
- venc_write_reg(VENC_HFLTR_CTRL, config->hfltr_ctrl);
- venc_write_reg(VENC_X_COLOR, config->x_color);
- venc_write_reg(VENC_LINE21, config->line21);
- venc_write_reg(VENC_LN_SEL, config->ln_sel);
- venc_write_reg(VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger);
- venc_write_reg(VENC_TVDETGP_INT_START_STOP_X,
+ venc_write_reg(venc, VENC_DAC_B__DAC_C,
+ venc_read_reg(venc, VENC_DAC_B__DAC_C));
+ venc_write_reg(venc, VENC_VIDOUT_CTRL, config->vidout_ctrl);
+ venc_write_reg(venc, VENC_HFLTR_CTRL, config->hfltr_ctrl);
+ venc_write_reg(venc, VENC_X_COLOR, config->x_color);
+ venc_write_reg(venc, VENC_LINE21, config->line21);
+ venc_write_reg(venc, VENC_LN_SEL, config->ln_sel);
+ venc_write_reg(venc, VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger);
+ venc_write_reg(venc, VENC_TVDETGP_INT_START_STOP_X,
config->tvdetgp_int_start_stop_x);
- venc_write_reg(VENC_TVDETGP_INT_START_STOP_Y,
+ venc_write_reg(venc, VENC_TVDETGP_INT_START_STOP_Y,
config->tvdetgp_int_start_stop_y);
- venc_write_reg(VENC_GEN_CTRL, config->gen_ctrl);
- venc_write_reg(VENC_F_CONTROL, config->f_control);
- venc_write_reg(VENC_SYNC_CTRL, config->sync_ctrl);
+ venc_write_reg(venc, VENC_GEN_CTRL, config->gen_ctrl);
+ venc_write_reg(venc, VENC_F_CONTROL, config->f_control);
+ venc_write_reg(venc, VENC_SYNC_CTRL, config->sync_ctrl);
}
-static void venc_reset(void)
+static void venc_reset(struct venc_device *venc)
{
int t = 1000;
- venc_write_reg(VENC_F_CONTROL, 1<<8);
- while (venc_read_reg(VENC_F_CONTROL) & (1<<8)) {
+ venc_write_reg(venc, VENC_F_CONTROL, 1<<8);
+ while (venc_read_reg(venc, VENC_F_CONTROL) & (1<<8)) {
if (--t == 0) {
DSSERR("Failed to reset venc\n");
return;
@@ -424,24 +431,24 @@ static void venc_reset(void)
#endif
}
-static int venc_runtime_get(void)
+static int venc_runtime_get(struct venc_device *venc)
{
int r;
DSSDBG("venc_runtime_get\n");
- r = pm_runtime_get_sync(&venc.pdev->dev);
+ r = pm_runtime_get_sync(&venc->pdev->dev);
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
-static void venc_runtime_put(void)
+static void venc_runtime_put(struct venc_device *venc)
{
int r;
DSSDBG("venc_runtime_put\n");
- r = pm_runtime_put_sync(&venc.pdev->dev);
+ r = pm_runtime_put_sync(&venc->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
@@ -457,119 +464,119 @@ static const struct venc_config *venc_timings_to_config(struct videomode *vm)
}
}
-static int venc_power_on(struct omap_dss_device *dssdev)
+static int venc_power_on(struct venc_device *venc)
{
- enum omap_channel channel = dssdev->dispc_channel;
u32 l;
int r;
- r = venc_runtime_get();
+ r = venc_runtime_get(venc);
if (r)
goto err0;
- venc_reset();
- venc_write_config(venc_timings_to_config(&venc.vm));
+ venc_reset(venc);
+ venc_write_config(venc, venc_timings_to_config(&venc->vm));
- dss_set_venc_output(venc.type);
- dss_set_dac_pwrdn_bgz(1);
+ dss_set_venc_output(venc->dss, venc->type);
+ dss_set_dac_pwrdn_bgz(venc->dss, 1);
l = 0;
- if (venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE)
+ if (venc->type == OMAP_DSS_VENC_TYPE_COMPOSITE)
l |= 1 << 1;
else /* S-Video */
l |= (1 << 0) | (1 << 2);
- if (venc.invert_polarity == false)
+ if (venc->invert_polarity == false)
l |= 1 << 3;
- venc_write_reg(VENC_OUTPUT_CONTROL, l);
+ venc_write_reg(venc, VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(channel, &venc.vm);
+ dss_mgr_set_timings(&venc->output, &venc->vm);
- r = regulator_enable(venc.vdda_dac_reg);
+ r = regulator_enable(venc->vdda_dac_reg);
if (r)
goto err1;
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&venc->output);
if (r)
goto err2;
return 0;
err2:
- regulator_disable(venc.vdda_dac_reg);
+ regulator_disable(venc->vdda_dac_reg);
err1:
- venc_write_reg(VENC_OUTPUT_CONTROL, 0);
- dss_set_dac_pwrdn_bgz(0);
+ venc_write_reg(venc, VENC_OUTPUT_CONTROL, 0);
+ dss_set_dac_pwrdn_bgz(venc->dss, 0);
- venc_runtime_put();
+ venc_runtime_put(venc);
err0:
return r;
}
-static void venc_power_off(struct omap_dss_device *dssdev)
+static void venc_power_off(struct venc_device *venc)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ venc_write_reg(venc, VENC_OUTPUT_CONTROL, 0);
+ dss_set_dac_pwrdn_bgz(venc->dss, 0);
- venc_write_reg(VENC_OUTPUT_CONTROL, 0);
- dss_set_dac_pwrdn_bgz(0);
+ dss_mgr_disable(&venc->output);
- dss_mgr_disable(channel);
+ regulator_disable(venc->vdda_dac_reg);
- regulator_disable(venc.vdda_dac_reg);
-
- venc_runtime_put();
+ venc_runtime_put(venc);
}
static int venc_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *out = &venc.output;
+ struct venc_device *venc = dssdev_to_venc(dssdev);
int r;
DSSDBG("venc_display_enable\n");
- mutex_lock(&venc.venc_lock);
+ mutex_lock(&venc->venc_lock);
- if (!out->dispc_channel_connected) {
+ if (!dssdev->dispc_channel_connected) {
DSSERR("Failed to enable display: no output/manager\n");
r = -ENODEV;
goto err0;
}
- r = venc_power_on(dssdev);
+ r = venc_power_on(venc);
if (r)
goto err0;
- venc.wss_data = 0;
+ venc->wss_data = 0;
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
return 0;
err0:
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
return r;
}
static void venc_display_disable(struct omap_dss_device *dssdev)
{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
+
DSSDBG("venc_display_disable\n");
- mutex_lock(&venc.venc_lock);
+ mutex_lock(&venc->venc_lock);
- venc_power_off(dssdev);
+ venc_power_off(venc);
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
}
static void venc_set_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
struct videomode actual_vm;
DSSDBG("venc_set_timings\n");
- mutex_lock(&venc.venc_lock);
+ mutex_lock(&venc->venc_lock);
switch (venc_get_videomode(vm)) {
default:
@@ -583,14 +590,14 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.vm, &actual_vm, sizeof(actual_vm)))
- venc.wss_data = 0;
+ if (memcmp(&venc->vm, &actual_vm, sizeof(actual_vm)))
+ venc->wss_data = 0;
- venc.vm = actual_vm;
+ venc->vm = actual_vm;
- dispc_set_tv_pclk(13500000);
+ dispc_set_tv_pclk(venc->dss->dispc, 13500000);
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
}
static int venc_check_timings(struct omap_dss_device *dssdev,
@@ -610,127 +617,136 @@ static int venc_check_timings(struct omap_dss_device *dssdev,
static void venc_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- mutex_lock(&venc.venc_lock);
+ struct venc_device *venc = dssdev_to_venc(dssdev);
- *vm = venc.vm;
+ mutex_lock(&venc->venc_lock);
- mutex_unlock(&venc.venc_lock);
+ *vm = venc->vm;
+
+ mutex_unlock(&venc->venc_lock);
}
static u32 venc_get_wss(struct omap_dss_device *dssdev)
{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
+
/* Invert due to VENC_L21_WC_CTL:INV=1 */
- return (venc.wss_data >> 8) ^ 0xfffff;
+ return (venc->wss_data >> 8) ^ 0xfffff;
}
static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
const struct venc_config *config;
int r;
DSSDBG("venc_set_wss\n");
- mutex_lock(&venc.venc_lock);
+ mutex_lock(&venc->venc_lock);
- config = venc_timings_to_config(&venc.vm);
+ config = venc_timings_to_config(&venc->vm);
/* Invert due to VENC_L21_WC_CTL:INV=1 */
- venc.wss_data = (wss ^ 0xfffff) << 8;
+ venc->wss_data = (wss ^ 0xfffff) << 8;
- r = venc_runtime_get();
+ r = venc_runtime_get(venc);
if (r)
goto err;
- venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
- venc.wss_data);
+ venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
+ venc->wss_data);
- venc_runtime_put();
+ venc_runtime_put(venc);
err:
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
return r;
}
-static int venc_init_regulator(void)
+static int venc_init_regulator(struct venc_device *venc)
{
struct regulator *vdda_dac;
- if (venc.vdda_dac_reg != NULL)
+ if (venc->vdda_dac_reg != NULL)
return 0;
- vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda");
+ vdda_dac = devm_regulator_get(&venc->pdev->dev, "vdda");
if (IS_ERR(vdda_dac)) {
if (PTR_ERR(vdda_dac) != -EPROBE_DEFER)
DSSERR("can't get VDDA_DAC regulator\n");
return PTR_ERR(vdda_dac);
}
- venc.vdda_dac_reg = vdda_dac;
+ venc->vdda_dac_reg = vdda_dac;
return 0;
}
-static void venc_dump_regs(struct seq_file *s)
+static int venc_dump_regs(struct seq_file *s, void *p)
{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
+ struct venc_device *venc = s->private;
- if (venc_runtime_get())
- return;
+#define DUMPREG(venc, r) \
+ seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(venc, r))
+
+ if (venc_runtime_get(venc))
+ return 0;
- DUMPREG(VENC_F_CONTROL);
- DUMPREG(VENC_VIDOUT_CTRL);
- DUMPREG(VENC_SYNC_CTRL);
- DUMPREG(VENC_LLEN);
- DUMPREG(VENC_FLENS);
- DUMPREG(VENC_HFLTR_CTRL);
- DUMPREG(VENC_CC_CARR_WSS_CARR);
- DUMPREG(VENC_C_PHASE);
- DUMPREG(VENC_GAIN_U);
- DUMPREG(VENC_GAIN_V);
- DUMPREG(VENC_GAIN_Y);
- DUMPREG(VENC_BLACK_LEVEL);
- DUMPREG(VENC_BLANK_LEVEL);
- DUMPREG(VENC_X_COLOR);
- DUMPREG(VENC_M_CONTROL);
- DUMPREG(VENC_BSTAMP_WSS_DATA);
- DUMPREG(VENC_S_CARR);
- DUMPREG(VENC_LINE21);
- DUMPREG(VENC_LN_SEL);
- DUMPREG(VENC_L21__WC_CTL);
- DUMPREG(VENC_HTRIGGER_VTRIGGER);
- DUMPREG(VENC_SAVID__EAVID);
- DUMPREG(VENC_FLEN__FAL);
- DUMPREG(VENC_LAL__PHASE_RESET);
- DUMPREG(VENC_HS_INT_START_STOP_X);
- DUMPREG(VENC_HS_EXT_START_STOP_X);
- DUMPREG(VENC_VS_INT_START_X);
- DUMPREG(VENC_VS_INT_STOP_X__VS_INT_START_Y);
- DUMPREG(VENC_VS_INT_STOP_Y__VS_EXT_START_X);
- DUMPREG(VENC_VS_EXT_STOP_X__VS_EXT_START_Y);
- DUMPREG(VENC_VS_EXT_STOP_Y);
- DUMPREG(VENC_AVID_START_STOP_X);
- DUMPREG(VENC_AVID_START_STOP_Y);
- DUMPREG(VENC_FID_INT_START_X__FID_INT_START_Y);
- DUMPREG(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X);
- DUMPREG(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y);
- DUMPREG(VENC_TVDETGP_INT_START_STOP_X);
- DUMPREG(VENC_TVDETGP_INT_START_STOP_Y);
- DUMPREG(VENC_GEN_CTRL);
- DUMPREG(VENC_OUTPUT_CONTROL);
- DUMPREG(VENC_OUTPUT_TEST);
-
- venc_runtime_put();
+ DUMPREG(venc, VENC_F_CONTROL);
+ DUMPREG(venc, VENC_VIDOUT_CTRL);
+ DUMPREG(venc, VENC_SYNC_CTRL);
+ DUMPREG(venc, VENC_LLEN);
+ DUMPREG(venc, VENC_FLENS);
+ DUMPREG(venc, VENC_HFLTR_CTRL);
+ DUMPREG(venc, VENC_CC_CARR_WSS_CARR);
+ DUMPREG(venc, VENC_C_PHASE);
+ DUMPREG(venc, VENC_GAIN_U);
+ DUMPREG(venc, VENC_GAIN_V);
+ DUMPREG(venc, VENC_GAIN_Y);
+ DUMPREG(venc, VENC_BLACK_LEVEL);
+ DUMPREG(venc, VENC_BLANK_LEVEL);
+ DUMPREG(venc, VENC_X_COLOR);
+ DUMPREG(venc, VENC_M_CONTROL);
+ DUMPREG(venc, VENC_BSTAMP_WSS_DATA);
+ DUMPREG(venc, VENC_S_CARR);
+ DUMPREG(venc, VENC_LINE21);
+ DUMPREG(venc, VENC_LN_SEL);
+ DUMPREG(venc, VENC_L21__WC_CTL);
+ DUMPREG(venc, VENC_HTRIGGER_VTRIGGER);
+ DUMPREG(venc, VENC_SAVID__EAVID);
+ DUMPREG(venc, VENC_FLEN__FAL);
+ DUMPREG(venc, VENC_LAL__PHASE_RESET);
+ DUMPREG(venc, VENC_HS_INT_START_STOP_X);
+ DUMPREG(venc, VENC_HS_EXT_START_STOP_X);
+ DUMPREG(venc, VENC_VS_INT_START_X);
+ DUMPREG(venc, VENC_VS_INT_STOP_X__VS_INT_START_Y);
+ DUMPREG(venc, VENC_VS_INT_STOP_Y__VS_EXT_START_X);
+ DUMPREG(venc, VENC_VS_EXT_STOP_X__VS_EXT_START_Y);
+ DUMPREG(venc, VENC_VS_EXT_STOP_Y);
+ DUMPREG(venc, VENC_AVID_START_STOP_X);
+ DUMPREG(venc, VENC_AVID_START_STOP_Y);
+ DUMPREG(venc, VENC_FID_INT_START_X__FID_INT_START_Y);
+ DUMPREG(venc, VENC_FID_INT_OFFSET_Y__FID_EXT_START_X);
+ DUMPREG(venc, VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y);
+ DUMPREG(venc, VENC_TVDETGP_INT_START_STOP_X);
+ DUMPREG(venc, VENC_TVDETGP_INT_START_STOP_Y);
+ DUMPREG(venc, VENC_GEN_CTRL);
+ DUMPREG(venc, VENC_OUTPUT_CONTROL);
+ DUMPREG(venc, VENC_OUTPUT_TEST);
+
+ venc_runtime_put(venc);
#undef DUMPREG
+ return 0;
}
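
Reviewer note: venc_dump_regs() is now a seq_file show callback that finds its instance through s->private, matching the dss_debugfs_create_file(dss, "venc", venc_dump_regs, venc) registration further down. That helper is part of this series; the generic debugfs idiom it presumably wraps is the standard single_open() plumbing sketched below, with illustrative names (demo_device, demo_regs_*), not omapdrm code:

	#include <linux/debugfs.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>
	#include <linux/types.h>

	struct demo_device { u32 saved_reg; };      /* illustrative only */

	static int demo_regs_show(struct seq_file *s, void *unused)
	{
		struct demo_device *demo = s->private;  /* instance passed at open */

		seq_printf(s, "SAVED_REG %08x\n", demo->saved_reg);
		return 0;
	}

	static int demo_regs_open(struct inode *inode, struct file *file)
	{
		/* inode->i_private is the 'data' given to debugfs_create_file() */
		return single_open(file, demo_regs_show, inode->i_private);
	}

	static const struct file_operations demo_regs_fops = {
		.owner = THIS_MODULE,
		.open = demo_regs_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
	};

	/* registration, e.g. from probe():
	 *	debugfs_create_file("demo_regs", 0444, parent, demo, &demo_regs_fops);
	 */
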
-static int venc_get_clocks(struct platform_device *pdev)
+static int venc_get_clocks(struct venc_device *venc)
{
struct clk *clk;
- if (venc.requires_tv_dac_clk) {
- clk = devm_clk_get(&pdev->dev, "tv_dac_clk");
+ if (venc->requires_tv_dac_clk) {
+ clk = devm_clk_get(&venc->pdev->dev, "tv_dac_clk");
if (IS_ERR(clk)) {
DSSERR("can't get tv_dac_clk\n");
return PTR_ERR(clk);
@@ -739,7 +755,7 @@ static int venc_get_clocks(struct platform_device *pdev)
clk = NULL;
}
- venc.tv_dac_clk = clk;
+ venc->tv_dac_clk = clk;
return 0;
}
@@ -747,14 +763,14 @@ static int venc_get_clocks(struct platform_device *pdev)
static int venc_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct venc_device *venc = dssdev_to_venc(dssdev);
int r;
- r = venc_init_regulator();
+ r = venc_init_regulator(venc);
if (r)
return r;
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&venc->output, dssdev);
if (r)
return r;
@@ -762,7 +778,7 @@ static int venc_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&venc->output, dssdev);
return r;
}
@@ -772,7 +788,7 @@ static int venc_connect(struct omap_dss_device *dssdev,
static void venc_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct venc_device *venc = dssdev_to_venc(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -781,7 +797,7 @@ static void venc_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&venc->output, dssdev);
}
static const struct omapdss_atv_ops venc_ops = {
@@ -799,11 +815,11 @@ static const struct omapdss_atv_ops venc_ops = {
.get_wss = venc_get_wss,
};
-static void venc_init_output(struct platform_device *pdev)
+static void venc_init_output(struct venc_device *venc)
{
- struct omap_dss_device *out = &venc.output;
+ struct omap_dss_device *out = &venc->output;
- out->dev = &pdev->dev;
+ out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->output_type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
@@ -814,16 +830,14 @@ static void venc_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void venc_uninit_output(struct platform_device *pdev)
+static void venc_uninit_output(struct venc_device *venc)
{
- struct omap_dss_device *out = &venc.output;
-
- omapdss_unregister_output(out);
+ omapdss_unregister_output(&venc->output);
}
-static int venc_probe_of(struct platform_device *pdev)
+static int venc_probe_of(struct venc_device *venc)
{
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node = venc->pdev->dev.of_node;
struct device_node *ep;
u32 channels;
int r;
@@ -832,24 +846,25 @@ static int venc_probe_of(struct platform_device *pdev)
if (!ep)
return 0;
- venc.invert_polarity = of_property_read_bool(ep, "ti,invert-polarity");
+ venc->invert_polarity = of_property_read_bool(ep, "ti,invert-polarity");
r = of_property_read_u32(ep, "ti,channels", &channels);
if (r) {
- dev_err(&pdev->dev,
+ dev_err(&venc->pdev->dev,
"failed to read property 'ti,channels': %d\n", r);
goto err;
}
switch (channels) {
case 1:
- venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE;
+ venc->type = OMAP_DSS_VENC_TYPE_COMPOSITE;
break;
case 2:
- venc.type = OMAP_DSS_VENC_TYPE_SVIDEO;
+ venc->type = OMAP_DSS_VENC_TYPE_SVIDEO;
break;
default:
- dev_err(&pdev->dev, "bad channel propert '%d'\n", channels);
+ dev_err(&venc->pdev->dev, "bad channel property '%d'\n",

+ channels);
r = -EINVAL;
goto err;
}
@@ -857,10 +872,10 @@ static int venc_probe_of(struct platform_device *pdev)
of_node_put(ep);
return 0;
+
err:
of_node_put(ep);
-
- return 0;
+ return r;
}
/* VENC HW IP initialisation */
@@ -873,65 +888,82 @@ static const struct soc_device_attribute venc_soc_devices[] = {
static int venc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct dss_device *dss = dss_get_device(master);
+ struct venc_device *venc;
u8 rev_id;
struct resource *venc_mem;
int r;
- venc.pdev = pdev;
+ venc = kzalloc(sizeof(*venc), GFP_KERNEL);
+ if (!venc)
+ return -ENOMEM;
+
+ venc->pdev = pdev;
+ venc->dss = dss;
+ dev_set_drvdata(dev, venc);
/* The OMAP34xx, OMAP35xx and AM35xx VENC require the TV DAC clock. */
if (soc_device_match(venc_soc_devices))
- venc.requires_tv_dac_clk = true;
+ venc->requires_tv_dac_clk = true;
- mutex_init(&venc.venc_lock);
+ mutex_init(&venc->venc_lock);
- venc.wss_data = 0;
+ venc->wss_data = 0;
- venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
- venc.base = devm_ioremap_resource(&pdev->dev, venc_mem);
- if (IS_ERR(venc.base))
- return PTR_ERR(venc.base);
+ venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
+ venc->base = devm_ioremap_resource(&pdev->dev, venc_mem);
+ if (IS_ERR(venc->base)) {
+ r = PTR_ERR(venc->base);
+ goto err_free;
+ }
- r = venc_get_clocks(pdev);
+ r = venc_get_clocks(venc);
if (r)
- return r;
+ goto err_free;
pm_runtime_enable(&pdev->dev);
- r = venc_runtime_get();
+ r = venc_runtime_get(venc);
if (r)
goto err_runtime_get;
- rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
+ rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff);
dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
- venc_runtime_put();
+ venc_runtime_put(venc);
- r = venc_probe_of(pdev);
+ r = venc_probe_of(venc);
if (r) {
DSSERR("Invalid DT data\n");
goto err_probe_of;
}
- dss_debugfs_create_file("venc", venc_dump_regs);
+ venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs,
+ venc);
- venc_init_output(pdev);
+ venc_init_output(venc);
return 0;
err_probe_of:
err_runtime_get:
pm_runtime_disable(&pdev->dev);
+err_free:
+ kfree(venc);
return r;
}
static void venc_unbind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct venc_device *venc = dev_get_drvdata(dev);
- venc_uninit_output(pdev);
+ dss_debugfs_remove_file(venc->debugfs);
- pm_runtime_disable(&pdev->dev);
+ venc_uninit_output(venc);
+
+ pm_runtime_disable(dev);
+
+ kfree(venc);
}
static const struct component_ops venc_component_ops = {
@@ -952,24 +984,27 @@ static int venc_remove(struct platform_device *pdev)
static int venc_runtime_suspend(struct device *dev)
{
- if (venc.tv_dac_clk)
- clk_disable_unprepare(venc.tv_dac_clk);
+ struct venc_device *venc = dev_get_drvdata(dev);
- dispc_runtime_put();
+ if (venc->tv_dac_clk)
+ clk_disable_unprepare(venc->tv_dac_clk);
+
+ dispc_runtime_put(venc->dss->dispc);
return 0;
}
static int venc_runtime_resume(struct device *dev)
{
+ struct venc_device *venc = dev_get_drvdata(dev);
int r;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(venc->dss->dispc);
if (r < 0)
return r;
- if (venc.tv_dac_clk)
- clk_prepare_enable(venc.tv_dac_clk);
+ if (venc->tv_dac_clk)
+ clk_prepare_enable(venc->tv_dac_clk);
return 0;
}
@@ -986,7 +1021,7 @@ static const struct of_device_id venc_of_match[] = {
{},
};
-static struct platform_driver omap_venchw_driver = {
+struct platform_driver omap_venchw_driver = {
.probe = venc_probe,
.remove = venc_remove,
.driver = {
@@ -996,13 +1031,3 @@ static struct platform_driver omap_venchw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init venc_init_platform_driver(void)
-{
- return platform_driver_register(&omap_venchw_driver);
-}
-
-void venc_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_venchw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index 38a239c..585ed94 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -1,13 +1,15 @@
/*
-* Copyright (C) 2014 Texas Instruments Ltd
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 as published by
-* the Free Software Foundation.
-*
-* You should have received a copy of the GNU General Public License along with
-* this program. If not, see <http://www.gnu.org/licenses/>.
-*/
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
#include <linux/clk.h>
#include <linux/delay.h>
@@ -62,11 +64,11 @@ static int dss_video_pll_enable(struct dss_pll *pll)
struct dss_video_pll *vpll = container_of(pll, struct dss_video_pll, pll);
int r;
- r = dss_runtime_get();
+ r = dss_runtime_get(pll->dss);
if (r)
return r;
- dss_ctrl_pll_enable(pll->id, true);
+ dss_ctrl_pll_enable(pll, true);
dss_dpll_enable_scp_clk(vpll);
@@ -80,8 +82,8 @@ static int dss_video_pll_enable(struct dss_pll *pll)
err_reset:
dss_dpll_disable_scp_clk(vpll);
- dss_ctrl_pll_enable(pll->id, false);
- dss_runtime_put();
+ dss_ctrl_pll_enable(pll, false);
+ dss_runtime_put(pll->dss);
return r;
}
@@ -94,9 +96,9 @@ static void dss_video_pll_disable(struct dss_pll *pll)
dss_dpll_disable_scp_clk(vpll);
- dss_ctrl_pll_enable(pll->id, false);
+ dss_ctrl_pll_enable(pll, false);
- dss_runtime_put();
+ dss_runtime_put(pll->dss);
}
static const struct dss_pll_ops dss_pll_ops = {
@@ -134,8 +136,9 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.errata_i886 = true,
};
-struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
- struct regulator *regulator)
+struct dss_pll *dss_video_pll_init(struct dss_device *dss,
+ struct platform_device *pdev, int id,
+ struct regulator *regulator)
{
const char * const reg_name[] = { "pll1", "pll2" };
const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" };
@@ -188,7 +191,7 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
pll->hw = &dss_dra7_video_pll_hw;
pll->ops = &dss_pll_ops;
- r = dss_pll_register(pll);
+ r = dss_pll_register(dss, pll);
if (r)
return ERR_PTR(r);
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index aa5ba9a..a0d7b1d 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_connector.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -149,6 +147,12 @@ static int omap_connector_get_modes(struct drm_connector *connector)
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
+ if (dssdrv->get_size) {
+ dssdrv->get_size(dssdev,
+ &connector->display_info.width_mm,
+ &connector->display_info.height_mm);
+ }
+
n = 1;
}
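
Reviewer note: the new get_size() call fills in the connector's physical dimensions when the display driver provides the op. A hedged sketch of a panel-side implementation; the signature is inferred from the call site above, and the helper name and dimensions are made up for illustration:

	static void demo_panel_get_size(struct omap_dss_device *dssdev,
					unsigned int *width, unsigned int *height)
	{
		/* physical active area of the panel, in millimetres */
		*width = 154;
		*height = 86;
	}
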
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
new file mode 100644
index 0000000..98bbc77
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -0,0 +1,37 @@
+/*
+ * omap_connector.h -- OMAP DRM Connector
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_CONNECTOR_H__
+#define __OMAPDRM_CONNECTOR_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_device;
+struct drm_encoder;
+struct omap_dss_device;
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev,
+ struct drm_encoder *encoder);
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector);
+bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
+
+#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index cc85c16..6c4d40b 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_crtc.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -23,6 +21,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
+#include <linux/math64.h>
#include "omap_drv.h"
@@ -114,15 +113,17 @@ static struct omap_crtc *omap_crtcs[8];
static struct omap_dss_device *omap_crtc_output[8];
/* we can probably ignore these until we support command-mode panels: */
-static int omap_crtc_dss_connect(enum omap_channel channel,
+static int omap_crtc_dss_connect(struct omap_drm_private *priv,
+ enum omap_channel channel,
struct omap_dss_device *dst)
{
- const struct dispc_ops *dispc_ops = dispc_get_ops();
+ const struct dispc_ops *dispc_ops = priv->dispc_ops;
+ struct dispc_device *dispc = priv->dispc;
if (omap_crtc_output[channel])
return -EINVAL;
- if ((dispc_ops->mgr_get_supported_outputs(channel) & dst->id) == 0)
+ if (!(dispc_ops->mgr_get_supported_outputs(dispc, channel) & dst->id))
return -EINVAL;
omap_crtc_output[channel] = dst;
@@ -131,14 +132,16 @@ static int omap_crtc_dss_connect(enum omap_channel channel,
return 0;
}
-static void omap_crtc_dss_disconnect(enum omap_channel channel,
+static void omap_crtc_dss_disconnect(struct omap_drm_private *priv,
+ enum omap_channel channel,
struct omap_dss_device *dst)
{
omap_crtc_output[channel] = NULL;
dst->dispc_channel_connected = false;
}
-static void omap_crtc_dss_start_update(enum omap_channel channel)
+static void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+ enum omap_channel channel)
{
}
@@ -157,7 +160,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
return;
if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) {
- priv->dispc_ops->mgr_enable(channel, enable);
+ priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
}
@@ -170,8 +173,9 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
omap_crtc->ignore_digit_sync_lost = true;
}
- framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(channel);
- vsync_irq = priv->dispc_ops->mgr_get_vsync_irq(channel);
+ framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(priv->dispc,
+ channel);
+ vsync_irq = priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel);
if (enable) {
wait = omap_irq_wait_init(dev, vsync_irq, 1);
@@ -191,7 +195,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
wait = omap_irq_wait_init(dev, vsync_irq, 2);
}
- priv->dispc_ops->mgr_enable(channel, enable);
+ priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
@@ -208,25 +212,28 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
-static int omap_crtc_dss_enable(enum omap_channel channel)
+static int omap_crtc_dss_enable(struct omap_drm_private *priv,
+ enum omap_channel channel)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
- struct omap_drm_private *priv = omap_crtc->base.dev->dev_private;
- priv->dispc_ops->mgr_set_timings(omap_crtc->channel, &omap_crtc->vm);
+ priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel,
+ &omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
}
-static void omap_crtc_dss_disable(enum omap_channel channel)
+static void omap_crtc_dss_disable(struct omap_drm_private *priv,
+ enum omap_channel channel)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
omap_crtc_set_enabled(&omap_crtc->base, false);
}
-static void omap_crtc_dss_set_timings(enum omap_channel channel,
+static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+ enum omap_channel channel,
const struct videomode *vm)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
@@ -234,25 +241,26 @@ static void omap_crtc_dss_set_timings(enum omap_channel channel,
omap_crtc->vm = *vm;
}
-static void omap_crtc_dss_set_lcd_config(enum omap_channel channel,
+static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+ enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
- struct omap_drm_private *priv = omap_crtc->base.dev->dev_private;
DBG("%s", omap_crtc->name);
- priv->dispc_ops->mgr_set_lcd_config(omap_crtc->channel, config);
+ priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
+ config);
}
static int omap_crtc_dss_register_framedone(
- enum omap_channel channel,
+ struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
return 0;
}
static void omap_crtc_dss_unregister_framedone(
- enum omap_channel channel,
+ struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
}
@@ -273,7 +281,7 @@ static const struct dss_mgr_ops mgr_ops = {
* Setup, Flush and Page Flip
*/
-void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus)
+void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -298,7 +306,7 @@ void omap_crtc_vblank_irq(struct drm_crtc *crtc)
* If the dispc is busy we're racing the flush operation. Try again on
* the next vblank interrupt.
*/
- if (priv->dispc_ops->mgr_go_busy(omap_crtc->channel)) {
+ if (priv->dispc_ops->mgr_go_busy(priv->dispc, omap_crtc->channel)) {
spin_unlock(&crtc->dev->event_lock);
return;
}
@@ -335,7 +343,7 @@ static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
info.partial_alpha_enabled = false;
info.cpr_enable = false;
- priv->dispc_ops->mgr_setup(omap_crtc->channel, &info);
+ priv->dispc_ops->mgr_setup(priv->dispc, omap_crtc->channel, &info);
}
/* -----------------------------------------------------------------------------
@@ -400,6 +408,41 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
}
+static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+
+ /* Check for bandwidth limit */
+ if (priv->max_bandwidth) {
+ /*
+ * Estimation for the bandwidth need of a given mode with one
+ * full screen plane:
+ * bandwidth = resolution * 32bpp * (pclk / (vtotal * htotal))
+ * ^^ Refresh rate ^^
+ *
+ * The interlaced mode is taken into account by using the
+ * pixelclock in the calculation.
+ *
+ * The equation is rearranged for 64bit arithmetic.
+ */
+ uint64_t bandwidth = mode->clock * 1000;
+ unsigned int bpp = 4;
+
+ bandwidth = bandwidth * mode->hdisplay * mode->vdisplay * bpp;
+ bandwidth = div_u64(bandwidth, mode->htotal * mode->vtotal);
+
+ /*
+ * Reject modes which would need more bandwidth if used with one
+ * full resolution plane (most common use case).
+ */
+ if (priv->max_bandwidth < bandwidth)
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
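
Reviewer note: a worked example of the estimate above, assuming a hypothetical 1920x1080@60 mode (148.5 MHz pixel clock, htotal 2200, vtotal 1125, 4 bytes per pixel); the result is illustrative, not an actual dispc limit:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t clock_khz = 148500, hdisplay = 1920, vdisplay = 1080;
		uint64_t htotal = 2200, vtotal = 1125, bpp = 4;

		/* same rearrangement as omap_crtc_mode_valid(): keep the large
		 * product in 64 bits, divide by the total frame size last */
		uint64_t bandwidth = clock_khz * 1000;   /* pixel clock in Hz */

		bandwidth = bandwidth * hdisplay * vdisplay * bpp;
		bandwidth /= htotal * vtotal;

		/* prints 497664000, i.e. ~498 MB/s for one full-screen ARGB plane */
		printf("%llu bytes/s\n", (unsigned long long)bandwidth);
		return 0;
	}
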
static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -458,7 +501,7 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane_state *pri_state;
if (state->color_mgmt_changed && state->gamma_lut) {
- uint length = state->gamma_lut->length /
+ unsigned int length = state->gamma_lut->length /
sizeof(struct drm_color_lut);
if (length < 2)
@@ -492,7 +535,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
if (crtc->state->color_mgmt_changed) {
struct drm_color_lut *lut = NULL;
- uint length = 0;
+ unsigned int length = 0;
if (crtc->state->gamma_lut) {
lut = (struct drm_color_lut *)
@@ -500,7 +543,8 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
length = crtc->state->gamma_lut->length /
sizeof(*lut);
}
- priv->dispc_ops->mgr_set_gamma(omap_crtc->channel, lut, length);
+ priv->dispc_ops->mgr_set_gamma(priv->dispc, omap_crtc->channel,
+ lut, length);
}
omap_crtc_write_crtc_properties(crtc);
@@ -515,7 +559,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON(ret != 0);
spin_lock_irq(&crtc->dev->event_lock);
- priv->dispc_ops->mgr_go(omap_crtc->channel);
+ priv->dispc_ops->mgr_go(priv->dispc, omap_crtc->channel);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
@@ -523,7 +567,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct drm_property *property,
- uint64_t val)
+ u64 val)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct drm_plane_state *plane_state;
@@ -551,7 +595,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
- uint64_t *val)
+ u64 *val)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc_state *omap_state = to_omap_crtc_state(state);
@@ -621,6 +665,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.atomic_flush = omap_crtc_atomic_flush,
.atomic_enable = omap_crtc_atomic_enable,
.atomic_disable = omap_crtc_atomic_disable,
+ .mode_valid = omap_crtc_mode_valid,
};
/* -----------------------------------------------------------------------------
@@ -634,11 +679,11 @@ static const char *channel_names[] = {
[OMAP_DSS_CHANNEL_LCD3] = "lcd3",
};
-void omap_crtc_pre_init(void)
+void omap_crtc_pre_init(struct omap_drm_private *priv)
{
memset(omap_crtcs, 0, sizeof(omap_crtcs));
- dss_install_mgr_ops(&mgr_ops);
+ dss_install_mgr_ops(&mgr_ops, priv);
}
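
Reviewer note: dss_install_mgr_ops() now registers the omap_drm_private pointer together with the ops table, and omapdss hands it back as the first argument of every callback. A minimal userspace sketch of that ops-plus-context pattern, with illustrative names:

	#include <stdio.h>

	struct demo_priv { int num_crtcs; };

	struct demo_mgr_ops {
		int (*enable)(struct demo_priv *priv, int channel);
	};

	static const struct demo_mgr_ops *registered_ops;
	static struct demo_priv *registered_priv;

	static void demo_install_mgr_ops(const struct demo_mgr_ops *ops,
					 struct demo_priv *priv)
	{
		registered_ops = ops;
		registered_priv = priv;
	}

	static int demo_mgr_enable(int channel)
	{
		/* the framework passes the stored context back to the driver */
		return registered_ops->enable(registered_priv, channel);
	}

	static int demo_enable(struct demo_priv *priv, int channel)
	{
		printf("enable channel %d of %d\n", channel, priv->num_crtcs);
		return 0;
	}

	int main(void)
	{
		struct demo_priv priv = { .num_crtcs = 2 };
		static const struct demo_mgr_ops ops = { .enable = demo_enable };

		demo_install_mgr_ops(&ops, &priv);
		return demo_mgr_enable(0);
	}
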
void omap_crtc_pre_uninit(void)
@@ -696,8 +741,8 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* extracted with dispc_mgr_gamma_size(). If it returns 0
* gamma table is not supported.
*/
- if (priv->dispc_ops->mgr_gamma_size(channel)) {
- uint gamma_lut_size = 256;
+ if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
+ unsigned int gamma_lut_size = 256;
drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
new file mode 100644
index 0000000..eaab2d7
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -0,0 +1,43 @@
+/*
+ * omap_crtc.h -- OMAP DRM CRTC
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_CRTC_H__
+#define __OMAPDRM_CRTC_H__
+
+#include <linux/types.h>
+
+enum omap_channel;
+
+struct drm_crtc;
+struct drm_device;
+struct drm_plane;
+struct omap_dss_device;
+struct videomode;
+
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
+void omap_crtc_pre_init(struct omap_drm_private *priv);
+void omap_crtc_pre_uninit(void);
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, struct omap_dss_device *dssdev);
+int omap_crtc_wait_pending(struct drm_crtc *crtc);
+void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus);
+void omap_crtc_vblank_irq(struct drm_crtc *crtc);
+
+#endif /* __OMAPDRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 19b7167..b42e286 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_debugfs.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 9f32a83..c2785cc 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
* Andy Gross <andy.gross@ti.com>
@@ -13,6 +12,7 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
#ifndef OMAP_DMM_PRIV_H
#define OMAP_DMM_PRIV_H
@@ -59,12 +59,12 @@
#define DMM_IRQSTAT_ERR_UPD_DATA (1<<6)
#define DMM_IRQSTAT_ERR_LUT_MISS (1<<7)
-#define DMM_IRQSTAT_ERR_MASK (DMM_IRQ_STAT_ERR_INV_DSC | \
- DMM_IRQ_STAT_ERR_INV_DATA | \
- DMM_IRQ_STAT_ERR_UPD_AREA | \
- DMM_IRQ_STAT_ERR_UPD_CTRL | \
- DMM_IRQ_STAT_ERR_UPD_DATA | \
- DMM_IRQ_STAT_ERR_LUT_MISS)
+#define DMM_IRQSTAT_ERR_MASK (DMM_IRQSTAT_ERR_INV_DSC | \
+ DMM_IRQSTAT_ERR_INV_DATA | \
+ DMM_IRQSTAT_ERR_UPD_AREA | \
+ DMM_IRQSTAT_ERR_UPD_CTRL | \
+ DMM_IRQSTAT_ERR_UPD_DATA | \
+ DMM_IRQSTAT_ERR_LUT_MISS)
#define DMM_PATSTATUS_READY (1<<0)
#define DMM_PATSTATUS_VALID (1<<1)
@@ -102,10 +102,10 @@ struct pat_ctrl {
};
struct pat {
- uint32_t next_pa;
+ u32 next_pa;
struct pat_area area;
struct pat_ctrl ctrl;
- uint32_t data_pa;
+ u32 data_pa;
};
#define DMM_FIXED_RETRY_COUNT 1000
@@ -129,7 +129,7 @@ struct dmm_txn {
void *engine_handle;
struct tcm *tcm;
- uint8_t *current_va;
+ u8 *current_va;
dma_addr_t current_pa;
struct pat *last_pat;
@@ -140,7 +140,7 @@ struct refill_engine {
struct dmm *dmm;
struct tcm *tcm;
- uint8_t *refill_va;
+ u8 *refill_va;
dma_addr_t refill_pa;
/* only one trans per engine for now */
@@ -154,7 +154,7 @@ struct refill_engine {
};
struct dmm_platform_data {
- uint32_t cpu_cache_flags;
+ u32 cpu_cache_flags;
};
struct dmm {
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index c60a85e..f9fa1c9 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -1,11 +1,10 @@
/*
* DMM IOMMU driver support functions for TI OMAP processors.
*
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
* Andy Gross <andy.gross@ti.com>
*
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
@@ -59,11 +58,11 @@ static DEFINE_SPINLOCK(list_lock);
}
static const struct {
- uint32_t x_shft; /* unused X-bits (as part of bpp) */
- uint32_t y_shft; /* unused Y-bits (as part of bpp) */
- uint32_t cpp; /* bytes/chars per pixel */
- uint32_t slot_w; /* width of each slot (in pixels) */
- uint32_t slot_h; /* height of each slot (in pixels) */
+ u32 x_shft; /* unused X-bits (as part of bpp) */
+ u32 y_shft; /* unused Y-bits (as part of bpp) */
+ u32 cpp; /* bytes/chars per pixel */
+ u32 slot_w; /* width of each slot (in pixels) */
+ u32 slot_h; /* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
[TILFMT_8BIT] = GEOM(0, 0, 1),
[TILFMT_16BIT] = GEOM(0, 1, 2),
@@ -73,7 +72,7 @@ static const struct {
/* lookup table for registers w/ per-engine instances */
-static const uint32_t reg[][4] = {
+static const u32 reg[][4] = {
[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
[PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
@@ -112,23 +111,31 @@ static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
}
/* check status and spin until wait_mask comes true */
-static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
+static int wait_status(struct refill_engine *engine, u32 wait_mask)
{
struct dmm *dmm = engine->dmm;
- uint32_t r = 0, err, i;
+ u32 r = 0, err, i;
i = DMM_FIXED_RETRY_COUNT;
while (true) {
r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
err = r & DMM_PATSTATUS_ERR;
- if (err)
+ if (err) {
+ dev_err(dmm->dev,
+ "%s: error (engine%d). PAT_STATUS: 0x%08x\n",
+ __func__, engine->id, r);
return -EFAULT;
+ }
if ((r & wait_mask) == wait_mask)
break;
- if (--i == 0)
+ if (--i == 0) {
+ dev_err(dmm->dev,
+ "%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
+ __func__, engine->id, r);
return -ETIMEDOUT;
+ }
udelay(1);
}
@@ -151,13 +158,18 @@ static void release_engine(struct refill_engine *engine)
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
struct dmm *dmm = arg;
- uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
+ u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
int i;
/* ack IRQ */
dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
for (i = 0; i < dmm->num_engines; i++) {
+ if (status & DMM_IRQSTAT_ERR_MASK)
+ dev_err(dmm->dev,
+ "irq error(engine%d): IRQSTAT 0x%02x\n",
+ i, status & 0xff);
+
if (status & DMM_IRQSTAT_LST) {
if (dmm->engines[i].async)
release_engine(&dmm->engines[i]);
@@ -214,10 +226,10 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
* corresponding slot is cleared (ie. dummy_pa is programmed)
*/
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
- struct page **pages, uint32_t npages, uint32_t roll)
+ struct page **pages, u32 npages, u32 roll)
{
dma_addr_t pat_pa = 0, data_pa = 0;
- uint32_t *data;
+ u32 *data;
struct pat *pat;
struct refill_engine *engine = txn->engine_handle;
int columns = (1 + area->x1 - area->x0);
@@ -227,7 +239,7 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
pat = alloc_dma(txn, sizeof(*pat), &pat_pa);
if (txn->last_pat)
- txn->last_pat->next_pa = (uint32_t)pat_pa;
+ txn->last_pat->next_pa = (u32)pat_pa;
pat->area = *area;
@@ -298,7 +310,12 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
msecs_to_jiffies(100))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
+ goto cleanup;
}
+
+ /* Check the engine status before continuing */
+ ret = wait_status(engine, DMM_PATSTATUS_READY |
+ DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
}
cleanup:
@@ -313,7 +330,7 @@ cleanup:
* DMM programming
*/
static int fill(struct tcm_area *area, struct page **pages,
- uint32_t npages, uint32_t roll, bool wait)
+ u32 npages, u32 roll, bool wait)
{
int ret = 0;
struct tcm_area slice, area_s;
@@ -361,7 +378,7 @@ static int fill(struct tcm_area *area, struct page **pages,
/* note: slots for which pages[i] == NULL are filled w/ dummy page
*/
int tiler_pin(struct tiler_block *block, struct page **pages,
- uint32_t npages, uint32_t roll, bool wait)
+ u32 npages, u32 roll, bool wait)
{
int ret;
@@ -381,8 +398,8 @@ int tiler_unpin(struct tiler_block *block)
/*
* Reserve/release
*/
-struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
- uint16_t h, uint16_t align)
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w,
+ u16 h, u16 align)
{
struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
u32 min_align = 128;
@@ -525,8 +542,8 @@ dma_addr_t tiler_ssptr(struct tiler_block *block)
block->area.p0.y * geom[block->fmt].slot_h);
}
-dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
- uint32_t x, uint32_t y)
+dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
+ u32 x, u32 y)
{
struct tcm_pt *p = &block->area.p0;
BUG_ON(!validfmt(block->fmt));
@@ -536,14 +553,14 @@ dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
(p->y * geom[block->fmt].slot_h) + y);
}
-void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
+void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h)
{
BUG_ON(!validfmt(fmt));
*w = round_up(*w, geom[fmt].slot_w);
*h = round_up(*h, geom[fmt].slot_h);
}
-uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
+u32 tiler_stride(enum tiler_fmt fmt, u32 orient)
{
BUG_ON(!validfmt(fmt));
@@ -553,19 +570,19 @@ uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}
-size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h)
{
tiler_align(fmt, &w, &h);
return geom[fmt].cpp * w * h;
}
-size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h)
{
BUG_ON(!validfmt(fmt));
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
-uint32_t tiler_get_cpu_cache_flags(void)
+u32 tiler_get_cpu_cache_flags(void)
{
return omap_dmm->plat_data->cpu_cache_flags;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index e83c783..835e665 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
* Andy Gross <andy.gross@ti.com>
@@ -89,30 +88,30 @@ int tiler_map_show(struct seq_file *s, void *arg);
/* pin/unpin */
int tiler_pin(struct tiler_block *block, struct page **pages,
- uint32_t npages, uint32_t roll, bool wait);
+ u32 npages, u32 roll, bool wait);
int tiler_unpin(struct tiler_block *block);
/* reserve/release */
-struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
- uint16_t align);
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w, u16 h,
+ u16 align);
struct tiler_block *tiler_reserve_1d(size_t size);
int tiler_release(struct tiler_block *block);
/* utilities */
dma_addr_t tiler_ssptr(struct tiler_block *block);
-dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
- uint32_t x, uint32_t y);
-uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient);
-size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
-size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
-void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
-uint32_t tiler_get_cpu_cache_flags(void);
+dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
+ u32 x, u32 y);
+u32 tiler_stride(enum tiler_fmt fmt, u32 orient);
+size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h);
+size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h);
+void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h);
+u32 tiler_get_cpu_cache_flags(void);
bool dmm_is_available(void);
extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
-static inline enum tiler_fmt gem2fmt(uint32_t flags)
+static inline enum tiler_fmt gem2fmt(u32 flags)
{
switch (flags & OMAP_BO_TILED) {
case OMAP_BO_TILED_8:
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index cdf5b06..3632854 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_drv.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -46,14 +44,6 @@
* devices
*/
-static void omap_fb_output_poll_changed(struct drm_device *dev)
-{
- struct omap_drm_private *priv = dev->dev_private;
- DBG("dev=%p", dev);
- if (priv->fbdev)
- drm_fb_helper_hotplug_event(priv->fbdev);
-}
-
static void omap_atomic_wait_for_completion(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
@@ -79,7 +69,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
- priv->dispc_ops->runtime_get();
+ priv->dispc_ops->runtime_get(priv->dispc);
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -123,7 +113,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
- priv->dispc_ops->runtime_put();
+ priv->dispc_ops->runtime_put(priv->dispc);
}
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
@@ -132,7 +122,7 @@ static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs =
static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
- .output_poll_changed = omap_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -201,7 +191,7 @@ cleanup:
static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls();
+ unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
num_planes - 1);
@@ -215,8 +205,8 @@ static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_dss_device *dssdev = NULL;
- int num_ovls = priv->dispc_ops->get_num_ovls();
- int num_mgrs = priv->dispc_ops->get_num_mgrs();
+ int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc);
+ int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
int num_crtcs, crtc_idx, plane_idx;
int ret;
u32 plane_crtc_mask;
@@ -320,11 +310,14 @@ static int omap_modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 8;
dev->mode_config.min_height = 2;
- /* note: eventually will need some cpu_is_omapXYZ() type stuff here
- * to fill in these limits properly on different OMAP generations..
+ /*
+ * Note: these values are used for multiple independent things:
+ * connector mode filtering, buffer sizes, crtc sizes...
+ * Use big enough values here to cover all use cases, and do more
+ * specific checking in the respective code paths.
*/
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
dev->mode_config.funcs = &omap_mode_config_funcs;
dev->mode_config.helper_private = &omap_mode_config_helper_funcs;
@@ -467,28 +460,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-/**
- * lastclose - clean up after all DRM clients have exited
- * @dev: DRM device
- *
- * Take care of cleaning up after all DRM clients have exited. In the
- * mode setting case, we want to restore the kernel's initial mode (just
- * in case the last client left us in a bad state).
- */
-static void dev_lastclose(struct drm_device *dev)
-{
- struct omap_drm_private *priv = dev->dev_private;
- int ret;
-
- DBG("lastclose: dev=%p", dev);
-
- if (priv->fbdev) {
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
- if (ret)
- DBG("failed to restore crtc mode");
- }
-}
-
static const struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
@@ -511,7 +482,7 @@ static struct drm_driver omap_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC | DRIVER_RENDER,
.open = dev_open,
- .lastclose = dev_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = omap_debugfs_init,
#endif
@@ -542,40 +513,26 @@ static const struct soc_device_attribute omapdrm_soc_devices[] = {
{ /* sentinel */ }
};
-static int pdev_probe(struct platform_device *pdev)
+static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
{
const struct soc_device_attribute *soc;
- struct omap_drm_private *priv;
struct drm_device *ddev;
unsigned int i;
int ret;
- DBG("%s", pdev->name);
-
- if (omapdss_is_initialized() == false)
- return -EPROBE_DEFER;
+ DBG("%s", dev_name(dev));
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(&pdev->dev, "Failed to set the DMA mask\n");
- return ret;
- }
+ priv->dev = dev;
+ priv->dss = omapdss_get_dss();
+ priv->dispc = dispc_get_dispc(priv->dss);
+ priv->dispc_ops = dispc_get_ops(priv->dss);
- omap_crtc_pre_init();
+ omap_crtc_pre_init(priv);
ret = omap_connect_dssdevs();
if (ret)
goto err_crtc_uninit;
- /* Allocate and initialize the driver private structure. */
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_disconnect_dssdevs;
- }
-
- priv->dispc_ops = dispc_get_ops();
-
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (unsigned int)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
@@ -584,34 +541,39 @@ static int pdev_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&priv->obj_list);
/* Allocate and initialize the DRM device. */
- ddev = drm_dev_alloc(&omap_drm_driver, &pdev->dev);
+ ddev = drm_dev_alloc(&omap_drm_driver, priv->dev);
if (IS_ERR(ddev)) {
ret = PTR_ERR(ddev);
- goto err_free_priv;
+ goto err_destroy_wq;
}
+ priv->ddev = ddev;
ddev->dev_private = priv;
- platform_set_drvdata(pdev, ddev);
+
+ /* Get memory bandwidth limits */
+ if (priv->dispc_ops->get_memory_bandwidth_limit)
+ priv->max_bandwidth =
+ priv->dispc_ops->get_memory_bandwidth_limit(priv->dispc);
omap_gem_init(ddev);
ret = omap_modeset_init(ddev);
if (ret) {
- dev_err(&pdev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+ dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
goto err_free_drm_dev;
}
/* Initialize vblank handling, start with all CRTCs disabled. */
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret) {
- dev_err(&pdev->dev, "could not init vblank\n");
+ dev_err(priv->dev, "could not init vblank\n");
goto err_cleanup_modeset;
}
for (i = 0; i < priv->num_crtcs; i++)
drm_crtc_vblank_off(priv->crtcs[i]);
- priv->fbdev = omap_fbdev_init(ddev);
+ omap_fbdev_init(ddev);
drm_kms_helper_poll_init(ddev);
omap_modeset_enable_external_hpd();
@@ -629,28 +591,25 @@ static int pdev_probe(struct platform_device *pdev)
err_cleanup_helpers:
omap_modeset_disable_external_hpd();
drm_kms_helper_poll_fini(ddev);
- if (priv->fbdev)
- omap_fbdev_free(ddev);
+
+ omap_fbdev_fini(ddev);
err_cleanup_modeset:
drm_mode_config_cleanup(ddev);
omap_drm_irq_uninstall(ddev);
err_free_drm_dev:
omap_gem_deinit(ddev);
drm_dev_unref(ddev);
-err_free_priv:
+err_destroy_wq:
destroy_workqueue(priv->wq);
- kfree(priv);
-err_disconnect_dssdevs:
omap_disconnect_dssdevs();
err_crtc_uninit:
omap_crtc_pre_uninit();
return ret;
}
-static int pdev_remove(struct platform_device *pdev)
+static void omapdrm_cleanup(struct omap_drm_private *priv)
{
- struct drm_device *ddev = platform_get_drvdata(pdev);
- struct omap_drm_private *priv = ddev->dev_private;
+ struct drm_device *ddev = priv->ddev;
DBG("");
@@ -659,8 +618,7 @@ static int pdev_remove(struct platform_device *pdev)
omap_modeset_disable_external_hpd();
drm_kms_helper_poll_fini(ddev);
- if (priv->fbdev)
- omap_fbdev_free(ddev);
+ omap_fbdev_fini(ddev);
drm_atomic_helper_shutdown(ddev);
@@ -672,10 +630,45 @@ static int pdev_remove(struct platform_device *pdev)
drm_dev_unref(ddev);
destroy_workqueue(priv->wq);
- kfree(priv);
omap_disconnect_dssdevs();
omap_crtc_pre_uninit();
+}
+
+static int pdev_probe(struct platform_device *pdev)
+{
+ struct omap_drm_private *priv;
+ int ret;
+
+ if (omapdss_is_initialized() == false)
+ return -EPROBE_DEFER;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set the DMA mask\n");
+ return ret;
+ }
+
+ /* Allocate and initialize the driver private structure. */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = omapdrm_init(priv, &pdev->dev);
+ if (ret < 0)
+ kfree(priv);
+
+ return ret;
+}
+
+static int pdev_remove(struct platform_device *pdev)
+{
+ struct omap_drm_private *priv = platform_get_drvdata(pdev);
+
+ omapdrm_cleanup(priv);
+ kfree(priv);
return 0;
}
@@ -719,7 +712,8 @@ static int omap_drm_resume_all_displays(void)
static int omap_drm_suspend(struct device *dev)
{
- struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct omap_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = priv->ddev;
drm_kms_helper_poll_disable(drm_dev);
@@ -732,7 +726,8 @@ static int omap_drm_suspend(struct device *dev)
static int omap_drm_resume(struct device *dev)
{
- struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct omap_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = priv->ddev;
drm_modeset_lock_all(drm_dev);
omap_drm_resume_all_displays();
@@ -740,7 +735,7 @@ static int omap_drm_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
- return omap_gem_resume(dev);
+ return omap_gem_resume(drm_dev);
}
#endif
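
A recurring change in this series (seen in omap_drv.c above, and again in omap_irq.c and omap_plane.c further down) is that every dispc_ops callback now takes the dispc device as an explicit argument instead of relying on a single implicit instance. A minimal sketch of the resulting call pattern, using only names visible in the hunks above (kernel context assumed, not a standalone program):

static int example_query_overlays(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	int num_ovls;

	/* Runtime PM and queries all go through the explicit dispc handle. */
	priv->dispc_ops->runtime_get(priv->dispc);
	num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc);
	priv->dispc_ops->runtime_put(priv->dispc);

	return num_ovls;
}
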
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 4bd1e90..6eaee4d 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_drv.h
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -17,8 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __OMAP_DRV_H__
-#define __OMAP_DRV_H__
+#ifndef __OMAPDRM_DRV_H__
+#define __OMAPDRM_DRV_H__
#include <linux/module.h>
#include <linux/types.h>
@@ -31,6 +29,15 @@
#include "dss/omapdss.h"
+#include "omap_connector.h"
+#include "omap_crtc.h"
+#include "omap_encoder.h"
+#include "omap_fb.h"
+#include "omap_fbdev.h"
+#include "omap_gem.h"
+#include "omap_irq.h"
+#include "omap_plane.h"
+
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
@@ -38,17 +45,13 @@
struct omap_drm_usergart;
-/* For KMS code that needs to wait for a certain # of IRQs:
- */
-struct omap_irq_wait;
-struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
- uint32_t irqmask, int count);
-int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
- unsigned long timeout);
-
struct omap_drm_private {
- uint32_t omaprev;
+ struct drm_device *ddev;
+ struct device *dev;
+ u32 omaprev;
+ struct dss_device *dss;
+ struct dispc_device *dispc;
const struct dispc_ops *dispc_ops;
unsigned int num_crtcs;
@@ -82,118 +85,13 @@ struct omap_drm_private {
/* irq handling: */
spinlock_t wait_lock; /* protects the wait_list */
struct list_head wait_list; /* list of omap_irq_wait */
- uint32_t irq_mask; /* enabled irqs in addition to wait_list */
+ u32 irq_mask; /* enabled irqs in addition to wait_list */
+
+ /* memory bandwidth limit if it is needed on the platform */
+ unsigned int max_bandwidth;
};
-#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
-void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
-void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
-void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
-#endif
-
-#ifdef CONFIG_PM
-int omap_gem_resume(struct device *dev);
-#endif
-
-int omap_irq_enable_vblank(struct drm_crtc *crtc);
-void omap_irq_disable_vblank(struct drm_crtc *crtc);
-void omap_drm_irq_uninstall(struct drm_device *dev);
-int omap_drm_irq_install(struct drm_device *dev);
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
-void omap_fbdev_free(struct drm_device *dev);
-#else
-static inline struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
-{
- return NULL;
-}
-static inline void omap_fbdev_free(struct drm_device *dev)
-{
-}
-#endif
-
-struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
-enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
-void omap_crtc_pre_init(void);
-void omap_crtc_pre_uninit(void);
-struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, struct omap_dss_device *dssdev);
-int omap_crtc_wait_pending(struct drm_crtc *crtc);
-void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus);
-void omap_crtc_vblank_irq(struct drm_crtc *crtc);
-
-struct drm_plane *omap_plane_init(struct drm_device *dev,
- int idx, enum drm_plane_type type,
- u32 possible_crtcs);
-void omap_plane_install_properties(struct drm_plane *plane,
- struct drm_mode_object *obj);
-
-struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *dssdev);
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev,
- struct drm_encoder *encoder);
-struct drm_encoder *omap_connector_attached_encoder(
- struct drm_connector *connector);
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-
-struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
-struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-int omap_framebuffer_pin(struct drm_framebuffer *fb);
-void omap_framebuffer_unpin(struct drm_framebuffer *fb);
-void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct drm_plane_state *state, struct omap_overlay_info *info);
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from);
-bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
-
-void omap_gem_init(struct drm_device *dev);
-void omap_gem_deinit(struct drm_device *dev);
-
-struct drm_gem_object *omap_gem_new(struct drm_device *dev,
- union omap_gem_size gsize, uint32_t flags);
-struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
- struct sg_table *sgt);
-int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
-void omap_gem_free_object(struct drm_gem_object *obj);
-void *omap_gem_vaddr(struct drm_gem_object *obj);
-int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
-int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int omap_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-int omap_gem_fault(struct vm_fault *vmf);
-int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
-void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
-void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
- enum dma_data_direction dir);
-int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr);
-void omap_gem_unpin(struct drm_gem_object *obj);
-int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
- bool remap);
-int omap_gem_put_pages(struct drm_gem_object *obj);
-uint32_t omap_gem_flags(struct drm_gem_object *obj);
-int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
- int x, int y, dma_addr_t *dma_addr);
-uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
-size_t omap_gem_mmap_size(struct drm_gem_object *obj);
-int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
-
-struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
-struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
- struct dma_buf *buffer);
-
-/* map crtc to vblank mask */
-struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
-
-#endif /* __OMAP_DRV_H__ */
+
+#endif /* __OMAPDRM_DRV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 624f5b5..fcdf4b0 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_encoder.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h
new file mode 100644
index 0000000..d2f308b
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.h
@@ -0,0 +1,33 @@
+/*
+ * omap_encoder.h -- OMAP DRM Encoder
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_ENCODER_H__
+#define __OMAPDRM_ENCODER_H__
+
+struct drm_device;
+struct drm_encoder;
+struct omap_dss_device;
+
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_dss_device *dssdev);
+
+/* map crtc to vblank mask */
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
+
+#endif /* __OMAPDRM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index b1a762b..5fd22ca7 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_fb.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -54,8 +52,8 @@ static const u32 formats[] = {
/* per-plane info for the fb: */
struct plane {
struct drm_gem_object *bo;
- uint32_t pitch;
- uint32_t offset;
+ u32 pitch;
+ u32 offset;
dma_addr_t dma_addr;
};
@@ -102,10 +100,10 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.destroy = omap_framebuffer_destroy,
};
-static uint32_t get_linear_addr(struct plane *plane,
+static u32 get_linear_addr(struct plane *plane,
const struct drm_format_info *format, int n, int x, int y)
{
- uint32_t offset;
+ u32 offset;
offset = plane->offset
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
@@ -123,9 +121,9 @@ bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
-static uint32_t drm_rotation_to_tiler(unsigned int drm_rot)
+static u32 drm_rotation_to_tiler(unsigned int drm_rot)
{
- uint32_t orient;
+ u32 orient;
switch (drm_rot & DRM_MODE_ROTATE_MASK) {
default:
@@ -160,7 +158,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
struct plane *plane = &omap_fb->planes[0];
- uint32_t x, y, orient = 0;
+ u32 x, y, orient = 0;
info->fourcc = fb->format->format;
@@ -179,8 +177,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
y = state->src_y >> 16;
if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
- uint32_t w = state->src_w >> 16;
- uint32_t h = state->src_h >> 16;
+ u32 w = state->src_w >> 16;
+ u32 h = state->src_h >> 16;
orient = drm_rotation_to_tiler(state->rotation);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
new file mode 100644
index 0000000..94ad5f9
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -0,0 +1,46 @@
+/*
+ * omap_fb.h -- OMAP DRM Framebuffer
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_FB_H__
+#define __OMAPDRM_FB_H__
+
+struct drm_connector;
+struct drm_device;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_mode_fb_cmd2;
+struct drm_plane_state;
+struct omap_overlay_info;
+struct seq_file;
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+int omap_framebuffer_pin(struct drm_framebuffer *fb);
+void omap_framebuffer_unpin(struct drm_framebuffer *fb);
+void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
+ struct drm_plane_state *state, struct omap_overlay_info *info);
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from);
+bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+
+#endif /* __OMAPDRM_FB_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 9273118..0f66c74 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_fbdev.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -82,18 +80,21 @@ fallback:
static struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- /* Note: to properly handle manual update displays, we wrap the
- * basic fbdev ops which write to the framebuffer
- */
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = omap_fbdev_pan_display,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+ .fb_ioctl = drm_fb_helper_ioctl,
+
.fb_read = drm_fb_helper_sys_read,
.fb_write = drm_fb_helper_sys_write,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
-
- .fb_pan_display = omap_fbdev_pan_display,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
@@ -193,7 +194,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = dma_addr;
- fbi->screen_base = omap_gem_vaddr(fbdev->bo);
+ fbi->screen_buffer = omap_gem_vaddr(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = dma_addr;
fbi->fix.smem_len = fbdev->bo->size;
@@ -241,13 +242,16 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi)
}
/* initialize fbdev helper */
-struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+void omap_fbdev_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_fbdev *fbdev = NULL;
struct drm_fb_helper *helper;
int ret = 0;
+ if (!priv->num_crtcs || !priv->num_connectors)
+ return;
+
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
goto fail;
@@ -259,10 +263,8 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
- if (ret) {
- dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ if (ret)
goto fail;
- }
ret = drm_fb_helper_single_add_all_connectors(helper);
if (ret)
@@ -274,7 +276,7 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
priv->fbdev = helper;
- return helper;
+ return;
fini:
drm_fb_helper_fini(helper);
@@ -282,12 +284,9 @@ fail:
kfree(fbdev);
dev_warn(dev->dev, "omap_fbdev_init failed\n");
- /* well, limp along without an fbdev.. maybe X11 will work? */
-
- return NULL;
}
-void omap_fbdev_free(struct drm_device *dev)
+void omap_fbdev_fini(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_fb_helper *helper = priv->fbdev;
@@ -295,14 +294,18 @@ void omap_fbdev_free(struct drm_device *dev)
DBG();
+ if (!helper)
+ return;
+
drm_fb_helper_unregister_fbi(helper);
drm_fb_helper_fini(helper);
- fbdev = to_omap_fbdev(priv->fbdev);
+ fbdev = to_omap_fbdev(helper);
/* unpin the GEM object pinned in omap_fbdev_create() */
- omap_gem_unpin(fbdev->bo);
+ if (fbdev->bo)
+ omap_gem_unpin(fbdev->bo);
/* this will free the backing object */
if (fbdev->fb)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.h b/drivers/gpu/drm/omapdrm/omap_fbdev.h
new file mode 100644
index 0000000..7dfd843
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.h
@@ -0,0 +1,38 @@
+/*
+ * omap_fbdev.h -- OMAP DRM FBDEV Compatibility
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_FBDEV_H__
+#define __OMAPDRM_FBDEV_H__
+
+struct drm_device;
+struct drm_fb_helper;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+void omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_fini(struct drm_device *dev);
+#else
+static inline void omap_fbdev_init(struct drm_device *dev)
+{
+}
+static inline void omap_fbdev_fini(struct drm_device *dev)
+{
+}
+#endif
+
+#endif /* __OMAPDRM_FBDEV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 5c5c86d..0faf042 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_gem.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
@@ -41,13 +39,13 @@ struct omap_gem_object {
struct list_head mm_list;
- uint32_t flags;
+ u32 flags;
/** width/height for tiled formats (rounded up to slot boundaries) */
- uint16_t width, height;
+ u16 width, height;
/** roll applied when mapping to DMM */
- uint32_t roll;
+ u32 roll;
/**
* dma_addr contains the buffer DMA address. It is valid for
@@ -75,7 +73,7 @@ struct omap_gem_object {
/**
* # of users of dma_addr
*/
- uint32_t dma_addr_cnt;
+ u32 dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -139,7 +137,7 @@ struct omap_drm_usergart {
*/
/** get mmap offset */
-static uint64_t mmap_offset(struct drm_gem_object *obj)
+static u64 mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
@@ -333,14 +331,15 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
}
/* get buffer flags */
-uint32_t omap_gem_flags(struct drm_gem_object *obj)
+u32 omap_gem_flags(struct drm_gem_object *obj)
{
return to_omap_bo(obj)->flags;
}
-uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
- uint64_t offset;
+ u64 offset;
+
mutex_lock(&obj->dev->struct_mutex);
offset = mmap_offset(obj);
mutex_unlock(&obj->dev->struct_mutex);
@@ -651,7 +650,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
* into user memory. We don't have to do much here at the moment.
*/
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
+ u32 handle, u64 *offset)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -677,10 +676,10 @@ fail:
*
* Call only from non-atomic contexts.
*/
-int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
+int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- uint32_t npages = obj->size >> PAGE_SHIFT;
+ u32 npages = obj->size >> PAGE_SHIFT;
int ret = 0;
if (roll > npages) {
@@ -810,7 +809,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
if (!is_contiguous(omap_obj) && priv->has_dmm) {
if (omap_obj->dma_addr_cnt == 0) {
struct page **pages;
- uint32_t npages = obj->size >> PAGE_SHIFT;
+ u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
@@ -906,7 +905,7 @@ void omap_gem_unpin(struct drm_gem_object *obj)
* specified orientation and x,y offset from top-left corner of buffer
* (only valid for tiled 2d buffers)
*/
-int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
+int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -923,7 +922,7 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
}
/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
-int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
+int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
@@ -996,23 +995,23 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
-int omap_gem_resume(struct device *dev)
+int omap_gem_resume(struct drm_device *dev)
{
- struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct omap_drm_private *priv = drm_dev->dev_private;
+ struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
int ret = 0;
list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
if (omap_obj->block) {
struct drm_gem_object *obj = &omap_obj->base;
- uint32_t npages = obj->size >> PAGE_SHIFT;
+ u32 npages = obj->size >> PAGE_SHIFT;
+
WARN_ON(!omap_obj->pages); /* this can't happen */
ret = tiler_pin(omap_obj->block,
omap_obj->pages, npages,
omap_obj->roll, true);
if (ret) {
- dev_err(dev, "could not repin: %d\n", ret);
+ dev_err(dev->dev, "could not repin: %d\n", ret);
return ret;
}
}
@@ -1030,7 +1029,7 @@ int omap_gem_resume(struct device *dev)
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- uint64_t off;
+ u64 off;
off = drm_vma_node_start(&obj->vma_node);
@@ -1118,7 +1117,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
- union omap_gem_size gsize, uint32_t flags)
+ union omap_gem_size gsize, u32 flags)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
@@ -1283,7 +1282,7 @@ done:
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+ union omap_gem_size gsize, u32 flags, u32 *handle)
{
struct drm_gem_object *obj;
int ret;
@@ -1330,7 +1329,8 @@ void omap_gem_init(struct drm_device *dev)
/* reserve 4k aligned/wide regions for userspace mappings: */
for (i = 0; i < ARRAY_SIZE(fmts); i++) {
- uint16_t h = 1, w = PAGE_SIZE >> i;
+ u16 h = 1, w = PAGE_SIZE >> i;
+
tiler_align(fmts[i], &w, &h);
/* note: since each region is 1 4kb page wide, and minimum
* number of rows, the height ends up being the same as the
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
new file mode 100644
index 0000000..a78bde0
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -0,0 +1,99 @@
+/*
+ * omap_gem.h -- OMAP DRM GEM Object Management
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_GEM_H__
+#define __OMAPDRM_GEM_H__
+
+#include <linux/types.h>
+
+enum dma_data_direction;
+
+struct dma_buf;
+struct drm_device;
+struct drm_file;
+struct drm_gem_object;
+struct drm_mode_create_dumb;
+struct file;
+struct list_head;
+struct page;
+struct seq_file;
+struct vm_area_struct;
+struct vm_fault;
+
+union omap_gem_size;
+
+/* Initialization and Cleanup */
+void omap_gem_init(struct drm_device *dev);
+void omap_gem_deinit(struct drm_device *dev);
+
+#ifdef CONFIG_PM
+int omap_gem_resume(struct drm_device *dev);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
+#endif
+
+/* GEM Object Creation and Deletion */
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, u32 flags);
+struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+ struct sg_table *sgt);
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, u32 flags, u32 *handle);
+void omap_gem_free_object(struct drm_gem_object *obj);
+void *omap_gem_vaddr(struct drm_gem_object *obj);
+
+/* Dumb Buffers Interface */
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset);
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+/* mmap() Interface */
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj);
+size_t omap_gem_mmap_size(struct drm_gem_object *obj);
+
+/* PRIME Interface */
+struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buffer);
+
+int omap_gem_fault(struct vm_fault *vmf);
+int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
+void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
+void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
+ enum dma_data_direction dir);
+int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr);
+void omap_gem_unpin(struct drm_gem_object *obj);
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
+ bool remap);
+int omap_gem_put_pages(struct drm_gem_object *obj);
+
+u32 omap_gem_flags(struct drm_gem_object *obj);
+int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
+ int x, int y, dma_addr_t *dma_addr);
+int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient);
+
+#endif /* __OMAPDRM_GEM_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index afdbad5..8e41d64 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 013b0bb..c851150 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_irq.c
- *
- * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
@@ -22,7 +20,7 @@
struct omap_irq_wait {
struct list_head node;
wait_queue_head_t wq;
- uint32_t irqmask;
+ u32 irqmask;
int count;
};
@@ -31,7 +29,7 @@ static void omap_irq_update(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait;
- uint32_t irqmask = priv->irq_mask;
+ u32 irqmask = priv->irq_mask;
assert_spin_locked(&priv->wait_lock);
@@ -40,7 +38,7 @@ static void omap_irq_update(struct drm_device *dev)
DBG("irqmask=%08x", irqmask);
- priv->dispc_ops->write_irqenable(irqmask);
+ priv->dispc_ops->write_irqenable(priv->dispc, irqmask);
}
static void omap_irq_wait_handler(struct omap_irq_wait *wait)
@@ -50,7 +48,7 @@ static void omap_irq_wait_handler(struct omap_irq_wait *wait)
}
struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
- uint32_t irqmask, int count)
+ u32 irqmask, int count)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
@@ -110,7 +108,8 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(channel);
+ priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -136,7 +135,8 @@ void omap_irq_disable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(channel);
+ priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
}
@@ -200,9 +200,9 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
unsigned int id;
u32 irqstatus;
- irqstatus = priv->dispc_ops->read_irqstatus();
- priv->dispc_ops->clear_irqstatus(irqstatus);
- priv->dispc_ops->read_irqstatus(); /* flush posted write */
+ irqstatus = priv->dispc_ops->read_irqstatus(priv->dispc);
+ priv->dispc_ops->clear_irqstatus(priv->dispc, irqstatus);
+ priv->dispc_ops->read_irqstatus(priv->dispc); /* flush posted write */
VERB("irqs: %08x", irqstatus);
@@ -210,12 +210,12 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
struct drm_crtc *crtc = priv->crtcs[id];
enum omap_channel channel = omap_crtc_channel(crtc);
- if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(channel)) {
+ if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
drm_handle_vblank(dev, id);
omap_crtc_vblank_irq(crtc);
}
- if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(channel))
+ if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel))
omap_crtc_error_irq(crtc, irqstatus);
}
@@ -249,7 +249,7 @@ static const u32 omap_underflow_irqs[] = {
int omap_drm_irq_install(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs();
+ unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
unsigned int max_planes;
unsigned int i;
int ret;
@@ -267,13 +267,13 @@ int omap_drm_irq_install(struct drm_device *dev)
}
for (i = 0; i < num_mgrs; ++i)
- priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(i);
+ priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i);
- priv->dispc_ops->runtime_get();
- priv->dispc_ops->clear_irqstatus(0xffffffff);
- priv->dispc_ops->runtime_put();
+ priv->dispc_ops->runtime_get(priv->dispc);
+ priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff);
+ priv->dispc_ops->runtime_put(priv->dispc);
- ret = priv->dispc_ops->request_irq(omap_irq_handler, dev);
+ ret = priv->dispc_ops->request_irq(priv->dispc, omap_irq_handler, dev);
if (ret < 0)
return ret;
@@ -291,5 +291,5 @@ void omap_drm_irq_uninstall(struct drm_device *dev)
dev->irq_enabled = false;
- priv->dispc_ops->free_irq(dev);
+ priv->dispc_ops->free_irq(priv->dispc, dev);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.h b/drivers/gpu/drm/omapdrm/omap_irq.h
new file mode 100644
index 0000000..9d54414
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_irq.h
@@ -0,0 +1,39 @@
+/*
+ * omap_irq.h -- OMAP DRM IRQ Handling
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_IRQ_H__
+#define __OMAPDRM_IRQ_H__
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct drm_device;
+struct omap_irq_wait;
+
+int omap_irq_enable_vblank(struct drm_crtc *crtc);
+void omap_irq_disable_vblank(struct drm_crtc *crtc);
+void omap_drm_irq_uninstall(struct drm_device *dev);
+int omap_drm_irq_install(struct drm_device *dev);
+
+struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
+ u32 irqmask, int count);
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+ unsigned long timeout);
+
+#endif /* __OMAPDRM_IRQ_H__ */
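
The wait helpers declared above pair an init call, which registers interest in an IRQ mask and a count, with a blocking wait. A hypothetical caller sketch based only on these prototypes (the mask, timeout and error handling are illustrative and not taken from the omapdrm sources; kernel context assumed):

static int example_wait_for_one_irq(struct drm_device *dev, u32 irqmask)
{
	struct omap_irq_wait *wait;

	/* Register interest before triggering the operation. */
	wait = omap_irq_wait_init(dev, irqmask, 1);

	/* ... start whatever raises the IRQ here ... */

	/* Block until the IRQ has fired once, or give up after 100 ms. */
	return omap_irq_wait(dev, wait, msecs_to_jiffies(100));
}
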
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 15e5d5d..2899435 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_plane.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
@@ -79,17 +77,17 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
&info.paddr, &info.p_uv_addr);
/* and finally, update omapdss: */
- ret = priv->dispc_ops->ovl_setup(omap_plane->id, &info,
+ ret = priv->dispc_ops->ovl_setup(priv->dispc, omap_plane->id, &info,
omap_crtc_timings(state->crtc), false,
omap_crtc_channel(state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
omap_plane->name);
- priv->dispc_ops->ovl_enable(omap_plane->id, false);
+ priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
return;
}
- priv->dispc_ops->ovl_enable(omap_plane->id, true);
+ priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, true);
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
@@ -102,7 +100,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
- priv->dispc_ops->ovl_enable(omap_plane->id, false);
+ priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
}
static int omap_plane_atomic_check(struct drm_plane *plane,
@@ -203,7 +201,7 @@ static void omap_plane_reset(struct drm_plane *plane)
static int omap_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
- uint64_t val)
+ u64 val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
@@ -218,7 +216,7 @@ static int omap_plane_atomic_set_property(struct drm_plane *plane,
static int omap_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
- uint64_t *val)
+ u64 *val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
@@ -261,7 +259,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls();
+ unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
struct drm_plane *plane;
struct omap_plane *omap_plane;
enum omap_plane_id id;
@@ -280,7 +278,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (!omap_plane)
return ERR_PTR(-ENOMEM);
- formats = priv->dispc_ops->ovl_get_color_modes(id);
+ formats = priv->dispc_ops->ovl_get_color_modes(priv->dispc, id);
for (nformats = 0; formats[nformats]; ++nformats)
;
omap_plane->id = id;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.h b/drivers/gpu/drm/omapdrm/omap_plane.h
new file mode 100644
index 0000000..dc5e82a
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_plane.h
@@ -0,0 +1,37 @@
+/*
+ * omap_plane.h -- OMAP DRM Plane
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_PLANE_H__
+#define __OMAPDRM_PLANE_H__
+
+#include <linux/types.h>
+
+enum drm_plane_type;
+
+struct drm_device;
+struct drm_mode_object;
+struct drm_plane;
+
+struct drm_plane *omap_plane_init(struct drm_device *dev,
+ int idx, enum drm_plane_type type,
+ u32 possible_crtcs);
+void omap_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj);
+
+#endif /* __OMAPDRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index c10fdfc..d7f7bc9 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -1,13 +1,11 @@
/*
- * tcm-sita.c
- *
* SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm
*
* Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
* Lajos Molnar <molnar@ti.com>
* Andy Gross <andy.gross@ti.com>
*
- * Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -35,8 +33,8 @@ static unsigned long mask[8];
* map ptr to bitmap
* stride slots in a row
*/
-static void free_slots(unsigned long pos, uint16_t w, uint16_t h,
- unsigned long *map, uint16_t stride)
+static void free_slots(unsigned long pos, u16 w, u16 h,
+ unsigned long *map, u16 stride)
{
int i;
@@ -50,7 +48,7 @@ static void free_slots(unsigned long pos, uint16_t w, uint16_t h,
* map ptr to bitmap
* num_bits number of bits in bitmap
*/
-static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map,
+static int r2l_b2t_1d(u16 w, unsigned long *pos, unsigned long *map,
size_t num_bits)
{
unsigned long search_count = 0;
@@ -86,7 +84,7 @@ static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map,
* num_bits = size of bitmap
* stride = bits in one row of container
*/
-static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset,
+static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset,
unsigned long *pos, unsigned long slot_bytes,
unsigned long *map, size_t num_bits, size_t slot_stride)
{
@@ -181,7 +179,7 @@ static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
}
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align,
- int16_t offset, uint16_t slot_bytes,
+ s16 offset, u16 slot_bytes,
struct tcm_area *area)
{
unsigned long pos;
@@ -210,7 +208,7 @@ static void sita_deinit(struct tcm *tcm)
static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
{
unsigned long pos;
- uint16_t w, h;
+ u16 w, h;
pos = area->p0.x + area->p0.y * tcm->width;
if (area->is2d) {
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h
index 0444f86..460e63d 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.h
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.h
@@ -1,11 +1,9 @@
/*
- * tcm_sita.h
- *
* SImple Tiler Allocator (SiTA) private structures.
*
+ * Copyright (C) 2009-2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Ravi Ramachandra <r.ramachandra@ti.com>
*
- * Copyright (C) 2009-2011 Texas Instruments, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/gpu/drm/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h
index ef7df7d..8efcda9 100644
--- a/drivers/gpu/drm/omapdrm/tcm.h
+++ b/drivers/gpu/drm/omapdrm/tcm.h
@@ -1,6 +1,4 @@
/*
- * tcm.h
- *
* TILER container manager specification and support functions for TI
* TILER driver.
*
@@ -67,7 +65,7 @@ struct tcm {
/* function table */
s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u16 align,
- int16_t offset, uint16_t slot_bytes,
+ s16 offset, u16 slot_bytes,
struct tcm_area *area);
s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
s32 (*free)(struct tcm *tcm, struct tcm_area *area);
@@ -131,7 +129,7 @@ static inline void tcm_deinit(struct tcm *tcm)
* allocation.
*/
static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
- u16 align, int16_t offset, uint16_t slot_bytes,
+ u16 align, s16 offset, u16 slot_bytes,
struct tcm_area *area)
{
/* perform rudimentary error checking */
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 726f3fb..25682ff 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -7,6 +7,16 @@ config DRM_PANEL
menu "Display Panels"
depends on DRM && DRM_PANEL
+config DRM_PANEL_ARM_VERSATILE
+ tristate "ARM Versatile panel driver"
+ depends on OF
+ depends on MFD_SYSCON
+ select VIDEOMODE_HELPERS
+ help
+ This driver supports the ARM Versatile panels connected to ARM
+ reference designs. The panel is detected using special registers
+ in the Versatile family syscon registers.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -28,6 +38,14 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_ILITEK_IL9322
+ tristate "Ilitek ILI9322 320x240 QVGA panels"
+ depends on OF && SPI
+ select REGMAP
+ help
+ Say Y here if you want to enable support for Ilitek IL9322
+ QVGA (320x240) RGB, YUV and ITU-T BT.656 panels.
+
config DRM_PANEL_INNOLUX_P079ZCA
tristate "Innolux P079ZCA panel"
depends on OF
@@ -90,6 +108,15 @@ config DRM_PANEL_RASPBERRYPI_TOUCHSCREEN
Pi 7" Touchscreen. To compile this driver as a module,
choose M here.
+config DRM_PANEL_RAYDIUM_RM68200
+ tristate "Raydium RM68200 720x1280 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Raydium RM68200
+ 720x1280 DSI video mode panel.
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 2c4e1a9..f26efc1 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,12 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
+obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
new file mode 100644
index 0000000..b428c46
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Panel driver for the ARM Versatile family reference designs from
+ * ARM Limited.
+ *
+ * Author:
+ * Linus Walleij <linus.wallei@linaro.org>
+ *
+ * On the Versatile AB, these panels come mounted on daughterboards
+ * named "IB1" or "IB2" (Interface Board 1 & 2 respectively.) They
+ * are documented in ARM DUI 0225D Appendix C and D. These daughter
+ * boards support TFT display panels.
+ *
+ * - The IB1 is a passive board where the display connector defines a
+ * few wires for encoding the display type for autodetection,
+ * suitable display settings can then be looked up from this setting.
+ * The magic bits can be read out from the system controller.
+ *
+ * - The IB2 is a more complex board intended for GSM phone development
+ * with some logic and a control register, which needs to be accessed
+ * and the board display needs to be turned on explicitly.
+ *
+ * On the Versatile PB, a special CLCD adaptor board is available
+ * supporting the same displays as the Versatile AB, plus one more
+ * Epson QCIF display.
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+/*
+ * This configuration register in the Versatile and RealView
+ * family is uniformly present but appears more and more
+ * unutilized starting with the RealView series.
+ */
+#define SYS_CLCD 0x50
+
+/* The Versatile can detect the connected panel type */
+#define SYS_CLCD_CLCDID_MASK (BIT(8)|BIT(9)|BIT(10)|BIT(11)|BIT(12))
+#define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8)
+#define SYS_CLCD_ID_SHARP_8_4 (0x01 << 8)
+#define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8)
+#define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8)
+#define SYS_CLCD_ID_VGA (0x1f << 8)
+
+/* IB2 control register for the Versatile daughterboard */
+#define IB2_CTRL 0x00
+#define IB2_CTRL_LCD_SD BIT(1) /* 1 = shut down LCD */
+#define IB2_CTRL_LCD_BL_ON BIT(0)
+#define IB2_CTRL_LCD_MASK (BIT(0)|BIT(1))
+
+/**
+ * struct versatile_panel_type - lookup struct for the supported panels
+ */
+struct versatile_panel_type {
+ /**
+ * @name: the name of this panel
+ */
+ const char *name;
+ /**
+ * @magic: the magic value from the detection register
+ */
+ u32 magic;
+ /**
+ * @mode: the DRM display mode for this panel
+ */
+ struct drm_display_mode mode;
+ /**
+ * @bus_flags: the DRM bus flags for this panel e.g. inverted clock
+ */
+ u32 bus_flags;
+ /**
+ * @width_mm: the panel width in mm
+ */
+ u32 width_mm;
+ /**
+ * @height_mm: the panel height in mm
+ */
+ u32 height_mm;
+ /**
+ * @ib2: the panel may be connected on an IB2 daughterboard
+ */
+ bool ib2;
+};
+
+/**
+ * struct versatile_panel - state container for the Versatile panels
+ */
+struct versatile_panel {
+ /**
+ * @dev: the container device
+ */
+ struct device *dev;
+ /**
+ * @panel: the DRM panel instance for this device
+ */
+ struct drm_panel panel;
+ /**
+ * @panel_type: the Versatile panel type as detected
+ */
+ const struct versatile_panel_type *panel_type;
+ /**
+ * @map: map to the parent syscon where the main register resides
+ */
+ struct regmap *map;
+ /**
+ * @ib2_map: map to the IB2 syscon, if applicable
+ */
+ struct regmap *ib2_map;
+};
+
+static const struct versatile_panel_type versatile_panels[] = {
+ /*
+ * Sanyo TM38QV67A02A - 3.8 inch QVGA (320x240) Color TFT
+ * found on the Versatile AB IB1 connector or the Versatile
+ * PB adaptor board connector.
+ */
+ {
+ .name = "Sanyo TM38QV67A02A",
+ .magic = SYS_CLCD_ID_SANYO_3_8,
+ .width_mm = 79,
+ .height_mm = 54,
+ .mode = {
+ .clock = 10000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 6,
+ .hsync_end = 320 + 6 + 6,
+ .htotal = 320 + 6 + 6 + 6,
+ .vdisplay = 240,
+ .vsync_start = 240 + 5,
+ .vsync_end = 240 + 5 + 6,
+ .vtotal = 240 + 5 + 6 + 5,
+ .vrefresh = 116,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+ },
+ },
+ /*
+ * Sharp LQ084V1DG21 640x480 VGA Color TFT module
+ * found on the Versatile AB IB1 connector or the Versatile
+ * PB adaptor board connector.
+ */
+ {
+ .name = "Sharp LQ084V1DG21",
+ .magic = SYS_CLCD_ID_SHARP_8_4,
+ .width_mm = 171,
+ .height_mm = 130,
+ .mode = {
+ .clock = 25000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 24,
+ .hsync_end = 640 + 24 + 96,
+ .htotal = 640 + 24 + 96 + 24,
+ .vdisplay = 480,
+ .vsync_start = 480 + 11,
+ .vsync_end = 480 + 11 + 2,
+ .vtotal = 480 + 11 + 2 + 32,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+ },
+ },
+ /*
+ * Epson L2F50113T00 - 2.2 inch QCIF 176x220 Color TFT
+ * found on the Versatile PB adaptor board connector.
+ */
+ {
+ .name = "Epson L2F50113T00",
+ .magic = SYS_CLCD_ID_EPSON_2_2,
+ .width_mm = 34,
+ .height_mm = 45,
+ .mode = {
+ .clock = 62500,
+ .hdisplay = 176,
+ .hsync_start = 176 + 2,
+ .hsync_end = 176 + 2 + 3,
+ .htotal = 176 + 2 + 3 + 3,
+ .vdisplay = 220,
+ .vsync_start = 220 + 0,
+ .vsync_end = 220 + 0 + 2,
+ .vtotal = 220 + 0 + 2 + 1,
+ .vrefresh = 390,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+ },
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ },
+ /*
+ * Sanyo ALR252RGT 240x320 portrait display found on the
+ * Versatile AB IB2 daughterboard for GSM prototyping.
+ */
+ {
+ .name = "Sanyo ALR252RGT",
+ .magic = SYS_CLCD_ID_SANYO_2_5,
+ .width_mm = 37,
+ .height_mm = 50,
+ .mode = {
+ .clock = 5400,
+ .hdisplay = 240,
+ .hsync_start = 240 + 10,
+ .hsync_end = 240 + 10 + 10,
+ .htotal = 240 + 10 + 10 + 20,
+ .vdisplay = 320,
+ .vsync_start = 320 + 2,
+ .vsync_end = 320 + 2 + 2,
+ .vtotal = 320 + 2 + 2 + 2,
+ .vrefresh = 116,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .ib2 = true,
+ },
+};
+
+static inline struct versatile_panel *
+to_versatile_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct versatile_panel, panel);
+}
+
+static int versatile_panel_disable(struct drm_panel *panel)
+{
+ struct versatile_panel *vpanel = to_versatile_panel(panel);
+
+ /* If we're on an IB2 daughterboard, turn off display */
+ if (vpanel->ib2_map) {
+ dev_dbg(vpanel->dev, "disable IB2 display\n");
+ regmap_update_bits(vpanel->ib2_map,
+ IB2_CTRL,
+ IB2_CTRL_LCD_MASK,
+ IB2_CTRL_LCD_SD);
+ }
+
+ return 0;
+}
+
+static int versatile_panel_enable(struct drm_panel *panel)
+{
+ struct versatile_panel *vpanel = to_versatile_panel(panel);
+
+ /* If we're on an IB2 daughterboard, turn on display */
+ if (vpanel->ib2_map) {
+ dev_dbg(vpanel->dev, "enable IB2 display\n");
+ regmap_update_bits(vpanel->ib2_map,
+ IB2_CTRL,
+ IB2_CTRL_LCD_MASK,
+ IB2_CTRL_LCD_BL_ON);
+ }
+
+ return 0;
+}
+
+static int versatile_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct versatile_panel *vpanel = to_versatile_panel(panel);
+ struct drm_display_mode *mode;
+
+ strncpy(connector->display_info.name, vpanel->panel_type->name,
+ DRM_DISPLAY_INFO_LEN);
+ connector->display_info.width_mm = vpanel->panel_type->width_mm;
+ connector->display_info.height_mm = vpanel->panel_type->height_mm;
+ connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
+
+ mode = drm_mode_duplicate(panel->drm, &vpanel->panel_type->mode);
+ if (!mode)
+ return 0;
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ mode->width_mm = vpanel->panel_type->width_mm;
+ mode->height_mm = vpanel->panel_type->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs versatile_panel_drm_funcs = {
+ .disable = versatile_panel_disable,
+ .enable = versatile_panel_enable,
+ .get_modes = versatile_panel_get_modes,
+};
+
+static int versatile_panel_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct versatile_panel *vpanel;
+ struct device *parent;
+ struct regmap *map;
+ int ret;
+ u32 val;
+ int i;
+
+ parent = dev->parent;
+ if (!parent) {
+ dev_err(dev, "no parent for versatile panel\n");
+ return -ENODEV;
+ }
+ map = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(map)) {
+ dev_err(dev, "no regmap for versatile panel parent\n");
+ return PTR_ERR(map);
+ }
+
+ vpanel = devm_kzalloc(dev, sizeof(*vpanel), GFP_KERNEL);
+ if (!vpanel)
+ return -ENOMEM;
+
+ ret = regmap_read(map, SYS_CLCD, &val);
+ if (ret) {
+ dev_err(dev, "cannot access syscon regs\n");
+ return ret;
+ }
+
+ val &= SYS_CLCD_CLCDID_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(versatile_panels); i++) {
+ const struct versatile_panel_type *pt;
+
+ pt = &versatile_panels[i];
+ if (pt->magic == val) {
+ vpanel->panel_type = pt;
+ break;
+ }
+ }
+
+ /* No panel detected or VGA, nothing for us to do here */
+ if (i == ARRAY_SIZE(versatile_panels)) {
+ dev_info(dev, "no panel detected\n");
+ return -ENODEV;
+ }
+
+ dev_info(dev, "detected: %s\n", vpanel->panel_type->name);
+ vpanel->dev = dev;
+ vpanel->map = map;
+
+ /* Check if the panel is mounted on an IB2 daughterboard */
+ if (vpanel->panel_type->ib2) {
+ vpanel->ib2_map = syscon_regmap_lookup_by_compatible(
+ "arm,versatile-ib2-syscon");
+ if (IS_ERR(vpanel->ib2_map))
+ vpanel->ib2_map = NULL;
+ else
+ dev_info(dev, "panel mounted on IB2 daughterboard\n");
+ }
+
+ drm_panel_init(&vpanel->panel);
+ vpanel->panel.dev = dev;
+ vpanel->panel.funcs = &versatile_panel_drm_funcs;
+
+ return drm_panel_add(&vpanel->panel);
+}
+
+static const struct of_device_id versatile_panel_match[] = {
+ { .compatible = "arm,versatile-tft-panel", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, versatile_panel_match);
+
+static struct platform_driver versatile_panel_driver = {
+ .probe = versatile_panel_probe,
+ .driver = {
+ .name = "versatile-tft-panel",
+ .of_match_table = versatile_panel_match,
+ },
+};
+module_platform_driver(versatile_panel_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("ARM Versatile panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
new file mode 100644
index 0000000..bd38bf4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -0,0 +1,962 @@
+/*
+ * Ilitek ILI9322 TFT LCD drm_panel driver.
+ *
+ * This panel can be configured to support:
+ * - 8-bit serial RGB interface
+ * - 24-bit parallel RGB interface
+ * - 8-bit ITU-R BT.601 interface
+ * - 8-bit ITU-R BT.656 interface
+ * - Up to 320RGBx240 dots resolution TFT LCD displays
+ * - Scaling, brightness and contrast
+ *
+ * The scaling means that the display accepts a 640x480 or 720x480
+ * input and rescales it to fit the 320x240 panel, so the mode we
+ * present to the system differs from what actually appears on the
+ * display.
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Derived from drivers/drm/gpu/panel/panel-samsung-ld9040.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/of_device.h>
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#define ILI9322_CHIP_ID 0x00
+#define ILI9322_CHIP_ID_MAGIC 0x96
+
+/*
+ * Voltage on the communication interface, from 0.7 (0x00)
+ * to 1.32 (0x1f) times the VREG1OUT voltage in 2% increments.
+ * 1.00 (0x0f) is the default.
+ */
+#define ILI9322_VCOM_AMP 0x01
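+
+/*
+ * Worked example of the encoding (this mirrors the conversion done in
+ * ili9322_probe() below, it is not an additional datasheet value): a
+ * vcom_amplitude_percent of 114 is stored as (114 - 70) / 2 = 0x16,
+ * i.e. 0.70 + 0x16 * 0.02 = 1.14 times VREG1OUT.
+ */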
+
+/*
+ * High voltage on the communication signals, from 0.37 (0x00) to
+ * 1.0 (0x3f) times the VREG1OUT voltage in 1% increments.
+ * 0.83 (0x2e) is the default.
+ */
+#define ILI9322_VCOM_HIGH 0x02
+
+/*
+ * VREG1 voltage regulator from 3.6V (0x00) to 6.0V (0x18) in 0.1V
+ * increments. 5.4V (0x12) is the default. This is the reference
+ * voltage for the VCOM levels and the greyscale level.
+ */
+#define ILI9322_VREG1_VOLTAGE 0x03
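+
+/*
+ * Worked example of the encoding (again mirroring ili9322_probe()): a
+ * vreg1out_mv of 4600 (4.6 V, the value used by the D-Link DIR-685
+ * configuration at the bottom of this file) is stored as
+ * (4600 - 3600) / 100 = 0x0a.
+ */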
+
+/* Describes the incoming signal */
+#define ILI9322_ENTRY 0x06
+/* 0 = right-to-left, 1 = left-to-right (default), horizontal flip */
+#define ILI9322_ENTRY_HDIR BIT(0)
+/* 0 = down-to-up, 1 = up-to-down (default), vertical flip */
+#define ILI9322_ENTRY_VDIR BIT(1)
+/* NTSC, PAL or autodetect */
+#define ILI9322_ENTRY_NTSC (0 << 2)
+#define ILI9322_ENTRY_PAL (1 << 2)
+#define ILI9322_ENTRY_AUTODETECT (3 << 2)
+/* Input format */
+#define ILI9322_ENTRY_SERIAL_RGB_THROUGH (0 << 4)
+#define ILI9322_ENTRY_SERIAL_RGB_ALIGNED (1 << 4)
+#define ILI9322_ENTRY_SERIAL_RGB_DUMMY_320X240 (2 << 4)
+#define ILI9322_ENTRY_SERIAL_RGB_DUMMY_360X240 (3 << 4)
+#define ILI9322_ENTRY_DISABLE_1 (4 << 4)
+#define ILI9322_ENTRY_PARALLEL_RGB_THROUGH (5 << 4)
+#define ILI9322_ENTRY_PARALLEL_RGB_ALIGNED (6 << 4)
+#define ILI9322_ENTRY_YUV_640Y_320CBCR_25_54_MHZ (7 << 4)
+#define ILI9322_ENTRY_YUV_720Y_360CBCR_27_MHZ (8 << 4)
+#define ILI9322_ENTRY_DISABLE_2 (9 << 4)
+#define ILI9322_ENTRY_ITU_R_BT_656_720X360 (10 << 4)
+#define ILI9322_ENTRY_ITU_R_BT_656_640X320 (11 << 4)
+
+/* Power control */
+#define ILI9322_POW_CTRL 0x07
+#define ILI9322_POW_CTRL_STB BIT(0) /* 0 = standby, 1 = normal */
+#define ILI9322_POW_CTRL_VGL BIT(1) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_VGH BIT(2) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_DDVDH BIT(3) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_VCOM BIT(4) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_VCL BIT(5) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_AUTO BIT(6) /* 0 = interactive, 1 = auto */
+#define ILI9322_POW_CTRL_STANDBY (ILI9322_POW_CTRL_VGL | \
+ ILI9322_POW_CTRL_VGH | \
+ ILI9322_POW_CTRL_DDVDH | \
+ ILI9322_POW_CTRL_VCL | \
+ ILI9322_POW_CTRL_AUTO | \
+ BIT(7))
+#define ILI9322_POW_CTRL_DEFAULT (ILI9322_POW_CTRL_STANDBY | \
+ ILI9322_POW_CTRL_STB)
+
+/* Vertical back porch bits 0..5 */
+#define ILI9322_VBP 0x08
+
+/* Horizontal back porch, 8 bits */
+#define ILI9322_HBP 0x09
+
+/*
+ * Polarity settings:
+ * 1 = positive polarity
+ * 0 = negative polarity
+ */
+#define ILI9322_POL 0x0a
+#define ILI9322_POL_DCLK BIT(0) /* 1 default */
+#define ILI9322_POL_HSYNC BIT(1) /* 0 default */
+#define ILI9322_POL_VSYNC BIT(2) /* 0 default */
+#define ILI9322_POL_DE BIT(3) /* 1 default */
+/*
+ * 0 means YCBCR are ordered Cb0,Y0,Cr0,Y1,Cb2,Y2,Cr2,Y3 (default)
+ * in RGB mode this means RGB comes in RGBRGB
+ * 1 means YCBCR are ordered Cr0,Y0,Cb0,Y1,Cr2,Y2,Cb2,Y3
+ * in RGB mode this means RGB comes in BGRBGR
+ */
+#define ILI9322_POL_YCBCR_MODE BIT(4)
+/* Formula A for YCbCR->RGB = 0, Formula B = 1 */
+#define ILI9322_POL_FORMULA BIT(5)
+/* Reverse polarity: 0 = 0..255, 1 = 255..0 */
+#define ILI9322_POL_REV BIT(6)
+
+#define ILI9322_IF_CTRL 0x0b
+#define ILI9322_IF_CTRL_HSYNC_VSYNC 0x00
+#define ILI9322_IF_CTRL_HSYNC_VSYNC_DE BIT(2)
+#define ILI9322_IF_CTRL_DE_ONLY BIT(3)
+#define ILI9322_IF_CTRL_SYNC_DISABLED (BIT(2) | BIT(3))
+#define ILI9322_IF_CTRL_LINE_INVERSION BIT(0) /* Not set means frame inv */
+
+#define ILI9322_GLOBAL_RESET 0x04
+#define ILI9322_GLOBAL_RESET_ASSERT 0x00 /* bit 0 = 0 -> reset */
+
+/*
+ * 4+4 bits of negative and positive gamma correction
+ * Upper nybble, bits 4-7 are negative gamma
+ * Lower nybble, bits 0-3 are positive gamma
+ */
+#define ILI9322_GAMMA_1 0x10
+#define ILI9322_GAMMA_2 0x11
+#define ILI9322_GAMMA_3 0x12
+#define ILI9322_GAMMA_4 0x13
+#define ILI9322_GAMMA_5 0x14
+#define ILI9322_GAMMA_6 0x15
+#define ILI9322_GAMMA_7 0x16
+#define ILI9322_GAMMA_8 0x17
+
+/**
+ * enum ili9322_input - the format of the incoming signal to the panel
+ *
+ * The panel can be connected to various input streams and four of them can
+ * be selected by electronic straps on the display. However it is possible
+ * to select another mode or override the electronic default with this
+ * setting.
+ */
+enum ili9322_input {
+ ILI9322_INPUT_SRGB_THROUGH = 0x0,
+ ILI9322_INPUT_SRGB_ALIGNED = 0x1,
+ ILI9322_INPUT_SRGB_DUMMY_320X240 = 0x2,
+ ILI9322_INPUT_SRGB_DUMMY_360X240 = 0x3,
+ ILI9322_INPUT_DISABLED_1 = 0x4,
+ ILI9322_INPUT_PRGB_THROUGH = 0x5,
+ ILI9322_INPUT_PRGB_ALIGNED = 0x6,
+ ILI9322_INPUT_YUV_640X320_YCBCR = 0x7,
+ ILI9322_INPUT_YUV_720X360_YCBCR = 0x8,
+ ILI9322_INPUT_DISABLED_2 = 0x9,
+ ILI9322_INPUT_ITU_R_BT656_720X360_YCBCR = 0xa,
+ ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR = 0xb,
+ ILI9322_INPUT_UNKNOWN = 0xc,
+};
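+
+/*
+ * The numerical enum values above match the input-format field in bits
+ * 7:4 of the ENTRY register: ili9322_init() programs the register with
+ * (input << 4) and ili9322_probe() recovers the strapped setting with
+ * (val >> 4) & 0x0f.
+ */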
+
+static const char * const ili9322_inputs[] = {
+ "8 bit serial RGB through",
+ "8 bit serial RGB aligned",
+ "8 bit serial RGB dummy 320x240",
+ "8 bit serial RGB dummy 360x240",
+ "disabled 1",
+ "24 bit parallel RGB through",
+ "24 bit parallel RGB aligned",
+ "24 bit YUV 640Y 320CbCr",
+ "24 bit YUV 720Y 360CbCr",
+ "disabled 2",
+ "8 bit ITU-R BT.656 720Y 360CbCr",
+ "8 bit ITU-R BT.656 640Y 320CbCr",
+};
+
+/**
+ * struct ili9322_config - the system specific ILI9322 configuration
+ * @width_mm: physical panel width [mm]
+ * @height_mm: physical panel height [mm]
+ * @flip_horizontal: flip the image horizontally (right-to-left scan)
+ * (only in RGB and YUV modes)
+ * @flip_vertical: flip the image vertically (down-to-up scan)
+ * (only in RGB and YUV modes)
+ * @input: the input/entry type used in this system; if this is set to
+ * ILI9322_INPUT_UNKNOWN the driver will try to figure it out by probing
+ * the hardware
+ * @vreg1out_mv: the output in millivolts for the VREG1OUT regulator used
+ * to drive the physical display. Valid range is 3600 thru 6000 in 100
+ * millivolt increments. If not specified, the hardware default will be
+ * used (4.5V).
+ * @vcom_high_percent: the percentage of VREG1OUT used for the peak
+ * voltage on the communications link. Valid range is 37 thru 100
+ * percent. If not specified, the hardware default will be used (91%).
+ * @vcom_amplitude_percent: the percentage of VREG1OUT used for the
+ * peak-to-peak amplitude of the communication signals to the physical
+ * display. Valid range is 70 thru 132 percent in increments of two
+ * percent. Odd percentages will be truncated. If not specified, the
+ * hardware default will be used (114%).
+ * @dclk_active_high: data/pixel clock active high, data will be clocked
+ * in on the rising edge of the DCLK (this is usually the case).
+ * @syncmode: The synchronization mode, what sync signals are emitted.
+ * See the enum for details.
+ * @de_active_high: DE (data enable) is active high
+ * @hsync_active_high: HSYNC is active high
+ * @vsync_active_high: VSYNC is active high
+ * @gamma_corr_pos: a set of 8 nybbles describing positive
+ * gamma correction for voltages V1 thru V8. Valid range 0..15
+ * @gamma_corr_neg: a set of 8 nybbles describing negative
+ * gamma correction for voltages V1 thru V8. Valid range 0..15
+ *
+ * These adjust what grayscale voltage will be output for input data V1 = 0,
+ * V2 = 16, V3 = 48, V4 = 96, V5 = 160, V6 = 208, V7 = 240 and V8 = 255.
+ * The curve is shaped like this:
+ *
+ * ^
+ * | V8
+ * | V7
+ * | V6
+ * | V5
+ * | V4
+ * | V3
+ * | V2
+ * | V1
+ * +----------------------------------------------------------->
+ * 0 16 48 96 160 208 240 255
+ *
+ * The negative and positive gamma values adjust the V1 thru V8 up/down
+ * according to the datasheet specifications. This is a property of the
+ * physical display connected to the display controller and may vary.
+ * If defined, both arrays must be supplied in full. If the properties
+ * are not supplied, hardware defaults will be used.
+ */
+struct ili9322_config {
+ u32 width_mm;
+ u32 height_mm;
+ bool flip_horizontal;
+ bool flip_vertical;
+ enum ili9322_input input;
+ u32 vreg1out_mv;
+ u32 vcom_high_percent;
+ u32 vcom_amplitude_percent;
+ bool dclk_active_high;
+ bool de_active_high;
+ bool hsync_active_high;
+ bool vsync_active_high;
+ u8 syncmode;
+ u8 gamma_corr_pos[8];
+ u8 gamma_corr_neg[8];
+};
+
+struct ili9322 {
+ struct device *dev;
+ const struct ili9322_config *conf;
+ struct drm_panel panel;
+ struct regmap *regmap;
+ struct regulator_bulk_data supplies[3];
+ struct gpio_desc *reset_gpio;
+ enum ili9322_input input;
+ struct videomode vm;
+ u8 gamma[8];
+ u8 vreg1out;
+ u8 vcom_high;
+ u8 vcom_amplitude;
+};
+
+static inline struct ili9322 *panel_to_ili9322(struct drm_panel *panel)
+{
+ return container_of(panel, struct ili9322, panel);
+}
+
+static int ili9322_regmap_spi_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 buf[2];
+
+ /* Clear bit 7 to write */
+ memcpy(buf, data, 2);
+ buf[0] &= ~0x80;
+
+ dev_dbg(dev, "WRITE: %02x %02x\n", buf[0], buf[1]);
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int ili9322_regmap_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 buf[1];
+
+ /* Set bit 7 to 1 to read */
+ memcpy(buf, reg, 1);
+ dev_dbg(dev, "READ: %02x reg size = %zu, val size = %zu\n",
+ buf[0], reg_size, val_size);
+ buf[0] |= 0x80;
+
+ return spi_write_then_read(spi, buf, 1, val, 1);
+}
+
+static struct regmap_bus ili9322_regmap_bus = {
+ .write = ili9322_regmap_spi_write,
+ .read = ili9322_regmap_spi_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
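+
+/*
+ * Summary of the wire format implemented by the two accessors above: a
+ * write is the two bytes [reg & 0x7f][value], a read is the single byte
+ * [reg | 0x80] followed by one byte of data clocked back, so bit 7 of
+ * the register address selects the transfer direction.
+ */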
+
+static bool ili9322_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static bool ili9322_writeable_reg(struct device *dev, unsigned int reg)
+{
+ /* Only register 0 (the chip ID) is read-only */
+ if (reg == 0x00)
+ return false;
+ return true;
+}
+
+static const struct regmap_config ili9322_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x44,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = ili9322_volatile_reg,
+ .writeable_reg = ili9322_writeable_reg,
+};
+
+static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
+{
+ struct drm_connector *connector = panel->connector;
+ u8 reg;
+ int ret;
+ int i;
+
+ /* Reset display */
+ ret = regmap_write(ili->regmap, ILI9322_GLOBAL_RESET,
+ ILI9322_GLOBAL_RESET_ASSERT);
+ if (ret) {
+ dev_err(ili->dev, "can't issue GRESET (%d)\n", ret);
+ return ret;
+ }
+
+ /* Set up the main voltage regulator */
+ if (ili->vreg1out != U8_MAX) {
+ ret = regmap_write(ili->regmap, ILI9322_VREG1_VOLTAGE,
+ ili->vreg1out);
+ if (ret) {
+ dev_err(ili->dev, "can't set up VREG1OUT (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (ili->vcom_amplitude != U8_MAX) {
+ ret = regmap_write(ili->regmap, ILI9322_VCOM_AMP,
+ ili->vcom_amplitude);
+ if (ret) {
+ dev_err(ili->dev,
+ "can't set up VCOM amplitude (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (ili->vcom_high != U8_MAX) {
+ ret = regmap_write(ili->regmap, ILI9322_VCOM_HIGH,
+ ili->vcom_high);
+ if (ret) {
+ dev_err(ili->dev, "can't set up VCOM high (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ /* Set up gamma correction */
+ for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
+ ret = regmap_write(ili->regmap, ILI9322_GAMMA_1 + i,
+ ili->gamma[i]);
+ if (ret) {
+ dev_err(ili->dev,
+ "can't write gamma V%d to 0x%02x (%d)\n",
+ i + 1, ILI9322_GAMMA_1 + i, ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Polarity and inverted color order for RGB input.
+ * None of this applies in the BT.656 mode.
+ */
+ if (ili->conf->dclk_active_high) {
+ reg = ILI9322_POL_DCLK;
+ connector->display_info.bus_flags |=
+ DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ } else {
+ reg = 0;
+ connector->display_info.bus_flags |=
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ }
+ if (ili->conf->de_active_high) {
+ reg |= ILI9322_POL_DE;
+ connector->display_info.bus_flags |=
+ DRM_BUS_FLAG_DE_HIGH;
+ } else {
+ connector->display_info.bus_flags |=
+ DRM_BUS_FLAG_DE_LOW;
+ }
+ if (ili->conf->hsync_active_high)
+ reg |= ILI9322_POL_HSYNC;
+ if (ili->conf->vsync_active_high)
+ reg |= ILI9322_POL_VSYNC;
+ ret = regmap_write(ili->regmap, ILI9322_POL, reg);
+ if (ret) {
+ dev_err(ili->dev, "can't write POL register (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Set up interface control.
+ * This is not used in the BT.656 mode (no H/Vsync or DE signals).
+ */
+ reg = ili->conf->syncmode;
+ reg |= ILI9322_IF_CTRL_LINE_INVERSION;
+ ret = regmap_write(ili->regmap, ILI9322_IF_CTRL, reg);
+ if (ret) {
+ dev_err(ili->dev, "can't write IF CTRL register (%d)\n", ret);
+ return ret;
+ }
+
+ /* Set up the input mode */
+ reg = (ili->input << 4);
+ /* These are inverted, setting to 1 is the default, clearing flips */
+ if (!ili->conf->flip_horizontal)
+ reg |= ILI9322_ENTRY_HDIR;
+ if (!ili->conf->flip_vertical)
+ reg |= ILI9322_ENTRY_VDIR;
+ reg |= ILI9322_ENTRY_AUTODETECT;
+ ret = regmap_write(ili->regmap, ILI9322_ENTRY, reg);
+ if (ret) {
+ dev_err(ili->dev, "can't write ENTRY reg (%d)\n", ret);
+ return ret;
+ }
+ dev_info(ili->dev, "display is in %s mode, syncmode %02x\n",
+ ili9322_inputs[ili->input],
+ ili->conf->syncmode);
+
+ dev_info(ili->dev, "initialized display\n");
+
+ return 0;
+}
+
+/*
+ * This power-on sequence is from the datasheet, page 57.
+ */
+static int ili9322_power_on(struct ili9322 *ili)
+{
+ int ret;
+
+ /* Assert RESET */
+ gpiod_set_value(ili->reset_gpio, 1);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ili->supplies), ili->supplies);
+ if (ret < 0) {
+ dev_err(ili->dev, "unable to enable regulators\n");
+ return ret;
+ }
+ msleep(20);
+
+ /* De-assert RESET */
+ gpiod_set_value(ili->reset_gpio, 0);
+
+ msleep(10);
+
+ return 0;
+}
+
+static int ili9322_power_off(struct ili9322 *ili)
+{
+ return regulator_bulk_disable(ARRAY_SIZE(ili->supplies), ili->supplies);
+}
+
+static int ili9322_disable(struct drm_panel *panel)
+{
+ struct ili9322 *ili = panel_to_ili9322(panel);
+ int ret;
+
+ ret = regmap_write(ili->regmap, ILI9322_POW_CTRL,
+ ILI9322_POW_CTRL_STANDBY);
+ if (ret) {
+ dev_err(ili->dev, "unable to go to standby mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ili9322_unprepare(struct drm_panel *panel)
+{
+ struct ili9322 *ili = panel_to_ili9322(panel);
+
+ return ili9322_power_off(ili);
+}
+
+static int ili9322_prepare(struct drm_panel *panel)
+{
+ struct ili9322 *ili = panel_to_ili9322(panel);
+ int ret;
+
+ ret = ili9322_power_on(ili);
+ if (ret < 0)
+ return ret;
+
+ ret = ili9322_init(panel, ili);
+ if (ret < 0)
+ ili9322_unprepare(panel);
+
+ return ret;
+}
+
+static int ili9322_enable(struct drm_panel *panel)
+{
+ struct ili9322 *ili = panel_to_ili9322(panel);
+ int ret;
+
+ ret = regmap_write(ili->regmap, ILI9322_POW_CTRL,
+ ILI9322_POW_CTRL_DEFAULT);
+ if (ret) {
+ dev_err(ili->dev, "unable to enable panel\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Serial RGB modes */
+static const struct drm_display_mode srgb_320x240_mode = {
+ .clock = 2453500,
+ .hdisplay = 320,
+ .hsync_start = 320 + 359,
+ .hsync_end = 320 + 359 + 1,
+ .htotal = 320 + 359 + 1 + 241,
+ .vdisplay = 240,
+ .vsync_start = 240 + 4,
+ .vsync_end = 240 + 4 + 1,
+ .vtotal = 262,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static const struct drm_display_mode srgb_360x240_mode = {
+ .clock = 2700000,
+ .hdisplay = 360,
+ .hsync_start = 360 + 35,
+ .hsync_end = 360 + 35 + 1,
+ .htotal = 360 + 35 + 1 + 241,
+ .vdisplay = 240,
+ .vsync_start = 240 + 21,
+ .vsync_end = 240 + 21 + 1,
+ .vtotal = 262,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+/* This is the only mode listed for parallel RGB in the datasheet */
+static const struct drm_display_mode prgb_320x240_mode = {
+ .clock = 6400000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 38,
+ .hsync_end = 320 + 38 + 1,
+ .htotal = 320 + 38 + 1 + 50,
+ .vdisplay = 240,
+ .vsync_start = 240 + 4,
+ .vsync_end = 240 + 4 + 1,
+ .vtotal = 262,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+/* YUV modes */
+static const struct drm_display_mode yuv_640x320_mode = {
+ .clock = 2454000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 252,
+ .hsync_end = 640 + 252 + 1,
+ .htotal = 640 + 252 + 1 + 28,
+ .vdisplay = 320,
+ .vsync_start = 320 + 4,
+ .vsync_end = 320 + 4 + 1,
+ .vtotal = 320 + 4 + 1 + 18,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static const struct drm_display_mode yuv_720x360_mode = {
+ .clock = 2700000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 252,
+ .hsync_end = 720 + 252 + 1,
+ .htotal = 720 + 252 + 1 + 24,
+ .vdisplay = 360,
+ .vsync_start = 360 + 4,
+ .vsync_end = 360 + 4 + 1,
+ .vtotal = 360 + 4 + 1 + 18,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+/* BT.656 VGA mode, 640x480 */
+static const struct drm_display_mode itu_r_bt_656_640_mode = {
+ .clock = 2454000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 3,
+ .hsync_end = 640 + 3 + 1,
+ .htotal = 640 + 3 + 1 + 272,
+ .vdisplay = 480,
+ .vsync_start = 480 + 4,
+ .vsync_end = 480 + 4 + 1,
+ .vtotal = 500,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+/* BT.656 D1 mode 720x480 */
+static const struct drm_display_mode itu_r_bt_656_720_mode = {
+ .clock = 2700000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 3,
+ .hsync_end = 720 + 3 + 1,
+ .htotal = 720 + 3 + 1 + 272,
+ .vdisplay = 480,
+ .vsync_start = 480 + 4,
+ .vsync_end = 480 + 4 + 1,
+ .vtotal = 500,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static int ili9322_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct ili9322 *ili = panel_to_ili9322(panel);
+ struct drm_display_mode *mode;
+
+ strncpy(connector->display_info.name, "ILI9322 TFT LCD driver",
+ DRM_DISPLAY_INFO_LEN);
+ connector->display_info.width_mm = ili->conf->width_mm;
+ connector->display_info.height_mm = ili->conf->height_mm;
+
+ switch (ili->input) {
+ case ILI9322_INPUT_SRGB_DUMMY_320X240:
+ mode = drm_mode_duplicate(panel->drm, &srgb_320x240_mode);
+ break;
+ case ILI9322_INPUT_SRGB_DUMMY_360X240:
+ mode = drm_mode_duplicate(panel->drm, &srgb_360x240_mode);
+ break;
+ case ILI9322_INPUT_PRGB_THROUGH:
+ case ILI9322_INPUT_PRGB_ALIGNED:
+ mode = drm_mode_duplicate(panel->drm, &prgb_320x240_mode);
+ break;
+ case ILI9322_INPUT_YUV_640X320_YCBCR:
+ mode = drm_mode_duplicate(panel->drm, &yuv_640x320_mode);
+ break;
+ case ILI9322_INPUT_YUV_720X360_YCBCR:
+ mode = drm_mode_duplicate(panel->drm, &yuv_720x360_mode);
+ break;
+ case ILI9322_INPUT_ITU_R_BT656_720X360_YCBCR:
+ mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_720_mode);
+ break;
+ case ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR:
+ mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_640_mode);
+ break;
+ default:
+ mode = NULL;
+ break;
+ }
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ /*
+ * This is the preferred mode because most people are going
+ * to want to use the display with VGA type graphics.
+ */
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ /* Set up the polarity */
+ if (ili->conf->hsync_active_high)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (ili->conf->vsync_active_high)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ mode->width_mm = ili->conf->width_mm;
+ mode->height_mm = ili->conf->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs ili9322_drm_funcs = {
+ .disable = ili9322_disable,
+ .unprepare = ili9322_unprepare,
+ .prepare = ili9322_prepare,
+ .enable = ili9322_enable,
+ .get_modes = ili9322_get_modes,
+};
+
+static int ili9322_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ili9322 *ili;
+ const struct regmap_config *regmap_config;
+ u8 gamma;
+ u32 val;
+ int ret;
+ int i;
+
+ ili = devm_kzalloc(dev, sizeof(struct ili9322), GFP_KERNEL);
+ if (!ili)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ili);
+
+ ili->dev = dev;
+
+ /*
+ * Every new incarnation of this display must have a unique
+ * data entry for the system in this driver.
+ */
+ ili->conf = of_device_get_match_data(dev);
+ if (!ili->conf) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
+ val = ili->conf->vreg1out_mv;
+ if (!val) {
+ /* Default HW value, do not touch (should be 4.5V) */
+ ili->vreg1out = U8_MAX;
+ } else {
+ if (val < 3600) {
+ dev_err(dev, "too low VREG1OUT\n");
+ return -EINVAL;
+ }
+ if (val > 6000) {
+ dev_err(dev, "too high VREG1OUT\n");
+ return -EINVAL;
+ }
+ if ((val % 100) != 0) {
+ dev_err(dev, "VREG1OUT is not a multiple of 100 millivolt\n");
+ return -EINVAL;
+ }
+ val -= 3600;
+ val /= 100;
+ dev_dbg(dev, "VREG1OUT = 0x%02x\n", val);
+ ili->vreg1out = val;
+ }
+
+ val = ili->conf->vcom_high_percent;
+ if (!val) {
+ /* Default HW value, do not touch (should be 91%) */
+ ili->vcom_high = U8_MAX;
+ } else {
+ if (val < 37) {
+ dev_err(dev, "too low VCOM high\n");
+ return -EINVAL;
+ }
+ if (val > 100) {
+ dev_err(dev, "too high VCOM high\n");
+ return -EINVAL;
+ }
+ val -= 37;
+ dev_dbg(dev, "VCOM high = 0x%02x\n", val);
+ ili->vcom_high = val;
+ }
+
+ val = ili->conf->vcom_amplitude_percent;
+ if (!val) {
+ /* Default HW value, do not touch (should be 114%) */
+ ili->vcom_amplitude = U8_MAX;
+ } else {
+ if (val < 70) {
+ dev_err(dev, "too low VCOM amplitude\n");
+ return -EINVAL;
+ }
+ if (val > 132) {
+ dev_err(dev, "too high VCOM amplitude\n");
+ return -EINVAL;
+ }
+ val -= 70;
+ val >>= 1; /* Increments of 2% */
+ dev_dbg(dev, "VCOM amplitude = 0x%02x\n", val);
+ ili->vcom_amplitude = val;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
+ val = ili->conf->gamma_corr_neg[i];
+ if (val > 15) {
+ dev_err(dev, "negative gamma %u > 15, capping\n", val);
+ val = 15;
+ }
+ gamma = val << 4;
+ val = ili->conf->gamma_corr_pos[i];
+ if (val > 15) {
+ dev_err(dev, "positive gamma %u > 15, capping\n", val);
+ val = 15;
+ }
+ gamma |= val;
+ ili->gamma[i] = gamma;
+ dev_dbg(dev, "gamma V%d: 0x%02x\n", i + 1, gamma);
+ }
+
+ ili->supplies[0].supply = "vcc"; /* 2.7-3.6 V */
+ ili->supplies[1].supply = "iovcc"; /* 1.65-3.6V */
+ ili->supplies[2].supply = "vci"; /* 2.7-3.6V */
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ili->supplies),
+ ili->supplies);
+ if (ret < 0)
+ return ret;
+ ret = regulator_set_voltage(ili->supplies[0].consumer,
+ 2700000, 3600000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(ili->supplies[1].consumer,
+ 1650000, 3600000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(ili->supplies[2].consumer,
+ 2700000, 3600000);
+ if (ret)
+ return ret;
+
+ ili->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ili->reset_gpio)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(ili->reset_gpio);
+ }
+
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(dev, "spi setup failed.\n");
+ return ret;
+ }
+ regmap_config = &ili9322_regmap_config;
+ ili->regmap = devm_regmap_init(dev, &ili9322_regmap_bus, dev,
+ regmap_config);
+ if (IS_ERR(ili->regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(ili->regmap);
+ }
+
+ ret = regmap_read(ili->regmap, ILI9322_CHIP_ID, &val);
+ if (ret) {
+ dev_err(dev, "can't get chip ID (%d)\n", ret);
+ return ret;
+ }
+ if (val != ILI9322_CHIP_ID_MAGIC) {
+ dev_err(dev, "chip ID 0x%02x, expected 0x%02x\n", val,
+ ILI9322_CHIP_ID_MAGIC);
+ return -ENODEV;
+ }
+
+ /* Probe the system to find the display setting */
+ if (ili->conf->input == ILI9322_INPUT_UNKNOWN) {
+ ret = regmap_read(ili->regmap, ILI9322_ENTRY, &val);
+ if (ret) {
+ dev_err(dev, "can't get entry setting (%d)\n", ret);
+ return ret;
+ }
+ /* Input enum corresponds to HW setting */
+ ili->input = (val >> 4) & 0x0f;
+ if (ili->input >= ILI9322_INPUT_UNKNOWN)
+ ili->input = ILI9322_INPUT_UNKNOWN;
+ } else {
+ ili->input = ili->conf->input;
+ }
+
+ drm_panel_init(&ili->panel);
+ ili->panel.dev = dev;
+ ili->panel.funcs = &ili9322_drm_funcs;
+
+ return drm_panel_add(&ili->panel);
+}
+
+static int ili9322_remove(struct spi_device *spi)
+{
+ struct ili9322 *ili = spi_get_drvdata(spi);
+
+ ili9322_power_off(ili);
+ drm_panel_remove(&ili->panel);
+
+ return 0;
+}
+
+/*
+ * The D-Link DIR-685 panel is marked LM918A01-1A SY-B4-091116-E0199
+ */
+static const struct ili9322_config ili9322_dir_685 = {
+ .width_mm = 65,
+ .height_mm = 50,
+ .input = ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR,
+ .vreg1out_mv = 4600,
+ .vcom_high_percent = 91,
+ .vcom_amplitude_percent = 114,
+ .syncmode = ILI9322_IF_CTRL_SYNC_DISABLED,
+ .dclk_active_high = true,
+ .gamma_corr_neg = { 0xa, 0x5, 0x7, 0x7, 0x7, 0x5, 0x1, 0x6 },
+ .gamma_corr_pos = { 0x7, 0x7, 0x3, 0x2, 0x3, 0x5, 0x7, 0x2 },
+};
+
+static const struct of_device_id ili9322_of_match[] = {
+ {
+ .compatible = "dlink,dir-685-panel",
+ .data = &ili9322_dir_685,
+ },
+ {
+ .compatible = "ilitek,ili9322",
+ .data = NULL,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9322_of_match);
+
+static struct spi_driver ili9322_driver = {
+ .probe = ili9322_probe,
+ .remove = ili9322_remove,
+ .driver = {
+ .name = "panel-ilitek-ili9322",
+ .of_match_table = ili9322_of_match,
+ },
+};
+module_spi_driver(ili9322_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("ILI9322 LCD panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 6ba9344..57df39b 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -45,8 +45,7 @@ static int innolux_panel_disable(struct drm_panel *panel)
if (!innolux->enabled)
return 0;
- innolux->backlight->props.power = FB_BLANK_POWERDOWN;
- backlight_update_status(innolux->backlight);
+ backlight_disable(innolux->backlight);
err = mipi_dsi_dcs_set_display_off(innolux->link);
if (err < 0)
@@ -151,8 +150,7 @@ static int innolux_panel_enable(struct drm_panel *panel)
if (innolux->enabled)
return 0;
- innolux->backlight->props.power = FB_BLANK_UNBLANK;
- ret = backlight_update_status(innolux->backlight);
+ ret = backlight_enable(innolux->backlight);
if (ret) {
DRM_DEV_ERROR(panel->drm->dev,
"Failed to enable backlight %d\n", ret);
@@ -217,7 +215,6 @@ MODULE_DEVICE_TABLE(of, innolux_of_match);
static int innolux_panel_add(struct innolux_panel *innolux)
{
struct device *dev = &innolux->link->dev;
- struct device_node *np;
int err;
innolux->supply = devm_regulator_get(dev, "power");
@@ -232,37 +229,22 @@ static int innolux_panel_add(struct innolux_panel *innolux)
innolux->enable_gpio = NULL;
}
- np = of_parse_phandle(dev->of_node, "backlight", 0);
- if (np) {
- innolux->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
+ innolux->backlight = devm_of_find_backlight(dev);
- if (!innolux->backlight)
- return -EPROBE_DEFER;
- }
+ if (IS_ERR(innolux->backlight))
+ return PTR_ERR(innolux->backlight);
drm_panel_init(&innolux->base);
innolux->base.funcs = &innolux_panel_funcs;
innolux->base.dev = &innolux->link->dev;
- err = drm_panel_add(&innolux->base);
- if (err < 0)
- goto put_backlight;
-
- return 0;
-
-put_backlight:
- put_device(&innolux->backlight->dev);
-
- return err;
+ return drm_panel_add(&innolux->base);
}
static void innolux_panel_del(struct innolux_panel *innolux)
{
if (innolux->base.dev)
drm_panel_remove(&innolux->base);
-
- put_device(&innolux->backlight->dev);
}
static int innolux_panel_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 5b2340e..0a94ab7 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -192,8 +192,7 @@ static int jdi_panel_disable(struct drm_panel *panel)
if (!jdi->enabled)
return 0;
- jdi->backlight->props.power = FB_BLANK_POWERDOWN;
- backlight_update_status(jdi->backlight);
+ backlight_disable(jdi->backlight);
jdi->enabled = false;
@@ -289,8 +288,7 @@ static int jdi_panel_enable(struct drm_panel *panel)
if (jdi->enabled)
return 0;
- jdi->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(jdi->backlight);
+ backlight_enable(jdi->backlight);
jdi->enabled = true;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index e2d57c0..5185819 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -1,5 +1,5 @@
/*
- * rcar_du_crtc.c -- R-Car Display Unit CRTCs
+ * Generic LVDS panel driver
*
* Copyright (C) 2016 Laurent Pinchart
* Copyright (C) 2016 Renesas Electronics Corporation
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <drm/drmP.h>
@@ -39,6 +40,7 @@ struct panel_lvds {
bool data_mirror;
struct backlight_device *backlight;
+ struct regulator *supply;
struct gpio_desc *enable_gpio;
struct gpio_desc *reset_gpio;
@@ -69,6 +71,9 @@ static int panel_lvds_unprepare(struct drm_panel *panel)
if (lvds->enable_gpio)
gpiod_set_value_cansleep(lvds->enable_gpio, 0);
+ if (lvds->supply)
+ regulator_disable(lvds->supply);
+
return 0;
}
@@ -76,6 +81,17 @@ static int panel_lvds_prepare(struct drm_panel *panel)
{
struct panel_lvds *lvds = to_panel_lvds(panel);
+ if (lvds->supply) {
+ int err;
+
+ err = regulator_enable(lvds->supply);
+ if (err < 0) {
+ dev_err(lvds->dev, "failed to enable supply: %d\n",
+ err);
+ return err;
+ }
+ }
+
if (lvds->enable_gpio)
gpiod_set_value_cansleep(lvds->enable_gpio, 1);
@@ -196,6 +212,20 @@ static int panel_lvds_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ lvds->supply = devm_regulator_get_optional(lvds->dev, "power");
+ if (IS_ERR(lvds->supply)) {
+ ret = PTR_ERR(lvds->supply);
+
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(lvds->dev, "failed to request regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ lvds->supply = NULL;
+ }
+
/* Get GPIOs and backlight controller. */
lvds->enable_gpio = devm_gpiod_get_optional(lvds->dev, "enable",
GPIOD_OUT_LOW);
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index c189cd6..90f1ae4 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -1,16 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2017
*
* Authors: Philippe Cornu <philippe.cornu@st.com>
* Yannick Fertre <yannick.fertre@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
+
#include <drm/drmP.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#define DRV_NAME "orisetech_otm8009a"
@@ -62,6 +63,7 @@ struct otm8009a {
struct drm_panel panel;
struct backlight_device *bl_dev;
struct gpio_desc *reset_gpio;
+ struct regulator *supply;
bool prepared;
bool enabled;
};
@@ -279,6 +281,8 @@ static int otm8009a_unprepare(struct drm_panel *panel)
msleep(20);
}
+ regulator_disable(ctx->supply);
+
ctx->prepared = false;
return 0;
@@ -292,6 +296,12 @@ static int otm8009a_prepare(struct drm_panel *panel)
if (ctx->prepared)
return 0;
+ ret = regulator_enable(ctx->supply);
+ if (ret < 0) {
+ DRM_ERROR("failed to enable supply: %d\n", ret);
+ return ret;
+ }
+
if (ctx->reset_gpio) {
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
@@ -414,6 +424,13 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
+ ctx->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(ctx->supply)) {
+ ret = PTR_ERR(ctx->supply);
+ dev_err(dev, "failed to request regulator: %d\n", ret);
+ return ret;
+ }
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 7f915f7..74a8061 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -59,34 +59,28 @@ static inline struct wuxga_nt_panel *to_wuxga_nt_panel(struct drm_panel *panel)
static int wuxga_nt_panel_on(struct wuxga_nt_panel *wuxga_nt)
{
- struct mipi_dsi_device *dsi = wuxga_nt->dsi;
- int ret;
-
- ret = mipi_dsi_turn_on_peripheral(dsi);
- if (ret < 0)
- return ret;
-
- return 0;
+ return mipi_dsi_turn_on_peripheral(wuxga_nt->dsi);
}
static int wuxga_nt_panel_disable(struct drm_panel *panel)
{
struct wuxga_nt_panel *wuxga_nt = to_wuxga_nt_panel(panel);
+ int mipi_ret, bl_ret = 0;
if (!wuxga_nt->enabled)
return 0;
- mipi_dsi_shutdown_peripheral(wuxga_nt->dsi);
+ mipi_ret = mipi_dsi_shutdown_peripheral(wuxga_nt->dsi);
if (wuxga_nt->backlight) {
wuxga_nt->backlight->props.power = FB_BLANK_POWERDOWN;
wuxga_nt->backlight->props.state |= BL_CORE_FBBLANK;
- backlight_update_status(wuxga_nt->backlight);
+ bl_ret = backlight_update_status(wuxga_nt->backlight);
}
wuxga_nt->enabled = false;
- return 0;
+ return mipi_ret ? mipi_ret : bl_ret;
}
static int wuxga_nt_panel_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 890fd6f..d964d45 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -221,7 +221,7 @@ static struct rpi_touchscreen *panel_to_ts(struct drm_panel *panel)
return container_of(panel, struct rpi_touchscreen, base);
}
-static u8 rpi_touchscreen_i2c_read(struct rpi_touchscreen *ts, u8 reg)
+static int rpi_touchscreen_i2c_read(struct rpi_touchscreen *ts, u8 reg)
{
return i2c_smbus_read_byte_data(ts->i2c, reg);
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
new file mode 100644
index 0000000..7759353
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ * Authors: Philippe Cornu <philippe.cornu@st.com>
+ * Yannick Fertre <yannick.fertre@st.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+/*** Manufacturer Command Set ***/
+#define MCS_CMD_MODE_SW 0xFE /* CMD Mode Switch */
+#define MCS_CMD1_UCS 0x00 /* User Command Set (UCS = CMD1) */
+#define MCS_CMD2_P0 0x01 /* Manufacture Command Set Page0 (CMD2 P0) */
+#define MCS_CMD2_P1 0x02 /* Manufacture Command Set Page1 (CMD2 P1) */
+#define MCS_CMD2_P2 0x03 /* Manufacture Command Set Page2 (CMD2 P2) */
+#define MCS_CMD2_P3 0x04 /* Manufacture Command Set Page3 (CMD2 P3) */
+
+/* CMD2 P0 commands (Display Options and Power) */
+#define MCS_STBCTR 0x12 /* TE1 Output Setting Zig-Zag Connection */
+#define MCS_SGOPCTR 0x16 /* Source Bias Current */
+#define MCS_SDCTR 0x1A /* Source Output Delay Time */
+#define MCS_INVCTR 0x1B /* Inversion Type */
+#define MCS_EXT_PWR_IC 0x24 /* External PWR IC Control */
+#define MCS_SETAVDD 0x27 /* PFM Control for AVDD Output */
+#define MCS_SETAVEE 0x29 /* PFM Control for AVEE Output */
+#define MCS_BT2CTR 0x2B /* DDVDL Charge Pump Control */
+#define MCS_BT3CTR 0x2F /* VGH Charge Pump Control */
+#define MCS_BT4CTR 0x34 /* VGL Charge Pump Control */
+#define MCS_VCMCTR 0x46 /* VCOM Output Level Control */
+#define MCS_SETVGN 0x52 /* VG M/S N Control */
+#define MCS_SETVGP 0x54 /* VG M/S P Control */
+#define MCS_SW_CTRL 0x5F /* Interface Control for PFM and MIPI */
+
+/* CMD2 P2 commands (GOA Timing Control) - no description in datasheet */
+#define GOA_VSTV1 0x00
+#define GOA_VSTV2 0x07
+#define GOA_VCLK1 0x0E
+#define GOA_VCLK2 0x17
+#define GOA_VCLK_OPT1 0x20
+#define GOA_BICLK1 0x2A
+#define GOA_BICLK2 0x37
+#define GOA_BICLK3 0x44
+#define GOA_BICLK4 0x4F
+#define GOA_BICLK_OPT1 0x5B
+#define GOA_BICLK_OPT2 0x60
+#define MCS_GOA_GPO1 0x6D
+#define MCS_GOA_GPO2 0x71
+#define MCS_GOA_EQ 0x74
+#define MCS_GOA_CLK_GALLON 0x7C
+#define MCS_GOA_FS_SEL0 0x7E
+#define MCS_GOA_FS_SEL1 0x87
+#define MCS_GOA_FS_SEL2 0x91
+#define MCS_GOA_FS_SEL3 0x9B
+#define MCS_GOA_BS_SEL0 0xAC
+#define MCS_GOA_BS_SEL1 0xB5
+#define MCS_GOA_BS_SEL2 0xBF
+#define MCS_GOA_BS_SEL3 0xC9
+#define MCS_GOA_BS_SEL4 0xD3
+
+/* CMD2 P3 commands (Gamma) */
+#define MCS_GAMMA_VP 0x60 /* Gamma VP1~VP16 */
+#define MCS_GAMMA_VN 0x70 /* Gamma VN1~VN16 */
+
+struct rm68200 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *supply;
+ struct backlight_device *backlight;
+ bool prepared;
+ bool enabled;
+};
+
+static const struct drm_display_mode default_mode = {
+ .clock = 52582,
+ .hdisplay = 720,
+ .hsync_start = 720 + 38,
+ .hsync_end = 720 + 38 + 8,
+ .htotal = 720 + 38 + 8 + 38,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 12,
+ .vsync_end = 1280 + 12 + 4,
+ .vtotal = 1280 + 12 + 4 + 12,
+ .vrefresh = 50,
+ .flags = 0,
+ .width_mm = 68,
+ .height_mm = 122,
+};
+
+static inline struct rm68200 *panel_to_rm68200(struct drm_panel *panel)
+{
+ return container_of(panel, struct rm68200, panel);
+}
+
+static void rm68200_dcs_write_buf(struct rm68200 *ctx, const void *data,
+ size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int err;
+
+ err = mipi_dsi_dcs_write_buffer(dsi, data, len);
+ if (err < 0)
+ DRM_ERROR_RATELIMITED("MIPI DSI DCS write buffer failed: %d\n",
+ err);
+}
+
+static void rm68200_dcs_write_cmd(struct rm68200 *ctx, u8 cmd, u8 value)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int err;
+
+ err = mipi_dsi_dcs_write(dsi, cmd, &value, 1);
+ if (err < 0)
+ DRM_ERROR_RATELIMITED("MIPI DSI DCS write failed: %d\n", err);
+}
+
+#define dcs_write_seq(ctx, seq...) \
+({ \
+ static const u8 d[] = { seq }; \
+ \
+ rm68200_dcs_write_buf(ctx, d, ARRAY_SIZE(d)); \
+})
+
+/*
+ * This panel is not able to auto-increment all cmd addresses so for some of
+ * them, we need to send them one by one...
+ */
+#define dcs_write_cmd_seq(ctx, cmd, seq...) \
+({ \
+ static const u8 d[] = { seq }; \
+ unsigned int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(d); i++) \
+ rm68200_dcs_write_cmd(ctx, cmd + i, d[i]); \
+})
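+
+/*
+ * For example, dcs_write_cmd_seq(ctx, MCS_EXT_PWR_IC, 0xC0, 0x53, 0x00)
+ * expands to three one-byte DCS writes to registers 0x24, 0x25 and 0x26,
+ * whereas dcs_write_seq() sends its whole payload in a single write.
+ */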
+
+static void rm68200_init_sequence(struct rm68200 *ctx)
+{
+ /* Enter CMD2 with page 0 */
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD2_P0);
+ dcs_write_cmd_seq(ctx, MCS_EXT_PWR_IC, 0xC0, 0x53, 0x00);
+ dcs_write_seq(ctx, MCS_BT2CTR, 0xE5);
+ dcs_write_seq(ctx, MCS_SETAVDD, 0x0A);
+ dcs_write_seq(ctx, MCS_SETAVEE, 0x0A);
+ dcs_write_seq(ctx, MCS_SGOPCTR, 0x52);
+ dcs_write_seq(ctx, MCS_BT3CTR, 0x53);
+ dcs_write_seq(ctx, MCS_BT4CTR, 0x5A);
+ dcs_write_seq(ctx, MCS_INVCTR, 0x00);
+ dcs_write_seq(ctx, MCS_STBCTR, 0x0A);
+ dcs_write_seq(ctx, MCS_SDCTR, 0x06);
+ dcs_write_seq(ctx, MCS_VCMCTR, 0x56);
+ dcs_write_seq(ctx, MCS_SETVGN, 0xA0, 0x00);
+ dcs_write_seq(ctx, MCS_SETVGP, 0xA0, 0x00);
+ dcs_write_seq(ctx, MCS_SW_CTRL, 0x11); /* 2 data lanes, see doc */
+
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD2_P2);
+ dcs_write_seq(ctx, GOA_VSTV1, 0x05);
+ dcs_write_seq(ctx, 0x02, 0x0B);
+ dcs_write_seq(ctx, 0x03, 0x0F);
+ dcs_write_seq(ctx, 0x04, 0x7D, 0x00, 0x50);
+ dcs_write_cmd_seq(ctx, GOA_VSTV2, 0x05, 0x16, 0x0D, 0x11, 0x7D, 0x00,
+ 0x50);
+ dcs_write_cmd_seq(ctx, GOA_VCLK1, 0x07, 0x08, 0x01, 0x02, 0x00, 0x7D,
+ 0x00, 0x85, 0x08);
+ dcs_write_cmd_seq(ctx, GOA_VCLK2, 0x03, 0x04, 0x05, 0x06, 0x00, 0x7D,
+ 0x00, 0x85, 0x08);
+ dcs_write_seq(ctx, GOA_VCLK_OPT1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00);
+ dcs_write_cmd_seq(ctx, GOA_BICLK1, 0x07, 0x08);
+ dcs_write_seq(ctx, 0x2D, 0x01);
+ dcs_write_seq(ctx, 0x2F, 0x02, 0x00, 0x40, 0x05, 0x08, 0x54, 0x7D,
+ 0x00);
+ dcs_write_cmd_seq(ctx, GOA_BICLK2, 0x03, 0x04, 0x05, 0x06, 0x00);
+ dcs_write_seq(ctx, 0x3D, 0x40);
+ dcs_write_seq(ctx, 0x3F, 0x05, 0x08, 0x54, 0x7D, 0x00);
+ dcs_write_seq(ctx, GOA_BICLK3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, GOA_BICLK4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00);
+ dcs_write_seq(ctx, 0x58, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, GOA_BICLK_OPT1, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, GOA_BICLK_OPT2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, MCS_GOA_GPO1, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, MCS_GOA_GPO2, 0x00, 0x20, 0x00);
+ dcs_write_seq(ctx, MCS_GOA_EQ, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x00, 0x00);
+ dcs_write_seq(ctx, MCS_GOA_CLK_GALLON, 0x00, 0x00);
+ dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL0, 0xBF, 0x02, 0x06, 0x14, 0x10,
+ 0x16, 0x12, 0x08, 0x3F);
+ dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL1, 0x3F, 0x3F, 0x3F, 0x3F, 0x0C,
+ 0x0A, 0x0E, 0x3F, 0x3F, 0x00);
+ dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL2, 0x04, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x05, 0x01, 0x3F, 0x3F, 0x0F);
+ dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL3, 0x0B, 0x0D, 0x3F, 0x3F, 0x3F,
+ 0x3F);
+ dcs_write_cmd_seq(ctx, 0xA2, 0x3F, 0x09, 0x13, 0x17, 0x11, 0x15);
+ dcs_write_cmd_seq(ctx, 0xA9, 0x07, 0x03, 0x3F);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL0, 0x3F, 0x05, 0x01, 0x17, 0x13,
+ 0x15, 0x11, 0x0F, 0x3F);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL1, 0x3F, 0x3F, 0x3F, 0x3F, 0x0B,
+ 0x0D, 0x09, 0x3F, 0x3F, 0x07);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL2, 0x03, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x02, 0x06, 0x3F, 0x3F, 0x08);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL3, 0x0C, 0x0A, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x0E, 0x10, 0x14);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL4, 0x12, 0x16, 0x00, 0x04, 0x3F);
+ dcs_write_seq(ctx, 0xDC, 0x02);
+ dcs_write_seq(ctx, 0xDE, 0x12);
+
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, 0x0E); /* No documentation */
+ dcs_write_seq(ctx, 0x01, 0x75);
+
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD2_P3);
+ dcs_write_cmd_seq(ctx, MCS_GAMMA_VP, 0x00, 0x0C, 0x12, 0x0E, 0x06,
+ 0x12, 0x0E, 0x0B, 0x15, 0x0B, 0x10, 0x07, 0x0F,
+ 0x12, 0x0C, 0x00);
+ dcs_write_cmd_seq(ctx, MCS_GAMMA_VN, 0x00, 0x0C, 0x12, 0x0E, 0x06,
+ 0x12, 0x0E, 0x0B, 0x15, 0x0B, 0x10, 0x07, 0x0F,
+ 0x12, 0x0C, 0x00);
+
+ /* Exit CMD2 */
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD1_UCS);
+}
+
+static int rm68200_disable(struct drm_panel *panel)
+{
+ struct rm68200 *ctx = panel_to_rm68200(panel);
+
+ if (!ctx->enabled)
+ return 0;
+
+ backlight_disable(ctx->backlight);
+
+ ctx->enabled = false;
+
+ return 0;
+}
+
+static int rm68200_unprepare(struct drm_panel *panel)
+{
+ struct rm68200 *ctx = panel_to_rm68200(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret)
+ DRM_WARN("failed to set display off: %d\n", ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret)
+ DRM_WARN("failed to enter sleep mode: %d\n", ret);
+
+ msleep(120);
+
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ }
+
+ regulator_disable(ctx->supply);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int rm68200_prepare(struct drm_panel *panel)
+{
+ struct rm68200 *ctx = panel_to_rm68200(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_enable(ctx->supply);
+ if (ret < 0) {
+ DRM_ERROR("failed to enable supply: %d\n", ret);
+ return ret;
+ }
+
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(100);
+ }
+
+ rm68200_init_sequence(ctx);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret)
+ return ret;
+
+ msleep(125);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret)
+ return ret;
+
+ msleep(20);
+
+ ctx->prepared = true;
+
+ return 0;
+}
+
+static int rm68200_enable(struct drm_panel *panel)
+{
+ struct rm68200 *ctx = panel_to_rm68200(panel);
+
+ if (ctx->enabled)
+ return 0;
+
+ backlight_enable(ctx->backlight);
+
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int rm68200_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs rm68200_drm_funcs = {
+ .disable = rm68200_disable,
+ .unprepare = rm68200_unprepare,
+ .prepare = rm68200_prepare,
+ .enable = rm68200_enable,
+ .get_modes = rm68200_get_modes,
+};
+
+static int rm68200_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct rm68200 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ dev_err(dev, "cannot get reset GPIO: %d\n", ret);
+ return ret;
+ }
+
+ ctx->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(ctx->supply)) {
+ ret = PTR_ERR(ctx->supply);
+ dev_err(dev, "cannot get regulator: %d\n", ret);
+ return ret;
+ }
+
+ ctx->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(ctx->backlight))
+ return PTR_ERR(ctx->backlight);
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &rm68200_drm_funcs;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "mipi_dsi_attach() failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rm68200_remove(struct mipi_dsi_device *dsi)
+{
+ struct rm68200 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id raydium_rm68200_of_match[] = {
+ { .compatible = "raydium,rm68200" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, raydium_rm68200_of_match);
+
+static struct mipi_dsi_driver raydium_rm68200_driver = {
+ .probe = rm68200_probe,
+ .remove = rm68200_remove,
+ .driver = {
+ .name = "panel-raydium-rm68200",
+ .of_match_table = raydium_rm68200_of_match,
+ },
+};
+module_mipi_dsi_driver(raydium_rm68200_driver);
+
+MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("DRM Driver for Raydium RM68200 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index 3cce3ca..6bf8730 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -96,10 +96,7 @@ static int sharp_panel_disable(struct drm_panel *panel)
if (!sharp->enabled)
return 0;
- if (sharp->backlight) {
- sharp->backlight->props.power = FB_BLANK_POWERDOWN;
- backlight_update_status(sharp->backlight);
- }
+ backlight_disable(sharp->backlight);
sharp->enabled = false;
@@ -263,10 +260,7 @@ static int sharp_panel_enable(struct drm_panel *panel)
if (sharp->enabled)
return 0;
- if (sharp->backlight) {
- sharp->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(sharp->backlight);
- }
+ backlight_enable(sharp->backlight);
sharp->enabled = true;
@@ -324,8 +318,7 @@ MODULE_DEVICE_TABLE(of, sharp_of_match);
static int sharp_panel_add(struct sharp_panel *sharp)
{
- struct device_node *np;
- int err;
+ struct device *dev = &sharp->link1->dev;
sharp->mode = &default_mode;
@@ -333,30 +326,16 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->supply))
return PTR_ERR(sharp->supply);
- np = of_parse_phandle(sharp->link1->dev.of_node, "backlight", 0);
- if (np) {
- sharp->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
+ sharp->backlight = devm_of_find_backlight(dev);
- if (!sharp->backlight)
- return -EPROBE_DEFER;
- }
+ if (IS_ERR(sharp->backlight))
+ return PTR_ERR(sharp->backlight);
drm_panel_init(&sharp->base);
sharp->base.funcs = &sharp_panel_funcs;
sharp->base.dev = &sharp->link1->dev;
- err = drm_panel_add(&sharp->base);
- if (err < 0)
- goto put_backlight;
-
- return 0;
-
-put_backlight:
- if (sharp->backlight)
- put_device(&sharp->backlight->dev);
-
- return err;
+ return drm_panel_add(&sharp->base);
}
static void sharp_panel_del(struct sharp_panel *sharp)
@@ -364,9 +343,6 @@ static void sharp_panel_del(struct sharp_panel *sharp)
if (sharp->base.dev)
drm_panel_remove(&sharp->base);
- if (sharp->backlight)
- put_device(&sharp->backlight->dev);
-
if (sharp->link2)
put_device(&sharp->link2->dev);
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 3aeb0bd..494aa9b 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -117,10 +117,7 @@ static int sharp_nt_panel_disable(struct drm_panel *panel)
if (!sharp_nt->enabled)
return 0;
- if (sharp_nt->backlight) {
- sharp_nt->backlight->props.power = FB_BLANK_POWERDOWN;
- backlight_update_status(sharp_nt->backlight);
- }
+ backlight_disable(sharp_nt->backlight);
sharp_nt->enabled = false;
@@ -203,10 +200,7 @@ static int sharp_nt_panel_enable(struct drm_panel *panel)
if (sharp_nt->enabled)
return 0;
- if (sharp_nt->backlight) {
- sharp_nt->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(sharp_nt->backlight);
- }
+ backlight_enable(sharp_nt->backlight);
sharp_nt->enabled = true;
@@ -259,8 +253,6 @@ static const struct drm_panel_funcs sharp_nt_panel_funcs = {
static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
{
struct device *dev = &sharp_nt->dsi->dev;
- struct device_node *np;
- int ret;
sharp_nt->mode = &default_mode;
@@ -277,39 +269,22 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
gpiod_set_value(sharp_nt->reset_gpio, 0);
}
- np = of_parse_phandle(dev->of_node, "backlight", 0);
- if (np) {
- sharp_nt->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
+ sharp_nt->backlight = devm_of_find_backlight(dev);
- if (!sharp_nt->backlight)
- return -EPROBE_DEFER;
- }
+ if (IS_ERR(sharp_nt->backlight))
+ return PTR_ERR(sharp_nt->backlight);
drm_panel_init(&sharp_nt->base);
sharp_nt->base.funcs = &sharp_nt_panel_funcs;
sharp_nt->base.dev = &sharp_nt->dsi->dev;
- ret = drm_panel_add(&sharp_nt->base);
- if (ret < 0)
- goto put_backlight;
-
- return 0;
-
-put_backlight:
- if (sharp_nt->backlight)
- put_device(&sharp_nt->backlight->dev);
-
- return ret;
+ return drm_panel_add(&sharp_nt->base);
}
static void sharp_nt_panel_del(struct sharp_nt_panel *sharp_nt)
{
if (sharp_nt->base.dev)
drm_panel_remove(&sharp_nt->base);
-
- if (sharp_nt->backlight)
- put_device(&sharp_nt->backlight->dev);
}
static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index b7c4709..cbf1ab4 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -581,6 +581,29 @@ static const struct panel_desc auo_b133htn01 = {
},
};
+static const struct drm_display_mode auo_g104sn02_mode = {
+ .clock = 40000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 216,
+ .htotal = 800 + 40 + 216 + 128,
+ .vdisplay = 600,
+ .vsync_start = 600 + 10,
+ .vsync_end = 600 + 10 + 35,
+ .vtotal = 600 + 10 + 35 + 2,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g104sn02 = {
+ .modes = &auo_g104sn02_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 211,
+ .height = 158,
+ },
+};
+
static const struct display_timing auo_g133han01_timings = {
.pixelclock = { 134000000, 141200000, 149000000 },
.hactive = { 1920, 1920, 1920 },
@@ -1217,6 +1240,30 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
+static const struct display_timing koe_tx31d200vm0baa_timing = {
+ .pixelclock = { 39600000, 43200000, 48000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 16, 36, 56 },
+ .hback_porch = { 16, 36, 56 },
+ .hsync_len = { 8, 8, 8 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 6, 21, 33.5 },
+ .vback_porch = { 6, 21, 33.5 },
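+ /* the 33.5 maximums are truncated to 33 when stored in the u32 timing fields */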
+ .vsync_len = { 8, 8, 8 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc koe_tx31d200vm0baa = {
+ .timings = &koe_tx31d200vm0baa_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 292,
+ .height = 109,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+};
+
static const struct display_timing kyo_tcg121xglp_timing = {
.pixelclock = { 52000000, 65000000, 71000000 },
.hactive = { 1024, 1024, 1024 },
@@ -1356,6 +1403,38 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
+ .clock = 30400,
+ .hdisplay = 800,
+ .hsync_start = 800 + 0,
+ .hsync_end = 800 + 1,
+ .htotal = 800 + 0 + 1 + 160,
+ .vdisplay = 480,
+ .vsync_start = 480 + 0,
+ .vsync_end = 480 + 48 + 1,
+ .vtotal = 480 + 48 + 1 + 0,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc mitsubishi_aa070mc01 = {
+ .modes = &mitsubishi_aa070mc01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+
+ .delay = {
+ .enable = 200,
+ .unprepare = 200,
+ .disable = 400,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+};
+
static const struct display_timing nec_nl12880bc20_05_timing = {
.pixelclock = { 67000000, 71000000, 75000000 },
.hactive = { 1280, 1280, 1280 },
@@ -1565,7 +1644,7 @@ static const struct panel_desc ontat_yx700wv03 = {
.width = 154,
.height = 83,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
@@ -1709,23 +1788,22 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
};
-static const struct drm_display_mode sharp_lq123p1jx31_mode = {
- .clock = 252750,
- .hdisplay = 2400,
- .hsync_start = 2400 + 48,
- .hsync_end = 2400 + 48 + 32,
- .htotal = 2400 + 48 + 32 + 80,
- .vdisplay = 1600,
- .vsync_start = 1600 + 3,
- .vsync_end = 1600 + 3 + 10,
- .vtotal = 1600 + 3 + 10 + 33,
- .vrefresh = 60,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+static const struct display_timing sharp_lq123p1jx31_timing = {
+ .pixelclock = { 252750000, 252750000, 266604720 },
+ .hactive = { 2400, 2400, 2400 },
+ .hfront_porch = { 48, 48, 48 },
+ .hback_porch = { 80, 80, 84 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 1600, 1600, 1600 },
+ .vfront_porch = { 3, 3, 3 },
+ .vback_porch = { 33, 33, 120 },
+ .vsync_len = { 10, 10, 10 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
};
static const struct panel_desc sharp_lq123p1jx31 = {
- .modes = &sharp_lq123p1jx31_mode,
- .num_modes = 1,
+ .timings = &sharp_lq123p1jx31_timing,
+ .num_timings = 1,
.bpc = 8,
.size = {
.width = 259,
@@ -1837,6 +1915,30 @@ static const struct panel_desc tianma_tm070jdhg30 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct display_timing tianma_tm070rvhg71_timing = {
+ .pixelclock = { 27700000, 29200000, 39600000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 12, 40, 212 },
+ .hback_porch = { 88, 88, 88 },
+ .hsync_len = { 1, 1, 40 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 1, 13, 88 },
+ .vback_porch = { 32, 32, 32 },
+ .vsync_len = { 1, 1, 3 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc tianma_tm070rvhg71 = {
+ .timings = &tianma_tm070rvhg71_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct drm_display_mode toshiba_lt089ac29000_mode = {
.clock = 79500,
.hdisplay = 1280,
@@ -1993,6 +2095,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
+ .compatible = "auo,g104sn02",
+ .data = &auo_g104sn02,
+ }, {
.compatible = "auo,g133han01",
.data = &auo_g133han01,
}, {
@@ -2068,6 +2173,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
+ .compatible = "koe,tx31d200vm0baa",
+ .data = &koe_tx31d200vm0baa,
+ }, {
.compatible = "kyo,tcg121xglp",
.data = &kyo_tcg121xglp,
}, {
@@ -2086,6 +2194,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
+ .compatible = "mitsubishi,aa070mc01-ca1",
+ .data = &mitsubishi_aa070mc01,
+ }, {
.compatible = "nec,nl12880bc20-05",
.data = &nec_nl12880bc20_05,
}, {
@@ -2143,6 +2254,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
+ .compatible = "tianma,tm070rvhg71",
+ .data = &tianma_tm070rvhg71,
+ }, {
.compatible = "toshiba,lt089ac29000",
.data = &toshiba_lt089ac29000,
}, {
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 06c4bf7..3106464 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -50,6 +50,41 @@ irqreturn_t pl111_irq(int irq, void *data)
return status;
}
+static enum drm_mode_status
+pl111_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *drm = crtc->dev;
+ struct pl111_drm_dev_private *priv = drm->dev_private;
+ u32 cpp = priv->variant->fb_bpp / 8;
+ u64 bw;
+
+ /*
+ * We use the pixelclock to also account for interlaced modes, the
+ * resulting bandwidth is in bytes per second.
+ */
+ bw = mode->clock * 1000; /* In Hz */
+ bw = bw * mode->hdisplay * mode->vdisplay * cpp;
+ bw = div_u64(bw, mode->htotal * mode->vtotal);
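+ /* bw is now refresh rate * active pixels * bytes per pixel */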
+
+ /*
+ * If no bandwidth constraints, anything goes, else
+ * check if we are too fast.
+ */
+ if (priv->memory_bw && (bw > priv->memory_bw)) {
+ DRM_DEBUG_KMS("%d x %d @ %d Hz, %d cpp, bw %llu too fast\n",
+ mode->hdisplay, mode->vdisplay,
+ mode->clock * 1000, cpp, bw);
+
+ return MODE_BAD;
+ }
+ DRM_DEBUG_KMS("%d x %d @ %d Hz, %d cpp, bw %llu bytes/s OK\n",
+ mode->hdisplay, mode->vdisplay,
+ mode->clock * 1000, cpp, bw);
+
+ return MODE_OK;
+}
+
static int pl111_display_check(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *pstate,
struct drm_crtc_state *cstate)
@@ -94,6 +129,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
struct drm_connector *connector = priv->connector;
+ struct drm_bridge *bridge = priv->bridge;
u32 cntl;
u32 ppl, hsw, hfp, hbp;
u32 lpp, vsw, vfp, vbp;
@@ -137,17 +173,46 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
tim2 = readl(priv->regs + CLCD_TIM2);
tim2 &= (TIM2_BCD | TIM2_PCD_LO_MASK | TIM2_PCD_HI_MASK);
+ if (priv->variant->broken_clockdivider)
+ tim2 |= TIM2_BCD;
+
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
tim2 |= TIM2_IHS;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
tim2 |= TIM2_IVS;
- if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
- tim2 |= TIM2_IOE;
+ if (connector) {
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ tim2 |= TIM2_IOE;
+
+ if (connector->display_info.bus_flags &
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ tim2 |= TIM2_IPC;
+ }
- if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
- tim2 |= TIM2_IPC;
+ if (bridge) {
+ const struct drm_bridge_timings *btimings = bridge->timings;
+
+ /*
+ * This is where things get really fun. Sometimes the bridge
+ * timings are such that the signal out from PL11x is not
+ * stable before the receiving bridge (such as a dumb VGA DAC
+ * or similar) samples it. If that happens, we compensate by
+ * the only method we have: output the data on the opposite
+ * edge of the clock so it is for sure stable when it gets
+ * sampled.
+ *
+ * The PL111 manual does not contain proper timing diagrams
+ * or data for these details, but we know from experiments
+ * that the setup time is more than 3000 picoseconds (3 ns).
+ * If we have a bridge that requires the signal to be stable
+ * earlier than 3000 ps before the clock pulse, we have to
+ * output the data on the opposite edge to avoid flicker.
+ */
+ if (btimings && btimings->setup_time_ps >= 3000)
+ tim2 ^= TIM2_IPC;
+ }
tim2 |= cpl << 16;
writel(tim2, priv->regs + CLCD_TIM2);
@@ -172,10 +237,17 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
cntl |= CNTL_LCDBPP24 | CNTL_BGR;
break;
case DRM_FORMAT_BGR565:
- cntl |= CNTL_LCDBPP16_565;
+ if (priv->variant->is_pl110)
+ cntl |= CNTL_LCDBPP16;
+ else
+ cntl |= CNTL_LCDBPP16_565;
break;
case DRM_FORMAT_RGB565:
- cntl |= CNTL_LCDBPP16_565 | CNTL_BGR;
+ if (priv->variant->is_pl110)
+ cntl |= CNTL_LCDBPP16;
+ else
+ cntl |= CNTL_LCDBPP16_565;
+ cntl |= CNTL_BGR;
break;
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_XBGR1555:
@@ -199,6 +271,10 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
break;
}
+ /* The PL110 in Integrator/Versatile does the BGR routing externally */
+ if (priv->variant->external_bgr)
+ cntl &= ~CNTL_BGR;
+
/* Power sequence: first enable and chill */
writel(cntl, priv->regs + priv->ctrl);
@@ -215,7 +291,8 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
cntl |= CNTL_LCDPWR;
writel(cntl, priv->regs + priv->ctrl);
- drm_crtc_vblank_on(crtc);
+ if (!priv->variant->broken_vblank)
+ drm_crtc_vblank_on(crtc);
}
void pl111_display_disable(struct drm_simple_display_pipe *pipe)
@@ -225,7 +302,8 @@ void pl111_display_disable(struct drm_simple_display_pipe *pipe)
struct pl111_drm_dev_private *priv = drm->dev_private;
u32 cntl;
- drm_crtc_vblank_off(crtc);
+ if (!priv->variant->broken_vblank)
+ drm_crtc_vblank_off(crtc);
/* Power Down */
cntl = readl(priv->regs + priv->ctrl);
@@ -278,8 +356,10 @@ static void pl111_display_update(struct drm_simple_display_pipe *pipe,
}
}
-int pl111_enable_vblank(struct drm_device *drm, unsigned int crtc)
+static int pl111_display_enable_vblank(struct drm_simple_display_pipe *pipe)
{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
writel(CLCD_IRQ_NEXTBASE_UPDATE, priv->regs + priv->ienb);
@@ -287,8 +367,10 @@ int pl111_enable_vblank(struct drm_device *drm, unsigned int crtc)
return 0;
}
-void pl111_disable_vblank(struct drm_device *drm, unsigned int crtc)
+static void pl111_display_disable_vblank(struct drm_simple_display_pipe *pipe)
{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
writel(0, priv->regs + priv->ienb);
@@ -300,7 +382,8 @@ static int pl111_display_prepare_fb(struct drm_simple_display_pipe *pipe,
return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
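+/*
+ * Not const: the enable/disable_vblank hooks are only installed at init
+ * time on variants with a working vblank IRQ.
+ */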
-static const struct drm_simple_display_pipe_funcs pl111_display_funcs = {
+static struct drm_simple_display_pipe_funcs pl111_display_funcs = {
+ .mode_valid = pl111_mode_valid,
.check = pl111_display_check,
.enable = pl111_display_enable,
.disable = pl111_display_disable,
@@ -417,6 +500,11 @@ pl111_init_clock_divider(struct drm_device *drm)
dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
return PTR_ERR(parent);
}
+ /* If the clock divider is broken, use the parent directly */
+ if (priv->variant->broken_clockdivider) {
+ priv->clk = parent;
+ return 0;
+ }
parent_name = __clk_get_name(parent);
spin_lock_init(&priv->tim2_lock);
@@ -454,6 +542,11 @@ int pl111_display_init(struct drm_device *drm)
if (ret)
return ret;
+ if (!priv->variant->broken_vblank) {
+ pl111_display_funcs.enable_vblank = pl111_display_enable_vblank;
+ pl111_display_funcs.disable_vblank = pl111_display_disable_vblank;
+ }
+
ret = drm_simple_display_pipe_init(drm, &priv->pipe,
&pl111_display_funcs,
priv->variant->formats,
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 440f53e..8639b2d 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -36,14 +36,24 @@ struct drm_minor;
* struct pl111_variant_data - encodes IP differences
* @name: the name of this variant
* @is_pl110: this is the early PL110 variant
+ * @external_bgr: this is the Versatile Pl110 variant with external
+ * BGR/RGB routing
+ * @broken_clockdivider: the clock divider is broken and we need to
+ * use the supplied clock directly
+ * @broken_vblank: the vblank IRQ is broken on this variant
* @formats: array of supported pixel formats on this variant
* @nformats: the length of the array of supported pixel formats
+ * @fb_bpp: desired bits per pixel on the default framebuffer
*/
struct pl111_variant_data {
const char *name;
bool is_pl110;
+ bool external_bgr;
+ bool broken_clockdivider;
+ bool broken_vblank;
const u32 *formats;
unsigned int nformats;
+ unsigned int fb_bpp;
};
struct pl111_drm_dev_private {
@@ -53,9 +63,9 @@ struct pl111_drm_dev_private {
struct drm_panel *panel;
struct drm_bridge *bridge;
struct drm_simple_display_pipe pipe;
- struct drm_fbdev_cma *fbdev;
void *regs;
+ u32 memory_bw;
u32 ienb;
u32 ctrl;
/* The pixel clock (a reference to our clock divider off of CLCDCLK). */
@@ -72,8 +82,6 @@ struct pl111_drm_dev_private {
};
int pl111_display_init(struct drm_device *dev);
-int pl111_enable_vblank(struct drm_device *drm, unsigned int crtc);
-void pl111_disable_vblank(struct drm_device *drm, unsigned int crtc);
irqreturn_t pl111_irq(int irq, void *data);
int pl111_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 201d57d..4621259 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -58,12 +58,15 @@
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_bridge.h>
@@ -84,9 +87,13 @@ static int pl111_modeset_init(struct drm_device *dev)
{
struct drm_mode_config *mode_config;
struct pl111_drm_dev_private *priv = dev->dev_private;
- struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct device_node *np = dev->dev->of_node;
+ struct device_node *remote;
+ struct drm_panel *panel = NULL;
+ struct drm_bridge *bridge = NULL;
+ bool defer = false;
int ret = 0;
+ int i;
drm_mode_config_init(dev);
mode_config = &dev->mode_config;
@@ -96,10 +103,54 @@ static int pl111_modeset_init(struct drm_device *dev)
mode_config->min_height = 1;
mode_config->max_height = 768;
- ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
- 0, 0, &panel, &bridge);
- if (ret && ret != -ENODEV)
- return ret;
+ i = 0;
+ for_each_endpoint_of_node(np, remote) {
+ struct drm_panel *tmp_panel;
+ struct drm_bridge *tmp_bridge;
+
+ dev_dbg(dev->dev, "checking endpoint %d\n", i);
+
+ ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
+ 0, i,
+ &tmp_panel,
+ &tmp_bridge);
+ if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ /*
+ * Something deferred, which is often just
+ * another way of saying -ENODEV; still, let's
+ * cast a vote for later deferral.
+ */
+ defer = true;
+ } else if (ret != -ENODEV) {
+ /* Continue, maybe something else is working */
+ dev_err(dev->dev,
+ "endpoint %d returns %d\n", i, ret);
+ }
+ }
+
+ if (tmp_panel) {
+ dev_info(dev->dev,
+ "found panel on endpoint %d\n", i);
+ panel = tmp_panel;
+ }
+ if (tmp_bridge) {
+ dev_info(dev->dev,
+ "found bridge on endpoint %d\n", i);
+ bridge = tmp_bridge;
+ }
+
+ i++;
+ }
+
+ /*
+ * If we cannot find either a panel or a bridge on any of the
+ * endpoints, and any of them returned -EPROBE_DEFER, then
+ * let's defer this driver too.
+ */
+ if ((!panel && !bridge) && defer)
+ return -EPROBE_DEFER;
+
if (panel) {
bridge = drm_panel_bridge_add(panel,
DRM_MODE_CONNECTOR_Unknown);
@@ -107,11 +158,17 @@ static int pl111_modeset_init(struct drm_device *dev)
ret = PTR_ERR(bridge);
goto out_config;
}
- /*
- * TODO: when we are using a different bridge than a panel
- * (such as a dumb VGA connector) we need to devise a different
- * method to get the connector out of the bridge.
- */
+ } else if (bridge) {
+ dev_info(dev->dev, "Using non-panel bridge\n");
+ } else {
+ dev_err(dev->dev, "No bridge, exiting\n");
+ return -ENODEV;
+ }
+
+ priv->bridge = bridge;
+ if (panel) {
+ priv->panel = panel;
+ priv->connector = panel->connector;
}
ret = pl111_display_init(dev);
@@ -125,20 +182,17 @@ static int pl111_modeset_init(struct drm_device *dev)
if (ret)
return ret;
- priv->bridge = bridge;
- priv->panel = panel;
- priv->connector = panel->connector;
-
- ret = drm_vblank_init(dev, 1);
- if (ret != 0) {
- dev_err(dev->dev, "Failed to init vblank\n");
- goto out_bridge;
+ if (!priv->variant->broken_vblank) {
+ ret = drm_vblank_init(dev, 1);
+ if (ret != 0) {
+ dev_err(dev->dev, "Failed to init vblank\n");
+ goto out_bridge;
+ }
}
drm_mode_config_reset(dev);
- priv->fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_connector);
+ drm_fb_cma_fbdev_init(dev, priv->variant->fb_bpp, 0);
drm_kms_helper_poll_init(dev);
@@ -155,17 +209,10 @@ finish:
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
-static void pl111_lastclose(struct drm_device *dev)
-{
- struct pl111_drm_dev_private *priv = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(priv->fbdev);
-}
-
static struct drm_driver pl111_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
- .lastclose = pl111_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
.fops = &drm_fops,
.name = "pl111",
@@ -177,10 +224,6 @@ static struct drm_driver pl111_drm_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
-
- .enable_vblank = pl111_enable_vblank,
- .disable_vblank = pl111_disable_vblank,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
@@ -198,7 +241,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
{
struct device *dev = &amba_dev->dev;
struct pl111_drm_dev_private *priv;
- struct pl111_variant_data *variant = id->data;
+ const struct pl111_variant_data *variant = id->data;
struct drm_device *drm;
int ret;
@@ -214,27 +257,16 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
drm->dev_private = priv;
priv->variant = variant;
- /*
- * The PL110 and PL111 variants have two registers
- * swapped: interrupt enable and control. For this reason
- * we use offsets that we can change per variant.
- */
+ if (of_property_read_u32(dev->of_node, "max-memory-bandwidth",
+ &priv->memory_bw)) {
+ dev_info(dev, "no max memory bandwidth specified, assume unlimited\n");
+ priv->memory_bw = 0;
+ }
+
+ /* The two variants swap these two registers */
if (variant->is_pl110) {
- /*
- * The ARM Versatile boards are even more special:
- * their PrimeCell ID say they are PL110 but the
- * control and interrupt enable registers are anyway
- * swapped to the PL111 order so they are not following
- * the PL110 datasheet.
- */
- if (of_machine_is_compatible("arm,versatile-ab") ||
- of_machine_is_compatible("arm,versatile-pb")) {
- priv->ienb = CLCD_PL111_IENB;
- priv->ctrl = CLCD_PL111_CNTL;
- } else {
- priv->ienb = CLCD_PL110_IENB;
- priv->ctrl = CLCD_PL110_CNTL;
- }
+ priv->ienb = CLCD_PL110_IENB;
+ priv->ctrl = CLCD_PL110_CNTL;
} else {
priv->ienb = CLCD_PL111_IENB;
priv->ctrl = CLCD_PL111_CNTL;
@@ -246,6 +278,11 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
return PTR_ERR(priv->regs);
}
+ /* This may override some variant settings */
+ ret = pl111_versatile_init(dev, priv);
+ if (ret)
+ goto dev_unref;
+
/* turn off interrupts before requesting the irq */
writel(0, priv->regs + priv->ienb);
@@ -256,10 +293,6 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
return ret;
}
- ret = pl111_versatile_init(dev, priv);
- if (ret)
- goto dev_unref;
-
ret = pl111_modeset_init(drm);
if (ret != 0)
goto dev_unref;
@@ -281,8 +314,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
struct pl111_drm_dev_private *priv = drm->dev_private;
drm_dev_unregister(drm);
- if (priv->fbdev)
- drm_fbdev_cma_fini(priv->fbdev);
+ drm_fb_cma_fbdev_fini(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
@@ -292,8 +324,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
}
/*
- * This variant exist in early versions like the ARM Integrator
- * and this version lacks the 565 and 444 pixel formats.
+ * This early variant lacks the 565 and 444 pixel formats.
*/
static const u32 pl110_pixel_formats[] = {
DRM_FORMAT_ABGR8888,
@@ -311,6 +342,7 @@ static const struct pl111_variant_data pl110_variant = {
.is_pl110 = true,
.formats = pl110_pixel_formats,
.nformats = ARRAY_SIZE(pl110_pixel_formats),
+ .fb_bpp = 16,
};
/* RealView, Versatile Express etc use this modern variant */
@@ -335,6 +367,7 @@ static const struct pl111_variant_data pl111_variant = {
.name = "PL111",
.formats = pl111_pixel_formats,
.nformats = ARRAY_SIZE(pl111_pixel_formats),
+ .fb_bpp = 32,
};
static const struct amba_id pl111_id_table[] = {
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 97d4af6..9302f51 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -1,3 +1,4 @@
+#include <linux/amba/clcd-regs.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/regmap.h>
@@ -64,10 +65,8 @@ static const struct of_device_id versatile_clcd_of_match[] = {
#define INTEGRATOR_CLCD_LCDBIASEN BIT(8)
#define INTEGRATOR_CLCD_LCDBIASUP BIT(9)
#define INTEGRATOR_CLCD_LCDBIASDN BIT(10)
-/* Bits 11,12,13 controls the LCD type */
-#define INTEGRATOR_CLCD_LCDMUX_MASK (BIT(11)|BIT(12)|BIT(13))
+/* Bits 11,12,13 control the LCD or VGA bridge type */
#define INTEGRATOR_CLCD_LCDMUX_LCD24 BIT(11)
-#define INTEGRATOR_CLCD_LCDMUX_VGA565 BIT(12)
#define INTEGRATOR_CLCD_LCDMUX_SHARP (BIT(11)|BIT(12))
#define INTEGRATOR_CLCD_LCDMUX_VGA555 BIT(13)
#define INTEGRATOR_CLCD_LCDMUX_VGA24 (BIT(11)|BIT(12)|BIT(13))
@@ -82,16 +81,7 @@ static const struct of_device_id versatile_clcd_of_match[] = {
/* 0 = 24bit VGA, 1 = 18bit VGA */
#define INTEGRATOR_CLCD_LCD_N24BITEN BIT(19)
-#define INTEGRATOR_CLCD_MASK (INTEGRATOR_CLCD_LCDBIASEN | \
- INTEGRATOR_CLCD_LCDBIASUP | \
- INTEGRATOR_CLCD_LCDBIASDN | \
- INTEGRATOR_CLCD_LCDMUX_MASK | \
- INTEGRATOR_CLCD_LCD0_EN | \
- INTEGRATOR_CLCD_LCD1_EN | \
- INTEGRATOR_CLCD_LCD_STATIC1 | \
- INTEGRATOR_CLCD_LCD_STATIC2 | \
- INTEGRATOR_CLCD_LCD_STATIC | \
- INTEGRATOR_CLCD_LCD_N24BITEN)
+#define INTEGRATOR_CLCD_MASK GENMASK(19, 8)
static void pl111_integrator_enable(struct drm_device *drm, u32 format)
{
@@ -106,11 +96,8 @@ static void pl111_integrator_enable(struct drm_device *drm, u32 format)
switch (format) {
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB8888:
- break;
- case DRM_FORMAT_BGR565:
- case DRM_FORMAT_RGB565:
- /* truecolor RGB565 */
- val |= INTEGRATOR_CLCD_LCDMUX_VGA565;
+ /* 24bit formats */
+ val |= INTEGRATOR_CLCD_LCDMUX_VGA24;
break;
case DRM_FORMAT_XBGR1555:
case DRM_FORMAT_XRGB1555:
@@ -217,6 +204,88 @@ static void pl111_realview_clcd_enable(struct drm_device *drm, u32 format)
SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
}
+/* PL110 pixel formats for Integrator, vanilla PL110 */
+static const u32 pl110_integrator_pixel_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+};
+
+/* Extended PL110 pixel formats for Integrator and Versatile */
+static const u32 pl110_versatile_pixel_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGR565, /* Uses external PLD */
+ DRM_FORMAT_RGB565, /* Uses external PLD */
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+};
+
+static const u32 pl111_realview_pixel_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+};
+
+/*
+ * The Integrator variant is a PL110 with a bunch of broken or
+ * not-yet-implemented features.
+ */
+static const struct pl111_variant_data pl110_integrator = {
+ .name = "PL110 Integrator",
+ .is_pl110 = true,
+ .broken_clockdivider = true,
+ .broken_vblank = true,
+ .formats = pl110_integrator_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
+ .fb_bpp = 16,
+};
+
+/*
+ * This is the in-between PL110 variant found in the ARM Versatile,
+ * supporting RGB565/BGR565
+ */
+static const struct pl111_variant_data pl110_versatile = {
+ .name = "PL110 Versatile",
+ .is_pl110 = true,
+ .external_bgr = true,
+ .formats = pl110_versatile_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_versatile_pixel_formats),
+ .fb_bpp = 16,
+};
+
+/*
+ * RealView PL111 variant; the only real difference from the vanilla
+ * PL111 is that we select a 16bpp framebuffer by default to be able
+ * to drive 1024x768 without saturating the memory bus.
+ */
+static const struct pl111_variant_data pl111_realview = {
+ .name = "PL111 RealView",
+ .formats = pl111_realview_pixel_formats,
+ .nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
+ .fb_bpp = 16,
+};
+
int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
{
const struct of_device_id *clcd_id;
@@ -241,14 +310,24 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
switch (versatile_clcd_type) {
case INTEGRATOR_CLCD_CM:
versatile_syscon_map = map;
+ priv->variant = &pl110_integrator;
priv->variant_display_enable = pl111_integrator_enable;
dev_info(dev, "set up callbacks for Integrator PL110\n");
break;
case VERSATILE_CLCD:
versatile_syscon_map = map;
+ /* This can do RGB565 with external PLD */
+ priv->variant = &pl110_versatile;
priv->variant_display_enable = pl111_versatile_enable;
priv->variant_display_disable = pl111_versatile_disable;
- dev_info(dev, "set up callbacks for Versatile PL110+\n");
+ /*
+ * The Versatile has a variant halfway between PL110
+ * and PL111 where these two registers have already been
+ * swapped.
+ */
+ priv->ienb = CLCD_PL111_IENB;
+ priv->ctrl = CLCD_PL111_CNTL;
+ dev_info(dev, "set up callbacks for Versatile PL110\n");
break;
case REALVIEW_CLCD_EB:
case REALVIEW_CLCD_PB1176:
@@ -256,6 +335,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
case REALVIEW_CLCD_PBA8:
case REALVIEW_CLCD_PBX:
versatile_syscon_map = map;
+ priv->variant = &pl111_realview;
priv->variant_display_enable = pl111_realview_clcd_enable;
priv->variant_display_disable = pl111_realview_clcd_disable;
dev_info(dev, "set up callbacks for RealView PL111\n");
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 4756b3c..ecb35ed 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -289,6 +289,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
{
struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
+ qxl_bo_unref(&qxl_crtc->cursor_bo);
drm_crtc_cleanup(crtc);
kfree(qxl_crtc);
}
@@ -308,7 +309,7 @@ void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj);
WARN_ON(bo->shadow);
- drm_gem_object_unreference_unlocked(qxl_fb->obj);
+ drm_gem_object_put_unlocked(qxl_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(qxl_fb);
}
@@ -495,6 +496,53 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
return 0;
}
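+/*
+ * Re-emit a QXL_CURSOR_SET command for the cursor BO cached on the CRTC so
+ * the cursor is restored after the primary surface has been recreated.
+ */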
+static int qxl_primary_apply_cursor(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
+ struct qxl_cursor_cmd *cmd;
+ struct qxl_release *release;
+ int ret = 0;
+
+ if (!qcrtc->cursor_bo)
+ return 0;
+
+ ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+ QXL_RELEASE_CURSOR_CMD,
+ &release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_list_add(release, qcrtc->cursor_bo);
+ if (ret)
+ goto out_free_release;
+
+ ret = qxl_release_reserve_list(release, false);
+ if (ret)
+ goto out_free_release;
+
+ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_CURSOR_SET;
+ cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x;
+ cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y;
+
+ cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
+
+ cmd->u.set.visible = 1;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ qxl_release_fence_buffer_objects(release);
+
+ return ret;
+
+out_free_release:
+ qxl_release_free(qdev, release);
+ return ret;
+}
+
static void qxl_primary_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -510,6 +558,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
.x2 = qfb->base.width,
.y2 = qfb->base.height
};
+ int ret;
bool same_shadow = false;
if (old_state->fb) {
@@ -531,6 +580,11 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
if (!same_shadow)
qxl_io_destroy_primary(qdev);
bo_old->is_primary = false;
+
+ ret = qxl_primary_apply_cursor(plane);
+ if (ret)
+ DRM_ERROR(
+ "could not set cursor after creating primary");
}
if (!bo->is_primary) {
@@ -571,11 +625,12 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct qxl_device *qdev = dev->dev_private;
struct drm_framebuffer *fb = plane->state->fb;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
struct qxl_release *release;
struct qxl_cursor_cmd *cmd;
struct qxl_cursor *cursor;
struct drm_gem_object *obj;
- struct qxl_bo *cursor_bo, *user_bo = NULL;
+ struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
int ret;
void *user_ptr;
int size = 64*64*4;
@@ -628,6 +683,10 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cmd->u.set.shape = qxl_bo_physical_address(qdev,
cursor_bo, 0);
cmd->type = QXL_CURSOR_SET;
+
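+ /* cache the new cursor BO on the CRTC so it can be re-applied later */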
+ qxl_bo_unref(&qcrtc->cursor_bo);
+ qcrtc->cursor_bo = cursor_bo;
+ cursor_bo = NULL;
} else {
ret = qxl_release_reserve_list(release, true);
@@ -645,6 +704,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_bo_unref(&cursor_bo);
+
return;
out_backoff:
@@ -1154,7 +1215,7 @@ qxl_user_framebuffer_create(struct drm_device *dev,
ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
if (ret) {
kfree(qxl_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return NULL;
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 08752c0..00a1a66 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -111,6 +111,8 @@ struct qxl_bo_list {
struct qxl_crtc {
struct drm_crtc base;
int index;
+
+ struct qxl_bo *cursor_bo;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 11085ab..c666b89 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -82,6 +82,6 @@ int qxl_mode_dumb_mmap(struct drm_file *file_priv,
return -ENOENT;
qobj = gem_to_qxl_bo(gobj);
*offset_p = qxl_bo_mmap_offset(qobj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 23af3e3..3388914 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -95,7 +95,7 @@ static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
qxl_bo_kunmap(qbo);
qxl_bo_unpin(qbo);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
@@ -316,11 +316,11 @@ out_unref:
qxl_bo_unpin(qbo);
}
if (fb && ret) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 85f5467..f5c1e78 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -98,7 +98,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
return r;
/* drop reference from allocate - handle holds it now */
*qobj = gem_to_qxl_bo(gobj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 31effed..e238a1a 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -121,7 +121,7 @@ static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
qobj = gem_to_qxl_bo(gobj);
ret = qxl_release_list_add(release, qobj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -309,6 +309,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
int ret;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qobj = NULL;
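+ /* ttm_operation_ctx fields: { interruptible, no_wait_gpu } */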
+ struct ttm_operation_ctx ctx = { true, false };
if (update_area->left >= update_area->right ||
update_area->top >= update_area->bottom)
@@ -326,8 +327,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
if (!qobj->pin_count) {
qxl_ttm_placement_from_domain(qobj, qobj->type, false);
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
- true, false);
+ ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
if (unlikely(ret))
goto out;
}
@@ -343,7 +343,7 @@ out2:
qxl_bo_unreserve(qobj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 0a67ddf..6a30196 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -109,7 +109,7 @@ int qxl_bo_create(struct qxl_device *qdev,
qxl_ttm_placement_from_domain(bo, domain, pinned);
r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, !kernel, NULL, size,
+ &bo->placement, 0, !kernel, size,
NULL, NULL, &qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
@@ -211,18 +211,19 @@ void qxl_bo_unref(struct qxl_bo **bo)
if ((*bo) == NULL)
return;
- drm_gem_object_unreference_unlocked(&(*bo)->gem_base);
+ drm_gem_object_put_unlocked(&(*bo)->gem_base);
*bo = NULL;
}
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
- drm_gem_object_reference(&bo->gem_base);
+ drm_gem_object_get(&bo->gem_base);
return bo;
}
static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->gem_base.dev;
int r;
@@ -233,7 +234,7 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
return 0;
}
qxl_ttm_placement_from_domain(bo, domain, true);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -246,6 +247,7 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->gem_base.dev;
int r, i;
@@ -258,7 +260,7 @@ static int __qxl_bo_unpin(struct qxl_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r != 0))
dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -316,7 +318,7 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_unreference_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->gem_base);
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index a6da6fa..5d84a66 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -230,12 +230,12 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
+ struct ttm_operation_ctx ctx = { true, false };
int ret;
if (!bo->pin_count) {
qxl_ttm_placement_from_domain(bo, bo->type, false);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
return ret;
}
@@ -458,7 +458,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
trace_dma_fence_emit(&release->base);
driver = bdev->driver;
- glob = bo->glob;
+ glob = bdev->glob;
spin_lock(&glob->lru_lock);
@@ -468,7 +468,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
reservation_object_add_shared_fence(bo->resv, &release->base);
ttm_bo_add_to_lru(bo);
- __ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
ww_acquire_fini(&release->ticket);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index ab48238..ee2340e3 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -123,11 +123,8 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
struct qxl_device *qdev;
int r;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
- pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n",
- __func__, vma->vm_pgoff);
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return -EINVAL;
- }
file_priv = filp->private_data;
qdev = file_priv->minor->dev->dev_private;
@@ -294,40 +291,19 @@ static struct ttm_backend_func qxl_backend_func = {
.destroy = &qxl_ttm_backend_destroy,
};
-static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
-{
- int r;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- r = ttm_pool_populate(ttm);
- if (r)
- return r;
-
- return 0;
-}
-
-static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate(ttm);
-}
-
-static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct qxl_device *qdev;
struct qxl_ttm_tt *gtt;
- qdev = qxl_get_qdev(bdev);
+ qdev = qxl_get_qdev(bo->bdev);
gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
if (gtt == NULL)
return NULL;
gtt->ttm.ttm.func = &qxl_backend_func;
gtt->qdev = qdev;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
- dummy_read_page)) {
+ if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
}
@@ -344,15 +320,14 @@ static void qxl_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int qxl_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret)
return ret;
@@ -361,8 +336,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
qxl_move_null(bo, new_mem);
return 0;
}
- return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu,
- new_mem);
+ return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
@@ -383,8 +357,6 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
- .ttm_tt_populate = &qxl_ttm_tt_populate,
- .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
.invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
@@ -393,7 +365,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
.verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &qxl_bo_move_notify,
};
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a651191..7c73bc7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1627,8 +1627,6 @@ static const u32 godavari_golden_registers[] =
static void cik_init_golden_registers(struct radeon_device *rdev)
{
- /* Some of the registers might be dependent on GRBM_GFX_INDEX */
- mutex_lock(&rdev->grbm_idx_mutex);
switch (rdev->family) {
case CHIP_BONAIRE:
radeon_program_register_sequence(rdev,
@@ -1703,7 +1701,6 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
default:
break;
}
- mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -3120,7 +3117,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
- mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -3132,7 +3128,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
mask = 1;
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
@@ -3143,7 +3138,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
rdev->config.cik.backend_enable_mask = enabled_rbs;
- mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
@@ -3171,7 +3165,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
WREG32(PA_SC_RASTER_CONFIG, data);
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -3228,35 +3221,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
case CHIP_KAVERI:
rdev->config.cik.max_shader_engines = 1;
rdev->config.cik.max_tile_pipes = 4;
- if ((rdev->pdev->device == 0x1304) ||
- (rdev->pdev->device == 0x1305) ||
- (rdev->pdev->device == 0x130C) ||
- (rdev->pdev->device == 0x130F) ||
- (rdev->pdev->device == 0x1310) ||
- (rdev->pdev->device == 0x1311) ||
- (rdev->pdev->device == 0x131C)) {
- rdev->config.cik.max_cu_per_sh = 8;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1309) ||
- (rdev->pdev->device == 0x130A) ||
- (rdev->pdev->device == 0x130D) ||
- (rdev->pdev->device == 0x1313) ||
- (rdev->pdev->device == 0x131D)) {
- rdev->config.cik.max_cu_per_sh = 6;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1306) ||
- (rdev->pdev->device == 0x1307) ||
- (rdev->pdev->device == 0x130B) ||
- (rdev->pdev->device == 0x130E) ||
- (rdev->pdev->device == 0x1315) ||
- (rdev->pdev->device == 0x1318) ||
- (rdev->pdev->device == 0x131B)) {
- rdev->config.cik.max_cu_per_sh = 4;
- rdev->config.cik.max_backends_per_se = 1;
- } else {
- rdev->config.cik.max_cu_per_sh = 3;
- rdev->config.cik.max_backends_per_se = 1;
- }
+ rdev->config.cik.max_cu_per_sh = 8;
+ rdev->config.cik.max_backends_per_se = 2;
rdev->config.cik.max_sh_per_se = 1;
rdev->config.cik.max_texture_channel_caches = 4;
rdev->config.cik.max_gprs = 256;
@@ -3391,12 +3357,6 @@ static void cik_gpu_init(struct radeon_device *rdev)
/* set HW defaults for 3D engine */
WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
- mutex_lock(&rdev->grbm_idx_mutex);
- /*
- * making sure that the following register writes will be broadcasted
- * to all the shaders
- */
- cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(SX_DEBUG_1, 0x20);
WREG32(TA_CNTL_AUX, 0x00010000);
@@ -3452,7 +3412,6 @@ static void cik_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
- mutex_unlock(&rdev->grbm_idx_mutex);
udelay(50);
}
@@ -4432,11 +4391,12 @@ static int cik_mec_init(struct radeon_device *rdev)
/*
* KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
* CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
- * Nonetheless, we assign only 1 pipe because all other pipes will
- * be handled by KFD
*/
- rdev->mec.num_mec = 1;
- rdev->mec.num_pipe = 1;
+ if (rdev->family == CHIP_KAVERI)
+ rdev->mec.num_mec = 2;
+ else
+ rdev->mec.num_mec = 1;
+ rdev->mec.num_pipe = 4;
rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
if (rdev->mec.hpd_eop_obj == NULL) {
@@ -4579,8 +4539,11 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
/* init the pipes */
mutex_lock(&rdev->srbm_mutex);
- for (i = 0; i < rdev->mec.num_pipe; ++i) {
- cik_srbm_select(rdev, 0, i, 0, 0);
+ for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); ++i) {
+ int me = (i < 4) ? 1 : 2;
+ int pipe = (i < 4) ? i : (i - 4);
+
+ cik_srbm_select(rdev, me, pipe, 0, 0);
eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2) ;
/* write the EOP addr */
@@ -4597,6 +4560,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_HPD_EOP_CONTROL, tmp);
}
+ cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
/* init the queues. Just two for now. */
@@ -5830,7 +5794,6 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
u32 i, j, k;
u32 mask;
- mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -5842,7 +5805,6 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
for (k = 0; k < rdev->usec_timeout; k++) {
@@ -5977,12 +5939,10 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_CNTR_MAX, 0x00008000);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(RLC_LB_PARAMS, 0x00600408);
WREG32(RLC_LB_CNTL, 0x80000004);
- mutex_unlock(&rdev->grbm_idx_mutex);
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
@@ -6049,13 +6009,11 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
WREG32(RLC_SERDES_WR_CTRL, tmp2);
- mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6098,13 +6056,11 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
WREG32(RLC_SERDES_WR_CTRL, data);
- mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6148,13 +6104,11 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
WREG32(RLC_SERDES_WR_CTRL, data);
- mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
}
@@ -6583,12 +6537,10 @@ static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
u32 mask = 0, tmp, tmp1;
int i;
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, se, sh);
tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
tmp &= 0xffff0000;
@@ -7074,7 +7026,8 @@ static int cik_irq_init(struct radeon_device *rdev)
int cik_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl;
- u32 cp_m1p0;
+ u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
+ u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -7107,6 +7060,13 @@ int cik_irq_set(struct radeon_device *rdev)
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -7121,6 +7081,33 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
+ case 1:
+ cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ default:
+ DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
+ break;
+ }
+ } else if (ring->me == 2) {
+ switch (ring->pipe) {
+ case 0:
+ cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 1:
+ cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+ break;
default:
DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
break;
@@ -7137,6 +7124,33 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
+ case 1:
+ cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ default:
+ DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
+ break;
+ }
+ } else if (ring->me == 2) {
+ switch (ring->pipe) {
+ case 0:
+ cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 1:
+ cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+ break;
default:
DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
break;
@@ -7217,6 +7231,13 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
+ WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
+ WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
+ WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
+ WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
+ WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
+ WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
+ WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index 4e883fd..318377d 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -147,8 +147,6 @@
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
-#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
-
#define SQ_IND_INDEX 0x8DE0
#define SQ_CMD 0x8DEC
#define SQ_IND_DATA 0x8DE4
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 24fe66c..5712d63 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3513,6 +3513,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
}
+ rdev->config.evergreen.backend_map = tmp;
WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index c21d8fa..ba70463 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -43,10 +43,6 @@ struct list_head {
struct list_head *next, *prev;
};
-#define LIST_HEAD_INIT(name) { &(name), &(name) }
-
-#define LIST_HEAD(name) \
- struct list_head name = LIST_HEAD_INIT(name)
static inline void INIT_LIST_HEAD(struct list_head *list)
{
@@ -75,19 +71,6 @@ extern void __list_add(struct list_head *new,
#endif
/**
- * list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-static inline void list_add(struct list_head *new, struct list_head *head)
-{
- __list_add(new, head, head->next);
-}
-
-/**
* list_add_tail - add a new entry
* @new: new entry to be added
* @head: list head to add it before
@@ -100,250 +83,6 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
__list_add(new, head->prev, head);
}
-/*
- * Delete a list entry by making the prev/next entries
- * point to each other.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __list_del(struct list_head *prev, struct list_head *next)
-{
- next->prev = prev;
- prev->next = next;
-}
-
-/**
- * list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: list_empty() on entry does not return true after this, the entry is
- * in an undefined state.
- */
-#ifndef CONFIG_DEBUG_LIST
-static inline void list_del(struct list_head *entry)
-{
- __list_del(entry->prev, entry->next);
- entry->next = (void *)0xDEADBEEF;
- entry->prev = (void *)0xBEEFDEAD;
-}
-#else
-extern void list_del(struct list_head *entry);
-#endif
-
-/**
- * list_replace - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * If @old was empty, it will be overwritten.
- */
-static inline void list_replace(struct list_head *old, struct list_head *new)
-{
- new->next = old->next;
- new->next->prev = new;
- new->prev = old->prev;
- new->prev->next = new;
-}
-
-static inline void list_replace_init(struct list_head *old,
- struct list_head *new)
-{
- list_replace(old, new);
- INIT_LIST_HEAD(old);
-}
-
-/**
- * list_del_init - deletes entry from list and reinitialize it.
- * @entry: the element to delete from the list.
- */
-static inline void list_del_init(struct list_head *entry)
-{
- __list_del(entry->prev, entry->next);
- INIT_LIST_HEAD(entry);
-}
-
-/**
- * list_move - delete from one list and add as another's head
- * @list: the entry to move
- * @head: the head that will precede our entry
- */
-static inline void list_move(struct list_head *list, struct list_head *head)
-{
- __list_del(list->prev, list->next);
- list_add(list, head);
-}
-
-/**
- * list_move_tail - delete from one list and add as another's tail
- * @list: the entry to move
- * @head: the head that will follow our entry
- */
-static inline void list_move_tail(struct list_head *list,
- struct list_head *head)
-{
- __list_del(list->prev, list->next);
- list_add_tail(list, head);
-}
-
-/**
- * list_is_last - tests whether @list is the last entry in list @head
- * @list: the entry to test
- * @head: the head of the list
- */
-static inline int list_is_last(const struct list_head *list,
- const struct list_head *head)
-{
- return list->next == head;
-}
-
-/**
- * list_empty - tests whether a list is empty
- * @head: the list to test.
- */
-static inline int list_empty(const struct list_head *head)
-{
- return head->next == head;
-}
-
-/**
- * list_empty_careful - tests whether a list is empty and not being modified
- * @head: the list to test
- *
- * Description:
- * tests whether a list is empty _and_ checks that no other CPU might be
- * in the process of modifying either member (next or prev)
- *
- * NOTE: using list_empty_careful() without synchronization
- * can only be safe if the only activity that can happen
- * to the list entry is list_del_init(). Eg. it cannot be used
- * if another CPU could re-list_add() it.
- */
-static inline int list_empty_careful(const struct list_head *head)
-{
- struct list_head *next = head->next;
- return (next == head) && (next == head->prev);
-}
-
-/**
- * list_is_singular - tests whether a list has just one entry.
- * @head: the list to test.
- */
-static inline int list_is_singular(const struct list_head *head)
-{
- return !list_empty(head) && (head->next == head->prev);
-}
-
-static inline void __list_cut_position(struct list_head *list,
- struct list_head *head,
- struct list_head *entry)
-{
- struct list_head *new_first = entry->next;
- list->next = head->next;
- list->next->prev = list;
- list->prev = entry;
- entry->next = list;
- head->next = new_first;
- new_first->prev = head;
-}
-
-/**
- * list_cut_position - cut a list into two
- * @list: a new list to add all removed entries
- * @head: a list with entries
- * @entry: an entry within head, could be the head itself
- * and if so we won't cut the list
- *
- * This helper moves the initial part of @head, up to and
- * including @entry, from @head to @list. You should
- * pass on @entry an element you know is on @head. @list
- * should be an empty list or a list you do not care about
- * losing its data.
- *
- */
-static inline void list_cut_position(struct list_head *list,
- struct list_head *head,
- struct list_head *entry)
-{
- if (list_empty(head))
- return;
- if (list_is_singular(head) && (head->next != entry && head != entry))
- return;
- if (entry == head)
- INIT_LIST_HEAD(list);
- else
- __list_cut_position(list, head, entry);
-}
-
-static inline void __list_splice(const struct list_head *list,
- struct list_head *prev, struct list_head *next)
-{
- struct list_head *first = list->next;
- struct list_head *last = list->prev;
-
- first->prev = prev;
- prev->next = first;
-
- last->next = next;
- next->prev = last;
-}
-
-/**
- * list_splice - join two lists, this is designed for stacks
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- */
-static inline void list_splice(const struct list_head *list,
- struct list_head *head)
-{
- if (!list_empty(list))
- __list_splice(list, head, head->next);
-}
-
-/**
- * list_splice_tail - join two lists, each list being a queue
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- */
-static inline void list_splice_tail(struct list_head *list,
- struct list_head *head)
-{
- if (!list_empty(list))
- __list_splice(list, head->prev, head);
-}
-
-/**
- * list_splice_init - join two lists and reinitialise the emptied list.
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- *
- * The list at @list is reinitialised
- */
-static inline void list_splice_init(struct list_head *list,
- struct list_head *head)
-{
- if (!list_empty(list)) {
- __list_splice(list, head, head->next);
- INIT_LIST_HEAD(list);
- }
-}
-
-/**
- * list_splice_tail_init - join two lists and reinitialise the emptied list
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- *
- * Each of the lists is a queue.
- * The list at @list is reinitialised
- */
-static inline void list_splice_tail_init(struct list_head *list,
- struct list_head *head)
-{
- if (!list_empty(list)) {
- __list_splice(list, head->prev, head);
- INIT_LIST_HEAD(list);
- }
-}
-
/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
@@ -354,56 +93,6 @@ static inline void list_splice_tail_init(struct list_head *list,
container_of(ptr, type, member)
/**
- * list_first_entry - get the first element from a list
- * @ptr: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_head within the struct.
- *
- * Note, that list is expected to be not empty.
- */
-#define list_first_entry(ptr, type, member) \
- list_entry((ptr)->next, type, member)
-
-/**
- * list_for_each - iterate over a list
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define list_for_each(pos, head) \
- for (pos = (head)->next; prefetch(pos->next), pos != (head); \
- pos = pos->next)
-
-/**
- * list_for_each_prev - iterate over a list backwards
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
- pos = pos->prev)
-
-/**
- * list_for_each_safe - iterate over a list safe against removal of list entry
- * @pos: the &struct list_head to use as a loop cursor.
- * @n: another &struct list_head to use as temporary storage
- * @head: the head for your list.
- */
-#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
-
-/**
- * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
- * @pos: the &struct list_head to use as a loop cursor.
- * @n: another &struct list_head to use as temporary storage
- * @head: the head for your list.
- */
-#define list_for_each_prev_safe(pos, n, head) \
- for (pos = (head)->prev, n = pos->prev; \
- prefetch(pos->prev), pos != (head); \
- pos = n, n = pos->prev)
-
-/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
@@ -414,128 +103,6 @@ static inline void list_splice_tail_init(struct list_head *list,
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
-/**
- * list_for_each_entry_reverse - iterate backwards over list of given type.
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- */
-#define list_for_each_entry_reverse(pos, head, member) \
- for (pos = list_entry((head)->prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = list_entry(pos->member.prev, typeof(*pos), member))
-
-/**
- * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
- * @pos: the type * to use as a start point
- * @head: the head of the list
- * @member: the name of the list_head within the struct.
- *
- * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
- */
-#define list_prepare_entry(pos, head, member) \
- ((pos) ? : list_entry(head, typeof(*pos), member))
-
-/**
- * list_for_each_entry_continue - continue iteration over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- *
- * Continue to iterate over list of given type, continuing after
- * the current position.
- */
-#define list_for_each_entry_continue(pos, head, member) \
- for (pos = list_entry(pos->member.next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * list_for_each_entry_continue_reverse - iterate backwards from the given point
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- *
- * Start to iterate over list of given type backwards, continuing after
- * the current position.
- */
-#define list_for_each_entry_continue_reverse(pos, head, member) \
- for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = list_entry(pos->member.prev, typeof(*pos), member))
-
-/**
- * list_for_each_entry_from - iterate over list of given type from the current point
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- *
- * Iterate over list of given type, continuing from current position.
- */
-#define list_for_each_entry_from(pos, head, member) \
- for (; prefetch(pos->member.next), &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- */
-#define list_for_each_entry_safe(pos, n, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member), \
- n = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
-
-/**
- * list_for_each_entry_safe_continue
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- *
- * Iterate over list of given type, continuing after current point,
- * safe against removal of list entry.
- */
-#define list_for_each_entry_safe_continue(pos, n, head, member) \
- for (pos = list_entry(pos->member.next, typeof(*pos), member), \
- n = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
-
-/**
- * list_for_each_entry_safe_from
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- *
- * Iterate over list of given type from current point, safe against
- * removal of list entry.
- */
-#define list_for_each_entry_safe_from(pos, n, head, member) \
- for (n = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
-
-/**
- * list_for_each_entry_safe_reverse
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_head within the struct.
- *
- * Iterate backwards over list of given type, safe against removal
- * of list entry.
- */
-#define list_for_each_entry_safe_reverse(pos, n, head, member) \
- for (pos = list_entry((head)->prev, typeof(*pos), member), \
- n = list_entry(pos->member.prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.prev, typeof(*n), member))
-
struct offset {
struct list_head list;
unsigned offset;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9eccd0c..381b0255 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1148,6 +1148,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.max_shader_engines,
CAYMAN_MAX_BACKENDS, disabled_rb_mask);
}
+ rdev->config.cayman.backend_map = tmp;
WREG32(GB_BACKEND_MAP, tmp);
cgts_tcc_disable = 0xffff0000;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a8e5465..4a2eb40 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -731,10 +731,6 @@ struct radeon_doorbell {
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
-void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset);
/*
* IRQS.
@@ -2391,6 +2387,7 @@ struct radeon_device {
struct radeon_dummy_page dummy_page;
bool shutdown;
bool need_dma32;
+ bool need_swiotlb;
bool accel_working;
bool fastfb_working; /* IGP feature*/
bool needs_reset, in_reset;
@@ -2442,8 +2439,6 @@ struct radeon_device {
struct radeon_atcs atcs;
/* srbm instance registers */
struct mutex srbm_mutex;
- /* GRBM index mutex. Protects concurrents access to GRBM index */
- struct mutex grbm_idx_mutex;
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 59dcefb..df9469a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (radeon_dp_needs_link_train(radeon_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (!radeon_dp_getdpcd(radeon_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+ radeon_dp_needs_link_train(radeon_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (!radeon_dp_getdpcd(radeon_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
@@ -392,7 +385,6 @@ static int radeon_ddc_get_modes(struct drm_connector *connector)
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
ret = drm_add_edid_modes(connector, radeon_connector->edid);
- drm_edid_to_eld(connector, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(connector, NULL);
@@ -900,9 +892,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -925,8 +919,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1040,9 +1038,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1109,8 +1109,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1174,9 +1176,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->dac_load_detect)
return ret;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1188,8 +1192,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1252,9 +1260,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (radeon_connector->detected_hpd_without_ddc) {
force = true;
@@ -1437,8 +1447,10 @@ out:
}
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1689,9 +1701,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dig_connector->is_mst)
return connector_status_disconnected;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1778,8 +1792,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
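
Note: every detect() hunk above wraps the runtime-PM get/put calls in a !drm_kms_helper_is_poll_worker() check, presumably so the output-poll worker never tries to runtime-resume the device from inside its own work item (runtime suspend flushes that worker through drm_kms_helper_poll_disable(), so doing so could deadlock). A minimal sketch of the pattern; example_detect() is a hypothetical connector callback, the helper calls are the real ones used in the hunks:

static enum drm_connector_status
example_detect(struct drm_connector *connector, bool force)
{
	enum drm_connector_status status = connector_status_disconnected;
	int r;

	/* Take a runtime-PM reference only when called from process
	 * context; the poll worker is expected to run only while the
	 * device is already awake. */
	if (!drm_kms_helper_is_poll_worker()) {
		r = pm_runtime_get_sync(connector->dev->dev);
		if (r < 0)
			return connector_status_disconnected;
	}

	/* ... actual HPD/DDC probing would go here ... */

	if (!drm_kms_helper_is_poll_worker()) {
		pm_runtime_mark_last_busy(connector->dev->dev);
		pm_runtime_put_autosuspend(connector->dev->dev);
	}

	return status;
}
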
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ffc10ca..e415d2c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_cache.h>
#include <drm/radeon_drm.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
@@ -392,37 +393,6 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
__clear_bit(doorbell, rdev->doorbell.used);
}
-/**
- * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
- * setup KFD
- *
- * @rdev: radeon_device pointer
- * @aperture_base: output returning doorbell aperture base physical address
- * @aperture_size: output returning doorbell aperture size in bytes
- * @start_offset: output returning # of doorbell bytes reserved for radeon.
- *
- * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
- * takes doorbells required for its own rings and reports the setup to KFD.
- * Radeon reserved doorbells are at the start of the doorbell aperture.
- */
-void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset)
-{
- /* The first num_doorbells are used by radeon.
- * KFD takes whatever's left in the aperture. */
- if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
- *aperture_base = rdev->doorbell.base;
- *aperture_size = rdev->doorbell.size;
- *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
- } else {
- *aperture_base = 0;
- *aperture_size = 0;
- *start_offset = 0;
- }
-}
-
/*
* radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
@@ -1341,7 +1311,6 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
- mutex_init(&rdev->grbm_idx_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1397,6 +1366,10 @@ int radeon_device_init(struct radeon_device *rdev,
if ((rdev->flags & RADEON_IS_PCI) &&
(rdev->family <= CHIP_RS740))
rdev->need_dma32 = true;
+#ifdef CONFIG_PPC64
+ if (rdev->family == CHIP_CEDAR)
+ rdev->need_dma32 = true;
+#endif
dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
@@ -1410,6 +1383,7 @@ int radeon_device_init(struct radeon_device *rdev,
pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
pr_warn("radeon: No coherent DMA available\n");
}
+ rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
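
Note: the new need_swiotlb flag records whether any I/O-addressable memory lies above what the device can reach with the DMA mask configured just before it; the TTM populate/unpopulate hunks later in this patch only fall back to the swiotlb-backed DMA page pool when both this flag and swiotlb_nr_tbl() are set. A standalone form of the check, with a hypothetical helper name:

/* Hypothetical wrapper around the check added above. */
static bool radeon_example_need_swiotlb(int dma_bits)
{
	/* true when some io memory sits beyond the device's DMA mask */
	return drm_get_max_iomem() > ((u64)1 << dma_bits);
}
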
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ddfe91ef..26129b2 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_edid.h>
@@ -569,7 +570,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
base &= ~7;
}
work->base = base;
- work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+ work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
dev->driver->get_vblank_counter(dev, work->crtc_id);
/* We borrow the event spin lock for protecting flip_work */
@@ -1362,15 +1363,9 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return &radeon_fb->base;
}
-static void radeon_output_poll_changed(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
- radeon_fb_output_poll_changed(rdev);
-}
-
static const struct drm_mode_config_funcs radeon_mode_funcs = {
.fb_create = radeon_user_framebuffer_create,
- .output_poll_changed = radeon_output_poll_changed
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
};
static const struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 2917ea1..cd8a3ee 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -197,7 +197,6 @@ static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
- drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
@@ -329,7 +328,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
-const struct drm_dp_mst_topology_cbs mst_cbs = {
+static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = radeon_dp_add_mst_connector,
.register_connector = radeon_dp_register_mst_connector,
.destroy_connector = radeon_dp_destroy_mst_connector,
@@ -719,7 +718,7 @@ radeon_dp_mst_check_status(struct radeon_connector *radeon_connector)
DP_SINK_COUNT_ESI, esi, 8);
go_again:
if (dret == 8) {
- DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ DRM_DEBUG_KMS("got esi %3ph\n", esi);
ret = drm_dp_mst_hpd_irq(&radeon_connector->mst_mgr, esi, &handled);
if (handled) {
@@ -734,7 +733,7 @@ go_again:
dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
DP_SINK_COUNT_ESI, esi, 8);
if (dret == 8) {
- DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
goto go_again;
}
} else
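
Note: the two DRM_DEBUG_KMS() changes above use the kernel's %ph printf extension, which dumps a small buffer as space-separated hex bytes, with the field width giving the byte count; "%3ph" therefore prints the first three bytes of esi and replaces the hand-rolled "%02x %02x %02x". An illustrative (not driver-specific) use, assuming kernel context:

static void example_log_esi(void)
{
	u8 esi[8] = { 0x10, 0x00, 0x40 };

	/* prints "... got esi 10 00 40" when KMS debugging is enabled */
	DRM_DEBUG_KMS("got esi %3ph\n", esi);
}
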
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 31dd04f..b28288a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -415,7 +415,6 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
ret = radeon_suspend_kms(drm_dev, false, false, false);
pci_save_state(pdev);
@@ -452,7 +451,6 @@ static int radeon_pmops_runtime_resume(struct device *dev)
ret = radeon_resume_kms(drm_dev, false, false);
drm_kms_helper_poll_enable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 33b821d..57c5404 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -306,12 +306,6 @@ out:
return ret;
}
-void radeon_fb_output_poll_changed(struct radeon_device *rdev)
-{
- if (rdev->mode_info.rfbdev)
- drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
-}
-
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
struct radeon_framebuffer *rfb = &rfbdev->rfb;
@@ -422,19 +416,3 @@ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector
if (rdev->mode_info.rfbdev)
drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
-
-void radeon_fbdev_restore_mode(struct radeon_device *rdev)
-{
- struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- if (!rfbdev)
- return;
-
- fb_helper = &rfbdev->helper;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (ret)
- DRM_DEBUG("failed to restore crtc mode\n");
-}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index cf3deb2..27d8e7d 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
if (robj) {
- if (robj->gem_base.import_attach)
- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
radeon_mn_unregister(robj);
radeon_bo_unref(&robj);
}
@@ -285,6 +283,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_userptr *args = data;
struct drm_gem_object *gobj;
@@ -343,7 +342,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
radeon_bo_unreserve(bo);
up_read(&current->mm->mmap_sem);
if (r)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index cde037f..dec1e08 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
#include "radeon.h"
#include <drm/radeon_drm.h>
#include "radeon_asic.h"
@@ -629,9 +630,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
*/
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
- struct radeon_device *rdev = dev->dev_private;
-
- radeon_fbdev_restore_mode(rdev);
+ drm_fb_helper_lastclose(dev);
vga_switcheroo_process_delayed_switch();
}
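
Note: the radeon_display.c, radeon_fb.c and radeon_kms.c hunks above drop radeon's private output_poll_changed and lastclose fbdev wrappers in favour of the generic drm_fb_helper_output_poll_changed() and drm_fb_helper_lastclose() helpers, which presumably locate the fb helper through the state the DRM core already tracks for the device. A minimal sketch of the resulting wiring, under that assumption:

static const struct drm_mode_config_funcs example_mode_funcs = {
	.fb_create = radeon_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static void example_lastclose(struct drm_device *dev)
{
	/* restores the fbdev console mode when the last client closes */
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}
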
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 1d62288..abd2497 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -124,6 +124,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
unsigned long end)
{
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+ struct ttm_operation_ctx ctx = { false, false };
struct interval_tree_node *it;
/* notification is exclusive, but interval is inclusive */
@@ -157,7 +158,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
DRM_ERROR("(%ld) failed to validate user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ca0a7ed..3243e5e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -984,9 +984,6 @@ int radeon_fbdev_init(struct radeon_device *rdev);
void radeon_fbdev_fini(struct radeon_device *rdev);
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
-void radeon_fbdev_restore_mode(struct radeon_device *rdev);
-
-void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0935949..edbb4cd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
WARN_ON_ONCE(!list_empty(&bo->va));
+ if (bo->gem_base.import_attach)
+ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -204,11 +206,7 @@ int radeon_bo_create(struct radeon_device *rdev,
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
- if (unlikely(r)) {
- kfree(bo);
- return r;
- }
+ drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size);
bo->rdev = rdev;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
@@ -238,9 +236,10 @@ int radeon_bo_create(struct radeon_device *rdev,
* may be slow
* See https://bugs.freedesktop.org/show_bug.cgi?id=88758
*/
-
+#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
thanks to write-combining
+#endif
if (bo->flags & RADEON_GEM_GTT_WC)
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
@@ -258,8 +257,8 @@ int radeon_bo_create(struct radeon_device *rdev,
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, !kernel, NULL,
- acc_size, sg, resv, &radeon_ttm_bo_destroy);
+ &bo->placement, page_align, !kernel, acc_size,
+ sg, resv, &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
@@ -329,6 +328,7 @@ void radeon_bo_unref(struct radeon_bo **bo)
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
@@ -371,7 +371,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -393,6 +393,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
int radeon_bo_unpin(struct radeon_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (!bo->pin_count) {
@@ -406,7 +407,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
bo->rdev->vram_pin_size -= radeon_bo_size(bo);
@@ -531,6 +532,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
struct ww_acquire_ctx *ticket,
struct list_head *head, int ring)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct radeon_bo_list *lobj;
struct list_head duplicates;
int r;
@@ -572,7 +574,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
radeon_uvd_force_into_uvd_segment(bo, allowed);
initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
initial_bytes_moved;
@@ -792,6 +794,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct radeon_device *rdev;
struct radeon_bo *rbo;
unsigned long offset, size, lpfn;
@@ -823,10 +826,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
(!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
rbo->placements[i].lpfn = lpfn;
}
- r = ttm_bo_validate(bo, &rbo->placement, false, false);
+ r = ttm_bo_validate(bo, &rbo->placement, &ctx);
if (unlikely(r == -ENOMEM)) {
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
- return ttm_bo_validate(bo, &rbo->placement, false, false);
+ return ttm_bo_validate(bo, &rbo->placement, &ctx);
} else if (unlikely(r != 0)) {
return r;
}
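
Note: the radeon_gem.c, radeon_mn.c and radeon_object.c hunks above convert ttm_bo_validate() from the old (interruptible, no_wait_gpu) boolean pair to a struct ttm_operation_ctx; the positional initializers in the patch, e.g. { true, false }, correspond to those two fields in that order. A minimal sketch of the pattern with a hypothetical helper:

static int example_validate_gtt(struct radeon_bo *bo)
{
	/* interruptible wait, do not skip waiting for the GPU */
	struct ttm_operation_ctx ctx = { true, false };

	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
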
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 326ad06..4b65425 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
@@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
- /* allow new DPM state to be picked */
- radeon_pm_compute_clocks_dpm(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
@@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = rdev->pm.dpm.ac_power ?
- POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
restart_search:
/* Pick the best power state based on current conditions */
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6ada64d..8689fcc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -311,6 +311,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
@@ -328,8 +329,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placements.fpfn = 0;
placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
if (unlikely(r)) {
return r;
}
@@ -339,7 +339,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
goto out_cleanup;
}
- r = ttm_tt_bind(bo->ttm, &tmp_mem);
+ r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -347,7 +347,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -358,6 +358,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
@@ -375,12 +376,11 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placements.fpfn = 0;
placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -393,17 +393,16 @@ out_cleanup:
return r;
}
-static int radeon_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct radeon_bo *rbo;
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
- r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (r)
return r;
@@ -433,19 +432,20 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
- r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
+ ctx->no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
- r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
+ ctx->no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
+ new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r) {
return r;
}
@@ -686,18 +686,17 @@ static struct ttm_backend_func radeon_backend_func = {
.destroy = &radeon_ttm_backend_destroy,
};
-static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt;
- rdev = radeon_get_rdev(bdev);
+ rdev = radeon_get_rdev(bo->bdev);
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
- size, page_flags, dummy_read_page);
+ return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
+ page_flags);
}
#endif
@@ -707,7 +706,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
}
gtt->ttm.ttm.func = &radeon_backend_func;
gtt->rdev = rdev;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
}
@@ -721,15 +720,13 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
return (struct radeon_ttm_tt *)ttm;
}
-static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
struct radeon_device *rdev;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
- if (ttm->state != tt_unpopulated)
- return 0;
-
if (gtt && gtt->userptr) {
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
@@ -750,17 +747,17 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_tt_populate(ttm);
+ return ttm_agp_tt_populate(ttm, ctx);
}
#endif
#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, rdev->dev);
+ if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
+ return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
}
#endif
- return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
+ return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -787,7 +784,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
#endif
#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
+ if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
return;
}
@@ -844,7 +841,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int radeon_ttm_init(struct radeon_device *rdev)
@@ -1155,7 +1151,7 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
count = ARRAY_SIZE(radeon_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
- if (!swiotlb_nr_tbl())
+ if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
--count;
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index d34d1cf..95f4db7 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
/* calc dclk divider with current vco freq */
dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
pd_min, pd_even);
- if (vclk_div > pd_max)
+ if (dclk_div > pd_max)
break; /* vco is too big, it has to stop */
/* calc score with current vco freq */
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e5c0e63..7f1a9c7 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -387,6 +387,7 @@ static void radeon_vm_set_pages(struct radeon_device *rdev,
static int radeon_vm_clear_bo(struct radeon_device *rdev,
struct radeon_bo *bo)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct radeon_ib ib;
unsigned entries;
uint64_t addr;
@@ -396,7 +397,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
if (r)
return r;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto error_unreserve;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index ee3e742..97a0a63 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2984,6 +2984,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6667)) {
max_sclk = 75000;
}
+ if ((rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6665)) {
+ max_sclk = 60000;
+ max_mclk = 80000;
+ }
} else if (rdev->family == CHIP_OLAND) {
if ((rdev->pdev->revision == 0xC7) ||
(rdev->pdev->revision == 0x80) ||
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 8a50dab..edde8d4 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -19,14 +19,17 @@ config DRM_RCAR_DW_HDMI
Enable support for R-Car Gen3 internal HDMI encoder.
config DRM_RCAR_LVDS
- bool "R-Car DU LVDS Encoder Support"
- depends on DRM_RCAR_DU
+ tristate "R-Car DU LVDS Encoder Support"
+ depends on DRM && DRM_BRIDGE && OF
select DRM_PANEL
+ select OF_FLATTREE
+ select OF_OVERLAY
help
Enable support for the R-Car Display Unit embedded LVDS encoders.
config DRM_RCAR_VSP
- bool "R-Car DU VSP Compositor Support"
+ bool "R-Car DU VSP Compositor Support" if ARM
+ default y if ARM64
depends on DRM_RCAR_DU
depends on VIDEO_RENESAS_VSP1=y || (VIDEO_RENESAS_VSP1 && DRM_RCAR_DU=m)
help
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 0cf5c11..3e58ed9 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -4,12 +4,16 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_encoder.o \
rcar_du_group.o \
rcar_du_kms.o \
- rcar_du_lvdscon.o \
rcar_du_plane.o
-rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
-
+rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
+ rcar_du_of_lvds_r8a7790.dtb.o \
+ rcar_du_of_lvds_r8a7791.dtb.o \
+ rcar_du_of_lvds_r8a7793.dtb.o \
+ rcar_du_of_lvds_r8a7795.dtb.o \
+ rcar_du_of_lvds_r8a7796.dtb.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
+obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 301ea1a..c442053 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -125,14 +125,55 @@ static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
unsigned int m;
unsigned int n;
- for (n = 39; n < 120; n++) {
- for (m = 0; m < 4; m++) {
+ /*
+ * fin                                 fvco        fout       fclkout
+ * in --> [1/M] --> |PD| -> [LPF] -> [VCO] -> [1/P] -+-> [1/FDPLL] -> out
+ *              +-> |  |                             |
+ *              |                                    |
+ *              +---------------- [1/N] <------------+
+ *
+ * fclkout = fvco / P / FDPLL -- (1)
+ *
+ * fin/M = fvco/P/N
+ *
+ * fvco = fin * P * N / M -- (2)
+ *
+ * (1) + (2) indicates
+ *
+ * fclkout = fin * N / M / FDPLL
+ *
+ * NOTES
+ * N : (n + 1)
+ * M : (m + 1)
+ * FDPLL : (fdpll + 1)
+ * P : 2
+ * 2kHz < fvco < 4096MHz
+ *
+ * To minimize the jitter,
+ * N : as large as possible
+ * M : as small as possible
+ */
+ for (m = 0; m < 4; m++) {
+ for (n = 119; n > 38; n--) {
+ /*
+ * This code only runs on 64-bit architectures, so the
+ * unsigned long type can be used for 64-bit
+ * computation. It will still compile without any
+ * warning on 32-bit architectures.
+ *
+ * To optimize calculations, use fout instead of fvco
+ * to verify the VCO frequency constraint.
+ */
+ unsigned long fout = input * (n + 1) / (m + 1);
+
+ if (fout < 1000 || fout > 2048 * 1000 * 1000U)
+ continue;
+
for (fdpll = 1; fdpll < 32; fdpll++) {
unsigned long output;
- output = input * (n + 1) / (m + 1)
- / (fdpll + 1);
- if (output >= 400000000)
+ output = fout / (fdpll + 1);
+ if (output >= 400 * 1000 * 1000)
continue;
diff = abs((long)output - (long)target);
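
Note: the search above implements fclkout = fin * N / M / FDPLL with N = n + 1 in [40, 120], M = m + 1 in [1, 4] and FDPLL = fdpll + 1 in [2, 32], rejecting combinations whose intermediate fout falls outside (1 kHz, 2048 MHz] or whose output reaches 400 MHz. A user-space re-implementation for checking candidate dividers by hand; the clock values are made-up examples, not taken from any board:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t input = 48000000;	/* example reference clock, Hz */
	const uint64_t target = 148500000;	/* example pixel clock, Hz */
	uint64_t best_diff = UINT64_MAX;
	unsigned int best_n = 0, best_m = 0, best_fdpll = 0;

	for (unsigned int m = 0; m < 4; m++) {
		for (unsigned int n = 119; n > 38; n--) {
			uint64_t fout = input * (n + 1) / (m + 1);

			if (fout < 1000 || fout > 2048ULL * 1000 * 1000)
				continue;

			for (unsigned int fdpll = 1; fdpll < 32; fdpll++) {
				uint64_t output = fout / (fdpll + 1);
				uint64_t diff;

				if (output >= 400ULL * 1000 * 1000)
					continue;

				diff = output > target ? output - target
						       : target - output;
				if (diff < best_diff) {
					best_diff = diff;
					best_n = n;
					best_m = m;
					best_fdpll = fdpll;
				}
			}
		}
	}

	printf("n=%u m=%u fdpll=%u output=%llu Hz\n", best_n, best_m,
	       best_fdpll,
	       (unsigned long long)(input * (best_n + 1) / (best_m + 1) /
				    (best_fdpll + 1)));
	return 0;
}
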
@@ -319,7 +360,8 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
struct rcar_du_plane *plane = &rcrtc->group->planes[i];
unsigned int j;
- if (plane->plane.state->crtc != &rcrtc->crtc)
+ if (plane->plane.state->crtc != &rcrtc->crtc ||
+ !plane->plane.state->visible)
continue;
/* Insert the plane in the sorted planes array. */
@@ -557,41 +599,6 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
rcar_du_group_start_stop(rcrtc->group, false);
}
-void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
-{
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
- rcar_du_vsp_disable(rcrtc);
-
- rcar_du_crtc_stop(rcrtc);
- rcar_du_crtc_put(rcrtc);
-}
-
-void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
-{
- unsigned int i;
-
- if (!rcrtc->crtc.state->active)
- return;
-
- rcar_du_crtc_get(rcrtc);
- rcar_du_crtc_setup(rcrtc);
-
- /* Commit the planes state. */
- if (!rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) {
- for (i = 0; i < rcrtc->group->num_planes; ++i) {
- struct rcar_du_plane *plane = &rcrtc->group->planes[i];
-
- if (plane->plane.state->crtc != &rcrtc->crtc)
- continue;
-
- rcar_du_plane_setup(plane);
- }
- }
-
- rcar_du_crtc_update_planes(rcrtc);
- rcar_du_crtc_start(rcrtc);
-}
-
/* -----------------------------------------------------------------------------
* CRTC Functions
*/
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index d2f29e6..3917d83 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -22,18 +22,61 @@
#include <linux/wait.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
+#include "rcar_du_of.h"
#include "rcar_du_regs.h"
/* -----------------------------------------------------------------------------
* Device Information
*/
+static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
+ .gen = 2,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ .num_crtcs = 2,
+ .routes = {
+ /*
+ * R8A7743 has one RGB output and one LVDS output
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(1) | BIT(0),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 1,
+ },
+ },
+ .num_lvds = 1,
+};
+
+static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
+ .gen = 2,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ .num_crtcs = 2,
+ .routes = {
+ /*
+ * R8A7745 has two RGB outputs
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ },
+};
+
static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.gen = 2,
.features = 0,
@@ -52,14 +95,13 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.port = 1,
},
},
- .num_lvds = 0,
};
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
| RCAR_DU_FEATURE_EXT_CTRL_REGS,
- .quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
+ .quirks = RCAR_DU_QUIRK_ALIGN_128B,
.num_crtcs = 3,
.routes = {
/*
@@ -121,7 +163,6 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = {
.port = 1,
},
},
- .num_lvds = 0,
};
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
@@ -143,7 +184,6 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.port = 1,
},
},
- .num_lvds = 0,
};
static const struct rcar_du_device_info rcar_du_r8a7795_info = {
@@ -206,7 +246,29 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
.dpll_ch = BIT(1),
};
+static const struct rcar_du_device_info rcar_du_r8a77970_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_VSP1_SOURCE,
+ .num_crtcs = 1,
+ .routes = {
+ /* R8A77970 has one RGB output and one LVDS output. */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 1,
+ },
+ },
+ .num_lvds = 1,
+};
+
static const struct of_device_id rcar_du_of_table[] = {
+ { .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
+ { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
@@ -215,6 +277,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
{ .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
{ .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
+ { .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info },
{ }
};
@@ -265,9 +328,19 @@ static struct drm_driver rcar_du_driver = {
static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
+ struct drm_atomic_state *state;
drm_kms_helper_poll_disable(rcdu->ddev);
- /* TODO Suspend the CRTC */
+ drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, true);
+
+ state = drm_atomic_helper_suspend(rcdu->ddev);
+ if (IS_ERR(state)) {
+ drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, false);
+ drm_kms_helper_poll_enable(rcdu->ddev);
+ return PTR_ERR(state);
+ }
+
+ rcdu->suspend_state = state;
return 0;
}
@@ -276,9 +349,10 @@ static int rcar_du_pm_resume(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- /* TODO Resume the CRTC */
-
+ drm_atomic_helper_resume(rcdu->ddev, rcdu->suspend_state);
+ drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, false);
drm_kms_helper_poll_enable(rcdu->ddev);
+
return 0;
}
#endif
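
Note: the suspend/resume hunks above replace the TODO stubs with the generic atomic helpers: the full modeset state is saved with drm_atomic_helper_suspend() and replayed with drm_atomic_helper_resume(), and the fbdev/poll changes are rolled back if saving the state fails. A minimal device-agnostic sketch of the same flow; the drm_fbdev_cma_set_suspend_unlocked() calls from the patch are omitted here:

static int example_pm_suspend(struct drm_device *ddev,
			      struct drm_atomic_state **saved)
{
	struct drm_atomic_state *state;

	drm_kms_helper_poll_disable(ddev);

	state = drm_atomic_helper_suspend(ddev);
	if (IS_ERR(state)) {
		/* undo everything so a failed suspend leaves KMS usable */
		drm_kms_helper_poll_enable(ddev);
		return PTR_ERR(state);
	}

	*saved = state;
	return 0;
}

static int example_pm_resume(struct drm_device *ddev,
			     struct drm_atomic_state *saved)
{
	int ret = drm_atomic_helper_resume(ddev, saved);

	drm_kms_helper_poll_enable(ddev);
	return ret;
}
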
@@ -378,7 +452,19 @@ static struct platform_driver rcar_du_platform_driver = {
},
};
-module_platform_driver(rcar_du_platform_driver);
+static int __init rcar_du_init(void)
+{
+ rcar_du_of_init(rcar_du_of_table);
+
+ return platform_driver_register(&rcar_du_platform_driver);
+}
+module_init(rcar_du_init);
+
+static void __exit rcar_du_exit(void)
+{
+ platform_driver_unregister(&rcar_du_platform_driver);
+}
+module_exit(rcar_du_exit);
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_DESCRIPTION("Renesas R-Car Display Unit DRM Driver");
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index f8cd794..5c7ec15 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -26,14 +26,12 @@ struct device;
struct drm_device;
struct drm_fbdev_cma;
struct rcar_du_device;
-struct rcar_du_lvdsenc;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
#define RCAR_DU_FEATURE_EXT_CTRL_REGS (1 << 1) /* Has extended control registers */
#define RCAR_DU_FEATURE_VSP1_SOURCE (1 << 2) /* Has inputs from VSP1 */
#define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */
-#define RCAR_DU_QUIRK_LVDS_LANES (1 << 1) /* LVDS lanes 1 and 3 inverted */
/*
* struct rcar_du_output_routing - Output routing specification
@@ -70,7 +68,6 @@ struct rcar_du_device_info {
#define RCAR_DU_MAX_CRTCS 4
#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
-#define RCAR_DU_MAX_LVDS 2
#define RCAR_DU_MAX_VSPS 4
struct rcar_du_device {
@@ -81,6 +78,7 @@ struct rcar_du_device {
struct drm_device *ddev;
struct drm_fbdev_cma *fbdev;
+ struct drm_atomic_state *suspend_state;
struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
unsigned int num_crtcs;
@@ -95,8 +93,6 @@ struct rcar_du_device {
unsigned int dpad0_source;
unsigned int vspd1_sink;
-
- struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS];
};
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index ba8d280..f9c933d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -21,134 +21,22 @@
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
-#include "rcar_du_lvdscon.h"
-#include "rcar_du_lvdsenc.h"
/* -----------------------------------------------------------------------------
* Encoder
*/
-static void rcar_du_encoder_disable(struct drm_encoder *encoder)
-{
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
- if (renc->connector && renc->connector->panel) {
- drm_panel_disable(renc->connector->panel);
- drm_panel_unprepare(renc->connector->panel);
- }
-
- if (renc->lvds)
- rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, false);
-}
-
-static void rcar_du_encoder_enable(struct drm_encoder *encoder)
-{
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
- if (renc->lvds)
- rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, true);
-
- if (renc->connector && renc->connector->panel) {
- drm_panel_prepare(renc->connector->panel);
- drm_panel_enable(renc->connector->panel);
- }
-}
-
-static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
- struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
- const struct drm_display_mode *mode = &crtc_state->mode;
- struct drm_connector *connector = conn_state->connector;
- struct drm_device *dev = encoder->dev;
-
- /*
- * Only panel-related encoder types require validation here, everything
- * else is handled by the bridge drivers.
- */
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- const struct drm_display_mode *panel_mode;
-
- if (list_empty(&connector->modes)) {
- dev_dbg(dev->dev, "encoder: empty modes list\n");
- return -EINVAL;
- }
-
- panel_mode = list_first_entry(&connector->modes,
- struct drm_display_mode, head);
-
- /* We're not allowed to modify the resolution. */
- if (mode->hdisplay != panel_mode->hdisplay ||
- mode->vdisplay != panel_mode->vdisplay)
- return -EINVAL;
-
- /*
- * The flat panel mode is fixed, just copy it to the adjusted
- * mode.
- */
- drm_mode_copy(adjusted_mode, panel_mode);
- }
-
- if (renc->lvds)
- rcar_du_lvdsenc_atomic_check(renc->lvds, adjusted_mode);
-
- return 0;
-}
-
static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
- struct drm_display_info *info = &conn_state->connector->display_info;
- enum rcar_lvds_mode mode;
rcar_du_crtc_route_output(crtc_state->crtc, renc->output);
-
- if (!renc->lvds) {
- /*
- * The DU driver creates connectors only for the outputs of the
- * internal LVDS encoders.
- */
- renc->connector = NULL;
- return;
- }
-
- renc->connector = to_rcar_connector(conn_state->connector);
-
- if (!info->num_bus_formats || !info->bus_formats) {
- dev_err(encoder->dev->dev, "no LVDS bus format reported\n");
- return;
- }
-
- switch (info->bus_formats[0]) {
- case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
- case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
- mode = RCAR_LVDS_MODE_JEIDA;
- break;
- case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
- mode = RCAR_LVDS_MODE_VESA;
- break;
- default:
- dev_err(encoder->dev->dev,
- "unsupported LVDS bus format 0x%04x\n",
- info->bus_formats[0]);
- return;
- }
-
- if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
- mode |= RCAR_LVDS_MODE_MIRROR;
-
- rcar_du_lvdsenc_set_mode(renc->lvds, mode);
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
.atomic_mode_set = rcar_du_encoder_mode_set,
- .disable = rcar_du_encoder_disable,
- .enable = rcar_du_encoder_enable,
- .atomic_check = rcar_du_encoder_atomic_check,
};
static const struct drm_encoder_funcs encoder_funcs = {
@@ -172,33 +60,14 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
renc->output = output;
encoder = rcar_encoder_to_drm_encoder(renc);
- switch (output) {
- case RCAR_DU_OUTPUT_LVDS0:
- renc->lvds = rcdu->lvds[0];
- break;
+ dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
+ enc_node, output);
- case RCAR_DU_OUTPUT_LVDS1:
- renc->lvds = rcdu->lvds[1];
- break;
-
- default:
- break;
- }
-
- if (enc_node) {
- dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
- enc_node, output);
-
- /* Locate the DRM bridge from the encoder DT node. */
- bridge = of_drm_find_bridge(enc_node);
- if (!bridge) {
- ret = -EPROBE_DEFER;
- goto done;
- }
- } else {
- dev_dbg(rcdu->dev,
- "initializing internal encoder for output %u\n",
- output);
+ /* Locate the DRM bridge from the encoder DT node. */
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge) {
+ ret = -EPROBE_DEFER;
+ goto done;
}
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
@@ -208,28 +77,14 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
drm_encoder_helper_add(encoder, &encoder_helper_funcs);
- if (bridge) {
- /*
- * Attach the bridge to the encoder. The bridge will create the
- * connector.
- */
- ret = drm_bridge_attach(encoder, bridge, NULL);
- if (ret) {
- drm_encoder_cleanup(encoder);
- return ret;
- }
- } else {
- /* There's no bridge, create the connector manually. */
- switch (output) {
- case RCAR_DU_OUTPUT_LVDS0:
- case RCAR_DU_OUTPUT_LVDS1:
- ret = rcar_du_lvds_connector_init(rcdu, renc, con_node);
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
+ /*
+ * Attach the bridge to the encoder. The bridge will create the
+ * connector.
+ */
+ ret = drm_bridge_attach(encoder, bridge, NULL);
+ if (ret) {
+ drm_encoder_cleanup(encoder);
+ return ret;
}
done:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index 5422fa4..2d2abca 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -19,13 +19,10 @@
struct drm_panel;
struct rcar_du_device;
-struct rcar_du_lvdsenc;
struct rcar_du_encoder {
struct drm_encoder base;
enum rcar_du_output output;
- struct rcar_du_connector *connector;
- struct rcar_du_lvdsenc *lvds;
};
#define to_rcar_encoder(e) \
@@ -33,15 +30,6 @@ struct rcar_du_encoder {
#define rcar_encoder_to_drm_encoder(e) (&(e)->base)
-struct rcar_du_connector {
- struct drm_connector connector;
- struct rcar_du_encoder *encoder;
- struct drm_panel *panel;
-};
-
-#define to_rcar_connector(c) \
- container_of(c, struct rcar_du_connector, connector)
-
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 566d1a9..0329b35 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -27,7 +27,6 @@
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
-#include "rcar_du_lvdsenc.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
@@ -341,11 +340,10 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
of_node_put(entity_ep_node);
if (!encoder) {
- /*
- * If no encoder has been found the entity must be the
- * connector.
- */
- connector = entity;
+ dev_warn(rcdu->dev,
+ "no encoder found for endpoint %pOF, skipping\n",
+ ep->local_node);
+ return -ENODEV;
}
ret = rcar_du_encoder_init(rcdu, output, encoder, connector);
@@ -595,10 +593,6 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
}
/* Initialize the encoders. */
- ret = rcar_du_lvdsenc_init(rcdu);
- if (ret < 0)
- return ret;
-
ret = rcar_du_encoders_init(rcdu);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
deleted file mode 100644
index b373ad4..0000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * rcar_du_lvdscon.c -- R-Car Display Unit LVDS Connector
- *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_panel.h>
-
-#include <video/display_timing.h>
-#include <video/of_display_timing.h>
-#include <video/videomode.h>
-
-#include "rcar_du_drv.h"
-#include "rcar_du_encoder.h"
-#include "rcar_du_kms.h"
-#include "rcar_du_lvdscon.h"
-
-static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
-{
- struct rcar_du_connector *rcon = to_rcar_connector(connector);
-
- return drm_panel_get_modes(rcon->panel);
-}
-
-static const struct drm_connector_helper_funcs connector_helper_funcs = {
- .get_modes = rcar_du_lvds_connector_get_modes,
-};
-
-static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
-{
- struct rcar_du_connector *rcon = to_rcar_connector(connector);
-
- drm_panel_detach(rcon->panel);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_lvds_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc,
- const struct device_node *np)
-{
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct rcar_du_connector *rcon;
- struct drm_connector *connector;
- int ret;
-
- rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
- if (rcon == NULL)
- return -ENOMEM;
-
- connector = &rcon->connector;
-
- rcon->panel = of_drm_find_panel(np);
- if (!rcon->panel)
- return -EPROBE_DEFER;
-
- ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- if (ret < 0)
- return ret;
-
- drm_connector_helper_add(connector, &connector_helper_funcs);
-
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_object_property_set_value(&connector->base,
- rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
-
- ret = drm_mode_connector_attach_encoder(connector, encoder);
- if (ret < 0)
- return ret;
-
- ret = drm_panel_attach(rcon->panel, connector);
- if (ret < 0)
- return ret;
-
- rcon->encoder = renc;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
deleted file mode 100644
index 639071d..0000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * rcar_du_lvdscon.h -- R-Car Display Unit LVDS Connector
- *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_LVDSCON_H__
-#define __RCAR_DU_LVDSCON_H__
-
-struct rcar_du_device;
-struct rcar_du_encoder;
-
-int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc,
- const struct device_node *np);
-
-#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
deleted file mode 100644
index 12d22f3..0000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * rcar_du_lvdsenc.c -- R-Car Display Unit LVDS Encoder
- *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "rcar_du_drv.h"
-#include "rcar_du_encoder.h"
-#include "rcar_du_lvdsenc.h"
-#include "rcar_lvds_regs.h"
-
-struct rcar_du_lvdsenc {
- struct rcar_du_device *dev;
-
- unsigned int index;
- void __iomem *mmio;
- struct clk *clock;
- bool enabled;
-
- enum rcar_lvds_input input;
- enum rcar_lvds_mode mode;
-};
-
-static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
-{
- iowrite32(data, lvds->mmio + reg);
-}
-
-static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
- struct rcar_du_crtc *rcrtc)
-{
- const struct drm_display_mode *mode = &rcrtc->crtc.mode;
- unsigned int freq = mode->clock;
- u32 lvdcr0;
- u32 pllcr;
-
- /* PLL clock configuration */
- if (freq < 39000)
- pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
- else if (freq < 61000)
- pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
- else if (freq < 121000)
- pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
- else
- pllcr = LVDPLLCR_PLLDLYCNT_150M;
-
- rcar_lvds_write(lvds, LVDPLLCR, pllcr);
-
- /*
- * Select the input, hardcode mode 0, enable LVDS operation and turn
- * bias circuitry on.
- */
- lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN;
- if (rcrtc->index == 2)
- lvdcr0 |= LVDCR0_DUSEL;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- /* Turn all the channels on. */
- rcar_lvds_write(lvds, LVDCR1,
- LVDCR1_CHSTBY_GEN2(3) | LVDCR1_CHSTBY_GEN2(2) |
- LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) |
- LVDCR1_CLKSTBY_GEN2);
-
- /*
- * Turn the PLL on, wait for the startup delay, and turn the output
- * on.
- */
- lvdcr0 |= LVDCR0_PLLON;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- usleep_range(100, 150);
-
- lvdcr0 |= LVDCR0_LVRES;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-}
-
-static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
- struct rcar_du_crtc *rcrtc)
-{
- const struct drm_display_mode *mode = &rcrtc->crtc.mode;
- unsigned int freq = mode->clock;
- u32 lvdcr0;
- u32 pllcr;
-
- /* PLL clock configuration */
- if (freq < 42000)
- pllcr = LVDPLLCR_PLLDIVCNT_42M;
- else if (freq < 85000)
- pllcr = LVDPLLCR_PLLDIVCNT_85M;
- else if (freq < 128000)
- pllcr = LVDPLLCR_PLLDIVCNT_128M;
- else
- pllcr = LVDPLLCR_PLLDIVCNT_148M;
-
- rcar_lvds_write(lvds, LVDPLLCR, pllcr);
-
- /* Turn all the channels on. */
- rcar_lvds_write(lvds, LVDCR1,
- LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
- LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
- LVDCR1_CLKSTBY_GEN3);
-
- /*
- * Turn the PLL on, set it to LVDS normal mode, wait for the startup
- * delay and turn the output on.
- */
- lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- lvdcr0 |= LVDCR0_PWD;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- usleep_range(100, 150);
-
- lvdcr0 |= LVDCR0_LVRES;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-}
-
-static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
- struct rcar_du_crtc *rcrtc)
-{
- u32 lvdhcr;
- int ret;
-
- if (lvds->enabled)
- return 0;
-
- ret = clk_prepare_enable(lvds->clock);
- if (ret < 0)
- return ret;
-
- /*
- * Hardcode the channels and control signals routing for now.
- *
- * HSYNC -> CTRL0
- * VSYNC -> CTRL1
- * DISP -> CTRL2
- * 0 -> CTRL3
- */
- rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
- LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
- LVDCTRCR_CTR0SEL_HSYNC);
-
- if (rcar_du_needs(lvds->dev, RCAR_DU_QUIRK_LVDS_LANES))
- lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3)
- | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1);
- else
- lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 1)
- | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 3);
-
- rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
-
- /* Perform generation-specific initialization. */
- if (lvds->dev->info->gen < 3)
- rcar_du_lvdsenc_start_gen2(lvds, rcrtc);
- else
- rcar_du_lvdsenc_start_gen3(lvds, rcrtc);
-
- lvds->enabled = true;
-
- return 0;
-}
-
-static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
-{
- if (!lvds->enabled)
- return;
-
- rcar_lvds_write(lvds, LVDCR0, 0);
- rcar_lvds_write(lvds, LVDCR1, 0);
-
- clk_disable_unprepare(lvds->clock);
-
- lvds->enabled = false;
-}
-
-int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc,
- bool enable)
-{
- if (!enable) {
- rcar_du_lvdsenc_stop(lvds);
- return 0;
- } else if (crtc) {
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- return rcar_du_lvdsenc_start(lvds, rcrtc);
- } else
- return -EINVAL;
-}
-
-void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
- struct drm_display_mode *mode)
-{
- struct rcar_du_device *rcdu = lvds->dev;
-
- /*
- * The internal LVDS encoder has a restricted clock frequency operating
- * range (30MHz to 150MHz on Gen2, 25.175MHz to 148.5MHz on Gen3). Clamp
- * the clock accordingly.
- */
- if (rcdu->info->gen < 3)
- mode->clock = clamp(mode->clock, 30000, 150000);
- else
- mode->clock = clamp(mode->clock, 25175, 148500);
-}
-
-void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds,
- enum rcar_lvds_mode mode)
-{
- lvds->mode = mode;
-}
-
-static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
- struct platform_device *pdev)
-{
- struct resource *mem;
- char name[7];
-
- sprintf(name, "lvds.%u", lvds->index);
-
- mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
- lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(lvds->mmio))
- return PTR_ERR(lvds->mmio);
-
- lvds->clock = devm_clk_get(&pdev->dev, name);
- if (IS_ERR(lvds->clock)) {
- dev_err(&pdev->dev, "failed to get clock for %s\n", name);
- return PTR_ERR(lvds->clock);
- }
-
- return 0;
-}
-
-int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
-{
- struct platform_device *pdev = to_platform_device(rcdu->dev);
- struct rcar_du_lvdsenc *lvds;
- unsigned int i;
- int ret;
-
- for (i = 0; i < rcdu->info->num_lvds; ++i) {
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (lvds == NULL)
- return -ENOMEM;
-
- lvds->dev = rcdu;
- lvds->index = i;
- lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
- lvds->enabled = false;
-
- ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
- if (ret < 0)
- return ret;
-
- rcdu->lvds[i] = lvds;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
deleted file mode 100644
index 7218ac8..0000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * rcar_du_lvdsenc.h -- R-Car Display Unit LVDS Encoder
- *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_LVDSENC_H__
-#define __RCAR_DU_LVDSENC_H__
-
-#include <linux/io.h>
-#include <linux/module.h>
-
-struct rcar_drm_crtc;
-struct rcar_du_lvdsenc;
-
-enum rcar_lvds_input {
- RCAR_LVDS_INPUT_DU0,
- RCAR_LVDS_INPUT_DU1,
- RCAR_LVDS_INPUT_DU2,
-};
-
-/* Keep in sync with the LVDCR0.LVMD hardware register values. */
-enum rcar_lvds_mode {
- RCAR_LVDS_MODE_JEIDA = 0,
- RCAR_LVDS_MODE_MIRROR = 1,
- RCAR_LVDS_MODE_VESA = 4,
-};
-
-#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
-int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
-void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds,
- enum rcar_lvds_mode mode);
-int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
- struct drm_crtc *crtc, bool enable);
-void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
- struct drm_display_mode *mode);
-#else
-static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
-{
- return 0;
-}
-static inline void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds,
- enum rcar_lvds_mode mode)
-{
-}
-static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
- struct drm_crtc *crtc, bool enable)
-{
- return 0;
-}
-static inline void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
- struct drm_display_mode *mode)
-{
-}
-#endif
-
-#endif /* __RCAR_DU_LVDSENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of.c b/drivers/gpu/drm/rcar-du/rcar_du_of.c
new file mode 100644
index 0000000..68a0b82
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_du_of.c - Legacy DT bindings compatibility
+ *
+ * Copyright (C) 2018 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * Based on work from Jyri Sarha <jsarha@ti.com>
+ * Copyright (C) 2015 Texas Instruments
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_graph.h>
+#include <linux/slab.h>
+
+#include "rcar_du_crtc.h"
+#include "rcar_du_drv.h"
+
+/* -----------------------------------------------------------------------------
+ * Generic Overlay Handling
+ */
+
+struct rcar_du_of_overlay {
+ const char *compatible;
+ void *begin;
+ void *end;
+};
+
+#define RCAR_DU_OF_DTB(type, soc) \
+ extern char __dtb_rcar_du_of_##type##_##soc##_begin[]; \
+ extern char __dtb_rcar_du_of_##type##_##soc##_end[]
+
+#define RCAR_DU_OF_OVERLAY(type, soc) \
+ { \
+ .compatible = "renesas,du-" #soc, \
+ .begin = __dtb_rcar_du_of_##type##_##soc##_begin, \
+ .end = __dtb_rcar_du_of_##type##_##soc##_end, \
+ }
+
+static int __init rcar_du_of_apply_overlay(const struct rcar_du_of_overlay *dtbs,
+ const char *compatible)
+{
+ const struct rcar_du_of_overlay *dtb = NULL;
+ unsigned int i;
+ int ovcs_id;
+
+ for (i = 0; dtbs[i].compatible; ++i) {
+ if (!strcmp(dtbs[i].compatible, compatible)) {
+ dtb = &dtbs[i];
+ break;
+ }
+ }
+
+ if (!dtb)
+ return -ENODEV;
+
+ ovcs_id = 0;
+ return of_overlay_fdt_apply(dtb->begin, dtb->end - dtb->begin,
+ &ovcs_id);
+}
+
+static int __init rcar_du_of_add_property(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *name, const void *value,
+ int length)
+{
+ struct property *prop;
+ int ret = -ENOMEM;
+
+ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+
+ prop->name = kstrdup(name, GFP_KERNEL);
+ if (!prop->name)
+ goto out_err;
+
+ prop->value = kmemdup(value, length, GFP_KERNEL);
+ if (!prop->value)
+ goto out_err;
+
+ of_property_set_flag(prop, OF_DYNAMIC);
+
+ prop->length = length;
+
+ ret = of_changeset_add_property(ocs, np, prop);
+ if (!ret)
+ return 0;
+
+out_err:
+ kfree(prop->value);
+ kfree(prop->name);
+ kfree(prop);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * LVDS Overlays
+ */
+
+RCAR_DU_OF_DTB(lvds, r8a7790);
+RCAR_DU_OF_DTB(lvds, r8a7791);
+RCAR_DU_OF_DTB(lvds, r8a7793);
+RCAR_DU_OF_DTB(lvds, r8a7795);
+RCAR_DU_OF_DTB(lvds, r8a7796);
+
+static const struct rcar_du_of_overlay rcar_du_lvds_overlays[] __initconst = {
+ RCAR_DU_OF_OVERLAY(lvds, r8a7790),
+ RCAR_DU_OF_OVERLAY(lvds, r8a7791),
+ RCAR_DU_OF_OVERLAY(lvds, r8a7793),
+ RCAR_DU_OF_OVERLAY(lvds, r8a7795),
+ RCAR_DU_OF_OVERLAY(lvds, r8a7796),
+ { /* Sentinel */ },
+};
+
+static struct of_changeset rcar_du_lvds_changeset;
+
+static void __init rcar_du_of_lvds_patch_one(struct device_node *lvds,
+ const struct of_phandle_args *clk,
+ struct device_node *local,
+ struct device_node *remote)
+{
+ unsigned int psize;
+ unsigned int i;
+ __be32 value[4];
+ int ret;
+
+ /*
+ * Set the LVDS clocks property. This can't be performed by the overlay
+ * as the structure of the clock specifier has changed over time, and we
+	 * don't know at compile time which binding version the target system
+	 * uses.
+ */
+ if (clk->args_count >= ARRAY_SIZE(value) - 1)
+ return;
+
+ of_changeset_init(&rcar_du_lvds_changeset);
+
+ value[0] = cpu_to_be32(clk->np->phandle);
+ for (i = 0; i < clk->args_count; ++i)
+ value[i + 1] = cpu_to_be32(clk->args[i]);
+
+ psize = (clk->args_count + 1) * 4;
+ ret = rcar_du_of_add_property(&rcar_du_lvds_changeset, lvds,
+ "clocks", value, psize);
+ if (ret < 0)
+ goto done;
+
+ /*
+	 * Insert the node in the OF graph: patch the LVDS ports' remote-endpoint
+ * properties to point to the endpoints of the sibling nodes in the
+ * graph. This can't be performed by the overlay: on the input side the
+ * overlay would contain a phandle for the DU LVDS output port that
+ * would clash with the system DT, and on the output side the connection
+ * is board-specific.
+ */
+ value[0] = cpu_to_be32(local->phandle);
+ value[1] = cpu_to_be32(remote->phandle);
+
+ for (i = 0; i < 2; ++i) {
+ struct device_node *endpoint;
+
+ endpoint = of_graph_get_endpoint_by_regs(lvds, i, 0);
+ if (!endpoint) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = rcar_du_of_add_property(&rcar_du_lvds_changeset,
+ endpoint, "remote-endpoint",
+ &value[i], sizeof(value[i]));
+ of_node_put(endpoint);
+ if (ret < 0)
+ goto done;
+ }
+
+ ret = of_changeset_apply(&rcar_du_lvds_changeset);
+
+done:
+ if (ret < 0)
+ of_changeset_destroy(&rcar_du_lvds_changeset);
+}
+
+struct lvds_of_data {
+ struct resource res;
+ struct of_phandle_args clkspec;
+ struct device_node *local;
+ struct device_node *remote;
+};
+
+static void __init rcar_du_of_lvds_patch(const struct of_device_id *of_ids)
+{
+ const struct rcar_du_device_info *info;
+ const struct of_device_id *match;
+ struct lvds_of_data lvds_data[2] = { };
+ struct device_node *lvds_node;
+ struct device_node *soc_node;
+ struct device_node *du_node;
+ char compatible[22];
+ const char *soc_name;
+ unsigned int i;
+ int ret;
+
+ /* Get the DU node and exit if not present or disabled. */
+ du_node = of_find_matching_node_and_match(NULL, of_ids, &match);
+ if (!du_node || !of_device_is_available(du_node)) {
+ of_node_put(du_node);
+ return;
+ }
+
+ info = match->data;
+ soc_node = of_get_parent(du_node);
+
+ if (WARN_ON(info->num_lvds > ARRAY_SIZE(lvds_data)))
+ goto done;
+
+ /*
+	 * Skip if the LVDS nodes already exist.
+ *
+ * The nodes are searched based on the compatible string, which we
+ * construct from the SoC name found in the DU compatible string. As a
+	 * match has been found, we know the compatible string matches the
+	 * expected format and can thus skip some of the normal
+	 * string-manipulation safety checks.
+ */
+ soc_name = strchr(match->compatible, '-') + 1;
+ sprintf(compatible, "renesas,%s-lvds", soc_name);
+ lvds_node = of_find_compatible_node(NULL, NULL, compatible);
+ if (lvds_node) {
+ of_node_put(lvds_node);
+ return;
+ }
+
+ /*
+ * Parse the DU node and store the register specifier, the clock
+ * specifier and the local and remote endpoint of the LVDS link for
+ * later use.
+ */
+ for (i = 0; i < info->num_lvds; ++i) {
+ struct lvds_of_data *lvds = &lvds_data[i];
+ unsigned int port;
+ char name[7];
+ int index;
+
+ sprintf(name, "lvds.%u", i);
+ index = of_property_match_string(du_node, "clock-names", name);
+ if (index < 0)
+ continue;
+
+ ret = of_parse_phandle_with_args(du_node, "clocks",
+ "#clock-cells", index,
+ &lvds->clkspec);
+ if (ret < 0)
+ continue;
+
+ port = info->routes[RCAR_DU_OUTPUT_LVDS0 + i].port;
+
+ lvds->local = of_graph_get_endpoint_by_regs(du_node, port, 0);
+ if (!lvds->local)
+ continue;
+
+ lvds->remote = of_graph_get_remote_endpoint(lvds->local);
+ if (!lvds->remote)
+ continue;
+
+ index = of_property_match_string(du_node, "reg-names", name);
+ if (index < 0)
+ continue;
+
+ of_address_to_resource(du_node, index, &lvds->res);
+ }
+
+ /* Parse and apply the overlay. This will resolve phandles. */
+ ret = rcar_du_of_apply_overlay(rcar_du_lvds_overlays,
+ match->compatible);
+ if (ret < 0)
+ goto done;
+
+ /* Patch the newly created LVDS encoder nodes. */
+ for_each_child_of_node(soc_node, lvds_node) {
+ struct resource res;
+
+ if (!of_device_is_compatible(lvds_node, compatible))
+ continue;
+
+ /* Locate the lvds_data entry based on the resource start. */
+ ret = of_address_to_resource(lvds_node, 0, &res);
+ if (ret < 0)
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(lvds_data); ++i) {
+ if (lvds_data[i].res.start == res.start)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(lvds_data))
+ continue;
+
+ /* Patch the LVDS encoder. */
+ rcar_du_of_lvds_patch_one(lvds_node, &lvds_data[i].clkspec,
+ lvds_data[i].local,
+ lvds_data[i].remote);
+ }
+
+done:
+ for (i = 0; i < info->num_lvds; ++i) {
+ of_node_put(lvds_data[i].clkspec.np);
+ of_node_put(lvds_data[i].local);
+ of_node_put(lvds_data[i].remote);
+ }
+
+ of_node_put(soc_node);
+ of_node_put(du_node);
+}
+
+void __init rcar_du_of_init(const struct of_device_id *of_ids)
+{
+ rcar_du_of_lvds_patch(of_ids);
+}
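The compatibility helper above expects the DU node to be written against the legacy bindings, where the LVDS register ranges and clocks are folded into the DU node itself. Below is a minimal sketch of such a node, shown only to illustrate what rcar_du_of_lvds_patch() parses; the register ranges, clock phandles and labels are illustrative placeholders rather than values taken from a real board file.

	du: display@feb00000 {
		compatible = "renesas,du-r8a7790";
		reg = <0 0xfeb00000 0 0x70000>,
		      <0 0xfeb90000 0 0x1c>,
		      <0 0xfeb94000 0 0x1c>;
		reg-names = "du", "lvds.0", "lvds.1";
		clocks = <&du0_clk>, <&lvds0_clk>, <&lvds1_clk>;
		clock-names = "du.0", "lvds.0", "lvds.1";

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			/* On R8A7790 port 1 is the LVDS0 output. */
			port@1 {
				reg = <1>;
				endpoint {
					remote-endpoint = <&panel_in>;
				};
			};
		};
	};

The helper looks up "lvds.0" (and "lvds.1" where present) in clock-names and reg-names, records the LVDS output endpoint, applies the matching overlay from rcar_du_lvds_overlays[], and then patches the newly created lvds@feb90000 node so its clocks and remote-endpoint properties carry the information the legacy node described. This keeps old DTBs working while the LVDS encoder moves to its own driver and bindings.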
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of.h b/drivers/gpu/drm/rcar-du/rcar_du_of.h
new file mode 100644
index 0000000..c2e65a7
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * rcar_du_of.h - Legacy DT bindings compatibility
+ *
+ * Copyright (C) 2018 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+#ifndef __RCAR_DU_OF_H__
+#define __RCAR_DU_OF_H__
+
+#include <linux/init.h>
+
+struct of_device_id;
+
+#ifdef CONFIG_DRM_RCAR_LVDS
+void __init rcar_du_of_init(const struct of_device_id *of_ids);
+#else
+static inline void rcar_du_of_init(const struct of_device_id *of_ids) { }
+#endif /* CONFIG_DRM_RCAR_LVDS */
+
+#endif /* __RCAR_DU_OF_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts
new file mode 100644
index 0000000..579753e
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7790.dts
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_du_of_lvds_r8a7790.dts - Legacy LVDS DT bindings conversion for R8A7790
+ *
+ * Copyright (C) 2018 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+/dts-v1/;
+/plugin/;
+/ {
+ fragment@0 {
+ target-path = "/";
+ __overlay__ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ lvds@feb90000 {
+ compatible = "renesas,r8a7790-lvds";
+ reg = <0 0xfeb90000 0 0x1c>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds0_input: endpoint {
+ };
+ };
+ port@1 {
+ reg = <1>;
+ lvds0_out: endpoint {
+ };
+ };
+ };
+ };
+
+ lvds@feb94000 {
+ compatible = "renesas,r8a7790-lvds";
+ reg = <0 0xfeb94000 0 0x1c>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds1_input: endpoint {
+ };
+ };
+ port@1 {
+ reg = <1>;
+ lvds1_out: endpoint {
+ };
+ };
+ };
+ };
+ };
+ };
+
+ fragment@1 {
+ target-path = "/display@feb00000/ports";
+ __overlay__ {
+ port@1 {
+ endpoint {
+ remote-endpoint = <&lvds0_input>;
+ };
+ };
+ port@2 {
+ endpoint {
+ remote-endpoint = <&lvds1_input>;
+ };
+ };
+ };
+ };
+};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts
new file mode 100644
index 0000000..cb9da1f
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7791.dts
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_du_of_lvds_r8a7791.dts - Legacy LVDS DT bindings conversion for R8A7791
+ *
+ * Copyright (C) 2018 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+/dts-v1/;
+/plugin/;
+/ {
+ fragment@0 {
+ target-path = "/";
+ __overlay__ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ lvds@feb90000 {
+ compatible = "renesas,r8a7791-lvds";
+ reg = <0 0xfeb90000 0 0x1c>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds0_input: endpoint {
+ };
+ };
+ port@1 {
+ reg = <1>;
+ lvds0_out: endpoint {
+ };
+ };
+ };
+ };
+ };
+ };
+
+ fragment@1 {
+ target-path = "/display@feb00000/ports";
+ __overlay__ {
+ port@1 {
+ endpoint {
+ remote-endpoint = <&lvds0_input>;
+ };
+ };
+ };
+ };
+};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts
new file mode 100644
index 0000000..e7b8804
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7793.dts
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_du_of_lvds_r8a7793.dts - Legacy LVDS DT bindings conversion for R8A7793
+ *
+ * Copyright (C) 2018 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+/dts-v1/;
+/plugin/;
+/ {
+ fragment@0 {
+ target-path = "/";
+ __overlay__ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ lvds@feb90000 {
+ compatible = "renesas,r8a7793-lvds";
+ reg = <0 0xfeb90000 0 0x1c>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds0_input: endpoint {
+ };
+ };
+ port@1 {
+ reg = <1>;
+ lvds0_out: endpoint {
+ };
+ };
+ };
+ };
+ };
+ };
+
+ fragment@1 {
+ target-path = "/display@feb00000/ports";
+ __overlay__ {
+ port@1 {
+ endpoint {
+ remote-endpoint = <&lvds0_input>;
+ };
+ };
+ };
+ };
+};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts
new file mode 100644
index 0000000..a132744
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7795.dts
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_du_of_lvds_r8a7795.dts - Legacy LVDS DT bindings conversion for R8A7795
+ *
+ * Copyright (C) 2018 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+/dts-v1/;
+/plugin/;
+/ {
+ fragment@0 {
+ target-path = "/soc";
+ __overlay__ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ lvds@feb90000 {
+ compatible = "renesas,r8a7795-lvds";
+ reg = <0 0xfeb90000 0 0x14>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds0_input: endpoint {
+ };
+ };
+ port@1 {
+ reg = <1>;
+ lvds0_out: endpoint {
+ };
+ };
+ };
+ };
+ };
+ };
+
+ fragment@1 {
+ target-path = "/soc/display@feb00000/ports";
+ __overlay__ {
+ port@3 {
+ endpoint {
+ remote-endpoint = <&lvds0_input>;
+ };
+ };
+ };
+ };
+};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts
new file mode 100644
index 0000000..b23d646
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_of_lvds_r8a7796.dts
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_du_of_lvds_r8a7796.dts - Legacy LVDS DT bindings conversion for R8A7796
+ *
+ * Copyright (C) 2018 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+/dts-v1/;
+/plugin/;
+/ {
+ fragment@0 {
+ target-path = "/soc";
+ __overlay__ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ lvds@feb90000 {
+ compatible = "renesas,r8a7796-lvds";
+ reg = <0 0xfeb90000 0 0x14>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ lvds0_input: endpoint {
+ };
+ };
+ port@1 {
+ reg = <1>;
+ lvds0_out: endpoint {
+ };
+ };
+ };
+ };
+ };
+ };
+
+ fragment@1 {
+ target-path = "/soc/display@feb00000/ports";
+ __overlay__ {
+ port@3 {
+ endpoint {
+ remote-endpoint = <&lvds0_input>;
+ };
+ };
+ };
+ };
+};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 61833cc..68556bd 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -332,8 +332,8 @@ static void rcar_du_plane_write(struct rcar_du_group *rgrp,
static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
const struct rcar_du_plane_state *state)
{
- unsigned int src_x = state->state.src_x >> 16;
- unsigned int src_y = state->state.src_y >> 16;
+ unsigned int src_x = state->state.src.x1 >> 16;
+ unsigned int src_y = state->state.src.y1 >> 16;
unsigned int index = state->hwindex;
unsigned int pitch;
bool interlaced;
@@ -357,7 +357,7 @@ static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
dma[i] = gem->paddr + fb->offsets[i];
}
} else {
- pitch = state->state.src_w >> 16;
+ pitch = drm_rect_width(&state->state.src) >> 16;
dma[0] = 0;
dma[1] = 0;
}
@@ -521,6 +521,7 @@ static void rcar_du_plane_setup_format(struct rcar_du_group *rgrp,
const struct rcar_du_plane_state *state)
{
struct rcar_du_device *rcdu = rgrp->dev;
+ const struct drm_rect *dst = &state->state.dst;
if (rcdu->info->gen < 3)
rcar_du_plane_setup_format_gen2(rgrp, index, state);
@@ -528,10 +529,10 @@ static void rcar_du_plane_setup_format(struct rcar_du_group *rgrp,
rcar_du_plane_setup_format_gen3(rgrp, index, state);
/* Destination position and size */
- rcar_du_plane_write(rgrp, index, PnDSXR, state->state.crtc_w);
- rcar_du_plane_write(rgrp, index, PnDSYR, state->state.crtc_h);
- rcar_du_plane_write(rgrp, index, PnDPXR, state->state.crtc_x);
- rcar_du_plane_write(rgrp, index, PnDPYR, state->state.crtc_y);
+ rcar_du_plane_write(rgrp, index, PnDSXR, drm_rect_width(dst));
+ rcar_du_plane_write(rgrp, index, PnDSYR, drm_rect_height(dst));
+ rcar_du_plane_write(rgrp, index, PnDPXR, dst->x1);
+ rcar_du_plane_write(rgrp, index, PnDPYR, dst->y1);
if (rcdu->info->gen < 3) {
/* Wrap-around and blinking, disabled */
@@ -565,27 +566,43 @@ void __rcar_du_plane_setup(struct rcar_du_group *rgrp,
}
}
-static int rcar_du_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+int __rcar_du_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ const struct rcar_du_format_info **format)
{
- struct rcar_du_plane_state *rstate = to_rcar_plane_state(state);
- struct rcar_du_plane *rplane = to_rcar_plane(plane);
- struct rcar_du_device *rcdu = rplane->group->dev;
+ struct drm_device *dev = plane->dev;
+ struct drm_crtc_state *crtc_state;
+ int ret;
- if (!state->fb || !state->crtc) {
- rstate->format = NULL;
+ if (!state->crtc) {
+ /*
+ * The visible field is not reset by the DRM core but only
+	 * updated by drm_atomic_helper_check_plane_state(); set it manually.
+ */
+ state->visible = false;
+ *format = NULL;
return 0;
}
- if (state->src_w >> 16 != state->crtc_w ||
- state->src_h >> 16 != state->crtc_h) {
- dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__);
- return -EINVAL;
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret < 0)
+ return ret;
+
+ if (!state->visible) {
+ *format = NULL;
+ return 0;
}
- rstate->format = rcar_du_format_info(state->fb->format->format);
- if (rstate->format == NULL) {
- dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
+ *format = rcar_du_format_info(state->fb->format->format);
+ if (*format == NULL) {
+ dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
state->fb->format->format);
return -EINVAL;
}
@@ -593,6 +610,14 @@ static int rcar_du_plane_atomic_check(struct drm_plane *plane,
return 0;
}
+static int rcar_du_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct rcar_du_plane_state *rstate = to_rcar_plane_state(state);
+
+ return __rcar_du_plane_atomic_check(plane, state, &rstate->format);
+}
+
static void rcar_du_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -600,7 +625,7 @@ static void rcar_du_plane_atomic_update(struct drm_plane *plane,
struct rcar_du_plane_state *old_rstate;
struct rcar_du_plane_state *new_rstate;
- if (!plane->state->crtc)
+ if (!plane->state->visible)
return;
rcar_du_plane_setup(rplane);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index f62e09f..890321b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -73,6 +73,10 @@ to_rcar_plane_state(struct drm_plane_state *state)
int rcar_du_atomic_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
+int __rcar_du_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ const struct rcar_du_format_info **format);
+
int rcar_du_planes_init(struct rcar_du_group *rgrp);
void __rcar_du_plane_setup(struct rcar_du_group *rgrp,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 2c96147..2c260c3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -55,14 +55,14 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
struct rcar_du_plane_state state = {
.state = {
.crtc = &crtc->crtc,
- .crtc_x = 0,
- .crtc_y = 0,
- .crtc_w = mode->hdisplay,
- .crtc_h = mode->vdisplay,
- .src_x = 0,
- .src_y = 0,
- .src_w = mode->hdisplay << 16,
- .src_h = mode->vdisplay << 16,
+ .dst.x1 = 0,
+ .dst.y1 = 0,
+ .dst.x2 = mode->hdisplay,
+ .dst.y2 = mode->vdisplay,
+ .src.x1 = 0,
+ .src.y1 = 0,
+ .src.x2 = mode->hdisplay << 16,
+ .src.y2 = mode->vdisplay << 16,
.zpos = 0,
},
.format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
@@ -178,15 +178,15 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
};
unsigned int i;
- cfg.src.left = state->state.src_x >> 16;
- cfg.src.top = state->state.src_y >> 16;
- cfg.src.width = state->state.src_w >> 16;
- cfg.src.height = state->state.src_h >> 16;
+ cfg.src.left = state->state.src.x1 >> 16;
+ cfg.src.top = state->state.src.y1 >> 16;
+ cfg.src.width = drm_rect_width(&state->state.src) >> 16;
+ cfg.src.height = drm_rect_height(&state->state.src) >> 16;
- cfg.dst.left = state->state.crtc_x;
- cfg.dst.top = state->state.crtc_y;
- cfg.dst.width = state->state.crtc_w;
- cfg.dst.height = state->state.crtc_h;
+ cfg.dst.left = state->state.dst.x1;
+ cfg.dst.top = state->state.dst.y1;
+ cfg.dst.width = drm_rect_width(&state->state.dst);
+ cfg.dst.height = drm_rect_height(&state->state.dst);
for (i = 0; i < state->format->planes; ++i)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
@@ -212,7 +212,11 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
unsigned int i;
int ret;
- if (!state->fb)
+ /*
+ * There's no need to prepare (and unprepare) the framebuffer when the
+ * plane is not visible, as it will not be displayed.
+ */
+ if (!state->visible)
return 0;
for (i = 0; i < rstate->format->planes; ++i) {
@@ -253,7 +257,7 @@ static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
unsigned int i;
- if (!state->fb)
+ if (!state->visible)
return;
for (i = 0; i < rstate->format->planes; ++i) {
@@ -268,28 +272,8 @@ static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
- struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
- struct rcar_du_device *rcdu = rplane->vsp->dev;
- if (!state->fb || !state->crtc) {
- rstate->format = NULL;
- return 0;
- }
-
- if (state->src_w >> 16 != state->crtc_w ||
- state->src_h >> 16 != state->crtc_h) {
- dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__);
- return -EINVAL;
- }
-
- rstate->format = rcar_du_format_info(state->fb->format->format);
- if (rstate->format == NULL) {
- dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
- state->fb->format->format);
- return -EINVAL;
- }
-
- return 0;
+ return __rcar_du_plane_atomic_check(plane, state, &rstate->format);
}
static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
@@ -298,7 +282,7 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
struct rcar_du_crtc *crtc = to_rcar_crtc(old_state->crtc);
- if (plane->state->crtc)
+ if (plane->state->visible)
rcar_du_vsp_plane_setup(rplane);
else
vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index f876c51..4c5d7bb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -45,7 +45,6 @@ static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p)
* @format: information about the pixel format used by the plane
* @sg_tables: scatter-gather tables for the frame buffer memory
* @alpha: value of the plane alpha property
- * @zpos: value of the plane zpos property
*/
struct rcar_du_vsp_plane_state {
struct drm_plane_state state;
@@ -54,7 +53,6 @@ struct rcar_du_vsp_plane_state {
struct sg_table sg_tables[3];
unsigned int alpha;
- unsigned int zpos;
};
static inline struct rcar_du_vsp_plane_state *
diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
index dc85b53..76210ae 100644
--- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
@@ -68,12 +68,22 @@ static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = {
static int rcar_dw_hdmi_probe(struct platform_device *pdev)
{
- return dw_hdmi_probe(pdev, &rcar_dw_hdmi_plat_data);
+ struct dw_hdmi *hdmi;
+
+ hdmi = dw_hdmi_probe(pdev, &rcar_dw_hdmi_plat_data);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
}
static int rcar_dw_hdmi_remove(struct platform_device *pdev)
{
- dw_hdmi_remove(pdev);
+ struct dw_hdmi *hdmi = platform_get_drvdata(pdev);
+
+ dw_hdmi_remove(hdmi);
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
new file mode 100644
index 0000000..3d2d3bb
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_lvds.c -- R-Car LVDS Encoder
+ *
+ * Copyright (C) 2013-2018 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "rcar_lvds_regs.h"
+
+/* Keep in sync with the LVDCR0.LVMD hardware register values. */
+enum rcar_lvds_mode {
+ RCAR_LVDS_MODE_JEIDA = 0,
+ RCAR_LVDS_MODE_MIRROR = 1,
+ RCAR_LVDS_MODE_VESA = 4,
+};
+
+#define RCAR_LVDS_QUIRK_LANES (1 << 0) /* LVDS lanes 1 and 3 inverted */
+#define RCAR_LVDS_QUIRK_GEN2_PLLCR (1 << 1) /* LVDPLLCR has gen2 layout */
+#define RCAR_LVDS_QUIRK_GEN3_LVEN (1 << 2) /* LVEN bit needs to be set */
+ /* on R8A77970/R8A7799x */
+
+struct rcar_lvds_device_info {
+ unsigned int gen;
+ unsigned int quirks;
+};
+
+struct rcar_lvds {
+ struct device *dev;
+ const struct rcar_lvds_device_info *info;
+
+ struct drm_bridge bridge;
+
+ struct drm_bridge *next_bridge;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+
+ void __iomem *mmio;
+ struct clk *clock;
+ bool enabled;
+
+ struct drm_display_mode display_mode;
+ enum rcar_lvds_mode mode;
+};
+
+#define bridge_to_rcar_lvds(bridge) \
+ container_of(bridge, struct rcar_lvds, bridge)
+
+#define connector_to_rcar_lvds(connector) \
+ container_of(connector, struct rcar_lvds, connector)
+
+static void rcar_lvds_write(struct rcar_lvds *lvds, u32 reg, u32 data)
+{
+ iowrite32(data, lvds->mmio + reg);
+}
+
+/* -----------------------------------------------------------------------------
+ * Connector & Panel
+ */
+
+static int rcar_lvds_connector_get_modes(struct drm_connector *connector)
+{
+ struct rcar_lvds *lvds = connector_to_rcar_lvds(connector);
+
+ return drm_panel_get_modes(lvds->panel);
+}
+
+static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct rcar_lvds *lvds = connector_to_rcar_lvds(connector);
+ const struct drm_display_mode *panel_mode;
+ struct drm_crtc_state *crtc_state;
+
+ if (list_empty(&connector->modes)) {
+ dev_dbg(lvds->dev, "connector: empty modes list\n");
+ return -EINVAL;
+ }
+
+ panel_mode = list_first_entry(&connector->modes,
+ struct drm_display_mode, head);
+
+ /* We're not allowed to modify the resolution. */
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (crtc_state->mode.hdisplay != panel_mode->hdisplay ||
+ crtc_state->mode.vdisplay != panel_mode->vdisplay)
+ return -EINVAL;
+
+ /* The flat panel mode is fixed, just copy it to the adjusted mode. */
+ drm_mode_copy(&crtc_state->adjusted_mode, panel_mode);
+
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs rcar_lvds_conn_helper_funcs = {
+ .get_modes = rcar_lvds_connector_get_modes,
+ .atomic_check = rcar_lvds_connector_atomic_check,
+};
+
+static const struct drm_connector_funcs rcar_lvds_conn_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge
+ */
+
+static u32 rcar_lvds_lvdpllcr_gen2(unsigned int freq)
+{
+ if (freq < 39000)
+ return LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
+ else if (freq < 61000)
+ return LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
+ else if (freq < 121000)
+ return LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
+ else
+ return LVDPLLCR_PLLDLYCNT_150M;
+}
+
+static u32 rcar_lvds_lvdpllcr_gen3(unsigned int freq)
+{
+ if (freq < 42000)
+ return LVDPLLCR_PLLDIVCNT_42M;
+ else if (freq < 85000)
+ return LVDPLLCR_PLLDIVCNT_85M;
+ else if (freq < 128000)
+ return LVDPLLCR_PLLDIVCNT_128M;
+ else
+ return LVDPLLCR_PLLDIVCNT_148M;
+}
+
+static void rcar_lvds_enable(struct drm_bridge *bridge)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+ const struct drm_display_mode *mode = &lvds->display_mode;
+ /*
+ * FIXME: We should really retrieve the CRTC through the state, but how
+ * do we get a state pointer?
+ */
+ struct drm_crtc *crtc = lvds->bridge.encoder->crtc;
+ u32 lvdpllcr;
+ u32 lvdhcr;
+ u32 lvdcr0;
+ int ret;
+
+ WARN_ON(lvds->enabled);
+
+ ret = clk_prepare_enable(lvds->clock);
+ if (ret < 0)
+ return;
+
+ /*
+ * Hardcode the channels and control signals routing for now.
+ *
+ * HSYNC -> CTRL0
+ * VSYNC -> CTRL1
+ * DISP -> CTRL2
+ * 0 -> CTRL3
+ */
+ rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
+ LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
+ LVDCTRCR_CTR0SEL_HSYNC);
+
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_LANES)
+ lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3)
+ | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1);
+ else
+ lvdhcr = LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 1)
+ | LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 3);
+
+ rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
+
+ /* PLL clock configuration. */
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN2_PLLCR)
+ lvdpllcr = rcar_lvds_lvdpllcr_gen2(mode->clock);
+ else
+ lvdpllcr = rcar_lvds_lvdpllcr_gen3(mode->clock);
+ rcar_lvds_write(lvds, LVDPLLCR, lvdpllcr);
+
+ /* Set the LVDS mode and select the input. */
+ lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
+ if (drm_crtc_index(crtc) == 2)
+ lvdcr0 |= LVDCR0_DUSEL;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ /* Turn all the channels on. */
+ rcar_lvds_write(lvds, LVDCR1,
+ LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
+ LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
+
+ if (lvds->info->gen < 3) {
+ /* Enable LVDS operation and turn the bias circuitry on. */
+ lvdcr0 |= LVDCR0_BEN | LVDCR0_LVEN;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ }
+
+ /* Turn the PLL on. */
+ lvdcr0 |= LVDCR0_PLLON;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ if (lvds->info->gen > 2) {
+ /* Set LVDS normal mode. */
+ lvdcr0 |= LVDCR0_PWD;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ }
+
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) {
+ /* Turn on the LVDS PHY. */
+ lvdcr0 |= LVDCR0_LVEN;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ }
+
+ /* Wait for the startup delay. */
+ usleep_range(100, 150);
+
+ /* Turn the output on. */
+ lvdcr0 |= LVDCR0_LVRES;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ if (lvds->panel) {
+ drm_panel_prepare(lvds->panel);
+ drm_panel_enable(lvds->panel);
+ }
+
+ lvds->enabled = true;
+}
+
+static void rcar_lvds_disable(struct drm_bridge *bridge)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+ WARN_ON(!lvds->enabled);
+
+ if (lvds->panel) {
+ drm_panel_disable(lvds->panel);
+ drm_panel_unprepare(lvds->panel);
+ }
+
+ rcar_lvds_write(lvds, LVDCR0, 0);
+ rcar_lvds_write(lvds, LVDCR1, 0);
+
+ clk_disable_unprepare(lvds->clock);
+
+ lvds->enabled = false;
+}
+
+static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /*
+ * The internal LVDS encoder has a restricted clock frequency operating
+ * range (31MHz to 148.5MHz). Clamp the clock accordingly.
+ */
+ adjusted_mode->clock = clamp(adjusted_mode->clock, 31000, 148500);
+
+ return true;
+}
+
+static void rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds)
+{
+ struct drm_display_info *info = &lvds->connector.display_info;
+ enum rcar_lvds_mode mode;
+
+ /*
+	 * There is no API yet to retrieve LVDS mode from a bridge; only panels
+ * are supported.
+ */
+ if (!lvds->panel)
+ return;
+
+ if (!info->num_bus_formats || !info->bus_formats) {
+ dev_err(lvds->dev, "no LVDS bus format reported\n");
+ return;
+ }
+
+ switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ mode = RCAR_LVDS_MODE_JEIDA;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ mode = RCAR_LVDS_MODE_VESA;
+ break;
+ default:
+ dev_err(lvds->dev, "unsupported LVDS bus format 0x%04x\n",
+ info->bus_formats[0]);
+ return;
+ }
+
+ if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
+ mode |= RCAR_LVDS_MODE_MIRROR;
+
+ lvds->mode = mode;
+}
+
+static void rcar_lvds_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+ WARN_ON(lvds->enabled);
+
+ lvds->display_mode = *adjusted_mode;
+
+ rcar_lvds_get_lvds_mode(lvds);
+}
+
+static int rcar_lvds_attach(struct drm_bridge *bridge)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+ struct drm_connector *connector = &lvds->connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ int ret;
+
+ /* If we have a next bridge just attach it. */
+ if (lvds->next_bridge)
+ return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
+ bridge);
+
+ /* Otherwise we have a panel, create a connector. */
+ ret = drm_connector_init(bridge->dev, connector, &rcar_lvds_conn_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret < 0)
+ return ret;
+
+ drm_connector_helper_add(connector, &rcar_lvds_conn_helper_funcs);
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ return ret;
+
+ return drm_panel_attach(lvds->panel, connector);
+}
+
+static void rcar_lvds_detach(struct drm_bridge *bridge)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+ if (lvds->panel)
+ drm_panel_detach(lvds->panel);
+}
+
+static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
+ .attach = rcar_lvds_attach,
+ .detach = rcar_lvds_detach,
+ .enable = rcar_lvds_enable,
+ .disable = rcar_lvds_disable,
+ .mode_fixup = rcar_lvds_mode_fixup,
+ .mode_set = rcar_lvds_mode_set,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove
+ */
+
+static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
+{
+ struct device_node *local_output = NULL;
+ struct device_node *remote_input = NULL;
+ struct device_node *remote = NULL;
+ struct device_node *node;
+ bool is_bridge = false;
+ int ret = 0;
+
+ local_output = of_graph_get_endpoint_by_regs(lvds->dev->of_node, 1, 0);
+ if (!local_output) {
+ dev_dbg(lvds->dev, "unconnected port@1\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Locate the connected entity and infer its type from the number of
+ * endpoints.
+ */
+ remote = of_graph_get_remote_port_parent(local_output);
+ if (!remote) {
+ dev_dbg(lvds->dev, "unconnected endpoint %pOF\n", local_output);
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (!of_device_is_available(remote)) {
+ dev_dbg(lvds->dev, "connected entity %pOF is disabled\n",
+ remote);
+ ret = -ENODEV;
+ goto done;
+ }
+
+ remote_input = of_graph_get_remote_endpoint(local_output);
+
+ for_each_endpoint_of_node(remote, node) {
+ if (node != remote_input) {
+ /*
+			 * We've found one endpoint other than the input; this
+ * must be a bridge.
+ */
+ is_bridge = true;
+ of_node_put(node);
+ break;
+ }
+ }
+
+ if (is_bridge) {
+ lvds->next_bridge = of_drm_find_bridge(remote);
+ if (!lvds->next_bridge)
+ ret = -EPROBE_DEFER;
+ } else {
+ lvds->panel = of_drm_find_panel(remote);
+ if (!lvds->panel)
+ ret = -EPROBE_DEFER;
+ }
+
+done:
+ of_node_put(local_output);
+ of_node_put(remote_input);
+ of_node_put(remote);
+
+ return ret;
+}
+
+static int rcar_lvds_probe(struct platform_device *pdev)
+{
+ struct rcar_lvds *lvds;
+ struct resource *mem;
+ int ret;
+
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+ if (lvds == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lvds);
+
+ lvds->dev = &pdev->dev;
+ lvds->info = of_device_get_match_data(&pdev->dev);
+ lvds->enabled = false;
+
+ ret = rcar_lvds_parse_dt(lvds);
+ if (ret < 0)
+ return ret;
+
+ lvds->bridge.driver_private = lvds;
+ lvds->bridge.funcs = &rcar_lvds_bridge_ops;
+ lvds->bridge.of_node = pdev->dev.of_node;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(lvds->mmio))
+ return PTR_ERR(lvds->mmio);
+
+ lvds->clock = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lvds->clock)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(lvds->clock);
+ }
+
+ drm_bridge_add(&lvds->bridge);
+
+ return 0;
+}
+
+static int rcar_lvds_remove(struct platform_device *pdev)
+{
+ struct rcar_lvds *lvds = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&lvds->bridge);
+
+ return 0;
+}
+
+static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
+ .gen = 2,
+ .quirks = RCAR_LVDS_QUIRK_GEN2_PLLCR,
+};
+
+static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = {
+ .gen = 2,
+ .quirks = RCAR_LVDS_QUIRK_GEN2_PLLCR | RCAR_LVDS_QUIRK_LANES,
+};
+
+static const struct rcar_lvds_device_info rcar_lvds_gen3_info = {
+ .gen = 3,
+};
+
+static const struct rcar_lvds_device_info rcar_lvds_r8a77970_info = {
+ .gen = 3,
+ .quirks = RCAR_LVDS_QUIRK_GEN2_PLLCR | RCAR_LVDS_QUIRK_GEN3_LVEN,
+};
+
+static const struct of_device_id rcar_lvds_of_table[] = {
+ { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
+ { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
+ { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
+ { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
+ { .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, rcar_lvds_of_table);
+
+static struct platform_driver rcar_lvds_platform_driver = {
+ .probe = rcar_lvds_probe,
+ .remove = rcar_lvds_remove,
+ .driver = {
+ .name = "rcar-lvds",
+ .of_match_table = rcar_lvds_of_table,
+ },
+};
+
+module_platform_driver(rcar_lvds_platform_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas R-Car LVDS Encoder Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
index d7d294b..2896835 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -26,10 +26,8 @@
#define LVDCR1 0x0004
#define LVDCR1_CKSEL (1 << 15) /* Gen2 only */
-#define LVDCR1_CHSTBY_GEN2(n) (3 << (2 + (n) * 2)) /* Gen2 only */
-#define LVDCR1_CHSTBY_GEN3(n) (1 << (2 + (n) * 2)) /* Gen3 only */
-#define LVDCR1_CLKSTBY_GEN2 (3 << 0) /* Gen2 only */
-#define LVDCR1_CLKSTBY_GEN3 (1 << 0) /* Gen3 only */
+#define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2))
+#define LVDCR1_CLKSTBY (3 << 0)
#define LVDPLLCR 0x0008
#define LVDPLLCR_CEEN (1 << 14)
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 93b7102..3e8bf79 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -71,40 +71,22 @@ struct rockchip_dp_device {
struct regmap *grf;
struct reset_control *rst;
- struct work_struct psr_work;
- struct mutex psr_lock;
- unsigned int psr_state;
-
const struct rockchip_dp_chip_data *data;
+ struct analogix_dp_device *adp;
struct analogix_dp_plat_data plat_data;
};
static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
{
struct rockchip_dp_device *dp = to_dp(encoder);
+ int ret;
- if (!analogix_dp_psr_supported(dp->dev))
+ if (!analogix_dp_psr_enabled(dp->adp))
return;
DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
- mutex_lock(&dp->psr_lock);
- if (enabled)
- dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE;
- else
- dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
-
- schedule_work(&dp->psr_work);
- mutex_unlock(&dp->psr_lock);
-}
-
-static void analogix_dp_psr_work(struct work_struct *work)
-{
- struct rockchip_dp_device *dp =
- container_of(work, typeof(*dp), psr_work);
- int ret;
-
ret = rockchip_drm_wait_vact_end(dp->encoder.crtc,
PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
if (ret) {
@@ -112,12 +94,10 @@ static void analogix_dp_psr_work(struct work_struct *work)
return;
}
- mutex_lock(&dp->psr_lock);
- if (dp->psr_state == EDP_VSC_PSR_STATE_ACTIVE)
- analogix_dp_enable_psr(dp->dev);
+ if (enabled)
+ analogix_dp_enable_psr(dp->adp);
else
- analogix_dp_disable_psr(dp->dev);
- mutex_unlock(&dp->psr_lock);
+ analogix_dp_disable_psr(dp->adp);
}
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
@@ -134,8 +114,6 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
struct rockchip_dp_device *dp = to_dp(plat_data);
int ret;
- cancel_work_sync(&dp->psr_work);
-
ret = clk_prepare_enable(dp->pclk);
if (ret < 0) {
DRM_DEV_ERROR(dp->dev, "failed to enable pclk %d\n", ret);
@@ -149,12 +127,17 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
return ret;
}
- return 0;
+ return rockchip_drm_psr_activate(&dp->encoder);
}
static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
{
struct rockchip_dp_device *dp = to_dp(plat_data);
+ int ret;
+
+ ret = rockchip_drm_psr_deactivate(&dp->encoder);
+ if (ret != 0)
+ return ret;
clk_disable_unprepare(dp->pclk);
@@ -258,20 +241,14 @@ static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
.atomic_check = rockchip_dp_drm_encoder_atomic_check,
};
-static void rockchip_dp_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static struct drm_encoder_funcs rockchip_dp_encoder_funcs = {
- .destroy = rockchip_dp_drm_encoder_destroy,
+ .destroy = drm_encoder_cleanup,
};
-static int rockchip_dp_init(struct rockchip_dp_device *dp)
+static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
{
struct device *dev = dp->dev;
struct device_node *np = dev->of_node;
- int ret;
dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dp->grf)) {
@@ -301,19 +278,6 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
return PTR_ERR(dp->rst);
}
- ret = clk_prepare_enable(dp->pclk);
- if (ret < 0) {
- DRM_DEV_ERROR(dp->dev, "failed to enable pclk %d\n", ret);
- return ret;
- }
-
- ret = rockchip_dp_pre_init(dp);
- if (ret < 0) {
- DRM_DEV_ERROR(dp->dev, "failed to pre init %d\n", ret);
- clk_disable_unprepare(dp->pclk);
- return ret;
- }
-
return 0;
}
@@ -348,21 +312,10 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
struct drm_device *drm_dev = data;
int ret;
- /*
- * Just like the probe function said, we don't need the
- * device drvrate anymore, we should leave the charge to
- * analogix dp driver, set the device drvdata to NULL.
- */
- dev_set_drvdata(dev, NULL);
-
dp_data = of_device_get_match_data(dev);
if (!dp_data)
return -ENODEV;
- ret = rockchip_dp_init(dp);
- if (ret < 0)
- return ret;
-
dp->data = dp_data;
dp->drm_dev = drm_dev;
@@ -379,13 +332,22 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.power_off = rockchip_dp_powerdown;
dp->plat_data.get_modes = rockchip_dp_get_modes;
- mutex_init(&dp->psr_lock);
- dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
- INIT_WORK(&dp->psr_work, analogix_dp_psr_work);
+ ret = rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set);
+ if (ret < 0)
+ goto err_cleanup_encoder;
- rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set);
+ dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
+ if (IS_ERR(dp->adp)) {
+ ret = PTR_ERR(dp->adp);
+ goto err_unreg_psr;
+ }
- return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
+ return 0;
+err_unreg_psr:
+ rockchip_drm_psr_unregister(&dp->encoder);
+err_cleanup_encoder:
+ dp->encoder.funcs->destroy(&dp->encoder);
+ return ret;
}
static void rockchip_dp_unbind(struct device *dev, struct device *master,
@@ -393,10 +355,9 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
{
struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+ analogix_dp_unbind(dp->adp);
rockchip_drm_psr_unregister(&dp->encoder);
-
- analogix_dp_unbind(dev, master, data);
- clk_disable_unprepare(dp->pclk);
+ dp->encoder.funcs->destroy(&dp->encoder);
}
static const struct component_ops rockchip_dp_component_ops = {
@@ -412,7 +373,7 @@ static int rockchip_dp_probe(struct platform_device *pdev)
int ret;
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret)
+ if (ret < 0)
return ret;
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
@@ -420,14 +381,12 @@ static int rockchip_dp_probe(struct platform_device *pdev)
return -ENOMEM;
dp->dev = dev;
-
dp->plat_data.panel = panel;
- /*
- * We just use the drvdata until driver run into component
- * add function, and then we would set drvdata to null, so
- * that analogix dp driver could take charge of the drvdata.
- */
+ ret = rockchip_dp_of_probe(dp);
+ if (ret < 0)
+ return ret;
+
platform_set_drvdata(pdev, dp);
return component_add(dev, &rockchip_dp_component_ops);
@@ -440,10 +399,26 @@ static int rockchip_dp_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_dp_suspend(struct device *dev)
+{
+ struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+
+ return analogix_dp_suspend(dp->adp);
+}
+
+static int rockchip_dp_resume(struct device *dev)
+{
+ struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+
+ return analogix_dp_resume(dp->adp);
+}
+#endif
+
static const struct dev_pm_ops rockchip_dp_pm_ops = {
#ifdef CONFIG_PM_SLEEP
- .suspend = analogix_dp_suspend,
- .resume_early = analogix_dp_resume,
+ .suspend = rockchip_dp_suspend,
+ .resume_early = rockchip_dp_resume,
#endif
};
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 275844d..c6fbdcd 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -43,8 +43,6 @@
#define GRF_SOC_CON9 0x6224
#define DP_SEL_VOP_LIT BIT(12)
#define GRF_SOC_CON26 0x6268
-#define UPHY_SEL_BIT 3
-#define UPHY_SEL_MASK BIT(19)
#define DPTX_HPD_SEL (3 << 12)
#define DPTX_HPD_DEL (2 << 12)
#define DPTX_HPD_SEL_MASK (3 << 28)
@@ -276,11 +274,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
dp->sink_has_audio = drm_detect_monitor_audio(edid);
ret = drm_add_edid_modes(connector, edid);
- if (ret) {
+ if (ret)
drm_mode_connector_update_edid_property(connector,
edid);
- drm_edid_to_eld(connector, edid);
- }
}
mutex_unlock(&dp->lock);
@@ -396,11 +392,6 @@ static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
union extcon_property_value property;
int ret;
- ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
- (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK);
- if (ret)
- return ret;
-
if (!port->phy_enabled) {
ret = phy_power_on(port->phy);
if (ret) {
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index b1fe063..d53d5a0 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -1202,9 +1202,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
return ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
dsi->base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->base))
return PTR_ERR(dsi->base);
@@ -1305,8 +1302,8 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
err_mipi_dsi_host:
mipi_dsi_host_unregister(&dsi->dsi_host);
err_cleanup:
- drm_encoder_cleanup(&dsi->encoder);
- drm_connector_cleanup(&dsi->connector);
+ dsi->connector.funcs->destroy(&dsi->connector);
+ dsi->encoder.funcs->destroy(&dsi->encoder);
err_pllref:
clk_disable_unprepare(dsi->pllref_clk);
return ret;
@@ -1319,6 +1316,10 @@ static void dw_mipi_dsi_unbind(struct device *dev, struct device *master,
mipi_dsi_host_unregister(&dsi->dsi_host);
pm_runtime_disable(dev);
+
+ dsi->connector.funcs->destroy(&dsi->connector);
+ dsi->encoder.funcs->destroy(&dsi->encoder);
+
clk_disable_unprepare(dsi->pllref_clk);
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 1eb02a8..11309a2 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -48,6 +48,7 @@ struct rockchip_hdmi {
const struct rockchip_hdmi_chip_data *chip_data;
struct clk *vpll_clk;
struct clk *grf_clk;
+ struct dw_hdmi *hdmi;
};
#define to_rockchip_hdmi(x) container_of(x, struct rockchip_hdmi, x)
@@ -164,7 +165,6 @@ static const struct dw_hdmi_phy_config rockchip_phy_config[] = {
static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
{
struct device_node *np = hdmi->dev->of_node;
- int ret;
hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
@@ -192,13 +192,6 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
return PTR_ERR(hdmi->grf_clk);
}
- ret = clk_prepare_enable(hdmi->vpll_clk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev,
- "Failed to enable HDMI vpll: %d\n", ret);
- return ret;
- }
-
return 0;
}
@@ -373,18 +366,30 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
+ ret = clk_prepare_enable(hdmi->vpll_clk);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+ ret);
+ return ret;
+ }
+
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- ret = dw_hdmi_bind(pdev, encoder, plat_data);
+ platform_set_drvdata(pdev, hdmi);
+
+ hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
/*
* If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
* which would have called the encoder cleanup. Do it manually.
*/
- if (ret)
+ if (IS_ERR(hdmi->hdmi)) {
+ ret = PTR_ERR(hdmi->hdmi);
drm_encoder_cleanup(encoder);
+ clk_disable_unprepare(hdmi->vpll_clk);
+ }
return ret;
}
@@ -392,7 +397,10 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
void *data)
{
- return dw_hdmi_unbind(dev);
+ struct rockchip_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_unbind(hdmi->hdmi);
+ clk_disable_unprepare(hdmi->vpll_clk);
}
static const struct component_ops dw_hdmi_rockchip_ops = {
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index ee584d8..88d0774 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -282,6 +282,7 @@ static int inno_hdmi_config_video_vsi(struct inno_hdmi *hdmi,
int rc;
rc = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ &hdmi->connector,
mode);
return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_VSI,
@@ -830,9 +831,6 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
hdmi->drm_dev = drm;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENXIO;
-
hdmi->regs = devm_ioremap_resource(dev, iores);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
@@ -851,8 +849,10 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ ret = irq;
+ goto err_disable_clk;
+ }
inno_hdmi_reset(hdmi);
@@ -860,7 +860,7 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(hdmi->ddc)) {
ret = PTR_ERR(hdmi->ddc);
hdmi->ddc = NULL;
- return ret;
+ goto err_disable_clk;
}
/*
@@ -874,7 +874,7 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
ret = inno_hdmi_register(drm, hdmi);
if (ret)
- return ret;
+ goto err_put_adapter;
dev_set_drvdata(dev, hdmi);
@@ -884,7 +884,17 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq,
inno_hdmi_irq, IRQF_SHARED,
dev_name(dev), hdmi);
+ if (ret < 0)
+ goto err_cleanup_hdmi;
+ return 0;
+err_cleanup_hdmi:
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+err_put_adapter:
+ i2c_put_adapter(hdmi->ddc);
+err_disable_clk:
+ clk_disable_unprepare(hdmi->pclk);
return ret;
}
@@ -896,8 +906,8 @@ static void inno_hdmi_unbind(struct device *dev, struct device *master,
hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.funcs->destroy(&hdmi->encoder);
- clk_disable_unprepare(hdmi->pclk);
i2c_put_adapter(hdmi->ddc);
+ clk_disable_unprepare(hdmi->pclk);
}
static const struct component_ops inno_hdmi_ops = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 76d63de..f814d37 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -134,7 +134,7 @@ static int rockchip_drm_bind(struct device *dev)
drm_dev->dev_private = private;
INIT_LIST_HEAD(&private->psr_list);
- spin_lock_init(&private->psr_list_lock);
+ mutex_init(&private->psr_list_lock);
ret = rockchip_drm_init_iommu(drm_dev);
if (ret)
@@ -207,13 +207,6 @@ static void rockchip_drm_unbind(struct device *dev)
drm_dev_unref(drm_dev);
}
-static void rockchip_drm_lastclose(struct drm_device *dev)
-{
- struct rockchip_drm_private *priv = dev->dev_private;
-
- drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper);
-}
-
static const struct file_operations rockchip_drm_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -228,7 +221,7 @@ static const struct file_operations rockchip_drm_driver_fops = {
static struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_PRIME | DRIVER_ATOMIC,
- .lastclose = rockchip_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
@@ -237,6 +230,7 @@ static struct drm_driver rockchip_drm_driver = {
.gem_prime_import = drm_gem_prime_import,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
.gem_prime_vmap = rockchip_gem_prime_vmap,
.gem_prime_vunmap = rockchip_gem_prime_vunmap,
.gem_prime_mmap = rockchip_gem_mmap_buf,
@@ -320,6 +314,14 @@ static int compare_dev(struct device *dev, void *data)
return dev == (struct device *)data;
}
+static void rockchip_drm_match_remove(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node)
+ device_link_del(link);
+}
+
static struct component_match *rockchip_drm_match_add(struct device *dev)
{
struct component_match *match = NULL;
@@ -337,10 +339,15 @@ static struct component_match *rockchip_drm_match_add(struct device *dev)
if (!d)
break;
+
+ device_link_add(dev, d, DL_FLAG_STATELESS);
component_match_add(dev, &match, compare_dev, d);
} while (true);
}
+ if (IS_ERR(match))
+ rockchip_drm_match_remove(dev);
+
return match ?: ERR_PTR(-ENODEV);
}
@@ -417,13 +424,21 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
if (IS_ERR(match))
return PTR_ERR(match);
- return component_master_add_with_match(dev, &rockchip_drm_ops, match);
+ ret = component_master_add_with_match(dev, &rockchip_drm_ops, match);
+ if (ret < 0) {
+ rockchip_drm_match_remove(dev);
+ return ret;
+ }
+
+ return 0;
}
static int rockchip_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &rockchip_drm_ops);
+ rockchip_drm_match_remove(&pdev->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 498dfbc..9c064a4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -55,7 +55,7 @@ struct rockchip_drm_private {
struct mutex mm_lock;
struct drm_mm mm;
struct list_head psr_list;
- spinlock_t psr_list_lock;
+ struct mutex psr_list_lock;
};
int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index cd2ace0..e266539 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -167,20 +167,13 @@ err_gem_object_unreference:
return ERR_PTR(ret);
}
-static void rockchip_drm_output_poll_changed(struct drm_device *dev)
-{
- struct rockchip_drm_private *private = dev->dev_private;
-
- drm_fb_helper_hotplug_event(&private->fbdev_helper);
-}
-
static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
.fb_create = rockchip_user_fb_create,
- .output_poll_changed = rockchip_drm_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 1d96555..074db7a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -16,6 +16,8 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>
+
+#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include "rockchip_drm_drv.h"
@@ -262,7 +264,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_pgoff = 0;
if (rk_obj->pages)
ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
@@ -297,6 +298,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
if (ret)
return ret;
+ /*
+ * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+ * whole buffer from the start.
+ */
+ vma->vm_pgoff = 0;
+
obj = vma->vm_private_data;
return rockchip_drm_gem_object_mmap(obj, vma);
@@ -309,12 +316,10 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
}
struct rockchip_gem_object *
- rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
- bool alloc_kmap)
+ rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
struct rockchip_gem_object *rk_obj;
struct drm_gem_object *obj;
- int ret;
size = round_up(size, PAGE_SIZE);
@@ -326,6 +331,20 @@ struct rockchip_gem_object *
drm_gem_object_init(drm, obj, size);
+ return rk_obj;
+}
+
+struct rockchip_gem_object *
+rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
+ bool alloc_kmap)
+{
+ struct rockchip_gem_object *rk_obj;
+ int ret;
+
+ rk_obj = rockchip_gem_alloc_object(drm, size);
+ if (IS_ERR(rk_obj))
+ return rk_obj;
+
ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
if (ret)
goto err_free_rk_obj;
@@ -343,11 +362,21 @@ err_free_rk_obj:
*/
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
- struct rockchip_gem_object *rk_obj;
-
- rk_obj = to_rockchip_obj(obj);
+ struct drm_device *drm = obj->dev;
+ struct rockchip_drm_private *private = drm->dev_private;
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
- rockchip_gem_free_buf(rk_obj);
+ if (obj->import_attach) {
+ if (private->domain) {
+ rockchip_gem_iommu_unmap(rk_obj);
+ } else {
+ dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
+ rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+ drm_prime_gem_destroy(obj, rk_obj->sgt);
+ } else {
+ rockchip_gem_free_buf(rk_obj);
+ }
rockchip_gem_release_object(rk_obj);
}
@@ -451,6 +480,86 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
return sgt;
}
+static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
+ int count)
+{
+ struct scatterlist *s;
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ unsigned int i;
+ unsigned long size = 0;
+
+ for_each_sg(sgt->sgl, s, count, i) {
+ if (sg_dma_address(s) != expected)
+ break;
+ expected = sg_dma_address(s) + sg_dma_len(s);
+ size += sg_dma_len(s);
+ }
+ return size;
+}
+
+static int
+rockchip_gem_iommu_map_sg(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ struct rockchip_gem_object *rk_obj)
+{
+ rk_obj->sgt = sg;
+ return rockchip_gem_iommu_map(rk_obj);
+}
+
+static int
+rockchip_gem_dma_map_sg(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ struct rockchip_gem_object *rk_obj)
+{
+ int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
+ DMA_BIDIRECTIONAL);
+ if (!count)
+ return -EINVAL;
+
+ if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
+ DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
+ dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
+ DMA_BIDIRECTIONAL);
+ return -EINVAL;
+ }
+
+ rk_obj->dma_addr = sg_dma_address(sg->sgl);
+ rk_obj->sgt = sg;
+ return 0;
+}
+
+struct drm_gem_object *
+rockchip_gem_prime_import_sg_table(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ struct rockchip_drm_private *private = drm->dev_private;
+ struct rockchip_gem_object *rk_obj;
+ int ret;
+
+ rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
+ if (IS_ERR(rk_obj))
+ return ERR_CAST(rk_obj);
+
+ if (private->domain)
+ ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
+ else
+ ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);
+
+ if (ret < 0) {
+ DRM_ERROR("failed to import sg table: %d\n", ret);
+ goto err_free_rk_obj;
+ }
+
+ return &rk_obj->base;
+
+err_free_rk_obj:
+ rockchip_gem_release_object(rk_obj);
+ return ERR_PTR(ret);
+}
+
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
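
The import path added above maps the attached dma-buf's scatterlist either through the IOMMU or, when there is none, directly for DMA, in which case the mapping must come out as one contiguous DMA range. Below is a minimal sketch of that contiguity check, assuming an already dma-mapped sg_table; it is an illustration only and the example_* name is invented.

/*
 * Illustrative sketch only, not part of the patch: walk a dma-mapped
 * scatterlist and verify that the mapped segments form one contiguous DMA
 * range of at least @expected_size bytes.
 */
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static bool example_sg_is_dma_contiguous(struct sg_table *sgt, int count,
					 size_t expected_size)
{
	struct scatterlist *s;
	dma_addr_t next = sg_dma_address(sgt->sgl);
	size_t size = 0;
	unsigned int i;

	for_each_sg(sgt->sgl, s, count, i) {
		if (sg_dma_address(s) != next)
			break;
		next = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}

	return size >= expected_size;
}
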
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index f237375..d41fa65 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -36,8 +36,9 @@ struct rockchip_gem_object {
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
-rockchip_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
- struct sg_table *sgt);
+rockchip_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg);
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj);
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index 3acfd57..b339ca9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -18,7 +18,7 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_psr.h"
-#define PSR_FLUSH_TIMEOUT msecs_to_jiffies(100)
+#define PSR_FLUSH_TIMEOUT_MS 100
enum psr_state {
PSR_FLUSH,
@@ -30,11 +30,11 @@ struct psr_drv {
struct list_head list;
struct drm_encoder *encoder;
- spinlock_t lock;
+ struct mutex lock;
bool active;
enum psr_state state;
- struct timer_list flush_timer;
+ struct delayed_work flush_work;
void (*set)(struct drm_encoder *encoder, bool enable);
};
@@ -43,9 +43,8 @@ static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
{
struct rockchip_drm_private *drm_drv = crtc->dev->dev_private;
struct psr_drv *psr;
- unsigned long flags;
- spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+ mutex_lock(&drm_drv->psr_list_lock);
list_for_each_entry(psr, &drm_drv->psr_list, list) {
if (psr->encoder->crtc == crtc)
goto out;
@@ -53,7 +52,24 @@ static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
psr = ERR_PTR(-ENODEV);
out:
- spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+ mutex_unlock(&drm_drv->psr_list_lock);
+ return psr;
+}
+
+static struct psr_drv *find_psr_by_encoder(struct drm_encoder *encoder)
+{
+ struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+ struct psr_drv *psr;
+
+ mutex_lock(&drm_drv->psr_list_lock);
+ list_for_each_entry(psr, &drm_drv->psr_list, list) {
+ if (psr->encoder == encoder)
+ goto out;
+ }
+ psr = ERR_PTR(-ENODEV);
+
+out:
+ mutex_unlock(&drm_drv->psr_list_lock);
return psr;
}
@@ -94,43 +110,40 @@ static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state)
static void psr_set_state(struct psr_drv *psr, enum psr_state state)
{
- unsigned long flags;
-
- spin_lock_irqsave(&psr->lock, flags);
+ mutex_lock(&psr->lock);
psr_set_state_locked(psr, state);
- spin_unlock_irqrestore(&psr->lock, flags);
+ mutex_unlock(&psr->lock);
}
-static void psr_flush_handler(struct timer_list *t)
+static void psr_flush_handler(struct work_struct *work)
{
- struct psr_drv *psr = from_timer(psr, t, flush_timer);
- unsigned long flags;
+ struct psr_drv *psr = container_of(to_delayed_work(work),
+ struct psr_drv, flush_work);
/* If the state has changed since we initiated the flush, do nothing */
- spin_lock_irqsave(&psr->lock, flags);
+ mutex_lock(&psr->lock);
if (psr->state == PSR_FLUSH)
psr_set_state_locked(psr, PSR_ENABLE);
- spin_unlock_irqrestore(&psr->lock, flags);
+ mutex_unlock(&psr->lock);
}
/**
* rockchip_drm_psr_activate - activate PSR on the given pipe
- * @crtc: CRTC to obtain the PSR encoder
+ * @encoder: encoder to obtain the PSR encoder
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int rockchip_drm_psr_activate(struct drm_crtc *crtc)
+int rockchip_drm_psr_activate(struct drm_encoder *encoder)
{
- struct psr_drv *psr = find_psr_by_crtc(crtc);
- unsigned long flags;
+ struct psr_drv *psr = find_psr_by_encoder(encoder);
if (IS_ERR(psr))
return PTR_ERR(psr);
- spin_lock_irqsave(&psr->lock, flags);
+ mutex_lock(&psr->lock);
psr->active = true;
- spin_unlock_irqrestore(&psr->lock, flags);
+ mutex_unlock(&psr->lock);
return 0;
}
@@ -138,23 +151,22 @@ EXPORT_SYMBOL(rockchip_drm_psr_activate);
/**
* rockchip_drm_psr_deactivate - deactivate PSR on the given pipe
- * @crtc: CRTC to obtain the PSR encoder
+ * @encoder: encoder to obtain the PSR encoder
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int rockchip_drm_psr_deactivate(struct drm_crtc *crtc)
+int rockchip_drm_psr_deactivate(struct drm_encoder *encoder)
{
- struct psr_drv *psr = find_psr_by_crtc(crtc);
- unsigned long flags;
+ struct psr_drv *psr = find_psr_by_encoder(encoder);
if (IS_ERR(psr))
return PTR_ERR(psr);
- spin_lock_irqsave(&psr->lock, flags);
+ mutex_lock(&psr->lock);
psr->active = false;
- spin_unlock_irqrestore(&psr->lock, flags);
- del_timer_sync(&psr->flush_timer);
+ mutex_unlock(&psr->lock);
+ cancel_delayed_work_sync(&psr->flush_work);
return 0;
}
@@ -162,9 +174,8 @@ EXPORT_SYMBOL(rockchip_drm_psr_deactivate);
static void rockchip_drm_do_flush(struct psr_drv *psr)
{
- mod_timer(&psr->flush_timer,
- round_jiffies_up(jiffies + PSR_FLUSH_TIMEOUT));
psr_set_state(psr, PSR_FLUSH);
+ mod_delayed_work(system_wq, &psr->flush_work, PSR_FLUSH_TIMEOUT_MS);
}
/**
@@ -201,12 +212,11 @@ void rockchip_drm_psr_flush_all(struct drm_device *dev)
{
struct rockchip_drm_private *drm_drv = dev->dev_private;
struct psr_drv *psr;
- unsigned long flags;
- spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+ mutex_lock(&drm_drv->psr_list_lock);
list_for_each_entry(psr, &drm_drv->psr_list, list)
rockchip_drm_do_flush(psr);
- spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+ mutex_unlock(&drm_drv->psr_list_lock);
}
EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
@@ -223,7 +233,6 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
{
struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
struct psr_drv *psr;
- unsigned long flags;
if (!encoder || !psr_set)
return -EINVAL;
@@ -232,17 +241,17 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
if (!psr)
return -ENOMEM;
- timer_setup(&psr->flush_timer, psr_flush_handler, 0);
- spin_lock_init(&psr->lock);
+ INIT_DELAYED_WORK(&psr->flush_work, psr_flush_handler);
+ mutex_init(&psr->lock);
psr->active = true;
psr->state = PSR_DISABLE;
psr->encoder = encoder;
psr->set = psr_set;
- spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+ mutex_lock(&drm_drv->psr_list_lock);
list_add_tail(&psr->list, &drm_drv->psr_list);
- spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+ mutex_unlock(&drm_drv->psr_list_lock);
return 0;
}
@@ -260,16 +269,15 @@ void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
{
struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
struct psr_drv *psr, *n;
- unsigned long flags;
- spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+ mutex_lock(&drm_drv->psr_list_lock);
list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
if (psr->encoder == encoder) {
- del_timer(&psr->flush_timer);
+ cancel_delayed_work_sync(&psr->flush_work);
list_del(&psr->list);
kfree(psr);
}
}
- spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+ mutex_unlock(&drm_drv->psr_list_lock);
}
EXPORT_SYMBOL(rockchip_drm_psr_unregister);
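
The PSR rework above replaces the flush timer and spinlocks with a delayed work item and mutexes, so the handlers run in a context where sleeping is allowed. A minimal sketch of the resulting debounce pattern — re-arm the work on every flush and re-enter PSR once activity stops — follows; the example_* names and the 100 ms value mirror the patch but are only assumptions for the sketch.

/*
 * Illustrative sketch only, not part of the patch: delayed-work debounce.
 * Every flush pushes the deadline back; the handler runs once no flush has
 * arrived for EXAMPLE_FLUSH_MS. All example_* names are invented.
 */
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

#define EXAMPLE_FLUSH_MS	100

struct example_psr {
	struct mutex lock;
	struct delayed_work flush_work;
	bool flushing;
};

static void example_flush_handler(struct work_struct *work)
{
	struct example_psr *psr = container_of(to_delayed_work(work),
					       struct example_psr, flush_work);

	mutex_lock(&psr->lock);
	if (psr->flushing) {
		psr->flushing = false;
		/* safe to re-enter PSR here; a mutex allows sleeping callbacks */
	}
	mutex_unlock(&psr->lock);
}

static void example_flush(struct example_psr *psr)
{
	mutex_lock(&psr->lock);
	psr->flushing = true;		/* exit PSR here */
	mutex_unlock(&psr->lock);

	/* each call pushes the deadline back by EXAMPLE_FLUSH_MS */
	mod_delayed_work(system_wq, &psr->flush_work,
			 msecs_to_jiffies(EXAMPLE_FLUSH_MS));
}

static void example_psr_init(struct example_psr *psr)
{
	mutex_init(&psr->lock);
	INIT_DELAYED_WORK(&psr->flush_work, example_flush_handler);
	psr->flushing = false;
}
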
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
index b420cf1..b1ea015 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
@@ -18,8 +18,8 @@
void rockchip_drm_psr_flush_all(struct drm_device *dev);
int rockchip_drm_psr_flush(struct drm_crtc *crtc);
-int rockchip_drm_psr_activate(struct drm_crtc *crtc);
-int rockchip_drm_psr_deactivate(struct drm_crtc *crtc);
+int rockchip_drm_psr_activate(struct drm_encoder *encoder);
+int rockchip_drm_psr_deactivate(struct drm_encoder *encoder);
int rockchip_drm_psr_register(struct drm_encoder *encoder,
void (*psr_set)(struct drm_encoder *, bool enable));
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 19128b4..53d4afe 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -95,9 +95,6 @@ struct vop {
struct drm_device *drm_dev;
bool is_enabled;
- /* mutex vsync_ work */
- struct mutex vsync_mutex;
- bool vsync_work_pending;
struct completion dsp_hold_completion;
/* protected by dev->event_lock */
@@ -120,6 +117,8 @@ struct vop {
spinlock_t reg_lock;
/* lock vop irq reg */
spinlock_t irq_lock;
+ /* protects crtc enable/disable */
+ struct mutex vop_lock;
unsigned int irq;
@@ -253,23 +252,15 @@ static bool is_yuv_support(uint32_t format)
}
}
-static bool is_alpha_support(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- return true;
- default:
- return false;
- }
-}
-
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
uint32_t dst, bool is_horizontal,
int vsu_mode, int *vskiplines)
{
uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;
+ if (vskiplines)
+ *vskiplines = 0;
+
if (is_horizontal) {
if (mode == SCALE_UP)
val = GET_SCL_FT_BIC(src, dst);
@@ -310,7 +301,7 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
uint16_t vsu_mode;
uint16_t lb_mode;
uint32_t val;
- int vskiplines = 0;
+ int vskiplines;
if (dst_w > 3840) {
DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
@@ -528,7 +519,10 @@ static int vop_enable(struct drm_crtc *crtc)
goto err_disable_aclk;
}
- memcpy(vop->regs, vop->regsbak, vop->len);
+ spin_lock(&vop->reg_lock);
+ for (i = 0; i < vop->len; i += 4)
+ writel_relaxed(vop->regsbak[i / 4], vop->regs + i);
+
/*
* We need to make sure that all windows are disabled before we
* enable the crtc. Otherwise we might try to scan from a destroyed
@@ -538,10 +532,9 @@ static int vop_enable(struct drm_crtc *crtc)
struct vop_win *vop_win = &vop->win[i];
const struct vop_win_data *win = vop_win->data;
- spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, enable, 0);
- spin_unlock(&vop->reg_lock);
}
+ spin_unlock(&vop->reg_lock);
vop_cfg_done(vop);
@@ -580,8 +573,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
WARN_ON(vop->event);
- rockchip_drm_psr_deactivate(&vop->crtc);
-
+ mutex_lock(&vop->vop_lock);
drm_crtc_vblank_off(crtc);
/*
@@ -617,6 +609,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
clk_disable(vop->aclk);
clk_disable(vop->hclk);
pm_runtime_put(vop->dev);
+ mutex_unlock(&vop->vop_lock);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
@@ -641,7 +634,6 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
struct vop_win *vop_win = to_vop_win(plane);
const struct vop_win_data *win = vop_win->data;
int ret;
- struct drm_rect clip;
int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
DRM_PLANE_HELPER_NO_SCALING;
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
@@ -654,14 +646,9 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
- clip.x1 = 0;
- clip.y1 = 0;
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
-
- ret = drm_plane_helper_check_state(state, &clip,
- min_scale, max_scale,
- true, true);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
if (ret)
return ret;
@@ -790,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
rb_swap = has_rb_swapped(fb->format->format);
VOP_WIN_SET(vop, win, rb_swap, rb_swap);
- if (is_alpha_support(fb->format->format)) {
+ if (fb->format->has_alpha) {
VOP_WIN_SET(vop, win, dst_alpha_ctl,
DST_FACTOR_M0(ALPHA_SRC_INVERSE));
val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
@@ -887,10 +874,13 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
uint32_t pin_pol, val;
int ret;
+ mutex_lock(&vop->vop_lock);
+
WARN_ON(vop->event);
ret = vop_enable(crtc);
if (ret) {
+ mutex_unlock(&vop->vop_lock);
DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
return;
}
@@ -954,8 +944,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
VOP_REG_SET(vop, common, standby, 0);
-
- rockchip_drm_psr_activate(&vop->crtc);
+ mutex_unlock(&vop->vop_lock);
}
static bool vop_fs_irq_is_pending(struct vop *vop)
@@ -1158,15 +1147,14 @@ static void vop_handle_vblank(struct vop *vop)
{
struct drm_device *drm = vop->drm_dev;
struct drm_crtc *crtc = &vop->crtc;
- unsigned long flags;
- spin_lock_irqsave(&drm->event_lock, flags);
+ spin_lock(&drm->event_lock);
if (vop->event) {
drm_crtc_send_vblank_event(crtc, vop->event);
drm_crtc_vblank_put(crtc);
vop->event = NULL;
}
- spin_unlock_irqrestore(&drm->event_lock, flags);
+ spin_unlock(&drm->event_lock);
if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
@@ -1177,21 +1165,20 @@ static irqreturn_t vop_isr(int irq, void *data)
struct vop *vop = data;
struct drm_crtc *crtc = &vop->crtc;
uint32_t active_irqs;
- unsigned long flags;
int ret = IRQ_NONE;
/*
* interrupt register has interrupt status, enable and clear bits, we
* must hold irq_lock to avoid a race with enable/disable_vblank().
*/
- spin_lock_irqsave(&vop->irq_lock, flags);
+ spin_lock(&vop->irq_lock);
active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
/* Clear all active interrupt sources */
if (active_irqs)
VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);
- spin_unlock_irqrestore(&vop->irq_lock, flags);
+ spin_unlock(&vop->irq_lock);
/* This is expected for vop iommu irqs, since the irq is shared */
if (!active_irqs)
@@ -1414,7 +1401,11 @@ static int vop_initial(struct vop *vop)
usleep_range(10, 20);
reset_control_deassert(ahb_rst);
- memcpy(vop->regsbak, vop->regs, vop->len);
+ VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
+ VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);
+
+ for (i = 0; i < vop->len; i += sizeof(u32))
+ vop->regsbak[i / 4] = readl_relaxed(vop->regs + i);
VOP_REG_SET(vop, misc, global_regdone_en, 1);
VOP_REG_SET(vop, common, dsp_blank, 0);
@@ -1494,15 +1485,21 @@ int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
{
struct vop *vop = to_vop(crtc);
unsigned long jiffies_left;
+ int ret = 0;
if (!crtc || !vop->is_enabled)
return -ENODEV;
- if (mstimeout <= 0)
- return -EINVAL;
+ mutex_lock(&vop->vop_lock);
+ if (mstimeout <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (vop_line_flag_irq_is_enabled(vop))
- return -EBUSY;
+ if (vop_line_flag_irq_is_enabled(vop)) {
+ ret = -EBUSY;
+ goto out;
+ }
reinit_completion(&vop->line_flag_completion);
vop_line_flag_irq_enable(vop);
@@ -1513,10 +1510,13 @@ int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
if (jiffies_left == 0) {
DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
- return 0;
+out:
+ mutex_unlock(&vop->vop_lock);
+ return ret;
}
EXPORT_SYMBOL(rockchip_drm_wait_vact_end);
@@ -1566,20 +1566,11 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
spin_lock_init(&vop->reg_lock);
spin_lock_init(&vop->irq_lock);
-
- mutex_init(&vop->vsync_mutex);
-
- ret = devm_request_irq(dev, vop->irq, vop_isr,
- IRQF_SHARED, dev_name(dev), vop);
- if (ret)
- return ret;
-
- /* IRQ is initially disabled; it gets enabled in power_on */
- disable_irq(vop->irq);
+ mutex_init(&vop->vop_lock);
ret = vop_create_crtc(vop);
if (ret)
- goto err_enable_irq;
+ return ret;
pm_runtime_enable(&pdev->dev);
@@ -1590,13 +1581,19 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
goto err_disable_pm_runtime;
}
+ ret = devm_request_irq(dev, vop->irq, vop_isr,
+ IRQF_SHARED, dev_name(dev), vop);
+ if (ret)
+ goto err_disable_pm_runtime;
+
+ /* IRQ is initially disabled; it gets enabled in power_on */
+ disable_irq(vop->irq);
+
return 0;
err_disable_pm_runtime:
pm_runtime_disable(&pdev->dev);
vop_destroy_crtc(vop);
-err_enable_irq:
- enable_irq(vop->irq); /* To balance out the disable_irq above */
return ret;
}
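
The VOP changes above stop memcpy()ing directly to and from MMIO: the register backup is filled with readl_relaxed() once at init and replayed with writel_relaxed() under the register lock on enable. A minimal sketch of that shadow-register pattern follows; it is an illustration only and the example_* names are invented.

/*
 * Illustrative sketch only, not part of the patch: a word-by-word register
 * shadow, captured once with readl_relaxed() and replayed with
 * writel_relaxed().
 */
#include <linux/io.h>
#include <linux/types.h>

struct example_shadow {
	void __iomem *regs;	/* MMIO base */
	u32 *regsbak;		/* shadow copy, len / 4 entries */
	size_t len;		/* register window size in bytes */
};

static void example_shadow_capture(struct example_shadow *s)
{
	size_t i;

	for (i = 0; i < s->len; i += sizeof(u32))
		s->regsbak[i / 4] = readl_relaxed(s->regs + i);
}

static void example_shadow_restore(struct example_shadow *s)
{
	size_t i;

	for (i = 0; i < s->len; i += sizeof(u32))
		writel_relaxed(s->regsbak[i / 4], s->regs + i);
}
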
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 84911bd..e67f4ea 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -25,6 +25,7 @@
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/devinfo.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 4a39049..2e4eea3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -149,6 +149,34 @@ static const struct vop_data rk3036_vop = {
.win_size = ARRAY_SIZE(rk3036_vop_win_data),
};
+static const struct vop_win_phy rk3126_win1_data = {
+ .data_formats = formats_win_lite,
+ .nformats = ARRAY_SIZE(formats_win_lite),
+ .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
+ .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
+ .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
+ .dsp_info = VOP_REG(RK3126_WIN1_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3126_WIN1_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3126_WIN1_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
+};
+
+static const struct vop_win_data rk3126_vop_win_data[] = {
+ { .base = 0x00, .phy = &rk3036_win0_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x00, .phy = &rk3126_win1_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_data rk3126_vop = {
+ .intr = &rk3036_intr,
+ .common = &rk3036_common,
+ .modeset = &rk3036_modeset,
+ .output = &rk3036_output,
+ .win = rk3126_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3126_vop_win_data),
+};
+
static const struct vop_scl_extension rk3288_win_full_scl_ext = {
.cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
.cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
@@ -510,6 +538,8 @@ static const struct vop_data rk3328_vop = {
static const struct of_device_id vop_driver_dt_match[] = {
{ .compatible = "rockchip,rk3036-vop",
.data = &rk3036_vop },
+ { .compatible = "rockchip,rk3126-vop",
+ .data = &rk3126_vop },
{ .compatible = "rockchip,rk3288-vop",
.data = &rk3288_vop },
{ .compatible = "rockchip,rk3368-vop",
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index 4a4799f..f81b510 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -878,4 +878,10 @@
#define RK3036_HWC_LUT_ADDR 0x800
/* rk3036 register definition end */
+/* rk3126 register definition */
+#define RK3126_WIN1_MST 0x4c
+#define RK3126_WIN1_DSP_INFO 0x50
+#define RK3126_WIN1_DSP_ST 0x54
+/* rk3126 register definition end */
+
#endif /* _ROCKCHIP_VOP_REG_H */
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
new file mode 100644
index 0000000..bd0377c
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -0,0 +1,26 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+ccflags-y := -Iinclude/drm
+gpu-sched-y := gpu_scheduler.o sched_fence.o
+
+obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 92ec663..0d95888 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -19,32 +19,36 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- *
*/
+
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
+#include <drm/spsc_queue.h>
#define CREATE_TRACE_POINTS
-#include "gpu_sched_trace.h"
+#include <drm/gpu_scheduler_trace.h>
+
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
/* Initialize a given run queue struct */
-static void amd_sched_rq_init(struct amd_sched_rq *rq)
+static void drm_sched_rq_init(struct drm_sched_rq *rq)
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
rq->current_entity = NULL;
}
-static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
- struct amd_sched_entity *entity)
+static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
{
if (!list_empty(&entity->list))
return;
@@ -53,8 +57,8 @@ static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
spin_unlock(&rq->lock);
}
-static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
- struct amd_sched_entity *entity)
+static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
{
if (list_empty(&entity->list))
return;
@@ -72,17 +76,17 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
*
* Try to find a ready entity, returns NULL if none found.
*/
-static struct amd_sched_entity *
-amd_sched_rq_select_entity(struct amd_sched_rq *rq)
+static struct drm_sched_entity *
+drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
- struct amd_sched_entity *entity;
+ struct drm_sched_entity *entity;
spin_lock(&rq->lock);
entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
- if (amd_sched_entity_is_ready(entity)) {
+ if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
return entity;
@@ -92,7 +96,7 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
list_for_each_entry(entity, &rq->entities, list) {
- if (amd_sched_entity_is_ready(entity)) {
+ if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
return entity;
@@ -111,39 +115,37 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
* Init a context entity used by the scheduler when submitting to the HW ring.
*
* @sched The pointer to the scheduler
- * @entity The pointer to a valid amd_sched_entity
+ * @entity The pointer to a valid drm_sched_entity
* @rq The run queue this entity belongs
* @kernel If this is an entity for the kernel
* @jobs The max number of jobs in the job queue
*
* Returns 0 on success, a negative error code on failure
*/
-int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- struct amd_sched_rq *rq,
- uint32_t jobs)
+int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
+ uint32_t jobs, atomic_t *guilty)
{
- int r;
-
if (!(sched && entity && rq))
return -EINVAL;
- memset(entity, 0, sizeof(struct amd_sched_entity));
+ memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = rq;
entity->sched = sched;
+ entity->guilty = guilty;
spin_lock_init(&entity->rq_lock);
spin_lock_init(&entity->queue_lock);
- r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
- if (r)
- return r;
+ spsc_queue_init(&entity->job_queue);
atomic_set(&entity->fence_seq, 0);
entity->fence_context = dma_fence_context_alloc(2);
return 0;
}
+EXPORT_SYMBOL(drm_sched_entity_init);
/**
* Query if entity is initialized
@@ -153,8 +155,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
*
* return true if entity is initialized, false otherwise
*/
-static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
{
return entity->sched == sched &&
entity->rq != NULL;
@@ -167,10 +169,10 @@ static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
*
* Return true if the entity doesn't have any unscheduled jobs.
*/
-static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb();
- if (kfifo_is_empty(&entity->job_queue))
+ if (spsc_queue_peek(&entity->job_queue) == NULL)
return true;
return false;
@@ -183,9 +185,9 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
*
* Return true if entity could provide a job.
*/
-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
- if (kfifo_is_empty(&entity->job_queue))
+ if (spsc_queue_peek(&entity->job_queue) == NULL)
return false;
if (READ_ONCE(entity->dependency))
@@ -202,12 +204,12 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
*
* Cleanup and free the allocated resources.
*/
-void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
{
int r;
- if (!amd_sched_entity_is_initialized(sched, entity))
+ if (!drm_sched_entity_is_initialized(sched, entity))
return;
/**
* The client will not queue more IBs during this fini, consume existing
@@ -217,48 +219,55 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
r = -ERESTARTSYS;
else
r = wait_event_killable(sched->job_scheduled,
- amd_sched_entity_is_idle(entity));
- amd_sched_entity_set_rq(entity, NULL);
+ drm_sched_entity_is_idle(entity));
+ drm_sched_entity_set_rq(entity, NULL);
if (r) {
- struct amd_sched_job *job;
+ struct drm_sched_job *job;
/* Park the kernel for a moment to make sure it isn't processing
* our entity.
*/
kthread_park(sched->thread);
kthread_unpark(sched->thread);
- while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
- struct amd_sched_fence *s_fence = job->s_fence;
- amd_sched_fence_scheduled(s_fence);
+ if (entity->dependency) {
+ dma_fence_remove_callback(entity->dependency,
+ &entity->cb);
+ dma_fence_put(entity->dependency);
+ entity->dependency = NULL;
+ }
+
+ while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = job->s_fence;
+ drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
- amd_sched_fence_finished(s_fence);
+ drm_sched_fence_finished(s_fence);
+ WARN_ON(s_fence->parent);
dma_fence_put(&s_fence->finished);
sched->ops->free_job(job);
}
-
}
- kfifo_free(&entity->job_queue);
}
+EXPORT_SYMBOL(drm_sched_entity_fini);
-static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
- struct amd_sched_entity *entity =
- container_of(cb, struct amd_sched_entity, cb);
+ struct drm_sched_entity *entity =
+ container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
- amd_sched_wakeup(entity->sched);
+ drm_sched_wakeup(entity->sched);
}
-static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
- struct amd_sched_entity *entity =
- container_of(cb, struct amd_sched_entity, cb);
+ struct drm_sched_entity *entity =
+ container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
}
-void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
- struct amd_sched_rq *rq)
+void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq)
{
if (entity->rq == rq)
return;
@@ -266,37 +275,39 @@ void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
spin_lock(&entity->rq_lock);
if (entity->rq)
- amd_sched_rq_remove_entity(entity->rq, entity);
+ drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
if (rq)
- amd_sched_rq_add_entity(rq, entity);
+ drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);
}
+EXPORT_SYMBOL(drm_sched_entity_set_rq);
-bool amd_sched_dependency_optimized(struct dma_fence* fence,
- struct amd_sched_entity *entity)
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->sched;
- struct amd_sched_fence *s_fence;
+ struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
return false;
if (fence->context == entity->fence_context)
return true;
- s_fence = to_amd_sched_fence(fence);
+ s_fence = to_drm_sched_fence(fence);
if (s_fence && s_fence->sched == sched)
return true;
return false;
}
+EXPORT_SYMBOL(drm_sched_dependency_optimized);
-static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->sched;
struct dma_fence * fence = entity->dependency;
- struct amd_sched_fence *s_fence;
+ struct drm_sched_fence *s_fence;
if (fence->context == entity->fence_context) {
/* We can ignore fences from ourself */
@@ -304,7 +315,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
return false;
}
- s_fence = to_amd_sched_fence(fence);
+ s_fence = to_drm_sched_fence(fence);
if (s_fence && s_fence->sched == sched) {
/*
@@ -315,7 +326,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
dma_fence_put(entity->dependency);
entity->dependency = fence;
if (!dma_fence_add_callback(fence, &entity->cb,
- amd_sched_entity_clear_dep))
+ drm_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
@@ -324,48 +335,52 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
}
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
- amd_sched_entity_wakeup))
+ drm_sched_entity_wakeup))
return true;
dma_fence_put(entity->dependency);
return false;
}
-static struct amd_sched_job *
-amd_sched_entity_peek_job(struct amd_sched_entity *entity)
+static struct drm_sched_job *
+drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->sched;
- struct amd_sched_job *sched_job;
+ struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_sched_job *sched_job = to_drm_sched_job(
+ spsc_queue_peek(&entity->job_queue));
- if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
+ if (!sched_job)
return NULL;
- while ((entity->dependency = sched->ops->dependency(sched_job)))
- if (amd_sched_entity_add_dependency_cb(entity))
+ while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
+ if (drm_sched_entity_add_dependency_cb(entity))
return NULL;
+ /* skip jobs from an entity that was marked guilty */
+ if (entity->guilty && atomic_read(entity->guilty))
+ dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
+
+ spsc_queue_pop(&entity->job_queue);
return sched_job;
}
/**
- * Helper to submit a job to the job queue
+ * Submit a job to the job queue
*
* @sched_job The pointer to job required to submit
*
- * Returns true if we could submit the job.
+ * There is no return value; the job is queued on the entity and the
+ * scheduler is woken up if it was idle.
*/
-static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = sched_job->sched;
- struct amd_sched_entity *entity = sched_job->s_entity;
- bool added, first = false;
+ struct drm_gpu_scheduler *sched = sched_job->sched;
+ bool first = false;
- spin_lock(&entity->queue_lock);
- added = kfifo_in(&entity->job_queue, &sched_job,
- sizeof(sched_job)) == sizeof(sched_job);
+ trace_drm_sched_job(sched_job, entity);
- if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
- first = true;
+ spin_lock(&entity->queue_lock);
+ first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
spin_unlock(&entity->queue_lock);
@@ -373,26 +388,26 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
if (first) {
/* Add the entity to the run queue */
spin_lock(&entity->rq_lock);
- amd_sched_rq_add_entity(entity->rq, entity);
+ drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
- amd_sched_wakeup(sched);
+ drm_sched_wakeup(sched);
}
- return added;
}
+EXPORT_SYMBOL(drm_sched_entity_push_job);
/* job_finish is called after the hw fence is signaled
*/
-static void amd_sched_job_finish(struct work_struct *work)
+static void drm_sched_job_finish(struct work_struct *work)
{
- struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+ struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
finish_work);
- struct amd_gpu_scheduler *sched = s_job->sched;
+ struct drm_gpu_scheduler *sched = s_job->sched;
/* remove job from ring_mirror_list */
spin_lock(&sched->job_list_lock);
list_del_init(&s_job->node);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- struct amd_sched_job *next;
+ struct drm_sched_job *next;
spin_unlock(&sched->job_list_lock);
cancel_delayed_work_sync(&s_job->work_tdr);
@@ -400,7 +415,7 @@ static void amd_sched_job_finish(struct work_struct *work)
/* queue TDR for next job */
next = list_first_entry_or_null(&sched->ring_mirror_list,
- struct amd_sched_job, node);
+ struct drm_sched_job, node);
if (next)
schedule_delayed_work(&next->work_tdr, sched->timeout);
@@ -410,41 +425,43 @@ static void amd_sched_job_finish(struct work_struct *work)
sched->ops->free_job(s_job);
}
-static void amd_sched_job_finish_cb(struct dma_fence *f,
+static void drm_sched_job_finish_cb(struct dma_fence *f,
struct dma_fence_cb *cb)
{
- struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
schedule_work(&job->finish_work);
}
-static void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
- struct amd_gpu_scheduler *sched = s_job->sched;
+ struct drm_gpu_scheduler *sched = s_job->sched;
dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
- amd_sched_job_finish_cb);
+ drm_sched_job_finish_cb);
spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
list_first_entry_or_null(&sched->ring_mirror_list,
- struct amd_sched_job, node) == s_job)
+ struct drm_sched_job, node) == s_job)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
spin_unlock(&sched->job_list_lock);
}
-static void amd_sched_job_timedout(struct work_struct *work)
+static void drm_sched_job_timedout(struct work_struct *work)
{
- struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+ struct drm_sched_job *job = container_of(work, struct drm_sched_job,
work_tdr.work);
job->sched->ops->timedout_job(job);
}
-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
- struct amd_sched_job *s_job;
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *entity, *tmp;
+ int i;
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
@@ -457,31 +474,57 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
}
}
spin_unlock(&sched->job_list_lock);
-}
-void amd_sched_job_kickout(struct amd_sched_job *s_job)
-{
- struct amd_gpu_scheduler *sched = s_job->sched;
-
- spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
- spin_unlock(&sched->job_list_lock);
+ if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
+ atomic_inc(&bad->karma);
+ /* don't increase @bad's karma if it's from the KERNEL RQ,
+ * because a GPU hang can also corrupt kernel jobs (like VM updating jobs),
+ * but kernel jobs are always considered good.
+ */
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++ ) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ spin_lock(&rq->lock);
+ list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+ if (bad->s_fence->scheduled.context == entity->fence_context) {
+ if (atomic_read(&bad->karma) > bad->sched->hang_limit)
+ if (entity->guilty)
+ atomic_set(entity->guilty, 1);
+ break;
+ }
+ }
+ spin_unlock(&rq->lock);
+ if (&entity->list != &rq->entities)
+ break;
+ }
+ }
}
+EXPORT_SYMBOL(drm_sched_hw_job_reset);
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
- struct amd_sched_job *s_job, *tmp;
+ struct drm_sched_job *s_job, *tmp;
+ bool found_guilty = false;
int r;
spin_lock(&sched->job_list_lock);
s_job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct amd_sched_job, node);
+ struct drm_sched_job, node);
if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
- struct amd_sched_fence *s_fence = s_job->s_fence;
+ struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *fence;
+ uint64_t guilty_context;
+
+ if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
+ found_guilty = true;
+ guilty_context = s_job->s_fence->scheduled.context;
+ }
+
+ if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
+ dma_fence_set_error(&s_fence->finished, -ECANCELED);
spin_unlock(&sched->job_list_lock);
fence = sched->ops->run_job(s_job);
@@ -489,62 +532,47 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ drm_sched_process_job);
if (r == -ENOENT)
- amd_sched_process_job(fence, &s_fence->cb);
+ drm_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
} else {
- DRM_ERROR("Failed to run job!\n");
- amd_sched_process_job(NULL, &s_fence->cb);
+ drm_sched_process_job(NULL, &s_fence->cb);
}
spin_lock(&sched->job_list_lock);
}
spin_unlock(&sched->job_list_lock);
}
-
-/**
- * Submit a job to the job queue
- *
- * @sched_job The pointer to job required to submit
- *
- * Returns 0 for success, negative error code otherwise.
- */
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
-{
- struct amd_sched_entity *entity = sched_job->s_entity;
-
- trace_amd_sched_job(sched_job);
- wait_event(entity->sched->job_scheduled,
- amd_sched_entity_in(sched_job));
-}
+EXPORT_SYMBOL(drm_sched_job_recovery);
/* init a sched_job with basic fields */
-int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
void *owner)
{
job->sched = sched;
- job->s_entity = entity;
- job->s_fence = amd_sched_fence_create(entity, owner);
+ job->s_priority = entity->rq - sched->sched_rq;
+ job->s_fence = drm_sched_fence_create(entity, owner);
if (!job->s_fence)
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
- INIT_WORK(&job->finish_work, amd_sched_job_finish);
+ INIT_WORK(&job->finish_work, drm_sched_job_finish);
INIT_LIST_HEAD(&job->node);
- INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
+ INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
return 0;
}
+EXPORT_SYMBOL(drm_sched_job_init);
/**
 * Return true if we can push more jobs to the hw.
*/
-static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
return atomic_read(&sched->hw_rq_count) <
sched->hw_submission_limit;
@@ -553,27 +581,27 @@ static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
/**
* Wake up the scheduler when it is ready
*/
-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
- if (amd_sched_ready(sched))
+ if (drm_sched_ready(sched))
wake_up_interruptible(&sched->wake_up_worker);
}
/**
* Select next entity to process
*/
-static struct amd_sched_entity *
-amd_sched_select_entity(struct amd_gpu_scheduler *sched)
+static struct drm_sched_entity *
+drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
- struct amd_sched_entity *entity;
+ struct drm_sched_entity *entity;
int i;
- if (!amd_sched_ready(sched))
+ if (!drm_sched_ready(sched))
return NULL;
/* Kernel run queue has higher priority than normal run queue */
- for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
- entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
+ for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
if (entity)
break;
}
@@ -581,22 +609,22 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
return entity;
}
-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
- struct amd_sched_fence *s_fence =
- container_of(cb, struct amd_sched_fence, cb);
- struct amd_gpu_scheduler *sched = s_fence->sched;
+ struct drm_sched_fence *s_fence =
+ container_of(cb, struct drm_sched_fence, cb);
+ struct drm_gpu_scheduler *sched = s_fence->sched;
dma_fence_get(&s_fence->finished);
atomic_dec(&sched->hw_rq_count);
- amd_sched_fence_finished(s_fence);
+ drm_sched_fence_finished(s_fence);
- trace_amd_sched_process_job(s_fence);
+ trace_drm_sched_process_job(s_fence);
dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
-static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
if (kthread_should_park()) {
kthread_parkme();
@@ -606,58 +634,54 @@ static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
return false;
}
-static int amd_sched_main(void *param)
+static int drm_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
- struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
- int r, count;
+ struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
+ int r;
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
- struct amd_sched_entity *entity = NULL;
- struct amd_sched_fence *s_fence;
- struct amd_sched_job *sched_job;
+ struct drm_sched_entity *entity = NULL;
+ struct drm_sched_fence *s_fence;
+ struct drm_sched_job *sched_job;
struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
- (!amd_sched_blocked(sched) &&
- (entity = amd_sched_select_entity(sched))) ||
+ (!drm_sched_blocked(sched) &&
+ (entity = drm_sched_select_entity(sched))) ||
kthread_should_stop());
if (!entity)
continue;
- sched_job = amd_sched_entity_peek_job(entity);
+ sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job)
continue;
s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- amd_sched_job_begin(sched_job);
+ drm_sched_job_begin(sched_job);
fence = sched->ops->run_job(sched_job);
- amd_sched_fence_scheduled(s_fence);
+ drm_sched_fence_scheduled(s_fence);
if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ drm_sched_process_job);
if (r == -ENOENT)
- amd_sched_process_job(fence, &s_fence->cb);
+ drm_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
} else {
- DRM_ERROR("Failed to run job!\n");
- amd_sched_process_job(NULL, &s_fence->cb);
+ drm_sched_process_job(NULL, &s_fence->cb);
}
- count = kfifo_out(&entity->job_queue, &sched_job,
- sizeof(sched_job));
- WARN_ON(count != sizeof(sched_job));
wake_up(&sched->job_scheduled);
}
return 0;
@@ -673,17 +697,21 @@ static int amd_sched_main(void *param)
*
* Return 0 on success, otherwise error code.
*/
-int amd_sched_init(struct amd_gpu_scheduler *sched,
- const struct amd_sched_backend_ops *ops,
- unsigned hw_submission, long timeout, const char *name)
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+ const struct drm_sched_backend_ops *ops,
+ unsigned hw_submission,
+ unsigned hang_limit,
+ long timeout,
+ const char *name)
{
int i;
sched->ops = ops;
sched->hw_submission_limit = hw_submission;
sched->name = name;
sched->timeout = timeout;
- for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
- amd_sched_rq_init(&sched->sched_rq[i]);
+ sched->hang_limit = hang_limit;
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+ drm_sched_rq_init(&sched->sched_rq[i]);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
@@ -693,7 +721,7 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
- sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+ sched->thread = kthread_run(drm_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
DRM_ERROR("Failed to create scheduler for %s.\n", name);
return PTR_ERR(sched->thread);
@@ -701,14 +729,16 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
return 0;
}
+EXPORT_SYMBOL(drm_sched_init);
/**
* Destroy a gpu scheduler
*
* @sched The pointer to the scheduler
*/
-void amd_sched_fini(struct amd_gpu_scheduler *sched)
+void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
}
+EXPORT_SYMBOL(drm_sched_fini);
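
For context, a minimal sketch of how a driver would drive the renamed drm_sched_* entry points after this conversion; the my_* names, the ops table and the embedded base job are hypothetical, and the entity is assumed to have been initialized elsewhere:

    static struct drm_gpu_scheduler my_sched;
    static struct drm_sched_entity my_entity;   /* assumed already initialized */

    int my_ring_init(void)
    {
            /* 4 jobs in flight, default hang limit, 5 second timeout */
            return drm_sched_init(&my_sched, &my_ops, 4, 0,
                                  msecs_to_jiffies(5000), "my-ring");
    }

    int my_submit(struct my_job *job, void *owner)
    {
            int r = drm_sched_job_init(&job->base, &my_sched,
                                       &my_entity, owner);

            if (r)
                    return r;
            drm_sched_entity_push_job(&job->base, &my_entity);
            return 0;
    }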
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 33f54d0..69aab086 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -19,20 +19,20 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- *
*/
+
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
static struct kmem_cache *sched_fence_slab;
-int amd_sched_fence_slab_init(void)
+static int __init drm_sched_fence_slab_init(void)
{
sched_fence_slab = kmem_cache_create(
- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+ "drm_sched_fence", sizeof(struct drm_sched_fence), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!sched_fence_slab)
return -ENOMEM;
@@ -40,36 +40,13 @@ int amd_sched_fence_slab_init(void)
return 0;
}
-void amd_sched_fence_slab_fini(void)
+static void __exit drm_sched_fence_slab_fini(void)
{
rcu_barrier();
kmem_cache_destroy(sched_fence_slab);
}
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
- void *owner)
-{
- struct amd_sched_fence *fence = NULL;
- unsigned seq;
-
- fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
- if (fence == NULL)
- return NULL;
-
- fence->owner = owner;
- fence->sched = entity->sched;
- spin_lock_init(&fence->lock);
-
- seq = atomic_inc_return(&entity->fence_seq);
- dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
- &fence->lock, entity->fence_context, seq);
- dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
- &fence->lock, entity->fence_context + 1, seq);
-
- return fence;
-}
-
-void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
int ret = dma_fence_signal(&fence->scheduled);
@@ -81,7 +58,7 @@ void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
"was already signaled\n");
}
-void amd_sched_fence_finished(struct amd_sched_fence *fence)
+void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
int ret = dma_fence_signal(&fence->finished);
@@ -93,18 +70,18 @@ void amd_sched_fence_finished(struct amd_sched_fence *fence)
"was already signaled\n");
}
-static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
+static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
- return "amd_sched";
+ return "drm_sched";
}
-static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
+static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
return (const char *)fence->sched->name;
}
-static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
+static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
{
return true;
}
@@ -116,10 +93,10 @@ static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
*
* Free up the fence memory after the RCU grace period.
*/
-static void amd_sched_fence_free(struct rcu_head *rcu)
+static void drm_sched_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
@@ -133,11 +110,11 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release_scheduled(struct dma_fence *f)
+static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
- call_rcu(&fence->finished.rcu, amd_sched_fence_free);
+ call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}
/**
@@ -147,27 +124,68 @@ static void amd_sched_fence_release_scheduled(struct dma_fence *f)
*
* Drop the extra reference from the scheduled fence to the base fence.
*/
-static void amd_sched_fence_release_finished(struct dma_fence *f)
+static void drm_sched_fence_release_finished(struct dma_fence *f)
{
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(&fence->scheduled);
}
-const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
- .get_driver_name = amd_sched_fence_get_driver_name,
- .get_timeline_name = amd_sched_fence_get_timeline_name,
- .enable_signaling = amd_sched_fence_enable_signaling,
+const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+ .get_driver_name = drm_sched_fence_get_driver_name,
+ .get_timeline_name = drm_sched_fence_get_timeline_name,
+ .enable_signaling = drm_sched_fence_enable_signaling,
.signaled = NULL,
.wait = dma_fence_default_wait,
- .release = amd_sched_fence_release_scheduled,
+ .release = drm_sched_fence_release_scheduled,
};
-const struct dma_fence_ops amd_sched_fence_ops_finished = {
- .get_driver_name = amd_sched_fence_get_driver_name,
- .get_timeline_name = amd_sched_fence_get_timeline_name,
- .enable_signaling = amd_sched_fence_enable_signaling,
+const struct dma_fence_ops drm_sched_fence_ops_finished = {
+ .get_driver_name = drm_sched_fence_get_driver_name,
+ .get_timeline_name = drm_sched_fence_get_timeline_name,
+ .enable_signaling = drm_sched_fence_enable_signaling,
.signaled = NULL,
.wait = dma_fence_default_wait,
- .release = amd_sched_fence_release_finished,
+ .release = drm_sched_fence_release_finished,
};
+
+struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
+{
+ if (f->ops == &drm_sched_fence_ops_scheduled)
+ return container_of(f, struct drm_sched_fence, scheduled);
+
+ if (f->ops == &drm_sched_fence_ops_finished)
+ return container_of(f, struct drm_sched_fence, finished);
+
+ return NULL;
+}
+EXPORT_SYMBOL(to_drm_sched_fence);
+
+struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
+ void *owner)
+{
+ struct drm_sched_fence *fence = NULL;
+ unsigned seq;
+
+ fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
+ if (fence == NULL)
+ return NULL;
+
+ fence->owner = owner;
+ fence->sched = entity->sched;
+ spin_lock_init(&fence->lock);
+
+ seq = atomic_inc_return(&entity->fence_seq);
+ dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
+
+ return fence;
+}
+
+module_init(drm_sched_fence_slab_init);
+module_exit(drm_sched_fence_slab_fini);
+
+MODULE_DESCRIPTION("DRM GPU scheduler");
+MODULE_LICENSE("GPL and additional rights");
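
As a hedged illustration of the exported to_drm_sched_fence() helper above, a driver hook can classify an incoming dma_fence by whether it already belongs to its own scheduler, mirroring the check done in drm_sched_dependency_optimized(); the function name here is illustrative only:

    static bool fence_is_from_sched(struct dma_fence *f,
                                    struct drm_gpu_scheduler *sched)
    {
            struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

            /* NULL when f is not a scheduler fence at all */
            return s_fence && s_fence->sched == sched;
    }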
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 86eb4c1..7cc935d 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -682,6 +682,8 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+
+ cond_resched();
}
ret = 0;
@@ -944,6 +946,8 @@ static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+
+ cond_resched();
}
ret = 0;
@@ -1068,6 +1072,7 @@ static int igt_align(void *ignored)
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+
cond_resched();
}
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index acd7286..cca4b3c 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -8,5 +8,6 @@ config DRM_STI
select DRM_PANEL
select FW_LOADER
select SND_SOC_HDMI_CODEC if SND_SOC
+ select OF
help
Choose this option to enable DRM on STM stiH4xx chipset
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
index 2da7d68..7c5a783 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.c
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include "sti_awg_utils.h"
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.h b/drivers/gpu/drm/sti/sti_awg_utils.h
index 45d599b..258a568 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.h
+++ b/drivers/gpu/drm/sti/sti_awg_utils.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_AWG_UTILS_H_
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 6e4bf68..021b8fc 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/component.h>
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 2952a2d..ac4bb38 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -1,9 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_COMPOSITOR_H_
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index e8a4d48..21e50d7 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
index 3f2d89a..d87c488 100644
--- a/drivers/gpu/drm/sti/sti_crtc.h
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_CRTC_H_
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index b709ebb..df0a282 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Vincent Abriou <vincent.abriou@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 2ee5c10..067feda 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2013
* Authors: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_CURSOR_H_
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 9e93431..55b6967 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <drm/drmP.h>
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_of.h>
@@ -138,16 +139,9 @@ static int sti_atomic_check(struct drm_device *dev,
return ret;
}
-static void sti_output_poll_changed(struct drm_device *ddev)
-{
- struct sti_private *private = ddev->dev_private;
-
- drm_fbdev_cma_hotplug_event(private->fbdev);
-}
-
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = sti_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = sti_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -230,11 +224,7 @@ static void sti_cleanup(struct drm_device *ddev)
{
struct sti_private *private = ddev->dev_private;
- if (private->fbdev) {
- drm_fbdev_cma_fini(private->fbdev);
- private->fbdev = NULL;
- }
-
+ drm_fb_cma_fbdev_fini(ddev);
drm_kms_helper_poll_fini(ddev);
component_unbind_all(ddev->dev, ddev);
kfree(private);
@@ -244,8 +234,6 @@ static void sti_cleanup(struct drm_device *ddev)
static int sti_bind(struct device *dev)
{
struct drm_device *ddev;
- struct sti_private *private;
- struct drm_fbdev_cma *fbdev;
int ret;
ddev = drm_dev_alloc(&sti_driver, dev);
@@ -266,15 +254,10 @@ static int sti_bind(struct device *dev)
drm_mode_config_reset(ddev);
- private = ddev->dev_private;
if (ddev->mode_config.num_connector) {
- fbdev = drm_fbdev_cma_init(ddev, 32,
- ddev->mode_config.num_connector);
- if (IS_ERR(fbdev)) {
+ ret = drm_fb_cma_fbdev_init(ddev, 32, 0);
+ if (ret)
DRM_DEBUG_DRIVER("Warning: fails to create fbdev\n");
- fbdev = NULL;
- }
- private->fbdev = fbdev;
}
return 0;
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index 6502ed2..4b41142 100644
--- a/drivers/gpu/drm/sti/sti_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_DRV_H_
@@ -24,7 +24,6 @@ struct sti_private {
struct sti_compositor *compo;
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;
- struct drm_fbdev_cma *fbdev;
};
extern struct platform_driver sti_tvout_driver;
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 83314ae..a5979cd 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index b65eea4..9b2c470 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index 73947a4..d3e8ebf 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -1,9 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_GDP_H_
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index cf65e32..67bbdb4 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 30f02d2..58f4311 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
@@ -515,7 +515,9 @@ static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
DRM_DEBUG_DRIVER("\n");
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe, mode);
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe,
+ hdmi->drm_connector,
+ mode);
if (ret < 0) {
/*
 * Going into that statement does not mean vendor infoframe
@@ -976,7 +978,6 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
count = drm_add_edid_modes(connector, edid);
drm_mode_connector_update_edid_property(connector, edid);
- drm_edid_to_eld(connector, edid);
kfree(edid);
return count;
@@ -1414,6 +1415,11 @@ static int sti_hdmi_probe(struct platform_device *pdev)
init_waitqueue_head(&hdmi->wait_event);
hdmi->irq = platform_get_irq_byname(pdev, "irq");
+ if (hdmi->irq < 0) {
+ DRM_ERROR("Cannot get HDMI irq\n");
+ ret = hdmi->irq;
+ goto release_adapter;
+ }
ret = devm_request_threaded_irq(dev, hdmi->irq, hdmi_irq,
hdmi_irq_thread, IRQF_ONESHOT, dev_name(dev), hdmi);
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index c6469b5..63a2494 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_HDMI_H_
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
index 8e0ceb0..01699af 100644
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include "sti_hdmi_tx3g4c28phy.h"
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
index f99a7ff..d261947 100644
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_HDMI_TX3G4C28PHY_H_
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b19b343..106be8c 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/component.h>
diff --git a/drivers/gpu/drm/sti/sti_hqvdp_lut.h b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
index 619af7f..57cccd9 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp_lut.h
+++ b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_HQVDP_LUT_H_
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 2bd1d46..a4f45c7 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index e64a00e..4cb3cfd 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -1,9 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_MIXER_H_
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index 427d8f5..b074609 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index c36c13f..b8d7fae 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_PLANE_H_
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 8959fcc..ea4a3b8 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Vincent Abriou <vincent.abriou@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 577a334..2aac36c 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index fdc90f9..9dbd784 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_VID_H_
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 2dcba1d..6c42164 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -1,14 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* Vincent Abriou <vincent.abriou@st.com>
* for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
@@ -72,8 +73,6 @@
#define AWG_DELAY_ED (-8)
#define AWG_DELAY_SD (-7)
-static LIST_HEAD(vtg_lookup);
-
/*
* STI VTG register offset structure
*
@@ -123,42 +122,31 @@ struct sti_vtg_sync_params {
/**
* STI VTG structure
*
- * @dev: pointer to device driver
- * @np: device node
* @regs: register mapping
* @sync_params: synchronisation parameters used to generate timings
* @irq: VTG irq
* @irq_status: store the IRQ status value
* @notifier_list: notifier callback
* @crtc: the CRTC for vblank event
- * @link: List node to link the structure in lookup list
*/
struct sti_vtg {
- struct device *dev;
- struct device_node *np;
void __iomem *regs;
struct sti_vtg_sync_params sync_params[VTG_MAX_SYNC_OUTPUT];
int irq;
u32 irq_status;
struct raw_notifier_head notifier_list;
struct drm_crtc *crtc;
- struct list_head link;
};
-static void vtg_register(struct sti_vtg *vtg)
-{
- list_add_tail(&vtg->link, &vtg_lookup);
-}
-
struct sti_vtg *of_vtg_find(struct device_node *np)
{
- struct sti_vtg *vtg;
+ struct platform_device *pdev;
- list_for_each_entry(vtg, &vtg_lookup, link) {
- if (vtg->np == np)
- return vtg;
- }
- return NULL;
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return NULL;
+
+ return (struct sti_vtg *)platform_get_drvdata(pdev);
}
static void vtg_reset(struct sti_vtg *vtg)
@@ -397,9 +385,6 @@ static int vtg_probe(struct platform_device *pdev)
if (!vtg)
return -ENOMEM;
- vtg->dev = dev;
- vtg->np = pdev->dev.of_node;
-
/* Get memory resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -428,16 +413,10 @@ static int vtg_probe(struct platform_device *pdev)
return ret;
}
- vtg_register(vtg);
platform_set_drvdata(pdev, vtg);
- DRM_INFO("%s %s\n", __func__, dev_name(vtg->dev));
-
- return 0;
-}
+ DRM_INFO("%s %s\n", __func__, dev_name(dev));
-static int vtg_remove(struct platform_device *pdev)
-{
return 0;
}
@@ -454,7 +433,6 @@ struct platform_driver sti_vtg_driver = {
.of_match_table = vtg_of_match,
},
.probe = vtg_probe,
- .remove = vtg_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
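
A minimal sketch of the consumer side of of_vtg_find() after the lookup-list removal above; the phandle property name is an assumption, not taken from the patch:

    struct device_node *np = of_parse_phandle(dev->of_node, "st,vtg", 0);
    struct sti_vtg *vtg = np ? of_vtg_find(np) : NULL; /* NULL until the VTG has probed */

    /* drop the reference taken by of_parse_phandle() when done */
    of_node_put(np);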
diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h
index f1dcdf9..d177129 100644
--- a/drivers/gpu/drm/sti/sti_vtg.h
+++ b/drivers/gpu/drm/sti/sti_vtg.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2014
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STI_VTG_H_
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index c857663..9ab00a8 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2017
*
@@ -5,8 +6,6 @@
* Yannick Fertre <yannick.fertre@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* Mickael Reulier <mickael.reulier@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/component.h>
@@ -15,6 +14,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -24,27 +24,27 @@
#define STM_MAX_FB_WIDTH 2048
#define STM_MAX_FB_HEIGHT 2048 /* same as width to handle orientation */
-static void drv_output_poll_changed(struct drm_device *ddev)
-{
- struct ltdc_device *ldev = ddev->dev_private;
-
- drm_fbdev_cma_hotplug_event(ldev->fbdev);
-}
-
static const struct drm_mode_config_funcs drv_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = drv_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
-static void drv_lastclose(struct drm_device *ddev)
+static int stm_gem_cma_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
{
- struct ltdc_device *ldev = ddev->dev_private;
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- DRM_DEBUG("%s\n", __func__);
+ /*
+ * To optimize data transfer, the pitch is aligned on 128 bytes
+ * and the height on 4 bytes.
+ */
+ args->pitch = roundup(min_pitch, 128);
+ args->height = roundup(args->height, 4);
- drm_fbdev_cma_restore_mode(ldev->fbdev);
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
}
DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
@@ -52,7 +52,7 @@ DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
static struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = drv_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
.date = "20170330",
@@ -60,7 +60,7 @@ static struct drm_driver drv_driver = {
.minor = 0,
.patchlevel = 0,
.fops = &drv_driver_fops,
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = stm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_free_object_unlocked = drm_gem_cma_free_object,
@@ -79,7 +79,6 @@ static struct drm_driver drv_driver = {
static int drv_load(struct drm_device *ddev)
{
struct platform_device *pdev = to_platform_device(ddev->dev);
- struct drm_fbdev_cma *fbdev;
struct ltdc_device *ldev;
int ret;
@@ -112,14 +111,9 @@ static int drv_load(struct drm_device *ddev)
drm_kms_helper_poll_init(ddev);
if (ddev->mode_config.num_connector) {
- ldev = ddev->dev_private;
- fbdev = drm_fbdev_cma_init(ddev, 16,
- ddev->mode_config.num_connector);
- if (IS_ERR(fbdev)) {
+ ret = drm_fb_cma_fbdev_init(ddev, 16, 0);
+ if (ret)
DRM_DEBUG("Warning: fails to create fbdev\n");
- fbdev = NULL;
- }
- ldev->fbdev = fbdev;
}
platform_set_drvdata(pdev, ddev);
@@ -132,14 +126,9 @@ err:
static void drv_unload(struct drm_device *ddev)
{
- struct ltdc_device *ldev = ddev->dev_private;
-
DRM_DEBUG("%s\n", __func__);
- if (ldev->fbdev) {
- drm_fbdev_cma_fini(ldev->fbdev);
- ldev->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(ddev);
drm_kms_helper_poll_fini(ddev);
ltdc_unload(ddev);
drm_mode_config_cleanup(ddev);
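
A worked example of the alignment performed by stm_gem_cma_dumb_create() above, with illustrative values (a 1366x768 dumb buffer at 16 bpp):

    min_pitch = DIV_ROUND_UP(1366 * 16, 8); /* = 2732 bytes */
    args->pitch = roundup(min_pitch, 128);  /* = 2816 bytes, 128-byte aligned */
    args->height = roundup(768, 4);         /* = 768, already a multiple of 4 */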
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index e5b6310..a514b59 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2017
*
* Authors: Philippe Cornu <philippe.cornu@st.com>
* Yannick Fertre <yannick.fertre@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
@@ -15,7 +14,14 @@
#include <drm/bridge/dw_mipi_dsi.h>
#include <video/mipi_display.h>
-/* DSI wrapper register & bit definitions */
+#define HWVER_130 0x31333000 /* IP version 1.30 */
+#define HWVER_131 0x31333100 /* IP version 1.31 */
+
+/* DSI digital registers & bit definitions */
+#define DSI_VERSION 0x00
+#define VERSION GENMASK(31, 8)
+
+/* DSI wrapper registers & bit definitions */
/* Note: registers are named as in the Reference Manual */
#define DSI_WCFGR 0x0400 /* Wrapper ConFiGuration Reg */
#define WCFGR_DSIM BIT(0) /* DSI Mode */
@@ -66,6 +72,10 @@ enum dsi_color {
struct dw_mipi_dsi_stm {
void __iomem *base;
struct clk *pllref_clk;
+ struct dw_mipi_dsi *dsi;
+ u32 hw_version;
+ int lane_min_kbps;
+ int lane_max_kbps;
};
static inline void dsi_write(struct dw_mipi_dsi_stm *dsi, u32 reg, u32 val)
@@ -122,18 +132,19 @@ static int dsi_pll_get_clkout_khz(int clkin_khz, int idf, int ndiv, int odf)
return DIV_ROUND_CLOSEST(clkin_khz * ndiv, divisor);
}
-static int dsi_pll_get_params(int clkin_khz, int clkout_khz,
+static int dsi_pll_get_params(struct dw_mipi_dsi_stm *dsi,
+ int clkin_khz, int clkout_khz,
int *idf, int *ndiv, int *odf)
{
int i, o, n, n_min, n_max;
int fvco_min, fvco_max, delta, best_delta; /* all in khz */
/* Early checks preventing division by 0 & odd results */
- if ((clkin_khz <= 0) || (clkout_khz <= 0))
+ if (clkin_khz <= 0 || clkout_khz <= 0)
return -EINVAL;
- fvco_min = LANE_MIN_KBPS * 2 * ODF_MAX;
- fvco_max = LANE_MAX_KBPS * 2 * ODF_MIN;
+ fvco_min = dsi->lane_min_kbps * 2 * ODF_MAX;
+ fvco_max = dsi->lane_max_kbps * 2 * ODF_MIN;
 best_delta = 1000000; /* large starting value (1000000 kHz) */
@@ -155,7 +166,7 @@ static int dsi_pll_get_params(int clkin_khz, int clkout_khz,
for (o = ODF_MIN; o <= ODF_MAX; o *= 2) {
n = DIV_ROUND_CLOSEST(i * o * clkout_khz, clkin_khz);
/* Check ndiv according to vco range */
- if ((n < n_min) || (n > n_max))
+ if (n < n_min || n > n_max)
continue;
/* Check if new delta is better & saves parameters */
delta = dsi_pll_get_clkout_khz(clkin_khz, i, n, o) -
@@ -213,6 +224,15 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
int ret, bpp;
u32 val;
+ /* Update lane capabilities according to hw version */
+ dsi->hw_version = dsi_read(dsi, DSI_VERSION) & VERSION;
+ dsi->lane_min_kbps = LANE_MIN_KBPS;
+ dsi->lane_max_kbps = LANE_MAX_KBPS;
+ if (dsi->hw_version == HWVER_131) {
+ dsi->lane_min_kbps *= 2;
+ dsi->lane_max_kbps *= 2;
+ }
+
pll_in_khz = (unsigned int)(clk_get_rate(dsi->pllref_clk) / 1000);
/* Compute requested pll out */
@@ -220,12 +240,12 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
pll_out_khz = mode->clock * bpp / lanes;
/* Add 20% to pll out to be higher than pixel bw (burst mode only) */
pll_out_khz = (pll_out_khz * 12) / 10;
- if (pll_out_khz > LANE_MAX_KBPS) {
- pll_out_khz = LANE_MAX_KBPS;
+ if (pll_out_khz > dsi->lane_max_kbps) {
+ pll_out_khz = dsi->lane_max_kbps;
DRM_WARN("Warning max phy mbps is used\n");
}
- if (pll_out_khz < LANE_MIN_KBPS) {
- pll_out_khz = LANE_MIN_KBPS;
+ if (pll_out_khz < dsi->lane_min_kbps) {
+ pll_out_khz = dsi->lane_min_kbps;
DRM_WARN("Warning min phy mbps is used\n");
}
@@ -233,7 +253,8 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
idf = 0;
ndiv = 0;
odf = 0;
- ret = dsi_pll_get_params(pll_in_khz, pll_out_khz, &idf, &ndiv, &odf);
+ ret = dsi_pll_get_params(dsi, pll_in_khz, pll_out_khz,
+ &idf, &ndiv, &odf);
if (ret)
DRM_WARN("Warning dsi_pll_get_params(): bad params\n");
@@ -291,11 +312,6 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Unable to get resource\n");
- return -ENODEV;
- }
-
dsi->base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->base)) {
DRM_ERROR("Unable to get dsi registers\n");
@@ -318,21 +334,24 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
dw_mipi_dsi_stm_plat_data.base = dsi->base;
dw_mipi_dsi_stm_plat_data.priv_data = dsi;
- ret = dw_mipi_dsi_probe(pdev, &dw_mipi_dsi_stm_plat_data);
- if (ret) {
+ platform_set_drvdata(pdev, dsi);
+
+ dsi->dsi = dw_mipi_dsi_probe(pdev, &dw_mipi_dsi_stm_plat_data);
+ if (IS_ERR(dsi->dsi)) {
DRM_ERROR("Failed to initialize mipi dsi host\n");
clk_disable_unprepare(dsi->pllref_clk);
+ return PTR_ERR(dsi->dsi);
}
- return ret;
+ return 0;
}
static int dw_mipi_dsi_stm_remove(struct platform_device *pdev)
{
- struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
+ struct dw_mipi_dsi_stm *dsi = platform_get_drvdata(pdev);
clk_disable_unprepare(dsi->pllref_clk);
- dw_mipi_dsi_remove(pdev);
+ dw_mipi_dsi_remove(dsi->dsi);
return 0;
}
@@ -342,7 +361,7 @@ static struct platform_driver dw_mipi_dsi_stm_driver = {
.remove = dw_mipi_dsi_stm_remove,
.driver = {
.of_match_table = dw_mipi_dsi_stm_dt_ids,
- .name = "dw_mipi_dsi-stm",
+ .name = "stm32-display-dsi",
},
};
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 735c908..1a3277e 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2017
*
@@ -5,8 +6,6 @@
* Yannick Fertre <yannick.fertre@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* Mickael Reulier <mickael.reulier@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
@@ -33,6 +32,8 @@
#define MAX_IRQ 4
+#define MAX_ENDPOINTS 2
+
#define HWVER_10200 0x010200
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
@@ -174,6 +175,8 @@
#define LXCFBLNR_CFBLN GENMASK(10, 0) /* Color Frame Buffer Line Number */
+#define CLUT_SIZE 256
+
#define CONSTA_MAX 0xFF /* CONSTant Alpha MAX= 1.0 */
#define BF1_PAXCA 0x600 /* Pixel Alpha x Constant Alpha */
#define BF1_CA 0x400 /* Constant Alpha */
@@ -325,6 +328,26 @@ static inline u32 to_drm_pixelformat(enum ltdc_pix_fmt pf)
}
}
+static inline u32 get_pixelformat_without_alpha(u32 drm)
+{
+ switch (drm) {
+ case DRM_FORMAT_ARGB4444:
+ return DRM_FORMAT_XRGB4444;
+ case DRM_FORMAT_RGBA4444:
+ return DRM_FORMAT_RGBX4444;
+ case DRM_FORMAT_ARGB1555:
+ return DRM_FORMAT_XRGB1555;
+ case DRM_FORMAT_RGBA5551:
+ return DRM_FORMAT_RGBX5551;
+ case DRM_FORMAT_ARGB8888:
+ return DRM_FORMAT_XRGB8888;
+ case DRM_FORMAT_RGBA8888:
+ return DRM_FORMAT_RGBX8888;
+ default:
+ return 0;
+ }
+}
+
static irqreturn_t ltdc_irq_thread(int irq, void *arg)
{
struct drm_device *ddev = arg;
@@ -362,6 +385,28 @@ static irqreturn_t ltdc_irq(int irq, void *arg)
* DRM_CRTC
*/
+static void ltdc_crtc_update_clut(struct drm_crtc *crtc)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ struct drm_color_lut *lut;
+ u32 val;
+ int i;
+
+ if (!crtc || !crtc->state)
+ return;
+
+ if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut)
+ return;
+
+ lut = (struct drm_color_lut *)crtc->state->gamma_lut->data;
+
+ for (i = 0; i < CLUT_SIZE; i++, lut++) {
+ val = ((lut->red << 8) & 0xff0000) | (lut->green & 0xff00) |
+ (lut->blue >> 8) | (i << 24);
+ reg_write(ldev->regs, LTDC_L1CLUTWR, val);
+ }
+}
+
static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -403,12 +448,35 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
reg_set(ldev->regs, LTDC_SRCR, SRCR_IMR);
}
+static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ int rate = mode->clock * 1000;
+
+ /*
+ * TODO clk_round_rate() does not work yet. When ready, it can
+ * be used instead of clk_set_rate() then clk_get_rate().
+ */
+
+ clk_disable(ldev->pixel_clk);
+ if (clk_set_rate(ldev->pixel_clk, rate) < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for pixel clk\n", rate);
+ return false;
+ }
+ clk_enable(ldev->pixel_clk);
+
+ adjusted_mode->clock = clk_get_rate(ldev->pixel_clk) / 1000;
+
+ return true;
+}
+
static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct videomode vm;
- int rate = mode->clock * 1000;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
u32 val;
@@ -431,15 +499,6 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
total_width = accum_act_w + vm.hfront_porch;
total_height = accum_act_h + vm.vfront_porch;
- clk_disable(ldev->pixel_clk);
-
- if (clk_set_rate(ldev->pixel_clk, rate) < 0) {
- DRM_ERROR("Cannot set rate (%dHz) for pixel clk\n", rate);
- return;
- }
-
- clk_enable(ldev->pixel_clk);
-
/* Configures the HS, VS, DE and PC polarities. Default Active Low */
val = 0;
@@ -485,6 +544,8 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("\n");
+ ltdc_crtc_update_clut(crtc);
+
/* Commit shadow registers = update planes at next vblank */
reg_set(ldev->regs, LTDC_SRCR, SRCR_VBR);
@@ -501,6 +562,7 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
+ .mode_fixup = ltdc_crtc_mode_fixup,
.mode_set_nofb = ltdc_crtc_mode_set_nofb,
.atomic_flush = ltdc_crtc_atomic_flush,
.atomic_enable = ltdc_crtc_atomic_enable,
@@ -532,6 +594,7 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
/*
@@ -556,7 +619,7 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
src_h = state->src_h >> 16;
/* Reject scaling */
- if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) {
+ if (src_w != state->crtc_w || src_h != state->crtc_h) {
DRM_ERROR("Scaling is not supported");
return -EINVAL;
}
@@ -637,6 +700,14 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
/* Specifies the blending factors */
val = BF1_PAXCA | BF2_1PAXCA;
+ if (!fb->format->has_alpha)
+ val = BF1_CA | BF2_1CA;
+
+ /* Manage hw-specific capabilities */
+ if (ldev->caps.non_alpha_only_l1 &&
+ plane->type != DRM_PLANE_TYPE_PRIMARY)
+ val = BF1_PAXCA | BF2_1PAXCA;
+
reg_update_bits(ldev->regs, LTDC_L1BFCR + lofs,
LXBFCR_BF2 | LXBFCR_BF1, val);
@@ -704,8 +775,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
struct device *dev = ddev->dev;
struct drm_plane *plane;
unsigned int i, nb_fmt = 0;
- u32 formats[NB_PF];
- u32 drm_fmt;
+ u32 formats[NB_PF * 2];
+ u32 drm_fmt, drm_fmt_no_alpha;
int ret;
/* Get supported pixel formats */
@@ -714,6 +785,18 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
if (!drm_fmt)
continue;
formats[nb_fmt++] = drm_fmt;
+
+ /* Add the no-alpha related format if any & supported */
+ drm_fmt_no_alpha = get_pixelformat_without_alpha(drm_fmt);
+ if (!drm_fmt_no_alpha)
+ continue;
+
+ /* Manage hw-specific capabilities */
+ if (ldev->caps.non_alpha_only_l1 &&
+ type != DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
+ formats[nb_fmt++] = drm_fmt_no_alpha;
}
plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
@@ -764,6 +847,9 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
drm_crtc_helper_add(crtc, &ltdc_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(crtc, CLUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, CLUT_SIZE);
+
DRM_DEBUG_DRIVER("CRTC:%d created\n", crtc->base.id);
/* Add planes. Note : the first layer is used by primary plane */
@@ -838,10 +924,19 @@ static int ltdc_get_caps(struct drm_device *ddev)
case HWVER_10300:
ldev->caps.reg_ofs = REG_OFS_NONE;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a0;
+ /*
+			 * Older hw versions support non-alpha color formats
+			 * derived from native alpha color formats only on the
+			 * primary layer. For instance, RG16 (a native format
+			 * without alpha) works fine on the 2nd layer, but XR24
+			 * (the color format derived from AR24) does not.
+ */
+ ldev->caps.non_alpha_only_l1 = true;
break;
case HWVER_20101:
ldev->caps.reg_ofs = REG_OFS_4;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
+ ldev->caps.non_alpha_only_l1 = false;
break;
default:
return -ENODEV;
@@ -856,18 +951,33 @@ int ltdc_load(struct drm_device *ddev)
struct ltdc_device *ldev = ddev->dev_private;
struct device *dev = ddev->dev;
struct device_node *np = dev->of_node;
- struct drm_bridge *bridge;
- struct drm_panel *panel;
+ struct drm_bridge *bridge[MAX_ENDPOINTS] = {NULL};
+ struct drm_panel *panel[MAX_ENDPOINTS] = {NULL};
struct drm_crtc *crtc;
struct reset_control *rstc;
struct resource *res;
- int irq, ret, i;
+ int irq, ret, i, endpoint_not_ready = -ENODEV;
DRM_DEBUG_DRIVER("\n");
- ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
- if (ret)
- return ret;
+ /* Get endpoints if any */
+ for (i = 0; i < MAX_ENDPOINTS; i++) {
+ ret = drm_of_find_panel_or_bridge(np, 0, i, &panel[i],
+ &bridge[i]);
+
+ /*
+		 * If at least one endpoint is ready, continue probing;
+		 * otherwise, if at least one endpoint returned -EPROBE_DEFER
+		 * and none is ready yet, defer probing.
+ */
+ if (!ret)
+ endpoint_not_ready = 0;
+ else if (ret == -EPROBE_DEFER && endpoint_not_ready)
+ endpoint_not_ready = -EPROBE_DEFER;
+ }
+
+ if (endpoint_not_ready)
+ return endpoint_not_ready;
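To make the deferral logic concrete (hypothetical probe results): if endpoint 0 returns -ENODEV and endpoint 1 returns -EPROBE_DEFER, endpoint_not_ready ends up as -EPROBE_DEFER and the whole probe is retried later; as soon as any endpoint returns 0, endpoint_not_ready becomes 0 and stays 0, so probing continues even if other endpoints are missing.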
rstc = devm_reset_control_get_exclusive(dev, NULL);
@@ -885,12 +995,6 @@ int ltdc_load(struct drm_device *ddev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Unable to get resource\n");
- ret = -ENODEV;
- goto err;
- }
-
ldev->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(ldev->regs)) {
DRM_ERROR("Unable to get ltdc registers\n");
@@ -928,19 +1032,25 @@ int ltdc_load(struct drm_device *ddev)
DRM_INFO("ltdc hw version 0x%08x - ready\n", ldev->caps.hw_version);
- if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI);
- if (IS_ERR(bridge)) {
- DRM_ERROR("Failed to create panel-bridge\n");
- ret = PTR_ERR(bridge);
- goto err;
+ /* Add endpoints panels or bridges if any */
+ for (i = 0; i < MAX_ENDPOINTS; i++) {
+ if (panel[i]) {
+ bridge[i] = drm_panel_bridge_add(panel[i],
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge[i])) {
+ DRM_ERROR("panel-bridge endpoint %d\n", i);
+ ret = PTR_ERR(bridge[i]);
+ goto err;
+ }
}
- }
- ret = ltdc_encoder_init(ddev, bridge);
- if (ret) {
- DRM_ERROR("Failed to init encoder\n");
- goto err;
+ if (bridge[i]) {
+ ret = ltdc_encoder_init(ddev, bridge[i]);
+ if (ret) {
+ DRM_ERROR("init encoder endpoint %d\n", i);
+ goto err;
+ }
+ }
}
crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
@@ -968,7 +1078,8 @@ int ltdc_load(struct drm_device *ddev)
return 0;
err:
- drm_panel_bridge_remove(bridge);
+ for (i = 0; i < MAX_ENDPOINTS; i++)
+ drm_panel_bridge_remove(bridge[i]);
clk_disable_unprepare(ldev->pixel_clk);
@@ -978,10 +1089,12 @@ err:
void ltdc_unload(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
+ int i;
DRM_DEBUG_DRIVER("\n");
- drm_of_panel_bridge_remove(ddev->dev->of_node, 0, 0);
+ for (i = 0; i < MAX_ENDPOINTS; i++)
+ drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
clk_disable_unprepare(ldev->pixel_clk);
}
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index ae43755..edb2681 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics SA 2017
*
@@ -5,8 +6,6 @@
* Yannick Fertre <yannick.fertre@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* Mickael Reulier <mickael.reulier@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LTDC_H_
@@ -18,10 +17,10 @@ struct ltdc_caps {
u32 reg_ofs; /* register offset for applicable regs */
u32 bus_width; /* bus width (32 or 64 bits) */
const u32 *pix_fmt_hw; /* supported pixel formats */
+	bool non_alpha_only_l1; /* non-native no-alpha formats only on layer 1 */
};
struct ltdc_device {
- struct drm_fbdev_cma *fbdev;
void __iomem *regs;
struct clk *pixel_clk; /* lcd pixel clock */
struct mutex err_lock; /* protecting error_status */
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 882d85d..eee6bc0 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -1,6 +1,6 @@
config DRM_SUN4I
tristate "DRM Support for Allwinner A10 Display Engine"
- depends on DRM && ARM && COMMON_CLK
+ depends on DRM && (ARM || ARM64) && COMMON_CLK
depends on ARCH_SUNXI || COMPILE_TEST
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
@@ -40,6 +40,15 @@ config DRM_SUN4I_BACKEND
do some alpha blending and feed graphics to TCON. If M is
selected the module will be called sun4i-backend.
+config DRM_SUN8I_DW_HDMI
+ tristate "Support for Allwinner version of DesignWare HDMI"
+ depends on DRM_SUN4I
+ select DRM_DW_HDMI
+ help
+	  Choose this option if you have an Allwinner SoC that uses the
+	  DesignWare HDMI controller together with a custom HDMI PHY. If M is
+	  selected the module will be called sun8i_dw_hdmi.
+
config DRM_SUN8I_MIXER
tristate "Support for Allwinner Display Engine 2.0 Mixer"
default MACH_SUN8I
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 0c2f8c7..330843c 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
sun4i-backend-y += sun4i_backend.o sun4i_layer.o
+sun4i-frontend-y += sun4i_frontend.o
sun4i-drm-y += sun4i_drv.o
sun4i-drm-y += sun4i_framebuffer.o
@@ -9,10 +10,17 @@ sun4i-drm-hdmi-y += sun4i_hdmi_enc.o
sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o
sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o
-sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o
+sun8i-drm-hdmi-y += sun8i_dw_hdmi.o
+sun8i-drm-hdmi-y += sun8i_hdmi_phy.o
+sun8i-drm-hdmi-y += sun8i_hdmi_phy_clk.o
+
+sun8i-mixer-y += sun8i_mixer.o sun8i_ui_layer.o \
+ sun8i_vi_layer.o sun8i_ui_scaler.o \
+ sun8i_vi_scaler.o sun8i_csc.o
sun4i-tcon-y += sun4i_crtc.o
sun4i-tcon-y += sun4i_dotclock.o
+sun4i-tcon-y += sun4i_lvds.o
sun4i-tcon-y += sun4i_tcon.o
sun4i-tcon-y += sun4i_rgb.o
@@ -21,6 +29,7 @@ obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
-obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o
obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
+obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 847eecb..9bad54f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -11,6 +11,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -26,6 +27,7 @@
#include "sun4i_backend.h"
#include "sun4i_drv.h"
+#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"
@@ -40,6 +42,56 @@ static const u32 sunxi_rgb2yuv_coef[12] = {
0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};
+/*
+ * These coefficients are taken from the A33 BSP from Allwinner.
+ *
+ * The formula is for each component, each coefficient being multiplied by
+ * 1024 and each constant being multiplied by 16:
+ * G = 1.164 * Y - 0.391 * U - 0.813 * V + 135
+ * R = 1.164 * Y + 1.596 * V - 222
+ * B = 1.164 * Y + 2.018 * U + 276
+ *
+ * This seems to be a conversion from Y[16:235] UV[16:240] to RGB[0:255],
+ * following the BT601 spec.
+ */
+static const u32 sunxi_bt601_yuv2rgb_coef[12] = {
+ 0x000004a7, 0x00001e6f, 0x00001cbf, 0x00000877,
+ 0x000004a7, 0x00000000, 0x00000662, 0x00003211,
+ 0x000004a7, 0x00000812, 0x00000000, 0x00002eb1,
+};
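As a sanity check on the table above (editorial sketch, not part of the patch), the multiplicative entries appear to be floor(coefficient * 1024) stored in 13-bit two's complement; the throwaway userspace snippet below reproduces the first (G) row. The constant column does not follow the stated *16 scaling exactly (e.g. 0x877 = 2167 vs 135 * 16 = 2160), so it is left out of the sketch.

	#include <math.h>
	#include <stdio.h>

	/* coefficient scaled by 1024, rounded down, kept as 13-bit two's complement */
	static unsigned int csc_coef(double c)
	{
		return (unsigned int)((int)floor(c * 1024.0) & 0x1fff);
	}

	int main(void)
	{
		/* G row of the comment: 1.164 * Y - 0.391 * U - 0.813 * V */
		printf("0x%08x 0x%08x 0x%08x\n",
		       csc_coef(1.164), csc_coef(-0.391), csc_coef(-0.813));
		/* prints: 0x000004a7 0x00001e6f 0x00001cbf */
		return 0;
	}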
+
+static inline bool sun4i_backend_format_is_planar_yuv(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV444:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool sun4i_backend_format_is_packed_yuv422(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static inline bool sun4i_backend_format_is_yuv(uint32_t format)
+{
+ return sun4i_backend_format_is_planar_yuv(format) ||
+ sun4i_backend_format_is_packed_yuv422(format);
+}
+
static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
int i;
@@ -90,13 +142,8 @@ void sun4i_backend_layer_enable(struct sun4i_backend *backend,
SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}
-static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
- u32 format, u32 *mode)
+static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
- if ((plane->type == DRM_PLANE_TYPE_PRIMARY) &&
- (format == DRM_FORMAT_ARGB8888))
- format = DRM_FORMAT_XRGB8888;
-
switch (format) {
case DRM_FORMAT_ARGB8888:
*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
@@ -141,7 +188,6 @@ int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
DRM_DEBUG_DRIVER("Updating layer %d\n", layer);
@@ -153,12 +199,6 @@ int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
state->crtc_h));
}
- /* Set the line width */
- DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
- regmap_write(backend->engine.regs,
- SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
- fb->pitches[0] * 8);
-
/* Set height and width */
DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
state->crtc_w, state->crtc_h);
@@ -176,6 +216,61 @@ int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
return 0;
}
+static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
+ int layer, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ uint32_t format = fb->format->format;
+ u32 val = SUN4I_BACKEND_IYUVCTL_EN;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
+ regmap_write(backend->engine.regs,
+ SUN4I_BACKEND_YGCOEF_REG(i),
+ sunxi_bt601_yuv2rgb_coef[i]);
+
+ /*
+ * We should do that only for a single plane, but the
+	 * backend's atomic_check has our back on this.
+ */
+ regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);
+
+ /* TODO: Add support for the multi-planar YUV formats */
+ if (sun4i_backend_format_is_packed_yuv422(format))
+ val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
+ else
+ DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", format);
+
+ /*
+ * Allwinner seems to list the pixel sequence from right to left, while
+ * DRM lists it from left to right.
+ */
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
+ break;
+ case DRM_FORMAT_YVYU:
+ val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
+ break;
+ case DRM_FORMAT_UYVY:
+ val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
+ break;
+ case DRM_FORMAT_VYUY:
+ val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
+ format);
+ }
+
+ regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);
+
+ return 0;
+}
+
int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
@@ -185,6 +280,10 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
u32 val;
int ret;
+ /* Clear the YUV mode */
+ regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
+
if (plane->state->crtc)
interlaced = plane->state->crtc->state->adjusted_mode.flags
& DRM_MODE_FLAG_INTERLACE;
@@ -196,8 +295,10 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
interlaced ? "on" : "off");
- ret = sun4i_backend_drm_format_to_layer(plane, fb->format->format,
- &val);
+ if (sun4i_backend_format_is_yuv(fb->format->format))
+ return sun4i_backend_update_yuv_format(backend, layer, plane);
+
+ ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
return ret;
@@ -210,6 +311,45 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
return 0;
}
+int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
+ int layer, uint32_t fmt)
+{
+ u32 val;
+ int ret;
+
+ ret = sun4i_backend_drm_format_to_layer(fmt, &val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid format\n");
+ return ret;
+ }
+
+ regmap_update_bits(backend->engine.regs,
+ SUN4I_BACKEND_ATTCTL_REG0(layer),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);
+
+ regmap_update_bits(backend->engine.regs,
+ SUN4I_BACKEND_ATTCTL_REG1(layer),
+ SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);
+
+ return 0;
+}
+
+static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
+ struct drm_framebuffer *fb,
+ dma_addr_t paddr)
+{
+ /* TODO: Add support for the multi-planar YUV formats */
+ DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
+ regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);
+
+ DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
+ regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
+ fb->pitches[0] * 8);
+
+ return 0;
+}
+
int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
@@ -218,6 +358,12 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
u32 lo_paddr, hi_paddr;
dma_addr_t paddr;
+ /* Set the line width */
+ DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
+ regmap_write(backend->engine.regs,
+ SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
+ fb->pitches[0] * 8);
+
/* Get the start of the displayed memory */
paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
@@ -229,6 +375,9 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
*/
paddr -= PHYS_OFFSET;
+ if (sun4i_backend_format_is_yuv(fb->format->format))
+ return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
+
/* Write the 32 lower bits of the address (in bits) */
lo_paddr = paddr << 3;
DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
@@ -246,6 +395,225 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
return 0;
}
+int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
+ struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
+ unsigned int priority = state->normalized_zpos;
+ unsigned int pipe = p_state->pipe;
+
+ DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
+ layer, priority, pipe);
+ regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));
+
+ return 0;
+}
+
+static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
+{
+ u16 src_h = state->src_h >> 16;
+ u16 src_w = state->src_w >> 16;
+
+ DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
+ src_w, src_h, state->crtc_w, state->crtc_h);
+
+ if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
+ return true;
+
+ return false;
+}
+
+static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
+{
+ struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
+ struct sun4i_backend *backend = layer->backend;
+
+ if (IS_ERR(backend->frontend))
+ return false;
+
+ return sun4i_backend_plane_uses_scaler(state);
+}
+
+static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
+ struct drm_crtc_state *old_state)
+{
+ u32 val;
+
+ WARN_ON(regmap_read_poll_timeout(engine->regs,
+ SUN4I_BACKEND_REGBUFFCTL_REG,
+ val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
+ 100, 50000));
+}
+
+static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_device *drm = state->dev;
+ struct drm_plane *plane;
+ unsigned int num_planes = 0;
+ unsigned int num_alpha_planes = 0;
+ unsigned int num_frontend_planes = 0;
+ unsigned int num_yuv_planes = 0;
+ unsigned int current_pipe = 0;
+ unsigned int i;
+
+ DRM_DEBUG_DRIVER("Starting checking our planes\n");
+
+ if (!crtc_state->planes_changed)
+ return 0;
+
+ drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+ struct sun4i_layer_state *layer_state =
+ state_to_sun4i_layer_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_format_name_buf format_name;
+
+ if (sun4i_backend_plane_uses_frontend(plane_state)) {
+ DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
+ plane->index);
+
+ layer_state->uses_frontend = true;
+ num_frontend_planes++;
+ } else {
+ layer_state->uses_frontend = false;
+ }
+
+ DRM_DEBUG_DRIVER("Plane FB format is %s\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
+ if (fb->format->has_alpha)
+ num_alpha_planes++;
+
+ if (sun4i_backend_format_is_yuv(fb->format->format)) {
+ DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
+ num_yuv_planes++;
+ }
+
+ DRM_DEBUG_DRIVER("Plane zpos is %d\n",
+ plane_state->normalized_zpos);
+
+ /* Sort our planes by Zpos */
+ plane_states[plane_state->normalized_zpos] = plane_state;
+
+ num_planes++;
+ }
+
+ /* All our planes were disabled, bail out */
+ if (!num_planes)
+ return 0;
+
+ /*
+ * The hardware is a bit unusual here.
+ *
+ * Even though it supports 4 layers, it does the composition
+ * in two separate steps.
+ *
+ * The first one is assigning a layer to one of its two
+	 * pipes. If more than one layer is assigned to the same pipe,
+	 * and their pixels overlap, the pipe will take the pixel from
+	 * the layer with the highest priority.
+ *
+	 * The second step is the actual alpha blending, which takes
+	 * the two pipes as input and uses the alpha component, if
+	 * any, to blend the two together.
+ *
+	 * This two-step scenario makes us unable to guarantee
+	 * robust alpha blending between the 4 layers in all
+	 * situations, since it means that we need to have one layer
+	 * with alpha at the lowest position of our two pipes.
+ *
+ * However, we cannot even do that, since the hardware has a
+ * bug where the lowest plane of the lowest pipe (pipe 0,
+ * priority 0), if it has any alpha, will discard the pixel
+ * entirely and just display the pixels in the background
+ * color (black by default).
+ *
+	 * This means that we effectively have only three valid
+	 * configurations with alpha, all of them with the alpha plane
+	 * at the lowest position of pipe 1, which can be zpos 1, 2 or
+	 * 3 depending on the number of planes and their zpos.
+ */
+ if (num_alpha_planes > SUN4I_BACKEND_NUM_ALPHA_LAYERS) {
+ DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
+ return -EINVAL;
+ }
+
+ /* We can't have an alpha plane at the lowest position */
+ if (plane_states[0]->fb->format->has_alpha)
+ return -EINVAL;
+
+ for (i = 1; i < num_planes; i++) {
+ struct drm_plane_state *p_state = plane_states[i];
+ struct drm_framebuffer *fb = p_state->fb;
+ struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);
+
+ /*
+ * The only alpha position is the lowest plane of the
+ * second pipe.
+ */
+ if (fb->format->has_alpha)
+ current_pipe++;
+
+ s_state->pipe = current_pipe;
+ }
+
+ /* We can only have a single YUV plane at a time */
+ if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
+ DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
+ return -EINVAL;
+ }
+
+ if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
+ DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
+ num_planes, num_alpha_planes, num_frontend_planes,
+ num_yuv_planes);
+
+ return 0;
+}
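A concrete (hypothetical) plane setup for the pipe-assignment loop above: with three enabled planes at normalized zpos 0, 1 and 2 where only the zpos-1 plane has alpha, the zpos-0 plane stays on pipe 0 while zpos 1 and 2 are assigned to pipe 1, so the single alpha plane sits at the lowest position of the second pipe and the state is accepted; a second plane with alpha, or alpha on the zpos-0 plane, is rejected by the checks above.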
+
+static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
+{
+ struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
+ struct sun4i_frontend *frontend = backend->frontend;
+
+ if (!frontend)
+ return;
+
+ /*
+ * In a teardown scenario with the frontend involved, we have
+ * to keep the frontend enabled until the next vblank, and
+ * only then disable it.
+ *
+ * This is due to the fact that the backend will not take into
+ * account the new configuration (with the plane that used to
+ * be fed by the frontend now disabled) until we write to the
+ * commit bit and the hardware fetches the new configuration
+ * during the next vblank.
+ *
+ * So we keep the frontend around in order to prevent any
+ * visual artifacts.
+ */
+ spin_lock(&backend->frontend_lock);
+ if (backend->frontend_teardown) {
+ sun4i_frontend_exit(frontend);
+ backend->frontend_teardown = false;
+ }
+ spin_unlock(&backend->frontend_lock);
+};
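The teardown sequence this enables, pieced together from this patch (the exact caller is not shown in this hunk): a plane that was using the frontend is disabled, sun4i_backend_layer_atomic_disable() sets frontend_teardown under frontend_lock, the commit bit is written, and on the following vblank the interrupt path presumably invokes this vblank_quirk hook, which finally runs sun4i_frontend_exit() once the hardware has latched the new configuration.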
+
static int sun4i_backend_init_sat(struct device *dev) {
struct sun4i_backend *backend = dev_get_drvdata(dev);
int ret;
@@ -330,11 +698,43 @@ static int sun4i_backend_of_get_id(struct device_node *node)
return ret;
}
+/* TODO: This needs to take multiple pipelines into account */
+static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
+ struct device_node *node)
+{
+ struct device_node *port, *ep, *remote;
+ struct sun4i_frontend *frontend;
+
+ port = of_graph_get_port_by_id(node, 0);
+ if (!port)
+ return ERR_PTR(-EINVAL);
+
+ for_each_available_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote)
+ continue;
+
+		/* does this node match any registered frontends? */
+ list_for_each_entry(frontend, &drv->frontend_list, list) {
+ if (remote == frontend->node) {
+ of_node_put(remote);
+ of_node_put(port);
+ return frontend;
+ }
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
+ .atomic_begin = sun4i_backend_atomic_begin,
+ .atomic_check = sun4i_backend_atomic_check,
.commit = sun4i_backend_commit,
.layers_init = sun4i_layers_init,
.apply_color_correction = sun4i_backend_apply_color_correction,
.disable_color_correction = sun4i_backend_disable_color_correction,
+ .vblank_quirk = sun4i_backend_vblank_quirk,
};
static struct regmap_config sun4i_backend_regmap_config = {
@@ -360,6 +760,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
if (!backend)
return -ENOMEM;
dev_set_drvdata(dev, backend);
+ spin_lock_init(&backend->frontend_lock);
backend->engine.node = dev->of_node;
backend->engine.ops = &sun4i_backend_engine_ops;
@@ -367,6 +768,10 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
if (backend->engine.id < 0)
return backend->engine.id;
+ backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
+ if (IS_ERR(backend->frontend))
+ dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
if (IS_ERR(regs))
@@ -531,6 +936,9 @@ static const struct sun4i_backend_quirks sun7i_backend_quirks = {
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
};
+static const struct sun4i_backend_quirks sun9i_backend_quirks = {
+};
+
static const struct of_device_id sun4i_backend_of_table[] = {
{
.compatible = "allwinner,sun4i-a10-display-backend",
@@ -552,6 +960,10 @@ static const struct of_device_id sun4i_backend_of_table[] = {
.compatible = "allwinner,sun8i-a33-display-backend",
.data = &sun8i_a33_backend_quirks,
},
+ {
+ .compatible = "allwinner,sun9i-a80-display-backend",
+ .data = &sun9i_backend_quirks,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index ac3cc02..316f217 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -72,6 +72,8 @@
#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(x) ((x) << 15)
#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK GENMASK(11, 10)
#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(x) ((x) << 10)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN BIT(2)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN BIT(1)
#define SUN4I_BACKEND_ATTCTL_REG1(l) (0x8a0 + (0x4 * (l)))
#define SUN4I_BACKEND_ATTCTL_REG1_LAY_HSCAFCT GENMASK(15, 14)
@@ -109,9 +111,27 @@
#define SUN4I_BACKEND_SPREN_REG 0x900
#define SUN4I_BACKEND_SPRFMTCTL_REG 0x908
#define SUN4I_BACKEND_SPRALPHACTL_REG 0x90c
+
#define SUN4I_BACKEND_IYUVCTL_REG 0x920
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_MASK GENMASK(14, 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV444 (4 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422 (3 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PLANAR_YUV444 (2 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PLANAR_YUV222 (1 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PLANAR_YUV111 (0 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_MASK GENMASK(9, 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_YVYU (3 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_VYUY (2 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_YUYV (1 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_UYVY (0 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_VUYA (1 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_AYUV (0 << 8)
+#define SUN4I_BACKEND_IYUVCTL_EN BIT(0)
+
#define SUN4I_BACKEND_IYUVADD_REG(c) (0x930 + (0x4 * (c)))
-#define SUN4I_BACKEND_IYUVLINEWITDTH_REG(c) (0x940 + (0x4 * (c)))
+
+#define SUN4I_BACKEND_IYUVLINEWIDTH_REG(c) (0x940 + (0x4 * (c)))
+
#define SUN4I_BACKEND_YGCOEF_REG(c) (0x950 + (0x4 * (c)))
#define SUN4I_BACKEND_YGCONS_REG 0x95c
#define SUN4I_BACKEND_URCOEF_REG(c) (0x960 + (0x4 * (c)))
@@ -143,8 +163,14 @@
#define SUN4I_BACKEND_HWCCOLORTAB_OFF 0x4c00
#define SUN4I_BACKEND_PIPE_OFF(p) (0x5000 + (0x400 * (p)))
+#define SUN4I_BACKEND_NUM_LAYERS 4
+#define SUN4I_BACKEND_NUM_ALPHA_LAYERS 1
+#define SUN4I_BACKEND_NUM_FRONTEND_LAYERS 1
+#define SUN4I_BACKEND_NUM_YUV_PLANES 1
+
struct sun4i_backend {
struct sunxi_engine engine;
+ struct sun4i_frontend *frontend;
struct reset_control *reset;
@@ -154,6 +180,10 @@ struct sun4i_backend {
struct clk *sat_clk;
struct reset_control *sat_reset;
+
+ /* Protects against races in the frontend teardown */
+ spinlock_t frontend_lock;
+ bool frontend_teardown;
};
static inline struct sun4i_backend *
@@ -170,5 +200,9 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
int layer, struct drm_plane *plane);
int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
int layer, struct drm_plane *plane);
+int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
+ int layer, uint32_t in_fmt);
+int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend,
+ int layer, struct drm_plane *plane);
#endif /* _SUN4I_BACKEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 5decae0..2d7c574 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -25,6 +25,7 @@
#include <video/videomode.h>
+#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sunxi_engine.h"
@@ -46,11 +47,25 @@ static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc)
return NULL;
}
+static int sun4i_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+ struct sunxi_engine *engine = scrtc->engine;
+ int ret = 0;
+
+ if (engine && engine->ops && engine->ops->atomic_check)
+ ret = engine->ops->atomic_check(engine, state);
+
+ return ret;
+}
+
static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct sunxi_engine *engine = scrtc->engine;
unsigned long flags;
if (crtc->state->event) {
@@ -60,7 +75,10 @@ static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
scrtc->event = crtc->state->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->state->event = NULL;
- }
+ }
+
+ if (engine->ops->atomic_begin)
+ engine->ops->atomic_begin(engine, old_state);
}
static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
@@ -93,6 +111,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
+ drm_crtc_vblank_off(crtc);
+
sun4i_tcon_set_status(scrtc->tcon, encoder, false);
if (crtc->state->event && !crtc->state->active) {
@@ -113,6 +133,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("Enabling the CRTC\n");
sun4i_tcon_set_status(scrtc->tcon, encoder, true);
+
+ drm_crtc_vblank_on(crtc);
}
static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
@@ -125,6 +147,7 @@ static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
+ .atomic_check = sun4i_crtc_atomic_check,
.atomic_begin = sun4i_crtc_atomic_begin,
.atomic_flush = sun4i_crtc_atomic_flush,
.atomic_enable = sun4i_crtc_atomic_enable,
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index d401156..e36004f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -17,8 +17,9 @@
#include "sun4i_dotclock.h"
struct sun4i_dclk {
- struct clk_hw hw;
- struct regmap *regmap;
+ struct clk_hw hw;
+ struct regmap *regmap;
+ struct sun4i_tcon *tcon;
};
static inline struct sun4i_dclk *hw_to_dclk(struct clk_hw *hw)
@@ -73,11 +74,13 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
+ struct sun4i_dclk *dclk = hw_to_dclk(hw);
+ struct sun4i_tcon *tcon = dclk->tcon;
unsigned long best_parent = 0;
u8 best_div = 1;
int i;
- for (i = 6; i <= 127; i++) {
+ for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
unsigned long ideal = rate * i;
unsigned long rounded;
@@ -129,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)
static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
+ u32 val = degrees / 120;
+
+ val <<= 28;
regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
GENMASK(29, 28),
- degrees / 120);
+ val);
return 0;
}
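For example, a requested phase of 240 degrees gives val = 2, which after the shift lands in bits 29:28 as 0x20000000; with the old code the unshifted value 2 was masked against GENMASK(29, 28) and always came out as 0, which is presumably why the phase setting never took effect before this change.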
@@ -167,6 +173,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
if (!dclk)
return -ENOMEM;
+ dclk->tcon = tcon;
init.name = clk_name;
init.ops = &sun4i_dclk_ops;
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 75c76cd..50d1960 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -23,23 +23,17 @@
#include <drm/drm_of.h>
#include "sun4i_drv.h"
+#include "sun4i_frontend.h"
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
-static void sun4i_drv_lastclose(struct drm_device *dev)
-{
- struct sun4i_drv *drv = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(drv->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */
- .lastclose = sun4i_drv_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
@@ -98,6 +92,7 @@ static int sun4i_drv_bind(struct device *dev)
goto free_drm;
}
drm->dev_private = drv;
+ INIT_LIST_HEAD(&drv->frontend_list);
INIT_LIST_HEAD(&drv->engine_list);
INIT_LIST_HEAD(&drv->tcon_list);
@@ -118,7 +113,7 @@ static int sun4i_drv_bind(struct device *dev)
/* drm_vblank_init calls kcalloc, which can fail */
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret)
- goto free_mem_region;
+ goto cleanup_mode_config;
drm->irq_enabled = true;
@@ -126,10 +121,9 @@ static int sun4i_drv_bind(struct device *dev)
sun4i_remove_framebuffers();
/* Create our framebuffer */
- drv->fbdev = sun4i_framebuffer_init(drm);
- if (IS_ERR(drv->fbdev)) {
+ ret = sun4i_framebuffer_init(drm);
+ if (ret) {
dev_err(drm->dev, "Couldn't create our framebuffer\n");
- ret = PTR_ERR(drv->fbdev);
goto cleanup_mode_config;
}
@@ -147,7 +141,6 @@ finish_poll:
sun4i_framebuffer_free(drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
-free_mem_region:
of_reserved_mem_device_release(dev);
free_drm:
drm_dev_unref(drm);
@@ -182,18 +175,26 @@ static bool sun4i_drv_node_is_frontend(struct device_node *node)
of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") ||
- of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
+ of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun9i-a80-display-frontend");
+}
+
+static bool sun4i_drv_node_is_deu(struct device_node *node)
+{
+ return of_device_is_compatible(node, "allwinner,sun9i-a80-deu");
+}
+
+static bool sun4i_drv_node_is_supported_frontend(struct device_node *node)
+{
+ if (IS_ENABLED(CONFIG_DRM_SUN4I_BACKEND))
+ return !!of_match_node(sun4i_frontend_of_table, node);
+
+ return false;
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
- return of_device_is_compatible(node, "allwinner,sun4i-a10-tcon") ||
- of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
- of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
- of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
- of_device_is_compatible(node, "allwinner,sun7i-a20-tcon") ||
- of_device_is_compatible(node, "allwinner,sun8i-a33-tcon") ||
- of_device_is_compatible(node, "allwinner,sun8i-v3s-tcon");
+ return !!of_match_node(sun4i_tcon_of_table, node);
}
static int compare_of(struct device *dev, void *data)
@@ -239,9 +240,11 @@ static int sun4i_drv_add_endpoints(struct device *dev,
int count = 0;
/*
- * We don't support the frontend for now, so we will never
- * have a device bound. Just skip over it, but we still want
- * the rest our pipeline to be added.
+ * The frontend has been disabled in some of our old device
+ * trees. If we find a node that is the frontend and is
+ * disabled, we should just follow through and parse its
+ * child, but without adding it to the component list.
+ * Otherwise, we obviously want to add it to the list.
*/
if (!sun4i_drv_node_is_frontend(node) &&
!of_device_is_available(node))
@@ -254,7 +257,15 @@ static int sun4i_drv_add_endpoints(struct device *dev,
if (sun4i_drv_node_is_connector(node))
return 0;
- if (!sun4i_drv_node_is_frontend(node)) {
+ /*
+ * If the device is either just a regular device, or an
+ * enabled frontend supported by the driver, we add it to our
+ * component list.
+ */
+ if (!(sun4i_drv_node_is_frontend(node) ||
+ sun4i_drv_node_is_deu(node)) ||
+ (sun4i_drv_node_is_supported_frontend(node) &&
+ of_device_is_available(node))) {
/* Add current component */
DRM_DEBUG_DRIVER("Adding component %pOF\n", node);
drm_of_component_match_add(dev, match, compare_of, node);
@@ -353,7 +364,10 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun6i-a31s-display-engine" },
{ .compatible = "allwinner,sun7i-a20-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
+ { .compatible = "allwinner,sun8i-a83t-display-engine" },
+ { .compatible = "allwinner,sun8i-h3-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
+ { .compatible = "allwinner,sun9i-a80-display-engine" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.h b/drivers/gpu/drm/sun4i/sun4i_drv.h
index a960c89..5750b8c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.h
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.h
@@ -19,9 +19,8 @@
struct sun4i_drv {
struct list_head engine_list;
+ struct list_head frontend_list;
struct list_head tcon_list;
-
- struct drm_fbdev_cma *fbdev;
};
#endif /* _SUN4I_DRV_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 2992f0a..5f29850 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -10,7 +10,9 @@
* the License, or (at your option) any later version.
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drmP.h>
@@ -18,21 +20,34 @@
#include "sun4i_drv.h"
#include "sun4i_framebuffer.h"
-static void sun4i_de_output_poll_changed(struct drm_device *drm)
+static int sun4i_de_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
- struct sun4i_drv *drv = drm->dev_private;
+ int ret;
- drm_fbdev_cma_hotplug_event(drv->fbdev);
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ return drm_atomic_helper_check_planes(dev, state);
}
static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
- .output_poll_changed = sun4i_de_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .atomic_check = sun4i_de_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
};
-struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
+static struct drm_mode_config_helper_funcs sun4i_de_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+int sun4i_framebuffer_init(struct drm_device *drm)
{
drm_mode_config_reset(drm);
@@ -40,13 +55,12 @@ struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
drm->mode_config.max_height = 8192;
drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
+ drm->mode_config.helper_private = &sun4i_de_mode_config_helpers;
- return drm_fbdev_cma_init(drm, 32, drm->mode_config.num_connector);
+ return drm_fb_cma_fbdev_init(drm, 32, 0);
}
void sun4i_framebuffer_free(struct drm_device *drm)
{
- struct sun4i_drv *drv = drm->dev_private;
-
- drm_fbdev_cma_fini(drv->fbdev);
+ drm_fb_cma_fbdev_fini(drm);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
index 3afd652..7ef0aed 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
@@ -13,7 +13,7 @@
#ifndef _SUN4I_FRAMEBUFFER_H_
#define _SUN4I_FRAMEBUFFER_H_
-struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm);
+int sun4i_framebuffer_init(struct drm_device *drm);
void sun4i_framebuffer_free(struct drm_device *drm);
#endif /* _SUN4I_FRAMEBUFFER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
new file mode 100644
index 0000000..ddf6cfa
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "sun4i_drv.h"
+#include "sun4i_frontend.h"
+
+static const u32 sun4i_frontend_vert_coef[32] = {
+ 0x00004000, 0x000140ff, 0x00033ffe, 0x00043ffd,
+ 0x00063efc, 0xff083dfc, 0x000a3bfb, 0xff0d39fb,
+ 0xff0f37fb, 0xff1136fa, 0xfe1433fb, 0xfe1631fb,
+ 0xfd192ffb, 0xfd1c2cfb, 0xfd1f29fb, 0xfc2127fc,
+ 0xfc2424fc, 0xfc2721fc, 0xfb291ffd, 0xfb2c1cfd,
+ 0xfb2f19fd, 0xfb3116fe, 0xfb3314fe, 0xfa3611ff,
+ 0xfb370fff, 0xfb390dff, 0xfb3b0a00, 0xfc3d08ff,
+ 0xfc3e0600, 0xfd3f0400, 0xfe3f0300, 0xff400100,
+};
+
+static const u32 sun4i_frontend_horz_coef[64] = {
+ 0x40000000, 0x00000000, 0x40fe0000, 0x0000ff03,
+ 0x3ffd0000, 0x0000ff05, 0x3ffc0000, 0x0000ff06,
+ 0x3efb0000, 0x0000ff08, 0x3dfb0000, 0x0000ff09,
+ 0x3bfa0000, 0x0000fe0d, 0x39fa0000, 0x0000fe0f,
+ 0x38fa0000, 0x0000fe10, 0x36fa0000, 0x0000fe12,
+ 0x33fa0000, 0x0000fd16, 0x31fa0000, 0x0000fd18,
+ 0x2ffa0000, 0x0000fd1a, 0x2cfa0000, 0x0000fc1e,
+ 0x29fa0000, 0x0000fc21, 0x27fb0000, 0x0000fb23,
+ 0x24fb0000, 0x0000fb26, 0x21fb0000, 0x0000fb29,
+ 0x1ffc0000, 0x0000fa2b, 0x1cfc0000, 0x0000fa2e,
+ 0x19fd0000, 0x0000fa30, 0x16fd0000, 0x0000fa33,
+ 0x14fd0000, 0x0000fa35, 0x11fe0000, 0x0000fa37,
+ 0x0ffe0000, 0x0000fa39, 0x0dfe0000, 0x0000fa3b,
+ 0x0afe0000, 0x0000fa3e, 0x08ff0000, 0x0000fb3e,
+ 0x06ff0000, 0x0000fb40, 0x05ff0000, 0x0000fc40,
+ 0x03ff0000, 0x0000fd41, 0x01ff0000, 0x0000fe42,
+};
+
+static void sun4i_frontend_scaler_init(struct sun4i_frontend *frontend)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZCOEF0_REG(i),
+ sun4i_frontend_horz_coef[2 * i]);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZCOEF0_REG(i),
+ sun4i_frontend_horz_coef[2 * i]);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZCOEF1_REG(i),
+ sun4i_frontend_horz_coef[2 * i + 1]);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZCOEF1_REG(i),
+ sun4i_frontend_horz_coef[2 * i + 1]);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTCOEF_REG(i),
+ sun4i_frontend_vert_coef[i]);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTCOEF_REG(i),
+ sun4i_frontend_vert_coef[i]);
+ }
+
+ regmap_update_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
+ SUN4I_FRONTEND_FRM_CTRL_COEF_ACCESS_CTRL,
+ SUN4I_FRONTEND_FRM_CTRL_COEF_ACCESS_CTRL);
+}
+
+int sun4i_frontend_init(struct sun4i_frontend *frontend)
+{
+ return pm_runtime_get_sync(frontend->dev);
+}
+EXPORT_SYMBOL(sun4i_frontend_init);
+
+void sun4i_frontend_exit(struct sun4i_frontend *frontend)
+{
+ pm_runtime_put(frontend->dev);
+}
+EXPORT_SYMBOL(sun4i_frontend_exit);
+
+void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
+ struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ dma_addr_t paddr;
+
+ /* Set the line width */
+ DRM_DEBUG_DRIVER("Frontend stride: %d bytes\n", fb->pitches[0]);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_LINESTRD0_REG,
+ fb->pitches[0]);
+
+ /* Set the physical address of the buffer in memory */
+ paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
+ paddr -= PHYS_OFFSET;
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr);
+}
+EXPORT_SYMBOL(sun4i_frontend_update_buffer);
+
+static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
+{
+ switch (fmt) {
+ case DRM_FORMAT_ARGB8888:
+ *val = 5;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
+{
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ *val = 2;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
+ struct drm_plane *plane, uint32_t out_fmt)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ u32 out_fmt_val;
+ u32 in_fmt_val;
+ int ret;
+
+ ret = sun4i_frontend_drm_format_to_input_fmt(fb->format->format,
+ &in_fmt_val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid input format\n");
+ return ret;
+ }
+
+ ret = sun4i_frontend_drm_format_to_output_fmt(out_fmt, &out_fmt_val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid output format\n");
+ return ret;
+ }
+
+ /*
+ * I have no idea what this does exactly, but it seems to be
+ * related to the scaler FIR filter phase parameters.
+ */
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG, 0x400);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG, 0x400);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG, 0x400);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG, 0x400);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG, 0x400);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG, 0x400);
+
+ regmap_write(frontend->regs, SUN4I_FRONTEND_INPUT_FMT_REG,
+ SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(1) |
+ SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(in_fmt_val) |
+ SUN4I_FRONTEND_INPUT_FMT_PS(1));
+
+ /*
+	 * TODO: It looks like the A31 and A80 at least will need the
+ * bit 7 (ALPHA_EN) enabled when using a format with alpha (so
+ * ARGB8888).
+ */
+ regmap_write(frontend->regs, SUN4I_FRONTEND_OUTPUT_FMT_REG,
+ SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(out_fmt_val));
+
+ return 0;
+}
+EXPORT_SYMBOL(sun4i_frontend_update_formats);
+
+void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
+ struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+
+ /* Set height and width */
+ DRM_DEBUG_DRIVER("Frontend size W: %u H: %u\n",
+ state->crtc_w, state->crtc_h);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_INSIZE_REG,
+ SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
+ state->src_w >> 16));
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_INSIZE_REG,
+ SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
+ state->src_w >> 16));
+
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_OUTSIZE_REG,
+ SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_OUTSIZE_REG,
+ SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
+
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZFACT_REG,
+ state->src_w / state->crtc_w);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZFACT_REG,
+ state->src_w / state->crtc_w);
+
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTFACT_REG,
+ state->src_h / state->crtc_h);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTFACT_REG,
+ state->src_h / state->crtc_h);
+
+ regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
+ SUN4I_FRONTEND_FRM_CTRL_REG_RDY,
+ SUN4I_FRONTEND_FRM_CTRL_REG_RDY);
+}
+EXPORT_SYMBOL(sun4i_frontend_update_coord);
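A worked example for the scale-factor registers above (hypothetical geometry): a 1280-pixel-wide source displayed in a 640-pixel-wide CRTC window has state->src_w = 1280 << 16, so src_w / crtc_w = 2 << 16 = 0x20000, i.e. a 16.16 fixed-point horizontal downscale factor of 2.0, matching the SUN4I_FRONTEND_HORZFACT(i, f) layout with the integer part in the upper half.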
+
+int sun4i_frontend_enable(struct sun4i_frontend *frontend)
+{
+ regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
+ SUN4I_FRONTEND_FRM_CTRL_FRM_START,
+ SUN4I_FRONTEND_FRM_CTRL_FRM_START);
+
+ return 0;
+}
+EXPORT_SYMBOL(sun4i_frontend_enable);
+
+static struct regmap_config sun4i_frontend_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x0a14,
+};
+
+static int sun4i_frontend_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sun4i_frontend *frontend;
+ struct drm_device *drm = data;
+ struct sun4i_drv *drv = drm->dev_private;
+ struct resource *res;
+ void __iomem *regs;
+
+ frontend = devm_kzalloc(dev, sizeof(*frontend), GFP_KERNEL);
+ if (!frontend)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, frontend);
+ frontend->dev = dev;
+ frontend->node = dev->of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ frontend->regs = devm_regmap_init_mmio(dev, regs,
+ &sun4i_frontend_regmap_config);
+ if (IS_ERR(frontend->regs)) {
+ dev_err(dev, "Couldn't create the frontend regmap\n");
+ return PTR_ERR(frontend->regs);
+ }
+
+ frontend->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(frontend->reset)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(frontend->reset);
+ }
+
+ frontend->bus_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(frontend->bus_clk)) {
+ dev_err(dev, "Couldn't get our bus clock\n");
+ return PTR_ERR(frontend->bus_clk);
+ }
+
+ frontend->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(frontend->mod_clk)) {
+ dev_err(dev, "Couldn't get our mod clock\n");
+ return PTR_ERR(frontend->mod_clk);
+ }
+
+ frontend->ram_clk = devm_clk_get(dev, "ram");
+ if (IS_ERR(frontend->ram_clk)) {
+ dev_err(dev, "Couldn't get our ram clock\n");
+ return PTR_ERR(frontend->ram_clk);
+ }
+
+ list_add_tail(&frontend->list, &drv->frontend_list);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static void sun4i_frontend_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun4i_frontend *frontend = dev_get_drvdata(dev);
+
+ list_del(&frontend->list);
+ pm_runtime_force_suspend(dev);
+}
+
+static const struct component_ops sun4i_frontend_ops = {
+ .bind = sun4i_frontend_bind,
+ .unbind = sun4i_frontend_unbind,
+};
+
+static int sun4i_frontend_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &sun4i_frontend_ops);
+}
+
+static int sun4i_frontend_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sun4i_frontend_ops);
+
+ return 0;
+}
+
+static int sun4i_frontend_runtime_resume(struct device *dev)
+{
+ struct sun4i_frontend *frontend = dev_get_drvdata(dev);
+ int ret;
+
+ clk_set_rate(frontend->mod_clk, 300000000);
+
+ clk_prepare_enable(frontend->bus_clk);
+ clk_prepare_enable(frontend->mod_clk);
+ clk_prepare_enable(frontend->ram_clk);
+
+ ret = reset_control_reset(frontend->reset);
+ if (ret) {
+ dev_err(dev, "Couldn't reset our device\n");
+ return ret;
+ }
+
+ regmap_update_bits(frontend->regs, SUN4I_FRONTEND_EN_REG,
+ SUN4I_FRONTEND_EN_EN,
+ SUN4I_FRONTEND_EN_EN);
+
+ regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
+ SUN4I_FRONTEND_BYPASS_CSC_EN,
+ SUN4I_FRONTEND_BYPASS_CSC_EN);
+
+ sun4i_frontend_scaler_init(frontend);
+
+ return 0;
+}
+
+static int sun4i_frontend_runtime_suspend(struct device *dev)
+{
+ struct sun4i_frontend *frontend = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(frontend->ram_clk);
+ clk_disable_unprepare(frontend->mod_clk);
+ clk_disable_unprepare(frontend->bus_clk);
+
+ reset_control_assert(frontend->reset);
+
+ return 0;
+}
+
+static const struct dev_pm_ops sun4i_frontend_pm_ops = {
+ .runtime_resume = sun4i_frontend_runtime_resume,
+ .runtime_suspend = sun4i_frontend_runtime_suspend,
+};
+
+const struct of_device_id sun4i_frontend_of_table[] = {
+ { .compatible = "allwinner,sun8i-a33-display-frontend" },
+ { }
+};
+EXPORT_SYMBOL(sun4i_frontend_of_table);
+MODULE_DEVICE_TABLE(of, sun4i_frontend_of_table);
+
+static struct platform_driver sun4i_frontend_driver = {
+ .probe = sun4i_frontend_probe,
+ .remove = sun4i_frontend_remove,
+ .driver = {
+ .name = "sun4i-frontend",
+ .of_match_table = sun4i_frontend_of_table,
+ .pm = &sun4i_frontend_pm_ops,
+ },
+};
+module_platform_driver(sun4i_frontend_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 Display Engine Frontend Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
new file mode 100644
index 0000000..02661ce
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#ifndef _SUN4I_FRONTEND_H_
+#define _SUN4I_FRONTEND_H_
+
+#include <linux/list.h>
+
+#define SUN4I_FRONTEND_EN_REG 0x000
+#define SUN4I_FRONTEND_EN_EN BIT(0)
+
+#define SUN4I_FRONTEND_FRM_CTRL_REG 0x004
+#define SUN4I_FRONTEND_FRM_CTRL_COEF_ACCESS_CTRL BIT(23)
+#define SUN4I_FRONTEND_FRM_CTRL_FRM_START BIT(16)
+#define SUN4I_FRONTEND_FRM_CTRL_COEF_RDY BIT(1)
+#define SUN4I_FRONTEND_FRM_CTRL_REG_RDY BIT(0)
+
+#define SUN4I_FRONTEND_BYPASS_REG 0x008
+#define SUN4I_FRONTEND_BYPASS_CSC_EN BIT(1)
+
+#define SUN4I_FRONTEND_BUF_ADDR0_REG 0x020
+
+#define SUN4I_FRONTEND_LINESTRD0_REG 0x040
+
+#define SUN4I_FRONTEND_INPUT_FMT_REG 0x04c
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(mod) ((mod) << 8)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(fmt) ((fmt) << 4)
+#define SUN4I_FRONTEND_INPUT_FMT_PS(ps) (ps)
+
+#define SUN4I_FRONTEND_OUTPUT_FMT_REG 0x05c
+#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(fmt) (fmt)
+
+#define SUN4I_FRONTEND_CH0_INSIZE_REG 0x100
+#define SUN4I_FRONTEND_INSIZE(h, w) ((((h) - 1) << 16) | (((w) - 1)))
+
+#define SUN4I_FRONTEND_CH0_OUTSIZE_REG 0x104
+#define SUN4I_FRONTEND_OUTSIZE(h, w) ((((h) - 1) << 16) | (((w) - 1)))
+
+#define SUN4I_FRONTEND_CH0_HORZFACT_REG 0x108
+#define SUN4I_FRONTEND_HORZFACT(i, f) (((i) << 16) | (f))
+
+#define SUN4I_FRONTEND_CH0_VERTFACT_REG 0x10c
+#define SUN4I_FRONTEND_VERTFACT(i, f) (((i) << 16) | (f))
+
+#define SUN4I_FRONTEND_CH0_HORZPHASE_REG 0x110
+#define SUN4I_FRONTEND_CH0_VERTPHASE0_REG 0x114
+#define SUN4I_FRONTEND_CH0_VERTPHASE1_REG 0x118
+
+#define SUN4I_FRONTEND_CH1_INSIZE_REG 0x200
+#define SUN4I_FRONTEND_CH1_OUTSIZE_REG 0x204
+#define SUN4I_FRONTEND_CH1_HORZFACT_REG 0x208
+#define SUN4I_FRONTEND_CH1_VERTFACT_REG 0x20c
+
+#define SUN4I_FRONTEND_CH1_HORZPHASE_REG 0x210
+#define SUN4I_FRONTEND_CH1_VERTPHASE0_REG 0x214
+#define SUN4I_FRONTEND_CH1_VERTPHASE1_REG 0x218
+
+#define SUN4I_FRONTEND_CH0_HORZCOEF0_REG(i)	(0x400 + (i) * 4)
+#define SUN4I_FRONTEND_CH0_HORZCOEF1_REG(i)	(0x480 + (i) * 4)
+#define SUN4I_FRONTEND_CH0_VERTCOEF_REG(i)	(0x500 + (i) * 4)
+#define SUN4I_FRONTEND_CH1_HORZCOEF0_REG(i)	(0x600 + (i) * 4)
+#define SUN4I_FRONTEND_CH1_HORZCOEF1_REG(i)	(0x680 + (i) * 4)
+#define SUN4I_FRONTEND_CH1_VERTCOEF_REG(i)	(0x700 + (i) * 4)
+
+struct clk;
+struct device_node;
+struct drm_plane;
+struct regmap;
+struct reset_control;
+
+struct sun4i_frontend {
+ struct list_head list;
+ struct device *dev;
+ struct device_node *node;
+
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct clk *ram_clk;
+ struct regmap *regs;
+ struct reset_control *reset;
+};
+
+extern const struct of_device_id sun4i_frontend_of_table[];
+
+int sun4i_frontend_init(struct sun4i_frontend *frontend);
+void sun4i_frontend_exit(struct sun4i_frontend *frontend);
+int sun4i_frontend_enable(struct sun4i_frontend *frontend);
+
+void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
+ struct drm_plane *plane);
+void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
+ struct drm_plane *plane);
+int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
+ struct drm_plane *plane, uint32_t out_fmt);
+
+#endif /* _SUN4I_FRONTEND_H_ */
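The CH0/CH1 *FACT registers above take an integer.fraction scale factor. As a rough illustration only — the helper name and the exact 16/16 split are assumptions based on the macro layout, not part of this patch — the ratio between input and output size could be packed like this:

	/* Hypothetical helper: express in/out as a 16.16 ratio and pack it
	 * with the macro above (upper bits integer, lower bits fraction). */
	static u32 sun4i_frontend_scale_factor(u32 in_size, u32 out_size)
	{
		u64 ratio = ((u64)in_size << 16) / out_size;

		return SUN4I_FRONTEND_HORZFACT(ratio >> 16, ratio & 0xffff);
	}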
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 500b6fb..fa4bcd0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -538,7 +538,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
&sun4i_hdmi_regmap_config);
if (IS_ERR(hdmi->regmap)) {
dev_err(dev, "Couldn't create HDMI encoder regmap\n");
- return PTR_ERR(hdmi->regmap);
+ ret = PTR_ERR(hdmi->regmap);
+ goto err_disable_mod_clk;
}
ret = sun4i_tmds_create(hdmi);
@@ -551,7 +552,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
if (IS_ERR(hdmi->ddc_parent_clk)) {
dev_err(dev, "Couldn't get the HDMI DDC clock\n");
- return PTR_ERR(hdmi->ddc_parent_clk);
+ ret = PTR_ERR(hdmi->ddc_parent_clk);
+ goto err_disable_mod_clk;
}
} else {
hdmi->ddc_parent_clk = hdmi->tmds_clk;
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 7bddf12..2949a3c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -15,34 +15,100 @@
#include <drm/drmP.h>
#include "sun4i_backend.h"
+#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"
-struct sun4i_plane_desc {
- enum drm_plane_type type;
- u8 pipe;
- const uint32_t *formats;
- uint32_t nformats;
-};
+static void sun4i_backend_layer_reset(struct drm_plane *plane)
+{
+ struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
+ struct sun4i_layer_state *state;
+
+ if (plane->state) {
+ state = state_to_sun4i_layer_state(plane->state);
+
+ __drm_atomic_helper_plane_destroy_state(&state->state);
+
+ kfree(state);
+ plane->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ plane->state = &state->state;
+ plane->state->plane = plane;
+ plane->state->zpos = layer->id;
+ }
+}
+
+static struct drm_plane_state *
+sun4i_backend_layer_duplicate_state(struct drm_plane *plane)
+{
+ struct sun4i_layer_state *orig = state_to_sun4i_layer_state(plane->state);
+ struct sun4i_layer_state *copy;
+
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
+ copy->uses_frontend = orig->uses_frontend;
+
+ return &copy->state;
+}
+
+static void sun4i_backend_layer_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(state);
+
+ __drm_atomic_helper_plane_destroy_state(state);
+
+ kfree(s_state);
+}
static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
+ struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(old_state);
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
struct sun4i_backend *backend = layer->backend;
sun4i_backend_layer_enable(backend, layer->id, false);
+
+ if (layer_state->uses_frontend) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&backend->frontend_lock, flags);
+ backend->frontend_teardown = true;
+ spin_unlock_irqrestore(&backend->frontend_lock, flags);
+ }
}
static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
+ struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(plane->state);
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
struct sun4i_backend *backend = layer->backend;
+ struct sun4i_frontend *frontend = backend->frontend;
+
+ if (layer_state->uses_frontend) {
+ sun4i_frontend_init(frontend);
+ sun4i_frontend_update_coord(frontend, plane);
+ sun4i_frontend_update_buffer(frontend, plane);
+ sun4i_frontend_update_formats(frontend, plane,
+ DRM_FORMAT_ARGB8888);
+ sun4i_backend_update_layer_frontend(backend, layer->id,
+ DRM_FORMAT_ARGB8888);
+ sun4i_frontend_enable(frontend);
+ } else {
+ sun4i_backend_update_layer_formats(backend, layer->id, plane);
+ sun4i_backend_update_layer_buffer(backend, layer->id, plane);
+ }
sun4i_backend_update_layer_coord(backend, layer->id, plane);
- sun4i_backend_update_layer_formats(backend, layer->id, plane);
- sun4i_backend_update_layer_buffer(backend, layer->id, plane);
+ sun4i_backend_update_layer_zpos(backend, layer->id, plane);
sun4i_backend_layer_enable(backend, layer->id, true);
}
@@ -52,22 +118,15 @@ static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
};
static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = sun4i_backend_layer_destroy_state,
+ .atomic_duplicate_state = sun4i_backend_layer_duplicate_state,
.destroy = drm_plane_cleanup,
.disable_plane = drm_atomic_helper_disable_plane,
- .reset = drm_atomic_helper_plane_reset,
+ .reset = sun4i_backend_layer_reset,
.update_plane = drm_atomic_helper_update_plane,
};
-static const uint32_t sun4i_backend_layer_formats_primary[] = {
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
-};
-
-static const uint32_t sun4i_backend_layer_formats_overlay[] = {
+static const uint32_t sun4i_backend_layer_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
@@ -75,27 +134,16 @@ static const uint32_t sun4i_backend_layer_formats_overlay[] = {
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
DRM_FORMAT_XRGB8888,
-};
-
-static const struct sun4i_plane_desc sun4i_backend_planes[] = {
- {
- .type = DRM_PLANE_TYPE_PRIMARY,
- .pipe = 0,
- .formats = sun4i_backend_layer_formats_primary,
- .nformats = ARRAY_SIZE(sun4i_backend_layer_formats_primary),
- },
- {
- .type = DRM_PLANE_TYPE_OVERLAY,
- .pipe = 1,
- .formats = sun4i_backend_layer_formats_overlay,
- .nformats = ARRAY_SIZE(sun4i_backend_layer_formats_overlay),
- },
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
};
static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
struct sun4i_backend *backend,
- const struct sun4i_plane_desc *plane)
+ enum drm_plane_type type)
{
struct sun4i_layer *layer;
int ret;
@@ -107,8 +155,9 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun4i_backend_layer_funcs,
- plane->formats, plane->nformats,
- NULL, plane->type, NULL);
+ sun4i_backend_layer_formats,
+ ARRAY_SIZE(sun4i_backend_layer_formats),
+ NULL, type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
@@ -118,6 +167,9 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
&sun4i_backend_layer_helper_funcs);
layer->backend = backend;
+ drm_plane_create_zpos_property(&layer->plane, 0, 0,
+ SUN4I_BACKEND_NUM_LAYERS - 1);
+
return layer;
}
@@ -128,49 +180,23 @@ struct drm_plane **sun4i_layers_init(struct drm_device *drm,
struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
int i;
- planes = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes) + 1,
+ /* We need to have a sentinel at the end, hence the overallocation */
+ planes = devm_kcalloc(drm->dev, SUN4I_BACKEND_NUM_LAYERS + 1,
sizeof(*planes), GFP_KERNEL);
if (!planes)
return ERR_PTR(-ENOMEM);
- /*
- * The hardware is a bit unusual here.
- *
- * Even though it supports 4 layers, it does the composition
- * in two separate steps.
- *
- * The first one is assigning a layer to one of its two
- * pipes. If more that 1 layer is assigned to the same pipe,
- * and if pixels overlaps, the pipe will take the pixel from
- * the layer with the highest priority.
- *
- * The second step is the actual alpha blending, that takes
- * the two pipes as input, and uses the eventual alpha
- * component to do the transparency between the two.
- *
- * This two steps scenario makes us unable to guarantee a
- * robust alpha blending between the 4 layers in all
- * situations. So we just expose two layers, one per pipe. On
- * SoCs that support it, sprites could fill the need for more
- * layers.
- */
- for (i = 0; i < ARRAY_SIZE(sun4i_backend_planes); i++) {
- const struct sun4i_plane_desc *plane = &sun4i_backend_planes[i];
+ for (i = 0; i < SUN4I_BACKEND_NUM_LAYERS; i++) {
+ enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY : DRM_PLANE_TYPE_PRIMARY;
struct sun4i_layer *layer;
- layer = sun4i_layer_init_one(drm, backend, plane);
+ layer = sun4i_layer_init_one(drm, backend, type);
if (IS_ERR(layer)) {
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
return ERR_CAST(layer);
};
- DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n",
- i ? "overlay" : "primary", plane->pipe);
- regmap_update_bits(engine->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
- SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK,
- SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(plane->pipe));
-
layer->id = i;
planes[i] = &layer->plane;
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.h b/drivers/gpu/drm/sun4i/sun4i_layer.h
index 4e84f43..36b2026 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.h
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.h
@@ -22,12 +22,24 @@ struct sun4i_layer {
int id;
};
+struct sun4i_layer_state {
+ struct drm_plane_state state;
+ unsigned int pipe;
+ bool uses_frontend;
+};
+
static inline struct sun4i_layer *
plane_to_sun4i_layer(struct drm_plane *plane)
{
return container_of(plane, struct sun4i_layer, plane);
}
+static inline struct sun4i_layer_state *
+state_to_sun4i_layer_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct sun4i_layer_state, state);
+}
+
struct drm_plane **sun4i_layers_init(struct drm_device *drm,
struct sunxi_engine *engine);
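struct sun4i_layer_state embeds struct drm_plane_state as its first member, so the helper above can downcast with container_of(). A minimal sketch of how a driver hook could use it — the check hook and the scaling condition are illustrative assumptions, not taken from this patch:

	static int example_layer_atomic_check(struct drm_plane *plane,
					      struct drm_plane_state *state)
	{
		struct sun4i_layer_state *s = state_to_sun4i_layer_state(state);

		/* e.g. route the plane through the frontend when it is scaled;
		 * src_w/src_h are 16.16 fixed point in drm_plane_state. */
		s->uses_frontend = (state->src_w >> 16) != state->crtc_w ||
				   (state->src_h >> 16) != state->crtc_h;

		return 0;
	}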
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
new file mode 100644
index 0000000..bffff4c
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+#include "sun4i_crtc.h"
+#include "sun4i_tcon.h"
+#include "sun4i_lvds.h"
+
+struct sun4i_lvds {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+
+ struct sun4i_tcon *tcon;
+};
+
+static inline struct sun4i_lvds *
+drm_connector_to_sun4i_lvds(struct drm_connector *connector)
+{
+ return container_of(connector, struct sun4i_lvds,
+ connector);
+}
+
+static inline struct sun4i_lvds *
+drm_encoder_to_sun4i_lvds(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct sun4i_lvds,
+ encoder);
+}
+
+static int sun4i_lvds_get_modes(struct drm_connector *connector)
+{
+ struct sun4i_lvds *lvds =
+ drm_connector_to_sun4i_lvds(connector);
+ struct sun4i_tcon *tcon = lvds->tcon;
+
+ return drm_panel_get_modes(tcon->panel);
+}
+
+static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
+ .get_modes = sun4i_lvds_get_modes,
+};
+
+static void
+sun4i_lvds_connector_destroy(struct drm_connector *connector)
+{
+ struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector);
+ struct sun4i_tcon *tcon = lvds->tcon;
+
+ drm_panel_detach(tcon->panel);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs sun4i_lvds_con_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = sun4i_lvds_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
+{
+ struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
+ struct sun4i_tcon *tcon = lvds->tcon;
+
+ DRM_DEBUG_DRIVER("Enabling LVDS output\n");
+
+ if (!IS_ERR(tcon->panel)) {
+ drm_panel_prepare(tcon->panel);
+ drm_panel_enable(tcon->panel);
+ }
+}
+
+static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
+ struct sun4i_tcon *tcon = lvds->tcon;
+
+ DRM_DEBUG_DRIVER("Disabling LVDS output\n");
+
+ if (!IS_ERR(tcon->panel)) {
+ drm_panel_disable(tcon->panel);
+ drm_panel_unprepare(tcon->panel);
+ }
+}
+
+static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc);
+ struct sun4i_tcon *tcon = lvds->tcon;
+ u32 hsync = mode->hsync_end - mode->hsync_start;
+ u32 vsync = mode->vsync_end - mode->vsync_start;
+ unsigned long rate = mode->clock * 1000;
+ long rounded_rate;
+
+ DRM_DEBUG_DRIVER("Validating modes...\n");
+
+ if (hsync < 1)
+ return MODE_HSYNC_NARROW;
+
+ if (hsync > 0x3ff)
+ return MODE_HSYNC_WIDE;
+
+ if ((mode->hdisplay < 1) || (mode->htotal < 1))
+ return MODE_H_ILLEGAL;
+
+ if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
+ return MODE_BAD_HVALUE;
+
+ DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
+
+ if (vsync < 1)
+ return MODE_VSYNC_NARROW;
+
+ if (vsync > 0x3ff)
+ return MODE_VSYNC_WIDE;
+
+ if ((mode->vdisplay < 1) || (mode->vtotal < 1))
+ return MODE_V_ILLEGAL;
+
+ if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
+ return MODE_BAD_VVALUE;
+
+ DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+
+ tcon->dclk_min_div = 7;
+ tcon->dclk_max_div = 7;
+ rounded_rate = clk_round_rate(tcon->dclk, rate);
+ if (rounded_rate < rate)
+ return MODE_CLOCK_LOW;
+
+ if (rounded_rate > rate)
+ return MODE_CLOCK_HIGH;
+
+ DRM_DEBUG_DRIVER("Clock rate OK\n");
+
+ return MODE_OK;
+}
+
+static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
+ .disable = sun4i_lvds_encoder_disable,
+ .enable = sun4i_lvds_encoder_enable,
+ .mode_valid = sun4i_lvds_encoder_mode_valid,
+};
+
+static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
+{
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ struct sun4i_lvds *lvds;
+ int ret;
+
+ lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL);
+ if (!lvds)
+ return -ENOMEM;
+ lvds->tcon = tcon;
+ encoder = &lvds->encoder;
+
+ ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
+ &tcon->panel, &bridge);
+ if (ret) {
+ dev_info(drm->dev, "No panel or bridge found... LVDS output disabled\n");
+ return 0;
+ }
+
+ drm_encoder_helper_add(&lvds->encoder,
+ &sun4i_lvds_enc_helper_funcs);
+ ret = drm_encoder_init(drm,
+ &lvds->encoder,
+ &sun4i_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS,
+ NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialise the lvds encoder\n");
+ goto err_out;
+ }
+
+ /* The LVDS encoder can only work with the TCON channel 0 */
+ lvds->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+
+ if (tcon->panel) {
+ drm_connector_helper_add(&lvds->connector,
+ &sun4i_lvds_con_helper_funcs);
+ ret = drm_connector_init(drm, &lvds->connector,
+ &sun4i_lvds_con_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialise the lvds connector\n");
+ goto err_cleanup_connector;
+ }
+
+ drm_mode_connector_attach_encoder(&lvds->connector,
+ &lvds->encoder);
+
+ ret = drm_panel_attach(tcon->panel, &lvds->connector);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't attach our panel\n");
+ goto err_cleanup_connector;
+ }
+ }
+
+ if (bridge) {
+ ret = drm_bridge_attach(encoder, bridge, NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't attach our bridge\n");
+ goto err_cleanup_connector;
+ }
+ }
+
+ return 0;
+
+err_cleanup_connector:
+ drm_encoder_cleanup(&lvds->encoder);
+err_out:
+ return ret;
+}
+EXPORT_SYMBOL(sun4i_lvds_init);
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.h b/drivers/gpu/drm/sun4i/sun4i_lvds.h
new file mode 100644
index 0000000..f3e90fa
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#ifndef _SUN4I_LVDS_H_
+#define _SUN4I_LVDS_H_
+
+int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon);
+
+#endif /* _SUN4I_LVDS_H_ */
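sun4i_lvds_encoder_mode_valid() above pins both dot-clock divider bounds to 7. The usual rationale — stated here as an assumption, since the patch itself doesn't spell it out — is that LVDS serialises 7 bit slots per pixel clock on each data pair, so the PLL has to run at exactly 7x the pixel rate:

	/* Hypothetical helper: per-lane LVDS bit rate for a mode, given that
	 * 7 bits are shifted out per pixel clock (mode->clock is in kHz). */
	static unsigned long lvds_lane_bit_rate(const struct drm_display_mode *mode)
	{
		return (unsigned long)mode->clock * 1000 * 7;
	}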
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 832f8f9..f2fa1f2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -52,10 +52,10 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
return drm_panel_get_modes(tcon->panel);
}
-static int sun4i_rgb_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
+ const struct drm_display_mode *mode)
{
- struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+ struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(crtc);
struct sun4i_tcon *tcon = rgb->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
@@ -92,6 +92,8 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+ tcon->dclk_min_div = 6;
+ tcon->dclk_max_div = 127;
rounded_rate = clk_round_rate(tcon->dclk, rate);
if (rounded_rate < rate)
return MODE_CLOCK_LOW;
@@ -106,7 +108,6 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
.get_modes = sun4i_rgb_get_modes,
- .mode_valid = sun4i_rgb_mode_valid,
};
static void
@@ -156,6 +157,7 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
.disable = sun4i_rgb_encoder_disable,
.enable = sun4i_rgb_encoder_enable,
+ .mode_valid = sun4i_rgb_mode_valid,
};
static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index f4284b5..c3d92d5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -17,6 +17,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
#include <uapi/drm/drm_mode.h>
@@ -31,10 +32,52 @@
#include "sun4i_crtc.h"
#include "sun4i_dotclock.h"
#include "sun4i_drv.h"
+#include "sun4i_lvds.h"
#include "sun4i_rgb.h"
#include "sun4i_tcon.h"
#include "sunxi_engine.h"
+static struct drm_connector *sun4i_tcon_get_connector(const struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+
+ drm_connector_list_iter_begin(encoder->dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ if (connector->encoder == encoder) {
+ drm_connector_list_iter_end(&iter);
+ return connector;
+ }
+ drm_connector_list_iter_end(&iter);
+
+ return NULL;
+}
+
+static int sun4i_tcon_get_pixel_depth(const struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_display_info *info;
+
+ connector = sun4i_tcon_get_connector(encoder);
+ if (!connector)
+ return -EINVAL;
+
+ info = &connector->display_info;
+ if (info->num_bus_formats != 1)
+ return -EINVAL;
+
+ switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ return 18;
+
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ return 24;
+ }
+
+ return -EINVAL;
+}
+
static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
bool enabled)
{
@@ -42,6 +85,7 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
switch (channel) {
case 0:
+ WARN_ON(!tcon->quirks->has_channel_0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
SUN4I_TCON0_CTL_TCON_ENABLE,
enabled ? SUN4I_TCON0_CTL_TCON_ENABLE : 0);
@@ -59,19 +103,72 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
return;
}
- if (enabled)
+ if (enabled) {
clk_prepare_enable(clk);
- else
+ clk_rate_exclusive_get(clk);
+ } else {
+ clk_rate_exclusive_put(clk);
clk_disable_unprepare(clk);
+ }
+}
+
+static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ bool enabled)
+{
+ if (enabled) {
+ u8 val;
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
+ SUN4I_TCON0_LVDS_IF_EN,
+ SUN4I_TCON0_LVDS_IF_EN);
+
+ /*
+ * As their names suggest, these values only apply to the A31
+ * and later SoCs. We'll have to rework this when merging
+ * support for the older SoCs.
+ */
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_C(2) |
+ SUN6I_TCON0_LVDS_ANA0_V(3) |
+ SUN6I_TCON0_LVDS_ANA0_PD(2) |
+ SUN6I_TCON0_LVDS_ANA0_EN_LDO);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
+
+ if (sun4i_tcon_get_pixel_depth(encoder) == 18)
+ val = 7;
+ else
+ val = 0xf;
+
+ regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+ } else {
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
+ SUN4I_TCON0_LVDS_IF_EN, 0);
+ }
}
void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
bool enabled)
{
+ bool is_lvds = false;
int channel;
switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ is_lvds = true;
+ /* Fallthrough */
case DRM_MODE_ENCODER_NONE:
channel = 0;
break;
@@ -84,10 +181,16 @@ void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
return;
}
+ if (is_lvds && !enabled)
+ sun4i_tcon_lvds_set_status(tcon, encoder, false);
+
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_TCON_ENABLE,
enabled ? SUN4I_TCON_GCTL_TCON_ENABLE : 0);
+ if (is_lvds && enabled)
+ sun4i_tcon_lvds_set_status(tcon, encoder, true);
+
sun4i_tcon_channel_set_status(tcon, channel, enabled);
}
@@ -170,13 +273,94 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
}
+static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ unsigned int bp;
+ u8 clk_delay;
+ u32 reg, val = 0;
+
+ WARN_ON(!tcon->quirks->has_channel_0);
+
+ tcon->dclk_min_div = 7;
+ tcon->dclk_max_div = 7;
+ sun4i_tcon0_mode_set_common(tcon, mode);
+
+ /* Adjust clock delay */
+ clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+ SUN4I_TCON0_CTL_CLK_DELAY_MASK,
+ SUN4I_TCON0_CTL_CLK_DELAY(clk_delay));
+
+ /*
+ * This is called a backporch in the register documentation,
+ * but it really is the back porch + hsync
+ */
+ bp = mode->crtc_htotal - mode->crtc_hsync_start;
+ DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
+ mode->crtc_htotal, bp);
+
+ /* Set horizontal display timings */
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC1_REG,
+ SUN4I_TCON0_BASIC1_H_TOTAL(mode->htotal) |
+ SUN4I_TCON0_BASIC1_H_BACKPORCH(bp));
+
+ /*
+ * This is called a backporch in the register documentation,
+ * but it really is the back porch + hsync
+ */
+ bp = mode->crtc_vtotal - mode->crtc_vsync_start;
+ DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
+ mode->crtc_vtotal, bp);
+
+ /* Set vertical display timings */
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC2_REG,
+ SUN4I_TCON0_BASIC2_V_TOTAL(mode->crtc_vtotal * 2) |
+ SUN4I_TCON0_BASIC2_V_BACKPORCH(bp));
+
+ reg = SUN4I_TCON0_LVDS_IF_CLK_SEL_TCON0 |
+ SUN4I_TCON0_LVDS_IF_DATA_POL_NORMAL |
+ SUN4I_TCON0_LVDS_IF_CLK_POL_NORMAL;
+ if (sun4i_tcon_get_pixel_depth(encoder) == 24)
+ reg |= SUN4I_TCON0_LVDS_IF_BITWIDTH_24BITS;
+ else
+ reg |= SUN4I_TCON0_LVDS_IF_BITWIDTH_18BITS;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_IF_REG, reg);
+
+ /* Setup the polarity of the various signals */
+ if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
+ val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+
+ if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
+ val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
+
+ /* Map output pins to channel 0 */
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+ SUN4I_TCON_GCTL_IOMAP_MASK,
+ SUN4I_TCON_GCTL_IOMAP_TCON0);
+
+ /* Enable the output on the pins */
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000);
+}
+
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
+ struct drm_panel *panel = tcon->panel;
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_info display_info = connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
+ WARN_ON(!tcon->quirks->has_channel_0);
+
+ tcon->dclk_min_div = 6;
+ tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
/* Adjust clock delay */
@@ -220,12 +404,33 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
SUN4I_TCON0_BASIC3_H_SYNC(hsync));
/* Setup the polarity of the various signals */
- if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
- if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+ /*
+ * On the A20 and similar SoCs, the only way to achieve a positive
+ * (rising) edge is to set the dclk clock phase to 2/3 (240°).
+ * By default the TCON works on the negative (falling) edge, which
+ * is why the phase is set to 0 in that case.
+ * Unfortunately there's no way to logically invert dclk through
+ * the IO_POL register.
+ * The only acceptable way to work, triple-checked with a scope, is
+ * to use a clock phase of 0° for the negative edge and 240° for
+ * the positive edge.
+ * On the A33 and similar SoCs there would be a 90° phase option,
+ * but it also divides dclk by 2.
+ * The code below is a way to avoid quirks all around the TCON and
+ * DOTCLOCK drivers.
+ */
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ clk_set_phase(tcon->dclk, 240);
+
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ clk_set_phase(tcon->dclk, 0);
+
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
val);
@@ -334,6 +539,9 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
+ break;
case DRM_MODE_ENCODER_NONE:
sun4i_tcon0_mode_set_rgb(tcon, mode);
sun4i_tcon_set_mux(tcon, 0, encoder);
@@ -368,6 +576,7 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
struct sun4i_tcon *tcon = private;
struct drm_device *drm = tcon->drm;
struct sun4i_crtc *scrtc = tcon->crtc;
+ struct sunxi_engine *engine = scrtc->engine;
unsigned int status;
regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
@@ -385,6 +594,9 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
SUN4I_TCON_GINT0_VBLANK_INT(1),
0);
+ if (engine->ops->vblank_quirk)
+ engine->ops->vblank_quirk(engine);
+
return IRQ_HANDLED;
}
@@ -398,10 +610,12 @@ static int sun4i_tcon_init_clocks(struct device *dev,
}
clk_prepare_enable(tcon->clk);
- tcon->sclk0 = devm_clk_get(dev, "tcon-ch0");
- if (IS_ERR(tcon->sclk0)) {
- dev_err(dev, "Couldn't get the TCON channel 0 clock\n");
- return PTR_ERR(tcon->sclk0);
+ if (tcon->quirks->has_channel_0) {
+ tcon->sclk0 = devm_clk_get(dev, "tcon-ch0");
+ if (IS_ERR(tcon->sclk0)) {
+ dev_err(dev, "Couldn't get the TCON channel 0 clock\n");
+ return PTR_ERR(tcon->sclk0);
+ }
}
if (tcon->quirks->has_channel_1) {
@@ -665,7 +879,10 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sunxi_engine *engine;
+ struct device_node *remote;
struct sun4i_tcon *tcon;
+ struct reset_control *edp_rstc;
+ bool has_lvds_rst, has_lvds_alt, can_lvds;
int ret;
engine = sun4i_tcon_find_engine(drv, dev->of_node);
@@ -689,6 +906,20 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
return PTR_ERR(tcon->lcd_rst);
}
+ if (tcon->quirks->needs_edp_reset) {
+ edp_rstc = devm_reset_control_get_shared(dev, "edp");
+ if (IS_ERR(edp_rstc)) {
+ dev_err(dev, "Couldn't get edp reset line\n");
+ return PTR_ERR(edp_rstc);
+ }
+
+ ret = reset_control_deassert(edp_rstc);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert edp reset line\n");
+ return ret;
+ }
+ }
+
/* Make sure our TCON is reset */
ret = reset_control_reset(tcon->lcd_rst);
if (ret) {
@@ -696,6 +927,58 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
return ret;
}
+ if (tcon->quirks->supports_lvds) {
+ /*
+ * This can only be made optional since we've had DT
+ * nodes without the LVDS reset properties.
+ *
+ * If the property is missing, just disable LVDS, and
+ * print a warning.
+ */
+ tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
+ if (IS_ERR(tcon->lvds_rst)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(tcon->lvds_rst);
+ } else if (tcon->lvds_rst) {
+ has_lvds_rst = true;
+ reset_control_reset(tcon->lvds_rst);
+ } else {
+ has_lvds_rst = false;
+ }
+
+ /*
+ * This can only be made optional since we've had DT
+ * nodes without the LVDS PLL clock property.
+ *
+ * If the property is missing, just disable LVDS, and
+ * print a warning.
+ */
+ if (tcon->quirks->has_lvds_alt) {
+ tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
+ if (IS_ERR(tcon->lvds_pll)) {
+ if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
+ has_lvds_alt = false;
+ } else {
+ dev_err(dev, "Couldn't get the LVDS PLL\n");
+ return PTR_ERR(tcon->lvds_pll);
+ }
+ } else {
+ has_lvds_alt = true;
+ }
+ }
+
+ if (!has_lvds_rst ||
+ (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
+ dev_warn(dev, "Missing LVDS properties, please upgrade your DT\n");
+ dev_warn(dev, "LVDS output disabled\n");
+ can_lvds = false;
+ } else {
+ can_lvds = true;
+ }
+ } else {
+ can_lvds = false;
+ }
+
ret = sun4i_tcon_init_clocks(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON clocks\n");
@@ -708,10 +991,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
goto err_free_clocks;
}
- ret = sun4i_dclk_create(dev, tcon);
- if (ret) {
- dev_err(dev, "Couldn't create our TCON dot clock\n");
- goto err_free_clocks;
+ if (tcon->quirks->has_channel_0) {
+ ret = sun4i_dclk_create(dev, tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't create our TCON dot clock\n");
+ goto err_free_clocks;
+ }
}
ret = sun4i_tcon_init_irq(dev, tcon);
@@ -727,7 +1012,21 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
goto err_free_dotclock;
}
- ret = sun4i_rgb_init(drm, tcon);
+ /*
+ * If we have an LVDS panel connected to the TCON, we should
+ * just probe the LVDS connector. Otherwise, just probe RGB as
+ * we used to.
+ */
+ remote = of_graph_get_remote_node(dev->of_node, 1, 0);
+ if (of_device_is_compatible(remote, "panel-lvds"))
+ if (can_lvds)
+ ret = sun4i_lvds_init(drm, tcon);
+ else
+ ret = -EINVAL;
+ else
+ ret = sun4i_rgb_init(drm, tcon);
+ of_node_put(remote);
+
if (ret < 0)
goto err_free_dotclock;
@@ -755,7 +1054,8 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
return 0;
err_free_dotclock:
- sun4i_dclk_free(tcon);
+ if (tcon->quirks->has_channel_0)
+ sun4i_dclk_free(tcon);
err_free_clocks:
sun4i_tcon_free_clocks(tcon);
err_assert_reset:
@@ -769,7 +1069,8 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
struct sun4i_tcon *tcon = dev_get_drvdata(dev);
list_del(&tcon->list);
- sun4i_dclk_free(tcon);
+ if (tcon->quirks->has_channel_0)
+ sun4i_dclk_free(tcon);
sun4i_tcon_free_clocks(tcon);
}
@@ -866,51 +1167,83 @@ static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon,
}
static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
+ .has_channel_0 = true,
.has_channel_1 = true,
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
+ .has_channel_0 = true,
.has_channel_1 = true,
.set_mux = sun5i_a13_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
+ .has_channel_0 = true,
.has_channel_1 = true,
+ .has_lvds_alt = true,
.needs_de_be_mux = true,
.set_mux = sun6i_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
+ .has_channel_0 = true,
.has_channel_1 = true,
.needs_de_be_mux = true,
};
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
+ .has_channel_0 = true,
.has_channel_1 = true,
/* Same display pipeline structure as A10 */
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
- /* nothing is supported */
+ .has_channel_0 = true,
+ .has_lvds_alt = true,
+};
+
+static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
+ .supports_lvds = true,
+ .has_channel_0 = true,
+};
+
+static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
+ .has_channel_1 = true,
};
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
- /* nothing is supported */
+ .has_channel_0 = true,
+};
+
+static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
+ .has_channel_0 = true,
+ .needs_edp_reset = true,
+};
+
+static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
+ .has_channel_1 = true,
+ .needs_edp_reset = true,
};
-static const struct of_device_id sun4i_tcon_of_table[] = {
+/* sun4i_drv uses this list to check if a device node is a TCON */
+const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun4i-a10-tcon", .data = &sun4i_a10_quirks },
{ .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
{ .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
{ .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
+ { .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
+ { .compatible = "allwinner,sun8i-a83t-tcon-tv", .data = &sun8i_a83t_tv_quirks },
{ .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks },
+ { .compatible = "allwinner,sun9i-a80-tcon-lcd", .data = &sun9i_a80_tcon_lcd_quirks },
+ { .compatible = "allwinner,sun9i-a80-tcon-tv", .data = &sun9i_a80_tcon_tv_quirks },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
+EXPORT_SYMBOL(sun4i_tcon_of_table);
static struct platform_driver sun4i_tcon_platform_driver = {
.probe = sun4i_tcon_probe,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index f61bf6d..161e094 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -70,7 +70,21 @@
#define SUN4I_TCON0_TTL2_REG 0x78
#define SUN4I_TCON0_TTL3_REG 0x7c
#define SUN4I_TCON0_TTL4_REG 0x80
+
#define SUN4I_TCON0_LVDS_IF_REG 0x84
+#define SUN4I_TCON0_LVDS_IF_EN BIT(31)
+#define SUN4I_TCON0_LVDS_IF_BITWIDTH_MASK BIT(26)
+#define SUN4I_TCON0_LVDS_IF_BITWIDTH_18BITS (1 << 26)
+#define SUN4I_TCON0_LVDS_IF_BITWIDTH_24BITS (0 << 26)
+#define SUN4I_TCON0_LVDS_IF_CLK_SEL_MASK BIT(20)
+#define SUN4I_TCON0_LVDS_IF_CLK_SEL_TCON0 (1 << 20)
+#define SUN4I_TCON0_LVDS_IF_CLK_POL_MASK BIT(4)
+#define SUN4I_TCON0_LVDS_IF_CLK_POL_NORMAL (1 << 4)
+#define SUN4I_TCON0_LVDS_IF_CLK_POL_INV (0 << 4)
+#define SUN4I_TCON0_LVDS_IF_DATA_POL_MASK GENMASK(3, 0)
+#define SUN4I_TCON0_LVDS_IF_DATA_POL_NORMAL (0xf)
+#define SUN4I_TCON0_LVDS_IF_DATA_POL_INV (0)
+
#define SUN4I_TCON0_IO_POL_REG 0x88
#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
@@ -131,6 +145,16 @@
#define SUN4I_TCON_CEU_RANGE_G_REG 0x144
#define SUN4I_TCON_CEU_RANGE_B_REG 0x148
#define SUN4I_TCON_MUX_CTRL_REG 0x200
+
+#define SUN4I_TCON0_LVDS_ANA0_REG 0x220
+#define SUN6I_TCON0_LVDS_ANA0_EN_MB BIT(31)
+#define SUN6I_TCON0_LVDS_ANA0_EN_LDO BIT(30)
+#define SUN6I_TCON0_LVDS_ANA0_EN_DRVC BIT(24)
+#define SUN6I_TCON0_LVDS_ANA0_EN_DRVD(x) (((x) & 0xf) << 20)
+#define SUN6I_TCON0_LVDS_ANA0_C(x) (((x) & 3) << 17)
+#define SUN6I_TCON0_LVDS_ANA0_V(x) (((x) & 3) << 8)
+#define SUN6I_TCON0_LVDS_ANA0_PD(x) (((x) & 3) << 4)
+
#define SUN4I_TCON1_FILL_CTL_REG 0x300
#define SUN4I_TCON1_FILL_BEG0_REG 0x304
#define SUN4I_TCON1_FILL_END0_REG 0x308
@@ -148,8 +172,12 @@
struct sun4i_tcon;
struct sun4i_tcon_quirks {
+ bool has_channel_0; /* a83t does not have channel 0 on second TCON */
bool has_channel_1; /* a33 does not have channel 1 */
+ bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */
bool needs_de_be_mux; /* sun6i needs mux to select backend */
+ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
+ bool supports_lvds; /* Does the TCON support an LVDS output? */
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
@@ -167,11 +195,17 @@ struct sun4i_tcon {
struct clk *sclk0;
struct clk *sclk1;
+ /* Possible mux for the LVDS clock */
+ struct clk *lvds_pll;
+
/* Pixel clock */
struct clk *dclk;
+ u8 dclk_max_div;
+ u8 dclk_min_div;
/* Reset control */
struct reset_control *lcd_rst;
+ struct reset_control *lvds_rst;
struct drm_panel *panel;
@@ -197,4 +231,6 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
void sun4i_tcon_set_status(struct sun4i_tcon *crtc,
const struct drm_encoder *encoder, bool enable);
+extern const struct of_device_id sun4i_tcon_of_table[];
+
#endif /* __SUN4I_TCON_H__ */
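The new dclk_min_div/dclk_max_div fields bound the divider the dot-clock code may pick from the TCON channel 0 clock. A rough sketch of how such a search could look — the function and variable names are hypothetical and not taken from sun4i_dotclock.c:

	static unsigned long pick_dclk_rate(const struct sun4i_tcon *tcon,
					    unsigned long parent_rate,
					    unsigned long target)
	{
		unsigned long best = 0, best_diff = ULONG_MAX;
		unsigned int div;

		for (div = tcon->dclk_min_div; div <= tcon->dclk_max_div; div++) {
			unsigned long rate = parent_rate / div;
			unsigned long diff = rate > target ? rate - target
							   : target - rate;

			/* keep the divider whose rate lands closest to the target */
			if (diff < best_diff) {
				best_diff = diff;
				best = rate;
			}
		}

		return best;
	}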
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index 09bba85..b5e071a 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -101,6 +101,7 @@ static const struct of_device_id sun6i_drc_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-drc" },
{ .compatible = "allwinner,sun6i-a31s-drc" },
{ .compatible = "allwinner,sun8i-a33-drc" },
+ { .compatible = "allwinner,sun9i-a80-drc" },
{ }
};
MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
new file mode 100644
index 0000000..b14925b
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+
+#include "sun8i_csc.h"
+#include "sun8i_mixer.h"
+
+static const u32 ccsc_base[2][2] = {
+ {CCSC00_OFFSET, CCSC01_OFFSET},
+ {CCSC10_OFFSET, CCSC11_OFFSET},
+};
+
+/*
+ * Factors are in two's complement format, with 10 bits for the fractional part.
+ * The first three values in each line are multiplication factors and the last
+ * value is a constant, which is added at the end.
+ */
+static const u32 yuv2rgb[] = {
+ 0x000004A8, 0x00000000, 0x00000662, 0xFFFC845A,
+ 0x000004A8, 0xFFFFFE6F, 0xFFFFFCBF, 0x00021DF4,
+ 0x000004A8, 0x00000813, 0x00000000, 0xFFFBAC4A,
+};
+
+static const u32 yvu2rgb[] = {
+ 0x000004A8, 0x00000662, 0x00000000, 0xFFFC845A,
+ 0x000004A8, 0xFFFFFCBF, 0xFFFFFE6F, 0x00021DF4,
+ 0x000004A8, 0x00000000, 0x00000813, 0xFFFBAC4A,
+};
+
+static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
+ enum sun8i_csc_mode mode)
+{
+ const u32 *table;
+ int i, data;
+
+ switch (mode) {
+ case SUN8I_CSC_MODE_YUV2RGB:
+ table = yuv2rgb;
+ break;
+ case SUN8I_CSC_MODE_YVU2RGB:
+ table = yvu2rgb;
+ break;
+ default:
+ DRM_WARN("Wrong CSC mode specified.\n");
+ return;
+ }
+
+ for (i = 0; i < 12; i++) {
+ data = table[i];
+ /* For some reason, 0x200 must be added to constant parts */
+ if (((i + 1) & 3) == 0)
+ data += 0x200;
+ regmap_write(map, SUN8I_CSC_COEFF(base, i), data);
+ }
+}
+
+static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
+{
+ u32 val;
+
+ if (enable)
+ val = SUN8I_CSC_CTRL_EN;
+ else
+ val = 0;
+
+ regmap_update_bits(map, SUN8I_CSC_CTRL(base), SUN8I_CSC_CTRL_EN, val);
+}
+
+void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
+ enum sun8i_csc_mode mode)
+{
+ u32 base;
+
+ base = ccsc_base[mixer->cfg->ccsc][layer];
+
+ sun8i_csc_set_coefficients(mixer->engine.regs, base, mode);
+}
+
+void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
+{
+ u32 base;
+
+ base = ccsc_base[mixer->cfg->ccsc][layer];
+
+ sun8i_csc_enable(mixer->engine.regs, base, enable);
+}
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
new file mode 100644
index 0000000..880e8fb
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN8I_CSC_H_
+#define _SUN8I_CSC_H_
+
+struct sun8i_mixer;
+
+/* VI channel CSC units offsets */
+#define CCSC00_OFFSET 0xAA050
+#define CCSC01_OFFSET 0xFA000
+#define CCSC10_OFFSET 0xA0000
+#define CCSC11_OFFSET 0xF0000
+
+#define SUN8I_CSC_CTRL(base) (base + 0x0)
+#define SUN8I_CSC_COEFF(base, i) (base + 0x10 + 4 * i)
+
+#define SUN8I_CSC_CTRL_EN BIT(0)
+
+enum sun8i_csc_mode {
+ SUN8I_CSC_MODE_OFF,
+ SUN8I_CSC_MODE_YUV2RGB,
+ SUN8I_CSC_MODE_YVU2RGB,
+};
+
+void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
+ enum sun8i_csc_mode mode);
+void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable);
+
+#endif
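The coefficient tables in sun8i_csc.c store signed factors with a 10-bit fractional part, plus an additive constant per row. As a purely illustrative sketch — the helper and the assumption that the constant shares the 10-bit scale are not taken from the driver — one row would be applied to a pixel component like this:

	static u32 csc_apply_row(const u32 *row, s32 y, s32 u, s32 v)
	{
		/* row[0..2]: two's complement factors with 10 fractional bits;
		 * row[3]: constant added before the final shift. */
		s64 acc = (s64)(s32)row[0] * y +
			  (s64)(s32)row[1] * u +
			  (s64)(s32)row[2] * v +
			  (s32)row[3];

		return clamp_t(s64, acc >> 10, 0, 255);
	}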
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
new file mode 100644
index 0000000..9f40a44
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "sun8i_dw_hdmi.h"
+
+static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
+
+ clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
+}
+
+static const struct drm_encoder_helper_funcs
+sun8i_dw_hdmi_encoder_helper_funcs = {
+ .mode_set = sun8i_dw_hdmi_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static enum drm_mode_status
+sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock > 297000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_hdmi_plat_data *plat_data;
+ struct drm_device *drm = data;
+ struct device_node *phy_node;
+ struct drm_encoder *encoder;
+ struct sun8i_dw_hdmi *hdmi;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ plat_data = &hdmi->plat_data;
+ hdmi->dev = &pdev->dev;
+ encoder = &hdmi->encoder;
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ hdmi->rst_ctrl = devm_reset_control_get(dev, "ctrl");
+ if (IS_ERR(hdmi->rst_ctrl)) {
+ dev_err(dev, "Could not get ctrl reset control\n");
+ return PTR_ERR(hdmi->rst_ctrl);
+ }
+
+ hdmi->clk_tmds = devm_clk_get(dev, "tmds");
+ if (IS_ERR(hdmi->clk_tmds)) {
+ dev_err(dev, "Couldn't get the tmds clock\n");
+ return PTR_ERR(hdmi->clk_tmds);
+ }
+
+ ret = reset_control_deassert(hdmi->rst_ctrl);
+ if (ret) {
+ dev_err(dev, "Could not deassert ctrl reset control\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(hdmi->clk_tmds);
+ if (ret) {
+ dev_err(dev, "Could not enable tmds clock\n");
+ goto err_assert_ctrl_reset;
+ }
+
+ phy_node = of_parse_phandle(dev->of_node, "phys", 0);
+ if (!phy_node) {
+ dev_err(dev, "Can't found PHY phandle\n");
+ goto err_disable_clk_tmds;
+ }
+
+ ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
+ of_node_put(phy_node);
+ if (ret) {
+ dev_err(dev, "Couldn't get the HDMI PHY\n");
+ goto err_disable_clk_tmds;
+ }
+
+ drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &sun8i_dw_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ sun8i_hdmi_phy_init(hdmi->phy);
+
+ plat_data->mode_valid = &sun8i_dw_hdmi_mode_valid;
+ plat_data->phy_ops = sun8i_hdmi_phy_get_ops();
+ plat_data->phy_name = "sun8i_dw_hdmi_phy";
+ plat_data->phy_data = hdmi->phy;
+
+ platform_set_drvdata(pdev, hdmi);
+
+ hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
+
+ /*
+ * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+ * which would have called the encoder cleanup. Do it manually.
+ */
+ if (IS_ERR(hdmi->hdmi)) {
+ ret = PTR_ERR(hdmi->hdmi);
+ goto cleanup_encoder;
+ }
+
+ return 0;
+
+cleanup_encoder:
+ drm_encoder_cleanup(encoder);
+ sun8i_hdmi_phy_remove(hdmi);
+err_disable_clk_tmds:
+ clk_disable_unprepare(hdmi->clk_tmds);
+err_assert_ctrl_reset:
+ reset_control_assert(hdmi->rst_ctrl);
+
+ return ret;
+}
+
+static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_unbind(hdmi->hdmi);
+ sun8i_hdmi_phy_remove(hdmi);
+ clk_disable_unprepare(hdmi->clk_tmds);
+ reset_control_assert(hdmi->rst_ctrl);
+}
+
+static const struct component_ops sun8i_dw_hdmi_ops = {
+ .bind = sun8i_dw_hdmi_bind,
+ .unbind = sun8i_dw_hdmi_unbind,
+};
+
+static int sun8i_dw_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &sun8i_dw_hdmi_ops);
+}
+
+static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sun8i_dw_hdmi_ops);
+
+ return 0;
+}
+
+static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
+ { .compatible = "allwinner,sun8i-a83t-dw-hdmi" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
+
+struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
+ .probe = sun8i_dw_hdmi_probe,
+ .remove = sun8i_dw_hdmi_remove,
+ .driver = {
+ .name = "sun8i-dw-hdmi",
+ .of_match_table = sun8i_dw_hdmi_dt_ids,
+ },
+};
+module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
+
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
new file mode 100644
index 0000000..79154f0
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#ifndef _SUN8I_DW_HDMI_H_
+#define _SUN8I_DW_HDMI_H_
+
+#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_encoder.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define SUN8I_HDMI_PHY_DBG_CTRL_REG 0x0000
+#define SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK BIT(0)
+#define SUN8I_HDMI_PHY_DBG_CTRL_POL_MASK GENMASK(15, 8)
+#define SUN8I_HDMI_PHY_DBG_CTRL_POL_NHSYNC BIT(8)
+#define SUN8I_HDMI_PHY_DBG_CTRL_POL_NVSYNC BIT(9)
+#define SUN8I_HDMI_PHY_DBG_CTRL_ADDR_MASK GENMASK(23, 16)
+#define SUN8I_HDMI_PHY_DBG_CTRL_ADDR(addr) (addr << 16)
+
+#define SUN8I_HDMI_PHY_REXT_CTRL_REG 0x0004
+#define SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN BIT(31)
+
+#define SUN8I_HDMI_PHY_READ_EN_REG 0x0010
+#define SUN8I_HDMI_PHY_READ_EN_MAGIC 0x54524545
+
+#define SUN8I_HDMI_PHY_UNSCRAMBLE_REG 0x0014
+#define SUN8I_HDMI_PHY_UNSCRAMBLE_MAGIC 0x42494E47
+
+#define SUN8I_HDMI_PHY_ANA_CFG1_REG 0x0020
+#define SUN8I_HDMI_PHY_ANA_CFG1_REG_SWI BIT(31)
+#define SUN8I_HDMI_PHY_ANA_CFG1_REG_PWEND BIT(30)
+#define SUN8I_HDMI_PHY_ANA_CFG1_REG_PWENC BIT(29)
+#define SUN8I_HDMI_PHY_ANA_CFG1_REG_CALSW BIT(28)
+#define SUN8I_HDMI_PHY_ANA_CFG1_REG_SVRCAL(x) ((x) << 26)
+#define SUN8I_HDMI_PHY_ANA_CFG1_REG_SVBH(x) ((x) << 24)
+#define SUN8I_HDMI_PHY_ANA_CFG1_AMP_OPT BIT(23)
+#define SUN8I_HDMI_PHY_ANA_CFG1_EMP_OPT BIT(22)
+#define SUN8I_HDMI_PHY_ANA_CFG1_AMPCK_OPT BIT(21)
+#define SUN8I_HDMI_PHY_ANA_CFG1_EMPCK_OPT BIT(20)
+#define SUN8I_HDMI_PHY_ANA_CFG1_ENRCAL BIT(19)
+#define SUN8I_HDMI_PHY_ANA_CFG1_ENCALOG BIT(18)
+#define SUN8I_HDMI_PHY_ANA_CFG1_REG_SCKTMDS BIT(17)
+#define SUN8I_HDMI_PHY_ANA_CFG1_TMDSCLK_EN BIT(16)
+#define SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK GENMASK(15, 12)
+#define SUN8I_HDMI_PHY_ANA_CFG1_TXEN_ALL (0xf << 12)
+#define SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDSCLK BIT(11)
+#define SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS2 BIT(10)
+#define SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS1 BIT(9)
+#define SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS0 BIT(8)
+#define SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDSCLK BIT(7)
+#define SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS2 BIT(6)
+#define SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS1 BIT(5)
+#define SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS0 BIT(4)
+#define SUN8I_HDMI_PHY_ANA_CFG1_CKEN BIT(3)
+#define SUN8I_HDMI_PHY_ANA_CFG1_LDOEN BIT(2)
+#define SUN8I_HDMI_PHY_ANA_CFG1_ENVBS BIT(1)
+#define SUN8I_HDMI_PHY_ANA_CFG1_ENBI BIT(0)
+
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG 0x0024
+#define SUN8I_HDMI_PHY_ANA_CFG2_M_EN BIT(31)
+#define SUN8I_HDMI_PHY_ANA_CFG2_PLLDBEN BIT(30)
+#define SUN8I_HDMI_PHY_ANA_CFG2_SEN BIT(29)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_HPDPD BIT(28)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_HPDEN BIT(27)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_PLRCK BIT(26)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_PLR(x) ((x) << 23)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_DENCK BIT(22)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_DEN BIT(21)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_CD(x) ((x) << 19)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_CKSS(x) ((x) << 17)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSWCK BIT(16)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSW BIT(15)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_CSMPS(x) ((x) << 13)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(x) ((x) << 10)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_BOOSTCK(x) ((x) << 8)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_BOOST(x) ((x) << 6)
+#define SUN8I_HDMI_PHY_ANA_CFG2_REG_RESDI(x) ((x) << 0)
+
+#define SUN8I_HDMI_PHY_ANA_CFG3_REG 0x0028
+#define SUN8I_HDMI_PHY_ANA_CFG3_REG_SLOWCK(x) ((x) << 30)
+#define SUN8I_HDMI_PHY_ANA_CFG3_REG_SLOW(x) ((x) << 28)
+#define SUN8I_HDMI_PHY_ANA_CFG3_REG_WIRE(x) ((x) << 18)
+#define SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(x) ((x) << 14)
+#define SUN8I_HDMI_PHY_ANA_CFG3_REG_EMPCK(x) ((x) << 11)
+#define SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(x) ((x) << 7)
+#define SUN8I_HDMI_PHY_ANA_CFG3_REG_EMP(x) ((x) << 4)
+#define SUN8I_HDMI_PHY_ANA_CFG3_SDAPD BIT(3)
+#define SUN8I_HDMI_PHY_ANA_CFG3_SDAEN BIT(2)
+#define SUN8I_HDMI_PHY_ANA_CFG3_SCLPD BIT(1)
+#define SUN8I_HDMI_PHY_ANA_CFG3_SCLEN BIT(0)
+
+#define SUN8I_HDMI_PHY_PLL_CFG1_REG 0x002c
+#define SUN8I_HDMI_PHY_PLL_CFG1_REG_OD1 BIT(31)
+#define SUN8I_HDMI_PHY_PLL_CFG1_REG_OD BIT(30)
+#define SUN8I_HDMI_PHY_PLL_CFG1_LDO2_EN BIT(29)
+#define SUN8I_HDMI_PHY_PLL_CFG1_LDO1_EN BIT(28)
+#define SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 BIT(27)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_PLLEN BIT(25)
+#define SUN8I_HDMI_PHY_PLL_CFG1_LDO_VSET(x) ((x) << 22)
+#define SUN8I_HDMI_PHY_PLL_CFG1_UNKNOWN(x) ((x) << 20)
+#define SUN8I_HDMI_PHY_PLL_CFG1_PLLDBEN BIT(19)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CS BIT(18)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CP_S(x) ((x) << 13)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(x) ((x) << 7)
+#define SUN8I_HDMI_PHY_PLL_CFG1_BWS BIT(6)
+#define SUN8I_HDMI_PHY_PLL_CFG1_B_IN_MSK GENMASK(5, 0)
+#define SUN8I_HDMI_PHY_PLL_CFG1_B_IN_SHIFT 0
+
+#define SUN8I_HDMI_PHY_PLL_CFG2_REG 0x0030
+#define SUN8I_HDMI_PHY_PLL_CFG2_SV_H BIT(31)
+#define SUN8I_HDMI_PHY_PLL_CFG2_PDCLKSEL(x) ((x) << 29)
+#define SUN8I_HDMI_PHY_PLL_CFG2_CLKSTEP(x) ((x) << 27)
+#define SUN8I_HDMI_PHY_PLL_CFG2_PSET(x) ((x) << 24)
+#define SUN8I_HDMI_PHY_PLL_CFG2_PCLK_SEL BIT(23)
+#define SUN8I_HDMI_PHY_PLL_CFG2_AUTOSYNC_DIS BIT(22)
+#define SUN8I_HDMI_PHY_PLL_CFG2_VREG2_OUT_EN BIT(21)
+#define SUN8I_HDMI_PHY_PLL_CFG2_VREG1_OUT_EN BIT(20)
+#define SUN8I_HDMI_PHY_PLL_CFG2_VCOGAIN_EN BIT(19)
+#define SUN8I_HDMI_PHY_PLL_CFG2_VCOGAIN(x) ((x) << 16)
+#define SUN8I_HDMI_PHY_PLL_CFG2_VCO_S(x) ((x) << 12)
+#define SUN8I_HDMI_PHY_PLL_CFG2_VCO_RST_IN BIT(11)
+#define SUN8I_HDMI_PHY_PLL_CFG2_SINT_FRAC BIT(10)
+#define SUN8I_HDMI_PHY_PLL_CFG2_SDIV2 BIT(9)
+#define SUN8I_HDMI_PHY_PLL_CFG2_S(x) ((x) << 6)
+#define SUN8I_HDMI_PHY_PLL_CFG2_S6P25_7P5 BIT(5)
+#define SUN8I_HDMI_PHY_PLL_CFG2_S5_7 BIT(4)
+#define SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK GENMASK(3, 0)
+#define SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_SHIFT 0
+#define SUN8I_HDMI_PHY_PLL_CFG2_PREDIV(x) (((x) - 1) << 0)
+
+#define SUN8I_HDMI_PHY_PLL_CFG3_REG 0x0034
+#define SUN8I_HDMI_PHY_PLL_CFG3_SOUT_DIV2 BIT(0)
+
+#define SUN8I_HDMI_PHY_ANA_STS_REG 0x0038
+#define SUN8I_HDMI_PHY_ANA_STS_B_OUT_SHIFT 11
+#define SUN8I_HDMI_PHY_ANA_STS_B_OUT_MSK GENMASK(16, 11)
+#define SUN8I_HDMI_PHY_ANA_STS_RCALEND2D BIT(7)
+#define SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK GENMASK(5, 0)
+
+#define SUN8I_HDMI_PHY_CEC_REG 0x003c
+
+struct sun8i_hdmi_phy;
+
+struct sun8i_hdmi_phy_variant {
+ bool has_phy_clk;
+ void (*phy_init)(struct sun8i_hdmi_phy *phy);
+ void (*phy_disable)(struct dw_hdmi *hdmi,
+ struct sun8i_hdmi_phy *phy);
+ int (*phy_config)(struct dw_hdmi *hdmi,
+ struct sun8i_hdmi_phy *phy,
+ unsigned int clk_rate);
+};
+
+struct sun8i_hdmi_phy {
+ struct clk *clk_bus;
+ struct clk *clk_mod;
+ struct clk *clk_phy;
+ struct clk *clk_pll0;
+ unsigned int rcal;
+ struct regmap *regs;
+ struct reset_control *rst_phy;
+ struct sun8i_hdmi_phy_variant *variant;
+};
+
+struct sun8i_dw_hdmi {
+ struct clk *clk_tmds;
+ struct device *dev;
+ struct dw_hdmi *hdmi;
+ struct drm_encoder encoder;
+ struct sun8i_hdmi_phy *phy;
+ struct dw_hdmi_plat_data plat_data;
+ struct reset_control *rst_ctrl;
+};
+
+static inline struct sun8i_dw_hdmi *
+encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct sun8i_dw_hdmi, encoder);
+}
+
+int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
+void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
+
+void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void);
+
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev);
+
+#endif /* _SUN8I_DW_HDMI_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
new file mode 100644
index 0000000..5a52fc4
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#include <linux/delay.h>
+#include <linux/of_address.h>
+
+#include "sun8i_dw_hdmi.h"
+
+/*
+ * The address can actually be any value. It is set here to the same
+ * value used in the BSP driver.
+ */
+#define I2C_ADDR 0x69
+
+static int sun8i_hdmi_phy_config_a83t(struct dw_hdmi *hdmi,
+ struct sun8i_hdmi_phy *phy,
+ unsigned int clk_rate)
+{
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
+ SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN,
+ SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN);
+
+ /* power down */
+ dw_hdmi_phy_gen2_txpwron(hdmi, 0);
+ dw_hdmi_phy_gen2_pddq(hdmi, 1);
+
+ dw_hdmi_phy_reset(hdmi);
+
+ dw_hdmi_phy_gen2_pddq(hdmi, 0);
+
+ dw_hdmi_phy_i2c_set_addr(hdmi, I2C_ADDR);
+
+ /*
+ * Values are taken from the BSP HDMI driver. Although AW didn't
+ * release any documentation, an explanation of these values can
+ * be found in the i.MX 6Dual/6Quad Reference Manual.
+ */
+ if (clk_rate <= 27000000) {
+ dw_hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
+ dw_hdmi_phy_i2c_write(hdmi, 0x08da, 0x10);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0007, 0x19);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0318, 0x0e);
+ dw_hdmi_phy_i2c_write(hdmi, 0x8009, 0x09);
+ } else if (clk_rate <= 74250000) {
+ dw_hdmi_phy_i2c_write(hdmi, 0x0540, 0x06);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x10);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0007, 0x19);
+ dw_hdmi_phy_i2c_write(hdmi, 0x02b5, 0x0e);
+ dw_hdmi_phy_i2c_write(hdmi, 0x8009, 0x09);
+ } else if (clk_rate <= 148500000) {
+ dw_hdmi_phy_i2c_write(hdmi, 0x04a0, 0x06);
+ dw_hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x10);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0002, 0x19);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0021, 0x0e);
+ dw_hdmi_phy_i2c_write(hdmi, 0x8029, 0x09);
+ } else {
+ dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x06);
+ dw_hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x10);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0002, 0x19);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x0e);
+ dw_hdmi_phy_i2c_write(hdmi, 0x802b, 0x09);
+ }
+
+ dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x1e);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x13);
+ dw_hdmi_phy_i2c_write(hdmi, 0x0000, 0x17);
+
+ dw_hdmi_phy_gen2_txpwron(hdmi, 1);
+
+ return 0;
+}
+
+static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
+ struct sun8i_hdmi_phy *phy,
+ unsigned int clk_rate)
+{
+ u32 pll_cfg1_init;
+ u32 pll_cfg2_init;
+ u32 ana_cfg1_end;
+ u32 ana_cfg2_init;
+ u32 ana_cfg3_init;
+ u32 b_offset = 0;
+ u32 val;
+
+ /* bandwidth / frequency independent settings */
+
+ pll_cfg1_init = SUN8I_HDMI_PHY_PLL_CFG1_LDO2_EN |
+ SUN8I_HDMI_PHY_PLL_CFG1_LDO1_EN |
+ SUN8I_HDMI_PHY_PLL_CFG1_LDO_VSET(7) |
+ SUN8I_HDMI_PHY_PLL_CFG1_UNKNOWN(1) |
+ SUN8I_HDMI_PHY_PLL_CFG1_PLLDBEN |
+ SUN8I_HDMI_PHY_PLL_CFG1_CS |
+ SUN8I_HDMI_PHY_PLL_CFG1_CP_S(2) |
+ SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(63) |
+ SUN8I_HDMI_PHY_PLL_CFG1_BWS;
+
+ pll_cfg2_init = SUN8I_HDMI_PHY_PLL_CFG2_SV_H |
+ SUN8I_HDMI_PHY_PLL_CFG2_VCOGAIN_EN |
+ SUN8I_HDMI_PHY_PLL_CFG2_SDIV2;
+
+ ana_cfg1_end = SUN8I_HDMI_PHY_ANA_CFG1_REG_SVBH(1) |
+ SUN8I_HDMI_PHY_ANA_CFG1_AMP_OPT |
+ SUN8I_HDMI_PHY_ANA_CFG1_EMP_OPT |
+ SUN8I_HDMI_PHY_ANA_CFG1_AMPCK_OPT |
+ SUN8I_HDMI_PHY_ANA_CFG1_EMPCK_OPT |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENRCAL |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENCALOG |
+ SUN8I_HDMI_PHY_ANA_CFG1_REG_SCKTMDS |
+ SUN8I_HDMI_PHY_ANA_CFG1_TMDSCLK_EN |
+ SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK |
+ SUN8I_HDMI_PHY_ANA_CFG1_TXEN_ALL |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDSCLK |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS2 |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS1 |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS0 |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS2 |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS1 |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS0 |
+ SUN8I_HDMI_PHY_ANA_CFG1_CKEN |
+ SUN8I_HDMI_PHY_ANA_CFG1_LDOEN |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENVBS |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENBI;
+
+ ana_cfg2_init = SUN8I_HDMI_PHY_ANA_CFG2_M_EN |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_DENCK |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_DEN |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_CKSS(1) |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_CSMPS(1);
+
+ ana_cfg3_init = SUN8I_HDMI_PHY_ANA_CFG3_REG_WIRE(0x3e0) |
+ SUN8I_HDMI_PHY_ANA_CFG3_SDAEN |
+ SUN8I_HDMI_PHY_ANA_CFG3_SCLEN;
+
+ /* bandwidth / frequency dependent settings */
+ if (clk_rate <= 27000000) {
+ pll_cfg1_init |= SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 |
+ SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(32);
+ pll_cfg2_init |= SUN8I_HDMI_PHY_PLL_CFG2_VCO_S(4) |
+ SUN8I_HDMI_PHY_PLL_CFG2_S(4);
+ ana_cfg1_end |= SUN8I_HDMI_PHY_ANA_CFG1_REG_CALSW;
+ ana_cfg2_init |= SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(4) |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_RESDI(phy->rcal);
+ ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(3) |
+ SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(5);
+ } else if (clk_rate <= 74250000) {
+ pll_cfg1_init |= SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 |
+ SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(32);
+ pll_cfg2_init |= SUN8I_HDMI_PHY_PLL_CFG2_VCO_S(4) |
+ SUN8I_HDMI_PHY_PLL_CFG2_S(5);
+ ana_cfg1_end |= SUN8I_HDMI_PHY_ANA_CFG1_REG_CALSW;
+ ana_cfg2_init |= SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(4) |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_RESDI(phy->rcal);
+ ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(5) |
+ SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(7);
+ } else if (clk_rate <= 148500000) {
+ pll_cfg1_init |= SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33 |
+ SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(32);
+ pll_cfg2_init |= SUN8I_HDMI_PHY_PLL_CFG2_VCO_S(4) |
+ SUN8I_HDMI_PHY_PLL_CFG2_S(6);
+ ana_cfg2_init |= SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSWCK |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSW |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(2);
+ ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(7) |
+ SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(9);
+ } else {
+ b_offset = 2;
+ pll_cfg1_init |= SUN8I_HDMI_PHY_PLL_CFG1_CNT_INT(63);
+ pll_cfg2_init |= SUN8I_HDMI_PHY_PLL_CFG2_VCO_S(6) |
+ SUN8I_HDMI_PHY_PLL_CFG2_S(7);
+ ana_cfg2_init |= SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSWCK |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSW |
+ SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(4);
+ ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(9) |
+ SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(13);
+ }
+
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK, 0);
+
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, pll_cfg1_init);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG,
+ (u32)~SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK,
+ pll_cfg2_init);
+ usleep_range(10000, 15000);
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG3_REG,
+ SUN8I_HDMI_PHY_PLL_CFG3_SOUT_DIV2);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ SUN8I_HDMI_PHY_PLL_CFG1_PLLEN,
+ SUN8I_HDMI_PHY_PLL_CFG1_PLLEN);
+ msleep(100);
+
+ /* get B value */
+ regmap_read(phy->regs, SUN8I_HDMI_PHY_ANA_STS_REG, &val);
+ val = (val & SUN8I_HDMI_PHY_ANA_STS_B_OUT_MSK) >>
+ SUN8I_HDMI_PHY_ANA_STS_B_OUT_SHIFT;
+ val = min(val + b_offset, (u32)0x3f);
+
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ SUN8I_HDMI_PHY_PLL_CFG1_REG_OD1 |
+ SUN8I_HDMI_PHY_PLL_CFG1_REG_OD,
+ SUN8I_HDMI_PHY_PLL_CFG1_REG_OD1 |
+ SUN8I_HDMI_PHY_PLL_CFG1_REG_OD);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+ SUN8I_HDMI_PHY_PLL_CFG1_B_IN_MSK,
+ val << SUN8I_HDMI_PHY_PLL_CFG1_B_IN_SHIFT);
+ msleep(100);
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG, ana_cfg1_end);
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG2_REG, ana_cfg2_init);
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG3_REG, ana_cfg3_init);
+
+ return 0;
+}
+
+static int sun8i_hdmi_phy_config(struct dw_hdmi *hdmi, void *data,
+ struct drm_display_mode *mode)
+{
+ struct sun8i_hdmi_phy *phy = (struct sun8i_hdmi_phy *)data;
+ u32 val = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= SUN8I_HDMI_PHY_DBG_CTRL_POL_NHSYNC;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= SUN8I_HDMI_PHY_DBG_CTRL_POL_NVSYNC;
+
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
+ SUN8I_HDMI_PHY_DBG_CTRL_POL_MASK, val);
+
+ if (phy->variant->has_phy_clk)
+ clk_set_rate(phy->clk_phy, mode->crtc_clock * 1000);
+
+ return phy->variant->phy_config(hdmi, phy, mode->crtc_clock * 1000);
+}
+
+static void sun8i_hdmi_phy_disable_a83t(struct dw_hdmi *hdmi,
+ struct sun8i_hdmi_phy *phy)
+{
+ dw_hdmi_phy_gen2_txpwron(hdmi, 0);
+ dw_hdmi_phy_gen2_pddq(hdmi, 1);
+
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
+ SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN, 0);
+}
+
+static void sun8i_hdmi_phy_disable_h3(struct dw_hdmi *hdmi,
+ struct sun8i_hdmi_phy *phy)
+{
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_LDOEN |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENVBS |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENBI);
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, 0);
+}
+
+static void sun8i_hdmi_phy_disable(struct dw_hdmi *hdmi, void *data)
+{
+ struct sun8i_hdmi_phy *phy = (struct sun8i_hdmi_phy *)data;
+
+ phy->variant->phy_disable(hdmi, phy);
+}
+
+static const struct dw_hdmi_phy_ops sun8i_hdmi_phy_ops = {
+ .init = &sun8i_hdmi_phy_config,
+ .disable = &sun8i_hdmi_phy_disable,
+ .read_hpd = &dw_hdmi_phy_read_hpd,
+ .update_hpd = &dw_hdmi_phy_update_hpd,
+ .setup_hpd = &dw_hdmi_phy_setup_hpd,
+};
+
+static void sun8i_hdmi_phy_init_a83t(struct sun8i_hdmi_phy *phy)
+{
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
+ SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK,
+ SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK);
+
+ /*
+	 * Set the PHY I2C address. It must match the address set by
+	 * dw_hdmi_phy_i2c_set_addr().
+ */
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
+ SUN8I_HDMI_PHY_DBG_CTRL_ADDR_MASK,
+ SUN8I_HDMI_PHY_DBG_CTRL_ADDR(I2C_ADDR));
+}
+
+static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
+{
+ unsigned int val;
+
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG, 0);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENBI,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENBI);
+ udelay(5);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_TMDSCLK_EN,
+ SUN8I_HDMI_PHY_ANA_CFG1_TMDSCLK_EN);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENVBS,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENVBS);
+ usleep_range(10, 20);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_LDOEN,
+ SUN8I_HDMI_PHY_ANA_CFG1_LDOEN);
+ udelay(5);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_CKEN,
+ SUN8I_HDMI_PHY_ANA_CFG1_CKEN);
+ usleep_range(40, 100);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENRCAL,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENRCAL);
+ usleep_range(100, 200);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENCALOG,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENCALOG);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS0 |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS1 |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS2,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS0 |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS1 |
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDS2);
+
+ /* wait for calibration to finish */
+ regmap_read_poll_timeout(phy->regs, SUN8I_HDMI_PHY_ANA_STS_REG, val,
+ (val & SUN8I_HDMI_PHY_ANA_STS_RCALEND2D),
+ 100, 2000);
+
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDSCLK,
+ SUN8I_HDMI_PHY_ANA_CFG1_ENP2S_TMDSCLK);
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS0 |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS1 |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS2 |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDSCLK,
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS0 |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS1 |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDS2 |
+ SUN8I_HDMI_PHY_ANA_CFG1_BIASEN_TMDSCLK);
+
+ /* enable DDC communication */
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG3_REG,
+ SUN8I_HDMI_PHY_ANA_CFG3_SCLEN |
+ SUN8I_HDMI_PHY_ANA_CFG3_SDAEN,
+ SUN8I_HDMI_PHY_ANA_CFG3_SCLEN |
+ SUN8I_HDMI_PHY_ANA_CFG3_SDAEN);
+
+ /* set HW control of CEC pins */
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_CEC_REG, 0);
+
+ /* read calibration data */
+ regmap_read(phy->regs, SUN8I_HDMI_PHY_ANA_STS_REG, &val);
+ phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2;
+}
+
+void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
+{
+ /* enable read access to HDMI controller */
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_READ_EN_REG,
+ SUN8I_HDMI_PHY_READ_EN_MAGIC);
+
+ /* unscramble register offsets */
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_UNSCRAMBLE_REG,
+ SUN8I_HDMI_PHY_UNSCRAMBLE_MAGIC);
+
+ phy->variant->phy_init(phy);
+}
+
+const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void)
+{
+ return &sun8i_hdmi_phy_ops;
+}
+
+static struct regmap_config sun8i_hdmi_phy_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SUN8I_HDMI_PHY_CEC_REG,
+ .name = "phy"
+};
+
+static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
+ .phy_init = &sun8i_hdmi_phy_init_a83t,
+ .phy_disable = &sun8i_hdmi_phy_disable_a83t,
+ .phy_config = &sun8i_hdmi_phy_config_a83t,
+};
+
+static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
+ .has_phy_clk = true,
+ .phy_init = &sun8i_hdmi_phy_init_h3,
+ .phy_disable = &sun8i_hdmi_phy_disable_h3,
+ .phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
+static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
+ {
+ .compatible = "allwinner,sun8i-a83t-hdmi-phy",
+ .data = &sun8i_a83t_hdmi_phy,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-hdmi-phy",
+ .data = &sun8i_h3_hdmi_phy,
+ },
+ { /* sentinel */ }
+};
+
+int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+{
+ const struct of_device_id *match;
+ struct device *dev = hdmi->dev;
+ struct sun8i_hdmi_phy *phy;
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ match = of_match_node(sun8i_hdmi_phy_of_table, node);
+ if (!match) {
+ dev_err(dev, "Incompatible HDMI PHY\n");
+ return -EINVAL;
+ }
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->variant = (struct sun8i_hdmi_phy_variant *)match->data;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "phy: Couldn't get our resources\n");
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "Couldn't map the HDMI PHY registers\n");
+ return PTR_ERR(regs);
+ }
+
+ phy->regs = devm_regmap_init_mmio(dev, regs,
+ &sun8i_hdmi_phy_regmap_config);
+ if (IS_ERR(phy->regs)) {
+ dev_err(dev, "Couldn't create the HDMI PHY regmap\n");
+ return PTR_ERR(phy->regs);
+ }
+
+ phy->clk_bus = of_clk_get_by_name(node, "bus");
+ if (IS_ERR(phy->clk_bus)) {
+ dev_err(dev, "Could not get bus clock\n");
+ return PTR_ERR(phy->clk_bus);
+ }
+
+ phy->clk_mod = of_clk_get_by_name(node, "mod");
+ if (IS_ERR(phy->clk_mod)) {
+ dev_err(dev, "Could not get mod clock\n");
+ ret = PTR_ERR(phy->clk_mod);
+ goto err_put_clk_bus;
+ }
+
+ if (phy->variant->has_phy_clk) {
+ phy->clk_pll0 = of_clk_get_by_name(node, "pll-0");
+ if (IS_ERR(phy->clk_pll0)) {
+ dev_err(dev, "Could not get pll-0 clock\n");
+ ret = PTR_ERR(phy->clk_pll0);
+ goto err_put_clk_mod;
+ }
+
+ ret = sun8i_phy_clk_create(phy, dev);
+ if (ret) {
+ dev_err(dev, "Couldn't create the PHY clock\n");
+ goto err_put_clk_pll0;
+ }
+ }
+
+ phy->rst_phy = of_reset_control_get_shared(node, "phy");
+ if (IS_ERR(phy->rst_phy)) {
+ dev_err(dev, "Could not get phy reset control\n");
+ ret = PTR_ERR(phy->rst_phy);
+ goto err_put_clk_pll0;
+ }
+
+ ret = reset_control_deassert(phy->rst_phy);
+ if (ret) {
+ dev_err(dev, "Cannot deassert phy reset control: %d\n", ret);
+ goto err_put_rst_phy;
+ }
+
+ ret = clk_prepare_enable(phy->clk_bus);
+ if (ret) {
+ dev_err(dev, "Cannot enable bus clock: %d\n", ret);
+ goto err_deassert_rst_phy;
+ }
+
+ ret = clk_prepare_enable(phy->clk_mod);
+ if (ret) {
+ dev_err(dev, "Cannot enable mod clock: %d\n", ret);
+ goto err_disable_clk_bus;
+ }
+
+ hdmi->phy = phy;
+
+ return 0;
+
+err_disable_clk_bus:
+ clk_disable_unprepare(phy->clk_bus);
+err_deassert_rst_phy:
+ reset_control_assert(phy->rst_phy);
+err_put_rst_phy:
+ reset_control_put(phy->rst_phy);
+err_put_clk_pll0:
+ if (phy->variant->has_phy_clk)
+ clk_put(phy->clk_pll0);
+err_put_clk_mod:
+ clk_put(phy->clk_mod);
+err_put_clk_bus:
+ clk_put(phy->clk_bus);
+
+ return ret;
+}
+
+void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+{
+ struct sun8i_hdmi_phy *phy = hdmi->phy;
+
+ clk_disable_unprepare(phy->clk_mod);
+ clk_disable_unprepare(phy->clk_bus);
+
+ reset_control_assert(phy->rst_phy);
+
+ reset_control_put(phy->rst_phy);
+
+ if (phy->variant->has_phy_clk)
+ clk_put(phy->clk_pll0);
+ clk_put(phy->clk_mod);
+ clk_put(phy->clk_bus);
+}
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
new file mode 100644
index 0000000..faea449
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#include <linux/clk-provider.h>
+
+#include "sun8i_dw_hdmi.h"
+
+struct sun8i_phy_clk {
+ struct clk_hw hw;
+ struct sun8i_hdmi_phy *phy;
+};
+
+static inline struct sun8i_phy_clk *hw_to_phy_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct sun8i_phy_clk, hw);
+}
+
+static int sun8i_phy_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long rate = req->rate;
+ unsigned long best_rate = 0;
+ struct clk_hw *parent;
+ int best_div = 1;
+ int i;
+
+ parent = clk_hw_get_parent(hw);
+
+ for (i = 1; i <= 16; i++) {
+ unsigned long ideal = rate * i;
+ unsigned long rounded;
+
+ rounded = clk_hw_round_rate(parent, ideal);
+
+ if (rounded == ideal) {
+ best_rate = rounded;
+ best_div = i;
+ break;
+ }
+
+ if (!best_rate ||
+ abs(rate - rounded / i) <
+ abs(rate - best_rate / best_div)) {
+ best_rate = rounded;
+ best_div = i;
+ }
+ }
+
+ req->rate = best_rate / best_div;
+ req->best_parent_rate = best_rate;
+ req->best_parent_hw = parent;
+
+ return 0;
+}
+
+static unsigned long sun8i_phy_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+ u32 reg;
+
+ regmap_read(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG, &reg);
+ reg = ((reg >> SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_SHIFT) &
+ SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK) + 1;
+
+ return parent_rate / reg;
+}
+
+static int sun8i_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+ unsigned long best_rate = 0;
+ u8 best_m = 0, m;
+
+ for (m = 1; m <= 16; m++) {
+ unsigned long tmp_rate = parent_rate / m;
+
+ if (tmp_rate > rate)
+ continue;
+
+ if (!best_rate ||
+ (rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_m = m;
+ }
+ }
+
+ regmap_update_bits(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG,
+ SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK,
+ SUN8I_HDMI_PHY_PLL_CFG2_PREDIV(best_m));
+
+ return 0;
+}
+
+static const struct clk_ops sun8i_phy_clk_ops = {
+ .determine_rate = sun8i_phy_clk_determine_rate,
+ .recalc_rate = sun8i_phy_clk_recalc_rate,
+ .set_rate = sun8i_phy_clk_set_rate,
+};
+
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
+{
+ struct clk_init_data init;
+ struct sun8i_phy_clk *priv;
+ const char *parents[1];
+
+ parents[0] = __clk_get_name(phy->clk_pll0);
+ if (!parents[0])
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ init.name = "hdmi-phy-clk";
+ init.ops = &sun8i_phy_clk_ops;
+ init.parent_names = parents;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ priv->phy = phy;
+ priv->hw.init = &init;
+
+ phy->clk_phy = devm_clk_register(dev, &priv->hw);
+ if (IS_ERR(phy->clk_phy))
+ return PTR_ERR(phy->clk_phy);
+
+ return 0;
+}
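A short worked example of the pre-divider selection implemented above (the
297 MHz pll-0 rate is an assumption for illustration; this patch does not fix
the parent rate):

	/*
	 * With parent_rate = 297000000 and a requested rate of 74250000,
	 * sun8i_phy_clk_set_rate() picks m = 4 (297000000 / 4 == 74250000
	 * exactly). A request for 148500000 picks m = 2, and 27000000 picks
	 * m = 11 (297000000 / 11 == 27000000). Rates that cannot be hit
	 * exactly fall back to the closest divider that does not overshoot,
	 * while sun8i_phy_clk_determine_rate() may additionally re-rate the
	 * parent thanks to CLK_SET_RATE_PARENT.
	 */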
diff --git a/drivers/gpu/drm/sun4i/sun8i_layer.c b/drivers/gpu/drm/sun4i/sun8i_layer.c
deleted file mode 100644
index 23810ff..0000000
--- a/drivers/gpu/drm/sun4i/sun8i_layer.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (C) Icenowy Zheng <icenowy@aosc.io>
- *
- * Based on sun4i_layer.h, which is:
- * Copyright (C) 2015 Free Electrons
- * Copyright (C) 2015 NextThing Co
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drmP.h>
-
-#include "sun8i_layer.h"
-#include "sun8i_mixer.h"
-
-struct sun8i_plane_desc {
- enum drm_plane_type type;
- const uint32_t *formats;
- uint32_t nformats;
-};
-
-static void sun8i_mixer_layer_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct sun8i_layer *layer = plane_to_sun8i_layer(plane);
- struct sun8i_mixer *mixer = layer->mixer;
-
- sun8i_mixer_layer_enable(mixer, layer->id, false);
-}
-
-static void sun8i_mixer_layer_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct sun8i_layer *layer = plane_to_sun8i_layer(plane);
- struct sun8i_mixer *mixer = layer->mixer;
-
- sun8i_mixer_update_layer_coord(mixer, layer->id, plane);
- sun8i_mixer_update_layer_formats(mixer, layer->id, plane);
- sun8i_mixer_update_layer_buffer(mixer, layer->id, plane);
- sun8i_mixer_layer_enable(mixer, layer->id, true);
-}
-
-static struct drm_plane_helper_funcs sun8i_mixer_layer_helper_funcs = {
- .atomic_disable = sun8i_mixer_layer_atomic_disable,
- .atomic_update = sun8i_mixer_layer_atomic_update,
-};
-
-static const struct drm_plane_funcs sun8i_mixer_layer_funcs = {
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .destroy = drm_plane_cleanup,
- .disable_plane = drm_atomic_helper_disable_plane,
- .reset = drm_atomic_helper_plane_reset,
- .update_plane = drm_atomic_helper_update_plane,
-};
-
-static const uint32_t sun8i_mixer_layer_formats[] = {
- DRM_FORMAT_RGB888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
-};
-
-static const struct sun8i_plane_desc sun8i_mixer_planes[] = {
- {
- .type = DRM_PLANE_TYPE_PRIMARY,
- .formats = sun8i_mixer_layer_formats,
- .nformats = ARRAY_SIZE(sun8i_mixer_layer_formats),
- },
-};
-
-static struct sun8i_layer *sun8i_layer_init_one(struct drm_device *drm,
- struct sun8i_mixer *mixer,
- const struct sun8i_plane_desc *plane)
-{
- struct sun8i_layer *layer;
- int ret;
-
- layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
- if (!layer)
- return ERR_PTR(-ENOMEM);
-
- /* possible crtcs are set later */
- ret = drm_universal_plane_init(drm, &layer->plane, 0,
- &sun8i_mixer_layer_funcs,
- plane->formats, plane->nformats,
- NULL, plane->type, NULL);
- if (ret) {
- dev_err(drm->dev, "Couldn't initialize layer\n");
- return ERR_PTR(ret);
- }
-
- drm_plane_helper_add(&layer->plane,
- &sun8i_mixer_layer_helper_funcs);
- layer->mixer = mixer;
-
- return layer;
-}
-
-struct drm_plane **sun8i_layers_init(struct drm_device *drm,
- struct sunxi_engine *engine)
-{
- struct drm_plane **planes;
- struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
- int i;
-
- planes = devm_kcalloc(drm->dev, ARRAY_SIZE(sun8i_mixer_planes) + 1,
- sizeof(*planes), GFP_KERNEL);
- if (!planes)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < ARRAY_SIZE(sun8i_mixer_planes); i++) {
- const struct sun8i_plane_desc *plane = &sun8i_mixer_planes[i];
- struct sun8i_layer *layer;
-
- layer = sun8i_layer_init_one(drm, mixer, plane);
- if (IS_ERR(layer)) {
- dev_err(drm->dev, "Couldn't initialize %s plane\n",
- i ? "overlay" : "primary");
- return ERR_CAST(layer);
- };
-
- layer->id = i;
- planes[i] = &layer->plane;
- };
-
- return planes;
-}
diff --git a/drivers/gpu/drm/sun4i/sun8i_layer.h b/drivers/gpu/drm/sun4i/sun8i_layer.h
deleted file mode 100644
index e5eccd2..0000000
--- a/drivers/gpu/drm/sun4i/sun8i_layer.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) Icenowy Zheng <icenowy@aosc.io>
- *
- * Based on sun4i_layer.h, which is:
- * Copyright (C) 2015 Free Electrons
- * Copyright (C) 2015 NextThing Co
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- */
-
-#ifndef _SUN8I_LAYER_H_
-#define _SUN8I_LAYER_H_
-
-struct sunxi_engine;
-
-struct sun8i_layer {
- struct drm_plane plane;
- struct sun4i_drv *drv;
- struct sun8i_mixer *mixer;
- int id;
-};
-
-static inline struct sun8i_layer *
-plane_to_sun8i_layer(struct drm_plane *plane)
-{
- return container_of(plane, struct sun8i_layer, plane);
-}
-
-struct drm_plane **sun8i_layers_init(struct drm_device *drm,
- struct sunxi_engine *engine);
-#endif /* _SUN8I_LAYER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index cb193c5..126899d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -26,204 +26,288 @@
#include "sun4i_drv.h"
#include "sun8i_mixer.h"
-#include "sun8i_layer.h"
+#include "sun8i_ui_layer.h"
+#include "sun8i_vi_layer.h"
#include "sunxi_engine.h"
-static void sun8i_mixer_commit(struct sunxi_engine *engine)
-{
- DRM_DEBUG_DRIVER("Committing changes\n");
-
- regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
- SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
-}
-
-void sun8i_mixer_layer_enable(struct sun8i_mixer *mixer,
- int layer, bool enable)
-{
- u32 val;
- /* Currently the first UI channel is used */
- int chan = mixer->cfg->vi_num;
-
- DRM_DEBUG_DRIVER("Enabling layer %d in channel %d\n", layer, chan);
-
- if (enable)
- val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN;
- else
- val = 0;
-
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(chan, layer),
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
-
- /* Set the alpha configuration */
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(chan, layer),
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_DEF);
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(chan, layer),
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_DEF);
-}
-
-static int sun8i_mixer_drm_format_to_layer(struct drm_plane *plane,
- u32 format, u32 *mode)
-{
- switch (format) {
- case DRM_FORMAT_ARGB8888:
- *mode = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_ARGB8888;
- break;
-
- case DRM_FORMAT_XRGB8888:
- *mode = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_XRGB8888;
- break;
-
- case DRM_FORMAT_RGB888:
- *mode = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_RGB888;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
+static const struct de2_fmt_info de2_formats[] = {
+ {
+ .drm_fmt = DRM_FORMAT_ARGB8888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB8888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR8888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR8888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA8888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA8888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA8888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA8888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_XRGB8888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_XRGB8888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_XBGR8888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_XBGR8888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBX8888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBX8888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRX8888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRX8888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGR888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_RGB565,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGB565,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_BGR565,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGR565,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ARGB4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ARGB1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_VYUY,
+ .de2_fmt = SUN8I_MIXER_FBFMT_VYUY,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YUYV,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUYV,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YVYU,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YVYU,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_NV16,
+ .de2_fmt = SUN8I_MIXER_FBFMT_NV16,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_NV61,
+ .de2_fmt = SUN8I_MIXER_FBFMT_NV61,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_NV12,
+ .de2_fmt = SUN8I_MIXER_FBFMT_NV12,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_NV21,
+ .de2_fmt = SUN8I_MIXER_FBFMT_NV21,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YUV420,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YUV411,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YVU444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_YVU2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YVU422,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YVU2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YVU420,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YVU2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_YVU411,
+ .de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YVU2RGB,
+ },
+};
-int sun8i_mixer_update_layer_coord(struct sun8i_mixer *mixer,
- int layer, struct drm_plane *plane)
+const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
{
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
- /* Currently the first UI channel is used */
- int chan = mixer->cfg->vi_num;
-
- DRM_DEBUG_DRIVER("Updating layer %d\n", layer);
-
- if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
- state->crtc_w, state->crtc_h);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_SIZE,
- SUN8I_MIXER_SIZE(state->crtc_w,
- state->crtc_h));
- DRM_DEBUG_DRIVER("Updating blender size\n");
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(0),
- SUN8I_MIXER_SIZE(state->crtc_w,
- state->crtc_h));
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_OUTSIZE,
- SUN8I_MIXER_SIZE(state->crtc_w,
- state->crtc_h));
- DRM_DEBUG_DRIVER("Updating channel size\n");
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_OVL_SIZE(chan),
- SUN8I_MIXER_SIZE(state->crtc_w,
- state->crtc_h));
- }
+ unsigned int i;
- /* Set the line width */
- DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_PITCH(chan, layer),
- fb->pitches[0]);
-
- /* Set height and width */
- DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
- state->crtc_w, state->crtc_h);
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_SIZE(chan, layer),
- SUN8I_MIXER_SIZE(state->crtc_w, state->crtc_h));
-
- /* Set base coordinates */
- DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
- state->crtc_x, state->crtc_y);
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_COORD(chan, layer),
- SUN8I_MIXER_COORD(state->crtc_x, state->crtc_y));
+ for (i = 0; i < ARRAY_SIZE(de2_formats); ++i)
+ if (de2_formats[i].drm_fmt == format)
+ return &de2_formats[i];
- return 0;
+ return NULL;
}
-int sun8i_mixer_update_layer_formats(struct sun8i_mixer *mixer,
- int layer, struct drm_plane *plane)
+static void sun8i_mixer_commit(struct sunxi_engine *engine)
{
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
- bool interlaced = false;
- u32 val;
- /* Currently the first UI channel is used */
- int chan = mixer->cfg->vi_num;
- int ret;
-
- if (plane->state->crtc)
- interlaced = plane->state->crtc->state->adjusted_mode.flags
- & DRM_MODE_FLAG_INTERLACE;
-
- regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_OUTCTL,
- SUN8I_MIXER_BLEND_OUTCTL_INTERLACED,
- interlaced ?
- SUN8I_MIXER_BLEND_OUTCTL_INTERLACED : 0);
-
- DRM_DEBUG_DRIVER("Switching display mixer interlaced mode %s\n",
- interlaced ? "on" : "off");
-
- ret = sun8i_mixer_drm_format_to_layer(plane, fb->format->format,
- &val);
- if (ret) {
- DRM_DEBUG_DRIVER("Invalid format\n");
- return ret;
- }
-
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(chan, layer),
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
+ DRM_DEBUG_DRIVER("Committing changes\n");
- return 0;
+ regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
+ SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
}
-int sun8i_mixer_update_layer_buffer(struct sun8i_mixer *mixer,
- int layer, struct drm_plane *plane)
+static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
+ struct sunxi_engine *engine)
{
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
- dma_addr_t paddr;
- /* Currently the first UI channel is used */
- int chan = mixer->cfg->vi_num;
- int bpp;
-
- /* Get the physical address of the buffer in memory */
- gem = drm_fb_cma_get_gem_obj(fb, 0);
-
- DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
-
- /* Compute the start of the displayed memory */
- bpp = fb->format->cpp[0];
- paddr = gem->paddr + fb->offsets[0];
-
- /* Fixup framebuffer address for src coordinates */
- paddr += (state->src_x >> 16) * bpp;
- paddr += (state->src_y >> 16) * fb->pitches[0];
-
- /*
- * The hardware cannot correctly deal with negative crtc
- * coordinates, the display is cropped to the requested size,
- * but the display content is not moved.
- * Manually move the display content by fixup the framebuffer
- * address when crtc_x or crtc_y is negative, like what we
- * have did for src_x and src_y.
- */
- if (state->crtc_x < 0)
- paddr += -state->crtc_x * bpp;
- if (state->crtc_y < 0)
- paddr += -state->crtc_y * fb->pitches[0];
-
- DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
-
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(chan, layer),
- lower_32_bits(paddr));
-
- return 0;
+ struct drm_plane **planes;
+ struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
+ int i;
+
+ planes = devm_kcalloc(drm->dev,
+ mixer->cfg->vi_num + mixer->cfg->ui_num + 1,
+ sizeof(*planes), GFP_KERNEL);
+ if (!planes)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < mixer->cfg->vi_num; i++) {
+ struct sun8i_vi_layer *layer;
+
+ layer = sun8i_vi_layer_init_one(drm, mixer, i);
+ if (IS_ERR(layer)) {
+ dev_err(drm->dev,
+ "Couldn't initialize overlay plane\n");
+ return ERR_CAST(layer);
+		}
+
+		planes[i] = &layer->plane;
+	}
+
+ for (i = 0; i < mixer->cfg->ui_num; i++) {
+ struct sun8i_ui_layer *layer;
+
+ layer = sun8i_ui_layer_init_one(drm, mixer, i);
+ if (IS_ERR(layer)) {
+ dev_err(drm->dev, "Couldn't initialize %s plane\n",
+ i ? "overlay" : "primary");
+ return ERR_CAST(layer);
+		}
+
+		planes[mixer->cfg->vi_num + i] = &layer->plane;
+	}
+
+ return planes;
}
static const struct sunxi_engine_ops sun8i_engine_ops = {
@@ -247,6 +331,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct sun8i_mixer *mixer;
struct resource *res;
void __iomem *regs;
+ int plane_cnt;
int i, ret;
/*
@@ -313,6 +398,15 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
ret = PTR_ERR(mixer->mod_clk);
goto err_disable_bus_clk;
}
+
+ /*
+	 * The mixer seems to require this rate to be functional, although
+	 * the reason is not known. Make sure it is set.
+ */
+ if (mixer->cfg->mod_rate)
+ clk_set_rate(mixer->mod_clk, mixer->cfg->mod_rate);
+
clk_prepare_enable(mixer->mod_clk);
list_add_tail(&mixer->engine.list, &drv->engine_list);
@@ -325,27 +419,26 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_CTL,
SUN8I_MIXER_GLOBAL_CTL_RT_EN);
- /* Initialize blender */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_FCOLOR_CTL,
- SUN8I_MIXER_BLEND_FCOLOR_CTL_DEF);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PREMULTIPLY,
- SUN8I_MIXER_BLEND_PREMULTIPLY_DEF);
+ /* Set background color to black */
regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR,
- SUN8I_MIXER_BLEND_BKCOLOR_DEF);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(0),
- SUN8I_MIXER_BLEND_MODE_DEF);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_CK_CTL,
- SUN8I_MIXER_BLEND_CK_CTL_DEF);
-
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_FCOLOR(0),
- SUN8I_MIXER_BLEND_ATTR_FCOLOR_DEF);
-
- /* Select the first UI channel */
- DRM_DEBUG_DRIVER("Selecting channel %d (first UI channel)\n",
- mixer->cfg->vi_num);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE,
- mixer->cfg->vi_num);
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ /*
+	 * Set the fill color of the bottom plane to black. Generally not
+	 * needed except when the VI plane is at the bottom (zpos = 0) and
+	 * enabled.
+ */
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(0),
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ /* Fixed zpos for now */
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE, 0x43210);
+
+ plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
+ for (i = 0; i < plane_cnt; i++)
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(i),
+ SUN8I_MIXER_BLEND_MODE_DEF);
return 0;
@@ -385,13 +478,50 @@ static int sun8i_mixer_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
+ .ccsc = 0,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
+static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
+ .ccsc = 1,
+ .scaler_mask = 0x3,
+ .ui_num = 1,
+ .vi_num = 1,
+};
+
+static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
+ .ccsc = 0,
+ .mod_rate = 432000000,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
+ .scaler_mask = 0x3,
+ .ccsc = 0,
+ .mod_rate = 150000000,
};
static const struct of_device_id sun8i_mixer_of_table[] = {
{
+ .compatible = "allwinner,sun8i-a83t-de2-mixer-0",
+ .data = &sun8i_a83t_mixer0_cfg,
+ },
+ {
+ .compatible = "allwinner,sun8i-a83t-de2-mixer-1",
+ .data = &sun8i_a83t_mixer1_cfg,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-de2-mixer-0",
+ .data = &sun8i_h3_mixer0_cfg,
+ },
+ {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 4785ac0..f34e70c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -14,10 +14,9 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include "sun8i_csc.h"
#include "sunxi_engine.h"
-#define SUN8I_MIXER_MAX_CHAN_COUNT 4
-
#define SUN8I_MIXER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
#define SUN8I_MIXER_COORD(x, y) ((y) << 16 | (x))
@@ -26,14 +25,14 @@
#define SUN8I_MIXER_GLOBAL_DBUFF 0x8
#define SUN8I_MIXER_GLOBAL_SIZE 0xc
-#define SUN8I_MIXER_GLOBAL_CTL_RT_EN 0x1
+#define SUN8I_MIXER_GLOBAL_CTL_RT_EN BIT(0)
-#define SUN8I_MIXER_GLOBAL_DBUFF_ENABLE 0x1
+#define SUN8I_MIXER_GLOBAL_DBUFF_ENABLE BIT(0)
-#define SUN8I_MIXER_BLEND_FCOLOR_CTL 0x1000
+#define SUN8I_MIXER_BLEND_PIPE_CTL 0x1000
#define SUN8I_MIXER_BLEND_ATTR_FCOLOR(x) (0x1004 + 0x10 * (x) + 0x0)
#define SUN8I_MIXER_BLEND_ATTR_INSIZE(x) (0x1004 + 0x10 * (x) + 0x4)
-#define SUN8I_MIXER_BLEND_ATTR_OFFSET(x) (0x1004 + 0x10 * (x) + 0x8)
+#define SUN8I_MIXER_BLEND_ATTR_COORD(x) (0x1004 + 0x10 * (x) + 0x8)
#define SUN8I_MIXER_BLEND_ROUTE 0x1080
#define SUN8I_MIXER_BLEND_PREMULTIPLY 0x1084
#define SUN8I_MIXER_BLEND_BKCOLOR 0x1088
@@ -45,57 +44,56 @@
#define SUN8I_MIXER_BLEND_CK_MIN(x) (0x10e0 + 0x04 * (x))
#define SUN8I_MIXER_BLEND_OUTCTL 0x10fc
+#define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe)
+#define SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(pipe) BIT(pipe)
+/* colors are always in AARRGGBB format */
+#define SUN8I_MIXER_BLEND_COLOR_BLACK 0xff000000
/* The following numbers are some still unknown magic numbers */
-#define SUN8I_MIXER_BLEND_ATTR_FCOLOR_DEF 0xff000000
-#define SUN8I_MIXER_BLEND_FCOLOR_CTL_DEF 0x00000101
-#define SUN8I_MIXER_BLEND_PREMULTIPLY_DEF 0x0
-#define SUN8I_MIXER_BLEND_BKCOLOR_DEF 0xff000000
#define SUN8I_MIXER_BLEND_MODE_DEF 0x03010301
-#define SUN8I_MIXER_BLEND_CK_CTL_DEF 0x0
#define SUN8I_MIXER_BLEND_OUTCTL_INTERLACED BIT(1)
-/*
- * VI channels are not used now, but the support of them may be introduced in
- * the future.
- */
-
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x0)
-#define SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x4)
-#define SUN8I_MIXER_CHAN_UI_LAYER_COORD(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x8)
-#define SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0xc)
-#define SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x10)
-#define SUN8I_MIXER_CHAN_UI_LAYER_BOT_LADDR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x14)
-#define SUN8I_MIXER_CHAN_UI_LAYER_FCOLOR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x18)
-#define SUN8I_MIXER_CHAN_UI_TOP_HADDR(ch) (0x2000 + 0x1000 * (ch) + 0x80)
-#define SUN8I_MIXER_CHAN_UI_BOT_HADDR(ch) (0x2000 + 0x1000 * (ch) + 0x84)
-#define SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch) (0x2000 + 0x1000 * (ch) + 0x88)
-
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN BIT(0)
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK GENMASK(2, 1)
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK GENMASK(11, 8)
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_DEF (1 << 1)
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_ARGB8888 (0 << 8)
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_XRGB8888 (4 << 8)
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_RGB888 (8 << 8)
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_DEF (0xff << 24)
+#define SUN8I_MIXER_FBFMT_ARGB8888 0
+#define SUN8I_MIXER_FBFMT_ABGR8888 1
+#define SUN8I_MIXER_FBFMT_RGBA8888 2
+#define SUN8I_MIXER_FBFMT_BGRA8888 3
+#define SUN8I_MIXER_FBFMT_XRGB8888 4
+#define SUN8I_MIXER_FBFMT_XBGR8888 5
+#define SUN8I_MIXER_FBFMT_RGBX8888 6
+#define SUN8I_MIXER_FBFMT_BGRX8888 7
+#define SUN8I_MIXER_FBFMT_RGB888 8
+#define SUN8I_MIXER_FBFMT_BGR888 9
+#define SUN8I_MIXER_FBFMT_RGB565 10
+#define SUN8I_MIXER_FBFMT_BGR565 11
+#define SUN8I_MIXER_FBFMT_ARGB4444 12
+#define SUN8I_MIXER_FBFMT_ABGR4444 13
+#define SUN8I_MIXER_FBFMT_RGBA4444 14
+#define SUN8I_MIXER_FBFMT_BGRA4444 15
+#define SUN8I_MIXER_FBFMT_ARGB1555 16
+#define SUN8I_MIXER_FBFMT_ABGR1555 17
+#define SUN8I_MIXER_FBFMT_RGBA5551 18
+#define SUN8I_MIXER_FBFMT_BGRA5551 19
+
+#define SUN8I_MIXER_FBFMT_YUYV 0
+#define SUN8I_MIXER_FBFMT_UYVY 1
+#define SUN8I_MIXER_FBFMT_YVYU 2
+#define SUN8I_MIXER_FBFMT_VYUY 3
+#define SUN8I_MIXER_FBFMT_NV16 4
+#define SUN8I_MIXER_FBFMT_NV61 5
+#define SUN8I_MIXER_FBFMT_YUV422 6
+/* format 7 doesn't exist */
+#define SUN8I_MIXER_FBFMT_NV12 8
+#define SUN8I_MIXER_FBFMT_NV21 9
+#define SUN8I_MIXER_FBFMT_YUV420 10
+/* format 11 doesn't exist */
+/* format 12 is semi-planar YUV411 UVUV */
+/* format 13 is semi-planar YUV411 VUVU */
+#define SUN8I_MIXER_FBFMT_YUV411 14
/*
* These sub-engines are still unknown now, the EN registers are here only to
* be used to disable these sub-engines.
*/
-#define SUN8I_MIXER_VSU_EN 0x20000
-#define SUN8I_MIXER_GSU1_EN 0x30000
-#define SUN8I_MIXER_GSU2_EN 0x40000
-#define SUN8I_MIXER_GSU3_EN 0x50000
#define SUN8I_MIXER_FCE_EN 0xa0000
#define SUN8I_MIXER_BWS_EN 0xa2000
#define SUN8I_MIXER_LTI_EN 0xa4000
@@ -104,9 +102,34 @@
#define SUN8I_MIXER_FCC_EN 0xaa000
#define SUN8I_MIXER_DCSC_EN 0xb0000
+struct de2_fmt_info {
+ u32 drm_fmt;
+ u32 de2_fmt;
+ bool rgb;
+ enum sun8i_csc_mode csc;
+};
+
+/**
+ * struct sun8i_mixer_cfg - mixer HW configuration
+ * @vi_num: number of VI channels
+ * @ui_num: number of UI channels
+ * @scaler_mask: bitmask which tells which channels support scaling
+ * Scaler support for the VI channels is defined first, followed by scaler
+ * support for the UI channels. For example, if the mixer has 2 VI channels
+ * without a scaler and 2 UI channels with one, the bitmask would be 0xC.
+ * @ccsc: select set of CCSC base addresses
+ * Set the value to 0 if this is the first mixer or a second mixer with VEP
+ * support. Set the value to 1 if this is a second mixer without VEP support.
+ * Other values are invalid.
+ * @mod_rate: module clock rate that needs to be set in order to have
+ * a functional block.
+ */
struct sun8i_mixer_cfg {
int vi_num;
int ui_num;
+ int scaler_mask;
+ int ccsc;
+ unsigned long mod_rate;
};
struct sun8i_mixer {
@@ -126,12 +149,5 @@ engine_to_sun8i_mixer(struct sunxi_engine *engine)
return container_of(engine, struct sun8i_mixer, engine);
}
-void sun8i_mixer_layer_enable(struct sun8i_mixer *mixer,
- int layer, bool enable);
-int sun8i_mixer_update_layer_coord(struct sun8i_mixer *mixer,
- int layer, struct drm_plane *plane);
-int sun8i_mixer_update_layer_formats(struct sun8i_mixer *mixer,
- int layer, struct drm_plane *plane);
-int sun8i_mixer_update_layer_buffer(struct sun8i_mixer *mixer,
- int layer, struct drm_plane *plane);
+const struct de2_fmt_info *sun8i_mixer_format_info(u32 format);
#endif /* _SUN8I_MIXER_H_ */
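To make the @scaler_mask layout documented above concrete, a small sketch with
hypothetical channel counts (matching the example given in the kerneldoc, not
a configuration added by this patch):

	#include <linux/bits.h>

	/*
	 * 2 VI channels without a scaler occupy bits 0-1 (left clear) and
	 * 2 UI channels with a scaler occupy bits 2-3 (set), giving 0xc.
	 */
	static const int example_scaler_mask = BIT(2) | BIT(3);

For comparison, sun8i_a83t_mixer0_cfg above uses 0xf: its single VI channel
and all three UI channels support scaling.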
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
new file mode 100644
index 0000000..9a54033
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on sun4i_layer.h, which is:
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "sun8i_ui_layer.h"
+#include "sun8i_mixer.h"
+#include "sun8i_ui_scaler.h"
+
+static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
+ int overlay, bool enable)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("%sabling channel %d overlay %d\n",
+ enable ? "En" : "Dis", channel, overlay);
+
+ if (enable)
+ val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN;
+ else
+ val = 0;
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
+
+ if (enable)
+ val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
+ else
+ val = 0;
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+}
+
+static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
+ int overlay, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ u32 src_w, src_h, dst_w, dst_h;
+ u32 outsize, insize;
+ u32 hphase, vphase;
+
+ DRM_DEBUG_DRIVER("Updating UI channel %d overlay %d\n",
+ channel, overlay);
+
+ src_w = drm_rect_width(&state->src) >> 16;
+ src_h = drm_rect_height(&state->src) >> 16;
+ dst_w = drm_rect_width(&state->dst);
+ dst_h = drm_rect_height(&state->dst);
+
+ hphase = state->src.x1 & 0xffff;
+ vphase = state->src.y1 & 0xffff;
+
+ insize = SUN8I_MIXER_SIZE(src_w, src_h);
+ outsize = SUN8I_MIXER_SIZE(dst_w, dst_h);
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ bool interlaced = false;
+ u32 val;
+
+ DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
+ dst_w, dst_h);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_GLOBAL_SIZE,
+ outsize);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_OUTSIZE,
+ outsize);
+
+ if (state->crtc)
+ interlaced = state->crtc->state->adjusted_mode.flags
+ & DRM_MODE_FLAG_INTERLACE;
+
+ if (interlaced)
+ val = SUN8I_MIXER_BLEND_OUTCTL_INTERLACED;
+ else
+ val = 0;
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_OUTCTL,
+ SUN8I_MIXER_BLEND_OUTCTL_INTERLACED,
+ val);
+
+ DRM_DEBUG_DRIVER("Switching display mixer interlaced mode %s\n",
+ interlaced ? "on" : "off");
+ }
+
+ /* Set height and width */
+ DRM_DEBUG_DRIVER("Layer source offset X: %d Y: %d\n",
+ state->src.x1 >> 16, state->src.y1 >> 16);
+ DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_SIZE(channel, overlay),
+ insize);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_UI_OVL_SIZE(channel),
+ insize);
+
+ if (insize != outsize || hphase || vphase) {
+ u32 hscale, vscale;
+
+ DRM_DEBUG_DRIVER("HW scaling is enabled\n");
+
+ hscale = state->src_w / state->crtc_w;
+ vscale = state->src_h / state->crtc_h;
+
+ sun8i_ui_scaler_setup(mixer, channel, src_w, src_h, dst_w,
+ dst_h, hscale, vscale, hphase, vphase);
+ sun8i_ui_scaler_enable(mixer, channel, true);
+ } else {
+ DRM_DEBUG_DRIVER("HW scaling is not needed\n");
+ sun8i_ui_scaler_enable(mixer, channel, false);
+ }
+
+ /* Set base coordinates */
+ DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
+ state->dst.x1, state->dst.y1);
+ DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+ SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+ outsize);
+
+ return 0;
+}
+
+static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
+ int overlay, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ const struct de2_fmt_info *fmt_info;
+ u32 val;
+
+ fmt_info = sun8i_mixer_format_info(state->fb->format->format);
+ if (!fmt_info || !fmt_info->rgb) {
+ DRM_DEBUG_DRIVER("Invalid format\n");
+ return -EINVAL;
+ }
+
+ val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
+
+ return 0;
+}
+
+static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
+ int overlay, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ dma_addr_t paddr;
+ int bpp;
+
+ /* Get the physical address of the buffer in memory */
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
+
+ /* Compute the start of the displayed memory */
+ bpp = fb->format->cpp[0];
+ paddr = gem->paddr + fb->offsets[0];
+
+ /* Fixup framebuffer address for src coordinates */
+ paddr += (state->src.x1 >> 16) * bpp;
+ paddr += (state->src.y1 >> 16) * fb->pitches[0];
+
+ /* Set the line width */
+ DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_PITCH(channel, overlay),
+ fb->pitches[0]);
+
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(channel, overlay),
+ lower_32_bits(paddr));
+
+ return 0;
+}
+
+static int sun8i_ui_layer_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ max_scale = DRM_PLANE_HELPER_NO_SCALING;
+
+ if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
+ min_scale = SUN8I_UI_SCALER_SCALE_MIN;
+ max_scale = SUN8I_UI_SCALER_SCALE_MAX;
+ }
+
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+}
+
+static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+ struct sun8i_mixer *mixer = layer->mixer;
+
+ sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false);
+}
+
+static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+ struct sun8i_mixer *mixer = layer->mixer;
+
+ if (!plane->state->visible) {
+ sun8i_ui_layer_enable(mixer, layer->channel,
+ layer->overlay, false);
+ return;
+ }
+
+ sun8i_ui_layer_update_coord(mixer, layer->channel,
+ layer->overlay, plane);
+ sun8i_ui_layer_update_formats(mixer, layer->channel,
+ layer->overlay, plane);
+ sun8i_ui_layer_update_buffer(mixer, layer->channel,
+ layer->overlay, plane);
+ sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, true);
+}
+
+static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
+ .atomic_check = sun8i_ui_layer_atomic_check,
+ .atomic_disable = sun8i_ui_layer_atomic_disable,
+ .atomic_update = sun8i_ui_layer_atomic_update,
+};
+
+static const struct drm_plane_funcs sun8i_ui_layer_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .destroy = drm_plane_cleanup,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = drm_atomic_helper_plane_reset,
+ .update_plane = drm_atomic_helper_update_plane,
+};
+
+static const u32 sun8i_ui_layer_formats[] = {
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+};
+
+struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
+ struct sun8i_mixer *mixer,
+ int index)
+{
+ enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
+ int channel = mixer->cfg->vi_num + index;
+ struct sun8i_ui_layer *layer;
+ int ret;
+
+ layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
+ if (!layer)
+ return ERR_PTR(-ENOMEM);
+
+ if (index == 0)
+ type = DRM_PLANE_TYPE_PRIMARY;
+
+ /* possible crtcs are set later */
+ ret = drm_universal_plane_init(drm, &layer->plane, 0,
+ &sun8i_ui_layer_funcs,
+ sun8i_ui_layer_formats,
+ ARRAY_SIZE(sun8i_ui_layer_formats),
+ NULL, type, NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialize layer\n");
+ return ERR_PTR(ret);
+ }
+
+ /* fixed zpos for now */
+ ret = drm_plane_create_zpos_immutable_property(&layer->plane, channel);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't add zpos property\n");
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(&layer->plane, &sun8i_ui_layer_helper_funcs);
+ layer->mixer = mixer;
+ layer->channel = channel;
+ layer->overlay = 0;
+
+ return layer;
+}
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
new file mode 100644
index 0000000..123b15e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on sun4i_layer.h, which is:
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN8I_UI_LAYER_H_
+#define _SUN8I_UI_LAYER_H_
+
+#include <drm/drm_plane.h>
+
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x0)
+#define SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x4)
+#define SUN8I_MIXER_CHAN_UI_LAYER_COORD(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x8)
+#define SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0xc)
+#define SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x10)
+#define SUN8I_MIXER_CHAN_UI_LAYER_BOT_LADDR(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x14)
+#define SUN8I_MIXER_CHAN_UI_LAYER_FCOLOR(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x18)
+#define SUN8I_MIXER_CHAN_UI_TOP_HADDR(ch) (0x2000 + 0x1000 * (ch) + 0x80)
+#define SUN8I_MIXER_CHAN_UI_BOT_HADDR(ch) (0x2000 + 0x1000 * (ch) + 0x84)
+#define SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch) (0x2000 + 0x1000 * (ch) + 0x88)
+
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN BIT(0)
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK GENMASK(2, 1)
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8)
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET 8
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
+
+struct sun8i_mixer;
+
+struct sun8i_ui_layer {
+ struct drm_plane plane;
+ struct sun8i_mixer *mixer;
+ int channel;
+ int overlay;
+};
+
+static inline struct sun8i_ui_layer *
+plane_to_sun8i_ui_layer(struct drm_plane *plane)
+{
+ return container_of(plane, struct sun8i_ui_layer, plane);
+}
+
+struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
+ struct sun8i_mixer *mixer,
+ int index);
+#endif /* _SUN8I_UI_LAYER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
new file mode 100644
index 0000000..6bb2aa1
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2017 Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * Coefficients are taken from BSP driver, which is:
+ * Copyright (C) 2014-2015 Allwinner
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "sun8i_ui_scaler.h"
+
+static const u32 lan2coefftab16[240] = {
+ 0x00004000, 0x00033ffe, 0x00063efc, 0x000a3bfb,
+ 0xff0f37fb, 0xfe1433fb, 0xfd192ffb, 0xfd1f29fb,
+ 0xfc2424fc, 0xfb291ffd, 0xfb2f19fd, 0xfb3314fe,
+ 0xfb370fff, 0xfb3b0a00, 0xfc3e0600, 0xfe3f0300,
+
+ 0xff053804, 0xff083801, 0xff0a3700, 0xff0e34ff,
+ 0xff1232fd, 0xfe162ffd, 0xfd1b2cfc, 0xfd1f28fc,
+ 0xfd2323fd, 0xfc281ffd, 0xfc2c1bfd, 0xfd2f16fe,
+ 0xfd3212ff, 0xff340eff, 0x00360a00, 0x02370700,
+
+ 0xff083207, 0xff0a3205, 0xff0d3103, 0xfe113001,
+ 0xfe142e00, 0xfe182bff, 0xfe1b29fe, 0xfe1f25fe,
+ 0xfe2222fe, 0xfe251ffe, 0xfe291bfe, 0xff2b18fe,
+ 0x002e14fe, 0x013010ff, 0x03310dff, 0x05310a00,
+
+ 0xff0a2e09, 0xff0c2e07, 0xff0f2d05, 0xff122c03,
+ 0xfe152b02, 0xfe182901, 0xfe1b2700, 0xff1e24ff,
+ 0xff2121ff, 0xff241eff, 0x00261bff, 0x012818ff,
+ 0x022a15ff, 0x032c12ff, 0x052d0fff, 0x072d0c00,
+
+ 0xff0c2a0b, 0xff0e2a09, 0xff102a07, 0xff132905,
+ 0xff162803, 0xff182702, 0xff1b2501, 0xff1e2300,
+ 0x00202000, 0x01221d00, 0x01251bff, 0x032618ff,
+ 0x042815ff, 0x052913ff, 0x072a10ff, 0x092a0d00,
+
+ 0xff0d280c, 0xff0f280a, 0xff112808, 0xff142706,
+ 0xff162605, 0xff192503, 0x001b2302, 0x001d2201,
+ 0x011f1f01, 0x01221d00, 0x02231b00, 0x04241800,
+ 0x052616ff, 0x072713ff, 0x08271100, 0x0a280e00,
+
+ 0xff0e260d, 0xff10260b, 0xff122609, 0xff142508,
+ 0x00152506, 0x00182305, 0x001b2203, 0x011d2002,
+ 0x011f1f01, 0x02201d01, 0x03221b00, 0x04231801,
+ 0x06241600, 0x08251300, 0x09261100, 0x0b260f00,
+
+ 0xff0e250e, 0xff10250c, 0x0011250a, 0x00142408,
+ 0x00162307, 0x00182206, 0x011a2104, 0x011c2003,
+ 0x021e1e02, 0x03201c01, 0x04211a01, 0x05221801,
+ 0x07231600, 0x08241400, 0x0a241200, 0x0c241000,
+
+ 0x000e240e, 0x0010240c, 0x0013230a, 0x00142309,
+ 0x00162208, 0x01182106, 0x011a2005, 0x021b1f04,
+ 0x031d1d03, 0x041e1c02, 0x05201a01, 0x06211801,
+ 0x07221601, 0x09231400, 0x0a231300, 0x0c231100,
+
+ 0x000f220f, 0x0011220d, 0x0013220b, 0x0015210a,
+ 0x01162108, 0x01182007, 0x02191f06, 0x031a1e05,
+ 0x041c1c04, 0x051d1b03, 0x061f1902, 0x07201801,
+ 0x08211601, 0x0a211500, 0x0b221300, 0x0d221100,
+
+ 0x0010210f, 0x0011210e, 0x0013210c, 0x0114200b,
+ 0x01161f0a, 0x02171f08, 0x03181e07, 0x031a1d06,
+ 0x041c1c04, 0x051d1a04, 0x071d1903, 0x081e1802,
+ 0x091f1602, 0x0b1f1501, 0x0c211300, 0x0e201200,
+
+ 0x00102010, 0x0012200e, 0x0013200d, 0x01151f0b,
+ 0x01161f0a, 0x02171e09, 0x03191d07, 0x041a1c06,
+ 0x051b1b05, 0x061c1a04, 0x071d1903, 0x081e1703,
+ 0x0a1f1601, 0x0b1f1501, 0x0d201300, 0x0e201200,
+
+ 0x00102010, 0x00121f0f, 0x00141f0d, 0x01141f0c,
+ 0x02161e0a, 0x03171d09, 0x03181d08, 0x041a1c06,
+ 0x051b1b05, 0x061c1a04, 0x081c1903, 0x091d1703,
+ 0x0a1e1602, 0x0c1e1501, 0x0d1f1400, 0x0e1f1201,
+
+ 0x00111e11, 0x00131e0f, 0x01131e0e, 0x02151d0c,
+ 0x02161d0b, 0x03171c0a, 0x04181b09, 0x05191b07,
+ 0x061a1a06, 0x071b1905, 0x091b1804, 0x0a1c1703,
+ 0x0b1d1602, 0x0c1d1502, 0x0e1d1401, 0x0f1e1300,
+
+ 0x00111e11, 0x00131d10, 0x01141d0e, 0x02151c0d,
+ 0x03161c0b, 0x04171b0a, 0x05171b09, 0x06181a08,
+ 0x07191907, 0x081a1806, 0x091a1805, 0x0a1b1704,
+ 0x0b1c1603, 0x0d1c1502, 0x0e1d1401, 0x0f1d1301,
+};
+
+static int sun8i_ui_scaler_coef_index(unsigned int step)
+{
+ unsigned int scale, int_part, float_part;
+
+ scale = step >> (SUN8I_UI_SCALER_SCALE_FRAC - 3);
+ int_part = scale >> 3;
+ float_part = scale & 0x7;
+
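+ /*
+ * lan2coefftab16 holds 15 sets of SUN8I_UI_SCALER_COEFF_COUNT
+ * coefficients; map the scale ratio (int_part.float_part, 3
+ * fractional bits) to one of them.
+ */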
+ switch (int_part) {
+ case 0:
+ return 0;
+ case 1:
+ return float_part;
+ case 2:
+ return 8 + (float_part >> 1);
+ case 3:
+ return 12;
+ case 4:
+ return 13;
+ default:
+ return 14;
+ }
+}
+
+void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
+{
+ int vi_cnt = mixer->cfg->vi_num;
+ u32 val;
+
+ if (WARN_ON(layer < vi_cnt))
+ return;
+
+ if (enable)
+ val = SUN8I_SCALER_GSU_CTRL_EN |
+ SUN8I_SCALER_GSU_CTRL_COEFF_RDY;
+ else
+ val = 0;
+
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_GSU_CTRL(vi_cnt, layer - vi_cnt), val);
+}
+
+void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
+ u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
+ u32 hscale, u32 vscale, u32 hphase, u32 vphase)
+{
+ int vi_cnt = mixer->cfg->vi_num;
+ u32 insize, outsize;
+ int i, offset;
+
+ if (WARN_ON(layer < vi_cnt))
+ return;
+
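+ /* phase and scale come in with 16 fractional bits, hardware uses 20 */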
+ hphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
+ vphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
+ hscale <<= SUN8I_UI_SCALER_SCALE_FRAC - 16;
+ vscale <<= SUN8I_UI_SCALER_SCALE_FRAC - 16;
+
+ insize = SUN8I_UI_SCALER_SIZE(src_w, src_h);
+ outsize = SUN8I_UI_SCALER_SIZE(dst_w, dst_h);
+
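+ /* GSU registers are indexed by UI plane, so drop the VI channel offset */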
+ layer -= vi_cnt;
+
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_GSU_OUTSIZE(vi_cnt, layer), outsize);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_GSU_INSIZE(vi_cnt, layer), insize);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_GSU_HSTEP(vi_cnt, layer), hscale);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_GSU_VSTEP(vi_cnt, layer), vscale);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_GSU_HPHASE(vi_cnt, layer), hphase);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_GSU_VPHASE(vi_cnt, layer), vphase);
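+ /* program the coefficient set chosen from the horizontal ratio */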
+ offset = sun8i_ui_scaler_coef_index(hscale) *
+ SUN8I_UI_SCALER_COEFF_COUNT;
+ for (i = 0; i < SUN8I_UI_SCALER_COEFF_COUNT; i++)
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_GSU_HCOEFF(vi_cnt, layer, i),
+ lan2coefftab16[offset + i]);
+}
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
new file mode 100644
index 0000000..86295be
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _SUN8I_UI_SCALER_H_
+#define _SUN8I_UI_SCALER_H_
+
+#include "sun8i_mixer.h"
+
+/* these two macros assume 16 fractional bits, which is standard in DRM */
+#define SUN8I_UI_SCALER_SCALE_MIN 1
+#define SUN8I_UI_SCALER_SCALE_MAX ((1UL << 20) - 1)
+
+#define SUN8I_UI_SCALER_SCALE_FRAC 20
+#define SUN8I_UI_SCALER_PHASE_FRAC 20
+#define SUN8I_UI_SCALER_COEFF_COUNT 16
+#define SUN8I_UI_SCALER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
+
+#define SUN8I_SCALER_GSU_CTRL(vi_cnt, ui_idx) \
+ (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x0)
+#define SUN8I_SCALER_GSU_OUTSIZE(vi_cnt, ui_idx) \
+ (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x40)
+#define SUN8I_SCALER_GSU_INSIZE(vi_cnt, ui_idx) \
+ (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x80)
+#define SUN8I_SCALER_GSU_HSTEP(vi_cnt, ui_idx) \
+ (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x88)
+#define SUN8I_SCALER_GSU_VSTEP(vi_cnt, ui_idx) \
+ (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x8c)
+#define SUN8I_SCALER_GSU_HPHASE(vi_cnt, ui_idx) \
+ (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x90)
+#define SUN8I_SCALER_GSU_VPHASE(vi_cnt, ui_idx) \
+ (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x98)
+#define SUN8I_SCALER_GSU_HCOEFF(vi_cnt, ui_idx, index) \
+ (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x200 + \
+ 0x4 * (index))
+
+#define SUN8I_SCALER_GSU_CTRL_EN BIT(0)
+#define SUN8I_SCALER_GSU_CTRL_COEFF_RDY BIT(4)
+
+void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable);
+void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
+ u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
+ u32 hscale, u32 vscale, u32 hphase, u32 vphase);
+
+#endif /* _SUN8I_UI_SCALER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
new file mode 100644
index 0000000..5877f8e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "sun8i_vi_layer.h"
+#include "sun8i_mixer.h"
+#include "sun8i_vi_scaler.h"
+
+static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
+ int overlay, bool enable)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("%sabling VI channel %d overlay %d\n",
+ enable ? "En" : "Dis", channel, overlay);
+
+ if (enable)
+ val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN;
+ else
+ val = 0;
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val);
+
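+ /* the channel also has to be enabled in the blender pipe control */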
+ if (enable)
+ val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
+ else
+ val = 0;
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+}
+
+static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
+ int overlay, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ const struct drm_format_info *format = state->fb->format;
+ u32 src_w, src_h, dst_w, dst_h;
+ u32 outsize, insize;
+ u32 hphase, vphase;
+ bool subsampled;
+
+ DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
+ channel, overlay);
+
+ src_w = drm_rect_width(&state->src) >> 16;
+ src_h = drm_rect_height(&state->src) >> 16;
+ dst_w = drm_rect_width(&state->dst);
+ dst_h = drm_rect_height(&state->dst);
+
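+ /* the fractional 16.16 part of the source position becomes the phase */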
+ hphase = state->src.x1 & 0xffff;
+ vphase = state->src.y1 & 0xffff;
+
+ /* make coordinates divisible by the subsampling factor */
+ if (format->hsub > 1) {
+ int mask, remainder;
+
+ mask = format->hsub - 1;
+ remainder = (state->src.x1 >> 16) & mask;
+ src_w = (src_w + remainder) & ~mask;
+ hphase += remainder << 16;
+ }
+
+ if (format->vsub > 1) {
+ int mask, remainder;
+
+ mask = format->vsub - 1;
+ remainder = (state->src.y1 >> 16) & mask;
+ src_h = (src_h + remainder) & ~mask;
+ vphase += remainder << 16;
+ }
+
+ insize = SUN8I_MIXER_SIZE(src_w, src_h);
+ outsize = SUN8I_MIXER_SIZE(dst_w, dst_h);
+
+ /* Set height and width */
+ DRM_DEBUG_DRIVER("Layer source offset X: %d Y: %d\n",
+ (state->src.x1 >> 16) & ~(format->hsub - 1),
+ (state->src.y1 >> 16) & ~(format->vsub - 1));
+ DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_SIZE(channel, overlay),
+ insize);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_OVL_SIZE(channel),
+ insize);
+
+ /*
+ * The scaler must be enabled for subsampled formats, so that it
+ * scales chroma to the same size as luma.
+ */
+ subsampled = format->hsub > 1 || format->vsub > 1;
+
+ if (insize != outsize || subsampled || hphase || vphase) {
+ u32 hscale, vscale;
+
+ DRM_DEBUG_DRIVER("HW scaling is enabled\n");
+
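+ /* src_w/h are 16.16 fixed point, so the ratios keep 16 fractional bits */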
+ hscale = state->src_w / state->crtc_w;
+ vscale = state->src_h / state->crtc_h;
+
+ sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w,
+ dst_h, hscale, vscale, hphase, vphase,
+ format);
+ sun8i_vi_scaler_enable(mixer, channel, true);
+ } else {
+ DRM_DEBUG_DRIVER("HW scaling is not needed\n");
+ sun8i_vi_scaler_enable(mixer, channel, false);
+ }
+
+ /* Set base coordinates */
+ DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
+ state->dst.x1, state->dst.y1);
+ DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+ SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+ outsize);
+
+ return 0;
+}
+
+static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
+ int overlay, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ const struct de2_fmt_info *fmt_info;
+ u32 val;
+
+ fmt_info = sun8i_mixer_format_info(state->fb->format->format);
+ if (!fmt_info) {
+ DRM_DEBUG_DRIVER("Invalid format\n");
+ return -EINVAL;
+ }
+
+ val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
+
+ if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
+ sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc);
+ sun8i_csc_enable_ccsc(mixer, channel, true);
+ } else {
+ sun8i_csc_enable_ccsc(mixer, channel, false);
+ }
+
+ if (fmt_info->rgb)
+ val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE;
+ else
+ val = 0;
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE, val);
+
+ return 0;
+}
+
+static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
+ int overlay, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ const struct drm_format_info *format = fb->format;
+ struct drm_gem_cma_object *gem;
+ u32 dx, dy, src_x, src_y;
+ dma_addr_t paddr;
+ int i;
+
+ /* Adjust x and y to be divisible by the subsampling factor */
+ src_x = (state->src.x1 >> 16) & ~(format->hsub - 1);
+ src_y = (state->src.y1 >> 16) & ~(format->vsub - 1);
+
+ for (i = 0; i < format->num_planes; i++) {
+ /* Get the physical address of the buffer in memory */
+ gem = drm_fb_cma_get_gem_obj(fb, i);
+
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
+
+ /* Compute the start of the displayed memory */
+ paddr = gem->paddr + fb->offsets[i];
+
+ dx = src_x;
+ dy = src_y;
+
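+ /* chroma planes are subsampled, so scale the offsets down accordingly */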
+ if (i > 0) {
+ dx /= format->hsub;
+ dy /= format->vsub;
+ }
+
+ /* Fixup framebuffer address for src coordinates */
+ paddr += dx * format->cpp[i];
+ paddr += dy * fb->pitches[i];
+
+ /* Set the line width */
+ DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n",
+ i + 1, fb->pitches[i]);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_PITCH(channel,
+ overlay, i),
+ fb->pitches[i]);
+
+ DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n",
+ i + 1, &paddr);
+
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(channel,
+ overlay, i),
+ lower_32_bits(paddr));
+ }
+
+ return 0;
+}
+
+static int sun8i_vi_layer_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ max_scale = DRM_PLANE_HELPER_NO_SCALING;
+
+ if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
+ min_scale = SUN8I_VI_SCALER_SCALE_MIN;
+ max_scale = SUN8I_VI_SCALER_SCALE_MAX;
+ }
+
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+}
+
+static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+ struct sun8i_mixer *mixer = layer->mixer;
+
+ sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false);
+}
+
+static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+ struct sun8i_mixer *mixer = layer->mixer;
+
+ if (!plane->state->visible) {
+ sun8i_vi_layer_enable(mixer, layer->channel,
+ layer->overlay, false);
+ return;
+ }
+
+ sun8i_vi_layer_update_coord(mixer, layer->channel,
+ layer->overlay, plane);
+ sun8i_vi_layer_update_formats(mixer, layer->channel,
+ layer->overlay, plane);
+ sun8i_vi_layer_update_buffer(mixer, layer->channel,
+ layer->overlay, plane);
+ sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, true);
+}
+
+static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
+ .atomic_check = sun8i_vi_layer_atomic_check,
+ .atomic_disable = sun8i_vi_layer_atomic_disable,
+ .atomic_update = sun8i_vi_layer_atomic_update,
+};
+
+static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .destroy = drm_plane_cleanup,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = drm_atomic_helper_plane_reset,
+ .update_plane = drm_atomic_helper_update_plane,
+};
+
+/*
+ * While all RGB formats are supported, VI planes don't support
+ * alpha blending, so there is no point in having formats with an
+ * alpha channel when their opaque analogues exist.
+ */
+static const u32 sun8i_vi_layer_formats[] = {
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV411,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU411,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YVU444,
+};
+
+struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
+ struct sun8i_mixer *mixer,
+ int index)
+{
+ struct sun8i_vi_layer *layer;
+ int ret;
+
+ layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
+ if (!layer)
+ return ERR_PTR(-ENOMEM);
+
+ /* possible crtcs are set later */
+ ret = drm_universal_plane_init(drm, &layer->plane, 0,
+ &sun8i_vi_layer_funcs,
+ sun8i_vi_layer_formats,
+ ARRAY_SIZE(sun8i_vi_layer_formats),
+ NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialize layer\n");
+ return ERR_PTR(ret);
+ }
+
+ /* fixed zpos for now */
+ ret = drm_plane_create_zpos_immutable_property(&layer->plane, index);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't add zpos property\n");
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(&layer->plane, &sun8i_vi_layer_helper_funcs);
+ layer->mixer = mixer;
+ layer->channel = index;
+ layer->overlay = 0;
+
+ return layer;
+}
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
new file mode 100644
index 0000000..6996627
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN8I_VI_LAYER_H_
+#define _SUN8I_VI_LAYER_H_
+
+#include <drm/drm_plane.h>
+
+#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x0)
+#define SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x4)
+#define SUN8I_MIXER_CHAN_VI_LAYER_COORD(ch, layer) \
+ (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x8)
+#define SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch, layer, plane) \
+ (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0xc + 4 * (plane))
+#define SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch, layer, plane) \
+ (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x18 + 4 * (plane))
+#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch) (0x2000 + 0x1000 * (ch) + 0xe8)
+
+#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0)
+/* RGB mode should be set for RGB formats and cleared for YCbCr */
+#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE BIT(15)
+#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET 8
+#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8)
+
+struct sun8i_mixer;
+
+struct sun8i_vi_layer {
+ struct drm_plane plane;
+ struct sun8i_mixer *mixer;
+ int channel;
+ int overlay;
+};
+
+static inline struct sun8i_vi_layer *
+plane_to_sun8i_vi_layer(struct drm_plane *plane)
+{
+ return container_of(plane, struct sun8i_vi_layer, plane);
+}
+
+struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
+ struct sun8i_mixer *mixer,
+ int index);
+#endif /* _SUN8I_VI_LAYER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
new file mode 100644
index 0000000..d3f1acb
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
@@ -0,0 +1,971 @@
+/*
+ * Copyright (C) 2017 Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * Coefficients are taken from BSP driver, which is:
+ * Copyright (C) 2014-2015 Allwinner
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "sun8i_vi_scaler.h"
+
+static const u32 lan3coefftab32_left[480] = {
+ 0x40000000, 0x40fe0000, 0x3ffd0100, 0x3efc0100,
+ 0x3efb0100, 0x3dfa0200, 0x3cf90200, 0x3bf80200,
+ 0x39f70200, 0x37f70200, 0x35f70200, 0x33f70200,
+ 0x31f70200, 0x2ef70200, 0x2cf70200, 0x2af70200,
+ 0x27f70200, 0x24f80100, 0x22f80100, 0x1ef90100,
+ 0x1cf90100, 0x19fa0100, 0x17fa0100, 0x14fb0100,
+ 0x11fc0000, 0x0ffc0000, 0x0cfd0000, 0x0afd0000,
+ 0x08fe0000, 0x05ff0000, 0x03ff0000, 0x02000000,
+
+ 0x3806fc02, 0x3805fc02, 0x3803fd01, 0x3801fe01,
+ 0x3700fe01, 0x35ffff01, 0x35fdff01, 0x34fc0001,
+ 0x34fb0000, 0x33fa0000, 0x31fa0100, 0x2ff90100,
+ 0x2df80200, 0x2bf80200, 0x2af70200, 0x28f70200,
+ 0x27f70200, 0x24f70300, 0x22f70300, 0x1ff70300,
+ 0x1ef70300, 0x1cf70300, 0x1af70300, 0x18f70300,
+ 0x16f80300, 0x13f80300, 0x11f90300, 0x0ef90300,
+ 0x0efa0200, 0x0cfa0200, 0x0afb0200, 0x08fb0200,
+
+ 0x320bfa02, 0x3309fa02, 0x3208fb02, 0x3206fb02,
+ 0x3205fb02, 0x3104fc02, 0x3102fc01, 0x3001fd01,
+ 0x3000fd01, 0x2ffffd01, 0x2efefe01, 0x2dfdfe01,
+ 0x2bfcff01, 0x29fcff01, 0x28fbff01, 0x27fa0001,
+ 0x26fa0000, 0x24f90000, 0x22f90100, 0x20f90100,
+ 0x1ff80100, 0x1ef80100, 0x1cf80100, 0x1af80200,
+ 0x18f80200, 0x17f80200, 0x15f80200, 0x12f80200,
+ 0x11f90200, 0x0ff90200, 0x0df90200, 0x0cfa0200,
+
+ 0x2e0efa01, 0x2f0dfa01, 0x2f0bfa01, 0x2e0afa01,
+ 0x2e09fa01, 0x2e07fb01, 0x2d06fb01, 0x2d05fb01,
+ 0x2c04fb01, 0x2b03fc01, 0x2a02fc01, 0x2a01fc01,
+ 0x2800fd01, 0x28fffd01, 0x26fefd01, 0x25fefe01,
+ 0x24fdfe01, 0x23fcfe01, 0x21fcff01, 0x20fbff01,
+ 0x1efbff01, 0x1efbff00, 0x1cfa0000, 0x1bfa0000,
+ 0x19fa0000, 0x18fa0000, 0x17f90000, 0x15f90100,
+ 0x14f90100, 0x12f90100, 0x11f90100, 0x0ff90100,
+
+ 0x2b10fa00, 0x2b0ffa00, 0x2b0efa00, 0x2b0cfa00,
+ 0x2b0bfa00, 0x2a0afb01, 0x2a09fb01, 0x2908fb01,
+ 0x2807fb01, 0x2806fb01, 0x2805fb01, 0x2604fc01,
+ 0x2503fc01, 0x2502fc01, 0x2401fc01, 0x2301fc01,
+ 0x2100fd01, 0x21fffd01, 0x21fffd01, 0x20fefd01,
+ 0x1dfefe01, 0x1cfdfe01, 0x1cfdfe00, 0x1bfcfe00,
+ 0x19fcff00, 0x19fbff00, 0x17fbff00, 0x16fbff00,
+ 0x15fbff00, 0x14fb0000, 0x13fa0000, 0x11fa0000,
+
+ 0x2811fcff, 0x2810fcff, 0x280ffbff, 0x280efbff,
+ 0x270dfb00, 0x270cfb00, 0x270bfb00, 0x260afb00,
+ 0x2609fb00, 0x2508fb00, 0x2507fb00, 0x2407fb00,
+ 0x2406fc00, 0x2305fc00, 0x2204fc00, 0x2203fc00,
+ 0x2103fc00, 0x2002fc00, 0x1f01fd00, 0x1e01fd00,
+ 0x1d00fd00, 0x1dfffd00, 0x1cfffd00, 0x1bfefd00,
+ 0x1afefe00, 0x19fefe00, 0x18fdfe00, 0x17fdfe00,
+ 0x16fdfe00, 0x15fcff00, 0x13fcff00, 0x12fcff00,
+
+ 0x2512fdfe, 0x2511fdff, 0x2410fdff, 0x240ffdff,
+ 0x240efcff, 0x240dfcff, 0x240dfcff, 0x240cfcff,
+ 0x230bfcff, 0x230afc00, 0x2209fc00, 0x2108fc00,
+ 0x2108fc00, 0x2007fc00, 0x2006fc00, 0x2005fc00,
+ 0x1f05fc00, 0x1e04fc00, 0x1e03fc00, 0x1c03fd00,
+ 0x1c02fd00, 0x1b02fd00, 0x1b01fd00, 0x1a00fd00,
+ 0x1900fd00, 0x1800fd00, 0x17fffe00, 0x16fffe00,
+ 0x16fefe00, 0x14fefe00, 0x13fefe00, 0x13fdfe00,
+
+ 0x2212fffe, 0x2211fefe, 0x2211fefe, 0x2110fefe,
+ 0x210ffeff, 0x220efdff, 0x210dfdff, 0x210dfdff,
+ 0x210cfdff, 0x210bfdff, 0x200afdff, 0x200afdff,
+ 0x1f09fdff, 0x1f08fdff, 0x1d08fd00, 0x1c07fd00,
+ 0x1d06fd00, 0x1b06fd00, 0x1b05fd00, 0x1c04fd00,
+ 0x1b04fd00, 0x1a03fd00, 0x1a03fd00, 0x1902fd00,
+ 0x1802fd00, 0x1801fd00, 0x1701fd00, 0x1600fd00,
+ 0x1400fe00, 0x1400fe00, 0x14fffe00, 0x13fffe00,
+
+ 0x201200fe, 0x201100fe, 0x1f11fffe, 0x2010fffe,
+ 0x1f0ffffe, 0x1e0ffffe, 0x1f0efeff, 0x1f0dfeff,
+ 0x1f0dfeff, 0x1e0cfeff, 0x1e0bfeff, 0x1d0bfeff,
+ 0x1d0afeff, 0x1d09fdff, 0x1d09fdff, 0x1c08fdff,
+ 0x1c07fdff, 0x1b07fd00, 0x1b06fd00, 0x1a06fd00,
+ 0x1a05fd00, 0x1805fd00, 0x1904fd00, 0x1804fd00,
+ 0x1703fd00, 0x1703fd00, 0x1602fe00, 0x1502fe00,
+ 0x1501fe00, 0x1401fe00, 0x1301fe00, 0x1300fe00,
+
+ 0x1c1202fe, 0x1c1102fe, 0x1b1102fe, 0x1c1001fe,
+ 0x1b1001fe, 0x1b0f01ff, 0x1b0e00ff, 0x1b0e00ff,
+ 0x1b0d00ff, 0x1a0d00ff, 0x1a0c00ff, 0x1a0cffff,
+ 0x1a0bffff, 0x1a0bffff, 0x1a0affff, 0x180affff,
+ 0x1909ffff, 0x1809ffff, 0x1808ffff, 0x1808feff,
+ 0x1807feff, 0x1707fe00, 0x1606fe00, 0x1506fe00,
+ 0x1605fe00, 0x1505fe00, 0x1504fe00, 0x1304fe00,
+ 0x1304fe00, 0x1303fe00, 0x1203fe00, 0x1203fe00,
+
+ 0x181104ff, 0x191103ff, 0x191003ff, 0x181003ff,
+ 0x180f03ff, 0x190f02ff, 0x190e02ff, 0x180e02ff,
+ 0x180d02ff, 0x180d01ff, 0x180d01ff, 0x180c01ff,
+ 0x180c01ff, 0x180b00ff, 0x170b00ff, 0x170a00ff,
+ 0x170a00ff, 0x170900ff, 0x160900ff, 0x160900ff,
+ 0x1608ffff, 0x1508ffff, 0x1507ff00, 0x1507ff00,
+ 0x1407ff00, 0x1306ff00, 0x1306ff00, 0x1305ff00,
+ 0x1205ff00, 0x1105ff00, 0x1204ff00, 0x1104ff00,
+
+ 0x171005ff, 0x171005ff, 0x171004ff, 0x170f04ff,
+ 0x160f04ff, 0x170f03ff, 0x170e03ff, 0x160e03ff,
+ 0x160d03ff, 0x160d02ff, 0x160d02ff, 0x160c02ff,
+ 0x160c02ff, 0x160c02ff, 0x160b01ff, 0x150b01ff,
+ 0x150a01ff, 0x150a01ff, 0x150a01ff, 0x140901ff,
+ 0x14090000, 0x14090000, 0x14080000, 0x13080000,
+ 0x13070000, 0x12070000, 0x12070000, 0x12060000,
+ 0x11060000, 0x11060000, 0x11050000, 0x1105ff00,
+
+ 0x14100600, 0x15100500, 0x150f0500, 0x150f0500,
+ 0x140f0500, 0x150e0400, 0x140e0400, 0x130e0400,
+ 0x140d0400, 0x150d0300, 0x130d0300, 0x140c0300,
+ 0x140c0300, 0x140c0200, 0x140b0200, 0x130b0200,
+ 0x120b0200, 0x130a0200, 0x130a0200, 0x130a0100,
+ 0x13090100, 0x12090100, 0x11090100, 0x12080100,
+ 0x11080100, 0x10080100, 0x11070100, 0x11070000,
+ 0x10070000, 0x11060000, 0x10060000, 0x10060000,
+
+ 0x140f0600, 0x140f0600, 0x130f0600, 0x140f0500,
+ 0x140e0500, 0x130e0500, 0x130e0500, 0x140d0400,
+ 0x140d0400, 0x130d0400, 0x120d0400, 0x130c0400,
+ 0x130c0300, 0x130c0300, 0x130b0300, 0x130b0300,
+ 0x110b0300, 0x130a0200, 0x120a0200, 0x120a0200,
+ 0x120a0200, 0x12090200, 0x10090200, 0x11090100,
+ 0x11080100, 0x11080100, 0x10080100, 0x10080100,
+ 0x10070100, 0x10070100, 0x0f070100, 0x10060100,
+
+ 0x120f0701, 0x130f0601, 0x130e0601, 0x130e0601,
+ 0x120e0601, 0x130e0501, 0x130e0500, 0x130d0500,
+ 0x120d0500, 0x120d0500, 0x130c0400, 0x130c0400,
+ 0x120c0400, 0x110c0400, 0x120b0400, 0x120b0300,
+ 0x120b0300, 0x120b0300, 0x120a0300, 0x110a0300,
+ 0x110a0200, 0x11090200, 0x11090200, 0x10090200,
+ 0x10090200, 0x10080200, 0x10080200, 0x10080100,
+ 0x0f080100, 0x10070100, 0x0f070100, 0x0f070100
+};
+
+static const u32 lan3coefftab32_right[480] = {
+ 0x00000000, 0x00000002, 0x0000ff04, 0x0000ff06,
+ 0x0000fe08, 0x0000fd0a, 0x0000fd0c, 0x0000fc0f,
+ 0x0000fc12, 0x0001fb14, 0x0001fa17, 0x0001fa19,
+ 0x0001f91c, 0x0001f91f, 0x0001f822, 0x0001f824,
+ 0x0002f727, 0x0002f72a, 0x0002f72c, 0x0002f72f,
+ 0x0002f731, 0x0002f733, 0x0002f735, 0x0002f737,
+ 0x0002f73a, 0x0002f83b, 0x0002f93c, 0x0002fa3d,
+ 0x0001fb3e, 0x0001fc3f, 0x0001fd40, 0x0000fe40,
+
+ 0x0002fc06, 0x0002fb08, 0x0002fb0a, 0x0002fa0c,
+ 0x0002fa0e, 0x0003f910, 0x0003f912, 0x0003f814,
+ 0x0003f816, 0x0003f719, 0x0003f71a, 0x0003f71d,
+ 0x0003f71f, 0x0003f721, 0x0003f723, 0x0003f725,
+ 0x0002f727, 0x0002f729, 0x0002f72b, 0x0002f82d,
+ 0x0002f82e, 0x0001f930, 0x0001fa31, 0x0000fa34,
+ 0x0000fb34, 0x0100fc35, 0x01fffd36, 0x01ffff37,
+ 0x01fe0037, 0x01fe0138, 0x01fd0338, 0x02fc0538,
+
+ 0x0002fa0b, 0x0002fa0c, 0x0002f90e, 0x0002f910,
+ 0x0002f911, 0x0002f813, 0x0002f816, 0x0002f817,
+ 0x0002f818, 0x0002f81a, 0x0001f81c, 0x0001f81e,
+ 0x0001f820, 0x0001f921, 0x0001f923, 0x0000f925,
+ 0x0000fa26, 0x0100fa28, 0x01fffb29, 0x01fffc2a,
+ 0x01fffc2c, 0x01fefd2d, 0x01fefe2e, 0x01fdff2f,
+ 0x01fd0030, 0x01fd0130, 0x01fc0232, 0x02fc0432,
+ 0x02fb0532, 0x02fb0633, 0x02fb0833, 0x02fa0933,
+
+ 0x0001fa0e, 0x0001f90f, 0x0001f911, 0x0001f913,
+ 0x0001f914, 0x0001f915, 0x0000f918, 0x0000fa18,
+ 0x0000fa1a, 0x0000fa1b, 0x0000fa1d, 0x00fffb1e,
+ 0x01fffb1f, 0x01fffb20, 0x01fffc22, 0x01fefc23,
+ 0x01fefd24, 0x01fefe25, 0x01fdfe27, 0x01fdff28,
+ 0x01fd0029, 0x01fc012a, 0x01fc022b, 0x01fc032b,
+ 0x01fb042d, 0x01fb052d, 0x01fb062e, 0x01fb072e,
+ 0x01fa092e, 0x01fa0a2f, 0x01fa0b2f, 0x01fa0d2f,
+
+ 0x0000fa11, 0x0000fa12, 0x0000fa13, 0x0000fb14,
+ 0x00fffb16, 0x00fffb16, 0x00fffb17, 0x00fffb19,
+ 0x00fffc1a, 0x00fefc1c, 0x00fefd1c, 0x01fefd1d,
+ 0x01fefe1e, 0x01fdfe20, 0x01fdff21, 0x01fdff22,
+ 0x01fd0023, 0x01fc0124, 0x01fc0124, 0x01fc0225,
+ 0x01fc0326, 0x01fc0427, 0x01fb0528, 0x01fb0629,
+ 0x01fb0729, 0x01fb0829, 0x01fb092a, 0x01fb0a2a,
+ 0x00fa0b2c, 0x00fa0c2b, 0x00fa0e2b, 0x00fa0f2c,
+
+ 0x00fffc11, 0x00fffc12, 0x00fffc14, 0x00fffc15,
+ 0x00fefd16, 0x00fefd17, 0x00fefd18, 0x00fefe19,
+ 0x00fefe1a, 0x00fdfe1d, 0x00fdff1d, 0x00fdff1e,
+ 0x00fd001d, 0x00fd011e, 0x00fd0120, 0x00fc0221,
+ 0x00fc0321, 0x00fc0323, 0x00fc0423, 0x00fc0523,
+ 0x00fc0624, 0x00fb0725, 0x00fb0726, 0x00fb0827,
+ 0x00fb0926, 0x00fb0a26, 0x00fb0b27, 0x00fb0c27,
+ 0x00fb0d27, 0xfffb0e28, 0xfffb0f29, 0xfffc1028,
+
+ 0x00fefd13, 0x00fefd13, 0x00fefe14, 0x00fefe15,
+ 0x00fefe17, 0x00feff17, 0x00feff17, 0x00fd0018,
+ 0x00fd001a, 0x00fd001a, 0x00fd011b, 0x00fd021c,
+ 0x00fd021c, 0x00fd031d, 0x00fc031f, 0x00fc041f,
+ 0x00fc051f, 0x00fc0521, 0x00fc0621, 0x00fc0721,
+ 0x00fc0821, 0x00fc0822, 0x00fc0922, 0x00fc0a23,
+ 0xfffc0b24, 0xfffc0c24, 0xfffc0d24, 0xfffc0d25,
+ 0xfffc0e25, 0xfffd0f25, 0xfffd1025, 0xfffd1125,
+
+ 0x00feff12, 0x00feff14, 0x00feff14, 0x00fe0015,
+ 0x00fe0015, 0x00fd0017, 0x00fd0118, 0x00fd0118,
+ 0x00fd0218, 0x00fd0219, 0x00fd031a, 0x00fd031a,
+ 0x00fd041b, 0x00fd041c, 0x00fd051c, 0x00fd061d,
+ 0x00fd061d, 0x00fd071e, 0x00fd081e, 0xfffd081f,
+ 0xfffd091f, 0xfffd0a20, 0xfffd0a20, 0xfffd0b21,
+ 0xfffd0c21, 0xfffd0d21, 0xfffd0d22, 0xfffd0e23,
+ 0xfffe0f22, 0xfefe1022, 0xfefe1122, 0xfefe1123,
+
+ 0x00fe0012, 0x00fe0013, 0x00fe0114, 0x00fe0114,
+ 0x00fe0116, 0x00fe0216, 0x00fe0216, 0x00fd0317,
+ 0x00fd0317, 0x00fd0418, 0x00fd0419, 0x00fd0519,
+ 0x00fd051a, 0x00fd061b, 0x00fd061b, 0x00fd071c,
+ 0xfffd071e, 0xfffd081d, 0xfffd091d, 0xfffd091e,
+ 0xfffe0a1d, 0xfffe0b1e, 0xfffe0b1e, 0xfffe0c1e,
+ 0xfffe0d1f, 0xfffe0d1f, 0xfffe0e1f, 0xfeff0f1f,
+ 0xfeff0f20, 0xfeff1020, 0xfeff1120, 0xfe001120,
+
+ 0x00fe0212, 0x00fe0312, 0x00fe0313, 0x00fe0314,
+ 0x00fe0414, 0x00fe0414, 0x00fe0416, 0x00fe0515,
+ 0x00fe0516, 0x00fe0616, 0x00fe0617, 0x00fe0717,
+ 0xfffe0719, 0xfffe0818, 0xffff0818, 0xffff0919,
+ 0xffff0919, 0xffff0a19, 0xffff0a1a, 0xffff0b1a,
+ 0xffff0b1b, 0xffff0c1a, 0xff000c1b, 0xff000d1b,
+ 0xff000d1b, 0xff000e1b, 0xff000e1c, 0xff010f1c,
+ 0xfe01101c, 0xfe01101d, 0xfe02111c, 0xfe02111c,
+
+ 0x00ff0411, 0x00ff0411, 0x00ff0412, 0x00ff0512,
+ 0x00ff0513, 0x00ff0513, 0x00ff0613, 0x00ff0614,
+ 0x00ff0714, 0x00ff0715, 0x00ff0715, 0xffff0816,
+ 0xffff0816, 0xff000916, 0xff000917, 0xff000918,
+ 0xff000a17, 0xff000a18, 0xff000b18, 0xff000b18,
+ 0xff010c18, 0xff010c19, 0xff010d18, 0xff010d18,
+ 0xff020d18, 0xff020e19, 0xff020e19, 0xff020f19,
+ 0xff030f19, 0xff031019, 0xff031019, 0xff031119,
+
+ 0x00ff0511, 0x00ff0511, 0x00000511, 0x00000611,
+ 0x00000612, 0x00000612, 0x00000712, 0x00000713,
+ 0x00000714, 0x00000814, 0x00000814, 0x00000914,
+ 0x00000914, 0xff010914, 0xff010a15, 0xff010a16,
+ 0xff010a17, 0xff010b16, 0xff010b16, 0xff020c16,
+ 0xff020c16, 0xff020c16, 0xff020d16, 0xff020d17,
+ 0xff030d17, 0xff030e17, 0xff030e17, 0xff030f17,
+ 0xff040f17, 0xff040f17, 0xff041017, 0xff051017,
+
+ 0x00000610, 0x00000610, 0x00000611, 0x00000611,
+ 0x00000711, 0x00000712, 0x00010712, 0x00010812,
+ 0x00010812, 0x00010812, 0x00010913, 0x00010913,
+ 0x00010913, 0x00010a13, 0x00020a13, 0x00020a14,
+ 0x00020b14, 0x00020b14, 0x00020b14, 0x00020c14,
+ 0x00030c14, 0x00030c15, 0x00030d15, 0x00030d15,
+ 0x00040d15, 0x00040e15, 0x00040e15, 0x00040e16,
+ 0x00050f15, 0x00050f15, 0x00050f16, 0x00051015,
+
+ 0x00000611, 0x00010610, 0x00010710, 0x00010710,
+ 0x00010711, 0x00010811, 0x00010811, 0x00010812,
+ 0x00010812, 0x00010912, 0x00020912, 0x00020912,
+ 0x00020a12, 0x00020a12, 0x00020a13, 0x00020a13,
+ 0x00030b13, 0x00030b13, 0x00030b14, 0x00030c13,
+ 0x00030c13, 0x00040c13, 0x00040d14, 0x00040d14,
+ 0x00040d15, 0x00040d15, 0x00050e14, 0x00050e14,
+ 0x00050e15, 0x00050f14, 0x00060f14, 0x00060f14,
+
+ 0x0001070f, 0x0001070f, 0x00010710, 0x00010710,
+ 0x00010810, 0x00010810, 0x00020810, 0x00020811,
+ 0x00020911, 0x00020911, 0x00020912, 0x00020912,
+ 0x00020a12, 0x00030a12, 0x00030a12, 0x00030b12,
+ 0x00030b12, 0x00030b12, 0x00040b12, 0x00040c12,
+ 0x00040c13, 0x00040c14, 0x00040c14, 0x00050d13,
+ 0x00050d13, 0x00050d14, 0x00050e13, 0x01050e13,
+ 0x01060e13, 0x01060e13, 0x01060e14, 0x01060f13
+};
+
+static const u32 lan2coefftab32[480] = {
+ 0x00004000, 0x000140ff, 0x00033ffe, 0x00043ffd,
+ 0x00063efc, 0xff083dfc, 0x000a3bfb, 0xff0d39fb,
+ 0xff0f37fb, 0xff1136fa, 0xfe1433fb, 0xfe1631fb,
+ 0xfd192ffb, 0xfd1c2cfb, 0xfd1f29fb, 0xfc2127fc,
+ 0xfc2424fc, 0xfc2721fc, 0xfb291ffd, 0xfb2c1cfd,
+ 0xfb2f19fd, 0xfb3116fe, 0xfb3314fe, 0xfa3611ff,
+ 0xfb370fff, 0xfb390dff, 0xfb3b0a00, 0xfc3d08ff,
+ 0xfc3e0600, 0xfd3f0400, 0xfe3f0300, 0xff400100,
+
+ 0xff053804, 0xff063803, 0xff083801, 0xff093701,
+ 0xff0a3700, 0xff0c3500, 0xff0e34ff, 0xff1033fe,
+ 0xff1232fd, 0xfe1431fd, 0xfe162ffd, 0xfe182dfd,
+ 0xfd1b2cfc, 0xfd1d2afc, 0xfd1f28fc, 0xfd2126fc,
+ 0xfd2323fd, 0xfc2621fd, 0xfc281ffd, 0xfc2a1dfd,
+ 0xfc2c1bfd, 0xfd2d18fe, 0xfd2f16fe, 0xfd3114fe,
+ 0xfd3212ff, 0xfe3310ff, 0xff340eff, 0x00350cff,
+ 0x00360a00, 0x01360900, 0x02370700, 0x03370600,
+
+ 0xff083207, 0xff093206, 0xff0a3205, 0xff0c3203,
+ 0xff0d3103, 0xff0e3102, 0xfe113001, 0xfe132f00,
+ 0xfe142e00, 0xfe162dff, 0xfe182bff, 0xfe192aff,
+ 0xfe1b29fe, 0xfe1d27fe, 0xfe1f25fe, 0xfd2124fe,
+ 0xfe2222fe, 0xfe2421fd, 0xfe251ffe, 0xfe271dfe,
+ 0xfe291bfe, 0xff2a19fe, 0xff2b18fe, 0xff2d16fe,
+ 0x002e14fe, 0x002f12ff, 0x013010ff, 0x02300fff,
+ 0x03310dff, 0x04310cff, 0x05310a00, 0x06310900,
+
+ 0xff0a2e09, 0xff0b2e08, 0xff0c2e07, 0xff0e2d06,
+ 0xff0f2d05, 0xff102d04, 0xff122c03, 0xfe142c02,
+ 0xfe152b02, 0xfe172a01, 0xfe182901, 0xfe1a2800,
+ 0xfe1b2700, 0xfe1d2500, 0xff1e24ff, 0xfe2023ff,
+ 0xff2121ff, 0xff2320fe, 0xff241eff, 0x00251dfe,
+ 0x00261bff, 0x00281afe, 0x012818ff, 0x012a16ff,
+ 0x022a15ff, 0x032b13ff, 0x032c12ff, 0x052c10ff,
+ 0x052d0fff, 0x062d0d00, 0x072d0c00, 0x082d0b00,
+
+ 0xff0c2a0b, 0xff0d2a0a, 0xff0e2a09, 0xff0f2a08,
+ 0xff102a07, 0xff112a06, 0xff132905, 0xff142904,
+ 0xff162803, 0xff172703, 0xff182702, 0xff1a2601,
+ 0xff1b2501, 0xff1c2401, 0xff1e2300, 0xff1f2200,
+ 0x00202000, 0x00211f00, 0x01221d00, 0x01231c00,
+ 0x01251bff, 0x02251aff, 0x032618ff, 0x032717ff,
+ 0x042815ff, 0x052814ff, 0x052913ff, 0x06291100,
+ 0x072a10ff, 0x082a0e00, 0x092a0d00, 0x0a2a0c00,
+
+ 0xff0d280c, 0xff0e280b, 0xff0f280a, 0xff102809,
+ 0xff112808, 0xff122708, 0xff142706, 0xff152705,
+ 0xff162605, 0xff172604, 0xff192503, 0xff1a2403,
+ 0x001b2302, 0x001c2202, 0x001d2201, 0x001e2101,
+ 0x011f1f01, 0x01211e00, 0x01221d00, 0x02221c00,
+ 0x02231b00, 0x03241900, 0x04241800, 0x04251700,
+ 0x052616ff, 0x06261400, 0x072713ff, 0x08271100,
+ 0x08271100, 0x09271000, 0x0a280e00, 0x0b280d00,
+
+ 0xff0e260d, 0xff0f260c, 0xff10260b, 0xff11260a,
+ 0xff122609, 0xff132608, 0xff142508, 0xff152507,
+ 0x00152506, 0x00172405, 0x00182305, 0x00192304,
+ 0x001b2203, 0x001c2103, 0x011d2002, 0x011d2002,
+ 0x011f1f01, 0x021f1e01, 0x02201d01, 0x03211c00,
+ 0x03221b00, 0x04221a00, 0x04231801, 0x05241700,
+ 0x06241600, 0x07241500, 0x08251300, 0x09251200,
+ 0x09261100, 0x0a261000, 0x0b260f00, 0x0c260e00,
+
+ 0xff0e250e, 0xff0f250d, 0xff10250c, 0xff11250b,
+ 0x0011250a, 0x00132409, 0x00142408, 0x00152407,
+ 0x00162307, 0x00172306, 0x00182206, 0x00192205,
+ 0x011a2104, 0x011b2004, 0x011c2003, 0x021c1f03,
+ 0x021e1e02, 0x031e1d02, 0x03201c01, 0x04201b01,
+ 0x04211a01, 0x05221900, 0x05221801, 0x06231700,
+ 0x07231600, 0x07241500, 0x08241400, 0x09241300,
+ 0x0a241200, 0x0b241100, 0x0c241000, 0x0d240f00,
+
+ 0x000e240e, 0x000f240d, 0x0010240c, 0x0011240b,
+ 0x0013230a, 0x0013230a, 0x00142309, 0x00152308,
+ 0x00162208, 0x00172207, 0x01182106, 0x01192105,
+ 0x011a2005, 0x021b1f04, 0x021b1f04, 0x021d1e03,
+ 0x031d1d03, 0x031e1d02, 0x041e1c02, 0x041f1b02,
+ 0x05201a01, 0x05211901, 0x06211801, 0x07221700,
+ 0x07221601, 0x08231500, 0x09231400, 0x0a231300,
+ 0x0a231300, 0x0b231200, 0x0c231100, 0x0d231000,
+
+ 0x000f220f, 0x0010220e, 0x0011220d, 0x0012220c,
+ 0x0013220b, 0x0013220b, 0x0015210a, 0x0015210a,
+ 0x01162108, 0x01172008, 0x01182007, 0x02191f06,
+ 0x02191f06, 0x021a1e06, 0x031a1e05, 0x031c1d04,
+ 0x041c1c04, 0x041d1c03, 0x051d1b03, 0x051e1a03,
+ 0x061f1902, 0x061f1902, 0x07201801, 0x08201701,
+ 0x08211601, 0x09211501, 0x0a211500, 0x0b211400,
+ 0x0b221300, 0x0c221200, 0x0d221100, 0x0e221000,
+
+ 0x0010210f, 0x0011210e, 0x0011210e, 0x0012210d,
+ 0x0013210c, 0x0014200c, 0x0114200b, 0x0115200a,
+ 0x01161f0a, 0x01171f09, 0x02171f08, 0x02181e08,
+ 0x03181e07, 0x031a1d06, 0x031a1d06, 0x041b1c05,
+ 0x041c1c04, 0x051c1b04, 0x051d1a04, 0x061d1a03,
+ 0x071d1903, 0x071e1803, 0x081e1802, 0x081f1702,
+ 0x091f1602, 0x0a201501, 0x0b1f1501, 0x0b201401,
+ 0x0c211300, 0x0d211200, 0x0e201200, 0x0e211100,
+
+ 0x00102010, 0x0011200f, 0x0012200e, 0x0013200d,
+ 0x0013200d, 0x01141f0c, 0x01151f0b, 0x01151f0b,
+ 0x01161f0a, 0x02171e09, 0x02171e09, 0x03181d08,
+ 0x03191d07, 0x03191d07, 0x041a1c06, 0x041b1c05,
+ 0x051b1b05, 0x051c1b04, 0x061c1a04, 0x071d1903,
+ 0x071d1903, 0x081d1803, 0x081e1703, 0x091e1702,
+ 0x0a1f1601, 0x0a1f1502, 0x0b1f1501, 0x0c1f1401,
+ 0x0d201300, 0x0d201300, 0x0e201200, 0x0f201100,
+
+ 0x00102010, 0x0011200f, 0x00121f0f, 0x00131f0e,
+ 0x00141f0d, 0x01141f0c, 0x01141f0c, 0x01151e0c,
+ 0x02161e0a, 0x02171e09, 0x03171d09, 0x03181d08,
+ 0x03181d08, 0x04191c07, 0x041a1c06, 0x051a1b06,
+ 0x051b1b05, 0x061b1a05, 0x061c1a04, 0x071c1904,
+ 0x081c1903, 0x081d1803, 0x091d1703, 0x091e1702,
+ 0x0a1e1602, 0x0b1e1502, 0x0c1e1501, 0x0c1f1401,
+ 0x0d1f1400, 0x0e1f1300, 0x0e1f1201, 0x0f1f1200,
+
+ 0x00111e11, 0x00121e10, 0x00131e0f, 0x00131e0f,
+ 0x01131e0e, 0x01141d0e, 0x02151d0c, 0x02151d0c,
+ 0x02161d0b, 0x03161c0b, 0x03171c0a, 0x04171c09,
+ 0x04181b09, 0x05181b08, 0x05191b07, 0x06191a07,
+ 0x061a1a06, 0x071a1906, 0x071b1905, 0x081b1805,
+ 0x091b1804, 0x091c1704, 0x0a1c1703, 0x0a1c1604,
+ 0x0b1d1602, 0x0c1d1502, 0x0c1d1502, 0x0d1d1402,
+ 0x0e1d1401, 0x0e1e1301, 0x0f1e1300, 0x101e1200,
+
+ 0x00111e11, 0x00121e10, 0x00131d10, 0x01131d0f,
+ 0x01141d0e, 0x01141d0e, 0x02151c0d, 0x02151c0d,
+ 0x03161c0b, 0x03161c0b, 0x04171b0a, 0x04171b0a,
+ 0x05171b09, 0x05181a09, 0x06181a08, 0x06191a07,
+ 0x07191907, 0x071a1906, 0x081a1806, 0x081a1806,
+ 0x091a1805, 0x0a1b1704, 0x0a1b1704, 0x0b1c1603,
+ 0x0b1c1603, 0x0c1c1503, 0x0d1c1502, 0x0d1d1402,
+ 0x0e1d1401, 0x0f1d1301, 0x0f1d1301, 0x101e1200,
+};
+
+static const u32 bicubic8coefftab32_left[480] = {
+ 0x40000000, 0x40ff0000, 0x3ffe0000, 0x3efe0000,
+ 0x3dfd0000, 0x3cfc0000, 0x3bfc0000, 0x39fc0000,
+ 0x36fc0000, 0x35fb0000, 0x33fb0000, 0x31fb0000,
+ 0x2ffb0000, 0x2cfb0000, 0x29fc0000, 0x27fc0000,
+ 0x24fc0000, 0x21fc0000, 0x1efd0000, 0x1cfd0000,
+ 0x19fd0000, 0x16fe0000, 0x14fe0000, 0x11fe0000,
+ 0x0dff0000, 0x0cff0000, 0x0aff0000, 0x08ff0000,
+ 0x05000000, 0x03000000, 0x02000000, 0x01000000,
+
+ 0x3904ff00, 0x3903ff00, 0x3902ff00, 0x38010000,
+ 0x37000000, 0x36ff0000, 0x35ff0000, 0x34fe0000,
+ 0x32fe0000, 0x31fd0000, 0x30fd0000, 0x2efc0000,
+ 0x2cfc0000, 0x2afc0000, 0x28fc0000, 0x26fc0000,
+ 0x24fc0000, 0x22fc0000, 0x20fc0000, 0x1efc0000,
+ 0x1cfc0000, 0x19fc0000, 0x17fc0000, 0x15fd0000,
+ 0x12fd0000, 0x11fd0000, 0x0ffd0000, 0x0dfe0000,
+ 0x0bfe0000, 0x09fe0000, 0x08fe0000, 0x06ff0000,
+
+ 0x3209fe00, 0x3407fe00, 0x3306fe00, 0x3305fe00,
+ 0x3204fe00, 0x3102ff00, 0x3102ff00, 0x3001ff00,
+ 0x2f00ff00, 0x2effff00, 0x2cff0000, 0x2bfe0000,
+ 0x29fe0000, 0x28fe0000, 0x26fd0000, 0x24fd0000,
+ 0x23fd0000, 0x21fd0000, 0x20fc0000, 0x1efc0000,
+ 0x1dfc0000, 0x1bfc0000, 0x19fc0000, 0x17fc0000,
+ 0x16fc0000, 0x14fc0000, 0x12fc0000, 0x10fd0000,
+ 0x0ffd0000, 0x0dfd0000, 0x0cfd0000, 0x0afd0000,
+
+ 0x2e0cfd00, 0x2e0bfd00, 0x2e09fd00, 0x2e08fd00,
+ 0x2e07fd00, 0x2c06fe00, 0x2c05fe00, 0x2b04fe00,
+ 0x2b03fe00, 0x2a02fe00, 0x2901fe00, 0x2701ff00,
+ 0x2700ff00, 0x26ffff00, 0x24ffff00, 0x23ffff00,
+ 0x22feff00, 0x20fe0000, 0x1ffe0000, 0x1efd0000,
+ 0x1dfd0000, 0x1bfd0000, 0x1afd0000, 0x19fd0000,
+ 0x17fd0000, 0x15fd0000, 0x13fd0000, 0x12fd0000,
+ 0x11fd0000, 0x10fd0000, 0x0ffd0000, 0x0cfd0000,
+
+ 0x2a0efd00, 0x2a0dfd00, 0x2a0cfd00, 0x290bfd00,
+ 0x290afd00, 0x2909fd00, 0x2908fd00, 0x2807fd00,
+ 0x2706fd00, 0x2705fd00, 0x2604fe00, 0x2603fe00,
+ 0x2502fe00, 0x2402fe00, 0x2401fe00, 0x2200fe00,
+ 0x2200fe00, 0x2000ff00, 0x1fffff00, 0x1effff00,
+ 0x1dfeff00, 0x1cfeff00, 0x1afeff00, 0x19feff00,
+ 0x17fe0000, 0x16fd0000, 0x15fd0000, 0x14fd0000,
+ 0x12fd0000, 0x11fd0000, 0x10fd0000, 0x0ffd0000,
+
+ 0x2610fd00, 0x260ffd00, 0x260efd00, 0x260dfd00,
+ 0x260cfd00, 0x260bfd00, 0x260afd00, 0x2609fd00,
+ 0x2508fd00, 0x2507fd00, 0x2406fd00, 0x2406fd00,
+ 0x2305fd00, 0x2304fd00, 0x2203fe00, 0x2103fe00,
+ 0x2002fe00, 0x1f01fe00, 0x1e01fe00, 0x1e00fe00,
+ 0x1c00fe00, 0x1b00fe00, 0x1afffe00, 0x19ffff00,
+ 0x18ffff00, 0x17feff00, 0x16feff00, 0x15feff00,
+ 0x14feff00, 0x13feff00, 0x11feff00, 0x10fd0000,
+
+ 0x2411feff, 0x2410feff, 0x240ffeff, 0x230efeff,
+ 0x240dfeff, 0x240cfeff, 0x230cfd00, 0x230bfd00,
+ 0x230afd00, 0x2309fd00, 0x2208fd00, 0x2108fd00,
+ 0x2007fd00, 0x2106fd00, 0x2005fd00, 0x1f05fd00,
+ 0x1f04fd00, 0x1e03fd00, 0x1d03fe00, 0x1c02fe00,
+ 0x1b02fe00, 0x1a01fe00, 0x1a01fe00, 0x1900fe00,
+ 0x1800fe00, 0x1700fe00, 0x16fffe00, 0x15fffe00,
+ 0x13ffff00, 0x12ffff00, 0x12feff00, 0x11feff00,
+
+ 0x2212fffe, 0x2211fffe, 0x2210ffff, 0x220ffeff,
+ 0x220efeff, 0x210efeff, 0x210dfeff, 0x210cfeff,
+ 0x210bfeff, 0x200bfeff, 0x200afeff, 0x1f09feff,
+ 0x1f08feff, 0x1d08fe00, 0x1e07fd00, 0x1e06fd00,
+ 0x1d06fd00, 0x1c05fd00, 0x1b04fe00, 0x1a04fe00,
+ 0x1a03fe00, 0x1903fe00, 0x1802fe00, 0x1802fe00,
+ 0x1701fe00, 0x1601fe00, 0x1501fe00, 0x1500fe00,
+ 0x1400fe00, 0x1400fe00, 0x13fffe00, 0x12fffe00,
+
+ 0x201200fe, 0x201100fe, 0x1f1100fe, 0x2010fffe,
+ 0x200ffffe, 0x1f0ffffe, 0x1f0efffe, 0x1e0dffff,
+ 0x1f0cfeff, 0x1e0cfeff, 0x1e0bfeff, 0x1e0afeff,
+ 0x1d0afeff, 0x1d09feff, 0x1c08feff, 0x1b08feff,
+ 0x1b07feff, 0x1a07feff, 0x1a06feff, 0x1a05feff,
+ 0x1805fe00, 0x1904fe00, 0x1704fe00, 0x1703fe00,
+ 0x1603fe00, 0x1602fe00, 0x1402fe00, 0x1402fe00,
+ 0x1401fe00, 0x1301fe00, 0x1201fe00, 0x1200fe00,
+
+ 0x1c1202fe, 0x1c1102fe, 0x1b1102fe, 0x1c1001fe,
+ 0x1b1001fe, 0x1c0f01fe, 0x1b0f00fe, 0x1b0e00fe,
+ 0x1b0e00fe, 0x1b0d00fe, 0x1b0c00fe, 0x1a0cfffe,
+ 0x1a0bfffe, 0x1a0bfffe, 0x190afffe, 0x190afffe,
+ 0x1909fffe, 0x1709ffff, 0x1808ffff, 0x1708feff,
+ 0x1707feff, 0x1707feff, 0x1606feff, 0x1506feff,
+ 0x1505feff, 0x1505feff, 0x1404feff, 0x1404feff,
+ 0x1404feff, 0x1303feff, 0x1203feff, 0x1202feff,
+
+ 0x191104fe, 0x191104fe, 0x191003fe, 0x191003fe,
+ 0x171003fe, 0x180f03fe, 0x180f02fe, 0x180e02fe,
+ 0x180e02fe, 0x180d01fe, 0x180d01fe, 0x180d01fe,
+ 0x170c01fe, 0x160c01fe, 0x170b00fe, 0x170b00fe,
+ 0x160a00fe, 0x160a00fe, 0x160a00fe, 0x150900fe,
+ 0x1509fffe, 0x1508fffe, 0x1508fffe, 0x1408fffe,
+ 0x1407fffe, 0x1307ffff, 0x1306ffff, 0x1206ffff,
+ 0x1206ffff, 0x1205ffff, 0x1205ffff, 0x1104feff,
+
+ 0x161006ff, 0x161005ff, 0x161005ff, 0x160f05ff,
+ 0x160f04ff, 0x150f04ff, 0x150e04ff, 0x150e04ff,
+ 0x150e03ff, 0x150d03ff, 0x150d03ff, 0x150d02ff,
+ 0x140c02ff, 0x150c02fe, 0x150c02fe, 0x150b02fe,
+ 0x140b01fe, 0x140b01fe, 0x140a01fe, 0x140a01fe,
+ 0x140a01fe, 0x130900fe, 0x130900fe, 0x130900fe,
+ 0x130800fe, 0x120800fe, 0x120800fe, 0x120700fe,
+ 0x120700fe, 0x1107fffe, 0x1106fffe, 0x1106fffe,
+
+ 0x140f0700, 0x140f0600, 0x140f0600, 0x140f0600,
+ 0x140e0600, 0x130e0500, 0x140e05ff, 0x130e05ff,
+ 0x140d05ff, 0x130d04ff, 0x130d04ff, 0x120d04ff,
+ 0x130c04ff, 0x130c03ff, 0x130c03ff, 0x120c03ff,
+ 0x120b03ff, 0x120b02ff, 0x120b02ff, 0x120a02ff,
+ 0x120a02ff, 0x110a02ff, 0x110a01ff, 0x120901ff,
+ 0x100901ff, 0x100901ff, 0x110801ff, 0x110801ff,
+ 0x100800ff, 0x100800ff, 0x100700ff, 0x100700fe,
+
+ 0x120f0701, 0x120e0701, 0x120e0701, 0x120e0701,
+ 0x120e0600, 0x110e0600, 0x120d0600, 0x120d0600,
+ 0x120d0500, 0x120d0500, 0x110d0500, 0x110c0500,
+ 0x110c0500, 0x110c0400, 0x110c0400, 0x110b04ff,
+ 0x110b04ff, 0x110b04ff, 0x110b03ff, 0x110b03ff,
+ 0x110a03ff, 0x110a03ff, 0x100a03ff, 0x110a02ff,
+ 0x100902ff, 0x100902ff, 0x100902ff, 0x0f0902ff,
+ 0x0e0902ff, 0x100801ff, 0x0f0801ff, 0x0f0801ff,
+
+ 0x100e0802, 0x100e0802, 0x110e0702, 0x110d0701,
+ 0x110d0701, 0x100d0701, 0x100d0701, 0x110d0601,
+ 0x110d0601, 0x110c0601, 0x110c0601, 0x100c0600,
+ 0x100c0500, 0x100c0500, 0x100c0500, 0x100b0500,
+ 0x100b0500, 0x100b0400, 0x100b0400, 0x0f0b0400,
+ 0x100a0400, 0x0f0a0400, 0x0f0a0400, 0x0f0a0300,
+ 0x0f0a03ff, 0x0f0903ff, 0x0f0903ff, 0x0f0903ff,
+ 0x0f0903ff, 0x0f0902ff, 0x0f0902ff, 0x0f0802ff
+};
+
+static const u32 bicubic8coefftab32_right[480] = {
+ 0x00000000, 0x00000001, 0x00000003, 0x00000004,
+ 0x00000006, 0x0000ff09, 0x0000ff0a, 0x0000ff0c,
+ 0x0000ff0f, 0x0000fe12, 0x0000fe14, 0x0000fe16,
+ 0x0000fd19, 0x0000fd1c, 0x0000fd1e, 0x0000fc21,
+ 0x0000fc24, 0x0000fc27, 0x0000fc29, 0x0000fb2c,
+ 0x0000fb2f, 0x0000fb31, 0x0000fb33, 0x0000fb36,
+ 0x0000fc38, 0x0000fc39, 0x0000fc3b, 0x0000fc3d,
+ 0x0000fd3e, 0x0000fe3f, 0x0000fe40, 0x0000ff40,
+
+ 0x0000ff05, 0x0000ff06, 0x0000fe08, 0x0000fe09,
+ 0x0000fe0b, 0x0000fe0d, 0x0000fd0f, 0x0000fd11,
+ 0x0000fd13, 0x0000fd15, 0x0000fc17, 0x0000fc1a,
+ 0x0000fc1c, 0x0000fc1e, 0x0000fc20, 0x0000fc22,
+ 0x0000fc24, 0x0000fc26, 0x0000fc28, 0x0000fc2a,
+ 0x0000fc2c, 0x0000fc2f, 0x0000fd30, 0x0000fd31,
+ 0x0000fe33, 0x0000fe34, 0x0000ff35, 0x0000ff36,
+ 0x00000037, 0x00000138, 0x00ff0239, 0x00ff0339,
+
+ 0x0000fe09, 0x0000fd0a, 0x0000fd0c, 0x0000fd0d,
+ 0x0000fd0f, 0x0000fd11, 0x0000fc12, 0x0000fc14,
+ 0x0000fc16, 0x0000fc18, 0x0000fc19, 0x0000fc1b,
+ 0x0000fc1d, 0x0000fc1e, 0x0000fc21, 0x0000fd22,
+ 0x0000fd23, 0x0000fd25, 0x0000fd27, 0x0000fe28,
+ 0x0000fe29, 0x0000fe2b, 0x0000ff2c, 0x00ffff2f,
+ 0x00ff002f, 0x00ff0130, 0x00ff0231, 0x00ff0232,
+ 0x00fe0432, 0x00fe0533, 0x00fe0633, 0x00fe0734,
+
+ 0x0000fd0c, 0x0000fd0d, 0x0000fd0f, 0x0000fd10,
+ 0x0000fd11, 0x0000fd13, 0x0000fd14, 0x0000fd16,
+ 0x0000fd17, 0x0000fd19, 0x0000fd1b, 0x0000fd1c,
+ 0x0000fd1d, 0x0000fd1f, 0x0000fe20, 0x0000fe21,
+ 0x00fffe24, 0x00ffff24, 0x00ffff25, 0x00ffff27,
+ 0x00ff0027, 0x00ff0128, 0x00fe012a, 0x00fe022a,
+ 0x00fe032b, 0x00fe042c, 0x00fe052d, 0x00fe062d,
+ 0x00fd072e, 0x00fd082e, 0x00fd092e, 0x00fd0b2f,
+
+ 0x0000fd0e, 0x0000fd0f, 0x0000fd10, 0x0000fd12,
+ 0x0000fd13, 0x0000fd14, 0x0000fd15, 0x0000fd17,
+ 0x0000fe18, 0x00fffe1a, 0x00fffe1b, 0x00fffe1c,
+ 0x00fffe1e, 0x00ffff1e, 0x00ffff1f, 0x00ff0021,
+ 0x00fe0022, 0x00fe0023, 0x00fe0124, 0x00fe0224,
+ 0x00fe0226, 0x00fe0326, 0x00fe0427, 0x00fd0528,
+ 0x00fd0628, 0x00fd0729, 0x00fd0829, 0x00fd0929,
+ 0x00fd0a2a, 0x00fd0b2a, 0x00fd0c2a, 0x00fd0d2a,
+
+ 0x0000fd10, 0x0000fd11, 0x00fffe12, 0x00fffe13,
+ 0x00fffe14, 0x00fffe15, 0x00fffe16, 0x00fffe17,
+ 0x00ffff18, 0x00ffff19, 0x00feff1c, 0x00fe001b,
+ 0x00fe001d, 0x00fe001e, 0x00fe011e, 0x00fe011f,
+ 0x00fe0220, 0x00fe0321, 0x00fe0322, 0x00fd0423,
+ 0x00fd0524, 0x00fd0624, 0x00fd0626, 0x00fd0725,
+ 0x00fd0825, 0x00fd0926, 0x00fd0a26, 0x00fd0b26,
+ 0x00fd0c26, 0x00fd0d26, 0x00fd0e27, 0x00fd0f27,
+
+ 0x00fffe11, 0x00fffe12, 0x00fffe13, 0x00ffff14,
+ 0x00ffff14, 0x00feff16, 0x00feff17, 0x00fe0017,
+ 0x00fe0018, 0x00fe0019, 0x00fe011a, 0x00fe011b,
+ 0x00fe021c, 0x00fe021c, 0x00fe031d, 0x00fd031f,
+ 0x00fd041f, 0x00fd0520, 0x00fd0520, 0x00fd0621,
+ 0x00fd0721, 0x00fd0822, 0x00fd0822, 0x00fd0923,
+ 0x00fd0a23, 0x00fd0b23, 0x00fd0b25, 0x00fe0c24,
+ 0x00fe0d24, 0x00fe0e24, 0x00fe0f24, 0x00fe1024,
+
+ 0x00feff12, 0x00feff13, 0x00feff13, 0x00fe0014,
+ 0x00fe0015, 0x00fe0016, 0x00fe0116, 0x00fe0117,
+ 0x00fe0118, 0x00fe0218, 0x00fe0219, 0x00fe031a,
+ 0x00fe031b, 0x00fe041b, 0x00fd041d, 0x00fd051d,
+ 0x00fd061d, 0x00fd061f, 0x00fe071e, 0x00fe081e,
+ 0x00fe081f, 0x00fe091f, 0x00fe0a20, 0x00fe0a20,
+ 0x00fe0b21, 0x00fe0c21, 0x00fe0d21, 0x00fe0d22,
+ 0x00fe0e22, 0x00fe0f21, 0x00ff1021, 0x00ff1022,
+
+ 0x00fe0012, 0x00fe0013, 0x00fe0113, 0x00fe0114,
+ 0x00fe0115, 0x00fe0215, 0x00fe0216, 0x00fe0217,
+ 0x00fe0317, 0x00fe0318, 0x00fe0418, 0x00fe0419,
+ 0x00fe0519, 0x00fe051a, 0x00fe061b, 0x00fe071b,
+ 0x00fe071c, 0x00fe081c, 0x00fe081d, 0x00fe091d,
+ 0x00fe0a1d, 0x00fe0a1d, 0x00fe0b1e, 0x00fe0c1e,
+ 0x00ff0c1e, 0x00ff0d1e, 0x00ff0e1f, 0x00ff0e1f,
+ 0x00ff0f1f, 0x00ff0f20, 0x0000101f, 0x0000111f,
+
+ 0x00fe0212, 0x00fe0312, 0x00fe0313, 0x00fe0314,
+ 0x00fe0414, 0x00fe0414, 0x00fe0515, 0x00fe0516,
+ 0x00fe0516, 0x00fe0616, 0x00fe0617, 0x00fe0718,
+ 0x00fe0719, 0x00fe0818, 0x00ff0819, 0x00ff0918,
+ 0x00ff0919, 0x00ff0a19, 0x00ff0a19, 0x00ff0b1a,
+ 0x00ff0b1b, 0x00ff0c1a, 0x00000c1b, 0x00000d1b,
+ 0x00000d1c, 0x00000e1b, 0x00000e1d, 0x00010f1b,
+ 0x00010f1b, 0x0001101c, 0x0001101d, 0x0002111c,
+
+ 0x00fe0412, 0x00fe0412, 0x00ff0512, 0x00ff0512,
+ 0x00ff0613, 0x00ff0613, 0x00ff0614, 0x00ff0714,
+ 0x00ff0714, 0x00ff0815, 0x00ff0815, 0x00ff0815,
+ 0x00ff0916, 0x00000916, 0x00000a16, 0x00000a16,
+ 0x00000a18, 0x00000b17, 0x00000b17, 0x00010c17,
+ 0x00010c18, 0x00010d18, 0x00010d18, 0x00010d19,
+ 0x00020e18, 0x00020e18, 0x00020f18, 0x00030f18,
+ 0x00030f18, 0x00031018, 0x00031018, 0x00041119,
+
+ 0x00ff0610, 0x00ff0611, 0x00ff0611, 0x00ff0711,
+ 0x00000711, 0x00000712, 0x00000812, 0x00000812,
+ 0x00000813, 0x00000913, 0x00000913, 0x00000914,
+ 0x00010a14, 0x00010a14, 0x00010a14, 0x00010b14,
+ 0x00010b16, 0x00020b15, 0x00020c15, 0x00020c15,
+ 0x00020c15, 0x00020d17, 0x00030d16, 0x00030d16,
+ 0x00030e16, 0x00040e16, 0x00040e16, 0x00040f16,
+ 0x00040f16, 0x00050f17, 0x00051017, 0x00051017,
+
+ 0x0000070f, 0x00000710, 0x00000710, 0x00000710,
+ 0x00000810, 0x00010811, 0x00010811, 0x00010911,
+ 0x00010911, 0x00010913, 0x00010913, 0x00020a12,
+ 0x00020a12, 0x00020a13, 0x00020b12, 0x00020b13,
+ 0x00030b13, 0x00030c13, 0x00030c13, 0x00030c14,
+ 0x00040c13, 0x00040d13, 0x00040d14, 0x00040d14,
+ 0x00050e14, 0x00050e14, 0x00050e14, 0x00050e14,
+ 0x00060f14, 0x00060f14, 0x00060f15, 0x00061015,
+
+ 0x0001070f, 0x0001080f, 0x0001080f, 0x0001080f,
+ 0x00010811, 0x00020910, 0x00020910, 0x00020910,
+ 0x00020911, 0x00020a10, 0x00030a10, 0x00030a11,
+ 0x00030a11, 0x00030b11, 0x00030b11, 0x00040b12,
+ 0x00040b12, 0x00040c11, 0x00040c12, 0x00040c12,
+ 0x00050c12, 0x00050c12, 0x00050d12, 0x00050d12,
+ 0x00060d13, 0x00060d13, 0x00060e12, 0x00060e13,
+ 0x00070e13, 0x00070e13, 0x00070f13, 0x00070f13,
+
+ 0x0002080e, 0x0002080e, 0x0002080e, 0x00020810,
+ 0x0002090f, 0x0003090f, 0x0003090f, 0x0003090f,
+ 0x0003090f, 0x00030a0f, 0x00030a0f, 0x00040a10,
+ 0x00040a11, 0x00040b10, 0x00040b10, 0x00040b11,
+ 0x00050b10, 0x00050b11, 0x00050c10, 0x00050c11,
+ 0x00050c11, 0x00060c11, 0x00060c11, 0x00060d11,
+ 0x00060d12, 0x00070d12, 0x00070d12, 0x00070e11,
+ 0x00070e11, 0x00070e12, 0x00080e11, 0x00080e12
+};
+
+static const u32 bicubic4coefftab32[480] = {
+ 0x00004000, 0x000140ff, 0x00033ffe, 0x00043ffd,
+ 0x00063dfd, 0xff083dfc, 0xff0a3bfc, 0xff0c39fc,
+ 0xff0e37fc, 0xfe1136fb, 0xfe1433fb, 0xfe1631fb,
+ 0xfd192ffb, 0xfd1c2cfb, 0xfd1e29fc, 0xfc2127fc,
+ 0xfc2424fc, 0xfc2721fc, 0xfc291efd, 0xfb2c1cfd,
+ 0xfb2f19fd, 0xfb3116fe, 0xfb3314fe, 0xfb3611fe,
+ 0xfc370eff, 0xfc390cff, 0xfc3b0aff, 0xfc3d08ff,
+ 0xfd3d0600, 0xfd3f0400, 0xfe3f0300, 0xff400100,
+
+ 0xfe053904, 0xfe063903, 0xfe083901, 0xfe0a3800,
+ 0xfd0b3800, 0xfe0d36ff, 0xfd0f35ff, 0xfd1134fe,
+ 0xfd1332fe, 0xfd1531fd, 0xfc1730fd, 0xfc1a2efc,
+ 0xfc1c2cfc, 0xfc1e2afc, 0xfc2028fc, 0xfc2226fc,
+ 0xfc2424fc, 0xfc2622fc, 0xfc2820fc, 0xfc2a1efc,
+ 0xfc2c1cfc, 0xfc2e1afc, 0xfd3017fc, 0xfd3115fd,
+ 0xfe3213fd, 0xfe3411fd, 0xff350ffd, 0xff360dfe,
+ 0x00370bfe, 0x013809fe, 0x023808fe, 0x033806ff,
+
+ 0xfd093208, 0xfd0a3207, 0xfd0c3205, 0xfd0d3204,
+ 0xfc0f3203, 0xfc113102, 0xfc123002, 0xfc143000,
+ 0xfc152f00, 0xfc172d00, 0xfc192cff, 0xfc1b2bfe,
+ 0xfc1d29fe, 0xfc1e28fe, 0xfc2027fd, 0xfd2125fd,
+ 0xfd2323fd, 0xfd2521fd, 0xfd2720fc, 0xfe281efc,
+ 0xfe291dfc, 0xfe2b1bfc, 0xff2c19fc, 0x002d17fc,
+ 0x002e16fc, 0x012f14fc, 0x022f12fd, 0x023110fd,
+ 0x03310ffd, 0x05310dfd, 0x06320bfd, 0x07320afd,
+
+ 0xfc0c2d0b, 0xfc0d2d0a, 0xfc0e2d09, 0xfc102d07,
+ 0xfc112c07, 0xfc132c05, 0xfc142c04, 0xfc162b03,
+ 0xfc172a03, 0xfc192a01, 0xfc1a2901, 0xfd1b2800,
+ 0xfd1c2700, 0xfd1e2500, 0xfe1f24ff, 0xfe2023ff,
+ 0xfe2222fe, 0xff2320fe, 0xff241ffe, 0x00251efd,
+ 0x00271cfd, 0x01271bfd, 0x01281afd, 0x022918fd,
+ 0x032a16fd, 0x032b15fd, 0x042b14fd, 0x052c12fd,
+ 0x072c10fd, 0x082c0ffd, 0x092c0efd, 0x0a2c0dfd,
+
+ 0xfd0d290d, 0xfd0e290c, 0xfd0f290b, 0xfd11280a,
+ 0xfd122809, 0xfd132808, 0xfd142807, 0xfd162706,
+ 0xfd172705, 0xfd192604, 0xfe1a2503, 0xfe1b2502,
+ 0xfe1c2402, 0xfe1d2302, 0xff1e2201, 0xff1f2101,
+ 0x00202000, 0x00211f00, 0x01221eff, 0x02221dff,
+ 0x02241cfe, 0x03241bfe, 0x042519fe, 0x042618fe,
+ 0x052617fe, 0x062716fd, 0x072714fe, 0x082713fe,
+ 0x092812fd, 0x0a2811fd, 0x0b2810fd, 0x0c280ffd,
+
+ 0xfd0f250f, 0xfd10250e, 0xfd11250d, 0xfd12250c,
+ 0xfd13250b, 0xfe13250a, 0xfe152409, 0xfe162408,
+ 0xfe172308, 0xff182306, 0xff192305, 0xff1a2205,
+ 0x001b2104, 0x001c2103, 0x001d2003, 0x011e1f02,
+ 0x011f1f01, 0x021f1e01, 0x03201d00, 0x03211c00,
+ 0x04211b00, 0x05221aff, 0x062219ff, 0x062318ff,
+ 0x082316ff, 0x082316ff, 0x092415fe, 0x0a2414fe,
+ 0x0b2413fe, 0x0c2412fe, 0x0d2411fe, 0x0e2410fe,
+
+ 0xfe10230f, 0xfe11230e, 0xfe12220e, 0xfe13220d,
+ 0xfe14220c, 0xff14220b, 0xff15220a, 0xff16210a,
+ 0x00162109, 0x00172108, 0x00182008, 0x01192006,
+ 0x011a1f06, 0x021a1f05, 0x021b1e05, 0x031c1d04,
+ 0x031d1d03, 0x041d1c03, 0x041e1b03, 0x051e1b02,
+ 0x061f1a01, 0x06201901, 0x07201801, 0x08201800,
+ 0x09201700, 0x0a211500, 0x0b2115ff, 0x0c2114ff,
+ 0x0c2213ff, 0x0d2212ff, 0x0e2211ff, 0x0f2211fe,
+
+ 0xff112010, 0xff12200f, 0xff12200f, 0xff13200e,
+ 0x0013200d, 0x0014200c, 0x00151f0c, 0x00161f0b,
+ 0x01161f0a, 0x01171e0a, 0x02171e09, 0x02181e08,
+ 0x03191d07, 0x03191d07, 0x041a1c06, 0x041b1c05,
+ 0x051b1b05, 0x051c1b04, 0x061c1a04, 0x071c1a03,
+ 0x071d1903, 0x081e1802, 0x091d1802, 0x091e1702,
+ 0x0a1f1601, 0x0b1f1600, 0x0b1f1501, 0x0c201400,
+ 0x0d1f1400, 0x0e2013ff, 0x0f1f1200, 0x102011ff,
+
+ 0x00111f10, 0x00121e10, 0x00131e0f, 0x00131e0f,
+ 0x01131e0e, 0x01141e0d, 0x01151d0d, 0x02151d0c,
+ 0x02161d0b, 0x03161d0a, 0x03171c0a, 0x04171c09,
+ 0x04181c08, 0x05181b08, 0x05191b07, 0x06191a07,
+ 0x061a1a06, 0x071a1906, 0x071b1905, 0x081b1805,
+ 0x081c1804, 0x091c1704, 0x0a1c1703, 0x0a1d1603,
+ 0x0b1d1602, 0x0c1d1502, 0x0c1d1502, 0x0d1e1401,
+ 0x0e1d1401, 0x0e1e1301, 0x0f1e1300, 0x101e1200,
+
+ 0x02111c11, 0x02121c10, 0x02131b10, 0x03131b0f,
+ 0x03131b0f, 0x03141b0e, 0x04141b0d, 0x04151a0d,
+ 0x05151a0c, 0x05151a0c, 0x05161a0b, 0x0616190b,
+ 0x0616190b, 0x0716190a, 0x0717180a, 0x08171809,
+ 0x08181808, 0x09181708, 0x09181708, 0x0a181707,
+ 0x0a191607, 0x0b191606, 0x0b1a1605, 0x0c1a1505,
+ 0x0c1a1505, 0x0d1a1504, 0x0d1b1404, 0x0e1b1403,
+ 0x0f1b1303, 0x0f1b1303, 0x101b1302, 0x101c1202,
+
+ 0x04111a11, 0x04121911, 0x04131910, 0x0513190f,
+ 0x0513190f, 0x0513190f, 0x0613190e, 0x0614180e,
+ 0x0714180d, 0x0714180d, 0x0715180c, 0x0814180c,
+ 0x0815170c, 0x0816170b, 0x0916170a, 0x0916170a,
+ 0x0a16160a, 0x0a171609, 0x0a171609, 0x0b171608,
+ 0x0b171509, 0x0c171508, 0x0c181507, 0x0d171507,
+ 0x0d181407, 0x0e181406, 0x0e181406, 0x0e191306,
+ 0x0f191305, 0x0f191305, 0x10191304, 0x10191205,
+
+ 0x05121811, 0x06121810, 0x06121810, 0x06131710,
+ 0x0713170f, 0x0713170f, 0x0713170f, 0x0813170e,
+ 0x0813170e, 0x0814170d, 0x0914160d, 0x0914160d,
+ 0x0914160d, 0x0a14160c, 0x0a15160b, 0x0a15150c,
+ 0x0b15150b, 0x0b15150b, 0x0b16150a, 0x0c15150a,
+ 0x0c16140a, 0x0d161409, 0x0d161409, 0x0d171408,
+ 0x0e161408, 0x0e171308, 0x0e171308, 0x0f171307,
+ 0x0f171307, 0x10171306, 0x10181206, 0x10181206,
+
+ 0x07111711, 0x07121710, 0x07121611, 0x08121610,
+ 0x08121610, 0x0813160f, 0x0912160f, 0x0913160e,
+ 0x0913160e, 0x0913160e, 0x0a14150d, 0x0a14150d,
+ 0x0a14150d, 0x0b14150c, 0x0b14150c, 0x0b14150c,
+ 0x0c14140c, 0x0c15140b, 0x0c15140b, 0x0c15140b,
+ 0x0d15140a, 0x0d15140a, 0x0d15140a, 0x0e161309,
+ 0x0e161309, 0x0e161309, 0x0f151309, 0x0f161308,
+ 0x0f161209, 0x10161208, 0x10161208, 0x10171207,
+
+ 0x0a111411, 0x0b111410, 0x0b111410, 0x0b111410,
+ 0x0b111410, 0x0b12140f, 0x0b12140f, 0x0c12130f,
+ 0x0c12130f, 0x0c12130f, 0x0c12130f, 0x0c12130f,
+ 0x0d12130e, 0x0d12130e, 0x0d12130e, 0x0d13130d,
+ 0x0d13130d, 0x0d13130d, 0x0e12130d, 0x0e13120d,
+ 0x0e13120d, 0x0e13120d, 0x0e13120d, 0x0f13120c,
+ 0x0f13120c, 0x0f13120c, 0x0f14120b, 0x0f14120b,
+ 0x1013120b, 0x1013120b, 0x1013120b, 0x1014110b,
+
+ 0x0c111310, 0x0c111310, 0x0c111310, 0x0d101310,
+ 0x0d101310, 0x0d111210, 0x0d111210, 0x0d111210,
+ 0x0d12120f, 0x0d12120f, 0x0d12120f, 0x0d12120f,
+ 0x0e11120f, 0x0e12120e, 0x0e12120e, 0x0e12120e,
+ 0x0e12120e, 0x0e12120e, 0x0e12120e, 0x0e12120e,
+ 0x0f11120e, 0x0f12120d, 0x0f12120d, 0x0f12120d,
+ 0x0f12120d, 0x0f12110e, 0x0f12110e, 0x0f12110e,
+ 0x1012110d, 0x1012110d, 0x1013110c, 0x1013110c,
+};
+
+static int sun8i_vi_scaler_coef_index(unsigned int step)
+{
+ unsigned int scale, int_part, float_part;
+
+ scale = step >> (SUN8I_VI_SCALER_SCALE_FRAC - 3);
+ int_part = scale >> 3;
+ float_part = scale & 0x7;
+
+ switch (int_part) {
+ case 0:
+ return 0;
+ case 1:
+ return float_part;
+ case 2:
+ return 8 + (float_part >> 1);
+ case 3:
+ return 12;
+ case 4:
+ return 13;
+ default:
+ return 14;
+ }
+}
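
As a quick illustration (editor's sketch, not part of the patch): with the hardware's 20 fractional bits, a 2.5x horizontal downscale step selects coefficient block 10; the helper below is hypothetical and only re-traces the selection logic of sun8i_vi_scaler_coef_index().

/* Illustration only: SUN8I_VI_SCALER_SCALE_FRAC is 20, so a 2.5x downscale
 * corresponds to step = 2.5 * (1 << 20) = 0x280000.
 */
static int example_coef_index(void)
{
	unsigned int step = 0x280000;		/* 2.5 in x.20 fixed point */
	unsigned int scale = step >> (20 - 3);	/* keep 3 fractional bits: 20 */
	unsigned int int_part = scale >> 3;	/* 2 */
	unsigned int float_part = scale & 0x7;	/* 4 */

	/* int_part == 2, so the switch above returns 8 + (4 >> 1) */
	return 8 + (float_part >> 1);		/* 10, i.e. the 11th block of 32 */
}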
+
+static void sun8i_vi_scaler_set_coeff(struct regmap *map, int layer,
+ u32 hstep, u32 vstep,
+ const struct drm_format_info *format)
+{
+ const u32 *ch_left, *ch_right, *cy;
+ int offset, i;
+
+ if (format->hsub == 1 && format->vsub == 1) {
+ ch_left = lan3coefftab32_left;
+ ch_right = lan3coefftab32_right;
+ cy = lan2coefftab32;
+ } else {
+ ch_left = bicubic8coefftab32_left;
+ ch_right = bicubic8coefftab32_right;
+ cy = bicubic4coefftab32;
+ }
+
+ offset = sun8i_vi_scaler_coef_index(hstep) *
+ SUN8I_VI_SCALER_COEFF_COUNT;
+ for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
+ regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(layer, i),
+ lan3coefftab32_left[offset + i]);
+ regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(layer, i),
+ lan3coefftab32_right[offset + i]);
+ regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(layer, i),
+ ch_left[offset + i]);
+ regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(layer, i),
+ ch_right[offset + i]);
+ }
+
+ offset = sun8i_vi_scaler_coef_index(vstep) *
+ SUN8I_VI_SCALER_COEFF_COUNT;
+ for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
+ regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(layer, i),
+ lan2coefftab32[offset + i]);
+ regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(layer, i),
+ cy[offset + i]);
+ }
+}
+
+void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
+{
+ u32 val;
+
+ if (enable)
+ val = SUN8I_SCALER_VSU_CTRL_EN |
+ SUN8I_SCALER_VSU_CTRL_COEFF_RDY;
+ else
+ val = 0;
+
+ regmap_write(mixer->engine.regs, SUN8I_SCALER_VSU_CTRL(layer), val);
+}
+
+void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
+ u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
+ u32 hscale, u32 vscale, u32 hphase, u32 vphase,
+ const struct drm_format_info *format)
+{
+ u32 chphase, cvphase;
+ u32 insize, outsize;
+
+ hphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
+ vphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
+ hscale <<= SUN8I_VI_SCALER_SCALE_FRAC - 16;
+ vscale <<= SUN8I_VI_SCALER_SCALE_FRAC - 16;
+
+ insize = SUN8I_VI_SCALER_SIZE(src_w, src_h);
+ outsize = SUN8I_VI_SCALER_SIZE(dst_w, dst_h);
+
+ /*
+ * This is the chroma V/H phase calculation as it appears in the
+ * BSP driver. There is no detailed explanation for it. YUV 4:2:0
+ * chroma is treated specially for some reason.
+ */
+ if (format->hsub == 2 && format->vsub == 2) {
+ chphase = hphase >> 1;
+ cvphase = (vphase >> 1) -
+ (1UL << (SUN8I_VI_SCALER_SCALE_FRAC - 2));
+ } else {
+ chphase = hphase;
+ cvphase = vphase;
+ }
+
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_OUTSIZE(layer), outsize);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_YINSIZE(layer), insize);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_YHSTEP(layer), hscale);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_YVSTEP(layer), vscale);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_YHPHASE(layer), hphase);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_YVPHASE(layer), vphase);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_CINSIZE(layer),
+ SUN8I_VI_SCALER_SIZE(src_w / format->hsub,
+ src_h / format->vsub));
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_CHSTEP(layer),
+ hscale / format->hsub);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_CVSTEP(layer),
+ vscale / format->vsub);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_CHPHASE(layer), chphase);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_CVPHASE(layer), cvphase);
+ sun8i_vi_scaler_set_coeff(mixer->engine.regs, layer,
+ hscale, vscale, format);
+}
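
For reference, a minimal sketch (not from the patch) of the fixed-point conversion done at the top of sun8i_vi_scaler_setup() and of the YUV 4:2:0 chroma phase derivation; the input values are arbitrary and the kernel u32 type (<linux/types.h>) is assumed.

/* Illustration only: DRM hands over 16.16 fixed-point steps/phases, the VSU
 * expects 20 fractional bits, hence the "<< (20 - 16)" shifts above.
 */
static void example_vi_scaler_fixed_point(void)
{
	u32 hscale = 0x28000;	/* 2.5x downscale in 16.16 */
	u32 vphase = 0xc000;	/* 0.75 of a line in 16.16 */
	u32 cvphase;

	hscale <<= 20 - 16;	/* 0x280000: 2.5 in x.20, as written to YHSTEP */
	vphase <<= 20 - 16;	/* 0xc0000: 0.75 in x.20 */

	/* YUV 4:2:0: halve the luma phase, then subtract 0.25 (1 << 18 in x.20) */
	cvphase = (vphase >> 1) - (1UL << 18);	/* 0x20000, i.e. 0.125 */

	(void)hscale;
	(void)cvphase;
}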
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
new file mode 100644
index 0000000..a595ab6
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _SUN8I_VI_SCALER_H_
+#define _SUN8I_VI_SCALER_H_
+
+#include <drm/drm_fourcc.h>
+#include "sun8i_mixer.h"
+
+/* these two macros assume 16 fractional bits, which is standard in DRM */
+#define SUN8I_VI_SCALER_SCALE_MIN 1
+#define SUN8I_VI_SCALER_SCALE_MAX ((1UL << 20) - 1)
+
+#define SUN8I_VI_SCALER_SCALE_FRAC 20
+#define SUN8I_VI_SCALER_PHASE_FRAC 20
+#define SUN8I_VI_SCALER_COEFF_COUNT 32
+#define SUN8I_VI_SCALER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
+
+#define SUN8I_SCALER_VSU_CTRL(ch) (0x20000 + 0x20000 * (ch) + 0x0)
+#define SUN8I_SCALER_VSU_OUTSIZE(ch) (0x20000 + 0x20000 * (ch) + 0x40)
+#define SUN8I_SCALER_VSU_YINSIZE(ch) (0x20000 + 0x20000 * (ch) + 0x80)
+#define SUN8I_SCALER_VSU_YHSTEP(ch) (0x20000 + 0x20000 * (ch) + 0x88)
+#define SUN8I_SCALER_VSU_YVSTEP(ch) (0x20000 + 0x20000 * (ch) + 0x8c)
+#define SUN8I_SCALER_VSU_YHPHASE(ch) (0x20000 + 0x20000 * (ch) + 0x90)
+#define SUN8I_SCALER_VSU_YVPHASE(ch) (0x20000 + 0x20000 * (ch) + 0x98)
+#define SUN8I_SCALER_VSU_CINSIZE(ch) (0x20000 + 0x20000 * (ch) + 0xc0)
+#define SUN8I_SCALER_VSU_CHSTEP(ch) (0x20000 + 0x20000 * (ch) + 0xc8)
+#define SUN8I_SCALER_VSU_CVSTEP(ch) (0x20000 + 0x20000 * (ch) + 0xcc)
+#define SUN8I_SCALER_VSU_CHPHASE(ch) (0x20000 + 0x20000 * (ch) + 0xd0)
+#define SUN8I_SCALER_VSU_CVPHASE(ch) (0x20000 + 0x20000 * (ch) + 0xd8)
+#define SUN8I_SCALER_VSU_YHCOEFF0(ch, i) \
+ (0x20000 + 0x20000 * (ch) + 0x200 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_YHCOEFF1(ch, i) \
+ (0x20000 + 0x20000 * (ch) + 0x300 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_YVCOEFF(ch, i) \
+ (0x20000 + 0x20000 * (ch) + 0x400 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CHCOEFF0(ch, i) \
+ (0x20000 + 0x20000 * (ch) + 0x600 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CHCOEFF1(ch, i) \
+ (0x20000 + 0x20000 * (ch) + 0x700 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CVCOEFF(ch, i) \
+ (0x20000 + 0x20000 * (ch) + 0x800 + 0x4 * (i))
+
+#define SUN8I_SCALER_VSU_CTRL_EN BIT(0)
+#define SUN8I_SCALER_VSU_CTRL_COEFF_RDY BIT(4)
+
+void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable);
+void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
+ u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
+ u32 hscale, u32 vscale, u32 hphase, u32 vphase,
+ const struct drm_format_info *format);
+
+#endif
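
A quick sanity check on the address macros above (editor's sketch, not part of the patch): each VI channel occupies a 0x20000 stride on top of a 0x20000 base, and each coefficient bank is an array of 32 consecutive 32-bit registers. BUILD_BUG_ON() from <linux/build_bug.h> is assumed.

/* Illustration only:
 *   SUN8I_SCALER_VSU_CTRL(0)        == 0x20000
 *   SUN8I_SCALER_VSU_CTRL(1)        == 0x40000
 *   SUN8I_SCALER_VSU_YHCOEFF0(1, 5) == 0x40000 + 0x200 + 4 * 5 == 0x40214
 */
static inline void example_vsu_offset_check(void)
{
	BUILD_BUG_ON(SUN8I_SCALER_VSU_CTRL(1) != 0x40000);
	BUILD_BUG_ON(SUN8I_SCALER_VSU_YHCOEFF0(1, 5) != 0x40214);
}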
diff --git a/drivers/gpu/drm/sun4i/sunxi_engine.h b/drivers/gpu/drm/sun4i/sunxi_engine.h
index 4cb70ae..d317ea0 100644
--- a/drivers/gpu/drm/sun4i/sunxi_engine.h
+++ b/drivers/gpu/drm/sun4i/sunxi_engine.h
@@ -12,16 +12,106 @@
struct drm_plane;
struct drm_device;
+struct drm_crtc_state;
struct sunxi_engine;
+/**
+ * struct sunxi_engine_ops - helper operations for sunXi engines
+ *
+ * These hooks are used by the common part of the DRM driver to
+ * implement the proper behaviour.
+ */
struct sunxi_engine_ops {
+ /**
+ * @atomic_begin:
+ *
+ * This callback is used to prepare the engine for an atomic
+ * update. It mirrors the
+ * &drm_crtc_helper_funcs.atomic_begin callback, so any
+ * documentation there applies.
+ *
+ * This function is optional.
+ */
+ void (*atomic_begin)(struct sunxi_engine *engine,
+ struct drm_crtc_state *old_state);
+
+ /**
+ * @atomic_check:
+ *
+ * This callback is used to validate engine-specific CRTC
+ * constraints related to plane updates. It mirrors the
+ * &drm_crtc_helper_funcs.atomic_check callback, so any
+ * documentation there applies.
+ *
+ * This function is optional.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code.
+ */
+ int (*atomic_check)(struct sunxi_engine *engine,
+ struct drm_crtc_state *state);
+
+ /**
+ * @commit:
+ *
+ * This callback triggers the hardware to commit, during the
+ * next vblank period, the new configuration that has been
+ * set up.
+ *
+ * This function is optional.
+ */
void (*commit)(struct sunxi_engine *engine);
+
+ /**
+ * @layers_init:
+ *
+ * This callback is used to allocate, initialize and register
+ * the layers supported by that engine.
+ *
+ * This function is mandatory.
+ *
+ * RETURNS:
+ *
+ * The array of struct drm_plane backing the layers, or an
+ * error pointer on failure.
+ */
struct drm_plane **(*layers_init)(struct drm_device *drm,
struct sunxi_engine *engine);
+ /**
+ * @apply_color_correction:
+ *
+ * This callback will enable the color correction in the
+ * engine. This is useful only for the composite output.
+ *
+ * This function is optional.
+ */
void (*apply_color_correction)(struct sunxi_engine *engine);
+
+ /**
+ * @disable_color_correction:
+ *
+ * This callback will stop the color correction in the
+ * engine. This is useful only for the composite output.
+ *
+ * This function is optional.
+ */
void (*disable_color_correction)(struct sunxi_engine *engine);
+
+ /**
+ * @vblank_quirk:
+ *
+ * This callback is used to implement engine-specific
+ * behaviour as part of the VBLANK event handling. It is run
+ * with all the constraints of an interrupt context (it cannot
+ * sleep, all local interrupts are disabled) and therefore
+ * should be as fast as possible.
+ *
+ * This function is optional.
+ */
+ void (*vblank_quirk)(struct sunxi_engine *engine);
};
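
To make the contract concrete, a hypothetical engine (names made up, not from this patch) only has to provide the mandatory layers_init hook and can wire the optional hooks as needed; ERR_PTR()/-ENOSYS from <linux/err.h> are assumed.

/* Hypothetical example: a minimal engine providing the mandatory
 * layers_init hook plus the optional commit hook.
 */
static struct drm_plane **foo_layers_init(struct drm_device *drm,
					  struct sunxi_engine *engine)
{
	/* allocate/register the planes here; return the array or an ERR_PTR */
	return ERR_PTR(-ENOSYS);
}

static void foo_commit(struct sunxi_engine *engine)
{
	/* latch the configuration prepared during this atomic update */
}

static const struct sunxi_engine_ops foo_engine_ops = {
	.layers_init = foo_layers_init,
	.commit = foo_commit,
};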
/**
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 46d65d3..2e0d621 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -5,6 +5,8 @@ tegra-drm-y := \
drm.o \
gem.o \
fb.o \
+ hub.o \
+ plane.o \
dc.o \
output.o \
rgb.o \
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 24a5ef4..9f83a65 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -19,82 +19,79 @@
#include "dc.h"
#include "drm.h"
#include "gem.h"
+#include "hub.h"
+#include "plane.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
-struct tegra_plane {
- struct drm_plane base;
- unsigned int index;
-};
-
-static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
+static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
{
- return container_of(plane, struct tegra_plane, base);
+ stats->frames = 0;
+ stats->vblank = 0;
+ stats->underflow = 0;
+ stats->overflow = 0;
}
-struct tegra_dc_state {
- struct drm_crtc_state base;
+/* Reads the active copy of a register. */
+static u32 tegra_dc_readl_active(struct tegra_dc *dc, unsigned long offset)
+{
+ u32 value;
- struct clk *clk;
- unsigned long pclk;
- unsigned int div;
+ tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
+ value = tegra_dc_readl(dc, offset);
+ tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
- u32 planes;
-};
+ return value;
+}
-static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
+static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
+ unsigned int offset)
{
- if (state)
- return container_of(state, struct tegra_dc_state, base);
-
- return NULL;
-}
+ if (offset >= 0x500 && offset <= 0x638) {
+ offset = 0x000 + (offset - 0x500);
+ return plane->offset + offset;
+ }
-struct tegra_plane_state {
- struct drm_plane_state base;
+ if (offset >= 0x700 && offset <= 0x719) {
+ offset = 0x180 + (offset - 0x700);
+ return plane->offset + offset;
+ }
- struct tegra_bo_tiling tiling;
- u32 format;
- u32 swap;
-};
+ if (offset >= 0x800 && offset <= 0x839) {
+ offset = 0x1c0 + (offset - 0x800);
+ return plane->offset + offset;
+ }
-static inline struct tegra_plane_state *
-to_tegra_plane_state(struct drm_plane_state *state)
-{
- if (state)
- return container_of(state, struct tegra_plane_state, base);
+ dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
- return NULL;
+ return plane->offset + offset;
}
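
For illustration (editor's note, not part of the patch), with the window A base used later for the primary plane (plane->offset == 0xa00), the remapping above works out as follows:

/* Illustration only:
 *   0x500..0x638 -> plane->offset + 0x000 + (reg - 0x500), e.g. 0x500 -> 0xa00
 *   0x700..0x719 -> plane->offset + 0x180 + (reg - 0x700), e.g. 0x700 -> 0xb80
 *   0x800..0x839 -> plane->offset + 0x1c0 + (reg - 0x800), e.g. 0x801 -> 0xbc1
 * Anything outside these ranges triggers the dev_WARN() above.
 */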
-static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
+static inline u32 tegra_plane_readl(struct tegra_plane *plane,
+ unsigned int offset)
{
- stats->frames = 0;
- stats->vblank = 0;
- stats->underflow = 0;
- stats->overflow = 0;
+ return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}
-/*
- * Reads the active copy of a register. This takes the dc->lock spinlock to
- * prevent races with the VBLANK processing which also needs access to the
- * active copy of some registers.
- */
-static u32 tegra_dc_readl_active(struct tegra_dc *dc, unsigned long offset)
+static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
+ unsigned int offset)
{
- unsigned long flags;
- u32 value;
+ tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
+}
- spin_lock_irqsave(&dc->lock, flags);
+bool tegra_dc_has_output(struct tegra_dc *dc, struct device *dev)
+{
+ struct device_node *np = dc->dev->of_node;
+ struct of_phandle_iterator it;
+ int err;
- tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
- value = tegra_dc_readl(dc, offset);
- tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+ of_for_each_phandle(&it, err, np, "nvidia,outputs", NULL, 0)
+ if (it.node == dev->of_node)
+ return true;
- spin_unlock_irqrestore(&dc->lock, flags);
- return value;
+ return false;
}
/*
@@ -115,81 +112,6 @@ void tegra_dc_commit(struct tegra_dc *dc)
tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
}
-static int tegra_dc_format(u32 fourcc, u32 *format, u32 *swap)
-{
- /* assume no swapping of fetched data */
- if (swap)
- *swap = BYTE_SWAP_NOSWAP;
-
- switch (fourcc) {
- case DRM_FORMAT_XBGR8888:
- *format = WIN_COLOR_DEPTH_R8G8B8A8;
- break;
-
- case DRM_FORMAT_XRGB8888:
- *format = WIN_COLOR_DEPTH_B8G8R8A8;
- break;
-
- case DRM_FORMAT_RGB565:
- *format = WIN_COLOR_DEPTH_B5G6R5;
- break;
-
- case DRM_FORMAT_UYVY:
- *format = WIN_COLOR_DEPTH_YCbCr422;
- break;
-
- case DRM_FORMAT_YUYV:
- if (swap)
- *swap = BYTE_SWAP_SWAP2;
-
- *format = WIN_COLOR_DEPTH_YCbCr422;
- break;
-
- case DRM_FORMAT_YUV420:
- *format = WIN_COLOR_DEPTH_YCbCr420P;
- break;
-
- case DRM_FORMAT_YUV422:
- *format = WIN_COLOR_DEPTH_YCbCr422P;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
-{
- switch (format) {
- case WIN_COLOR_DEPTH_YCbCr422:
- case WIN_COLOR_DEPTH_YUV422:
- if (planar)
- *planar = false;
-
- return true;
-
- case WIN_COLOR_DEPTH_YCbCr420P:
- case WIN_COLOR_DEPTH_YUV420P:
- case WIN_COLOR_DEPTH_YCbCr422P:
- case WIN_COLOR_DEPTH_YUV422P:
- case WIN_COLOR_DEPTH_YCbCr422R:
- case WIN_COLOR_DEPTH_YUV422R:
- case WIN_COLOR_DEPTH_YCbCr422RA:
- case WIN_COLOR_DEPTH_YUV422RA:
- if (planar)
- *planar = true;
-
- return true;
- }
-
- if (planar)
- *planar = false;
-
- return false;
-}
-
static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
unsigned int bpp)
{
@@ -230,36 +152,104 @@ static inline u32 compute_initial_dda(unsigned int in)
return dfixed_frac(inf);
}
-static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+static void tegra_plane_setup_blending_legacy(struct tegra_plane *plane)
+{
+ u32 background[3] = {
+ BLEND_WEIGHT1(0) | BLEND_WEIGHT0(0) | BLEND_COLOR_KEY_NONE,
+ BLEND_WEIGHT1(0) | BLEND_WEIGHT0(0) | BLEND_COLOR_KEY_NONE,
+ BLEND_WEIGHT1(0) | BLEND_WEIGHT0(0) | BLEND_COLOR_KEY_NONE,
+ };
+ u32 foreground = BLEND_WEIGHT1(255) | BLEND_WEIGHT0(255) |
+ BLEND_COLOR_KEY_NONE;
+ u32 blendnokey = BLEND_WEIGHT1(255) | BLEND_WEIGHT0(255);
+ struct tegra_plane_state *state;
+ unsigned int i;
+
+ state = to_tegra_plane_state(plane->base.state);
+
+ /* alpha contribution is 1 minus sum of overlapping windows */
+ for (i = 0; i < 3; i++) {
+ if (state->dependent[i])
+ background[i] |= BLEND_CONTROL_DEPENDENT;
+ }
+
+ /* enable alpha blending if pixel format has an alpha component */
+ if (!state->opaque)
+ foreground |= BLEND_CONTROL_ALPHA;
+
+ /*
+ * Disable blending and assume Window A is the bottom-most window,
+ * Window C is the top-most window and Window B is in the middle.
+ */
+ tegra_plane_writel(plane, blendnokey, DC_WIN_BLEND_NOKEY);
+ tegra_plane_writel(plane, foreground, DC_WIN_BLEND_1WIN);
+
+ switch (plane->index) {
+ case 0:
+ tegra_plane_writel(plane, background[0], DC_WIN_BLEND_2WIN_X);
+ tegra_plane_writel(plane, background[1], DC_WIN_BLEND_2WIN_Y);
+ tegra_plane_writel(plane, background[2], DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 1:
+ tegra_plane_writel(plane, foreground, DC_WIN_BLEND_2WIN_X);
+ tegra_plane_writel(plane, background[1], DC_WIN_BLEND_2WIN_Y);
+ tegra_plane_writel(plane, background[2], DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 2:
+ tegra_plane_writel(plane, foreground, DC_WIN_BLEND_2WIN_X);
+ tegra_plane_writel(plane, foreground, DC_WIN_BLEND_2WIN_Y);
+ tegra_plane_writel(plane, foreground, DC_WIN_BLEND_3WIN_XY);
+ break;
+ }
+}
+
+static void tegra_plane_setup_blending(struct tegra_plane *plane,
+ const struct tegra_dc_window *window)
+{
+ u32 value;
+
+ value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
+ BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
+ BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
+ tegra_plane_writel(plane, value, DC_WIN_BLEND_MATCH_SELECT);
+
+ value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
+ BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
+ BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
+ tegra_plane_writel(plane, value, DC_WIN_BLEND_NOMATCH_SELECT);
+
+ value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - window->zpos);
+ tegra_plane_writel(plane, value, DC_WIN_BLEND_LAYER_CONTROL);
+}
+
+static void tegra_dc_setup_window(struct tegra_plane *plane,
const struct tegra_dc_window *window)
{
unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
- unsigned long value, flags;
+ struct tegra_dc *dc = plane->dc;
bool yuv, planar;
+ u32 value;
/*
* For YUV planar modes, the number of bytes per pixel takes into
* account only the luma component and therefore is 1.
*/
- yuv = tegra_dc_format_is_yuv(window->format, &planar);
+ yuv = tegra_plane_format_is_yuv(window->format, &planar);
if (!yuv)
bpp = window->bits_per_pixel / 8;
else
bpp = planar ? 1 : 2;
- spin_lock_irqsave(&dc->lock, flags);
-
- value = WINDOW_A_SELECT << index;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
- tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
- tegra_dc_writel(dc, window->swap, DC_WIN_BYTE_SWAP);
+ tegra_plane_writel(plane, window->format, DC_WIN_COLOR_DEPTH);
+ tegra_plane_writel(plane, window->swap, DC_WIN_BYTE_SWAP);
value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
- tegra_dc_writel(dc, value, DC_WIN_POSITION);
+ tegra_plane_writel(plane, value, DC_WIN_POSITION);
value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
- tegra_dc_writel(dc, value, DC_WIN_SIZE);
+ tegra_plane_writel(plane, value, DC_WIN_SIZE);
h_offset = window->src.x * bpp;
v_offset = window->src.y;
@@ -267,7 +257,7 @@ static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
v_size = window->src.h;
value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
- tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+ tegra_plane_writel(plane, value, DC_WIN_PRESCALED_SIZE);
/*
* For DDA computations the number of bytes per pixel for YUV planar
@@ -280,33 +270,33 @@ static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
- tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+ tegra_plane_writel(plane, value, DC_WIN_DDA_INC);
h_dda = compute_initial_dda(window->src.x);
v_dda = compute_initial_dda(window->src.y);
- tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
- tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+ tegra_plane_writel(plane, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_plane_writel(plane, v_dda, DC_WIN_V_INITIAL_DDA);
- tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
- tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+ tegra_plane_writel(plane, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_plane_writel(plane, 0, DC_WIN_BUF_STRIDE);
- tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
+ tegra_plane_writel(plane, window->base[0], DC_WINBUF_START_ADDR);
if (yuv && planar) {
- tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
- tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
+ tegra_plane_writel(plane, window->base[1], DC_WINBUF_START_ADDR_U);
+ tegra_plane_writel(plane, window->base[2], DC_WINBUF_START_ADDR_V);
value = window->stride[1] << 16 | window->stride[0];
- tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
+ tegra_plane_writel(plane, value, DC_WIN_LINE_STRIDE);
} else {
- tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
+ tegra_plane_writel(plane, window->stride[0], DC_WIN_LINE_STRIDE);
}
if (window->bottom_up)
v_offset += window->src.h - 1;
- tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
- tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+ tegra_plane_writel(plane, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+ tegra_plane_writel(plane, v_offset, DC_WINBUF_ADDR_V_OFFSET);
if (dc->soc->supports_block_linear) {
unsigned long height = window->tiling.value;
@@ -326,7 +316,7 @@ static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
break;
}
- tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
+ tegra_plane_writel(plane, value, DC_WINBUF_SURFACE_KIND);
} else {
switch (window->tiling.mode) {
case TEGRA_BO_TILING_MODE_PITCH:
@@ -347,21 +337,21 @@ static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
break;
}
- tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ tegra_plane_writel(plane, value, DC_WIN_BUFFER_ADDR_MODE);
}
value = WIN_ENABLE;
if (yuv) {
/* setup default colorspace conversion coefficients */
- tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
- tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
- tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
- tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
- tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
- tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
- tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
- tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
+ tegra_plane_writel(plane, 0x00f0, DC_WIN_CSC_YOF);
+ tegra_plane_writel(plane, 0x012a, DC_WIN_CSC_KYRGB);
+ tegra_plane_writel(plane, 0x0000, DC_WIN_CSC_KUR);
+ tegra_plane_writel(plane, 0x0198, DC_WIN_CSC_KVR);
+ tegra_plane_writel(plane, 0x039b, DC_WIN_CSC_KUG);
+ tegra_plane_writel(plane, 0x032f, DC_WIN_CSC_KVG);
+ tegra_plane_writel(plane, 0x0204, DC_WIN_CSC_KUB);
+ tegra_plane_writel(plane, 0x0000, DC_WIN_CSC_KVB);
value |= CSC_ENABLE;
} else if (window->bits_per_pixel < 24) {
@@ -371,137 +361,91 @@ static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
if (window->bottom_up)
value |= V_DIRECTION;
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
-
- /*
- * Disable blending and assume Window A is the bottom-most window,
- * Window C is the top-most window and Window B is in the middle.
- */
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
-
- switch (index) {
- case 0:
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
- break;
-
- case 1:
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
- tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
- break;
+ tegra_plane_writel(plane, value, DC_WIN_WIN_OPTIONS);
- case 2:
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
- tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
- break;
- }
-
- spin_unlock_irqrestore(&dc->lock, flags);
-}
-
-static void tegra_plane_destroy(struct drm_plane *plane)
-{
- struct tegra_plane *p = to_tegra_plane(plane);
-
- drm_plane_cleanup(plane);
- kfree(p);
+ if (dc->soc->supports_blending)
+ tegra_plane_setup_blending(plane, window);
+ else
+ tegra_plane_setup_blending_legacy(plane);
}
-static const u32 tegra_primary_plane_formats[] = {
+static const u32 tegra20_primary_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* non-native formats */
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
};
-static void tegra_primary_plane_destroy(struct drm_plane *plane)
-{
- tegra_plane_destroy(plane);
-}
-
-static void tegra_plane_reset(struct drm_plane *plane)
-{
- struct tegra_plane_state *state;
-
- if (plane->state)
- __drm_atomic_helper_plane_destroy_state(plane->state);
-
- kfree(plane->state);
- plane->state = NULL;
-
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state) {
- plane->state = &state->base;
- plane->state->plane = plane;
- }
-}
-
-static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
-{
- struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
- struct tegra_plane_state *copy;
-
- copy = kmalloc(sizeof(*copy), GFP_KERNEL);
- if (!copy)
- return NULL;
-
- __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
- copy->tiling = state->tiling;
- copy->format = state->format;
- copy->swap = state->swap;
-
- return &copy->base;
-}
-
-static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- __drm_atomic_helper_plane_destroy_state(state);
- kfree(state);
-}
-
-static const struct drm_plane_funcs tegra_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = tegra_primary_plane_destroy,
- .reset = tegra_plane_reset,
- .atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
- .atomic_destroy_state = tegra_plane_atomic_destroy_state,
+static const u64 tegra20_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED,
+ DRM_FORMAT_MOD_INVALID
};
-static int tegra_plane_state_add(struct tegra_plane *plane,
- struct drm_plane_state *state)
-{
- struct drm_crtc_state *crtc_state;
- struct tegra_dc_state *tegra;
- struct drm_rect clip;
- int err;
-
- /* Propagate errors from allocation or locking failures. */
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- clip.x1 = 0;
- clip.y1 = 0;
- clip.x2 = crtc_state->mode.hdisplay;
- clip.y2 = crtc_state->mode.vdisplay;
-
- /* Check plane state for visibility and calculate clipping bounds */
- err = drm_plane_helper_check_state(state, &clip, 0, INT_MAX,
- true, true);
- if (err < 0)
- return err;
-
- tegra = to_dc_state(crtc_state);
+static const u32 tegra114_primary_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+};
- tegra->planes |= WIN_A_ACT_REQ << plane->index;
+static const u32 tegra124_primary_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ /* new on Tegra124 */
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+};
- return 0;
-}
+static const u64 tegra124_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ DRM_FORMAT_MOD_INVALID
+};
static int tegra_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
@@ -510,17 +454,40 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
struct tegra_bo_tiling *tiling = &plane_state->tiling;
struct tegra_plane *tegra = to_tegra_plane(plane);
struct tegra_dc *dc = to_tegra_dc(state->crtc);
+ unsigned int format;
int err;
/* no need for further checks if the plane is being disabled */
if (!state->crtc)
return 0;
- err = tegra_dc_format(state->fb->format->format, &plane_state->format,
- &plane_state->swap);
+ err = tegra_plane_format(state->fb->format->format, &format,
+ &plane_state->swap);
if (err < 0)
return err;
+ /*
+ * Tegra20 and Tegra30 are special cases here because they support
+ * only variants of specific formats with an alpha component, but not
+ * the corresponding opaque formats. However, the opaque formats can
+ * be emulated by disabling alpha blending for the plane.
+ */
+ if (!dc->soc->supports_blending) {
+ if (!tegra_plane_format_has_alpha(format)) {
+ err = tegra_plane_format_get_alpha(format, &format);
+ if (err < 0)
+ return err;
+
+ plane_state->opaque = true;
+ } else {
+ plane_state->opaque = false;
+ }
+
+ tegra_plane_check_dependent(tegra, plane_state);
+ }
+
+ plane_state->format = format;
+
err = tegra_fb_get_tiling(state->fb, tiling);
if (err < 0)
return err;
@@ -553,32 +520,22 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
static void tegra_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
- unsigned long flags;
u32 value;
/* rien ne va plus */
if (!old_state || !old_state->crtc)
return;
- spin_lock_irqsave(&dc->lock, flags);
-
- value = WINDOW_A_SELECT << p->index;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
-
- value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+ value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
value &= ~WIN_ENABLE;
- tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
-
- spin_unlock_irqrestore(&dc->lock, flags);
+ tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
}
static void tegra_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
- struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
struct drm_framebuffer *fb = plane->state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc_window window;
@@ -604,6 +561,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.bottom_up = tegra_fb_is_bottom_up(fb);
/* copy from state */
+ window.zpos = plane->state->normalized_zpos;
window.tiling = state->tiling;
window.format = state->format;
window.swap = state->swap;
@@ -622,7 +580,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.stride[i] = fb->pitches[i];
}
- tegra_dc_setup_window(dc, p->index, &window);
+ tegra_dc_setup_window(p, &window);
}
static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
@@ -631,8 +589,7 @@ static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
.atomic_update = tegra_plane_atomic_update,
};
-static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
- struct tegra_dc *dc)
+static unsigned long tegra_plane_get_possible_crtcs(struct drm_device *drm)
{
/*
* Ideally this would use drm_crtc_mask(), but that would require the
@@ -646,9 +603,17 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
* of CRTCs that have been registered, and should therefore always be
* the same as drm_crtc_index() after registration.
*/
- unsigned long possible_crtcs = 1 << drm->mode_config.num_crtc;
+ return 1 << drm->mode_config.num_crtc;
+}
+
+static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc)
+{
+ unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
+ enum drm_plane_type type = DRM_PLANE_TYPE_PRIMARY;
struct tegra_plane *plane;
unsigned int num_formats;
+ const u64 *modifiers;
const u32 *formats;
int err;
@@ -656,13 +621,18 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
if (!plane)
return ERR_PTR(-ENOMEM);
- num_formats = ARRAY_SIZE(tegra_primary_plane_formats);
- formats = tegra_primary_plane_formats;
+ /* Always use window A as primary window */
+ plane->offset = 0xa00;
+ plane->index = 0;
+ plane->dc = dc;
+
+ num_formats = dc->soc->num_primary_formats;
+ formats = dc->soc->primary_formats;
+ modifiers = dc->soc->modifiers;
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
- &tegra_primary_plane_funcs, formats,
- num_formats, NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ &tegra_plane_funcs, formats,
+ num_formats, modifiers, type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -670,6 +640,9 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
+ if (dc->soc->supports_blending)
+ drm_plane_create_zpos_property(&plane->base, 0, 0, 255);
+
return &plane->base;
}
@@ -786,15 +759,6 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
}
-static const struct drm_plane_funcs tegra_cursor_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = tegra_plane_destroy,
- .reset = tegra_plane_reset,
- .atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
- .atomic_destroy_state = tegra_plane_atomic_destroy_state,
-};
-
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
@@ -804,6 +768,7 @@ static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
struct tegra_dc *dc)
{
+ unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
struct tegra_plane *plane;
unsigned int num_formats;
const u32 *formats;
@@ -821,12 +786,13 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
* need to special-casing the cursor plane.
*/
plane->index = 6;
+ plane->dc = dc;
num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
formats = tegra_cursor_plane_formats;
- err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
- &tegra_cursor_plane_funcs, formats,
+ err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
+ &tegra_plane_funcs, formats,
num_formats, NULL,
DRM_PLANE_TYPE_CURSOR, NULL);
if (err < 0) {
@@ -839,24 +805,76 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
return &plane->base;
}
-static void tegra_overlay_plane_destroy(struct drm_plane *plane)
-{
- tegra_plane_destroy(plane);
-}
-
-static const struct drm_plane_funcs tegra_overlay_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = tegra_overlay_plane_destroy,
- .reset = tegra_plane_reset,
- .atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
- .atomic_destroy_state = tegra_plane_atomic_destroy_state,
+static const u32 tegra20_overlay_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* non-native formats */
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ /* planar formats */
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
};
-static const uint32_t tegra_overlay_plane_formats[] = {
- DRM_FORMAT_XBGR8888,
+static const u32 tegra114_overlay_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ /* planar formats */
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+};
+
+static const u32 tegra124_overlay_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ /* new on Tegra124 */
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ /* planar formats */
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YUV420,
@@ -865,10 +883,13 @@ static const uint32_t tegra_overlay_plane_formats[] = {
static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
- unsigned int index)
+ unsigned int index,
+ bool cursor)
{
+ unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
struct tegra_plane *plane;
unsigned int num_formats;
+ enum drm_plane_type type;
const u32 *formats;
int err;
@@ -876,15 +897,21 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
if (!plane)
return ERR_PTR(-ENOMEM);
+ plane->offset = 0xa00 + 0x200 * index;
plane->index = index;
+ plane->dc = dc;
- num_formats = ARRAY_SIZE(tegra_overlay_plane_formats);
- formats = tegra_overlay_plane_formats;
+ num_formats = dc->soc->num_overlay_formats;
+ formats = dc->soc->overlay_formats;
- err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
- &tegra_overlay_plane_funcs, formats,
- num_formats, NULL,
- DRM_PLANE_TYPE_OVERLAY, NULL);
+ if (!cursor)
+ type = DRM_PLANE_TYPE_OVERLAY;
+ else
+ type = DRM_PLANE_TYPE_CURSOR;
+
+ err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
+ &tegra_plane_funcs, formats,
+ num_formats, NULL, type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -892,97 +919,78 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
+ if (dc->soc->supports_blending)
+ drm_plane_create_zpos_property(&plane->base, 0, 0, 255);
+
return &plane->base;
}
-static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
+ struct tegra_dc *dc)
{
- struct drm_plane *plane;
- unsigned int i;
-
- for (i = 0; i < 2; i++) {
- plane = tegra_dc_overlay_plane_create(drm, dc, 1 + i);
- if (IS_ERR(plane))
- return PTR_ERR(plane);
+ struct drm_plane *plane, *primary = NULL;
+ unsigned int i, j;
+
+ for (i = 0; i < dc->soc->num_wgrps; i++) {
+ const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
+
+ if (wgrp->dc == dc->pipe) {
+ for (j = 0; j < wgrp->num_windows; j++) {
+ unsigned int index = wgrp->windows[j];
+
+ plane = tegra_shared_plane_create(drm, dc,
+ wgrp->index,
+ index);
+ if (IS_ERR(plane))
+ return plane;
+
+ /*
+ * Choose the first shared plane owned by this
+ * head as the primary plane.
+ */
+ if (!primary) {
+ plane->type = DRM_PLANE_TYPE_PRIMARY;
+ primary = plane;
+ }
+ }
+ }
}
- return 0;
-}
-
-static u32 tegra_dc_get_vblank_counter(struct drm_crtc *crtc)
-{
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (dc->syncpt)
- return host1x_syncpt_read(dc->syncpt);
-
- /* fallback to software emulated VBLANK counter */
- return drm_crtc_vblank_count(&dc->base);
-}
-
-static int tegra_dc_enable_vblank(struct drm_crtc *crtc)
-{
- struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned long value, flags;
-
- spin_lock_irqsave(&dc->lock, flags);
-
- value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
- value |= VBLANK_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
- spin_unlock_irqrestore(&dc->lock, flags);
-
- return 0;
-}
-
-static void tegra_dc_disable_vblank(struct drm_crtc *crtc)
-{
- struct tegra_dc *dc = to_tegra_dc(crtc);
- unsigned long value, flags;
-
- spin_lock_irqsave(&dc->lock, flags);
-
- value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
- value &= ~VBLANK_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
- spin_unlock_irqrestore(&dc->lock, flags);
+ return primary;
}
-static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
+static struct drm_plane *tegra_dc_add_planes(struct drm_device *drm,
+ struct tegra_dc *dc)
{
- struct drm_device *drm = dc->base.dev;
- struct drm_crtc *crtc = &dc->base;
- unsigned long flags, base;
- struct tegra_bo *bo;
-
- spin_lock_irqsave(&drm->event_lock, flags);
-
- if (!dc->event) {
- spin_unlock_irqrestore(&drm->event_lock, flags);
- return;
- }
+ struct drm_plane *planes[2], *primary;
+ unsigned int planes_num;
+ unsigned int i;
+ int err;
- bo = tegra_fb_get_plane(crtc->primary->fb, 0);
+ primary = tegra_primary_plane_create(drm, dc);
+ if (IS_ERR(primary))
+ return primary;
- spin_lock(&dc->lock);
+ if (dc->soc->supports_cursor)
+ planes_num = 2;
+ else
+ planes_num = 1;
- /* check if new start address has been latched */
- tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
- tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
- base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
- tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+ for (i = 0; i < planes_num; i++) {
+ planes[i] = tegra_dc_overlay_plane_create(drm, dc, 1 + i,
+ false);
+ if (IS_ERR(planes[i])) {
+ err = PTR_ERR(planes[i]);
- spin_unlock(&dc->lock);
+ while (i--)
+ tegra_plane_funcs.destroy(planes[i]);
- if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
- drm_crtc_send_vblank_event(crtc, dc->event);
- drm_crtc_vblank_put(crtc);
- dc->event = NULL;
+ tegra_plane_funcs.destroy(primary);
+ return ERR_PTR(err);
+ }
}
- spin_unlock_irqrestore(&drm->event_lock, flags);
+ return primary;
}
static void tegra_dc_destroy(struct drm_crtc *crtc)
@@ -1035,6 +1043,379 @@ static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
kfree(state);
}
+#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
+
+static const struct debugfs_reg32 tegra_dc_regs[] = {
+ DEBUGFS_REG32(DC_CMD_GENERAL_INCR_SYNCPT),
+ DEBUGFS_REG32(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL),
+ DEBUGFS_REG32(DC_CMD_GENERAL_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DC_CMD_WIN_A_INCR_SYNCPT),
+ DEBUGFS_REG32(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL),
+ DEBUGFS_REG32(DC_CMD_WIN_A_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DC_CMD_WIN_B_INCR_SYNCPT),
+ DEBUGFS_REG32(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL),
+ DEBUGFS_REG32(DC_CMD_WIN_B_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DC_CMD_WIN_C_INCR_SYNCPT),
+ DEBUGFS_REG32(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL),
+ DEBUGFS_REG32(DC_CMD_WIN_C_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DC_CMD_CONT_SYNCPT_VSYNC),
+ DEBUGFS_REG32(DC_CMD_DISPLAY_COMMAND_OPTION0),
+ DEBUGFS_REG32(DC_CMD_DISPLAY_COMMAND),
+ DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE),
+ DEBUGFS_REG32(DC_CMD_DISPLAY_POWER_CONTROL),
+ DEBUGFS_REG32(DC_CMD_INT_STATUS),
+ DEBUGFS_REG32(DC_CMD_INT_MASK),
+ DEBUGFS_REG32(DC_CMD_INT_ENABLE),
+ DEBUGFS_REG32(DC_CMD_INT_TYPE),
+ DEBUGFS_REG32(DC_CMD_INT_POLARITY),
+ DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE1),
+ DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE2),
+ DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE3),
+ DEBUGFS_REG32(DC_CMD_STATE_ACCESS),
+ DEBUGFS_REG32(DC_CMD_STATE_CONTROL),
+ DEBUGFS_REG32(DC_CMD_DISPLAY_WINDOW_HEADER),
+ DEBUGFS_REG32(DC_CMD_REG_ACT_CONTROL),
+ DEBUGFS_REG32(DC_COM_CRC_CONTROL),
+ DEBUGFS_REG32(DC_COM_CRC_CHECKSUM),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(0)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(2)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(3)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(0)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(2)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(3)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(0)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(2)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(3)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(0)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(1)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(2)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(3)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_DATA(0)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_DATA(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(0)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(2)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(3)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(4)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(5)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(6)),
+ DEBUGFS_REG32(DC_COM_PIN_MISC_CONTROL),
+ DEBUGFS_REG32(DC_COM_PIN_PM0_CONTROL),
+ DEBUGFS_REG32(DC_COM_PIN_PM0_DUTY_CYCLE),
+ DEBUGFS_REG32(DC_COM_PIN_PM1_CONTROL),
+ DEBUGFS_REG32(DC_COM_PIN_PM1_DUTY_CYCLE),
+ DEBUGFS_REG32(DC_COM_SPI_CONTROL),
+ DEBUGFS_REG32(DC_COM_SPI_START_BYTE),
+ DEBUGFS_REG32(DC_COM_HSPI_WRITE_DATA_AB),
+ DEBUGFS_REG32(DC_COM_HSPI_WRITE_DATA_CD),
+ DEBUGFS_REG32(DC_COM_HSPI_CS_DC),
+ DEBUGFS_REG32(DC_COM_SCRATCH_REGISTER_A),
+ DEBUGFS_REG32(DC_COM_SCRATCH_REGISTER_B),
+ DEBUGFS_REG32(DC_COM_GPIO_CTRL),
+ DEBUGFS_REG32(DC_COM_GPIO_DEBOUNCE_COUNTER),
+ DEBUGFS_REG32(DC_COM_CRC_CHECKSUM_LATCHED),
+ DEBUGFS_REG32(DC_DISP_DISP_SIGNAL_OPTIONS0),
+ DEBUGFS_REG32(DC_DISP_DISP_SIGNAL_OPTIONS1),
+ DEBUGFS_REG32(DC_DISP_DISP_WIN_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_DISP_MEM_HIGH_PRIORITY),
+ DEBUGFS_REG32(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER),
+ DEBUGFS_REG32(DC_DISP_DISP_TIMING_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_REF_TO_SYNC),
+ DEBUGFS_REG32(DC_DISP_SYNC_WIDTH),
+ DEBUGFS_REG32(DC_DISP_BACK_PORCH),
+ DEBUGFS_REG32(DC_DISP_ACTIVE),
+ DEBUGFS_REG32(DC_DISP_FRONT_PORCH),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_CONTROL),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_D),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_CONTROL),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_D),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_CONTROL),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_D),
+ DEBUGFS_REG32(DC_DISP_V_PULSE0_CONTROL),
+ DEBUGFS_REG32(DC_DISP_V_PULSE0_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_V_PULSE0_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_V_PULSE0_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_V_PULSE1_CONTROL),
+ DEBUGFS_REG32(DC_DISP_V_PULSE1_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_V_PULSE1_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_V_PULSE1_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_V_PULSE2_CONTROL),
+ DEBUGFS_REG32(DC_DISP_V_PULSE2_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_V_PULSE3_CONTROL),
+ DEBUGFS_REG32(DC_DISP_V_PULSE3_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_M0_CONTROL),
+ DEBUGFS_REG32(DC_DISP_M1_CONTROL),
+ DEBUGFS_REG32(DC_DISP_DI_CONTROL),
+ DEBUGFS_REG32(DC_DISP_PP_CONTROL),
+ DEBUGFS_REG32(DC_DISP_PP_SELECT_A),
+ DEBUGFS_REG32(DC_DISP_PP_SELECT_B),
+ DEBUGFS_REG32(DC_DISP_PP_SELECT_C),
+ DEBUGFS_REG32(DC_DISP_PP_SELECT_D),
+ DEBUGFS_REG32(DC_DISP_DISP_CLOCK_CONTROL),
+ DEBUGFS_REG32(DC_DISP_DISP_INTERFACE_CONTROL),
+ DEBUGFS_REG32(DC_DISP_DISP_COLOR_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SHIFT_CLOCK_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_DATA_ENABLE_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_SERIAL_INTERFACE_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_LCD_SPI_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_BORDER_COLOR),
+ DEBUGFS_REG32(DC_DISP_COLOR_KEY0_LOWER),
+ DEBUGFS_REG32(DC_DISP_COLOR_KEY0_UPPER),
+ DEBUGFS_REG32(DC_DISP_COLOR_KEY1_LOWER),
+ DEBUGFS_REG32(DC_DISP_COLOR_KEY1_UPPER),
+ DEBUGFS_REG32(DC_DISP_CURSOR_FOREGROUND),
+ DEBUGFS_REG32(DC_DISP_CURSOR_BACKGROUND),
+ DEBUGFS_REG32(DC_DISP_CURSOR_START_ADDR),
+ DEBUGFS_REG32(DC_DISP_CURSOR_START_ADDR_NS),
+ DEBUGFS_REG32(DC_DISP_CURSOR_POSITION),
+ DEBUGFS_REG32(DC_DISP_CURSOR_POSITION_NS),
+ DEBUGFS_REG32(DC_DISP_INIT_SEQ_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_A),
+ DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_B),
+ DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_C),
+ DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_D),
+ DEBUGFS_REG32(DC_DISP_DC_MCCIF_FIFOCTRL),
+ DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY0A_HYST),
+ DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY0B_HYST),
+ DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY1A_HYST),
+ DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY1B_HYST),
+ DEBUGFS_REG32(DC_DISP_DAC_CRT_CTRL),
+ DEBUGFS_REG32(DC_DISP_DISP_MISC_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SD_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SD_CSC_COEFF),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(0)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(1)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(2)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(3)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(4)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(5)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(6)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(7)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(8)),
+ DEBUGFS_REG32(DC_DISP_SD_FLICKER_CONTROL),
+ DEBUGFS_REG32(DC_DISP_DC_PIXEL_COUNT),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(0)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(1)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(2)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(3)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(4)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(5)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(6)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(7)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_TF(0)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_TF(1)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_TF(2)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_TF(3)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SD_HW_K_VALUES),
+ DEBUGFS_REG32(DC_DISP_SD_MAN_K_VALUES),
+ DEBUGFS_REG32(DC_DISP_CURSOR_START_ADDR_HI),
+ DEBUGFS_REG32(DC_DISP_BLEND_CURSOR_CONTROL),
+ DEBUGFS_REG32(DC_WIN_WIN_OPTIONS),
+ DEBUGFS_REG32(DC_WIN_BYTE_SWAP),
+ DEBUGFS_REG32(DC_WIN_BUFFER_CONTROL),
+ DEBUGFS_REG32(DC_WIN_COLOR_DEPTH),
+ DEBUGFS_REG32(DC_WIN_POSITION),
+ DEBUGFS_REG32(DC_WIN_SIZE),
+ DEBUGFS_REG32(DC_WIN_PRESCALED_SIZE),
+ DEBUGFS_REG32(DC_WIN_H_INITIAL_DDA),
+ DEBUGFS_REG32(DC_WIN_V_INITIAL_DDA),
+ DEBUGFS_REG32(DC_WIN_DDA_INC),
+ DEBUGFS_REG32(DC_WIN_LINE_STRIDE),
+ DEBUGFS_REG32(DC_WIN_BUF_STRIDE),
+ DEBUGFS_REG32(DC_WIN_UV_BUF_STRIDE),
+ DEBUGFS_REG32(DC_WIN_BUFFER_ADDR_MODE),
+ DEBUGFS_REG32(DC_WIN_DV_CONTROL),
+ DEBUGFS_REG32(DC_WIN_BLEND_NOKEY),
+ DEBUGFS_REG32(DC_WIN_BLEND_1WIN),
+ DEBUGFS_REG32(DC_WIN_BLEND_2WIN_X),
+ DEBUGFS_REG32(DC_WIN_BLEND_2WIN_Y),
+ DEBUGFS_REG32(DC_WIN_BLEND_3WIN_XY),
+ DEBUGFS_REG32(DC_WIN_HP_FETCH_CONTROL),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_NS),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_U),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_U_NS),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_V),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_V_NS),
+ DEBUGFS_REG32(DC_WINBUF_ADDR_H_OFFSET),
+ DEBUGFS_REG32(DC_WINBUF_ADDR_H_OFFSET_NS),
+ DEBUGFS_REG32(DC_WINBUF_ADDR_V_OFFSET),
+ DEBUGFS_REG32(DC_WINBUF_ADDR_V_OFFSET_NS),
+ DEBUGFS_REG32(DC_WINBUF_UFLOW_STATUS),
+ DEBUGFS_REG32(DC_WINBUF_AD_UFLOW_STATUS),
+ DEBUGFS_REG32(DC_WINBUF_BD_UFLOW_STATUS),
+ DEBUGFS_REG32(DC_WINBUF_CD_UFLOW_STATUS),
+};
+
+static int tegra_dc_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+ unsigned int i;
+ int err = 0;
+
+ drm_modeset_lock(&dc->base.mutex, NULL);
+
+ if (!dc->base.state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_dc_regs); i++) {
+ unsigned int offset = tegra_dc_regs[i].offset;
+
+ seq_printf(s, "%-40s %#05x %08x\n", tegra_dc_regs[i].name,
+ offset, tegra_dc_readl(dc, offset));
+ }
+
+unlock:
+ drm_modeset_unlock(&dc->base.mutex);
+ return err;
+}
+
+static int tegra_dc_show_crc(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+ int err = 0;
+ u32 value;
+
+ drm_modeset_lock(&dc->base.mutex, NULL);
+
+ if (!dc->base.state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
+ tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
+ tegra_dc_commit(dc);
+
+ drm_crtc_wait_one_vblank(&dc->base);
+ drm_crtc_wait_one_vblank(&dc->base);
+
+ value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
+ seq_printf(s, "%08x\n", value);
+
+ tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
+
+unlock:
+ drm_modeset_unlock(&dc->base.mutex);
+ return err;
+}
+
+static int tegra_dc_show_stats(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+
+ seq_printf(s, "frames: %lu\n", dc->stats.frames);
+ seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
+ seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
+ seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_dc_show_regs, 0, NULL },
+ { "crc", tegra_dc_show_crc, 0, NULL },
+ { "stats", tegra_dc_show_stats, 0, NULL },
+};
+
+static int tegra_dc_late_register(struct drm_crtc *crtc)
+{
+ unsigned int i, count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = crtc->dev->primary;
+ struct dentry *root;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ int err;
+
+#ifdef CONFIG_DEBUG_FS
+ root = crtc->debugfs_entry;
+#else
+ root = NULL;
+#endif
+
+ dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!dc->debugfs_files)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++)
+ dc->debugfs_files[i].data = dc;
+
+ err = drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
+ if (err < 0)
+ goto free;
+
+ return 0;
+
+free:
+ kfree(dc->debugfs_files);
+ dc->debugfs_files = NULL;
+
+ return err;
+}
+
+static void tegra_dc_early_unregister(struct drm_crtc *crtc)
+{
+ unsigned int count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = crtc->dev->primary;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ drm_debugfs_remove_files(dc->debugfs_files, count, minor);
+ kfree(dc->debugfs_files);
+ dc->debugfs_files = NULL;
+}
+
+static u32 tegra_dc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ /* XXX vblank syncpoints don't work with nvdisplay yet */
+ if (dc->syncpt && !dc->soc->has_nvdisplay)
+ return host1x_syncpt_read(dc->syncpt);
+
+ /* fall back to the software-emulated VBLANK counter */
+ return (u32)drm_crtc_vblank_count(&dc->base);
+}
+
+static int tegra_dc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value |= VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ return 0;
+}
+
+static void tegra_dc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value &= ~VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+}
+
static const struct drm_crtc_funcs tegra_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.set_config = drm_atomic_helper_set_config,
@@ -1042,6 +1423,8 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
.reset = tegra_crtc_reset,
.atomic_duplicate_state = tegra_crtc_atomic_duplicate_state,
.atomic_destroy_state = tegra_crtc_atomic_destroy_state,
+ .late_register = tegra_dc_late_register,
+ .early_unregister = tegra_dc_early_unregister,
.get_vblank_counter = tegra_dc_get_vblank_counter,
.enable_vblank = tegra_dc_enable_vblank,
.disable_vblank = tegra_dc_disable_vblank,
@@ -1054,10 +1437,12 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
unsigned int v_ref_to_sync = 1;
unsigned long value;
- tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+ if (!dc->soc->has_nvdisplay) {
+ tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
- value = (v_ref_to_sync << 16) | h_ref_to_sync;
- tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
+ value = (v_ref_to_sync << 16) | h_ref_to_sync;
+ tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
+ }
value = ((mode->vsync_end - mode->vsync_start) << 16) |
((mode->hsync_end - mode->hsync_start) << 0);
@@ -1136,8 +1521,10 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
state->div);
DRM_DEBUG_KMS("pclk: %lu\n", state->pclk);
- value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
- tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+ if (!dc->soc->has_nvdisplay) {
+ value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+ }
err = clk_set_rate(dc->clk, state->pclk);
if (err < 0)
@@ -1223,6 +1610,15 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
tegra_dc_stats_reset(&dc->stats);
drm_crtc_vblank_off(crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+
+ spin_unlock_irq(&crtc->dev->event_lock);
+
pm_runtime_put_sync(dc->dev);
}
@@ -1238,41 +1634,70 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
/* initialize display controller */
if (dc->syncpt) {
- u32 syncpt = host1x_syncpt_id(dc->syncpt);
+ u32 syncpt = host1x_syncpt_id(dc->syncpt), enable;
+
+ if (dc->soc->has_nvdisplay)
+ enable = 1 << 31;
+ else
+ enable = 1 << 8;
value = SYNCPT_CNTRL_NO_STALL;
tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
- value = SYNCPT_VSYNC_ENABLE | syncpt;
+ value = enable | syncpt;
tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
}
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+ if (dc->soc->has_nvdisplay) {
+ value = DSC_TO_UF_INT | DSC_BBUF_UF_INT | DSC_RBUF_UF_INT |
+ DSC_OBUF_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
- /* initialize timer */
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
- WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+ value = DSC_TO_UF_INT | DSC_BBUF_UF_INT | DSC_RBUF_UF_INT |
+ DSC_OBUF_UF_INT | SD3_BUCKET_WALK_DONE_INT |
+ HEAD_UF_INT | MSF_INT | REG_TMOUT_INT |
+ REGION_CRC_INT | V_PULSE2_INT | V_PULSE3_INT |
+ VBLANK_INT | FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
- WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+ value = SD3_BUCKET_WALK_DONE_INT | HEAD_UF_INT | VBLANK_INT |
+ FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+ value = HEAD_UF_INT | REG_TMOUT_INT | FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+ tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
+ } else {
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ /* initialize timer */
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+ WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+ WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+ }
- if (dc->soc->supports_border_color)
+ if (dc->soc->supports_background_color)
+ tegra_dc_writel(dc, 0, DC_DISP_BLEND_BACKGROUND_COLOR);
+ else
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
/* apply PLL and pixel clock changes */
@@ -1293,33 +1718,39 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
value |= DISP_CTRL_MODE_C_DISPLAY;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
- value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
- PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
- tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ if (!dc->soc->has_nvdisplay) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ }
+
+ /* enable underflow reporting and display red for missing pixels */
+ if (dc->soc->has_nvdisplay) {
+ value = UNDERFLOW_MODE_RED | UNDERFLOW_REPORT_ENABLE;
+ tegra_dc_writel(dc, value, DC_COM_RG_UNDERFLOW);
+ }
tegra_dc_commit(dc);
drm_crtc_vblank_on(crtc);
}
-static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- return 0;
-}
-
static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned long flags;
if (crtc->state->event) {
- crtc->state->event->pipe = drm_crtc_index(crtc);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- dc->event = crtc->state->event;
crtc->state->event = NULL;
}
}
@@ -1329,13 +1760,18 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct tegra_dc_state *state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+
+ value = state->planes << 8 | GENERAL_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, state->planes << 8, DC_CMD_STATE_CONTROL);
- tegra_dc_writel(dc, state->planes, DC_CMD_STATE_CONTROL);
+ value = state->planes | GENERAL_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
}
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
- .atomic_check = tegra_crtc_atomic_check,
.atomic_begin = tegra_crtc_atomic_begin,
.atomic_flush = tegra_crtc_atomic_flush,
.atomic_enable = tegra_crtc_atomic_enable,
@@ -1362,7 +1798,6 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
drm_crtc_handle_vblank(&dc->base);
- tegra_dc_finish_page_flip(dc);
dc->stats.vblank++;
}
@@ -1380,357 +1815,18 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
dc->stats.overflow++;
}
- return IRQ_HANDLED;
-}
-
-static int tegra_dc_show_regs(struct seq_file *s, void *data)
-{
- struct drm_info_node *node = s->private;
- struct tegra_dc *dc = node->info_ent->data;
- int err = 0;
-
- drm_modeset_lock(&dc->base.mutex, NULL);
-
- if (!dc->base.state->active) {
- err = -EBUSY;
- goto unlock;
- }
-
-#define DUMP_REG(name) \
- seq_printf(s, "%-40s %#05x %08x\n", #name, name, \
- tegra_dc_readl(dc, name))
-
- DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
- DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
- DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR);
- DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT);
- DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL);
- DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR);
- DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT);
- DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL);
- DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR);
- DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT);
- DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL);
- DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR);
- DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC);
- DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
- DUMP_REG(DC_CMD_DISPLAY_COMMAND);
- DUMP_REG(DC_CMD_SIGNAL_RAISE);
- DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
- DUMP_REG(DC_CMD_INT_STATUS);
- DUMP_REG(DC_CMD_INT_MASK);
- DUMP_REG(DC_CMD_INT_ENABLE);
- DUMP_REG(DC_CMD_INT_TYPE);
- DUMP_REG(DC_CMD_INT_POLARITY);
- DUMP_REG(DC_CMD_SIGNAL_RAISE1);
- DUMP_REG(DC_CMD_SIGNAL_RAISE2);
- DUMP_REG(DC_CMD_SIGNAL_RAISE3);
- DUMP_REG(DC_CMD_STATE_ACCESS);
- DUMP_REG(DC_CMD_STATE_CONTROL);
- DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
- DUMP_REG(DC_CMD_REG_ACT_CONTROL);
- DUMP_REG(DC_COM_CRC_CONTROL);
- DUMP_REG(DC_COM_CRC_CHECKSUM);
- DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0));
- DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1));
- DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2));
- DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3));
- DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0));
- DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1));
- DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2));
- DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3));
- DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0));
- DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1));
- DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2));
- DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3));
- DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0));
- DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1));
- DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2));
- DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3));
- DUMP_REG(DC_COM_PIN_INPUT_DATA(0));
- DUMP_REG(DC_COM_PIN_INPUT_DATA(1));
- DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0));
- DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1));
- DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2));
- DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3));
- DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4));
- DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5));
- DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6));
- DUMP_REG(DC_COM_PIN_MISC_CONTROL);
- DUMP_REG(DC_COM_PIN_PM0_CONTROL);
- DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE);
- DUMP_REG(DC_COM_PIN_PM1_CONTROL);
- DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE);
- DUMP_REG(DC_COM_SPI_CONTROL);
- DUMP_REG(DC_COM_SPI_START_BYTE);
- DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB);
- DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD);
- DUMP_REG(DC_COM_HSPI_CS_DC);
- DUMP_REG(DC_COM_SCRATCH_REGISTER_A);
- DUMP_REG(DC_COM_SCRATCH_REGISTER_B);
- DUMP_REG(DC_COM_GPIO_CTRL);
- DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER);
- DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED);
- DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
- DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
- DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
- DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY);
- DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
- DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
- DUMP_REG(DC_DISP_REF_TO_SYNC);
- DUMP_REG(DC_DISP_SYNC_WIDTH);
- DUMP_REG(DC_DISP_BACK_PORCH);
- DUMP_REG(DC_DISP_ACTIVE);
- DUMP_REG(DC_DISP_FRONT_PORCH);
- DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
- DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
- DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
- DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
- DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
- DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
- DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
- DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
- DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
- DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
- DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
- DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
- DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
- DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
- DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
- DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
- DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
- DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
- DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
- DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
- DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
- DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
- DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
- DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
- DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
- DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
- DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
- DUMP_REG(DC_DISP_M0_CONTROL);
- DUMP_REG(DC_DISP_M1_CONTROL);
- DUMP_REG(DC_DISP_DI_CONTROL);
- DUMP_REG(DC_DISP_PP_CONTROL);
- DUMP_REG(DC_DISP_PP_SELECT_A);
- DUMP_REG(DC_DISP_PP_SELECT_B);
- DUMP_REG(DC_DISP_PP_SELECT_C);
- DUMP_REG(DC_DISP_PP_SELECT_D);
- DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
- DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
- DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
- DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
- DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
- DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
- DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
- DUMP_REG(DC_DISP_BORDER_COLOR);
- DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
- DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
- DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
- DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
- DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
- DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
- DUMP_REG(DC_DISP_CURSOR_START_ADDR);
- DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
- DUMP_REG(DC_DISP_CURSOR_POSITION);
- DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
- DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
- DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
- DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
- DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
- DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
- DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
- DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
- DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
- DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST);
- DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
- DUMP_REG(DC_DISP_DAC_CRT_CTRL);
- DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
- DUMP_REG(DC_DISP_SD_CONTROL);
- DUMP_REG(DC_DISP_SD_CSC_COEFF);
- DUMP_REG(DC_DISP_SD_LUT(0));
- DUMP_REG(DC_DISP_SD_LUT(1));
- DUMP_REG(DC_DISP_SD_LUT(2));
- DUMP_REG(DC_DISP_SD_LUT(3));
- DUMP_REG(DC_DISP_SD_LUT(4));
- DUMP_REG(DC_DISP_SD_LUT(5));
- DUMP_REG(DC_DISP_SD_LUT(6));
- DUMP_REG(DC_DISP_SD_LUT(7));
- DUMP_REG(DC_DISP_SD_LUT(8));
- DUMP_REG(DC_DISP_SD_FLICKER_CONTROL);
- DUMP_REG(DC_DISP_DC_PIXEL_COUNT);
- DUMP_REG(DC_DISP_SD_HISTOGRAM(0));
- DUMP_REG(DC_DISP_SD_HISTOGRAM(1));
- DUMP_REG(DC_DISP_SD_HISTOGRAM(2));
- DUMP_REG(DC_DISP_SD_HISTOGRAM(3));
- DUMP_REG(DC_DISP_SD_HISTOGRAM(4));
- DUMP_REG(DC_DISP_SD_HISTOGRAM(5));
- DUMP_REG(DC_DISP_SD_HISTOGRAM(6));
- DUMP_REG(DC_DISP_SD_HISTOGRAM(7));
- DUMP_REG(DC_DISP_SD_BL_TF(0));
- DUMP_REG(DC_DISP_SD_BL_TF(1));
- DUMP_REG(DC_DISP_SD_BL_TF(2));
- DUMP_REG(DC_DISP_SD_BL_TF(3));
- DUMP_REG(DC_DISP_SD_BL_CONTROL);
- DUMP_REG(DC_DISP_SD_HW_K_VALUES);
- DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
- DUMP_REG(DC_DISP_CURSOR_START_ADDR_HI);
- DUMP_REG(DC_DISP_BLEND_CURSOR_CONTROL);
- DUMP_REG(DC_WIN_WIN_OPTIONS);
- DUMP_REG(DC_WIN_BYTE_SWAP);
- DUMP_REG(DC_WIN_BUFFER_CONTROL);
- DUMP_REG(DC_WIN_COLOR_DEPTH);
- DUMP_REG(DC_WIN_POSITION);
- DUMP_REG(DC_WIN_SIZE);
- DUMP_REG(DC_WIN_PRESCALED_SIZE);
- DUMP_REG(DC_WIN_H_INITIAL_DDA);
- DUMP_REG(DC_WIN_V_INITIAL_DDA);
- DUMP_REG(DC_WIN_DDA_INC);
- DUMP_REG(DC_WIN_LINE_STRIDE);
- DUMP_REG(DC_WIN_BUF_STRIDE);
- DUMP_REG(DC_WIN_UV_BUF_STRIDE);
- DUMP_REG(DC_WIN_BUFFER_ADDR_MODE);
- DUMP_REG(DC_WIN_DV_CONTROL);
- DUMP_REG(DC_WIN_BLEND_NOKEY);
- DUMP_REG(DC_WIN_BLEND_1WIN);
- DUMP_REG(DC_WIN_BLEND_2WIN_X);
- DUMP_REG(DC_WIN_BLEND_2WIN_Y);
- DUMP_REG(DC_WIN_BLEND_3WIN_XY);
- DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
- DUMP_REG(DC_WINBUF_START_ADDR);
- DUMP_REG(DC_WINBUF_START_ADDR_NS);
- DUMP_REG(DC_WINBUF_START_ADDR_U);
- DUMP_REG(DC_WINBUF_START_ADDR_U_NS);
- DUMP_REG(DC_WINBUF_START_ADDR_V);
- DUMP_REG(DC_WINBUF_START_ADDR_V_NS);
- DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
- DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS);
- DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
- DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS);
- DUMP_REG(DC_WINBUF_UFLOW_STATUS);
- DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS);
- DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS);
- DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS);
-
-#undef DUMP_REG
-
-unlock:
- drm_modeset_unlock(&dc->base.mutex);
- return err;
-}
-
-static int tegra_dc_show_crc(struct seq_file *s, void *data)
-{
- struct drm_info_node *node = s->private;
- struct tegra_dc *dc = node->info_ent->data;
- int err = 0;
- u32 value;
-
- drm_modeset_lock(&dc->base.mutex, NULL);
-
- if (!dc->base.state->active) {
- err = -EBUSY;
- goto unlock;
- }
-
- value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
- tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
- tegra_dc_commit(dc);
-
- drm_crtc_wait_one_vblank(&dc->base);
- drm_crtc_wait_one_vblank(&dc->base);
-
- value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
- seq_printf(s, "%08x\n", value);
-
- tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
-
-unlock:
- drm_modeset_unlock(&dc->base.mutex);
- return err;
-}
-
-static int tegra_dc_show_stats(struct seq_file *s, void *data)
-{
- struct drm_info_node *node = s->private;
- struct tegra_dc *dc = node->info_ent->data;
-
- seq_printf(s, "frames: %lu\n", dc->stats.frames);
- seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
- seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
- seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
-
- return 0;
-}
-
-static struct drm_info_list debugfs_files[] = {
- { "regs", tegra_dc_show_regs, 0, NULL },
- { "crc", tegra_dc_show_crc, 0, NULL },
- { "stats", tegra_dc_show_stats, 0, NULL },
-};
-
-static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
-{
- unsigned int i;
- char *name;
- int err;
-
- name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe);
- dc->debugfs = debugfs_create_dir(name, minor->debugfs_root);
- kfree(name);
-
- if (!dc->debugfs)
- return -ENOMEM;
-
- dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
- GFP_KERNEL);
- if (!dc->debugfs_files) {
- err = -ENOMEM;
- goto remove;
+ if (status & HEAD_UF_INT) {
+ dev_dbg_ratelimited(dc->dev, "%s(): head underflow\n", __func__);
+ dc->stats.underflow++;
}
- for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
- dc->debugfs_files[i].data = dc;
-
- err = drm_debugfs_create_files(dc->debugfs_files,
- ARRAY_SIZE(debugfs_files),
- dc->debugfs, minor);
- if (err < 0)
- goto free;
-
- dc->minor = minor;
-
- return 0;
-
-free:
- kfree(dc->debugfs_files);
- dc->debugfs_files = NULL;
-remove:
- debugfs_remove(dc->debugfs);
- dc->debugfs = NULL;
-
- return err;
-}
-
-static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
-{
- drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files),
- dc->minor);
- dc->minor = NULL;
-
- kfree(dc->debugfs_files);
- dc->debugfs_files = NULL;
-
- debugfs_remove(dc->debugfs);
- dc->debugfs = NULL;
-
- return 0;
+ return IRQ_HANDLED;
}
static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct iommu_group *group = iommu_group_get(client->dev);
unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
struct tegra_dc *dc = host1x_client_to_dc(client);
struct tegra_drm *tegra = drm->dev_private;
@@ -1742,18 +1838,27 @@ static int tegra_dc_init(struct host1x_client *client)
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
- if (tegra->domain) {
- err = iommu_attach_device(tegra->domain, dc->dev);
- if (err < 0) {
- dev_err(dc->dev, "failed to attach to domain: %d\n",
- err);
- return err;
+ if (group && tegra->domain) {
+ if (group != tegra->group) {
+ err = iommu_attach_group(tegra->domain, group);
+ if (err < 0) {
+ dev_err(dc->dev,
+ "failed to attach to domain: %d\n",
+ err);
+ return err;
+ }
+
+ tegra->group = group;
}
dc->domain = tegra->domain;
}
- primary = tegra_dc_primary_plane_create(drm, dc);
+ if (dc->soc->wgrps)
+ primary = tegra_dc_add_shared_planes(drm, dc);
+ else
+ primary = tegra_dc_add_planes(drm, dc);
+
if (IS_ERR(primary)) {
err = PTR_ERR(primary);
goto cleanup;
@@ -1765,6 +1870,13 @@ static int tegra_dc_init(struct host1x_client *client)
err = PTR_ERR(cursor);
goto cleanup;
}
+ } else {
+ /* dedicate one overlay plane to the mouse cursor */
+ cursor = tegra_dc_overlay_plane_create(drm, dc, 2, true);
+ if (IS_ERR(cursor)) {
+ err = PTR_ERR(cursor);
+ goto cleanup;
+ }
}
err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
@@ -1787,16 +1899,6 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
- err = tegra_dc_add_planes(drm, dc);
- if (err < 0)
- goto cleanup;
-
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_dc_debugfs_init(dc, drm->primary);
- if (err < 0)
- dev_err(dc->dev, "debugfs setup failed: %d\n", err);
- }
-
err = devm_request_irq(dc->dev, dc->irq, tegra_dc_irq, 0,
dev_name(dc->dev), dc);
if (err < 0) {
@@ -1808,14 +1910,18 @@ static int tegra_dc_init(struct host1x_client *client)
return 0;
cleanup:
- if (cursor)
+ if (!IS_ERR_OR_NULL(cursor))
drm_plane_cleanup(cursor);
- if (primary)
+ if (!IS_ERR(primary))
drm_plane_cleanup(primary);
- if (tegra->domain) {
- iommu_detach_device(tegra->domain, dc->dev);
+ if (group && dc->domain) {
+ if (group == tegra->group) {
+ iommu_detach_group(dc->domain, group);
+ tegra->group = NULL;
+ }
+
dc->domain = NULL;
}
@@ -1824,25 +1930,26 @@ cleanup:
static int tegra_dc_exit(struct host1x_client *client)
{
+ struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct iommu_group *group = iommu_group_get(client->dev);
struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct tegra_drm *tegra = drm->dev_private;
int err;
devm_free_irq(dc->dev, dc->irq, dc);
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_dc_debugfs_exit(dc);
- if (err < 0)
- dev_err(dc->dev, "debugfs cleanup failed: %d\n", err);
- }
-
err = tegra_dc_rgb_exit(dc);
if (err) {
dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
return err;
}
- if (dc->domain) {
- iommu_detach_device(dc->domain, dc->dev);
+ if (group && dc->domain) {
+ if (group == tegra->group) {
+ iommu_detach_group(dc->domain, group);
+ tegra->group = NULL;
+ }
+
dc->domain = NULL;
}
@@ -1857,57 +1964,143 @@ static const struct host1x_client_ops dc_client_ops = {
};
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
- .supports_border_color = true,
+ .supports_background_color = false,
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
+ .supports_blending = false,
.pitch_align = 8,
.has_powergate = false,
- .broken_reset = true,
+ .coupled_pm = true,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra20_primary_formats),
+ .primary_formats = tegra20_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
+ .overlay_formats = tegra20_overlay_formats,
+ .modifiers = tegra20_modifiers,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
- .supports_border_color = true,
+ .supports_background_color = false,
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
+ .supports_blending = false,
.pitch_align = 8,
.has_powergate = false,
- .broken_reset = false,
+ .coupled_pm = false,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra20_primary_formats),
+ .primary_formats = tegra20_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
+ .overlay_formats = tegra20_overlay_formats,
+ .modifiers = tegra20_modifiers,
};
static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
- .supports_border_color = true,
+ .supports_background_color = false,
.supports_interlacing = false,
.supports_cursor = false,
.supports_block_linear = false,
+ .supports_blending = false,
.pitch_align = 64,
.has_powergate = true,
- .broken_reset = false,
+ .coupled_pm = false,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra114_primary_formats),
+ .primary_formats = tegra114_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
+ .overlay_formats = tegra114_overlay_formats,
+ .modifiers = tegra20_modifiers,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
- .supports_border_color = false,
+ .supports_background_color = true,
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_blending = true,
.pitch_align = 64,
.has_powergate = true,
- .broken_reset = false,
+ .coupled_pm = false,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra124_primary_formats),
+ .primary_formats = tegra124_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra124_overlay_formats),
+ .overlay_formats = tegra124_overlay_formats,
+ .modifiers = tegra124_modifiers,
};
static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
- .supports_border_color = false,
+ .supports_background_color = true,
.supports_interlacing = true,
.supports_cursor = true,
.supports_block_linear = true,
+ .supports_blending = true,
.pitch_align = 64,
.has_powergate = true,
- .broken_reset = false,
+ .coupled_pm = false,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra114_primary_formats),
+ .primary_formats = tegra114_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
+ .overlay_formats = tegra114_overlay_formats,
+ .modifiers = tegra124_modifiers,
+};
+
+static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = {
+ {
+ .index = 0,
+ .dc = 0,
+ .windows = (const unsigned int[]) { 0 },
+ .num_windows = 1,
+ }, {
+ .index = 1,
+ .dc = 1,
+ .windows = (const unsigned int[]) { 1 },
+ .num_windows = 1,
+ }, {
+ .index = 2,
+ .dc = 1,
+ .windows = (const unsigned int[]) { 2 },
+ .num_windows = 1,
+ }, {
+ .index = 3,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 3 },
+ .num_windows = 1,
+ }, {
+ .index = 4,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 4 },
+ .num_windows = 1,
+ }, {
+ .index = 5,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 5 },
+ .num_windows = 1,
+ },
+};
+
+static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
+ .supports_background_color = true,
+ .supports_interlacing = true,
+ .supports_cursor = true,
+ .supports_block_linear = true,
+ .supports_blending = true,
+ .pitch_align = 64,
+ .has_powergate = false,
+ .coupled_pm = false,
+ .has_nvdisplay = true,
+ .wgrps = tegra186_dc_wgrps,
+ .num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps),
};
static const struct of_device_id tegra_dc_of_match[] = {
{
+ .compatible = "nvidia,tegra186-dc",
+ .data = &tegra186_dc_soc_info,
+ }, {
.compatible = "nvidia,tegra210-dc",
.data = &tegra210_dc_soc_info,
}, {
@@ -1965,6 +2158,43 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc)
return 0;
}
+static int tegra_dc_match_by_pipe(struct device *dev, void *data)
+{
+ struct tegra_dc *dc = dev_get_drvdata(dev);
+ unsigned int pipe = (unsigned long)data;
+
+ return dc->pipe == pipe;
+}
+
+static int tegra_dc_couple(struct tegra_dc *dc)
+{
+ /*
+ * On Tegra20, DC1 requires DC0 to be taken out of reset in order to
+ * be enabled; otherwise the CPU hangs when writing to the
+ * CMD_DISPLAY_COMMAND / POWER_CONTROL registers while the CRTC is
+ * being enabled.
+ */
+ if (dc->soc->coupled_pm && dc->pipe == 1) {
+ u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE;
+ struct device_link *link;
+ struct device *partner;
+
+ partner = driver_find_device(dc->dev->driver, NULL, NULL,
+ tegra_dc_match_by_pipe);
+ if (!partner)
+ return -EPROBE_DEFER;
+
+ link = device_link_add(dc->dev, partner, flags);
+ if (!link) {
+ dev_err(dc->dev, "failed to link controllers\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner));
+ }
+
+ return 0;
+}
+
static int tegra_dc_probe(struct platform_device *pdev)
{
struct resource *regs;
@@ -1977,7 +2207,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
dc->soc = of_device_get_match_data(&pdev->dev);
- spin_lock_init(&dc->lock);
INIT_LIST_HEAD(&dc->list);
dc->dev = &pdev->dev;
@@ -1985,6 +2214,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (err < 0)
return err;
+ err = tegra_dc_couple(dc);
+ if (err < 0)
+ return err;
+
dc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dc->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
@@ -1998,21 +2231,19 @@ static int tegra_dc_probe(struct platform_device *pdev)
}
/* assert reset and disable clock */
- if (!dc->soc->broken_reset) {
- err = clk_prepare_enable(dc->clk);
- if (err < 0)
- return err;
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0)
+ return err;
- usleep_range(2000, 4000);
+ usleep_range(2000, 4000);
- err = reset_control_assert(dc->rst);
- if (err < 0)
- return err;
+ err = reset_control_assert(dc->rst);
+ if (err < 0)
+ return err;
- usleep_range(2000, 4000);
+ usleep_range(2000, 4000);
- clk_disable_unprepare(dc->clk);
- }
+ clk_disable_unprepare(dc->clk);
if (dc->soc->has_powergate) {
if (dc->pipe == 0)
@@ -2086,12 +2317,10 @@ static int tegra_dc_suspend(struct device *dev)
struct tegra_dc *dc = dev_get_drvdata(dev);
int err;
- if (!dc->soc->broken_reset) {
- err = reset_control_assert(dc->rst);
- if (err < 0) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- return err;
- }
+ err = reset_control_assert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
}
if (dc->soc->has_powergate)
@@ -2121,13 +2350,10 @@ static int tegra_dc_resume(struct device *dev)
return err;
}
- if (!dc->soc->broken_reset) {
- err = reset_control_deassert(dc->rst);
- if (err < 0) {
- dev_err(dev,
- "failed to deassert reset: %d\n", err);
- return err;
- }
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ return err;
}
}
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index cb100b6..d2b50d3 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -18,6 +18,24 @@
struct tegra_output;
+struct tegra_dc_state {
+ struct drm_crtc_state base;
+
+ struct clk *clk;
+ unsigned long pclk;
+ unsigned int div;
+
+ u32 planes;
+};
+
+static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
+{
+ if (state)
+ return container_of(state, struct tegra_dc_state, base);
+
+ return NULL;
+}
+
struct tegra_dc_stats {
unsigned long frames;
unsigned long vblank;
@@ -25,21 +43,36 @@ struct tegra_dc_stats {
unsigned long overflow;
};
+struct tegra_windowgroup_soc {
+ unsigned int index;
+ unsigned int dc;
+ const unsigned int *windows;
+ unsigned int num_windows;
+};
+
struct tegra_dc_soc_info {
- bool supports_border_color;
+ bool supports_background_color;
bool supports_interlacing;
bool supports_cursor;
bool supports_block_linear;
+ bool supports_blending;
unsigned int pitch_align;
bool has_powergate;
- bool broken_reset;
+ bool coupled_pm;
+ bool has_nvdisplay;
+ const struct tegra_windowgroup_soc *wgrps;
+ unsigned int num_wgrps;
+ const u32 *primary_formats;
+ unsigned int num_primary_formats;
+ const u32 *overlay_formats;
+ unsigned int num_overlay_formats;
+ const u64 *modifiers;
};
struct tegra_dc {
struct host1x_client client;
struct host1x_syncpt *syncpt;
struct device *dev;
- spinlock_t lock;
struct drm_crtc base;
unsigned int powergate;
@@ -56,11 +89,6 @@ struct tegra_dc {
struct list_head list;
struct drm_info_list *debugfs_files;
- struct drm_minor *minor;
- struct dentry *debugfs;
-
- /* page-flip handling */
- struct drm_pending_vblank_event *event;
const struct tegra_dc_soc_info *soc;
@@ -110,6 +138,7 @@ struct tegra_dc_window {
unsigned int bits_per_pixel;
unsigned int stride[2];
unsigned long base[3];
+ unsigned int zpos;
bool bottom_up;
struct tegra_bo_tiling tiling;
@@ -118,6 +147,7 @@ struct tegra_dc_window {
};
/* from dc.c */
+bool tegra_dc_has_output(struct tegra_dc *dc, struct device *dev);
void tegra_dc_commit(struct tegra_dc *dc);
int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
@@ -167,15 +197,26 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DC_CMD_INT_ENABLE 0x039
#define DC_CMD_INT_TYPE 0x03a
#define DC_CMD_INT_POLARITY 0x03b
-#define CTXSW_INT (1 << 0)
-#define FRAME_END_INT (1 << 1)
-#define VBLANK_INT (1 << 2)
-#define WIN_A_UF_INT (1 << 8)
-#define WIN_B_UF_INT (1 << 9)
-#define WIN_C_UF_INT (1 << 10)
-#define WIN_A_OF_INT (1 << 14)
-#define WIN_B_OF_INT (1 << 15)
-#define WIN_C_OF_INT (1 << 16)
+#define CTXSW_INT (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define VBLANK_INT (1 << 2)
+#define V_PULSE3_INT (1 << 4)
+#define V_PULSE2_INT (1 << 5)
+#define REGION_CRC_INT (1 << 6)
+#define REG_TMOUT_INT (1 << 7)
+#define WIN_A_UF_INT (1 << 8)
+#define WIN_B_UF_INT (1 << 9)
+#define WIN_C_UF_INT (1 << 10)
+#define MSF_INT (1 << 12)
+#define WIN_A_OF_INT (1 << 14)
+#define WIN_B_OF_INT (1 << 15)
+#define WIN_C_OF_INT (1 << 16)
+#define HEAD_UF_INT (1 << 23)
+#define SD3_BUCKET_WALK_DONE_INT (1 << 24)
+#define DSC_OBUF_UF_INT (1 << 26)
+#define DSC_RBUF_UF_INT (1 << 27)
+#define DSC_BBUF_UF_INT (1 << 28)
+#define DSC_TO_UF_INT (1 << 29)
#define DC_CMD_SIGNAL_RAISE1 0x03c
#define DC_CMD_SIGNAL_RAISE2 0x03d
@@ -196,6 +237,8 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define WIN_B_UPDATE (1 << 10)
#define WIN_C_UPDATE (1 << 11)
#define CURSOR_UPDATE (1 << 15)
+#define COMMON_ACTREQ (1 << 16)
+#define COMMON_UPDATE (1 << 17)
#define NC_HOST_TRIG (1 << 24)
#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
@@ -238,6 +281,10 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
+#define DC_COM_RG_UNDERFLOW 0x365
+#define UNDERFLOW_MODE_RED (1 << 8)
+#define UNDERFLOW_REPORT_ENABLE (1 << 0)
+
#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
#define H_PULSE0_ENABLE (1 << 8)
#define H_PULSE1_ENABLE (1 << 10)
@@ -249,10 +296,10 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define HDMI_ENABLE (1 << 30)
#define DSI_ENABLE (1 << 29)
#define SOR1_TIMING_CYA (1 << 27)
-#define SOR1_ENABLE (1 << 26)
-#define SOR_ENABLE (1 << 25)
#define CURSOR_ENABLE (1 << 16)
+#define SOR_ENABLE(x) (1 << (25 + (x)))
+
#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
@@ -360,29 +407,33 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DISP_ORDER_BLUE_RED (1 << 9)
#define DC_DISP_DISP_COLOR_CONTROL 0x430
-#define BASE_COLOR_SIZE666 (0 << 0)
-#define BASE_COLOR_SIZE111 (1 << 0)
-#define BASE_COLOR_SIZE222 (2 << 0)
-#define BASE_COLOR_SIZE333 (3 << 0)
-#define BASE_COLOR_SIZE444 (4 << 0)
-#define BASE_COLOR_SIZE555 (5 << 0)
-#define BASE_COLOR_SIZE565 (6 << 0)
-#define BASE_COLOR_SIZE332 (7 << 0)
-#define BASE_COLOR_SIZE888 (8 << 0)
+#define BASE_COLOR_SIZE666 ( 0 << 0)
+#define BASE_COLOR_SIZE111 ( 1 << 0)
+#define BASE_COLOR_SIZE222 ( 2 << 0)
+#define BASE_COLOR_SIZE333 ( 3 << 0)
+#define BASE_COLOR_SIZE444 ( 4 << 0)
+#define BASE_COLOR_SIZE555 ( 5 << 0)
+#define BASE_COLOR_SIZE565 ( 6 << 0)
+#define BASE_COLOR_SIZE332 ( 7 << 0)
+#define BASE_COLOR_SIZE888 ( 8 << 0)
+#define BASE_COLOR_SIZE101010 (10 << 0)
+#define BASE_COLOR_SIZE121212 (12 << 0)
#define DITHER_CONTROL_MASK (3 << 8)
#define DITHER_CONTROL_DISABLE (0 << 8)
#define DITHER_CONTROL_ORDERED (2 << 8)
#define DITHER_CONTROL_ERRDIFF (3 << 8)
#define BASE_COLOR_SIZE_MASK (0xf << 0)
-#define BASE_COLOR_SIZE_666 (0 << 0)
-#define BASE_COLOR_SIZE_111 (1 << 0)
-#define BASE_COLOR_SIZE_222 (2 << 0)
-#define BASE_COLOR_SIZE_333 (3 << 0)
-#define BASE_COLOR_SIZE_444 (4 << 0)
-#define BASE_COLOR_SIZE_555 (5 << 0)
-#define BASE_COLOR_SIZE_565 (6 << 0)
-#define BASE_COLOR_SIZE_332 (7 << 0)
-#define BASE_COLOR_SIZE_888 (8 << 0)
+#define BASE_COLOR_SIZE_666 ( 0 << 0)
+#define BASE_COLOR_SIZE_111 ( 1 << 0)
+#define BASE_COLOR_SIZE_222 ( 2 << 0)
+#define BASE_COLOR_SIZE_333 ( 3 << 0)
+#define BASE_COLOR_SIZE_444 ( 4 << 0)
+#define BASE_COLOR_SIZE_555 ( 5 << 0)
+#define BASE_COLOR_SIZE_565 ( 6 << 0)
+#define BASE_COLOR_SIZE_332 ( 7 << 0)
+#define BASE_COLOR_SIZE_888 ( 8 << 0)
+#define BASE_COLOR_SIZE_101010 ( 10 << 0)
+#define BASE_COLOR_SIZE_121212 ( 12 << 0)
#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
#define SC1_H_QUALIFIER_NONE (1 << 16)
@@ -449,6 +500,12 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DC_DISP_SD_HW_K_VALUES 0x4dd
#define DC_DISP_SD_MAN_K_VALUES 0x4de
+#define DC_DISP_BLEND_BACKGROUND_COLOR 0x4e4
+#define BACKGROUND_COLOR_ALPHA(x) (((x) & 0xff) << 24)
+#define BACKGROUND_COLOR_BLUE(x) (((x) & 0xff) << 16)
+#define BACKGROUND_COLOR_GREEN(x) (((x) & 0xff) << 8)
+#define BACKGROUND_COLOR_RED(x) (((x) & 0xff) << 0)
+
#define DC_DISP_INTERLACE_CONTROL 0x4e5
#define INTERLACE_STATUS (1 << 2)
#define INTERLACE_START (1 << 1)
@@ -467,6 +524,35 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define CURSOR_SRC_BLEND_MASK (3 << 8)
#define CURSOR_ALPHA 0xff
+#define DC_WIN_CORE_ACT_CONTROL 0x50e
+#define VCOUNTER (0 << 0)
+#define HCOUNTER (1 << 0)
+
+#define DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA 0x543
+#define LATENCY_CTL_MODE_ENABLE (1 << 2)
+
+#define DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB 0x544
+#define WATERMARK_MASK 0x1fffffff
+
+#define DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER 0x560
+#define PIPE_METER_INT(x) (((x) & 0xff) << 8)
+#define PIPE_METER_FRAC(x) (((x) & 0xff) << 0)
+
+#define DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG 0x561
+#define MEMPOOL_ENTRIES(x) (((x) & 0xffff) << 0)
+
+#define DC_WIN_CORE_IHUB_WGRP_FETCH_METER 0x562
+#define SLOTS(x) (((x) & 0xff) << 0)
+
+#define DC_WIN_CORE_IHUB_LINEBUF_CONFIG 0x563
+#define MODE_TWO_LINES (0 << 14)
+#define MODE_FOUR_LINES (1 << 14)
+
+#define DC_WIN_CORE_IHUB_THREAD_GROUP 0x568
+#define THREAD_NUM_MASK (0x1f << 1)
+#define THREAD_NUM(x) (((x) & 0x1f) << 1)
+#define THREAD_GROUP_ENABLE (1 << 0)
+
#define DC_WIN_CSC_YOF 0x611
#define DC_WIN_CSC_KYRGB 0x612
#define DC_WIN_CSC_KUR 0x613
@@ -502,9 +588,9 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define WIN_COLOR_DEPTH_P4 2
#define WIN_COLOR_DEPTH_P8 3
#define WIN_COLOR_DEPTH_B4G4R4A4 4
-#define WIN_COLOR_DEPTH_B5G5R5A 5
+#define WIN_COLOR_DEPTH_B5G5R5A1 5
#define WIN_COLOR_DEPTH_B5G6R5 6
-#define WIN_COLOR_DEPTH_AB5G5R5 7
+#define WIN_COLOR_DEPTH_A1B5G5R5 7
#define WIN_COLOR_DEPTH_B8G8R8A8 12
#define WIN_COLOR_DEPTH_R8G8B8A8 13
#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
@@ -519,18 +605,32 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define WIN_COLOR_DEPTH_YUV422R 23
#define WIN_COLOR_DEPTH_YCbCr422RA 24
#define WIN_COLOR_DEPTH_YUV422RA 25
+#define WIN_COLOR_DEPTH_R4G4B4A4 27
+#define WIN_COLOR_DEPTH_R5G5B5A 28
+#define WIN_COLOR_DEPTH_AR5G5B5 29
+#define WIN_COLOR_DEPTH_B5G5R5X1 30
+#define WIN_COLOR_DEPTH_X1B5G5R5 31
+#define WIN_COLOR_DEPTH_R5G5B5X1 32
+#define WIN_COLOR_DEPTH_X1R5G5B5 33
+#define WIN_COLOR_DEPTH_R5G6B5 34
+#define WIN_COLOR_DEPTH_A8R8G8B8 35
+#define WIN_COLOR_DEPTH_A8B8G8R8 36
+#define WIN_COLOR_DEPTH_B8G8R8X8 37
+#define WIN_COLOR_DEPTH_R8G8B8X8 38
+#define WIN_COLOR_DEPTH_X8B8G8R8 65
+#define WIN_COLOR_DEPTH_X8R8G8B8 66
#define DC_WIN_POSITION 0x704
-#define H_POSITION(x) (((x) & 0x1fff) << 0)
-#define V_POSITION(x) (((x) & 0x1fff) << 16)
+#define H_POSITION(x) (((x) & 0x1fff) << 0) /* XXX 0x7fff on Tegra186 */
+#define V_POSITION(x) (((x) & 0x1fff) << 16) /* XXX 0x7fff on Tegra186 */
#define DC_WIN_SIZE 0x705
-#define H_SIZE(x) (((x) & 0x1fff) << 0)
-#define V_SIZE(x) (((x) & 0x1fff) << 16)
+#define H_SIZE(x) (((x) & 0x1fff) << 0) /* XXX 0x7fff on Tegra186 */
+#define V_SIZE(x) (((x) & 0x1fff) << 16) /* XXX 0x7fff on Tegra186 */
#define DC_WIN_PRESCALED_SIZE 0x706
#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0)
-#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16)
+#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16) /* XXX 0x7fff on Tegra186 */
#define DC_WIN_H_INITIAL_DDA 0x707
#define DC_WIN_V_INITIAL_DDA 0x708
@@ -546,11 +646,24 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DC_WIN_BUFFER_ADDR_MODE_TILE (1 << 0)
#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16)
#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV (1 << 16)
+
#define DC_WIN_DV_CONTROL 0x70e
#define DC_WIN_BLEND_NOKEY 0x70f
+#define BLEND_WEIGHT1(x) (((x) & 0xff) << 16)
+#define BLEND_WEIGHT0(x) (((x) & 0xff) << 8)
+
#define DC_WIN_BLEND_1WIN 0x710
+#define BLEND_CONTROL_FIX (0 << 2)
+#define BLEND_CONTROL_ALPHA (1 << 2)
+#define BLEND_COLOR_KEY_NONE (0 << 0)
+#define BLEND_COLOR_KEY_0 (1 << 0)
+#define BLEND_COLOR_KEY_1 (2 << 0)
+#define BLEND_COLOR_KEY_BOTH (3 << 0)
+
#define DC_WIN_BLEND_2WIN_X 0x711
+#define BLEND_CONTROL_DEPENDENT (2 << 2)
+
#define DC_WIN_BLEND_2WIN_Y 0x712
#define DC_WIN_BLEND_3WIN_XY 0x713
@@ -575,8 +688,97 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
#define DC_WINBUF_SURFACE_KIND_BLOCK (2 << 0)
#define DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(x) (((x) & 0x7) << 4)
+#define DC_WINBUF_START_ADDR_HI 0x80d
+
+#define DC_WINBUF_CDE_CONTROL 0x82f
+#define ENABLE_SURFACE (1 << 0)
+
#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
+/* Tegra186 and later */
+#define DC_DISP_CORE_SOR_SET_CONTROL(x) (0x403 + (x))
+#define PROTOCOL_MASK (0xf << 8)
+#define PROTOCOL_SINGLE_TMDS_A (0x1 << 8)
+
+#define DC_WIN_CORE_WINDOWGROUP_SET_CONTROL 0x702
+#define OWNER_MASK (0xf << 0)
+#define OWNER(x) (((x) & 0xf) << 0)
+
+#define DC_WIN_CROPPED_SIZE 0x706
+
+#define DC_WIN_PLANAR_STORAGE 0x709
+#define PITCH(x) (((x) >> 6) & 0x1fff)
+
+#define DC_WIN_SET_PARAMS 0x70d
+#define CLAMP_BEFORE_BLEND (1 << 15)
+#define DEGAMMA_NONE (0 << 13)
+#define DEGAMMA_SRGB (1 << 13)
+#define DEGAMMA_YUV8_10 (2 << 13)
+#define DEGAMMA_YUV12 (3 << 13)
+#define INPUT_RANGE_BYPASS (0 << 10)
+#define INPUT_RANGE_LIMITED (1 << 10)
+#define INPUT_RANGE_FULL (2 << 10)
+#define COLOR_SPACE_RGB (0 << 8)
+#define COLOR_SPACE_YUV_601 (1 << 8)
+#define COLOR_SPACE_YUV_709 (2 << 8)
+#define COLOR_SPACE_YUV_2020 (3 << 8)
+
+#define DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER 0x70e
+#define HORIZONTAL_TAPS_2 (1 << 3)
+#define HORIZONTAL_TAPS_5 (4 << 3)
+#define VERTICAL_TAPS_2 (1 << 0)
+#define VERTICAL_TAPS_5 (4 << 0)
+
+#define DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE 0x711
+#define INPUT_SCALER_USE422 (1 << 2)
+#define INPUT_SCALER_VBYPASS (1 << 1)
+#define INPUT_SCALER_HBYPASS (1 << 0)
+
+#define DC_WIN_BLEND_LAYER_CONTROL 0x716
+#define COLOR_KEY_NONE (0 << 25)
+#define COLOR_KEY_SRC (1 << 25)
+#define COLOR_KEY_DST (2 << 25)
+#define BLEND_BYPASS (1 << 24)
+#define K2(x) (((x) & 0xff) << 16)
+#define K1(x) (((x) & 0xff) << 8)
+#define WINDOW_LAYER_DEPTH(x) (((x) & 0xff) << 0)
+
+#define DC_WIN_BLEND_MATCH_SELECT 0x717
+#define BLEND_FACTOR_DST_ALPHA_ZERO (0 << 12)
+#define BLEND_FACTOR_DST_ALPHA_ONE (1 << 12)
+#define BLEND_FACTOR_DST_ALPHA_NEG_K1_TIMES_SRC (2 << 12)
+#define BLEND_FACTOR_DST_ALPHA_K2 (3 << 12)
+#define BLEND_FACTOR_SRC_ALPHA_ZERO (0 << 8)
+#define BLEND_FACTOR_SRC_ALPHA_K1 (1 << 8)
+#define BLEND_FACTOR_SRC_ALPHA_K2 (2 << 8)
+#define BLEND_FACTOR_SRC_ALPHA_NEG_K1_TIMES_DST (3 << 8)
+#define BLEND_FACTOR_DST_COLOR_ZERO (0 << 4)
+#define BLEND_FACTOR_DST_COLOR_ONE (1 << 4)
+#define BLEND_FACTOR_DST_COLOR_K1 (2 << 4)
+#define BLEND_FACTOR_DST_COLOR_K2 (3 << 4)
+#define BLEND_FACTOR_DST_COLOR_K1_TIMES_DST (4 << 4)
+#define BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_DST (5 << 4)
+#define BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC (6 << 4)
+#define BLEND_FACTOR_DST_COLOR_NEG_K1 (7 << 4)
+#define BLEND_FACTOR_SRC_COLOR_ZERO (0 << 0)
+#define BLEND_FACTOR_SRC_COLOR_ONE (1 << 0)
+#define BLEND_FACTOR_SRC_COLOR_K1 (2 << 0)
+#define BLEND_FACTOR_SRC_COLOR_K1_TIMES_DST (3 << 0)
+#define BLEND_FACTOR_SRC_COLOR_NEG_K1_TIMES_DST (4 << 0)
+#define BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC (5 << 0)
+
+#define DC_WIN_BLEND_NOMATCH_SELECT 0x718
+
+#define DC_WIN_PRECOMP_WGRP_PARAMS 0x724
+#define SWAP_UV (1 << 0)
+
+#define DC_WIN_WINDOW_SET_CONTROL 0x730
+#define CONTROL_CSC_ENABLE (1 << 5)
+
+#define DC_WINBUF_CROPPED_POINT 0x806
+#define OFFSET_Y(x) (((x) & 0xffff) << 16)
+#define OFFSET_X(x) (((x) & 0xffff) << 0)
+
#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index e4da041..d84e81f 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -15,6 +15,7 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
@@ -321,6 +322,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
case DPAUX_PADCTL_FUNC_I2C:
value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
DPAUX_HYBRID_PADCTL_MODE_I2C;
break;
@@ -467,52 +471,37 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->clk);
}
- err = clk_prepare_enable(dpaux->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable module clock: %d\n",
- err);
- return err;
- }
-
- if (dpaux->rst)
- reset_control_deassert(dpaux->rst);
-
dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dpaux->clk_parent)) {
dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
PTR_ERR(dpaux->clk_parent));
- err = PTR_ERR(dpaux->clk_parent);
- goto assert_reset;
- }
-
- err = clk_prepare_enable(dpaux->clk_parent);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
- err);
- goto assert_reset;
+ return PTR_ERR(dpaux->clk_parent);
}
err = clk_set_rate(dpaux->clk_parent, 270000000);
if (err < 0) {
dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
err);
- goto disable_parent_clk;
+ return err;
}
dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(dpaux->vdd)) {
dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
PTR_ERR(dpaux->vdd));
- err = PTR_ERR(dpaux->vdd);
- goto disable_parent_clk;
+ return PTR_ERR(dpaux->vdd);
}
+ platform_set_drvdata(pdev, dpaux);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
dev_name(dpaux->dev), dpaux);
if (err < 0) {
dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
dpaux->irq, err);
- goto disable_parent_clk;
+ return err;
}
disable_irq(dpaux->irq);
@@ -522,7 +511,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
err = drm_dp_aux_register(&dpaux->aux);
if (err < 0)
- goto disable_parent_clk;
+ return err;
/*
* Assume that by default the DPAUX/I2C pads will be used for HDMI,
@@ -560,47 +549,97 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
list_add_tail(&dpaux->list, &dpaux_list);
mutex_unlock(&dpaux_lock);
- platform_set_drvdata(pdev, dpaux);
-
return 0;
-
-disable_parent_clk:
- clk_disable_unprepare(dpaux->clk_parent);
-assert_reset:
- if (dpaux->rst)
- reset_control_assert(dpaux->rst);
-
- clk_disable_unprepare(dpaux->clk);
-
- return err;
}
static int tegra_dpaux_remove(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+ cancel_work_sync(&dpaux->work);
+
/* make sure pads are powered down when not in use */
tegra_dpaux_pad_power_down(dpaux);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
drm_dp_aux_unregister(&dpaux->aux);
mutex_lock(&dpaux_lock);
list_del(&dpaux->list);
mutex_unlock(&dpaux_lock);
- cancel_work_sync(&dpaux->work);
+ return 0;
+}
- clk_disable_unprepare(dpaux->clk_parent);
+#ifdef CONFIG_PM
+static int tegra_dpaux_suspend(struct device *dev)
+{
+ struct tegra_dpaux *dpaux = dev_get_drvdata(dev);
+ int err = 0;
+
+ if (dpaux->rst) {
+ err = reset_control_assert(dpaux->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
- if (dpaux->rst)
- reset_control_assert(dpaux->rst);
+ usleep_range(1000, 2000);
+ clk_disable_unprepare(dpaux->clk_parent);
clk_disable_unprepare(dpaux->clk);
+ return err;
+}
+
+static int tegra_dpaux_resume(struct device *dev)
+{
+ struct tegra_dpaux *dpaux = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(dpaux->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(dpaux->clk_parent);
+ if (err < 0) {
+ dev_err(dev, "failed to enable parent clock: %d\n", err);
+ goto disable_clk;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (dpaux->rst) {
+ err = reset_control_deassert(dpaux->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_parent;
+ }
+
+ usleep_range(1000, 2000);
+ }
+
return 0;
+
+disable_parent:
+ clk_disable_unprepare(dpaux->clk_parent);
+disable_clk:
+ clk_disable_unprepare(dpaux->clk);
+ return err;
}
+#endif
+
+static const struct dev_pm_ops tegra_dpaux_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
+};
static const struct of_device_id tegra_dpaux_of_match[] = {
+ { .compatible = "nvidia,tegra186-dpaux", },
{ .compatible = "nvidia,tegra210-dpaux", },
{ .compatible = "nvidia,tegra124-dpaux", },
{ },
@@ -611,6 +650,7 @@ struct platform_driver tegra_dpaux_driver = {
.driver = {
.name = "tegra-dpaux",
.of_match_table = tegra_dpaux_of_match,
+ .pm = &tegra_dpaux_pm_ops,
},
.probe = tegra_dpaux_probe,
.remove = tegra_dpaux_remove,
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 52552b9..7afe2f6 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -33,97 +33,63 @@ struct tegra_drm_file {
struct mutex lock;
};
-static void tegra_atomic_schedule(struct tegra_drm *tegra,
- struct drm_atomic_state *state)
+static int tegra_atomic_check(struct drm_device *drm,
+ struct drm_atomic_state *state)
{
- tegra->commit.state = state;
- schedule_work(&tegra->commit.work);
-}
-
-static void tegra_atomic_complete(struct tegra_drm *tegra,
- struct drm_atomic_state *state)
-{
- struct drm_device *drm = tegra->drm;
-
- /*
- * Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one condition: It must be guaranteed
- * that the asynchronous work has either been cancelled (if the driver
- * supports it, which at least requires that the framebuffers get
- * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
- * before the new state gets committed on the software side with
- * drm_atomic_helper_swap_state().
- *
- * This scheme allows new atomic state updates to be prepared and
- * checked in parallel to the asynchronous completion of the previous
- * update. Which is important since compositors need to figure out the
- * composition of the next frame right after having submitted the
- * current layout.
- */
-
- drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
- drm_atomic_helper_wait_for_vblanks(drm, state);
-
- drm_atomic_helper_cleanup_planes(drm, state);
- drm_atomic_state_put(state);
-}
-
-static void tegra_atomic_work(struct work_struct *work)
-{
- struct tegra_drm *tegra = container_of(work, struct tegra_drm,
- commit.work);
-
- tegra_atomic_complete(tegra, tegra->commit.state);
-}
-
-static int tegra_atomic_commit(struct drm_device *drm,
- struct drm_atomic_state *state, bool nonblock)
-{
- struct tegra_drm *tegra = drm->dev_private;
int err;
- err = drm_atomic_helper_prepare_planes(drm, state);
- if (err)
+ err = drm_atomic_helper_check_modeset(drm, state);
+ if (err < 0)
return err;
- /* serialize outstanding nonblocking commits */
- mutex_lock(&tegra->commit.lock);
- flush_work(&tegra->commit.work);
+ err = tegra_display_hub_atomic_check(drm, state);
+ if (err < 0)
+ return err;
- /*
- * This is the point of no return - everything below never fails except
- * when the hw goes bonghits. Which means we can commit the new state on
- * the software side now.
- */
+ err = drm_atomic_normalize_zpos(drm, state);
+ if (err < 0)
+ return err;
- err = drm_atomic_helper_swap_state(state, true);
- if (err) {
- mutex_unlock(&tegra->commit.lock);
- drm_atomic_helper_cleanup_planes(drm, state);
+ err = drm_atomic_helper_check_planes(drm, state);
+ if (err < 0)
return err;
- }
- drm_atomic_state_get(state);
- if (nonblock)
- tegra_atomic_schedule(tegra, state);
- else
- tegra_atomic_complete(tegra, state);
+ if (state->legacy_cursor_update)
+ state->async_update = !drm_atomic_helper_async_check(drm, state);
- mutex_unlock(&tegra->commit.lock);
return 0;
}
-static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
- .output_poll_changed = tegra_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = tegra_atomic_commit,
+ .atomic_check = tegra_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *drm = old_state->dev;
+ struct tegra_drm *tegra = drm->dev_private;
+
+ if (tegra->hub) {
+ drm_atomic_helper_commit_modeset_disables(drm, old_state);
+ tegra_display_hub_atomic_commit(drm, old_state);
+ drm_atomic_helper_commit_planes(drm, old_state, 0);
+ drm_atomic_helper_commit_modeset_enables(drm, old_state);
+ drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_wait_for_vblanks(drm, old_state);
+ drm_atomic_helper_cleanup_planes(drm, old_state);
+ } else {
+ drm_atomic_helper_commit_tail_rpm(old_state);
+ }
+}
+
+static const struct drm_mode_config_helper_funcs
+tegra_drm_mode_config_helpers = {
+ .atomic_commit_tail = tegra_atomic_commit_tail,
};
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
@@ -172,9 +138,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
- mutex_init(&tegra->commit.lock);
- INIT_WORK(&tegra->commit.work, tegra_atomic_work);
-
drm->dev_private = tegra;
tegra->drm = drm;
@@ -188,7 +151,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm->mode_config.allow_fb_modifiers = true;
- drm->mode_config.funcs = &tegra_drm_mode_funcs;
+ drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
err = tegra_drm_fb_prepare(drm);
if (err < 0)
@@ -200,6 +164,12 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (err < 0)
goto fbdev;
+ if (tegra->hub) {
+ err = tegra_display_hub_prepare(tegra->hub);
+ if (err < 0)
+ goto device;
+ }
+
/*
* We don't use the drm_irq_install() helpers provided by the DRM
* core, so we need to set this manually in order to allow the
@@ -212,16 +182,19 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
- goto device;
+ goto hub;
drm_mode_config_reset(drm);
err = tegra_drm_fb_init(drm);
if (err < 0)
- goto device;
+ goto hub;
return 0;
+hub:
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
device:
host1x_device_exit(device);
fbdev:
@@ -249,6 +222,7 @@ static void tegra_drm_unload(struct drm_device *drm)
drm_kms_helper_poll_fini(drm);
tegra_drm_fb_exit(drm);
+ drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
err = host1x_device_exit(device);
@@ -286,15 +260,6 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
kfree(context);
}
-static void tegra_drm_lastclose(struct drm_device *drm)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct tegra_drm *tegra = drm->dev_private;
-
- tegra_fbdev_restore_mode(tegra->fbdev);
-#endif
-}
-
static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
@@ -660,7 +625,8 @@ static int tegra_syncpt_wait(struct drm_device *drm, void *data,
if (!sp)
return -EINVAL;
- return host1x_syncpt_wait(sp, args->thresh, args->timeout,
+ return host1x_syncpt_wait(sp, args->thresh,
+ msecs_to_jiffies(args->timeout),
&args->value);
}
@@ -1100,7 +1066,7 @@ static struct drm_driver tegra_drm_driver = {
.unload = tegra_drm_unload,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
- .lastclose = tegra_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = tegra_debugfs_init,
@@ -1148,8 +1114,7 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
-void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
- dma_addr_t *dma)
+void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
struct iova *alloc;
void *virt;
@@ -1317,6 +1282,10 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
{ .compatible = "nvidia,tegra210-vic", },
+ { .compatible = "nvidia,tegra186-display", },
+ { .compatible = "nvidia,tegra186-dc", },
+ { .compatible = "nvidia,tegra186-sor", },
+ { .compatible = "nvidia,tegra186-sor1", },
{ .compatible = "nvidia,tegra186-vic", },
{ /* sentinel */ }
};
@@ -1332,6 +1301,7 @@ static struct host1x_driver host1x_drm_driver = {
};
static struct platform_driver * const drivers[] = {
+ &tegra_display_hub_driver,
&tegra_dc_driver,
&tegra_hdmi_driver,
&tegra_dsi_driver,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index ddae331..4f41aae 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -16,6 +16,7 @@
#include <linux/of_gpio.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -23,6 +24,7 @@
#include <drm/drm_fixed.h>
#include "gem.h"
+#include "hub.h"
#include "trace.h"
struct reset_control;
@@ -44,6 +46,7 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
+ struct iommu_group *group;
struct mutex mm_lock;
struct drm_mm mm;
@@ -62,11 +65,7 @@ struct tegra_drm {
unsigned int pitch_align;
- struct {
- struct drm_atomic_state *state;
- struct work_struct work;
- struct mutex lock;
- } commit;
+ struct tegra_display_hub *hub;
struct drm_atomic_state *state;
};
@@ -152,6 +151,8 @@ int tegra_output_probe(struct tegra_output *output);
void tegra_output_remove(struct tegra_output *output);
int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
void tegra_output_exit(struct tegra_output *output);
+void tegra_output_find_possible_crtcs(struct tegra_output *output,
+ struct drm_device *drm);
int tegra_output_connector_get_modes(struct drm_connector *connector);
enum drm_connector_status
@@ -188,11 +189,8 @@ int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
void tegra_drm_fb_suspend(struct drm_device *drm);
void tegra_drm_fb_resume(struct drm_device *drm);
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
-void tegra_fb_output_poll_changed(struct drm_device *drm);
-#endif
+extern struct platform_driver tegra_display_hub_driver;
extern struct platform_driver tegra_dc_driver;
extern struct platform_driver tegra_hdmi_driver;
extern struct platform_driver tegra_dsi_driver;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 046649e..87c5d89 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -65,8 +65,6 @@ struct tegra_dsi {
struct clk *clk;
struct drm_info_list *debugfs_files;
- struct drm_minor *minor;
- struct dentry *debugfs;
unsigned long flags;
enum mipi_dsi_pixel_format format;
@@ -122,12 +120,89 @@ static inline void tegra_dsi_writel(struct tegra_dsi *dsi, u32 value,
writel(value, dsi->regs + (offset << 2));
}
+#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
+
+static const struct debugfs_reg32 tegra_dsi_regs[] = {
+ DEBUGFS_REG32(DSI_INCR_SYNCPT),
+ DEBUGFS_REG32(DSI_INCR_SYNCPT_CONTROL),
+ DEBUGFS_REG32(DSI_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DSI_CTXSW),
+ DEBUGFS_REG32(DSI_RD_DATA),
+ DEBUGFS_REG32(DSI_WR_DATA),
+ DEBUGFS_REG32(DSI_POWER_CONTROL),
+ DEBUGFS_REG32(DSI_INT_ENABLE),
+ DEBUGFS_REG32(DSI_INT_STATUS),
+ DEBUGFS_REG32(DSI_INT_MASK),
+ DEBUGFS_REG32(DSI_HOST_CONTROL),
+ DEBUGFS_REG32(DSI_CONTROL),
+ DEBUGFS_REG32(DSI_SOL_DELAY),
+ DEBUGFS_REG32(DSI_MAX_THRESHOLD),
+ DEBUGFS_REG32(DSI_TRIGGER),
+ DEBUGFS_REG32(DSI_TX_CRC),
+ DEBUGFS_REG32(DSI_STATUS),
+ DEBUGFS_REG32(DSI_INIT_SEQ_CONTROL),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_0),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_1),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_2),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_3),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_4),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_5),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_6),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_7),
+ DEBUGFS_REG32(DSI_PKT_SEQ_0_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_0_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_1_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_1_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_2_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_2_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_3_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_3_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_4_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_4_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_5_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_5_HI),
+ DEBUGFS_REG32(DSI_DCS_CMDS),
+ DEBUGFS_REG32(DSI_PKT_LEN_0_1),
+ DEBUGFS_REG32(DSI_PKT_LEN_2_3),
+ DEBUGFS_REG32(DSI_PKT_LEN_4_5),
+ DEBUGFS_REG32(DSI_PKT_LEN_6_7),
+ DEBUGFS_REG32(DSI_PHY_TIMING_0),
+ DEBUGFS_REG32(DSI_PHY_TIMING_1),
+ DEBUGFS_REG32(DSI_PHY_TIMING_2),
+ DEBUGFS_REG32(DSI_BTA_TIMING),
+ DEBUGFS_REG32(DSI_TIMEOUT_0),
+ DEBUGFS_REG32(DSI_TIMEOUT_1),
+ DEBUGFS_REG32(DSI_TO_TALLY),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_0),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_CD),
+ DEBUGFS_REG32(DSI_PAD_CD_STATUS),
+ DEBUGFS_REG32(DSI_VIDEO_MODE_CONTROL),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_1),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_2),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_3),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_4),
+ DEBUGFS_REG32(DSI_GANGED_MODE_CONTROL),
+ DEBUGFS_REG32(DSI_GANGED_MODE_START),
+ DEBUGFS_REG32(DSI_GANGED_MODE_SIZE),
+ DEBUGFS_REG32(DSI_RAW_DATA_BYTE_COUNT),
+ DEBUGFS_REG32(DSI_ULTRA_LOW_POWER_CONTROL),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_8),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_9),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_10),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_11),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_12),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_13),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_14),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_15),
+};
+
static int tegra_dsi_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_dsi *dsi = node->info_ent->data;
struct drm_crtc *crtc = dsi->output.encoder.crtc;
struct drm_device *drm = node->minor->dev;
+ unsigned int i;
int err = 0;
drm_modeset_lock_all(drm);
@@ -137,93 +212,12 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
goto unlock;
}
-#define DUMP_REG(name) \
- seq_printf(s, "%-32s %#05x %08x\n", #name, name, \
- tegra_dsi_readl(dsi, name))
-
- DUMP_REG(DSI_INCR_SYNCPT);
- DUMP_REG(DSI_INCR_SYNCPT_CONTROL);
- DUMP_REG(DSI_INCR_SYNCPT_ERROR);
- DUMP_REG(DSI_CTXSW);
- DUMP_REG(DSI_RD_DATA);
- DUMP_REG(DSI_WR_DATA);
- DUMP_REG(DSI_POWER_CONTROL);
- DUMP_REG(DSI_INT_ENABLE);
- DUMP_REG(DSI_INT_STATUS);
- DUMP_REG(DSI_INT_MASK);
- DUMP_REG(DSI_HOST_CONTROL);
- DUMP_REG(DSI_CONTROL);
- DUMP_REG(DSI_SOL_DELAY);
- DUMP_REG(DSI_MAX_THRESHOLD);
- DUMP_REG(DSI_TRIGGER);
- DUMP_REG(DSI_TX_CRC);
- DUMP_REG(DSI_STATUS);
-
- DUMP_REG(DSI_INIT_SEQ_CONTROL);
- DUMP_REG(DSI_INIT_SEQ_DATA_0);
- DUMP_REG(DSI_INIT_SEQ_DATA_1);
- DUMP_REG(DSI_INIT_SEQ_DATA_2);
- DUMP_REG(DSI_INIT_SEQ_DATA_3);
- DUMP_REG(DSI_INIT_SEQ_DATA_4);
- DUMP_REG(DSI_INIT_SEQ_DATA_5);
- DUMP_REG(DSI_INIT_SEQ_DATA_6);
- DUMP_REG(DSI_INIT_SEQ_DATA_7);
-
- DUMP_REG(DSI_PKT_SEQ_0_LO);
- DUMP_REG(DSI_PKT_SEQ_0_HI);
- DUMP_REG(DSI_PKT_SEQ_1_LO);
- DUMP_REG(DSI_PKT_SEQ_1_HI);
- DUMP_REG(DSI_PKT_SEQ_2_LO);
- DUMP_REG(DSI_PKT_SEQ_2_HI);
- DUMP_REG(DSI_PKT_SEQ_3_LO);
- DUMP_REG(DSI_PKT_SEQ_3_HI);
- DUMP_REG(DSI_PKT_SEQ_4_LO);
- DUMP_REG(DSI_PKT_SEQ_4_HI);
- DUMP_REG(DSI_PKT_SEQ_5_LO);
- DUMP_REG(DSI_PKT_SEQ_5_HI);
-
- DUMP_REG(DSI_DCS_CMDS);
-
- DUMP_REG(DSI_PKT_LEN_0_1);
- DUMP_REG(DSI_PKT_LEN_2_3);
- DUMP_REG(DSI_PKT_LEN_4_5);
- DUMP_REG(DSI_PKT_LEN_6_7);
-
- DUMP_REG(DSI_PHY_TIMING_0);
- DUMP_REG(DSI_PHY_TIMING_1);
- DUMP_REG(DSI_PHY_TIMING_2);
- DUMP_REG(DSI_BTA_TIMING);
-
- DUMP_REG(DSI_TIMEOUT_0);
- DUMP_REG(DSI_TIMEOUT_1);
- DUMP_REG(DSI_TO_TALLY);
-
- DUMP_REG(DSI_PAD_CONTROL_0);
- DUMP_REG(DSI_PAD_CONTROL_CD);
- DUMP_REG(DSI_PAD_CD_STATUS);
- DUMP_REG(DSI_VIDEO_MODE_CONTROL);
- DUMP_REG(DSI_PAD_CONTROL_1);
- DUMP_REG(DSI_PAD_CONTROL_2);
- DUMP_REG(DSI_PAD_CONTROL_3);
- DUMP_REG(DSI_PAD_CONTROL_4);
-
- DUMP_REG(DSI_GANGED_MODE_CONTROL);
- DUMP_REG(DSI_GANGED_MODE_START);
- DUMP_REG(DSI_GANGED_MODE_SIZE);
-
- DUMP_REG(DSI_RAW_DATA_BYTE_COUNT);
- DUMP_REG(DSI_ULTRA_LOW_POWER_CONTROL);
-
- DUMP_REG(DSI_INIT_SEQ_DATA_8);
- DUMP_REG(DSI_INIT_SEQ_DATA_9);
- DUMP_REG(DSI_INIT_SEQ_DATA_10);
- DUMP_REG(DSI_INIT_SEQ_DATA_11);
- DUMP_REG(DSI_INIT_SEQ_DATA_12);
- DUMP_REG(DSI_INIT_SEQ_DATA_13);
- DUMP_REG(DSI_INIT_SEQ_DATA_14);
- DUMP_REG(DSI_INIT_SEQ_DATA_15);
-
-#undef DUMP_REG
+ for (i = 0; i < ARRAY_SIZE(tegra_dsi_regs); i++) {
+ unsigned int offset = tegra_dsi_regs[i].offset;
+
+ seq_printf(s, "%-32s %#05x %08x\n", tegra_dsi_regs[i].name,
+ offset, tegra_dsi_readl(dsi, offset));
+ }
unlock:
drm_modeset_unlock_all(drm);
@@ -234,58 +228,46 @@ static struct drm_info_list debugfs_files[] = {
{ "regs", tegra_dsi_show_regs, 0, NULL },
};
-static int tegra_dsi_debugfs_init(struct tegra_dsi *dsi,
- struct drm_minor *minor)
+static int tegra_dsi_late_register(struct drm_connector *connector)
{
- const char *name = dev_name(dsi->dev);
- unsigned int i;
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int i, count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = connector->dev->primary;
+ struct dentry *root = connector->debugfs_entry;
+ struct tegra_dsi *dsi = to_dsi(output);
int err;
- dsi->debugfs = debugfs_create_dir(name, minor->debugfs_root);
- if (!dsi->debugfs)
- return -ENOMEM;
-
dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
- if (!dsi->debugfs_files) {
- err = -ENOMEM;
- goto remove;
- }
+ if (!dsi->debugfs_files)
+ return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ for (i = 0; i < count; i++)
dsi->debugfs_files[i].data = dsi;
- err = drm_debugfs_create_files(dsi->debugfs_files,
- ARRAY_SIZE(debugfs_files),
- dsi->debugfs, minor);
+ err = drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
if (err < 0)
goto free;
- dsi->minor = minor;
-
return 0;
free:
kfree(dsi->debugfs_files);
dsi->debugfs_files = NULL;
-remove:
- debugfs_remove(dsi->debugfs);
- dsi->debugfs = NULL;
return err;
}
-static void tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
+static void tegra_dsi_early_unregister(struct drm_connector *connector)
{
- drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files),
- dsi->minor);
- dsi->minor = NULL;
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int count = ARRAY_SIZE(debugfs_files);
+ struct tegra_dsi *dsi = to_dsi(output);
+ drm_debugfs_remove_files(dsi->debugfs_files, count,
+ connector->dev->primary);
kfree(dsi->debugfs_files);
dsi->debugfs_files = NULL;
-
- debugfs_remove(dsi->debugfs);
- dsi->debugfs = NULL;
}
#define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9))
@@ -827,6 +809,8 @@ static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
.destroy = tegra_output_connector_destroy,
.atomic_duplicate_state = tegra_dsi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = tegra_dsi_late_register,
+ .early_unregister = tegra_dsi_early_unregister,
};
static enum drm_mode_status
@@ -1080,12 +1064,6 @@ static int tegra_dsi_init(struct host1x_client *client)
dsi->output.encoder.possible_crtcs = 0x3;
}
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_dsi_debugfs_init(dsi, drm->primary);
- if (err < 0)
- dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
- }
-
return 0;
}
@@ -1095,11 +1073,6 @@ static int tegra_dsi_exit(struct host1x_client *client)
tegra_output_exit(&dsi->output);
- if (IS_ENABLED(CONFIG_DEBUG_FS))
- tegra_dsi_debugfs_exit(dsi);
-
- regulator_disable(dsi->vdd);
-
return 0;
}
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 80540c1..e694349 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -54,23 +54,49 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_fb *fb = to_tegra_fb(framebuffer);
uint64_t modifier = fb->base.modifier;
- switch (fourcc_mod_tegra_mod(modifier)) {
- case NV_FORMAT_MOD_TEGRA_TILED:
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ tiling->mode = TEGRA_BO_TILING_MODE_PITCH;
+ tiling->value = 0;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED:
tiling->mode = TEGRA_BO_TILING_MODE_TILED;
tiling->value = 0;
break;
- case NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(0):
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0):
tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
- tiling->value = fourcc_mod_tegra_param(modifier);
- if (tiling->value > 5)
- return -EINVAL;
+ tiling->value = 0;
break;
- default:
- /* TODO: handle YUV formats? */
- *tiling = fb->planes[0]->tiling;
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 1;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 2;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 3;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 4;
break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 5;
+ break;
+
+ default:
+ return -EINVAL;
}
return 0;
@@ -201,12 +227,28 @@ unreference:
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
+static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct tegra_bo *bo;
+ int err;
+
+ bo = tegra_fb_get_plane(helper->fb, 0);
+
+ err = drm_gem_mmap_obj(&bo->gem, bo->gem.size, vma);
+ if (err < 0)
+ return err;
+
+ return __tegra_gem_mmap(&bo->gem, vma);
+}
+
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_mmap = tegra_fb_mmap,
};
static int tegra_fbdev_probe(struct drm_fb_helper *helper,
@@ -230,6 +272,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
cmd.height = sizes->surface_height;
cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel,
tegra->pitch_align);
+
cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
@@ -361,20 +404,6 @@ static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
drm_fb_helper_fini(&fbdev->base);
tegra_fbdev_free(fbdev);
}
-
-void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
-{
- if (fbdev)
- drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->base);
-}
-
-void tegra_fb_output_poll_changed(struct drm_device *drm)
-{
- struct tegra_drm *tegra = drm->dev_private;
-
- if (tegra->fbdev)
- drm_fb_helper_hotplug_event(&tegra->fbdev->base);
-}
#endif
int tegra_drm_fb_prepare(struct drm_device *drm)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ab1e53d..8b0b4ff 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -114,7 +114,7 @@ static const struct host1x_bo_ops tegra_bo_ops = {
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
int prot = IOMMU_READ | IOMMU_WRITE;
- ssize_t err;
+ int err;
if (bo->mm)
return -EBUSY;
@@ -128,22 +128,21 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
err = drm_mm_insert_node_generic(&tegra->mm,
bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
if (err < 0) {
- dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
+ dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
err);
goto unlock;
}
bo->paddr = bo->mm->start;
- err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
- bo->sgt->nents, prot);
- if (err < 0) {
- dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
+ bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+ bo->sgt->nents, prot);
+ if (!bo->size) {
+ dev_err(tegra->drm->dev, "failed to map buffer\n");
+ err = -ENOMEM;
goto remove;
}
- bo->size = err;
-
mutex_unlock(&tegra->mm_lock);
return 0;
@@ -204,6 +203,8 @@ free:
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
if (bo->pages) {
+ dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+ DMA_BIDIRECTIONAL);
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
sg_free_table(bo->sgt);
kfree(bo->sgt);
@@ -214,8 +215,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
- struct scatterlist *s;
- unsigned int i;
+ int err;
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
@@ -224,27 +224,26 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
bo->num_pages = bo->gem.size >> PAGE_SHIFT;
bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
- if (IS_ERR(bo->sgt))
+ if (IS_ERR(bo->sgt)) {
+ err = PTR_ERR(bo->sgt);
goto put_pages;
+ }
- /*
- * Fake up the SG table so that dma_sync_sg_for_device() can be used
- * to flush the pages associated with it.
- *
- * TODO: Replace this by drm_clflash_sg() once it can be implemented
- * without relying on symbols that are not exported.
- */
- for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
- sg_dma_address(s) = sg_phys(s);
-
- dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_TO_DEVICE);
+ err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+ DMA_BIDIRECTIONAL);
+ if (err == 0) {
+ err = -EFAULT;
+ goto free_sgt;
+ }
return 0;
+free_sgt:
+ sg_free_table(bo->sgt);
+ kfree(bo->sgt);
put_pages:
drm_gem_put_pages(&bo->gem, bo->pages, false, false);
- return PTR_ERR(bo->sgt);
+ return err;
}
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
@@ -460,8 +459,7 @@ const struct vm_operations_struct tegra_bo_vm_ops = {
.close = drm_gem_vm_close,
};
-static int tegra_gem_mmap(struct drm_gem_object *gem,
- struct vm_area_struct *vma)
+int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
struct tegra_bo *bo = to_tegra_bo(gem);
@@ -508,7 +506,7 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
gem = vma->vm_private_data;
- return tegra_gem_mmap(gem, vma);
+ return __tegra_gem_mmap(gem, vma);
}
static struct sg_table *
@@ -570,6 +568,34 @@ static void tegra_gem_prime_release(struct dma_buf *buf)
drm_gem_dmabuf_release(buf);
}
+static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
+ enum dma_data_direction direction)
+{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct drm_device *drm = gem->dev;
+
+ if (bo->pages)
+ dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+ DMA_FROM_DEVICE);
+
+ return 0;
+}
+
+static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
+ enum dma_data_direction direction)
+{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct drm_device *drm = gem->dev;
+
+ if (bo->pages)
+ dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+ DMA_TO_DEVICE);
+
+ return 0;
+}
+
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
unsigned long page)
{
@@ -601,7 +627,7 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
if (err < 0)
return err;
- return tegra_gem_mmap(gem, vma);
+ return __tegra_gem_mmap(gem, vma);
}
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
@@ -620,6 +646,8 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.map_dma_buf = tegra_gem_prime_map_dma_buf,
.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
.release = tegra_gem_prime_release,
+ .begin_cpu_access = tegra_gem_prime_begin_cpu_access,
+ .end_cpu_access = tegra_gem_prime_end_cpu_access,
.map_atomic = tegra_gem_prime_kmap_atomic,
.unmap_atomic = tegra_gem_prime_kunmap_atomic,
.map = tegra_gem_prime_kmap,
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 8eb9fd2..6bd7dd7 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -68,10 +68,11 @@ void tegra_bo_free_object(struct drm_gem_object *gem);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
-int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
-
extern const struct vm_operations_struct tegra_bo_vm_ops;
+int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma);
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
+
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
struct drm_gem_object *gem,
int flags);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 6434b3d..784739a 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -79,8 +79,6 @@ struct tegra_hdmi {
bool dvi;
struct drm_info_list *debugfs_files;
- struct drm_minor *minor;
- struct dentry *debugfs;
};
static inline struct tegra_hdmi *
@@ -910,6 +908,249 @@ tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
return status;
}
+#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
+
+static const struct debugfs_reg32 tegra_hdmi_regs[] = {
+ DEBUGFS_REG32(HDMI_CTXSW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_STATE0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_STATE1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_STATE2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AN_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AN_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CN_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CN_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CMODE),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_RI),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CS_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CS_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_HEADER),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GCP_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GCP_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GCP_SUBPACK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_EMU0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_EMU1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_EMU1_RDATA),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_SPARE),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CAP),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PWR),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_TEST),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PLL0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PLL1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PLL2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CSTM),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_LVDS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CRCA),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CRCB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_BLANK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_CTL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(0)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(1)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(2)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(3)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(4)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(5)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(6)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(7)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(8)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(9)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(10)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(11)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(12)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(13)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(14)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(15)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_VCRCA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_VCRCA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CCRCA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CCRCA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_EDATAA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_EDATAA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_COUNTA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_COUNTA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_DEBUGA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_DEBUGA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_TRIG),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_MSCHECK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_DEBUG0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_DEBUG1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_DEBUG2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(0)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(1)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(2)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(3)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(4)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(5)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(6)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_THRESHOLD),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_CNTRL0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_N),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_REFCLK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_CRC_CONTROL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_INPUT_CONTROL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SCRATCH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_PE_CURRENT),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_DEBUG0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_DEBUG1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_DEBUG2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_3),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_SKEY_INDEX),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_SPARE0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE),
+ DEBUGFS_REG32(HDMI_NV_PDISP_INT_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_INT_MASK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_INT_ENABLE),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT),
+};
+
+static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_hdmi *hdmi = node->info_ent->data;
+ struct drm_crtc *crtc = hdmi->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ unsigned int i;
+ int err = 0;
+
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_hdmi_regs); i++) {
+ unsigned int offset = tegra_hdmi_regs[i].offset;
+
+ seq_printf(s, "%-56s %#05x %08x\n", tegra_hdmi_regs[i].name,
+ offset, tegra_hdmi_readl(hdmi, offset));
+ }
+
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_hdmi_show_regs, 0, NULL },
+};
+
+static int tegra_hdmi_late_register(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int i, count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = connector->dev->primary;
+ struct dentry *root = connector->debugfs_entry;
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ int err;
+
+ hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!hdmi->debugfs_files)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++)
+ hdmi->debugfs_files[i].data = hdmi;
+
+ err = drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
+ if (err < 0)
+ goto free;
+
+ return 0;
+
+free:
+ kfree(hdmi->debugfs_files);
+ hdmi->debugfs_files = NULL;
+
+ return err;
+}
+
+static void tegra_hdmi_early_unregister(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct drm_minor *minor = connector->dev->primary;
+ unsigned int count = ARRAY_SIZE(debugfs_files);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+
+ drm_debugfs_remove_files(hdmi->debugfs_files, count, minor);
+ kfree(hdmi->debugfs_files);
+ hdmi->debugfs_files = NULL;
+}
+
static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_hdmi_connector_detect,
@@ -917,6 +1158,8 @@ static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
.destroy = tegra_output_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = tegra_hdmi_late_register,
+ .early_unregister = tegra_hdmi_early_unregister,
};
static enum drm_mode_status
@@ -1225,254 +1468,6 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
.atomic_check = tegra_hdmi_encoder_atomic_check,
};
-static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
-{
- struct drm_info_node *node = s->private;
- struct tegra_hdmi *hdmi = node->info_ent->data;
- struct drm_crtc *crtc = hdmi->output.encoder.crtc;
- struct drm_device *drm = node->minor->dev;
- int err = 0;
-
- drm_modeset_lock_all(drm);
-
- if (!crtc || !crtc->state->active) {
- err = -EBUSY;
- goto unlock;
- }
-
-#define DUMP_REG(name) \
- seq_printf(s, "%-56s %#05x %08x\n", #name, name, \
- tegra_hdmi_readl(hdmi, name))
-
- DUMP_REG(HDMI_CTXSW);
- DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
- DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
- DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
- DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
- DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
- DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
- DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
- DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
- DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
- DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
- DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
- DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
- DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
- DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
- DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
- DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
- DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL);
- DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
- DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
- DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
- DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
- DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
- DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
- DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
- DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
- DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
- DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
- DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14));
- DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15));
- DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
- DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
- DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
- DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
- DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
- DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
- DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
- DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
- DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
- DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
- DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
- DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
- DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
- DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
- DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
- DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
- DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
- DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
- DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
- DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
- DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
- DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
- DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
- DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
- DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
- DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
- DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
- DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
- DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
- DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
- DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
- DUMP_REG(HDMI_NV_PDISP_SCRATCH);
- DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
- DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
- DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
- DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
- DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
- DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
- DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
- DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
- DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
- DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
- DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
- DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
- DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
- DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
- DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1);
- DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
- DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
- DUMP_REG(HDMI_NV_PDISP_INT_STATUS);
- DUMP_REG(HDMI_NV_PDISP_INT_MASK);
- DUMP_REG(HDMI_NV_PDISP_INT_ENABLE);
- DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
-
-#undef DUMP_REG
-
-unlock:
- drm_modeset_unlock_all(drm);
- return err;
-}
-
-static struct drm_info_list debugfs_files[] = {
- { "regs", tegra_hdmi_show_regs, 0, NULL },
-};
-
-static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi,
- struct drm_minor *minor)
-{
- unsigned int i;
- int err;
-
- hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
- if (!hdmi->debugfs)
- return -ENOMEM;
-
- hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
- GFP_KERNEL);
- if (!hdmi->debugfs_files) {
- err = -ENOMEM;
- goto remove;
- }
-
- for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
- hdmi->debugfs_files[i].data = hdmi;
-
- err = drm_debugfs_create_files(hdmi->debugfs_files,
- ARRAY_SIZE(debugfs_files),
- hdmi->debugfs, minor);
- if (err < 0)
- goto free;
-
- hdmi->minor = minor;
-
- return 0;
-
-free:
- kfree(hdmi->debugfs_files);
- hdmi->debugfs_files = NULL;
-remove:
- debugfs_remove(hdmi->debugfs);
- hdmi->debugfs = NULL;
-
- return err;
-}
-
-static void tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
-{
- drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
- hdmi->minor);
- hdmi->minor = NULL;
-
- kfree(hdmi->debugfs_files);
- hdmi->debugfs_files = NULL;
-
- debugfs_remove(hdmi->debugfs);
- hdmi->debugfs = NULL;
-}
-
static int tegra_hdmi_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -1505,12 +1500,6 @@ static int tegra_hdmi_init(struct host1x_client *client)
hdmi->output.encoder.possible_crtcs = 0x3;
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
- if (err < 0)
- dev_err(client->dev, "debugfs setup failed: %d\n", err);
- }
-
err = regulator_enable(hdmi->hdmi);
if (err < 0) {
dev_err(client->dev, "failed to enable HDMI regulator: %d\n",
@@ -1543,9 +1532,6 @@ static int tegra_hdmi_exit(struct host1x_client *client)
regulator_disable(hdmi->pll);
regulator_disable(hdmi->hdmi);
- if (IS_ENABLED(CONFIG_DEBUG_FS))
- tegra_hdmi_debugfs_exit(hdmi);
-
return 0;
}
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
new file mode 100644
index 0000000..9a3f23d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -0,0 +1,913 @@
+/*
+ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "drm.h"
+#include "dc.h"
+#include "plane.h"
+
+static const u32 tegra_shared_plane_formats[] = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ /* planar formats */
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+};
+
+static const u64 tegra_shared_plane_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ DRM_FORMAT_MOD_INVALID
+};
+
+static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
+ unsigned int offset)
+{
+ if (offset >= 0x500 && offset <= 0x581) {
+ offset = 0x000 + (offset - 0x500);
+ return plane->offset + offset;
+ }
+
+ if (offset >= 0x700 && offset <= 0x73c) {
+ offset = 0x180 + (offset - 0x700);
+ return plane->offset + offset;
+ }
+
+ if (offset >= 0x800 && offset <= 0x83e) {
+ offset = 0x1c0 + (offset - 0x800);
+ return plane->offset + offset;
+ }
+
+ dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
+
+ return plane->offset + offset;
+}
+
+static inline u32 tegra_plane_readl(struct tegra_plane *plane,
+ unsigned int offset)
+{
+ return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
+}
+
+static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
+ unsigned int offset)
+{
+ tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
+}
+
+static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
+{
+ mutex_lock(&wgrp->lock);
+
+ if (wgrp->usecount == 0) {
+ pm_runtime_get_sync(wgrp->parent);
+ reset_control_deassert(wgrp->rst);
+ }
+
+ wgrp->usecount++;
+ mutex_unlock(&wgrp->lock);
+
+ return 0;
+}
+
+static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
+{
+ int err;
+
+ mutex_lock(&wgrp->lock);
+
+ if (wgrp->usecount == 1) {
+ err = reset_control_assert(wgrp->rst);
+ if (err < 0) {
+ pr_err("failed to assert reset for window group %u\n",
+ wgrp->index);
+ }
+
+ pm_runtime_put(wgrp->parent);
+ }
+
+ wgrp->usecount--;
+ mutex_unlock(&wgrp->lock);
+}
+
+int tegra_display_hub_prepare(struct tegra_display_hub *hub)
+{
+ unsigned int i;
+
+ /*
+ * XXX Enabling/disabling windowgroups needs to happen when the owner
+ * display controller is disabled. There's currently no good point at
+ * which this could be executed, so unconditionally enable all window
+ * groups for now.
+ */
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
+
+ tegra_windowgroup_enable(wgrp);
+ }
+
+ return 0;
+}
+
+void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
+{
+ unsigned int i;
+
+ /*
+	 * XXX Remove this once window groups can be enabled and disabled
+	 * at a finer granularity.
+ */
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
+
+ tegra_windowgroup_disable(wgrp);
+ }
+}
+
+static void tegra_shared_plane_update(struct tegra_plane *plane)
+{
+ struct tegra_dc *dc = plane->dc;
+ unsigned long timeout;
+ u32 mask, value;
+
+ mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
+ tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ if ((value & mask) == 0)
+ break;
+
+ usleep_range(100, 400);
+ }
+}
+
+static void tegra_shared_plane_activate(struct tegra_plane *plane)
+{
+ struct tegra_dc *dc = plane->dc;
+ unsigned long timeout;
+ u32 mask, value;
+
+ mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
+ tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ if ((value & mask) == 0)
+ break;
+
+ usleep_range(100, 400);
+ }
+}
+
+static unsigned int
+tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
+{
+ unsigned int offset =
+ tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
+
+ return tegra_dc_readl(dc, offset) & OWNER_MASK;
+}
+
+static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
+ struct tegra_plane *plane)
+{
+ struct device *dev = dc->dev;
+
+ if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
+ if (plane->dc == dc)
+ return true;
+
+ dev_WARN(dev, "head %u owns window %u but is not attached\n",
+ dc->pipe, plane->index);
+ }
+
+ return false;
+}
+
+static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
+ struct tegra_dc *new)
+{
+ unsigned int offset =
+ tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
+ struct tegra_dc *old = plane->dc, *dc = new ? new : old;
+ struct device *dev = new ? new->dev : old->dev;
+ unsigned int owner, index = plane->index;
+ u32 value;
+
+ value = tegra_dc_readl(dc, offset);
+ owner = value & OWNER_MASK;
+
+ if (new && (owner != OWNER_MASK && owner != new->pipe)) {
+ dev_WARN(dev, "window %u owned by head %u\n", index, owner);
+ return -EBUSY;
+ }
+
+ /*
+ * This seems to happen whenever the head has been disabled with one
+ * or more windows being active. This is harmless because we'll just
+ * reassign the window to the new head anyway.
+ */
+ if (old && owner == OWNER_MASK)
+ dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
+ old->pipe, owner);
+
+ value &= ~OWNER_MASK;
+
+ if (new)
+ value |= OWNER(new->pipe);
+ else
+ value |= OWNER_MASK;
+
+ tegra_dc_writel(dc, value, offset);
+
+ plane->dc = new;
+
+ return 0;
+}
+
+static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
+ struct tegra_plane *plane)
+{
+ u32 value;
+ int err;
+
+ if (!tegra_dc_owns_shared_plane(dc, plane)) {
+ err = tegra_shared_plane_set_owner(plane, dc);
+ if (err < 0)
+ return;
+ }
+
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
+ value |= MODE_FOUR_LINES;
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
+
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
+ value = SLOTS(1);
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
+
+ /* disable watermark */
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
+ value &= ~LATENCY_CTL_MODE_ENABLE;
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
+
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
+ value |= WATERMARK_MASK;
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
+
+ /* pipe meter */
+ value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
+ value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
+ tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
+
+ /* mempool entries */
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
+ value = MEMPOOL_ENTRIES(0x331);
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
+
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
+ value &= ~THREAD_NUM_MASK;
+ value |= THREAD_NUM(plane->base.index);
+ value |= THREAD_GROUP_ENABLE;
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
+
+ tegra_shared_plane_update(plane);
+ tegra_shared_plane_activate(plane);
+}
+
+static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
+ struct tegra_plane *plane)
+{
+ tegra_shared_plane_set_owner(plane, NULL);
+}
+
+static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
+ struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
+ struct tegra_bo_tiling *tiling = &plane_state->tiling;
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+ int err;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!state->crtc || !state->fb)
+ return 0;
+
+ err = tegra_plane_format(state->fb->format->format,
+ &plane_state->format,
+ &plane_state->swap);
+ if (err < 0)
+ return err;
+
+ err = tegra_fb_get_tiling(state->fb, tiling);
+ if (err < 0)
+ return err;
+
+ if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
+ !dc->soc->supports_block_linear) {
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Tegra doesn't support different strides for U and V planes so we
+ * error out if the user tries to display a framebuffer with such a
+ * configuration.
+ */
+ if (state->fb->format->num_planes > 2) {
+ if (state->fb->pitches[2] != state->fb->pitches[1]) {
+ DRM_ERROR("unsupported UV-plane configuration\n");
+ return -EINVAL;
+ }
+ }
+
+ /* XXX scaling is not yet supported, add a check here */
+
+ err = tegra_plane_state_add(&tegra->base, state);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ u32 value;
+
+	/* nothing to do if the plane is not attached to a CRTC */
+ if (!old_state || !old_state->crtc)
+ return;
+
+ /*
+ * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
+	 * on planes that are already disabled. Make sure we fall back to the
+ * head for this particular state instead of crashing.
+ */
+ if (WARN_ON(p->dc == NULL))
+ p->dc = dc;
+
+ pm_runtime_get_sync(dc->dev);
+
+ value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
+ value &= ~WIN_ENABLE;
+ tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_remove_shared_plane(dc, p);
+
+ pm_runtime_put(dc->dev);
+}
+
+static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
+ struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
+ unsigned int zpos = plane->state->normalized_zpos;
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_bo *bo;
+ dma_addr_t base;
+ u32 value;
+
+	/* nothing to do if the plane is not attached to a CRTC or has no framebuffer */
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ if (!plane->state->visible) {
+ tegra_shared_plane_atomic_disable(plane, old_state);
+ return;
+ }
+
+ pm_runtime_get_sync(dc->dev);
+
+ tegra_dc_assign_shared_plane(dc, p);
+
+ tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
+
+ /* blending */
+ value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
+ BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
+ BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
+ tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
+
+ value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
+ BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
+ BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
+ tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
+
+ value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
+ tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
+
+ /* bypass scaling */
+ value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
+ tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
+
+ value = INPUT_SCALER_VBYPASS | INPUT_SCALER_HBYPASS;
+ tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
+
+ /* disable compression */
+ tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
+
+ bo = tegra_fb_get_plane(fb, 0);
+ base = bo->paddr;
+
+ tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
+ tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
+
+ value = V_POSITION(plane->state->crtc_y) |
+ H_POSITION(plane->state->crtc_x);
+ tegra_plane_writel(p, value, DC_WIN_POSITION);
+
+ value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
+ tegra_plane_writel(p, value, DC_WIN_SIZE);
+
+ value = WIN_ENABLE | COLOR_EXPAND;
+ tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
+
+ value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
+ tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
+
+ tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
+ tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
+
+ value = PITCH(fb->pitches[0]);
+ tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
+
+ value = CLAMP_BEFORE_BLEND | DEGAMMA_SRGB | INPUT_RANGE_FULL;
+ tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
+
+ value = OFFSET_X(plane->state->src_y >> 16) |
+ OFFSET_Y(plane->state->src_x >> 16);
+ tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
+
+ if (dc->soc->supports_block_linear) {
+ unsigned long height = state->tiling.value;
+
+ /* XXX */
+ switch (state->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
+ DC_WINBUF_SURFACE_KIND_PITCH;
+ break;
+
+ /* XXX not supported on Tegra186 and later */
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WINBUF_SURFACE_KIND_TILED;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+ DC_WINBUF_SURFACE_KIND_BLOCK;
+ break;
+ }
+
+ tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
+ }
+
+ /* disable gamut CSC */
+ value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
+ value &= ~CONTROL_CSC_ENABLE;
+ tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
+
+ pm_runtime_put(dc->dev);
+}
+
+static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
+ .atomic_check = tegra_shared_plane_atomic_check,
+ .atomic_update = tegra_shared_plane_atomic_update,
+ .atomic_disable = tegra_shared_plane_atomic_disable,
+};
+
+struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc,
+ unsigned int wgrp,
+ unsigned int index)
+{
+ enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub *hub = tegra->hub;
+ /* planes can be assigned to arbitrary CRTCs */
+ unsigned int possible_crtcs = 0x7;
+ struct tegra_shared_plane *plane;
+ unsigned int num_formats;
+ const u64 *modifiers;
+ struct drm_plane *p;
+ const u32 *formats;
+ int err;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ plane->base.offset = 0x0a00 + 0x0300 * index;
+ plane->base.index = index;
+
+ plane->wgrp = &hub->wgrps[wgrp];
+ plane->wgrp->parent = dc->dev;
+
+ p = &plane->base.base;
+
+ num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
+ formats = tegra_shared_plane_formats;
+ modifiers = tegra_shared_plane_modifiers;
+
+ err = drm_universal_plane_init(drm, p, possible_crtcs,
+ &tegra_plane_funcs, formats,
+ num_formats, modifiers, type, NULL);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
+ drm_plane_create_zpos_property(p, 0, 0, 255);
+
+ return p;
+}
+
+static struct drm_private_state *
+tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
+{
+ struct tegra_display_hub_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct tegra_display_hub_state *hub_state =
+ to_tegra_display_hub_state(state);
+
+ kfree(hub_state);
+}
+
+static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
+ .atomic_duplicate_state = tegra_display_hub_duplicate_state,
+ .atomic_destroy_state = tegra_display_hub_destroy_state,
+};
+
+static struct tegra_display_hub_state *
+tegra_display_hub_get_state(struct tegra_display_hub *hub,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = dev_get_drvdata(hub->client.parent);
+ struct drm_private_state *priv;
+
+ WARN_ON(!drm_modeset_is_locked(&drm->mode_config.connection_mutex));
+
+ priv = drm_atomic_get_private_obj_state(state, &hub->base);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ return to_tegra_display_hub_state(priv);
+}
+
+int tegra_display_hub_atomic_check(struct drm_device *drm,
+ struct drm_atomic_state *state)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub_state *hub_state;
+ struct drm_crtc_state *old, *new;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ if (!tegra->hub)
+ return 0;
+
+ hub_state = tegra_display_hub_get_state(tegra->hub, state);
+ if (IS_ERR(hub_state))
+ return PTR_ERR(hub_state);
+
+ /*
+ * The display hub display clock needs to be fed by the display clock
+ * with the highest frequency to ensure proper functioning of all the
+ * displays.
+ *
+ * Note that this isn't used before Tegra186, but it doesn't hurt and
+ * conditionalizing it would make the code less clean.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
+ struct tegra_dc_state *dc = to_dc_state(new);
+
+ if (new->active) {
+ if (!hub_state->clk || dc->pclk > hub_state->rate) {
+ hub_state->dc = to_tegra_dc(dc->base.crtc);
+ hub_state->clk = hub_state->dc->clk;
+ hub_state->rate = dc->pclk;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_display_hub_update(struct tegra_dc *dc)
+{
+ u32 value;
+
+ pm_runtime_get_sync(dc->dev);
+
+ value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
+ value &= ~LATENCY_EVENT;
+ tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
+
+ value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
+ value = CURS_SLOTS(1) | WGRP_SLOTS(1);
+ tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
+
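+ /*
+ * Latch the common channel state and request activation. The
+ * read-backs presumably flush the posted register writes.
+ */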
+ tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
+ tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
+ tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+
+ pm_runtime_put(dc->dev);
+}
+
+void tegra_display_hub_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub *hub = tegra->hub;
+ struct tegra_display_hub_state *hub_state;
+ struct device *dev = hub->client.dev;
+ int err;
+
+ hub_state = tegra_display_hub_get_state(hub, state);
+
+ if (hub_state->clk) {
+ err = clk_set_rate(hub_state->clk, hub_state->rate);
+ if (err < 0)
+ dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
+ hub_state->clk, hub_state->rate);
+
+ err = clk_set_parent(hub->clk_disp, hub_state->clk);
+ if (err < 0)
+ dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
+ hub->clk_disp, hub_state->clk, err);
+ }
+
+ if (hub_state->dc)
+ tegra_display_hub_update(hub_state->dc);
+}
+
+static int tegra_display_hub_init(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&hub->base, &state->base,
+ &tegra_display_hub_state_funcs);
+
+ tegra->hub = hub;
+
+ return 0;
+}
+
+static int tegra_display_hub_exit(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->parent);
+ struct tegra_drm *tegra = drm->dev_private;
+
+ drm_atomic_private_obj_fini(&tegra->hub->base);
+ tegra->hub = NULL;
+
+ return 0;
+}
+
+static const struct host1x_client_ops tegra_display_hub_ops = {
+ .init = tegra_display_hub_init,
+ .exit = tegra_display_hub_exit,
+};
+
+static int tegra_display_hub_probe(struct platform_device *pdev)
+{
+ struct tegra_display_hub *hub;
+ unsigned int i;
+ int err;
+
+ hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ hub->soc = of_device_get_match_data(&pdev->dev);
+
+ hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
+ if (IS_ERR(hub->clk_disp)) {
+ err = PTR_ERR(hub->clk_disp);
+ return err;
+ }
+
+ hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
+ if (IS_ERR(hub->clk_dsc)) {
+ err = PTR_ERR(hub->clk_dsc);
+ return err;
+ }
+
+ hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
+ if (IS_ERR(hub->clk_hub)) {
+ err = PTR_ERR(hub->clk_hub);
+ return err;
+ }
+
+ hub->rst = devm_reset_control_get(&pdev->dev, "misc");
+ if (IS_ERR(hub->rst)) {
+ err = PTR_ERR(hub->rst);
+ return err;
+ }
+
+ hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
+ sizeof(*hub->wgrps), GFP_KERNEL);
+ if (!hub->wgrps)
+ return -ENOMEM;
+
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
+ char id[8];
+
+ snprintf(id, sizeof(id), "wgrp%u", i);
+ mutex_init(&wgrp->lock);
+ wgrp->usecount = 0;
+ wgrp->index = i;
+
+ wgrp->rst = devm_reset_control_get(&pdev->dev, id);
+ if (IS_ERR(wgrp->rst))
+ return PTR_ERR(wgrp->rst);
+
+ err = reset_control_assert(wgrp->rst);
+ if (err < 0)
+ return err;
+ }
+
+ /* XXX: enable clock across reset? */
+ err = reset_control_assert(hub->rst);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, hub);
+ pm_runtime_enable(&pdev->dev);
+
+ INIT_LIST_HEAD(&hub->client.list);
+ hub->client.ops = &tegra_display_hub_ops;
+ hub->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&hub->client);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+
+ return err;
+}
+
+static int tegra_display_hub_remove(struct platform_device *pdev)
+{
+ struct tegra_display_hub *hub = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&hub->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return err;
+}
+
+static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
+{
+ struct tegra_display_hub *hub = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(hub->rst);
+ if (err < 0)
+ return err;
+
+ clk_disable_unprepare(hub->clk_hub);
+ clk_disable_unprepare(hub->clk_dsc);
+ clk_disable_unprepare(hub->clk_disp);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_display_hub_resume(struct device *dev)
+{
+ struct tegra_display_hub *hub = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(hub->clk_disp);
+ if (err < 0)
+ return err;
+
+ err = clk_prepare_enable(hub->clk_dsc);
+ if (err < 0)
+ goto disable_disp;
+
+ err = clk_prepare_enable(hub->clk_hub);
+ if (err < 0)
+ goto disable_dsc;
+
+ err = reset_control_deassert(hub->rst);
+ if (err < 0)
+ goto disable_hub;
+
+ return 0;
+
+disable_hub:
+ clk_disable_unprepare(hub->clk_hub);
+disable_dsc:
+ clk_disable_unprepare(hub->clk_dsc);
+disable_disp:
+ clk_disable_unprepare(hub->clk_disp);
+ return err;
+}
+
+static const struct dev_pm_ops tegra_display_hub_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_display_hub_suspend,
+ tegra_display_hub_resume, NULL)
+};
+
+static const struct tegra_display_hub_soc tegra186_display_hub = {
+ .num_wgrps = 6,
+};
+
+static const struct of_device_id tegra_display_hub_of_match[] = {
+ {
+ .compatible = "nvidia,tegra186-display",
+ .data = &tegra186_display_hub
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
+
+struct platform_driver tegra_display_hub_driver = {
+ .driver = {
+ .name = "tegra-display-hub",
+ .of_match_table = tegra_display_hub_of_match,
+ .pm = &tegra_display_hub_pm_ops,
+ },
+ .probe = tegra_display_hub_probe,
+ .remove = tegra_display_hub_remove,
+};
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
new file mode 100644
index 0000000..85b8bf4
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_HUB_H
+#define TEGRA_HUB_H 1
+
+#include <drm/drmP.h>
+#include <drm/drm_plane.h>
+
+#include "plane.h"
+
+struct tegra_dc;
+
+struct tegra_windowgroup {
+ unsigned int usecount;
+ struct mutex lock;
+
+ unsigned int index;
+ struct device *parent;
+ struct reset_control *rst;
+};
+
+struct tegra_shared_plane {
+ struct tegra_plane base;
+ struct tegra_windowgroup *wgrp;
+};
+
+static inline struct tegra_shared_plane *
+to_tegra_shared_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct tegra_shared_plane, base.base);
+}
+
+struct tegra_display_hub_soc {
+ unsigned int num_wgrps;
+};
+
+struct tegra_display_hub {
+ struct drm_private_obj base;
+ struct host1x_client client;
+ struct clk *clk_disp;
+ struct clk *clk_dsc;
+ struct clk *clk_hub;
+ struct reset_control *rst;
+
+ const struct tegra_display_hub_soc *soc;
+ struct tegra_windowgroup *wgrps;
+};
+
+static inline struct tegra_display_hub *
+to_tegra_display_hub(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_display_hub, client);
+}
+
+struct tegra_display_hub_state {
+ struct drm_private_state base;
+
+ struct tegra_dc *dc;
+ unsigned long rate;
+ struct clk *clk;
+};
+
+static inline struct tegra_display_hub_state *
+to_tegra_display_hub_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct tegra_display_hub_state, base);
+}
+
+struct tegra_dc;
+struct tegra_plane;
+
+int tegra_display_hub_prepare(struct tegra_display_hub *hub);
+void tegra_display_hub_cleanup(struct tegra_display_hub *hub);
+
+struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc,
+ unsigned int wgrp,
+ unsigned int index);
+
+int tegra_display_hub_atomic_check(struct drm_device *drm,
+ struct drm_atomic_state *state);
+void tegra_display_hub_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state);
+
+#define DC_CMD_IHUB_COMMON_MISC_CTL 0x068
+#define LATENCY_EVENT (1 << 3)
+
+#define DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER 0x451
+#define CURS_SLOTS(x) (((x) & 0xff) << 8)
+#define WGRP_SLOTS(x) (((x) & 0xff) << 0)
+
+#endif /* TEGRA_HUB_H */
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 1cfbace..ffe34bd 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -9,7 +9,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
+
#include "drm.h"
+#include "dc.h"
#include <media/cec-notifier.h>
@@ -39,7 +41,6 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
if (edid) {
err = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
kfree(edid);
}
@@ -219,3 +220,25 @@ void tegra_output_exit(struct tegra_output *output)
if (output->panel)
drm_panel_detach(output->panel);
}
+
+void tegra_output_find_possible_crtcs(struct tegra_output *output,
+ struct drm_device *drm)
+{
+ struct device *dev = output->dev;
+ struct drm_crtc *crtc;
+ unsigned int mask = 0;
+
+ drm_for_each_crtc(crtc, drm) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (tegra_dc_has_output(dc, dev))
+ mask |= drm_crtc_mask(crtc);
+ }
+
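+ /*
+ * If the DT doesn't say which heads this output can attach to,
+ * fall back to the first two heads.
+ */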
+ if (mask == 0) {
+ dev_warn(dev, "missing output definition for heads in DT\n");
+ mask = 0x3;
+ }
+
+ output->encoder.possible_crtcs = mask;
+}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
new file mode 100644
index 0000000..176ef46
--- /dev/null
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "dc.h"
+#include "plane.h"
+
+static void tegra_plane_destroy(struct drm_plane *plane)
+{
+ struct tegra_plane *p = to_tegra_plane(plane);
+
+ drm_plane_cleanup(plane);
+ kfree(p);
+}
+
+static void tegra_plane_reset(struct drm_plane *plane)
+{
+ struct tegra_plane_state *state;
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
+ plane->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ plane->state = &state->base;
+ plane->state->plane = plane;
+ }
+}
+
+static struct drm_plane_state *
+tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
+ struct tegra_plane_state *copy;
+ unsigned int i;
+
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+ copy->tiling = state->tiling;
+ copy->format = state->format;
+ copy->swap = state->swap;
+ copy->opaque = state->opaque;
+
+ for (i = 0; i < 3; i++)
+ copy->dependent[i] = state->dependent[i];
+
+ return &copy->base;
+}
+
+static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(state);
+}
+
+static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ const struct drm_format_info *info = drm_format_info(format);
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (info->num_planes == 1)
+ return true;
+
+ return false;
+}
+
+const struct drm_plane_funcs tegra_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = tegra_plane_destroy,
+ .reset = tegra_plane_reset,
+ .atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
+ .atomic_destroy_state = tegra_plane_atomic_destroy_state,
+ .format_mod_supported = tegra_plane_format_mod_supported,
+};
+
+int tegra_plane_state_add(struct tegra_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct tegra_dc_state *tegra;
+ int err;
+
+ /* Propagate errors from allocation or locking failures. */
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ /* Check plane state for visibility and calculate clipping bounds */
+ err = drm_atomic_helper_check_plane_state(state, crtc_state,
+ 0, INT_MAX, true, true);
+ if (err < 0)
+ return err;
+
+ tegra = to_dc_state(crtc_state);
+
+ tegra->planes |= WIN_A_ACT_REQ << plane->index;
+
+ return 0;
+}
+
+int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
+{
+ /* assume no swapping of fetched data */
+ if (swap)
+ *swap = BYTE_SWAP_NOSWAP;
+
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB4444:
+ *format = WIN_COLOR_DEPTH_B4G4R4A4;
+ break;
+
+ case DRM_FORMAT_ARGB1555:
+ *format = WIN_COLOR_DEPTH_B5G5R5A1;
+ break;
+
+ case DRM_FORMAT_RGB565:
+ *format = WIN_COLOR_DEPTH_B5G6R5;
+ break;
+
+ case DRM_FORMAT_RGBA5551:
+ *format = WIN_COLOR_DEPTH_A1B5G5R5;
+ break;
+
+ case DRM_FORMAT_ARGB8888:
+ *format = WIN_COLOR_DEPTH_B8G8R8A8;
+ break;
+
+ case DRM_FORMAT_ABGR8888:
+ *format = WIN_COLOR_DEPTH_R8G8B8A8;
+ break;
+
+ case DRM_FORMAT_ABGR4444:
+ *format = WIN_COLOR_DEPTH_R4G4B4A4;
+ break;
+
+ case DRM_FORMAT_ABGR1555:
+ *format = WIN_COLOR_DEPTH_R5G5B5A;
+ break;
+
+ case DRM_FORMAT_BGRA5551:
+ *format = WIN_COLOR_DEPTH_AR5G5B5;
+ break;
+
+ case DRM_FORMAT_XRGB1555:
+ *format = WIN_COLOR_DEPTH_B5G5R5X1;
+ break;
+
+ case DRM_FORMAT_RGBX5551:
+ *format = WIN_COLOR_DEPTH_X1B5G5R5;
+ break;
+
+ case DRM_FORMAT_XBGR1555:
+ *format = WIN_COLOR_DEPTH_R5G5B5X1;
+ break;
+
+ case DRM_FORMAT_BGRX5551:
+ *format = WIN_COLOR_DEPTH_X1R5G5B5;
+ break;
+
+ case DRM_FORMAT_BGR565:
+ *format = WIN_COLOR_DEPTH_R5G6B5;
+ break;
+
+ case DRM_FORMAT_BGRA8888:
+ *format = WIN_COLOR_DEPTH_A8R8G8B8;
+ break;
+
+ case DRM_FORMAT_RGBA8888:
+ *format = WIN_COLOR_DEPTH_A8B8G8R8;
+ break;
+
+ case DRM_FORMAT_XRGB8888:
+ *format = WIN_COLOR_DEPTH_B8G8R8X8;
+ break;
+
+ case DRM_FORMAT_XBGR8888:
+ *format = WIN_COLOR_DEPTH_R8G8B8X8;
+ break;
+
+ case DRM_FORMAT_UYVY:
+ *format = WIN_COLOR_DEPTH_YCbCr422;
+ break;
+
+ case DRM_FORMAT_YUYV:
+ if (!swap)
+ return -EINVAL;
+
+ *format = WIN_COLOR_DEPTH_YCbCr422;
+ *swap = BYTE_SWAP_SWAP2;
+ break;
+
+ case DRM_FORMAT_YUV420:
+ *format = WIN_COLOR_DEPTH_YCbCr420P;
+ break;
+
+ case DRM_FORMAT_YUV422:
+ *format = WIN_COLOR_DEPTH_YCbCr422P;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+bool tegra_plane_format_is_yuv(unsigned int format, bool *planar)
+{
+ switch (format) {
+ case WIN_COLOR_DEPTH_YCbCr422:
+ case WIN_COLOR_DEPTH_YUV422:
+ if (planar)
+ *planar = false;
+
+ return true;
+
+ case WIN_COLOR_DEPTH_YCbCr420P:
+ case WIN_COLOR_DEPTH_YUV420P:
+ case WIN_COLOR_DEPTH_YCbCr422P:
+ case WIN_COLOR_DEPTH_YUV422P:
+ case WIN_COLOR_DEPTH_YCbCr422R:
+ case WIN_COLOR_DEPTH_YUV422R:
+ case WIN_COLOR_DEPTH_YCbCr422RA:
+ case WIN_COLOR_DEPTH_YUV422RA:
+ if (planar)
+ *planar = true;
+
+ return true;
+ }
+
+ if (planar)
+ *planar = false;
+
+ return false;
+}
+
+static bool __drm_format_has_alpha(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * This is applicable to Tegra20 and Tegra30 only, where the opaque formats can
+ * be emulated using the alpha formats with alpha blending disabled.
+ */
+bool tegra_plane_format_has_alpha(unsigned int format)
+{
+ switch (format) {
+ case WIN_COLOR_DEPTH_B5G5R5A1:
+ case WIN_COLOR_DEPTH_A1B5G5R5:
+ case WIN_COLOR_DEPTH_R8G8B8A8:
+ case WIN_COLOR_DEPTH_B8G8R8A8:
+ return true;
+ }
+
+ return false;
+}
+
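+ /*
+ * Look up the alpha-capable (A) equivalent of an opaque (X) window format.
+ * Formats without an X/A distinction (YUV, RGB565) are passed through
+ * unchanged; everything else is rejected.
+ */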
+int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
+{
+ if (tegra_plane_format_is_yuv(opaque, NULL)) {
+ *alpha = opaque;
+ return 0;
+ }
+
+ switch (opaque) {
+ case WIN_COLOR_DEPTH_B5G5R5X1:
+ *alpha = WIN_COLOR_DEPTH_B5G5R5A1;
+ return 0;
+
+ case WIN_COLOR_DEPTH_X1B5G5R5:
+ *alpha = WIN_COLOR_DEPTH_A1B5G5R5;
+ return 0;
+
+ case WIN_COLOR_DEPTH_R8G8B8X8:
+ *alpha = WIN_COLOR_DEPTH_R8G8B8A8;
+ return 0;
+
+ case WIN_COLOR_DEPTH_B8G8R8X8:
+ *alpha = WIN_COLOR_DEPTH_B8G8R8A8;
+ return 0;
+
+ case WIN_COLOR_DEPTH_B5G6R5:
+ *alpha = opaque;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
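+ /*
+ * Map @other onto a slot in @plane's dependent[] array: the two other
+ * windows that can overlap @plane occupy slots 0 and 1 (slot 2 is the
+ * region where all three windows overlap).
+ */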
+static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
+ struct tegra_plane *other)
+{
+ unsigned int index = 0, i;
+
+ WARN_ON(plane == other);
+
+ for (i = 0; i < 3; i++) {
+ if (i == plane->index)
+ continue;
+
+ if (i == other->index)
+ break;
+
+ index++;
+ }
+
+ return index;
+}
+
+void tegra_plane_check_dependent(struct tegra_plane *tegra,
+ struct tegra_plane_state *state)
+{
+ struct drm_plane_state *old, *new;
+ struct drm_plane *plane;
+ unsigned int zpos[2];
+ unsigned int i;
+
+ for (i = 0; i < 2; i++)
+ zpos[i] = 0;
+
+ for_each_oldnew_plane_in_state(state->base.state, plane, old, new, i) {
+ struct tegra_plane *p = to_tegra_plane(plane);
+ unsigned int index;
+
+ /* skip this plane and planes on different CRTCs */
+ if (p == tegra || new->crtc != state->base.crtc)
+ continue;
+
+ index = tegra_plane_get_overlap_index(tegra, p);
+
+ state->dependent[index] = false;
+
+ /*
+ * If any of the other planes is on top of this plane and uses
+ * a format with an alpha component, mark this plane as being
+ * dependent, meaning its alpha value will be 1 minus the sum
+ * of the alpha components of the overlapping planes.
+ */
+ if (p->index > tegra->index) {
+ if (__drm_format_has_alpha(new->fb->format->format))
+ state->dependent[index] = true;
+
+ /* keep track of the Z position */
+ zpos[index] = p->index;
+ }
+ }
+
+ /*
+ * The region where three windows overlap is the intersection of the
+ * two regions where two windows overlap. It contributes to the area
+ * if any of the windows on top of it have an alpha component.
+ */
+ for (i = 0; i < 2; i++)
+ state->dependent[2] = state->dependent[2] ||
+ state->dependent[i];
+
+ /*
+ * However, if any of the windows on top of this window is opaque, it
+ * will completely conceal this window within that area, so prevent
+ * this window from contributing to the area.
+ */
+ for (i = 0; i < 2; i++) {
+ if (zpos[i] > tegra->index)
+ state->dependent[2] = state->dependent[2] &&
+ state->dependent[i];
+ }
+}
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
new file mode 100644
index 0000000..6938719
--- /dev/null
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_PLANE_H
+#define TEGRA_PLANE_H 1
+
+#include <drm/drm_plane.h>
+
+struct tegra_bo;
+struct tegra_dc;
+
+struct tegra_plane {
+ struct drm_plane base;
+ struct tegra_dc *dc;
+ unsigned int offset;
+ unsigned int index;
+};
+
+struct tegra_cursor {
+ struct tegra_plane base;
+
+ struct tegra_bo *bo;
+ unsigned int width;
+ unsigned int height;
+};
+
+static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct tegra_plane, base);
+}
+
+struct tegra_plane_state {
+ struct drm_plane_state base;
+
+ struct tegra_bo_tiling tiling;
+ u32 format;
+ u32 swap;
+
+ /* used for legacy blending support only */
+ bool opaque;
+ bool dependent[3];
+};
+
+static inline struct tegra_plane_state *
+to_tegra_plane_state(struct drm_plane_state *state)
+{
+ if (state)
+ return container_of(state, struct tegra_plane_state, base);
+
+ return NULL;
+}
+
+extern const struct drm_plane_funcs tegra_plane_funcs;
+
+int tegra_plane_state_add(struct tegra_plane *plane,
+ struct drm_plane_state *state);
+
+int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap);
+bool tegra_plane_format_is_yuv(unsigned int format, bool *planar);
+bool tegra_plane_format_has_alpha(unsigned int format);
+int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha);
+void tegra_plane_check_dependent(struct tegra_plane *tegra,
+ struct tegra_plane_state *state);
+
+#endif /* TEGRA_PLANE_H */
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 476079f..7d2a955 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -22,23 +22,37 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_scdc_helper.h>
#include "dc.h"
#include "drm.h"
#include "sor.h"
#include "trace.h"
+/*
+ * XXX Remove this after the commit adding it to soc/tegra/pmc.h has been
+ * merged. Having this around after the commit is merged should be safe since
+ * redefining a macro with an identical value is allowed and will not cause a
+ * duplicate definition warning.
+ */
+#define TEGRA_IO_PAD_HDMI_DP0 26
+
#define SOR_REKEY 0x38
struct tegra_sor_hdmi_settings {
unsigned long frequency;
u8 vcocap;
+ u8 filter;
u8 ichpmp;
u8 loadadj;
- u8 termadj;
- u8 tx_pu;
- u8 bg_vref;
+ u8 tmds_termadj;
+ u8 tx_pu_value;
+ u8 bg_temp_coef;
+ u8 bg_vref_level;
+ u8 avdd10_level;
+ u8 avdd14_level;
+ u8 sparepll;
u8 drive_current[4];
u8 preemphasis[4];
@@ -49,51 +63,76 @@ static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
{
.frequency = 54000000,
.vcocap = 0x0,
+ .filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
- .termadj = 0x9,
- .tx_pu = 0x10,
- .bg_vref = 0x8,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x10,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 75000000,
.vcocap = 0x3,
+ .filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
- .termadj = 0x9,
- .tx_pu = 0x40,
- .bg_vref = 0x8,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x40,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 150000000,
.vcocap = 0x3,
+ .filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
- .termadj = 0x9,
- .tx_pu = 0x66,
- .bg_vref = 0x8,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 300000000,
.vcocap = 0x3,
+ .filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
- .termadj = 0x9,
- .tx_pu = 0x66,
- .bg_vref = 0xa,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0xa,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
.preemphasis = { 0x00, 0x17, 0x17, 0x17 },
}, {
.frequency = 600000000,
.vcocap = 0x3,
+ .filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
- .termadj = 0x9,
- .tx_pu = 0x66,
- .bg_vref = 0x8,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
},
@@ -103,53 +142,170 @@ static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
{
.frequency = 75000000,
.vcocap = 0x3,
+ .filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
- .termadj = 0x9,
- .tx_pu = 0x40,
- .bg_vref = 0x8,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x40,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
.drive_current = { 0x29, 0x29, 0x29, 0x29 },
.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
}, {
.frequency = 150000000,
.vcocap = 0x3,
+ .filter = 0x0,
.ichpmp = 0x1,
.loadadj = 0x3,
- .termadj = 0x9,
- .tx_pu = 0x66,
- .bg_vref = 0x8,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
.drive_current = { 0x30, 0x37, 0x37, 0x37 },
.preemphasis = { 0x01, 0x02, 0x02, 0x02 },
}, {
.frequency = 300000000,
.vcocap = 0x3,
+ .filter = 0x0,
.ichpmp = 0x6,
.loadadj = 0x3,
- .termadj = 0x9,
- .tx_pu = 0x66,
- .bg_vref = 0xf,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0xf,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
.drive_current = { 0x30, 0x37, 0x37, 0x37 },
.preemphasis = { 0x10, 0x3e, 0x3e, 0x3e },
}, {
.frequency = 600000000,
.vcocap = 0x3,
+ .filter = 0x0,
.ichpmp = 0xa,
.loadadj = 0x3,
- .termadj = 0xb,
- .tx_pu = 0x66,
- .bg_vref = 0xe,
+ .tmds_termadj = 0xb,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0xe,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
.drive_current = { 0x35, 0x3e, 0x3e, 0x3e },
.preemphasis = { 0x02, 0x3f, 0x3f, 0x3f },
},
};
#endif
+static const struct tegra_sor_hdmi_settings tegra186_sor_hdmi_defaults[] = {
+ {
+ .frequency = 54000000,
+ .vcocap = 0,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 0xf,
+ .tx_pu_value = 0,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x54,
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 75000000,
+ .vcocap = 1,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 0xf,
+ .tx_pu_value = 0,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x44,
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 15,
+ .tx_pu_value = 0x66 /* 0 */,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x00, /* 0x34 */
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x37 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 15,
+ .tx_pu_value = 64,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x34,
+ .drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 12,
+ .tx_pu_value = 96,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x34,
+ .drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }
+};
+
+struct tegra_sor_regs {
+ unsigned int head_state0;
+ unsigned int head_state1;
+ unsigned int head_state2;
+ unsigned int head_state3;
+ unsigned int head_state4;
+ unsigned int head_state5;
+ unsigned int pll0;
+ unsigned int pll1;
+ unsigned int pll2;
+ unsigned int pll3;
+ unsigned int dp_padctl0;
+ unsigned int dp_padctl2;
+};
+
struct tegra_sor_soc {
bool supports_edp;
bool supports_lvds;
bool supports_hdmi;
bool supports_dp;
+ const struct tegra_sor_regs *regs;
+ bool has_nvdisplay;
+
const struct tegra_sor_hdmi_settings *settings;
unsigned int num_settings;
@@ -171,6 +327,7 @@ struct tegra_sor {
const struct tegra_sor_soc *soc;
void __iomem *regs;
+ unsigned int index;
struct reset_control *rst;
struct clk *clk_parent;
@@ -183,10 +340,9 @@ struct tegra_sor {
struct drm_dp_aux *aux;
struct drm_info_list *debugfs_files;
- struct drm_minor *minor;
- struct dentry *debugfs;
const struct tegra_sor_ops *ops;
+ enum tegra_io_pad pad;
/* for HDMI 2.0 */
struct tegra_sor_hdmi_settings *settings;
@@ -195,11 +351,16 @@ struct tegra_sor {
struct regulator *avdd_io_supply;
struct regulator *vdd_pll_supply;
struct regulator *hdmi_supply;
+
+ struct delayed_work scdc;
+ bool scdc_enabled;
};
struct tegra_sor_state {
struct drm_connector_state base;
+ unsigned int link_speed;
+ unsigned long pclk;
unsigned int bpc;
};
@@ -389,23 +550,23 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
/* disable LVDS mode */
tegra_sor_writel(sor, 0, SOR_LVDS);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
usleep_range(10, 100);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
if (err < 0)
@@ -465,47 +626,6 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
return 0;
}
-static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
-{
- u32 mask = 0x08, adj = 0, value;
-
- /* enable pad calibration logic */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
- value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
-
- value = tegra_sor_readl(sor, SOR_PLL1);
- value |= SOR_PLL1_TMDS_TERM;
- tegra_sor_writel(sor, value, SOR_PLL1);
-
- while (mask) {
- adj |= mask;
-
- value = tegra_sor_readl(sor, SOR_PLL1);
- value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
- value |= SOR_PLL1_TMDS_TERMADJ(adj);
- tegra_sor_writel(sor, value, SOR_PLL1);
-
- usleep_range(100, 200);
-
- value = tegra_sor_readl(sor, SOR_PLL1);
- if (value & SOR_PLL1_TERM_COMPOUT)
- adj &= ~mask;
-
- mask >>= 1;
- }
-
- value = tegra_sor_readl(sor, SOR_PLL1);
- value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
- value |= SOR_PLL1_TMDS_TERMADJ(adj);
- tegra_sor_writel(sor, value, SOR_PLL1);
-
- /* disable pad calibration logic */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
- value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
-}
-
static void tegra_sor_super_update(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
@@ -897,31 +1017,31 @@ static void tegra_sor_mode_set(struct tegra_sor *sor,
*/
value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state1 + dc->pipe);
/* sync end = sync width - 1 */
vse = mode->vsync_end - mode->vsync_start - 1;
hse = mode->hsync_end - mode->hsync_start - 1;
value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state2 + dc->pipe);
/* blank end = sync end + back porch */
vbe = vse + (mode->vtotal - mode->vsync_end);
hbe = hse + (mode->htotal - mode->hsync_end);
value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state3 + dc->pipe);
/* blank start = blank end + active */
vbs = vbe + mode->vdisplay;
hbs = hbe + mode->hdisplay;
value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state4 + dc->pipe);
/* XXX interlacing support */
- tegra_sor_writel(sor, 0x001, SOR_HEAD_STATE5(dc->pipe));
+ tegra_sor_writel(sor, 0x001, sor->soc->regs->head_state5 + dc->pipe);
}
static int tegra_sor_detach(struct tegra_sor *sor)
@@ -1003,10 +1123,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
return err;
}
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* stop lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
@@ -1026,20 +1146,20 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
return -ETIMEDOUT;
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
- value = tegra_sor_readl(sor, SOR_PLL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, SOR_PLL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_SEQ_PLLCAPPD;
value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
@@ -1105,12 +1225,133 @@ unlock:
return err;
}
+#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
+
+static const struct debugfs_reg32 tegra_sor_regs[] = {
+ DEBUGFS_REG32(SOR_CTXSW),
+ DEBUGFS_REG32(SOR_SUPER_STATE0),
+ DEBUGFS_REG32(SOR_SUPER_STATE1),
+ DEBUGFS_REG32(SOR_STATE0),
+ DEBUGFS_REG32(SOR_STATE1),
+ DEBUGFS_REG32(SOR_HEAD_STATE0(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE0(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE1(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE1(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE2(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE2(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE3(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE3(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE4(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE4(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE5(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE5(1)),
+ DEBUGFS_REG32(SOR_CRC_CNTRL),
+ DEBUGFS_REG32(SOR_DP_DEBUG_MVID),
+ DEBUGFS_REG32(SOR_CLK_CNTRL),
+ DEBUGFS_REG32(SOR_CAP),
+ DEBUGFS_REG32(SOR_PWR),
+ DEBUGFS_REG32(SOR_TEST),
+ DEBUGFS_REG32(SOR_PLL0),
+ DEBUGFS_REG32(SOR_PLL1),
+ DEBUGFS_REG32(SOR_PLL2),
+ DEBUGFS_REG32(SOR_PLL3),
+ DEBUGFS_REG32(SOR_CSTM),
+ DEBUGFS_REG32(SOR_LVDS),
+ DEBUGFS_REG32(SOR_CRCA),
+ DEBUGFS_REG32(SOR_CRCB),
+ DEBUGFS_REG32(SOR_BLANK),
+ DEBUGFS_REG32(SOR_SEQ_CTL),
+ DEBUGFS_REG32(SOR_LANE_SEQ_CTL),
+ DEBUGFS_REG32(SOR_SEQ_INST(0)),
+ DEBUGFS_REG32(SOR_SEQ_INST(1)),
+ DEBUGFS_REG32(SOR_SEQ_INST(2)),
+ DEBUGFS_REG32(SOR_SEQ_INST(3)),
+ DEBUGFS_REG32(SOR_SEQ_INST(4)),
+ DEBUGFS_REG32(SOR_SEQ_INST(5)),
+ DEBUGFS_REG32(SOR_SEQ_INST(6)),
+ DEBUGFS_REG32(SOR_SEQ_INST(7)),
+ DEBUGFS_REG32(SOR_SEQ_INST(8)),
+ DEBUGFS_REG32(SOR_SEQ_INST(9)),
+ DEBUGFS_REG32(SOR_SEQ_INST(10)),
+ DEBUGFS_REG32(SOR_SEQ_INST(11)),
+ DEBUGFS_REG32(SOR_SEQ_INST(12)),
+ DEBUGFS_REG32(SOR_SEQ_INST(13)),
+ DEBUGFS_REG32(SOR_SEQ_INST(14)),
+ DEBUGFS_REG32(SOR_SEQ_INST(15)),
+ DEBUGFS_REG32(SOR_PWM_DIV),
+ DEBUGFS_REG32(SOR_PWM_CTL),
+ DEBUGFS_REG32(SOR_VCRC_A0),
+ DEBUGFS_REG32(SOR_VCRC_A1),
+ DEBUGFS_REG32(SOR_VCRC_B0),
+ DEBUGFS_REG32(SOR_VCRC_B1),
+ DEBUGFS_REG32(SOR_CCRC_A0),
+ DEBUGFS_REG32(SOR_CCRC_A1),
+ DEBUGFS_REG32(SOR_CCRC_B0),
+ DEBUGFS_REG32(SOR_CCRC_B1),
+ DEBUGFS_REG32(SOR_EDATA_A0),
+ DEBUGFS_REG32(SOR_EDATA_A1),
+ DEBUGFS_REG32(SOR_EDATA_B0),
+ DEBUGFS_REG32(SOR_EDATA_B1),
+ DEBUGFS_REG32(SOR_COUNT_A0),
+ DEBUGFS_REG32(SOR_COUNT_A1),
+ DEBUGFS_REG32(SOR_COUNT_B0),
+ DEBUGFS_REG32(SOR_COUNT_B1),
+ DEBUGFS_REG32(SOR_DEBUG_A0),
+ DEBUGFS_REG32(SOR_DEBUG_A1),
+ DEBUGFS_REG32(SOR_DEBUG_B0),
+ DEBUGFS_REG32(SOR_DEBUG_B1),
+ DEBUGFS_REG32(SOR_TRIG),
+ DEBUGFS_REG32(SOR_MSCHECK),
+ DEBUGFS_REG32(SOR_XBAR_CTRL),
+ DEBUGFS_REG32(SOR_XBAR_POL),
+ DEBUGFS_REG32(SOR_DP_LINKCTL0),
+ DEBUGFS_REG32(SOR_DP_LINKCTL1),
+ DEBUGFS_REG32(SOR_LANE_DRIVE_CURRENT0),
+ DEBUGFS_REG32(SOR_LANE_DRIVE_CURRENT1),
+ DEBUGFS_REG32(SOR_LANE4_DRIVE_CURRENT0),
+ DEBUGFS_REG32(SOR_LANE4_DRIVE_CURRENT1),
+ DEBUGFS_REG32(SOR_LANE_PREEMPHASIS0),
+ DEBUGFS_REG32(SOR_LANE_PREEMPHASIS1),
+ DEBUGFS_REG32(SOR_LANE4_PREEMPHASIS0),
+ DEBUGFS_REG32(SOR_LANE4_PREEMPHASIS1),
+ DEBUGFS_REG32(SOR_LANE_POSTCURSOR0),
+ DEBUGFS_REG32(SOR_LANE_POSTCURSOR1),
+ DEBUGFS_REG32(SOR_DP_CONFIG0),
+ DEBUGFS_REG32(SOR_DP_CONFIG1),
+ DEBUGFS_REG32(SOR_DP_MN0),
+ DEBUGFS_REG32(SOR_DP_MN1),
+ DEBUGFS_REG32(SOR_DP_PADCTL0),
+ DEBUGFS_REG32(SOR_DP_PADCTL1),
+ DEBUGFS_REG32(SOR_DP_PADCTL2),
+ DEBUGFS_REG32(SOR_DP_DEBUG0),
+ DEBUGFS_REG32(SOR_DP_DEBUG1),
+ DEBUGFS_REG32(SOR_DP_SPARE0),
+ DEBUGFS_REG32(SOR_DP_SPARE1),
+ DEBUGFS_REG32(SOR_DP_AUDIO_CTRL),
+ DEBUGFS_REG32(SOR_DP_AUDIO_HBLANK_SYMBOLS),
+ DEBUGFS_REG32(SOR_DP_AUDIO_VBLANK_SYMBOLS),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_HEADER),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK0),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK1),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK2),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK3),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK4),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK5),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK6),
+ DEBUGFS_REG32(SOR_DP_TPG),
+ DEBUGFS_REG32(SOR_DP_TPG_CONFIG),
+ DEBUGFS_REG32(SOR_DP_LQ_CSTM0),
+ DEBUGFS_REG32(SOR_DP_LQ_CSTM1),
+ DEBUGFS_REG32(SOR_DP_LQ_CSTM2),
+};
+
static int tegra_sor_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_sor *sor = node->info_ent->data;
struct drm_crtc *crtc = sor->output.encoder.crtc;
struct drm_device *drm = node->minor->dev;
+ unsigned int i;
int err = 0;
drm_modeset_lock_all(drm);
@@ -1120,126 +1361,12 @@ static int tegra_sor_show_regs(struct seq_file *s, void *data)
goto unlock;
}
-#define DUMP_REG(name) \
- seq_printf(s, "%-38s %#05x %08x\n", #name, name, \
- tegra_sor_readl(sor, name))
-
- DUMP_REG(SOR_CTXSW);
- DUMP_REG(SOR_SUPER_STATE0);
- DUMP_REG(SOR_SUPER_STATE1);
- DUMP_REG(SOR_STATE0);
- DUMP_REG(SOR_STATE1);
- DUMP_REG(SOR_HEAD_STATE0(0));
- DUMP_REG(SOR_HEAD_STATE0(1));
- DUMP_REG(SOR_HEAD_STATE1(0));
- DUMP_REG(SOR_HEAD_STATE1(1));
- DUMP_REG(SOR_HEAD_STATE2(0));
- DUMP_REG(SOR_HEAD_STATE2(1));
- DUMP_REG(SOR_HEAD_STATE3(0));
- DUMP_REG(SOR_HEAD_STATE3(1));
- DUMP_REG(SOR_HEAD_STATE4(0));
- DUMP_REG(SOR_HEAD_STATE4(1));
- DUMP_REG(SOR_HEAD_STATE5(0));
- DUMP_REG(SOR_HEAD_STATE5(1));
- DUMP_REG(SOR_CRC_CNTRL);
- DUMP_REG(SOR_DP_DEBUG_MVID);
- DUMP_REG(SOR_CLK_CNTRL);
- DUMP_REG(SOR_CAP);
- DUMP_REG(SOR_PWR);
- DUMP_REG(SOR_TEST);
- DUMP_REG(SOR_PLL0);
- DUMP_REG(SOR_PLL1);
- DUMP_REG(SOR_PLL2);
- DUMP_REG(SOR_PLL3);
- DUMP_REG(SOR_CSTM);
- DUMP_REG(SOR_LVDS);
- DUMP_REG(SOR_CRCA);
- DUMP_REG(SOR_CRCB);
- DUMP_REG(SOR_BLANK);
- DUMP_REG(SOR_SEQ_CTL);
- DUMP_REG(SOR_LANE_SEQ_CTL);
- DUMP_REG(SOR_SEQ_INST(0));
- DUMP_REG(SOR_SEQ_INST(1));
- DUMP_REG(SOR_SEQ_INST(2));
- DUMP_REG(SOR_SEQ_INST(3));
- DUMP_REG(SOR_SEQ_INST(4));
- DUMP_REG(SOR_SEQ_INST(5));
- DUMP_REG(SOR_SEQ_INST(6));
- DUMP_REG(SOR_SEQ_INST(7));
- DUMP_REG(SOR_SEQ_INST(8));
- DUMP_REG(SOR_SEQ_INST(9));
- DUMP_REG(SOR_SEQ_INST(10));
- DUMP_REG(SOR_SEQ_INST(11));
- DUMP_REG(SOR_SEQ_INST(12));
- DUMP_REG(SOR_SEQ_INST(13));
- DUMP_REG(SOR_SEQ_INST(14));
- DUMP_REG(SOR_SEQ_INST(15));
- DUMP_REG(SOR_PWM_DIV);
- DUMP_REG(SOR_PWM_CTL);
- DUMP_REG(SOR_VCRC_A0);
- DUMP_REG(SOR_VCRC_A1);
- DUMP_REG(SOR_VCRC_B0);
- DUMP_REG(SOR_VCRC_B1);
- DUMP_REG(SOR_CCRC_A0);
- DUMP_REG(SOR_CCRC_A1);
- DUMP_REG(SOR_CCRC_B0);
- DUMP_REG(SOR_CCRC_B1);
- DUMP_REG(SOR_EDATA_A0);
- DUMP_REG(SOR_EDATA_A1);
- DUMP_REG(SOR_EDATA_B0);
- DUMP_REG(SOR_EDATA_B1);
- DUMP_REG(SOR_COUNT_A0);
- DUMP_REG(SOR_COUNT_A1);
- DUMP_REG(SOR_COUNT_B0);
- DUMP_REG(SOR_COUNT_B1);
- DUMP_REG(SOR_DEBUG_A0);
- DUMP_REG(SOR_DEBUG_A1);
- DUMP_REG(SOR_DEBUG_B0);
- DUMP_REG(SOR_DEBUG_B1);
- DUMP_REG(SOR_TRIG);
- DUMP_REG(SOR_MSCHECK);
- DUMP_REG(SOR_XBAR_CTRL);
- DUMP_REG(SOR_XBAR_POL);
- DUMP_REG(SOR_DP_LINKCTL0);
- DUMP_REG(SOR_DP_LINKCTL1);
- DUMP_REG(SOR_LANE_DRIVE_CURRENT0);
- DUMP_REG(SOR_LANE_DRIVE_CURRENT1);
- DUMP_REG(SOR_LANE4_DRIVE_CURRENT0);
- DUMP_REG(SOR_LANE4_DRIVE_CURRENT1);
- DUMP_REG(SOR_LANE_PREEMPHASIS0);
- DUMP_REG(SOR_LANE_PREEMPHASIS1);
- DUMP_REG(SOR_LANE4_PREEMPHASIS0);
- DUMP_REG(SOR_LANE4_PREEMPHASIS1);
- DUMP_REG(SOR_LANE_POSTCURSOR0);
- DUMP_REG(SOR_LANE_POSTCURSOR1);
- DUMP_REG(SOR_DP_CONFIG0);
- DUMP_REG(SOR_DP_CONFIG1);
- DUMP_REG(SOR_DP_MN0);
- DUMP_REG(SOR_DP_MN1);
- DUMP_REG(SOR_DP_PADCTL0);
- DUMP_REG(SOR_DP_PADCTL1);
- DUMP_REG(SOR_DP_DEBUG0);
- DUMP_REG(SOR_DP_DEBUG1);
- DUMP_REG(SOR_DP_SPARE0);
- DUMP_REG(SOR_DP_SPARE1);
- DUMP_REG(SOR_DP_AUDIO_CTRL);
- DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
- DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK0);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK1);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK2);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK3);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK4);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK5);
- DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK6);
- DUMP_REG(SOR_DP_TPG);
- DUMP_REG(SOR_DP_TPG_CONFIG);
- DUMP_REG(SOR_DP_LQ_CSTM0);
- DUMP_REG(SOR_DP_LQ_CSTM1);
- DUMP_REG(SOR_DP_LQ_CSTM2);
-
-#undef DUMP_REG
+ for (i = 0; i < ARRAY_SIZE(tegra_sor_regs); i++) {
+ unsigned int offset = tegra_sor_regs[i].offset;
+
+ seq_printf(s, "%-38s %#05x %08x\n", tegra_sor_regs[i].name,
+ offset, tegra_sor_readl(sor, offset));
+ }
unlock:
drm_modeset_unlock_all(drm);
@@ -1251,57 +1378,46 @@ static const struct drm_info_list debugfs_files[] = {
{ "regs", tegra_sor_show_regs, 0, NULL },
};
-static int tegra_sor_debugfs_init(struct tegra_sor *sor,
- struct drm_minor *minor)
+static int tegra_sor_late_register(struct drm_connector *connector)
{
- const char *name = sor->soc->supports_dp ? "sor1" : "sor";
- unsigned int i;
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int i, count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = connector->dev->primary;
+ struct dentry *root = connector->debugfs_entry;
+ struct tegra_sor *sor = to_sor(output);
int err;
- sor->debugfs = debugfs_create_dir(name, minor->debugfs_root);
- if (!sor->debugfs)
- return -ENOMEM;
-
sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
- if (!sor->debugfs_files) {
- err = -ENOMEM;
- goto remove;
- }
+ if (!sor->debugfs_files)
+ return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ for (i = 0; i < count; i++)
sor->debugfs_files[i].data = sor;
- err = drm_debugfs_create_files(sor->debugfs_files,
- ARRAY_SIZE(debugfs_files),
- sor->debugfs, minor);
+ err = drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
if (err < 0)
goto free;
- sor->minor = minor;
-
return 0;
free:
kfree(sor->debugfs_files);
sor->debugfs_files = NULL;
-remove:
- debugfs_remove_recursive(sor->debugfs);
- sor->debugfs = NULL;
+
return err;
}
-static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
+static void tegra_sor_early_unregister(struct drm_connector *connector)
{
- drm_debugfs_remove_files(sor->debugfs_files, ARRAY_SIZE(debugfs_files),
- sor->minor);
- sor->minor = NULL;
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int count = ARRAY_SIZE(debugfs_files);
+ struct tegra_sor *sor = to_sor(output);
+ drm_debugfs_remove_files(sor->debugfs_files, count,
+ connector->dev->primary);
kfree(sor->debugfs_files);
sor->debugfs_files = NULL;
-
- debugfs_remove_recursive(sor->debugfs);
- sor->debugfs = NULL;
}
static void tegra_sor_connector_reset(struct drm_connector *connector)
@@ -1354,6 +1470,8 @@ static const struct drm_connector_funcs tegra_sor_connector_funcs = {
.destroy = tegra_output_connector_destroy,
.atomic_duplicate_state = tegra_sor_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = tegra_sor_late_register,
+ .early_unregister = tegra_sor_early_unregister,
};
static int tegra_sor_connector_get_modes(struct drm_connector *connector)
@@ -1377,10 +1495,6 @@ static enum drm_mode_status
tegra_sor_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- /* HDMI 2.0 modes are not yet supported */
- if (mode->clock > 340000)
- return MODE_NOCLOCK;
-
return MODE_OK;
}
@@ -1417,7 +1531,7 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder)
*/
if (dc) {
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~SOR_ENABLE;
+ value &= ~SOR_ENABLE(0);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
@@ -1433,9 +1547,9 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder)
dev_err(sor->dev, "failed to disable DP: %d\n", err);
}
- err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+ err = tegra_io_pad_power_disable(sor->pad);
if (err < 0)
- dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
+ dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
if (output->panel)
drm_panel_unprepare(output->panel);
@@ -1533,40 +1647,40 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
- value = tegra_sor_readl(sor, SOR_PLL3);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value |= SOR_PLL3_PLL_VDD_MODE_3V3;
- tegra_sor_writel(sor, value, SOR_PLL3);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
- tegra_sor_writel(sor, value, SOR_PLL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_SEQ_PLLCAPPD;
value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
value |= SOR_PLL2_LVDS_ENABLE;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
- tegra_sor_writel(sor, value, SOR_PLL1);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
while (true) {
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
break;
usleep_range(250, 1000);
}
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
/*
* power up
@@ -1579,49 +1693,49 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
/* step 1 */
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
- value = tegra_sor_readl(sor, SOR_PLL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, SOR_PLL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* step 2 */
- err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
+ err = tegra_io_pad_power_enable(sor->pad);
if (err < 0)
- dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
+ dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
usleep_range(5, 100);
/* step 3 */
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
/* step 4 */
- value = tegra_sor_readl(sor, SOR_PLL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_VCOPD;
value &= ~SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, SOR_PLL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(200, 1000);
/* step 5 */
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
/* XXX not in TRM */
for (value = 0, i = 0; i < 5; i++)
@@ -1637,7 +1751,7 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
/* power DP lanes */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
if (link.num_lanes <= 2)
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
@@ -1654,7 +1768,7 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
else
value |= SOR_DP_PADCTL_PD_TXD_0;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
@@ -1698,9 +1812,9 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_DP_TPG);
/* enable pad calibration logic */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
err = drm_dp_link_probe(sor->aux, &link);
if (err < 0)
@@ -1773,7 +1887,7 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
tegra_sor_update(sor);
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= SOR_ENABLE;
+ value |= SOR_ENABLE(0);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
@@ -1805,6 +1919,18 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
info = &output->connector.display_info;
+ /*
+ * For HBR2 modes, the SOR brick needs to use the x20 multiplier, so
+ * the pixel clock must be corrected accordingly.
+ */
+ if (pclk >= 340000000) {
+ state->link_speed = 20;
+ state->pclk = pclk / 2;
+ } else {
+ state->link_speed = 10;
+ state->pclk = pclk;
+ }
+
err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
pclk, 0);
if (err < 0) {
@@ -1955,6 +2081,81 @@ tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
return NULL;
}
+static void tegra_sor_hdmi_disable_scrambling(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
+ value &= ~SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4;
+ value &= ~SOR_HDMI2_CTRL_SCRAMBLE;
+ tegra_sor_writel(sor, value, SOR_HDMI2_CTRL);
+}
+
+static void tegra_sor_hdmi_scdc_disable(struct tegra_sor *sor)
+{
+ struct i2c_adapter *ddc = sor->output.ddc;
+
+ drm_scdc_set_high_tmds_clock_ratio(ddc, false);
+ drm_scdc_set_scrambling(ddc, false);
+
+ tegra_sor_hdmi_disable_scrambling(sor);
+}
+
+static void tegra_sor_hdmi_scdc_stop(struct tegra_sor *sor)
+{
+ if (sor->scdc_enabled) {
+ cancel_delayed_work_sync(&sor->scdc);
+ tegra_sor_hdmi_scdc_disable(sor);
+ }
+}
+
+static void tegra_sor_hdmi_enable_scrambling(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
+ value |= SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4;
+ value |= SOR_HDMI2_CTRL_SCRAMBLE;
+ tegra_sor_writel(sor, value, SOR_HDMI2_CTRL);
+}
+
+static void tegra_sor_hdmi_scdc_enable(struct tegra_sor *sor)
+{
+ struct i2c_adapter *ddc = sor->output.ddc;
+
+ drm_scdc_set_high_tmds_clock_ratio(ddc, true);
+ drm_scdc_set_scrambling(ddc, true);
+
+ tegra_sor_hdmi_enable_scrambling(sor);
+}
+
+static void tegra_sor_hdmi_scdc_work(struct work_struct *work)
+{
+ struct tegra_sor *sor = container_of(work, struct tegra_sor, scdc.work);
+ struct i2c_adapter *ddc = sor->output.ddc;
+
+ if (!drm_scdc_get_scrambling_status(ddc)) {
+ DRM_DEBUG_KMS("SCDC not scrambled\n");
+ tegra_sor_hdmi_scdc_enable(sor);
+ }
+
+ schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
+}
+
+static void tegra_sor_hdmi_scdc_start(struct tegra_sor *sor)
+{
+ struct drm_scdc *scdc = &sor->output.connector.display_info.hdmi.scdc;
+ struct drm_display_mode *mode;
+
+ mode = &sor->output.encoder.crtc->state->adjusted_mode;
+
+ if (mode->clock >= 340000 && scdc->supported) {
+ schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
+ tegra_sor_hdmi_scdc_enable(sor);
+ sor->scdc_enabled = true;
+ }
+}
+
static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
@@ -1963,6 +2164,8 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
u32 value;
int err;
+ tegra_sor_hdmi_scdc_stop(sor);
+
err = tegra_sor_detach(sor);
if (err < 0)
dev_err(sor->dev, "failed to detach SOR: %d\n", err);
@@ -1972,8 +2175,12 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
/* disable display to SOR clock */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~SOR1_TIMING_CYA;
- value &= ~SOR1_ENABLE;
+
+ if (!sor->soc->has_nvdisplay)
+ value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1));
+ else
+ value &= ~SOR_ENABLE(sor->index);
+
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
@@ -1982,9 +2189,9 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power down SOR: %d\n", err);
- err = tegra_io_rail_power_off(TEGRA_IO_RAIL_HDMI);
+ err = tegra_io_pad_power_disable(sor->pad);
if (err < 0)
- dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
+ dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
pm_runtime_put(sor->dev);
}
@@ -1998,12 +2205,14 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_state *state;
struct drm_display_mode *mode;
+ unsigned long rate, pclk;
unsigned int div, i;
u32 value;
int err;
state = to_sor_state(output->connector.state);
mode = &encoder->crtc->state->adjusted_mode;
+ pclk = mode->clock * 1000;
pm_runtime_get_sync(sor->dev);
@@ -2016,44 +2225,44 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
div = clk_get_rate(sor->clk) / 1000000 * 4;
- err = tegra_io_rail_power_on(TEGRA_IO_RAIL_HDMI);
+ err = tegra_io_pad_power_enable(sor->pad);
if (err < 0)
- dev_err(sor->dev, "failed to power on HDMI rail: %d\n", err);
+ dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
usleep_range(20, 100);
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
- value = tegra_sor_readl(sor, SOR_PLL3);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
- tegra_sor_writel(sor, value, SOR_PLL3);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
- value = tegra_sor_readl(sor, SOR_PLL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_VCOPD;
value &= ~SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, SOR_PLL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(200, 400);
- value = tegra_sor_readl(sor, SOR_PLL2);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, SOR_PLL2);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
while (true) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
@@ -2079,18 +2288,30 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
- if (mode->clock < 340000)
+ if (mode->clock < 340000) {
+ DRM_DEBUG_KMS("setting 2.7 GHz link speed\n");
value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
- else
+ } else {
+ DRM_DEBUG_KMS("setting 5.4 GHz link speed\n");
value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
+ }
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+ /* SOR pad PLL stabilization time */
+ usleep_range(250, 1000);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(4);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- value |= SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
+ value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
- value |= SOR_DP_SPARE_SEQ_ENABLE;
+ value &= ~SOR_DP_SPARE_SEQ_ENABLE;
+ value &= ~SOR_DP_SPARE_MACRO_SOR_CLK;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
@@ -2102,9 +2323,11 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
- /* program the reference clock */
- value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
- tegra_sor_writel(sor, value, SOR_REFCLK);
+ if (!sor->soc->has_nvdisplay) {
+ /* program the reference clock */
+ value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
+ tegra_sor_writel(sor, value, SOR_REFCLK);
+ }
/* XXX not in TRM */
for (value = 0, i = 0; i < 5; i++)
@@ -2127,13 +2350,25 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
return;
}
- value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
+ /* adjust clock rate for HDMI 2.0 modes */
+ rate = clk_get_rate(sor->clk_parent);
- /* XXX is this the proper check? */
- if (mode->clock < 75000)
- value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
+ if (mode->clock >= 340000)
+ rate /= 2;
- tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
+ DRM_DEBUG_KMS("setting clock to %lu Hz, mode: %lu Hz\n", rate, pclk);
+
+ clk_set_rate(sor->clk, rate);
+
+ if (!sor->soc->has_nvdisplay) {
+ value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
+
+ /* XXX is this the proper check? */
+ if (mode->clock < 75000)
+ value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
+
+ tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
+ }
max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
@@ -2141,20 +2376,23 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
- /* H_PULSE2 setup */
- pulse_start = h_ref_to_sync + (mode->hsync_end - mode->hsync_start) +
- (mode->htotal - mode->hsync_end) - 10;
+ if (!dc->soc->has_nvdisplay) {
+ /* H_PULSE2 setup */
+ pulse_start = h_ref_to_sync +
+ (mode->hsync_end - mode->hsync_start) +
+ (mode->htotal - mode->hsync_end) - 10;
- value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
- PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
- tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+ value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
+ PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
- value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
- tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+ value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
- value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
- value |= H_PULSE2_ENABLE;
- tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ value |= H_PULSE2_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ }
/* infoframe setup */
err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
@@ -2171,9 +2409,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_STATE1);
/* power up pad calibration */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* production settings */
settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
@@ -2183,51 +2421,68 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
return;
}
- value = tegra_sor_readl(sor, SOR_PLL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_FILTER_MASK;
value &= ~SOR_PLL0_VCOCAP_MASK;
value |= SOR_PLL0_ICHPMP(settings->ichpmp);
+ value |= SOR_PLL0_FILTER(settings->filter);
value |= SOR_PLL0_VCOCAP(settings->vcocap);
- tegra_sor_writel(sor, value, SOR_PLL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
- tegra_sor_dp_term_calibrate(sor);
-
- value = tegra_sor_readl(sor, SOR_PLL1);
+ /* XXX not in TRM */
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_LOADADJ_MASK;
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
value |= SOR_PLL1_LOADADJ(settings->loadadj);
- tegra_sor_writel(sor, value, SOR_PLL1);
+ value |= SOR_PLL1_TMDS_TERMADJ(settings->tmds_termadj);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- value = tegra_sor_readl(sor, SOR_PLL3);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
+ value &= ~SOR_PLL3_BG_TEMP_COEF_MASK;
value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
- value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref);
- tegra_sor_writel(sor, value, SOR_PLL3);
-
- value = settings->drive_current[0] << 24 |
- settings->drive_current[1] << 16 |
- settings->drive_current[2] << 8 |
- settings->drive_current[3] << 0;
+ value &= ~SOR_PLL3_AVDD10_LEVEL_MASK;
+ value &= ~SOR_PLL3_AVDD14_LEVEL_MASK;
+ value |= SOR_PLL3_BG_TEMP_COEF(settings->bg_temp_coef);
+ value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref_level);
+ value |= SOR_PLL3_AVDD10_LEVEL(settings->avdd10_level);
+ value |= SOR_PLL3_AVDD14_LEVEL(settings->avdd14_level);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
+
+ value = settings->drive_current[3] << 24 |
+ settings->drive_current[2] << 16 |
+ settings->drive_current[1] << 8 |
+ settings->drive_current[0] << 0;
tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
- value = settings->preemphasis[0] << 24 |
- settings->preemphasis[1] << 16 |
- settings->preemphasis[2] << 8 |
- settings->preemphasis[3] << 0;
+ value = settings->preemphasis[3] << 24 |
+ settings->preemphasis[2] << 16 |
+ settings->preemphasis[1] << 8 |
+ settings->preemphasis[0] << 0;
tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
- value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu);
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu_value);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl2);
+ value &= ~SOR_DP_PADCTL_SPAREPLL_MASK;
+ value |= SOR_DP_PADCTL_SPAREPLL(settings->sparepll);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl2);
/* power down pad calibration */
- value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
- /* miscellaneous display controller settings */
- value = VSYNC_H_POSITION(1);
- tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
+ if (!dc->soc->has_nvdisplay) {
+ /* miscellaneous display controller settings */
+ value = VSYNC_H_POSITION(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
+ }
value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
value &= ~DITHER_CONTROL_MASK;
@@ -2242,6 +2497,14 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value |= BASE_COLOR_SIZE_888;
break;
+ case 10:
+ value |= BASE_COLOR_SIZE_101010;
+ break;
+
+ case 12:
+ value |= BASE_COLOR_SIZE_121212;
+ break;
+
default:
WARN(1, "%u bits-per-color not supported\n", state->bpc);
value |= BASE_COLOR_SIZE_888;
@@ -2250,40 +2513,65 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
+ /* XXX set display head owner */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+ value |= SOR_STATE_ASY_OWNER(1 + dc->pipe);
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
err = tegra_sor_power_up(sor, 250);
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
/* configure dynamic range of output */
- value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+ value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
- tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
/* configure colorspace */
- value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+ value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
value |= SOR_HEAD_STATE_COLORSPACE_RGB;
- tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
tegra_sor_mode_set(sor, mode, state);
tegra_sor_update(sor);
+ /* program preamble timing in SOR (XXX) */
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
err = tegra_sor_attach(sor);
if (err < 0)
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
/* enable display to SOR clock and generate HDMI preamble */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= SOR1_ENABLE | SOR1_TIMING_CYA;
+
+ if (!sor->soc->has_nvdisplay)
+ value |= SOR_ENABLE(1) | SOR1_TIMING_CYA;
+ else
+ value |= SOR_ENABLE(sor->index);
+
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ if (dc->soc->has_nvdisplay) {
+ value = tegra_dc_readl(dc, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
+ value &= ~PROTOCOL_MASK;
+ value |= PROTOCOL_SINGLE_TMDS_A;
+ tegra_dc_writel(dc, value, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
+ }
+
tegra_dc_commit(dc);
err = tegra_sor_wakeup(sor);
if (err < 0)
dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
+
+ tegra_sor_hdmi_scdc_start(sor);
}
static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
@@ -2344,13 +2632,7 @@ static int tegra_sor_init(struct host1x_client *client)
return err;
}
- sor->output.encoder.possible_crtcs = 0x3;
-
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_sor_debugfs_init(sor, drm->primary);
- if (err < 0)
- dev_err(sor->dev, "debugfs setup failed: %d\n", err);
- }
+ tegra_output_find_possible_crtcs(&sor->output, drm);
if (sor->aux) {
err = drm_dp_aux_attach(sor->aux, &sor->output);
@@ -2420,9 +2702,6 @@ static int tegra_sor_exit(struct host1x_client *client)
clk_disable_unprepare(sor->clk_dp);
clk_disable_unprepare(sor->clk);
- if (IS_ENABLED(CONFIG_DEBUG_FS))
- tegra_sor_debugfs_exit(sor);
-
return 0;
}
@@ -2480,6 +2759,8 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
return err;
}
+ INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
+
return 0;
}
@@ -2502,19 +2783,53 @@ static const u8 tegra124_sor_xbar_cfg[5] = {
0, 1, 2, 3, 4
};
+static const struct tegra_sor_regs tegra124_sor_regs = {
+ .head_state0 = 0x05,
+ .head_state1 = 0x07,
+ .head_state2 = 0x09,
+ .head_state3 = 0x0b,
+ .head_state4 = 0x0d,
+ .head_state5 = 0x0f,
+ .pll0 = 0x17,
+ .pll1 = 0x18,
+ .pll2 = 0x19,
+ .pll3 = 0x1a,
+ .dp_padctl0 = 0x5c,
+ .dp_padctl2 = 0x73,
+};
+
static const struct tegra_sor_soc tegra124_sor = {
.supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
.supports_dp = false,
+ .regs = &tegra124_sor_regs,
+ .has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
};
+static const struct tegra_sor_regs tegra210_sor_regs = {
+ .head_state0 = 0x05,
+ .head_state1 = 0x07,
+ .head_state2 = 0x09,
+ .head_state3 = 0x0b,
+ .head_state4 = 0x0d,
+ .head_state5 = 0x0f,
+ .pll0 = 0x17,
+ .pll1 = 0x18,
+ .pll2 = 0x19,
+ .pll3 = 0x1a,
+ .dp_padctl0 = 0x5c,
+ .dp_padctl2 = 0x73,
+};
+
static const struct tegra_sor_soc tegra210_sor = {
.supports_edp = true,
.supports_lvds = false,
.supports_hdmi = false,
.supports_dp = false,
+ .regs = &tegra210_sor_regs,
+ .has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
};
@@ -2528,13 +2843,60 @@ static const struct tegra_sor_soc tegra210_sor1 = {
.supports_hdmi = true,
.supports_dp = true,
+ .regs = &tegra210_sor_regs,
+ .has_nvdisplay = false,
+
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
};
+static const struct tegra_sor_regs tegra186_sor_regs = {
+ .head_state0 = 0x151,
+ .head_state1 = 0x154,
+ .head_state2 = 0x157,
+ .head_state3 = 0x15a,
+ .head_state4 = 0x15d,
+ .head_state5 = 0x160,
+ .pll0 = 0x163,
+ .pll1 = 0x164,
+ .pll2 = 0x165,
+ .pll3 = 0x166,
+ .dp_padctl0 = 0x168,
+ .dp_padctl2 = 0x16a,
+};
+
+static const struct tegra_sor_soc tegra186_sor = {
+ .supports_edp = false,
+ .supports_lvds = false,
+ .supports_hdmi = false,
+ .supports_dp = true,
+
+ .regs = &tegra186_sor_regs,
+ .has_nvdisplay = true,
+
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+};
+
+static const struct tegra_sor_soc tegra186_sor1 = {
+ .supports_edp = false,
+ .supports_lvds = false,
+ .supports_hdmi = true,
+ .supports_dp = true,
+
+ .regs = &tegra186_sor_regs,
+ .has_nvdisplay = true,
+
+ .num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults),
+ .settings = tegra186_sor_hdmi_defaults,
+
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+};
+
static const struct of_device_id tegra_sor_of_match[] = {
+ { .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 },
+ { .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
@@ -2542,6 +2904,29 @@ static const struct of_device_id tegra_sor_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
+static int tegra_sor_parse_dt(struct tegra_sor *sor)
+{
+ struct device_node *np = sor->dev->of_node;
+ u32 value;
+ int err;
+
+ if (sor->soc->has_nvdisplay) {
+ err = of_property_read_u32(np, "nvidia,interface", &value);
+ if (err < 0)
+ return err;
+
+ sor->index = value;
+
+ /*
+ * override the default that we already set for Tegra210 and
+ * earlier
+ */
+ sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
+ }
+
+ return 0;
+}
+
static int tegra_sor_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -2577,6 +2962,7 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux) {
if (sor->soc->supports_hdmi) {
sor->ops = &tegra_sor_hdmi_ops;
+ sor->pad = TEGRA_IO_PAD_HDMI;
} else if (sor->soc->supports_lvds) {
dev_err(&pdev->dev, "LVDS not supported yet\n");
return -ENODEV;
@@ -2587,6 +2973,7 @@ static int tegra_sor_probe(struct platform_device *pdev)
} else {
if (sor->soc->supports_edp) {
sor->ops = &tegra_sor_edp_ops;
+ sor->pad = TEGRA_IO_PAD_LVDS;
} else if (sor->soc->supports_dp) {
dev_err(&pdev->dev, "DisplayPort not supported yet\n");
return -ENODEV;
@@ -2596,6 +2983,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
}
}
+ err = tegra_sor_parse_dt(sor);
+ if (err < 0)
+ return err;
+
err = tegra_output_probe(&sor->output);
if (err < 0) {
dev_err(&pdev->dev, "failed to probe output: %d\n", err);
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index 865c73b..fb0854d 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -89,6 +89,8 @@
#define SOR_PLL0 0x17
#define SOR_PLL0_ICHPMP_MASK (0xf << 24)
#define SOR_PLL0_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL0_FILTER_MASK (0xf << 16)
+#define SOR_PLL0_FILTER(x) (((x) & 0xf) << 16)
#define SOR_PLL0_VCOCAP_MASK (0xf << 8)
#define SOR_PLL0_VCOCAP(x) (((x) & 0xf) << 8)
#define SOR_PLL0_VCOCAP_RST SOR_PLL0_VCOCAP(3)
@@ -122,10 +124,16 @@
#define SOR_PLL2_SEQ_PLL_PULLDOWN (1 << 16)
#define SOR_PLL3 0x1a
+#define SOR_PLL3_BG_TEMP_COEF_MASK (0xf << 28)
+#define SOR_PLL3_BG_TEMP_COEF(x) (((x) & 0xf) << 28)
#define SOR_PLL3_BG_VREF_LEVEL_MASK (0xf << 24)
#define SOR_PLL3_BG_VREF_LEVEL(x) (((x) & 0xf) << 24)
#define SOR_PLL3_PLL_VDD_MODE_1V8 (0 << 13)
#define SOR_PLL3_PLL_VDD_MODE_3V3 (1 << 13)
+#define SOR_PLL3_AVDD10_LEVEL_MASK (0xf << 8)
+#define SOR_PLL3_AVDD10_LEVEL(x) (((x) & 0xf) << 8)
+#define SOR_PLL3_AVDD14_LEVEL_MASK (0xf << 4)
+#define SOR_PLL3_AVDD14_LEVEL(x) (((x) & 0xf) << 4)
#define SOR_CSTM 0x1b
#define SOR_CSTM_ROTCLK_MASK (0xf << 24)
@@ -334,6 +342,10 @@
#define SOR_DP_LQ_CSTM1 0x70
#define SOR_DP_LQ_CSTM2 0x71
+#define SOR_DP_PADCTL2 0x73
+#define SOR_DP_PADCTL_SPAREPLL_MASK (0xff << 24)
+#define SOR_DP_PADCTL_SPAREPLL(x) (((x) & 0xff) << 24)
+
#define SOR_HDMI_AUDIO_INFOFRAME_CTRL 0x9a
#define SOR_HDMI_AUDIO_INFOFRAME_STATUS 0x9b
#define SOR_HDMI_AUDIO_INFOFRAME_HEADER 0x9c
@@ -370,4 +382,8 @@
#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
+#define SOR_HDMI2_CTRL 0x13e
+#define SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4 (1 << 1)
+#define SOR_HDMI2_CTRL_SCRAMBLE (1 << 0)
+
#endif
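The new SOR_PLL3_AVDD*/BG_TEMP_COEF definitions follow the usual mask-plus-shift macro pattern. A tiny self-contained example of the read-modify-write sequence they are used with; the register value and field name here are made up.

/* editor's sketch, not part of the patch */
#include <stdint.h>
#include <stdio.h>

#define AVDD10_LEVEL_MASK	(0xf << 8)
#define AVDD10_LEVEL(x)		(((x) & 0xf) << 8)

int main(void)
{
	uint32_t value = 0xdeadbe00;	/* pretend register contents */

	/* read-modify-write: clear the field, then insert the new level */
	value &= ~AVDD10_LEVEL_MASK;
	value |= AVDD10_LEVEL(0x6);

	printf("0x%08x\n", (unsigned int)value);	/* bits 11:8 now hold 0x6 */
	return 0;
}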
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 1802418..f5794dd 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -115,7 +115,7 @@ static int vic_boot(struct vic *vic)
}
static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
- dma_addr_t *iova)
+ dma_addr_t *iova)
{
struct tegra_drm *tegra = falcon->data;
@@ -138,13 +138,14 @@ static const struct falcon_ops vic_falcon_ops = {
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
- if (tegra->domain) {
- err = iommu_attach_device(tegra->domain, vic->dev);
+ if (group && tegra->domain) {
+ err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
dev_err(vic->dev, "failed to attach to domain: %d\n",
err);
@@ -158,13 +159,13 @@ static int vic_init(struct host1x_client *client)
vic->falcon.data = tegra;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
- goto detach_device;
+ goto detach;
}
vic->channel = host1x_channel_request(client->dev);
if (!vic->channel) {
err = -ENOMEM;
- goto detach_device;
+ goto detach;
}
client->syncpts[0] = host1x_syncpt_request(client, 0);
@@ -183,9 +184,9 @@ free_syncpt:
host1x_syncpt_free(client->syncpts[0]);
free_channel:
host1x_channel_put(vic->channel);
-detach_device:
- if (tegra->domain)
- iommu_detach_device(tegra->domain, vic->dev);
+detach:
+ if (group && tegra->domain)
+ iommu_detach_group(tegra->domain, group);
return err;
}
@@ -193,6 +194,7 @@ detach_device:
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
@@ -206,7 +208,7 @@ static int vic_exit(struct host1x_client *client)
host1x_channel_put(vic->channel);
if (vic->domain) {
- iommu_detach_device(vic->domain, vic->dev);
+ iommu_detach_group(vic->domain, group);
vic->domain = NULL;
}
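The vic.c changes rename the error label and detach the IOMMU group rather than the device; the unwind style itself, bailing to a single label that undoes the first acquisition, is sketched below with hypothetical acquire/release helpers standing in for the domain, firmware and channel setup.

/* editor's sketch, not part of the patch */
#include <stdio.h>
#include <stdlib.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int init(void)
{
	int err;

	err = acquire("domain");
	if (err)
		return err;

	err = acquire("firmware");
	if (err)
		goto detach;

	err = acquire("channel");
	if (err)
		goto detach;

	return 0;

detach:
	/* one label undoes the first step on every failure path */
	release("domain");
	return err;
}

int main(void)
{
	return init() ? EXIT_FAILURE : EXIT_SUCCESS;
}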
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 81ac824..5259804 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -4,6 +4,8 @@ config DRM_TILCDC
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
+ select DRM_BRIDGE
+ select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 6ef4d1a..1b278a2 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -51,12 +51,8 @@ struct tilcdc_crtc {
ktime_t last_vblank;
unsigned int hvtotal_us;
- struct drm_framebuffer *curr_fb;
struct drm_framebuffer *next_fb;
- /* for deferred fb unref's: */
- struct drm_flip_work unref_work;
-
/* Only set if an external encoder is connected */
bool simulate_vesa_sync;
@@ -70,20 +66,8 @@ struct tilcdc_crtc {
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
-static void unref_worker(struct drm_flip_work *work, void *val)
-{
- struct tilcdc_crtc *tilcdc_crtc =
- container_of(work, struct tilcdc_crtc, unref_work);
- struct drm_device *dev = tilcdc_crtc->base.dev;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_framebuffer_put(val);
- mutex_unlock(&dev->mode_config.mutex);
-}
-
static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_gem_cma_object *gem;
@@ -108,12 +92,6 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
dma_base_and_ceiling = (u64)end << 32 | start;
tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
-
- if (tilcdc_crtc->curr_fb)
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->curr_fb);
-
- tilcdc_crtc->curr_fb = fb;
}
/*
@@ -294,7 +272,7 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
LCDC_V2_CORE_CLK_EN);
}
-uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
+static uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
{
return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
mode->clock);
@@ -464,8 +442,6 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
set_scanout(crtc, fb);
- drm_framebuffer_get(fb);
-
crtc->hwmode = crtc->state->adjusted_mode;
tilcdc_crtc->hvtotal_us =
@@ -524,7 +500,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct tilcdc_drm_private *priv = dev->dev_private;
int ret;
mutex_lock(&tilcdc_crtc->enable_lock);
@@ -554,20 +529,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
pm_runtime_put_sync(dev->dev);
- if (tilcdc_crtc->next_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->next_fb);
- tilcdc_crtc->next_fb = NULL;
- }
-
- if (tilcdc_crtc->curr_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->curr_fb);
- tilcdc_crtc->curr_fb = NULL;
- }
-
- drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-
tilcdc_crtc->enabled = false;
mutex_unlock(&tilcdc_crtc->enable_lock);
}
@@ -614,7 +575,6 @@ out:
static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct tilcdc_drm_private *priv = crtc->dev->dev_private;
tilcdc_crtc_shutdown(crtc);
@@ -623,7 +583,6 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
of_node_put(crtc->port);
drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
}
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
@@ -638,9 +597,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
return -EBUSY;
}
- drm_framebuffer_get(fb);
-
- crtc->primary->fb = fb;
tilcdc_crtc->event = event;
mutex_lock(&tilcdc_crtc->enable_lock);
@@ -936,8 +892,6 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
now = ktime_get();
- drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-
spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
tilcdc_crtc->last_vblank = now;
@@ -1040,10 +994,8 @@ int tilcdc_crtc_create(struct drm_device *dev)
int ret;
tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
- if (!tilcdc_crtc) {
- dev_err(dev->dev, "allocation failed\n");
+ if (!tilcdc_crtc)
return -ENOMEM;
- }
init_completion(&tilcdc_crtc->palette_loaded);
tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
@@ -1064,9 +1016,6 @@ int tilcdc_crtc_create(struct drm_device *dev)
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
- drm_flip_work_init(&tilcdc_crtc->unref_work,
- "unref", unref_worker);
-
spin_lock_init(&tilcdc_crtc->irq_lock);
INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 72ce063..b8a5e4e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -69,12 +69,6 @@ static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
-static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
- drm_fbdev_cma_hotplug_event(priv->fbdev);
-}
-
static int tilcdc_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -146,7 +140,7 @@ static int tilcdc_commit(struct drm_device *dev,
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = tilcdc_fb_create,
- .output_poll_changed = tilcdc_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = tilcdc_atomic_check,
.atomic_commit = tilcdc_commit,
};
@@ -198,8 +192,7 @@ static void tilcdc_fini(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
- if (priv->fbdev)
- drm_fbdev_cma_fini(priv->fbdev);
+ drm_fb_cma_fbdev_fini(dev);
drm_irq_uninstall(dev);
drm_mode_config_cleanup(dev);
@@ -240,10 +233,8 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(dev, "failed to allocate private data\n");
+ if (!priv)
return -ENOMEM;
- }
ddev = drm_dev_alloc(ddrv, dev);
if (IS_ERR(ddev))
@@ -405,12 +396,9 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
drm_mode_config_reset(ddev);
- priv->fbdev = drm_fbdev_cma_init(ddev, bpp,
- ddev->mode_config.num_connector);
- if (IS_ERR(priv->fbdev)) {
- ret = PTR_ERR(priv->fbdev);
+ ret = drm_fb_cma_fbdev_init(ddev, bpp, 0);
+ if (ret)
goto init_failed;
- }
drm_kms_helper_poll_init(ddev);
@@ -427,12 +415,6 @@ init_failed:
return ret;
}
-static void tilcdc_lastclose(struct drm_device *dev)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
- drm_fbdev_cma_restore_mode(priv->fbdev);
-}
-
static irqreturn_t tilcdc_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -507,7 +489,6 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg)
static struct drm_info_list tilcdc_debugfs_list[] = {
{ "regs", tilcdc_regs_show, 0 },
{ "mm", tilcdc_mm_show, 0 },
- { "fb", drm_fb_cma_debugfs_show, 0 },
};
static int tilcdc_debugfs_init(struct drm_minor *minor)
@@ -538,9 +519,10 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver tilcdc_driver = {
.driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
DRIVER_PRIME | DRIVER_ATOMIC),
- .lastclose = tilcdc_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = tilcdc_irq,
.gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_print_info = drm_gem_cma_print_info,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 8caa11b..ead5122 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -79,8 +79,6 @@ struct tilcdc_drm_private {
struct workqueue_struct *wq;
- struct drm_fbdev_cma *fbdev;
-
struct drm_crtc *crtc;
unsigned int num_encoders;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 711c7b3..d651bdd 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -188,18 +188,16 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
int tilcdc_attach_external_device(struct drm_device *ddev)
{
struct tilcdc_drm_private *priv = ddev->dev_private;
- struct device_node *remote_node;
struct drm_bridge *bridge;
+ struct drm_panel *panel;
int ret;
- remote_node = of_graph_get_remote_node(ddev->dev->of_node, 0, 0);
- if (!remote_node)
+ ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0,
+ &panel, &bridge);
+ if (ret == -ENODEV)
return 0;
-
- bridge = of_drm_find_bridge(remote_node);
- of_node_put(remote_node);
- if (!bridge)
- return -EPROBE_DEFER;
+ else if (ret)
+ return ret;
priv->external_encoder = devm_kzalloc(ddev->dev,
sizeof(*priv->external_encoder),
@@ -215,10 +213,23 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
return ret;
}
+ if (panel) {
+ bridge = devm_drm_panel_bridge_add(ddev->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ goto err_encoder_cleanup;
+ }
+ }
+
ret = tilcdc_attach_bridge(ddev, bridge);
if (ret)
- drm_encoder_cleanup(priv->external_encoder);
+ goto err_encoder_cleanup;
+
+ return 0;
+err_encoder_cleanup:
+ drm_encoder_cleanup(priv->external_encoder);
return ret;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 8eebb5f..d616d64 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -101,10 +101,8 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
panel_encoder = devm_kzalloc(dev->dev, sizeof(*panel_encoder),
GFP_KERNEL);
- if (!panel_encoder) {
- dev_err(dev->dev, "allocation failed\n");
+ if (!panel_encoder)
return NULL;
- }
panel_encoder->mod = mod;
@@ -210,10 +208,8 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
panel_connector = devm_kzalloc(dev->dev, sizeof(*panel_connector),
GFP_KERNEL);
- if (!panel_connector) {
- dev_err(dev->dev, "allocation failed\n");
+ if (!panel_connector)
return NULL;
- }
panel_connector->encoder = encoder;
panel_connector->mod = mod;
@@ -293,11 +289,8 @@ static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- pr_err("%s: allocation failed\n", __func__);
- of_node_put(info_np);
- return NULL;
- }
+ if (!info)
+ goto put_node;
ret |= of_property_read_u32(info_np, "ac-bias", &info->ac_bias);
ret |= of_property_read_u32(info_np, "ac-bias-intrpt", &info->ac_bias_intrpt);
@@ -316,11 +309,11 @@ static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
if (ret) {
pr_err("%s: error reading panel-info properties\n", __func__);
kfree(info);
- of_node_put(info_np);
- return NULL;
+ info = NULL;
}
- of_node_put(info_np);
+put_node:
+ of_node_put(info_np);
return info;
}
@@ -428,7 +421,7 @@ struct platform_driver panel_driver = {
.remove = panel_remove,
.driver = {
.owner = THIS_MODULE,
- .name = "panel",
+ .name = "tilcdc-panel",
.of_match_table = panel_of_match,
},
};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index 9d528c0..5048ebb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -133,7 +133,7 @@ static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data)
struct tilcdc_drm_private *priv = dev->dev_private;
volatile void __iomem *addr = priv->mmio + reg;
-#ifdef iowrite64
+#if defined(iowrite64) && !defined(iowrite64_is_nonatomic)
iowrite64(data, addr);
#else
__iowmb();
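The tilcdc_write64() guard now also checks iowrite64_is_nonatomic, so the fallback path of two 32-bit writes is taken whenever no atomic 64-bit MMIO write is available. A rough userspace model of such a split is below; which half is written first here is an assumption for illustration, not taken from the driver.

/* editor's sketch, not part of the patch */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];	/* stand-in for two adjacent 32-bit registers */

static void write32(uint32_t val, unsigned int idx) { regs[idx] = val; }

static void write64_split(uint64_t data)
{
	/* write the two halves separately; the ordering is an assumption */
	write32((uint32_t)data, 0);
	write32((uint32_t)(data >> 32), 1);
}

int main(void)
{
	write64_split(0x1122334455667788ULL);
	printf("high=0x%08x low=0x%08x\n", (unsigned int)regs[1], (unsigned int)regs[0]);
	return 0;
}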
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 7e36434..c45cabb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -111,10 +111,8 @@ static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
tfp410_encoder = devm_kzalloc(dev->dev, sizeof(*tfp410_encoder),
GFP_KERNEL);
- if (!tfp410_encoder) {
- dev_err(dev->dev, "allocation failed\n");
+ if (!tfp410_encoder)
return NULL;
- }
tfp410_encoder->dpms = DRM_MODE_DPMS_OFF;
tfp410_encoder->mod = mod;
@@ -224,10 +222,8 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
tfp410_connector = devm_kzalloc(dev->dev, sizeof(*tfp410_connector),
GFP_KERNEL);
- if (!tfp410_connector) {
- dev_err(dev->dev, "allocation failed\n");
+ if (!tfp410_connector)
return NULL;
- }
tfp410_connector->encoder = encoder;
tfp410_connector->mod = mod;
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index 2e790e7..4592a5e 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -3,8 +3,6 @@ menuconfig DRM_TINYDRM
depends on DRM
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
- select BACKLIGHT_LCD_SUPPORT
- select BACKLIGHT_CLASS_DEVICE
help
Choose this option if you have a tinydrm supported display.
If M is selected the module will be called tinydrm.
@@ -12,9 +10,20 @@ menuconfig DRM_TINYDRM
config TINYDRM_MIPI_DBI
tristate
+config TINYDRM_ILI9225
+ tristate "DRM support for ILI9225 display panels"
+ depends on DRM_TINYDRM && SPI
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for the following Ilitek ILI9225 panels:
+ * No-name 2.2" color screen module
+
+ If M is selected the module will be called ili9225.
+
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM_TINYDRM && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
select TINYDRM_MIPI_DBI
help
DRM driver for the Multi-Inno MI0283QT display panel
@@ -42,3 +51,14 @@ config TINYDRM_ST7586
* LEGO MINDSTORMS EV3
If M is selected the module will be called st7586.
+
+config TINYDRM_ST7735R
+ tristate "DRM support for Sitronix ST7735R display panels"
+ depends on DRM_TINYDRM && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for Sitronix ST7735R with one of the following LCDs:
+ * JD-T18003-T01 1.8" 128x160 TFT
+
+ If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 0c184bd..49a1119 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -4,6 +4,8 @@ obj-$(CONFIG_DRM_TINYDRM) += core/
obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
# Displays
+obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
+obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 1a8a57c..4c661627 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/tinydrm.h>
#include <linux/device.h>
@@ -36,23 +37,6 @@
*/
/**
- * tinydrm_lastclose - DRM lastclose helper
- * @drm: DRM device
- *
- * This function ensures that fbdev is restored when drm_lastclose() is called
- * on the last drm_release(). Drivers can use this as their
- * &drm_driver->lastclose callback.
- */
-void tinydrm_lastclose(struct drm_device *drm)
-{
- struct tinydrm_device *tdev = drm->dev_private;
-
- DRM_DEBUG_KMS("\n");
- drm_fbdev_cma_restore_mode(tdev->fbdev_cma);
-}
-EXPORT_SYMBOL(tinydrm_lastclose);
-
-/**
* tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from
* another driver's scatter/gather table of pinned pages
* @drm: DRM device to import into
@@ -214,35 +198,24 @@ EXPORT_SYMBOL(devm_tinydrm_init);
static int tinydrm_register(struct tinydrm_device *tdev)
{
struct drm_device *drm = tdev->drm;
- int bpp = drm->mode_config.preferred_depth;
- struct drm_fbdev_cma *fbdev;
int ret;
ret = drm_dev_register(tdev->drm, 0);
if (ret)
return ret;
- fbdev = drm_fbdev_cma_init_with_funcs(drm, bpp ? bpp : 32,
- drm->mode_config.num_connector,
- tdev->fb_funcs);
- if (IS_ERR(fbdev))
- DRM_ERROR("Failed to initialize fbdev: %ld\n", PTR_ERR(fbdev));
- else
- tdev->fbdev_cma = fbdev;
+ ret = drm_fb_cma_fbdev_init_with_funcs(drm, 0, 0, tdev->fb_funcs);
+ if (ret)
+ DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
return 0;
}
static void tinydrm_unregister(struct tinydrm_device *tdev)
{
- struct drm_fbdev_cma *fbdev_cma = tdev->fbdev_cma;
-
drm_atomic_helper_shutdown(tdev->drm);
- /* don't restore fbdev in lastclose, keep pipeline disabled */
- tdev->fbdev_cma = NULL;
+ drm_fb_cma_fbdev_fini(tdev->drm);
drm_dev_unregister(tdev->drm);
- if (fbdev_cma)
- drm_fbdev_cma_fini(fbdev_cma);
}
static void devm_tinydrm_register_release(void *data)
@@ -292,71 +265,4 @@ void tinydrm_shutdown(struct tinydrm_device *tdev)
}
EXPORT_SYMBOL(tinydrm_shutdown);
-/**
- * tinydrm_suspend - Suspend tinydrm
- * @tdev: tinydrm device
- *
- * Used in driver PM operations to suspend tinydrm.
- * Suspends fbdev and DRM.
- * Resume with tinydrm_resume().
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int tinydrm_suspend(struct tinydrm_device *tdev)
-{
- struct drm_atomic_state *state;
-
- if (tdev->suspend_state) {
- DRM_ERROR("Failed to suspend: state already set\n");
- return -EINVAL;
- }
-
- drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 1);
- state = drm_atomic_helper_suspend(tdev->drm);
- if (IS_ERR(state)) {
- drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0);
- return PTR_ERR(state);
- }
-
- tdev->suspend_state = state;
-
- return 0;
-}
-EXPORT_SYMBOL(tinydrm_suspend);
-
-/**
- * tinydrm_resume - Resume tinydrm
- * @tdev: tinydrm device
- *
- * Used in driver PM operations to resume tinydrm.
- * Suspend with tinydrm_suspend().
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int tinydrm_resume(struct tinydrm_device *tdev)
-{
- struct drm_atomic_state *state = tdev->suspend_state;
- int ret;
-
- if (!state) {
- DRM_ERROR("Failed to resume: state is not set\n");
- return -EINVAL;
- }
-
- tdev->suspend_state = NULL;
-
- ret = drm_atomic_helper_resume(tdev->drm, state);
- if (ret) {
- DRM_ERROR("Error resuming state: %d\n", ret);
- return ret;
- }
-
- drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0);
-
- return 0;
-}
-EXPORT_SYMBOL(tinydrm_resume);
-
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index bd6cce0..d1c3ce9 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -236,101 +236,6 @@ void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8);
-/**
- * tinydrm_of_find_backlight - Find backlight device in device-tree
- * @dev: Device
- *
- * This function looks for a DT node pointed to by a property named 'backlight'
- * and uses of_find_backlight_by_node() to get the backlight device.
- * Additionally if the brightness property is zero, it is set to
- * max_brightness.
- *
- * Returns:
- * NULL if there's no backlight property.
- * Error pointer -EPROBE_DEFER if the DT node is found, but no backlight device
- * is found.
- * If the backlight device is found, a pointer to the structure is returned.
- */
-struct backlight_device *tinydrm_of_find_backlight(struct device *dev)
-{
- struct backlight_device *backlight;
- struct device_node *np;
-
- np = of_parse_phandle(dev->of_node, "backlight", 0);
- if (!np)
- return NULL;
-
- backlight = of_find_backlight_by_node(np);
- of_node_put(np);
-
- if (!backlight)
- return ERR_PTR(-EPROBE_DEFER);
-
- if (!backlight->props.brightness) {
- backlight->props.brightness = backlight->props.max_brightness;
- DRM_DEBUG_KMS("Backlight brightness set to %d\n",
- backlight->props.brightness);
- }
-
- return backlight;
-}
-EXPORT_SYMBOL(tinydrm_of_find_backlight);
-
-/**
- * tinydrm_enable_backlight - Enable backlight helper
- * @backlight: Backlight device
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int tinydrm_enable_backlight(struct backlight_device *backlight)
-{
- unsigned int old_state;
- int ret;
-
- if (!backlight)
- return 0;
-
- old_state = backlight->props.state;
- backlight->props.state &= ~BL_CORE_FBBLANK;
- DRM_DEBUG_KMS("Backlight state: 0x%x -> 0x%x\n", old_state,
- backlight->props.state);
-
- ret = backlight_update_status(backlight);
- if (ret)
- DRM_ERROR("Failed to enable backlight %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL(tinydrm_enable_backlight);
-
-/**
- * tinydrm_disable_backlight - Disable backlight helper
- * @backlight: Backlight device
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int tinydrm_disable_backlight(struct backlight_device *backlight)
-{
- unsigned int old_state;
- int ret;
-
- if (!backlight)
- return 0;
-
- old_state = backlight->props.state;
- backlight->props.state |= BL_CORE_FBBLANK;
- DRM_DEBUG_KMS("Backlight state: 0x%x -> 0x%x\n", old_state,
- backlight->props.state);
- ret = backlight_update_status(backlight);
- if (ret)
- DRM_ERROR("Failed to disable backlight %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL(tinydrm_disable_backlight);
-
#if IS_ENABLED(CONFIG_SPI)
/**
@@ -414,11 +319,9 @@ tinydrm_dbg_spi_print(struct spi_device *spi, struct spi_transfer *tr,
void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m)
{
struct spi_transfer *tmp;
- struct list_head *pos;
int i = 0;
- list_for_each(pos, &m->transfers) {
- tmp = list_entry(pos, struct spi_transfer, transfer_list);
+ list_for_each_entry(tmp, &m->transfers, transfer_list) {
if (tmp->tx_buf)
tinydrm_dbg_spi_print(spi, tmp, tmp->tx_buf, i, true);
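The list_for_each(pos) to list_for_each_entry() conversion above relies on container_of() to map the embedded list node back to its containing structure. A self-contained userspace model of that intrusive-list walk; the struct and field names are invented.

/* editor's sketch, not part of the patch */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct transfer {
	int len;
	struct list_head node;	/* node embedded in the payload structure */
};

int main(void)
{
	struct transfer a = { .len = 16 }, b = { .len = 32 };
	struct list_head head = { &a.node };
	struct transfer *t;

	a.node.next = &b.node;
	b.node.next = &head;	/* circular, like the kernel's list_head */

	for (struct list_head *pos = head.next; pos != &head; pos = pos->next) {
		t = container_of(pos, struct transfer, node);
		printf("transfer of %d bytes\n", t->len);
	}
	return 0;
}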
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index f41fc50..11ae950 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -15,7 +15,7 @@
struct tinydrm_connector {
struct drm_connector base;
- const struct drm_display_mode *mode;
+ struct drm_display_mode mode;
};
static inline struct tinydrm_connector *
@@ -29,7 +29,7 @@ static int tinydrm_connector_get_modes(struct drm_connector *connector)
struct tinydrm_connector *tconn = to_tinydrm_connector(connector);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, tconn->mode);
+ mode = drm_mode_duplicate(connector->dev, &tconn->mode);
if (!mode) {
DRM_ERROR("Failed to duplicate mode\n");
return 0;
@@ -92,7 +92,7 @@ tinydrm_connector_create(struct drm_device *drm,
if (!tconn)
return ERR_PTR(-ENOMEM);
- tconn->mode = mode;
+ drm_mode_copy(&tconn->mode, mode);
connector = &tconn->base;
drm_connector_helper_add(connector, &tinydrm_connector_hfuncs);
@@ -199,35 +199,27 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
unsigned int rotation)
{
struct drm_device *drm = tdev->drm;
- struct drm_display_mode *mode_copy;
+ struct drm_display_mode mode_copy;
struct drm_connector *connector;
int ret;
- mode_copy = devm_kmalloc(drm->dev, sizeof(*mode_copy), GFP_KERNEL);
- if (!mode_copy)
- return -ENOMEM;
-
- *mode_copy = *mode;
- ret = tinydrm_rotate_mode(mode_copy, rotation);
+ drm_mode_copy(&mode_copy, mode);
+ ret = tinydrm_rotate_mode(&mode_copy, rotation);
if (ret) {
DRM_ERROR("Illegal rotation value %u\n", rotation);
return -EINVAL;
}
- drm->mode_config.min_width = mode_copy->hdisplay;
- drm->mode_config.max_width = mode_copy->hdisplay;
- drm->mode_config.min_height = mode_copy->vdisplay;
- drm->mode_config.max_height = mode_copy->vdisplay;
+ drm->mode_config.min_width = mode_copy.hdisplay;
+ drm->mode_config.max_width = mode_copy.hdisplay;
+ drm->mode_config.min_height = mode_copy.vdisplay;
+ drm->mode_config.max_height = mode_copy.vdisplay;
- connector = tinydrm_connector_create(drm, mode_copy, connector_type);
+ connector = tinydrm_connector_create(drm, &mode_copy, connector_type);
if (IS_ERR(connector))
return PTR_ERR(connector);
- ret = drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
- format_count, NULL, connector);
- if (ret)
- return ret;
-
- return 0;
+ return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
+ format_count, NULL, connector);
}
EXPORT_SYMBOL(tinydrm_display_pipe_init);
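tinydrm_connector now embeds a full drm_display_mode and fills it with drm_mode_copy() instead of keeping a caller-owned pointer; the lifetime difference is easy to model in plain C. The types below are stand-ins for illustration only.

/* editor's sketch, not part of the patch */
#include <stdio.h>

struct mode { int hdisplay, vdisplay; };

/* embed the mode by value instead of pointing into the caller's storage */
struct connector { struct mode mode; };

int main(void)
{
	struct mode tmp = { 320, 240 };
	struct connector conn;

	conn.mode = tmp;	/* independent copy */
	tmp.hdisplay = 0;	/* clobbering the caller's copy no longer matters */

	printf("%dx%d\n", conn.mode.hdisplay, conn.mode.vdisplay);
	return 0;
}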
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
new file mode 100644
index 0000000..a075950
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -0,0 +1,465 @@
+/*
+ * DRM driver for Ilitek ILI9225 panels
+ *
+ * Copyright 2017 David Lechner <david@lechnology.com>
+ *
+ * Some code copied from mipi-dbi.c
+ * Copyright 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+
+#define ILI9225_DRIVER_READ_CODE 0x00
+#define ILI9225_DRIVER_OUTPUT_CONTROL 0x01
+#define ILI9225_LCD_AC_DRIVING_CONTROL 0x02
+#define ILI9225_ENTRY_MODE 0x03
+#define ILI9225_DISPLAY_CONTROL_1 0x07
+#define ILI9225_BLANK_PERIOD_CONTROL_1 0x08
+#define ILI9225_FRAME_CYCLE_CONTROL 0x0b
+#define ILI9225_INTERFACE_CONTROL 0x0c
+#define ILI9225_OSCILLATION_CONTROL 0x0f
+#define ILI9225_POWER_CONTROL_1 0x10
+#define ILI9225_POWER_CONTROL_2 0x11
+#define ILI9225_POWER_CONTROL_3 0x12
+#define ILI9225_POWER_CONTROL_4 0x13
+#define ILI9225_POWER_CONTROL_5 0x14
+#define ILI9225_VCI_RECYCLING 0x15
+#define ILI9225_RAM_ADDRESS_SET_1 0x20
+#define ILI9225_RAM_ADDRESS_SET_2 0x21
+#define ILI9225_WRITE_DATA_TO_GRAM 0x22
+#define ILI9225_SOFTWARE_RESET 0x28
+#define ILI9225_GATE_SCAN_CONTROL 0x30
+#define ILI9225_VERTICAL_SCROLL_1 0x31
+#define ILI9225_VERTICAL_SCROLL_2 0x32
+#define ILI9225_VERTICAL_SCROLL_3 0x33
+#define ILI9225_PARTIAL_DRIVING_POS_1 0x34
+#define ILI9225_PARTIAL_DRIVING_POS_2 0x35
+#define ILI9225_HORIZ_WINDOW_ADDR_1 0x36
+#define ILI9225_HORIZ_WINDOW_ADDR_2 0x37
+#define ILI9225_VERT_WINDOW_ADDR_1 0x38
+#define ILI9225_VERT_WINDOW_ADDR_2 0x39
+#define ILI9225_GAMMA_CONTROL_1 0x50
+#define ILI9225_GAMMA_CONTROL_2 0x51
+#define ILI9225_GAMMA_CONTROL_3 0x52
+#define ILI9225_GAMMA_CONTROL_4 0x53
+#define ILI9225_GAMMA_CONTROL_5 0x54
+#define ILI9225_GAMMA_CONTROL_6 0x55
+#define ILI9225_GAMMA_CONTROL_7 0x56
+#define ILI9225_GAMMA_CONTROL_8 0x57
+#define ILI9225_GAMMA_CONTROL_9 0x58
+#define ILI9225_GAMMA_CONTROL_10 0x59
+
+static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data)
+{
+ u8 par[2] = { data >> 8, data & 0xff };
+
+ return mipi_dbi_command_buf(mipi, cmd, par, 2);
+}
+
+static int ili9225_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct tinydrm_device *tdev = fb->dev->dev_private;
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ bool swap = mipi->swap_bytes;
+ struct drm_clip_rect clip;
+ u16 x_start, y_start;
+ u16 x1, x2, y1, y2;
+ int ret = 0;
+ bool full;
+ void *tr;
+
+ mutex_lock(&tdev->dirty_lock);
+
+ if (!mipi->enabled)
+ goto out_unlock;
+
+ /* fbdev can flush even when we're not interested */
+ if (tdev->pipe.plane.fb != fb)
+ goto out_unlock;
+
+ full = tinydrm_merge_clips(&clip, clips, num_clips, flags,
+ fb->width, fb->height);
+
+ DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id,
+ clip.x1, clip.x2, clip.y1, clip.y2);
+
+ if (!mipi->dc || !full || swap ||
+ fb->format->format == DRM_FORMAT_XRGB8888) {
+ tr = mipi->tx_buf;
+ ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, &clip, swap);
+ if (ret)
+ goto out_unlock;
+ } else {
+ tr = cma_obj->vaddr;
+ }
+
+ switch (mipi->rotation) {
+ default:
+ x1 = clip.x1;
+ x2 = clip.x2 - 1;
+ y1 = clip.y1;
+ y2 = clip.y2 - 1;
+ x_start = x1;
+ y_start = y1;
+ break;
+ case 90:
+ x1 = clip.y1;
+ x2 = clip.y2 - 1;
+ y1 = fb->width - clip.x2;
+ y2 = fb->width - clip.x1 - 1;
+ x_start = x1;
+ y_start = y2;
+ break;
+ case 180:
+ x1 = fb->width - clip.x2;
+ x2 = fb->width - clip.x1 - 1;
+ y1 = fb->height - clip.y2;
+ y2 = fb->height - clip.y1 - 1;
+ x_start = x2;
+ y_start = y2;
+ break;
+ case 270:
+ x1 = fb->height - clip.y2;
+ x2 = fb->height - clip.y1 - 1;
+ y1 = clip.x1;
+ y2 = clip.x2 - 1;
+ x_start = x2;
+ y_start = y1;
+ break;
+ }
+
+ ili9225_command(mipi, ILI9225_HORIZ_WINDOW_ADDR_1, x2);
+ ili9225_command(mipi, ILI9225_HORIZ_WINDOW_ADDR_2, x1);
+ ili9225_command(mipi, ILI9225_VERT_WINDOW_ADDR_1, y2);
+ ili9225_command(mipi, ILI9225_VERT_WINDOW_ADDR_2, y1);
+
+ ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_1, x_start);
+ ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_2, y_start);
+
+ ret = mipi_dbi_command_buf(mipi, ILI9225_WRITE_DATA_TO_GRAM, tr,
+ (clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
+
+out_unlock:
+ mutex_unlock(&tdev->dirty_lock);
+
+ if (ret)
+ dev_err_once(fb->dev->dev, "Failed to update display %d\n",
+ ret);
+
+ return ret;
+}
+
+static const struct drm_framebuffer_funcs ili9225_fb_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = ili9225_fb_dirty,
+};
+
+static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct device *dev = tdev->drm->dev;
+ int ret;
+ u8 am_id;
+
+ DRM_DEBUG_KMS("\n");
+
+ mipi_dbi_hw_reset(mipi);
+
+ /*
+ * No two example init sequences seem to match, so use the one from the
+ * popular Arduino library for this display:
+ * https://github.com/Nkawu/TFT_22_ILI9225/blob/master/src/TFT_22_ILI9225.cpp
+ */
+
+ ret = ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0000);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
+ return;
+ }
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0000);
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x0000);
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_4, 0x0000);
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_5, 0x0000);
+
+ msleep(40);
+
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0018);
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x6121);
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_4, 0x006f);
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_5, 0x495f);
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0800);
+
+ msleep(10);
+
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x103b);
+
+ msleep(50);
+
+ switch (mipi->rotation) {
+ default:
+ am_id = 0x30;
+ break;
+ case 90:
+ am_id = 0x18;
+ break;
+ case 180:
+ am_id = 0x00;
+ break;
+ case 270:
+ am_id = 0x28;
+ break;
+ }
+ ili9225_command(mipi, ILI9225_DRIVER_OUTPUT_CONTROL, 0x011c);
+ ili9225_command(mipi, ILI9225_LCD_AC_DRIVING_CONTROL, 0x0100);
+ ili9225_command(mipi, ILI9225_ENTRY_MODE, 0x1000 | am_id);
+ ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
+ ili9225_command(mipi, ILI9225_BLANK_PERIOD_CONTROL_1, 0x0808);
+ ili9225_command(mipi, ILI9225_FRAME_CYCLE_CONTROL, 0x1100);
+ ili9225_command(mipi, ILI9225_INTERFACE_CONTROL, 0x0000);
+ ili9225_command(mipi, ILI9225_OSCILLATION_CONTROL, 0x0d01);
+ ili9225_command(mipi, ILI9225_VCI_RECYCLING, 0x0020);
+ ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_1, 0x0000);
+ ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_2, 0x0000);
+
+ ili9225_command(mipi, ILI9225_GATE_SCAN_CONTROL, 0x0000);
+ ili9225_command(mipi, ILI9225_VERTICAL_SCROLL_1, 0x00db);
+ ili9225_command(mipi, ILI9225_VERTICAL_SCROLL_2, 0x0000);
+ ili9225_command(mipi, ILI9225_VERTICAL_SCROLL_3, 0x0000);
+ ili9225_command(mipi, ILI9225_PARTIAL_DRIVING_POS_1, 0x00db);
+ ili9225_command(mipi, ILI9225_PARTIAL_DRIVING_POS_2, 0x0000);
+
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_1, 0x0000);
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_2, 0x0808);
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_3, 0x080a);
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_4, 0x000a);
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_5, 0x0a08);
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_6, 0x0808);
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_7, 0x0000);
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_8, 0x0a00);
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_9, 0x0710);
+ ili9225_command(mipi, ILI9225_GAMMA_CONTROL_10, 0x0710);
+
+ ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x0012);
+
+ msleep(50);
+
+ ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
+
+ mipi_dbi_enable_flush(mipi);
+}
+
+static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!mipi->enabled)
+ return;
+
+ ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
+ msleep(50);
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0007);
+ msleep(50);
+ ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0a02);
+
+ mipi->enabled = false;
+}
+
+static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
+ size_t num)
+{
+ struct spi_device *spi = mipi->spi;
+ unsigned int bpw = 8;
+ u32 speed_hz;
+ int ret;
+
+ gpiod_set_value_cansleep(mipi->dc, 0);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
+ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+ if (ret || !num)
+ return ret;
+
+ if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
+ bpw = 16;
+
+ gpiod_set_value_cansleep(mipi->dc, 1);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+
+ return tinydrm_spi_transfer(spi, speed_hz, NULL, bpw, par, num);
+}
+
+static const u32 ili9225_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static int ili9225_init(struct device *dev, struct mipi_dbi *mipi,
+ const struct drm_simple_display_pipe_funcs *pipe_funcs,
+ struct drm_driver *driver,
+ const struct drm_display_mode *mode,
+ unsigned int rotation)
+{
+ size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
+ struct tinydrm_device *tdev = &mipi->tinydrm;
+ int ret;
+
+ if (!mipi->command)
+ return -EINVAL;
+
+ mutex_init(&mipi->cmdlock);
+
+ mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ if (!mipi->tx_buf)
+ return -ENOMEM;
+
+ ret = devm_tinydrm_init(dev, tdev, &ili9225_fb_funcs, driver);
+ if (ret)
+ return ret;
+
+ ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ ili9225_formats,
+ ARRAY_SIZE(ili9225_formats), mode,
+ rotation);
+ if (ret)
+ return ret;
+
+ tdev->drm->mode_config.preferred_depth = 16;
+ mipi->rotation = rotation;
+
+ drm_mode_config_reset(tdev->drm);
+
+ DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
+ tdev->drm->mode_config.preferred_depth, rotation);
+
+ return 0;
+}
+
+static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
+ .enable = ili9225_pipe_enable,
+ .disable = ili9225_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = tinydrm_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode ili9225_mode = {
+ TINYDRM_MODE(176, 220, 35, 44),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
+
+static struct drm_driver ili9225_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .fops = &ili9225_fops,
+ TINYDRM_GEM_DRIVER_OPS,
+ .lastclose = drm_fb_helper_lastclose,
+ .name = "ili9225",
+ .desc = "Ilitek ILI9225",
+ .date = "20171106",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id ili9225_of_match[] = {
+ { .compatible = "vot,v220hf01a-t" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ili9225_of_match);
+
+static const struct spi_device_id ili9225_id[] = {
+ { "v220hf01a-t", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ili9225_id);
+
+static int ili9225_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *rs;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(mipi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(mipi->reset);
+ }
+
+ rs = devm_gpiod_get(dev, "rs", GPIOD_OUT_LOW);
+ if (IS_ERR(rs)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'rs'\n");
+ return PTR_ERR(rs);
+ }
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, rs);
+ if (ret)
+ return ret;
+
+ /* override the command function set in mipi_dbi_spi_init() */
+ mipi->command = ili9225_dbi_command;
+
+ ret = ili9225_init(&spi->dev, mipi, &ili9225_pipe_funcs,
+ &ili9225_driver, &ili9225_mode, rotation);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void ili9225_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver ili9225_spi_driver = {
+ .driver = {
+ .name = "ili9225",
+ .owner = THIS_MODULE,
+ .of_match_table = ili9225_of_match,
+ },
+ .id_table = ili9225_id,
+ .probe = ili9225_probe,
+ .shutdown = ili9225_shutdown,
+};
+module_spi_driver(ili9225_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9225 DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
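A quick worked example of the ili9225_fb_dirty() window arithmetic above, for the unrotated case: the window registers take inclusive end coordinates, the RAM address registers take the first pixel to be written, and the GRAM payload is two bytes per RGB565 pixel. This is an illustrative sketch with made-up clip values, not part of the patch:

static size_t ili9225_clip_example(void)
{
	u16 cx1 = 8, cx2 = 72, cy1 = 16, cy2 = 48;	/* hypothetical clip, exclusive ends */
	u16 x1, x2, y1, y2, x_start, y_start;

	/* rotation == 0: window ends become inclusive, write starts at the top-left */
	x1 = cx1;		/* ILI9225_HORIZ_WINDOW_ADDR_2 = 8  */
	x2 = cx2 - 1;		/* ILI9225_HORIZ_WINDOW_ADDR_1 = 71 */
	y1 = cy1;		/* ILI9225_VERT_WINDOW_ADDR_2  = 16 */
	y2 = cy2 - 1;		/* ILI9225_VERT_WINDOW_ADDR_1  = 47 */
	x_start = x1;		/* ILI9225_RAM_ADDRESS_SET_1   = 8  */
	y_start = y1;		/* ILI9225_RAM_ADDRESS_SET_2   = 16 */

	pr_debug("window x=%u..%u y=%u..%u, start (%u, %u)\n",
		 x1, x2, y1, y2, x_start, y_start);

	/* GRAM payload: 64 x 32 pixels x 2 bytes (RGB565) = 4096 bytes */
	return (cx2 - cx1) * (cy2 - cy1) * 2;
}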
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 6a83b30..d8ed6e6 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -9,45 +9,60 @@
* (at your option) any later version.
*/
-#include <drm/tinydrm/ili9341.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
+#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
-static int mi0283qt_init(struct mipi_dbi *mipi)
+#define ILI9341_FRMCTR1 0xb1
+#define ILI9341_DISCTRL 0xb6
+#define ILI9341_ETMOD 0xb7
+
+#define ILI9341_PWCTRL1 0xc0
+#define ILI9341_PWCTRL2 0xc1
+#define ILI9341_VMCTRL1 0xc5
+#define ILI9341_VMCTRL2 0xc7
+#define ILI9341_PWCTRLA 0xcb
+#define ILI9341_PWCTRLB 0xcf
+
+#define ILI9341_PGAMCTRL 0xe0
+#define ILI9341_NGAMCTRL 0xe1
+#define ILI9341_DTCTRLA 0xe8
+#define ILI9341_DTCTRLB 0xea
+#define ILI9341_PWRSEQ 0xed
+
+#define ILI9341_EN3GAM 0xf2
+#define ILI9341_PUMPCTRL 0xf7
+
+#define ILI9341_MADCTL_BGR BIT(3)
+#define ILI9341_MADCTL_MV BIT(5)
+#define ILI9341_MADCTL_MX BIT(6)
+#define ILI9341_MADCTL_MY BIT(7)
+
+static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
{
- struct tinydrm_device *tdev = &mipi->tinydrm;
- struct device *dev = tdev->drm->dev;
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
u8 addr_mode;
int ret;
DRM_DEBUG_KMS("\n");
- ret = regulator_enable(mipi->regulator);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to enable regulator %d\n", ret);
- return ret;
- }
-
- /* Avoid flicker by skipping setup if the bootloader has done it */
- if (mipi_dbi_display_is_on(mipi))
- return 0;
-
- mipi_dbi_hw_reset(mipi);
- ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
- if (ret) {
- DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
- regulator_disable(mipi->regulator);
- return ret;
- }
-
- msleep(20);
+ ret = mipi_dbi_poweron_conditional_reset(mipi);
+ if (ret < 0)
+ return;
+ if (ret == 1)
+ goto out_enable;
mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
@@ -66,7 +81,7 @@ static int mi0283qt_init(struct mipi_dbi *mipi)
mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0xbe);
/* Memory Access Control */
- mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
switch (mipi->rotation) {
default:
@@ -110,19 +125,12 @@ static int mi0283qt_init(struct mipi_dbi *mipi)
mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
- return 0;
-}
-
-static void mi0283qt_fini(void *data)
-{
- struct mipi_dbi *mipi = data;
-
- DRM_DEBUG_KMS("\n");
- regulator_disable(mipi->regulator);
+out_enable:
+ mipi_dbi_enable_flush(mipi);
}
static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
- .enable = mipi_dbi_pipe_enable,
+ .enable = mi0283qt_enable,
.disable = mipi_dbi_pipe_disable,
.update = tinydrm_display_pipe_update,
.prepare_fb = tinydrm_display_pipe_prepare_fb,
@@ -139,7 +147,7 @@ static struct drm_driver mi0283qt_driver = {
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = tinydrm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
@@ -188,7 +196,7 @@ static int mi0283qt_probe(struct spi_device *spi)
if (IS_ERR(mipi->regulator))
return PTR_ERR(mipi->regulator);
- mipi->backlight = tinydrm_of_find_backlight(dev);
+ mipi->backlight = devm_of_find_backlight(dev);
if (IS_ERR(mipi->backlight))
return PTR_ERR(mipi->backlight);
@@ -203,17 +211,6 @@ static int mi0283qt_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mi0283qt_init(mipi);
- if (ret)
- return ret;
-
- /* use devres to fini after drm unregister (drv->remove is before) */
- ret = devm_add_action(dev, mi0283qt_fini, mipi);
- if (ret) {
- mi0283qt_fini(mipi);
- return ret;
- }
-
spi_set_drvdata(spi, mipi);
return devm_tinydrm_register(&mipi->tinydrm);
@@ -229,27 +226,17 @@ static void mi0283qt_shutdown(struct spi_device *spi)
static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
{
struct mipi_dbi *mipi = dev_get_drvdata(dev);
- int ret;
- ret = tinydrm_suspend(&mipi->tinydrm);
- if (ret)
- return ret;
-
- mi0283qt_fini(mipi);
-
- return 0;
+ return drm_mode_config_helper_suspend(mipi->tinydrm.drm);
}
static int __maybe_unused mi0283qt_pm_resume(struct device *dev)
{
struct mipi_dbi *mipi = dev_get_drvdata(dev);
- int ret;
- ret = mi0283qt_init(mipi);
- if (ret)
- return ret;
+ drm_mode_config_helper_resume(mipi->tinydrm.drm);
- return tinydrm_resume(&mipi->tinydrm);
+ return 0;
}
static const struct dev_pm_ops mi0283qt_pm_ops = {
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index d43e992..9e90381 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -154,8 +154,18 @@ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
}
EXPORT_SYMBOL(mipi_dbi_command_buf);
-static int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
- struct drm_clip_rect *clip, bool swap)
+/**
+ * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
+ * @dst: The destination buffer
+ * @fb: The source framebuffer
+ * @clip: Clipping rectangle of the area to be copied
+ * @swap: When true, swap MSB/LSB of 16-bit values
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip, bool swap)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
@@ -192,6 +202,7 @@ static int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
DMA_FROM_DEVICE);
return ret;
}
+EXPORT_SYMBOL(mipi_dbi_buf_copy);
static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -260,29 +271,24 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
};
/**
- * mipi_dbi_pipe_enable - MIPI DBI pipe enable helper
- * @pipe: Display pipe
- * @crtc_state: CRTC state
+ * mipi_dbi_enable_flush - MIPI DBI enable helper
+ * @mipi: MIPI DBI structure
*
- * This function enables backlight. Drivers can use this as their
+ * This function sets &mipi_dbi->enabled, flushes the whole framebuffer and
+ * enables the backlight. Drivers can use this in their
* &drm_simple_display_pipe_funcs->enable callback.
*/
-void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state)
+void mipi_dbi_enable_flush(struct mipi_dbi *mipi)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
- struct drm_framebuffer *fb = pipe->plane.fb;
-
- DRM_DEBUG_KMS("\n");
+ struct drm_framebuffer *fb = mipi->tinydrm.pipe.plane.fb;
mipi->enabled = true;
if (fb)
fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
- tinydrm_enable_backlight(mipi->backlight);
+ backlight_enable(mipi->backlight);
}
-EXPORT_SYMBOL(mipi_dbi_pipe_enable);
+EXPORT_SYMBOL(mipi_dbi_enable_flush);
static void mipi_dbi_blank(struct mipi_dbi *mipi)
{
@@ -305,8 +311,8 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
* mipi_dbi_pipe_disable - MIPI DBI pipe disable helper
* @pipe: Display pipe
*
- * This function disables backlight if present or if not the
- * display memory is blanked. Drivers can use this as their
+ * This function disables the backlight if present; if not, the display memory is
+ * blanked. The regulator is disabled if in use. Drivers can use this as their
* &drm_simple_display_pipe_funcs->disable callback.
*/
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -319,9 +325,12 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
mipi->enabled = false;
if (mipi->backlight)
- tinydrm_disable_backlight(mipi->backlight);
+ backlight_disable(mipi->backlight);
else
mipi_dbi_blank(mipi);
+
+ if (mipi->regulator)
+ regulator_disable(mipi->regulator);
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
@@ -405,7 +414,7 @@ void mipi_dbi_hw_reset(struct mipi_dbi *mipi)
return;
gpiod_set_value_cansleep(mipi->reset, 0);
- msleep(20);
+ usleep_range(20, 1000);
gpiod_set_value_cansleep(mipi->reset, 1);
msleep(120);
}
@@ -432,6 +441,7 @@ bool mipi_dbi_display_is_on(struct mipi_dbi *mipi)
val &= ~DCS_POWER_MODE_RESERVED_MASK;
+ /* The poweron/reset value is 08h (DCS_POWER_MODE_DISPLAY_NORMAL_MODE) */
if (val != (DCS_POWER_MODE_DISPLAY |
DCS_POWER_MODE_DISPLAY_NORMAL_MODE | DCS_POWER_MODE_SLEEP_MODE))
return false;
@@ -442,20 +452,97 @@ bool mipi_dbi_display_is_on(struct mipi_dbi *mipi)
}
EXPORT_SYMBOL(mipi_dbi_display_is_on);
+static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
+{
+ struct device *dev = mipi->tinydrm.drm->dev;
+ int ret;
+
+ if (mipi->regulator) {
+ ret = regulator_enable(mipi->regulator);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to enable regulator (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (cond && mipi_dbi_display_is_on(mipi))
+ return 1;
+
+ mipi_dbi_hw_reset(mipi);
+ ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to send reset command (%d)\n", ret);
+ if (mipi->regulator)
+ regulator_disable(mipi->regulator);
+ return ret;
+ }
+
+ /*
+ * If we did a hw reset, we know the controller is in Sleep mode and
+ * per MIPI DCS spec should wait 5ms after soft reset. If we didn't,
+ * we assume worst case and wait 120ms.
+ */
+ if (mipi->reset)
+ usleep_range(5000, 20000);
+ else
+ msleep(120);
+
+ return 0;
+}
+
+/**
+ * mipi_dbi_poweron_reset - MIPI DBI poweron and reset
+ * @mipi: MIPI DBI structure
+ *
+ * This function enables the regulator if used and does a hardware and software
+ * reset.
+ *
+ * Returns:
+ * Zero on success, or a negative error code.
+ */
+int mipi_dbi_poweron_reset(struct mipi_dbi *mipi)
+{
+ return mipi_dbi_poweron_reset_conditional(mipi, false);
+}
+EXPORT_SYMBOL(mipi_dbi_poweron_reset);
+
+/**
+ * mipi_dbi_poweron_conditional_reset - MIPI DBI poweron and conditional reset
+ * @mipi: MIPI DBI structure
+ *
+ * This function enables the regulator if used and if the display is off, it
+ * does a hardware and software reset. If mipi_dbi_display_is_on() determines
+ * that the display is on, no reset is performed.
+ *
+ * Returns:
+ * Zero if the controller was reset, 1 if the display was already on, or a
+ * negative error code.
+ */
+int mipi_dbi_poweron_conditional_reset(struct mipi_dbi *mipi)
+{
+ return mipi_dbi_poweron_reset_conditional(mipi, true);
+}
+EXPORT_SYMBOL(mipi_dbi_poweron_conditional_reset);
+
#if IS_ENABLED(CONFIG_SPI)
-/*
+/**
+ * mipi_dbi_spi_cmd_max_speed - get the maximum SPI bus speed
+ * @spi: SPI device
+ * @len: The transfer buffer length.
+ *
* Many controllers have a max speed of 10MHz, but can be pushed way beyond
* that. Increase reliability by running pixel data at max speed and the rest
* at 10MHz, preventing transfer glitches from messing up the init settings.
*/
-static u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len)
+u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len)
{
if (len > 64)
return 0; /* use default */
return min_t(u32, 10000000, spi->max_speed_hz);
}
+EXPORT_SYMBOL(mipi_dbi_spi_cmd_max_speed);
/*
* MIPI DBI Type C Option 1
@@ -961,10 +1048,6 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
.write = mipi_dbi_debugfs_command_write,
};
-static const struct drm_info_list mipi_dbi_debugfs_list[] = {
- { "fb", drm_fb_cma_debugfs_show, 0 },
-};
-
/**
* mipi_dbi_debugfs_init - Create debugfs entries
* @minor: DRM minor
@@ -987,9 +1070,7 @@ int mipi_dbi_debugfs_init(struct drm_minor *minor)
debugfs_create_file("command", mode, minor->debugfs_root, mipi,
&mipi_dbi_debugfs_command_fops);
- return drm_debugfs_create_files(mipi_dbi_debugfs_list,
- ARRAY_SIZE(mipi_dbi_debugfs_list),
- minor->debugfs_root, minor);
+ return 0;
}
EXPORT_SYMBOL(mipi_dbi_debugfs_init);
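The new poweron helpers give panel drivers a common return-value convention: 0 means the controller was reset and needs its full init sequence, 1 means the display was already on (so init can be skipped to avoid flicker), and a negative value is an error. A minimal sketch of an enable callback using the conditional variant, along the lines of mi0283qt_enable() above (the init commands shown are placeholders, not a real panel sequence):

static void example_pipe_enable(struct drm_simple_display_pipe *pipe,
				struct drm_crtc_state *crtc_state)
{
	struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
	struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
	int ret;

	ret = mipi_dbi_poweron_conditional_reset(mipi);
	if (ret < 0)		/* regulator or reset command failed */
		return;
	if (ret == 1)		/* bootloader left the display on, skip init */
		goto out_flush;

	/* controller-specific init sequence goes here */
	mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
	msleep(120);
	mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);

out_flush:
	mipi_dbi_enable_flush(mipi);	/* flush the framebuffer and enable backlight */
}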
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 0a2c60d..a6396ef 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,6 +17,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -178,20 +179,16 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
{
struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
- struct drm_framebuffer *fb = pipe->plane.fb;
- struct device *dev = tdev->drm->dev;
int ret;
u8 addr_mode;
DRM_DEBUG_KMS("\n");
- mipi_dbi_hw_reset(mipi);
- ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
- if (ret) {
- DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
+ ret = mipi_dbi_poweron_reset(mipi);
+ if (ret)
return;
- }
+ mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
msleep(10);
@@ -240,10 +237,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
- mipi->enabled = true;
-
- if (fb)
- fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
+ mipi_dbi_enable_flush(mipi);
}
static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -320,7 +314,7 @@ static struct drm_driver st7586_driver = {
DRIVER_ATOMIC,
.fops = &st7586_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = tinydrm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
new file mode 100644
index 0000000..67d197e
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Sitronix ST7735R panels
+ *
+ * Copyright 2017 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+
+#define ST7735R_FRMCTR1 0xb1
+#define ST7735R_FRMCTR2 0xb2
+#define ST7735R_FRMCTR3 0xb3
+#define ST7735R_INVCTR 0xb4
+#define ST7735R_PWCTR1 0xc0
+#define ST7735R_PWCTR2 0xc1
+#define ST7735R_PWCTR3 0xc2
+#define ST7735R_PWCTR4 0xc3
+#define ST7735R_PWCTR5 0xc4
+#define ST7735R_VMCTR1 0xc5
+#define ST7735R_GAMCTRP1 0xe0
+#define ST7735R_GAMCTRN1 0xe1
+
+#define ST7735R_MY BIT(7)
+#define ST7735R_MX BIT(6)
+#define ST7735R_MV BIT(5)
+
+static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ int ret;
+ u8 addr_mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_reset(mipi);
+ if (ret)
+ return;
+
+ msleep(150);
+
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(500);
+
+ mipi_dbi_command(mipi, ST7735R_FRMCTR1, 0x01, 0x2c, 0x2d);
+ mipi_dbi_command(mipi, ST7735R_FRMCTR2, 0x01, 0x2c, 0x2d);
+ mipi_dbi_command(mipi, ST7735R_FRMCTR3, 0x01, 0x2c, 0x2d, 0x01, 0x2c,
+ 0x2d);
+ mipi_dbi_command(mipi, ST7735R_INVCTR, 0x07);
+ mipi_dbi_command(mipi, ST7735R_PWCTR1, 0xa2, 0x02, 0x84);
+ mipi_dbi_command(mipi, ST7735R_PWCTR2, 0xc5);
+ mipi_dbi_command(mipi, ST7735R_PWCTR3, 0x0a, 0x00);
+ mipi_dbi_command(mipi, ST7735R_PWCTR4, 0x8a, 0x2a);
+ mipi_dbi_command(mipi, ST7735R_PWCTR5, 0x8a, 0xee);
+ mipi_dbi_command(mipi, ST7735R_VMCTR1, 0x0e);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE);
+ switch (mipi->rotation) {
+ default:
+ addr_mode = ST7735R_MX | ST7735R_MY;
+ break;
+ case 90:
+ addr_mode = ST7735R_MX | ST7735R_MV;
+ break;
+ case 180:
+ addr_mode = 0;
+ break;
+ case 270:
+ addr_mode = ST7735R_MY | ST7735R_MV;
+ break;
+ }
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
+ MIPI_DCS_PIXEL_FMT_16BIT);
+ mipi_dbi_command(mipi, ST7735R_GAMCTRP1, 0x02, 0x1c, 0x07, 0x12, 0x37,
+ 0x32, 0x29, 0x2d, 0x29, 0x25, 0x2b, 0x39, 0x00, 0x01,
+ 0x03, 0x10);
+ mipi_dbi_command(mipi, ST7735R_GAMCTRN1, 0x03, 0x1d, 0x07, 0x06, 0x2e,
+ 0x2c, 0x29, 0x2d, 0x2e, 0x2e, 0x37, 0x3f, 0x00, 0x00,
+ 0x02, 0x10);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+
+ msleep(100);
+
+ mipi_dbi_command(mipi, MIPI_DCS_ENTER_NORMAL_MODE);
+
+ msleep(20);
+
+ mipi_dbi_enable_flush(mipi);
+}
+
+static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
+ .enable = jd_t18003_t01_pipe_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = tinydrm_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode jd_t18003_t01_mode = {
+ TINYDRM_MODE(128, 160, 28, 35),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
+
+static struct drm_driver st7735r_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .fops = &st7735r_fops,
+ TINYDRM_GEM_DRIVER_OPS,
+ .lastclose = drm_fb_helper_lastclose,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "st7735r",
+ .desc = "Sitronix ST7735R",
+ .date = "20171128",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id st7735r_of_match[] = {
+ { .compatible = "jianda,jd-t18003-t01" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, st7735r_of_match);
+
+static const struct spi_device_id st7735r_id[] = {
+ { "jd-t18003-t01", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, st7735r_id);
+
+static int st7735r_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(mipi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(mipi->reset);
+ }
+
+ dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ mipi->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(mipi->backlight))
+ return PTR_ERR(mipi->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, dc);
+ if (ret)
+ return ret;
+
+ /* Cannot read from Adafruit 1.8" display via SPI */
+ mipi->read_commands = NULL;
+
+ ret = mipi_dbi_init(&spi->dev, mipi, &jd_t18003_t01_pipe_funcs,
+ &st7735r_driver, &jd_t18003_t01_mode, rotation);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void st7735r_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver st7735r_spi_driver = {
+ .driver = {
+ .name = "st7735r",
+ .owner = THIS_MODULE,
+ .of_match_table = st7735r_of_match,
+ },
+ .id_table = st7735r_id,
+ .probe = st7735r_probe,
+ .shutdown = st7735r_shutdown,
+};
+module_spi_driver(st7735r_spi_driver);
+
+MODULE_DESCRIPTION("Sitronix ST7735R DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 028ab60..7c2485f 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -50,6 +50,7 @@ struct ttm_agp_backend {
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
@@ -64,7 +65,7 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
struct page *page = ttm->pages[i];
if (!page)
- page = ttm->dummy_read_page;
+ page = dummy_read_page;
mem->pages[mem->page_count++] = page;
}
@@ -109,10 +110,9 @@ static struct ttm_backend_func ttm_agp_func = {
.destroy = ttm_agp_destroy,
};
-struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
struct agp_bridge_data *bridge,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+ uint32_t page_flags)
{
struct ttm_agp_backend *agp_be;
@@ -124,7 +124,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
agp_be->bridge = bridge;
agp_be->ttm.func = &ttm_agp_func;
- if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
kfree(agp_be);
return NULL;
}
@@ -133,12 +133,12 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_agp_tt_create);
-int ttm_agp_tt_populate(struct ttm_tt *ttm)
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
if (ttm->state != tt_unpopulated)
return 0;
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
EXPORT_SYMBOL(ttm_agp_tt_populate);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c088703..98e06f8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -42,11 +42,6 @@
#include <linux/atomic.h>
#include <linux/reservation.h>
-#define TTM_ASSERT_LOCKED(param)
-#define TTM_DEBUG(fmt, arg...)
-#define TTM_BO_HASH_ORDER 13
-
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
static struct attribute ttm_bo_count = {
@@ -54,6 +49,12 @@ static struct attribute ttm_bo_count = {
.mode = S_IRUGO
};
+/* default destructor */
+static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
+{
+ kfree(bo);
+}
+
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
uint32_t *mem_type)
{
@@ -148,15 +149,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
- atomic_dec(&bo->glob->bo_count);
+ atomic_dec(&bo->bdev->glob->bo_count);
dma_fence_put(bo->moving);
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
- if (bo->destroy)
- bo->destroy(bo);
- else {
- kfree(bo);
- }
+ bo->destroy(bo);
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
@@ -165,19 +162,19 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-
BUG_ON(!list_empty(&bo->lru));
man = &bdev->man[bo->mem.mem_type];
list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref);
- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (bo->ttm && !(bo->ttm->page_flags &
+ (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap,
- &bo->glob->swap_lru[bo->priority]);
+ &bdev->glob->swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}
@@ -208,70 +205,26 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
- spin_lock(&bo->glob->lru_lock);
+ struct ttm_bo_global *glob = bo->bdev->glob;
+
+ spin_lock(&glob->lru_lock);
ttm_bo_del_from_lru(bo);
- spin_unlock(&bo->glob->lru_lock);
+ spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
-/*
- * Call bo->mutex locked.
- */
-static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
- int ret = 0;
- uint32_t page_flags = 0;
-
- TTM_ASSERT_LOCKED(&bo->mutex);
- bo->ttm = NULL;
-
- if (bdev->need_dma32)
- page_flags |= TTM_PAGE_FLAG_DMA32;
-
- switch (bo->type) {
- case ttm_bo_type_device:
- if (zero_alloc)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
- case ttm_bo_type_kernel:
- bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags, glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL))
- ret = -ENOMEM;
- break;
- case ttm_bo_type_sg:
- bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags | TTM_PAGE_FLAG_SG,
- glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL)) {
- ret = -ENOMEM;
- break;
- }
- bo->ttm->sg = bo->sg;
- break;
- default:
- pr_err("Illegal buffer object type\n");
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem,
- bool evict, bool interruptible,
- bool no_wait_gpu)
+ struct ttm_mem_reg *mem, bool evict,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -296,7 +249,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
if (bo->ttm == NULL) {
bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
- ret = ttm_bo_add_ttm(bo, zero);
+ ret = ttm_tt_create(bo, zero);
if (ret)
goto out_err;
}
@@ -306,7 +259,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
goto out_err;
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(bo->ttm, mem);
+ ret = ttm_tt_bind(bo->ttm, mem, ctx);
if (ret)
goto out_err;
}
@@ -325,12 +278,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, ctx, mem);
else if (bdev->driver->move)
- ret = bdev->driver->move(bo, evict, interruptible,
- no_wait_gpu, mem);
+ ret = bdev->driver->move(bo, evict, ctx, mem);
else
- ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, ctx, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -355,13 +307,13 @@ moved:
bo->evicted = false;
}
- if (bo->mem.mm_node) {
+ if (bo->mem.mm_node)
bo->offset = (bo->mem.start << PAGE_SHIFT) +
bdev->man[bo->mem.mem_type].gpu_offset;
- bo->cur_placement = bo->mem.placement;
- } else
+ else
bo->offset = 0;
+ ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
return 0;
out_err:
@@ -390,8 +342,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
ttm_bo_mem_put(bo, &bo->mem);
-
- ww_mutex_unlock (&bo->resv->lock);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -433,7 +383,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
+ struct ttm_bo_global *glob = bdev->glob;
int ret;
ret = ttm_bo_individualize_resv(bo);
@@ -448,7 +398,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
}
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, NULL);
+ ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
if (!ret) {
if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
ttm_bo_del_from_lru(bo);
@@ -457,6 +407,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
reservation_object_unlock(&bo->ttm_resv);
ttm_bo_cleanup_memtype_use(bo);
+ reservation_object_unlock(bo->resv);
return;
}
@@ -472,7 +423,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
- __ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
}
if (bo->resv != &bo->ttm_resv)
reservation_object_unlock(&bo->ttm_resv);
@@ -487,22 +438,23 @@ error:
}
/**
- * function ttm_bo_cleanup_refs_and_unlock
+ * function ttm_bo_cleanup_refs
* If bo idle, remove from delayed- and lru lists, and unref.
* If not idle, do nothing.
*
* Must be called with lru_lock and reservation held, this function
- * will drop both before returning.
+ * will drop the lru lock and optionally the reservation lock before returning.
*
* @interruptible Any sleeps should occur interruptibly.
* @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
+ * @unlock_resv Unlock the reservation lock as well.
*/
-static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait_gpu)
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
+ bool interruptible, bool no_wait_gpu,
+ bool unlock_resv)
{
- struct ttm_bo_global *glob = bo->glob;
+ struct ttm_bo_global *glob = bo->bdev->glob;
struct reservation_object *resv;
int ret;
@@ -518,7 +470,9 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
if (ret && !no_wait_gpu) {
long lret;
- ww_mutex_unlock(&bo->resv->lock);
+
+ if (unlock_resv)
+ reservation_object_unlock(bo->resv);
spin_unlock(&glob->lru_lock);
lret = reservation_object_wait_timeout_rcu(resv, true,
@@ -531,24 +485,24 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return -EBUSY;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, NULL);
-
- /*
- * We raced, and lost, someone else holds the reservation now,
- * and is probably busy in ttm_bo_cleanup_memtype_use.
- *
- * Even if it's not the case, because we finished waiting any
- * delayed destruction would succeed, so just return success
- * here.
- */
- if (ret) {
+ if (unlock_resv && !reservation_object_trylock(bo->resv)) {
+ /*
+ * We raced, and lost, someone else holds the reservation now,
+ * and is probably busy in ttm_bo_cleanup_memtype_use.
+ *
+ * Even if it's not the case, because we finished waiting any
+ * delayed destruction would succeed, so just return success
+ * here.
+ */
spin_unlock(&glob->lru_lock);
return 0;
}
+ ret = 0;
}
if (ret || unlikely(list_empty(&bo->ddestroy))) {
- __ttm_bo_unreserve(bo);
+ if (unlock_resv)
+ reservation_object_unlock(bo->resv);
spin_unlock(&glob->lru_lock);
return ret;
}
@@ -560,6 +514,9 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
+ if (unlock_resv)
+ reservation_object_unlock(bo->resv);
+
return 0;
}
@@ -567,60 +524,44 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
* Traverse the delayed list, and call ttm_bo_cleanup_refs on all
* encountered buffers.
*/
-
-static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry = NULL;
- int ret = 0;
+ struct list_head removed;
+ bool empty;
- spin_lock(&glob->lru_lock);
- if (list_empty(&bdev->ddestroy))
- goto out_unlock;
+ INIT_LIST_HEAD(&removed);
- entry = list_first_entry(&bdev->ddestroy,
- struct ttm_buffer_object, ddestroy);
- kref_get(&entry->list_kref);
-
- for (;;) {
- struct ttm_buffer_object *nentry = NULL;
+ spin_lock(&glob->lru_lock);
+ while (!list_empty(&bdev->ddestroy)) {
+ struct ttm_buffer_object *bo;
- if (entry->ddestroy.next != &bdev->ddestroy) {
- nentry = list_first_entry(&entry->ddestroy,
- struct ttm_buffer_object, ddestroy);
- kref_get(&nentry->list_kref);
- }
+ bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
+ ddestroy);
+ kref_get(&bo->list_kref);
+ list_move_tail(&bo->ddestroy, &removed);
- ret = __ttm_bo_reserve(entry, false, true, NULL);
- if (remove_all && ret) {
+ if (remove_all || bo->resv != &bo->ttm_resv) {
spin_unlock(&glob->lru_lock);
- ret = __ttm_bo_reserve(entry, false, false, NULL);
+ reservation_object_lock(bo->resv, NULL);
+
spin_lock(&glob->lru_lock);
- }
+ ttm_bo_cleanup_refs(bo, false, !remove_all, true);
- if (!ret)
- ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
- !remove_all);
- else
+ } else if (reservation_object_trylock(bo->resv)) {
+ ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+ } else {
spin_unlock(&glob->lru_lock);
+ }
- kref_put(&entry->list_kref, ttm_bo_release_list);
- entry = nentry;
-
- if (ret || !entry)
- goto out;
-
+ kref_put(&bo->list_kref, ttm_bo_release_list);
spin_lock(&glob->lru_lock);
- if (list_empty(&entry->ddestroy))
- break;
}
-
-out_unlock:
+ list_splice_tail(&removed, &bdev->ddestroy);
+ empty = list_empty(&bdev->ddestroy);
spin_unlock(&glob->lru_lock);
-out:
- if (entry)
- kref_put(&entry->list_kref, ttm_bo_release_list);
- return ret;
+
+ return empty;
}
static void ttm_bo_delayed_workqueue(struct work_struct *work)
@@ -628,10 +569,9 @@ static void ttm_bo_delayed_workqueue(struct work_struct *work)
struct ttm_bo_device *bdev =
container_of(work, struct ttm_bo_device, wq.work);
- if (ttm_bo_delayed_delete(bdev, false)) {
+ if (!ttm_bo_delayed_delete(bdev, false))
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
- }
}
static void ttm_bo_release(struct kref *kref)
@@ -672,26 +612,34 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
-static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait_gpu)
+static int ttm_bo_evict(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg evict_mem;
struct ttm_placement placement;
int ret = 0;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
+
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+
+ if (!placement.num_placement && !placement.num_busy_placement) {
+ ret = ttm_bo_pipeline_gutting(bo);
+ if (ret)
+ return ret;
+
+ return ttm_tt_create(bo, false);
+ }
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
evict_mem.bus.io_reserved_vm = false;
evict_mem.bus.io_reserved_count = 0;
- placement.num_placement = 0;
- placement.num_busy_placement = 0;
- bdev->driver->evict_flags(bo, &placement);
- ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -701,8 +649,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
goto out;
}
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
@@ -728,49 +675,79 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+/**
+ * Check whether the target bo is allowed to be evicted or swapped out,
+ * covering these cases:
+ *
+ * a. if it shares the reservation object with ctx->resv, the reservation is
+ * assumed to be held already, so don't lock it again; return true directly
+ * when either the operation allows reserved eviction or the target bo is
+ * already on the delayed free list;
+ *
+ * b. otherwise, trylock it.
+ */
+static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx, bool *locked)
+{
+ bool ret = false;
+
+ *locked = false;
+ if (bo->resv == ctx->resv) {
+ reservation_object_assert_held(bo->resv);
+ if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
+ || !list_empty(&bo->ddestroy))
+ ret = true;
+ } else {
+ *locked = reservation_object_trylock(bo->resv);
+ ret = *locked;
+ }
+
+ return ret;
+}
+
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
- uint32_t mem_type,
- const struct ttm_place *place,
- bool interruptible,
- bool no_wait_gpu)
+ uint32_t mem_type,
+ const struct ttm_place *place,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_buffer_object *bo;
- int ret = -EBUSY;
+ struct ttm_buffer_object *bo = NULL;
+ bool locked = false;
unsigned i;
+ int ret;
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
- ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (ret)
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
continue;
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
- __ttm_bo_unreserve(bo);
- ret = -EBUSY;
+ if (locked)
+ reservation_object_unlock(bo->resv);
continue;
}
-
break;
}
- if (!ret)
+ /* If the inner loop terminated early, we have our candidate */
+ if (&bo->lru != &man->lru[i])
break;
+
+ bo = NULL;
}
- if (ret) {
+ if (!bo) {
spin_unlock(&glob->lru_lock);
- return ret;
+ return -EBUSY;
}
kref_get(&bo->list_kref);
if (!list_empty(&bo->ddestroy)) {
- ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
+ ctx->no_wait_gpu, locked);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -778,10 +755,14 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- BUG_ON(ret != 0);
-
- ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
- ttm_bo_unreserve(bo);
+ ret = ttm_bo_evict(bo, ctx);
+ if (locked) {
+ ttm_bo_unreserve(bo);
+ } else {
+ spin_lock(&glob->lru_lock);
+ ttm_bo_add_to_lru(bo);
+ spin_unlock(&glob->lru_lock);
+ }
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@@ -832,8 +813,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
const struct ttm_place *place,
struct ttm_mem_reg *mem,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -845,8 +825,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type, place,
- interruptible, no_wait_gpu);
+ ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -909,8 +888,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -1004,8 +982,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}
- ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
- interruptible, no_wait_gpu);
+ ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
return 0;
@@ -1024,14 +1001,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_mem_space);
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx)
{
int ret = 0;
struct ttm_mem_reg mem;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1041,12 +1017,10 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem,
- interruptible, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false,
- interruptible, no_wait_gpu);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
if (ret && mem.mm_node)
ttm_bo_mem_put(bo, &mem);
@@ -1097,20 +1071,18 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement,
EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx)
{
int ret;
uint32_t new_flags;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
/*
* Check whether we need to move buffer.
*/
if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
} else {
@@ -1125,7 +1097,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
* We might need to add a TTM.
*/
if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
- ret = ttm_bo_add_ttm(bo, true);
+ ret = ttm_tt_create(bo, true);
if (ret)
return ret;
}
@@ -1139,8 +1111,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- bool interruptible,
- struct file *persistent_swap_storage,
+ struct ttm_operation_ctx *ctx,
size_t acc_size,
struct sg_table *sg,
struct reservation_object *resv,
@@ -1151,7 +1122,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
if (ret) {
pr_err("Out of kernel memory\n");
if (destroy)
@@ -1171,7 +1142,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
ttm_mem_global_free(mem_glob, acc_size);
return -EINVAL;
}
- bo->destroy = destroy;
+ bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
kref_init(&bo->list_kref);
@@ -1182,7 +1153,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->io_reserve_lru);
mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
- bo->glob = bdev->glob;
bo->type = type;
bo->num_pages = num_pages;
bo->mem.size = num_pages << PAGE_SHIFT;
@@ -1194,17 +1164,16 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->mem.bus.io_reserved_count = 0;
bo->moving = NULL;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
- bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
bo->sg = sg;
if (resv) {
bo->resv = resv;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
} else {
bo->resv = &bo->ttm_resv;
}
reservation_object_init(&bo->ttm_resv);
- atomic_inc(&bo->glob->bo_count);
+ atomic_inc(&bo->bdev->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
bo->priority = 0;
@@ -1221,12 +1190,12 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* since otherwise lockdep will be angered in radeon.
*/
if (!resv) {
- locked = ww_mutex_trylock(&bo->resv->lock);
+ locked = reservation_object_trylock(bo->resv);
WARN_ON(!locked);
}
if (likely(!ret))
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, ctx);
if (unlikely(ret)) {
if (!resv)
@@ -1237,9 +1206,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
}
if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bo->glob->lru_lock);
+ spin_lock(&bdev->glob->lru_lock);
ttm_bo_add_to_lru(bo);
- spin_unlock(&bo->glob->lru_lock);
+ spin_unlock(&bdev->glob->lru_lock);
}
return ret;
@@ -1253,17 +1222,16 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_placement *placement,
uint32_t page_alignment,
bool interruptible,
- struct file *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
struct reservation_object *resv,
void (*destroy) (struct ttm_buffer_object *))
{
+ struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
- page_alignment, interruptible,
- persistent_swap_storage, acc_size,
+ page_alignment, &ctx, acc_size,
sg, resv, destroy);
if (ret)
return ret;
@@ -1309,7 +1277,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
struct ttm_placement *placement,
uint32_t page_alignment,
bool interruptible,
- struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
@@ -1322,7 +1289,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
- interruptible, persistent_swap_storage, acc_size,
+ interruptible, acc_size,
NULL, NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
@@ -1334,6 +1301,11 @@ EXPORT_SYMBOL(ttm_bo_create);
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
unsigned mem_type)
{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ .flags = TTM_OPT_FLAG_FORCE_ALLOC
+ };
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
struct dma_fence *fence;
@@ -1348,7 +1320,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
@@ -1469,7 +1441,6 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
struct ttm_bo_global *glob =
container_of(kobj, struct ttm_bo_global, kobj);
- ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
__free_page(glob->dummy_read_page);
kfree(glob);
}
@@ -1494,6 +1465,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
mutex_init(&glob->device_list_mutex);
spin_lock_init(&glob->lru_lock);
glob->mem_glob = bo_ref->mem_glob;
+ glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1504,14 +1476,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&glob->swap_lru[i]);
INIT_LIST_HEAD(&glob->device_list);
-
- ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
- ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
- if (unlikely(ret != 0)) {
- pr_err("Could not register buffer object swapout\n");
- goto out_no_shrink;
- }
-
atomic_set(&glob->bo_count, 0);
ret = kobject_init_and_add(
@@ -1519,8 +1483,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
return ret;
-out_no_shrink:
- __free_page(glob->dummy_read_page);
out_no_drp:
kfree(glob);
return ret;
@@ -1554,16 +1516,13 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
cancel_delayed_work_sync(&bdev->wq);
- while (ttm_bo_delayed_delete(bdev, true))
- ;
+ if (ttm_bo_delayed_delete(bdev, true))
+ pr_debug("Delayed destroy list was clean\n");
spin_lock(&glob->lru_lock);
- if (list_empty(&bdev->ddestroy))
- TTM_DEBUG("Delayed destroy list was clean\n");
-
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&bdev->man[0].lru[0]))
- TTM_DEBUG("Swap list %d was clean\n", i);
+ pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
drm_vma_offset_manager_destroy(&bdev->vma_manager);
@@ -1706,21 +1665,20 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
*/
-
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
{
- struct ttm_bo_global *glob =
- container_of(shrink, struct ttm_bo_global, shrink);
struct ttm_buffer_object *bo;
int ret = -EBUSY;
+ bool locked;
unsigned i;
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
- ret = __ttm_bo_reserve(bo, false, true, NULL);
- if (!ret)
+ if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
+ ret = 0;
break;
+ }
}
if (!ret)
break;
@@ -1734,7 +1692,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
kref_get(&bo->list_kref);
if (!list_empty(&bo->ddestroy)) {
- ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+ ret = ttm_bo_cleanup_refs(bo, false, false, locked);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -1748,6 +1706,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
if (bo->mem.mem_type != TTM_PL_SYSTEM ||
bo->ttm->caching_state != tt_cached) {
+ struct ttm_operation_ctx ctx = { false, false };
struct ttm_mem_reg evict_mem;
evict_mem = bo->mem;
@@ -1755,8 +1714,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
evict_mem.mem_type = TTM_PL_SYSTEM;
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false);
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
if (unlikely(ret != 0))
goto out;
}
@@ -1787,15 +1745,21 @@ out:
* Unreserve without putting on LRU to avoid swapping out an
* already swapped buffer.
*/
-
- __ttm_bo_unreserve(bo);
+ if (locked)
+ reservation_object_unlock(bo->resv);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_swapout);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
- while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
+ while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
@@ -1822,10 +1786,12 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
return -ERESTARTSYS;
if (!ww_mutex_is_locked(&bo->resv->lock))
goto out_unlock;
- ret = __ttm_bo_reserve(bo, true, false, NULL);
+ ret = reservation_object_lock_interruptible(bo->resv, NULL);
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
if (unlikely(ret != 0))
goto out_unlock;
- __ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
out_unlock:
mutex_unlock(&bo->wu_mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index e7a519f..2ebbae6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
@@ -53,7 +53,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
@@ -73,7 +73,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
return ret;
if (new_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(ttm, new_mem);
+ ret = ttm_tt_bind(ttm, new_mem, ctx);
if (unlikely(ret != 0))
return ret;
}
@@ -255,6 +255,54 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
+#ifdef CONFIG_X86
+#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
+#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
+#else
+#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
+#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
+#endif
+
+
+/**
+ * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
+ * specified page protection.
+ *
+ * @page: The page to map.
+ * @prot: The page protection.
+ *
+ * This function maps a TTM page using the kmap_atomic api if available,
+ * otherwise falls back to vmap. The user must make sure that the
+ * specified page does not have an aliased mapping with a different caching
+ * policy unless the architecture explicitly allows it. Also mapping and
+ * unmapping using this api must be correctly nested. Unmapping should
+ * occur in the reverse order of mapping.
+ */
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+ return kmap_atomic(page);
+ else
+ return __ttm_kmap_atomic_prot(page, prot);
+}
+EXPORT_SYMBOL(ttm_kmap_atomic_prot);
+
+/**
+ * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
+ * ttm_kmap_atomic_prot.
+ *
+ * @addr: The virtual address from the map.
+ * @prot: The page protection.
+ */
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
+{
+ if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+ kunmap_atomic(addr);
+ else
+ __ttm_kunmap_atomic(addr);
+}
+EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
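Usage sketch for the two helpers above (illustrative only; pgprot_writecombine() stands in for any non-default protection, and the prototypes are assumed to come from the updated TTM headers):
#include <linux/highmem.h>
#include <drm/ttm/ttm_bo_api.h>
/* Copy one page through a write-combined kernel mapping. Map/unmap calls
 * must be balanced and unmapped in the reverse order of mapping.
 */
static void example_copy_page_wc(struct page *dst_page, const void *src)
{
	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
	void *dst = ttm_kmap_atomic_prot(dst_page, prot);
	if (!dst)
		return;
	memcpy(dst, src, PAGE_SIZE);
	ttm_kunmap_atomic_prot(dst, prot);
}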
+
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
@@ -266,28 +314,13 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-
-#ifdef CONFIG_X86
- dst = kmap_atomic_prot(d, prot);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- dst = vmap(&d, 1, 0, prot);
- else
- dst = kmap(d);
-#endif
+ dst = ttm_kmap_atomic_prot(d, prot);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
-#ifdef CONFIG_X86
- kunmap_atomic(dst);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- vunmap(dst);
- else
- kunmap(d);
-#endif
+ ttm_kunmap_atomic_prot(dst, prot);
return 0;
}
@@ -303,33 +336,19 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-#ifdef CONFIG_X86
- src = kmap_atomic_prot(s, prot);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- src = vmap(&s, 1, 0, prot);
- else
- src = kmap(s);
-#endif
+ src = ttm_kmap_atomic_prot(s, prot);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
-#ifdef CONFIG_X86
- kunmap_atomic(src);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- vunmap(src);
- else
- kunmap(s);
-#endif
+ ttm_kunmap_atomic_prot(src, prot);
return 0;
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -345,7 +364,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
unsigned long add = 0;
int dir;
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret)
return ret;
@@ -375,8 +394,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
/*
* TTM might be null for moves within the same region.
*/
- if (ttm && ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ttm) {
+ ret = ttm_tt_populate(ttm, ctx);
if (ret)
goto out1;
}
@@ -402,8 +421,9 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
PAGE_KERNEL);
ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
prot);
- } else
+ } else {
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+ }
if (ret)
goto out1;
}
@@ -469,7 +489,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- atomic_inc(&bo->glob->bo_count);
+ atomic_inc(&bo->bdev->glob->bo_count);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
@@ -485,7 +505,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->acc_size = 0;
fbo->resv = &fbo->ttm_resv;
reservation_object_init(fbo->resv);
- ret = ww_mutex_trylock(&fbo->resv->lock);
+ ret = reservation_object_trylock(fbo->resv);
WARN_ON(!ret);
*new_obj = fbo;
@@ -545,17 +565,20 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
+ struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
struct ttm_tt *ttm = bo->ttm;
+ pgprot_t prot;
int ret;
BUG_ON(!ttm);
- if (ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret)
- return ret;
- }
+ ret = ttm_tt_populate(ttm, &ctx);
+ if (ret)
+ return ret;
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
@@ -797,3 +820,27 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);
+
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
+{
+ struct ttm_buffer_object *ghost;
+ int ret;
+
+ ret = ttm_buffer_object_transfer(bo, &ghost);
+ if (ret)
+ return ret;
+
+ ret = reservation_object_copy_fences(ghost->resv, bo->resv);
+ /* Last resort, wait for the BO to be idle when we are OOM */
+ if (ret)
+ ttm_bo_wait(bo, false, false);
+
+ memset(&bo->mem, 0, sizeof(bo->mem));
+ bo->mem.mem_type = TTM_PL_SYSTEM;
+ bo->ttm = NULL;
+
+ ttm_bo_unreserve(ghost);
+ ttm_bo_unref(&ghost);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c8ebb75..8eba95b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -92,6 +92,18 @@ out_unlock:
return ret;
}
+static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ if (bdev->driver->io_mem_pfn)
+ return bdev->driver->io_mem_pfn(bo, page_offset);
+
+ return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
+ + page_offset;
+}
+
static int ttm_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
@@ -106,7 +118,6 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
int ret;
int i;
unsigned long address = vmf->address;
- int retval = VM_FAULT_NOPAGE;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
struct vm_area_struct cvma;
@@ -146,7 +157,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
* (if at all) by redirecting mmap to the exporter.
*/
if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- retval = VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -157,10 +168,10 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
break;
case -EBUSY:
case -ERESTARTSYS:
- retval = VM_FAULT_NOPAGE;
+ ret = VM_FAULT_NOPAGE;
goto out_unlock;
default:
- retval = VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
}
@@ -171,12 +182,10 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
*/
ret = ttm_bo_vm_fault_idle(bo, vmf);
if (unlikely(ret != 0)) {
- retval = ret;
-
- if (retval == VM_FAULT_RETRY &&
+ if (ret == VM_FAULT_RETRY &&
!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
/* The BO has already been unreserved. */
- return retval;
+ return ret;
}
goto out_unlock;
@@ -184,12 +193,12 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
ret = ttm_mem_io_lock(man, true);
if (unlikely(ret != 0)) {
- retval = VM_FAULT_NOPAGE;
+ ret = VM_FAULT_NOPAGE;
goto out_unlock;
}
ret = ttm_mem_io_reserve_vm(bo);
if (unlikely(ret != 0)) {
- retval = VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
goto out_io_unlock;
}
@@ -199,7 +208,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
drm_vma_node_start(&bo->vma_node);
if (unlikely(page_offset >= bo->num_pages)) {
- retval = VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
goto out_io_unlock;
}
@@ -215,13 +224,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);
} else {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ .flags = TTM_OPT_FLAG_FORCE_ALLOC
+ };
+
ttm = bo->ttm;
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);
/* Allocate all pages at once, most common usage */
- if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
- retval = VM_FAULT_OOM;
+ if (ttm_tt_populate(ttm, &ctx)) {
+ ret = VM_FAULT_OOM;
goto out_io_unlock;
}
}
@@ -234,16 +250,15 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
if (bo->mem.bus.is_iomem) {
/* Iomem should not be marked encrypted */
cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
- pfn = bdev->driver->io_mem_pfn(bo, page_offset);
+ pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
- retval = VM_FAULT_OOM;
+ ret = VM_FAULT_OOM;
goto out_io_unlock;
} else if (unlikely(!page)) {
break;
}
- page->mapping = vma->vm_file->f_mapping;
page->index = drm_vma_node_start(&bo->vma_node) +
page_offset;
pfn = page_to_pfn(page);
@@ -263,7 +278,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
break;
else if (unlikely(ret != 0)) {
- retval =
+ ret =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
goto out_io_unlock;
}
@@ -272,11 +287,12 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
if (unlikely(++page_offset >= page_last))
break;
}
+ ret = VM_FAULT_NOPAGE;
out_io_unlock:
ttm_mem_io_unlock(man);
out_unlock:
ttm_bo_unreserve(bo);
- return retval;
+ return ret;
}
static void ttm_bo_vm_open(struct vm_area_struct *vma)
@@ -299,7 +315,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
unsigned long offset,
- void *buf, int len, int write)
+ uint8_t *buf, int len, int write)
{
unsigned long page = offset >> PAGE_SHIFT;
unsigned long bytes_left = len;
@@ -328,6 +344,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
ttm_bo_kunmap(&map);
page++;
+ buf += bytes;
bytes_left -= bytes;
offset = 0;
} while (bytes_left);
@@ -404,14 +421,6 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
-unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
- unsigned long page_offset)
-{
- return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
- + page_offset;
-}
-EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);
-
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 5e1bcab..3dca206e 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -38,7 +38,7 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
list_for_each_entry_continue_reverse(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- __ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
}
}
@@ -62,14 +62,14 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
return;
entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->glob;
+ glob = entry->bo->bdev->glob;
spin_lock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
ttm_bo_add_to_lru(bo);
- __ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
@@ -102,7 +102,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
return 0;
entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->glob;
+ glob = entry->bo->bdev->glob;
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
@@ -112,7 +112,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- __ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
ret = -EBUSY;
@@ -139,12 +139,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
*/
ttm_eu_backoff_reservation_reverse(list, entry);
- if (ret == -EDEADLK && intr) {
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- ticket);
- } else if (ret == -EDEADLK) {
- ww_mutex_lock_slow(&bo->resv->lock, ticket);
- ret = 0;
+ if (ret == -EDEADLK) {
+ if (intr) {
+ ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+ ticket);
+ } else {
+ ww_mutex_lock_slow(&bo->resv->lock, ticket);
+ ret = 0;
+ }
}
if (!ret && entry->shared)
@@ -192,7 +194,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
bdev = bo->bdev;
driver = bdev->driver;
- glob = bo->glob;
+ glob = bo->bdev->glob;
spin_lock(&glob->lru_lock);
@@ -203,7 +205,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
else
reservation_object_add_excl_fence(bo->resv, fence);
ttm_bo_add_to_lru(bo);
- __ttm_bo_unreserve(bo);
+ reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
if (ticket)
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e963749..27856c5 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -36,6 +36,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/swap.h>
#define TTM_MEMORY_ALLOC_RETRIES 4
@@ -166,6 +167,54 @@ static struct kobj_type ttm_mem_zone_kobj_type = {
.default_attrs = ttm_mem_zone_attrs,
};
+static struct attribute ttm_mem_global_lower_mem_limit = {
+ .name = "lower_mem_limit",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static ssize_t ttm_mem_global_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buffer)
+{
+ struct ttm_mem_global *glob =
+ container_of(kobj, struct ttm_mem_global, kobj);
+ uint64_t val = 0;
+
+ spin_lock(&glob->lock);
+ val = glob->lower_mem_limit;
+ spin_unlock(&glob->lock);
+ /* convert from number of pages to KB */
+ val <<= (PAGE_SHIFT - 10);
+ return snprintf(buffer, PAGE_SIZE, "%llu\n",
+ (unsigned long long) val);
+}
+
+static ssize_t ttm_mem_global_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer,
+ size_t size)
+{
+ int chars;
+ uint64_t val64;
+ unsigned long val;
+ struct ttm_mem_global *glob =
+ container_of(kobj, struct ttm_mem_global, kobj);
+
+ chars = sscanf(buffer, "%lu", &val);
+ if (chars == 0)
+ return size;
+
+ val64 = val;
+ /* convert from KB to number of pages */
+ val64 >>= (PAGE_SHIFT - 10);
+
+ spin_lock(&glob->lock);
+ glob->lower_mem_limit = val64;
+ spin_unlock(&glob->lock);
+
+ return size;
+}
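Worked example of the KB/pages conversion above, assuming 4 KiB pages (PAGE_SHIFT == 12):
/* Writing "1024" (KB) to lower_mem_limit stores 1024 >> (12 - 10) = 256 pages;
 * reading the file back reports 256 << (12 - 10) = 1024 KB again.
 */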
+
static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
struct ttm_mem_global *glob =
@@ -174,8 +223,20 @@ static void ttm_mem_global_kobj_release(struct kobject *kobj)
kfree(glob);
}
+static struct attribute *ttm_mem_global_attrs[] = {
+ &ttm_mem_global_lower_mem_limit,
+ NULL
+};
+
+static const struct sysfs_ops ttm_mem_global_ops = {
+ .show = &ttm_mem_global_show,
+ .store = &ttm_mem_global_store,
+};
+
static struct kobj_type ttm_mem_glob_kobj_type = {
.release = &ttm_mem_global_kobj_release,
+ .sysfs_ops = &ttm_mem_global_ops,
+ .default_attrs = ttm_mem_global_attrs,
};
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
@@ -211,35 +272,33 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
*/
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
- uint64_t extra)
+ uint64_t extra, struct ttm_operation_ctx *ctx)
{
int ret;
- struct ttm_mem_shrink *shrink;
spin_lock(&glob->lock);
- if (glob->shrink == NULL)
- goto out;
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
- shrink = glob->shrink;
spin_unlock(&glob->lock);
- ret = shrink->do_shrink(shrink);
+ ret = ttm_bo_swapout(glob->bo_glob, ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
- goto out;
+ break;
}
-out:
+
spin_unlock(&glob->lock);
}
-
-
static void ttm_shrink_work(struct work_struct *work)
{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
struct ttm_mem_global *glob =
container_of(work, struct ttm_mem_global, work);
- ttm_shrink(glob, true, 0ULL);
+ ttm_shrink(glob, true, 0ULL, &ctx);
}
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
@@ -377,6 +436,9 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
si_meminfo(&si);
+ /* set it to 0 by default to keep the original OOM behavior */
+ glob->lower_mem_limit = 0;
+
ret = ttm_mem_init_kernel_zone(glob, &si);
if (unlikely(ret != 0))
goto out_no_zone;
@@ -471,6 +533,35 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
}
EXPORT_SYMBOL(ttm_mem_global_free);
+/*
+ * Check whether the available memory is under the lower memory limit.
+ *
+ * a. if there is no swap disk at all, or the free swap space is below
+ * swap_mem_limit but the available system memory is above sys_mem_limit,
+ * allow the TTM allocation;
+ *
+ * b. if the available system memory is below sys_mem_limit but the free
+ * swap space is above swap_mem_limit, allow the TTM allocation.
+ */
+bool
+ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
+ uint64_t num_pages,
+ struct ttm_operation_ctx *ctx)
+{
+ int64_t available;
+
+ if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
+ return false;
+
+ available = get_nr_swap_pages() + si_mem_available();
+ available -= num_pages;
+ if (available < glob->lower_mem_limit)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ttm_check_under_lowerlimit);
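Illustrative sketch (not applied by this patch): a caller that must not fail can bypass this check with TTM_OPT_FLAG_FORCE_ALLOC, as the CPU fault handler earlier in the series does:
static bool example_populate_would_block(struct ttm_mem_global *glob,
					 uint64_t num_pages, bool force)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.flags = force ? TTM_OPT_FLAG_FORCE_ALLOC : 0
	};
	/* With TTM_OPT_FLAG_FORCE_ALLOC the check always returns false. */
	return ttm_check_under_lowerlimit(glob, num_pages, &ctx);
}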
+
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t amount, bool reserve)
@@ -514,7 +605,7 @@ out_unlock:
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t memory,
- bool no_wait, bool interruptible)
+ struct ttm_operation_ctx *ctx)
{
int count = TTM_MEMORY_ALLOC_RETRIES;
@@ -522,33 +613,32 @@ static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
single_zone,
memory, true)
!= 0)) {
- if (no_wait)
+ if (ctx->no_wait_gpu)
return -ENOMEM;
if (unlikely(count-- == 0))
return -ENOMEM;
- ttm_shrink(glob, false, memory + (memory >> 2) + 16);
+ ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
}
return 0;
}
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- bool no_wait, bool interruptible)
+ struct ttm_operation_ctx *ctx)
{
/**
* Normal allocations of kernel memory are registered in
* all zones.
*/
- return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
- interruptible);
+ return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size)
+ struct page *page, uint64_t size,
+ struct ttm_operation_ctx *ctx)
{
-
struct ttm_mem_zone *zone = NULL;
/**
@@ -563,7 +653,7 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
- return ttm_mem_global_alloc_zone(glob, zone, size, false, false);
+ return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 26a7ad0..1aa2baa 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -325,6 +325,10 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_ref_object *ref;
struct drm_hash_item *hash;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
int ret = -EINVAL;
if (base->tfile != tfile && !base->shareable)
@@ -350,7 +354,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
return -EPERM;
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
- false, false);
+ &ctx);
if (unlikely(ret != 0))
return ret;
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
@@ -686,7 +690,10 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
exp_info.ops = &tdev->ops;
exp_info.size = prime->size;
exp_info.flags = flags;
@@ -696,7 +703,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
* Need to create a new dma_buf, with memory accounting.
*/
ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
- false, true);
+ &ctx);
if (unlikely(ret != 0)) {
mutex_unlock(&prime->mutex);
goto out_unref;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 5d252fb..f0481b7 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -477,12 +477,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
return count;
}
-static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.count_objects = ttm_pool_shrink_count;
manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
- register_shrinker(&manager->mm_shrink);
+ return register_shrinker(&manager->mm_shrink);
}
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
@@ -741,6 +741,9 @@ out:
if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
+ if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
+ gfp_flags |= __GFP_RETRY_MAYFAIL;
+
/* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel.
**/
@@ -893,6 +896,9 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
+ if (flags & TTM_PAGE_FLAG_NO_RETRY)
+ gfp_flags |= __GFP_RETRY_MAYFAIL;
+
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
@@ -1034,15 +1040,18 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
&glob->kobj, "pool");
- if (unlikely(ret != 0)) {
- kobject_put(&_manager->kobj);
- _manager = NULL;
- return ret;
- }
-
- ttm_pool_mm_shrink_init(_manager);
+ if (unlikely(ret != 0))
+ goto error;
+ ret = ttm_pool_mm_shrink_init(_manager);
+ if (unlikely(ret != 0))
+ goto error;
return 0;
+
+error:
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
+ return ret;
}
void ttm_page_alloc_fini(void)
@@ -1060,27 +1069,52 @@ void ttm_page_alloc_fini(void)
_manager = NULL;
}
-int ttm_pool_populate(struct ttm_tt *ttm)
+static void
+ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
+{
+ struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ unsigned i;
+
+ if (mem_count_update == 0)
+ goto put_pages;
+
+ for (i = 0; i < mem_count_update; ++i) {
+ if (!ttm->pages[i])
+ continue;
+
+ ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
+ }
+
+put_pages:
+ ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+ ttm->caching_state);
+ ttm->state = tt_unpopulated;
+}
+
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
unsigned i;
int ret;
if (ttm->state != tt_unpopulated)
return 0;
+ if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
+ return -ENOMEM;
+
ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
ttm->caching_state);
if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
+ ttm_pool_unpopulate_helper(ttm, 0);
return ret;
}
for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- PAGE_SIZE);
+ PAGE_SIZE, ctx);
if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
+ ttm_pool_unpopulate_helper(ttm, i);
return -ENOMEM;
}
}
@@ -1100,27 +1134,17 @@ EXPORT_SYMBOL(ttm_pool_populate);
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
- unsigned i;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- if (!ttm->pages[i])
- continue;
-
- ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
- PAGE_SIZE);
- }
- ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
- ttm->caching_state);
- ttm->state = tt_unpopulated;
+ ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+ struct ttm_operation_ctx *ctx)
{
unsigned i, j;
int r;
- r = ttm_pool_populate(&tt->ttm);
+ r = ttm_pool_populate(&tt->ttm, ctx);
if (r)
return r;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 6b2627f..8a25d19 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -61,6 +61,7 @@
#define SMALL_ALLOCATION 4
#define FREE_ALL_PAGES (~0U)
#define VADDR_FLAG_HUGE_POOL 1UL
+#define VADDR_FLAG_UPDATED_COUNT 2UL
enum pool_type {
IS_UNDEFINED = 0,
@@ -209,6 +210,7 @@ static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
container_of(kobj, struct ttm_pool_manager, kobj);
int chars;
unsigned val;
+
chars = sscanf(buffer, "%u", &val);
if (chars == 0)
return size;
@@ -216,11 +218,11 @@ static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
/* Convert kb to number of pages */
val = val / (PAGE_SIZE >> 10);
- if (attr == &ttm_page_pool_max)
+ if (attr == &ttm_page_pool_max) {
m->options.max_size = val;
- else if (attr == &ttm_page_pool_small)
+ } else if (attr == &ttm_page_pool_small) {
m->options.small = val;
- else if (attr == &ttm_page_pool_alloc_size) {
+ } else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
@@ -333,14 +335,18 @@ static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
struct dma_page *d_page;
+ unsigned long attrs = 0;
void *vaddr;
d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
if (!d_page)
return NULL;
- vaddr = dma_alloc_coherent(pool->dev, pool->size, &d_page->dma,
- pool->gfp_flags);
+ if (pool->type & IS_HUGE)
+ attrs = DMA_ATTR_NO_WARN;
+
+ vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
+ pool->gfp_flags, attrs);
if (vaddr) {
if (is_vmalloc_addr(vaddr))
d_page->p = vmalloc_to_page(vaddr);
@@ -384,14 +390,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
struct page *page = d_page->p;
unsigned i, num_pages;
- int ret;
/* Don't set WB on WB page pool. */
if (!(pool->type & IS_CACHED)) {
num_pages = pool->size / PAGE_SIZE;
for (i = 0; i < num_pages; ++i, ++page) {
- ret = set_pages_array_wb(&page, 1);
- if (ret) {
+ if (set_pages_array_wb(&page, 1)) {
pr_err("%s: Failed to set %d pages to wb!\n",
pool->dev_name, 1);
}
@@ -676,10 +680,10 @@ err_mem:
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
enum pool_type type)
{
- struct dma_pool *pool, *tmp, *found = NULL;
+ struct dma_pool *pool, *tmp;
if (type == IS_UNDEFINED)
- return found;
+ return NULL;
/* NB: We iterate on the 'struct dev' which has no spinlock, but
* it does have a kref which we have taken. The kref is taken during
@@ -692,13 +696,10 @@ static struct dma_pool *ttm_dma_find_pool(struct device *dev,
* thing is at that point of time there are no pages associated with the
* driver so this function will not be called.
*/
- list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
- if (pool->type != type)
- continue;
- found = pool;
- break;
- }
- return found;
+ list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
+ if (pool->type == type)
+ return pool;
+ return NULL;
}
/*
@@ -760,10 +761,9 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
return -ENOMEM;
}
- if (count > 1) {
+ if (count > 1)
pr_debug("%s: (%s:%d) Getting %d pages\n",
pool->dev_name, pool->name, current->pid, count);
- }
for (i = 0, cpages = 0; i < count; ++i) {
dma_p = __ttm_dma_alloc_page(pool);
@@ -870,18 +870,18 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
}
/*
- * @return count of pages still required to fulfill the request.
* The populate list is actually a stack (not that it matters, as TTM
* allocates one page at a time).
+ * Return the dma_page pointer on success, otherwise NULL.
*/
-static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
struct ttm_dma_tt *ttm_dma,
unsigned index)
{
- struct dma_page *d_page;
+ struct dma_page *d_page = NULL;
struct ttm_tt *ttm = &ttm_dma->ttm;
unsigned long irq_flags;
- int count, r = -ENOMEM;
+ int count;
spin_lock_irqsave(&pool->lock, irq_flags);
count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
@@ -890,12 +890,11 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
ttm->pages[index] = d_page->p;
ttm_dma->dma_address[index] = d_page->dma;
list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
- r = 0;
pool->npages_in_use += 1;
pool->npages_free -= 1;
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
- return r;
+ return d_page;
}
static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
@@ -916,6 +915,9 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
gfp_flags &= ~__GFP_COMP;
}
+ if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+ gfp_flags |= __GFP_RETRY_MAYFAIL;
+
return gfp_flags;
}
@@ -923,12 +925,14 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
* On success the pages list will hold count correctly cached pages. On
* failure it will hold the negative return value (-ENOMEM, etc.).
*/
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
+ struct dma_page *d_page;
enum pool_type type;
unsigned i;
int ret;
@@ -936,6 +940,9 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
if (ttm->state != tt_unpopulated)
return 0;
+ if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
+ return -ENOMEM;
+
INIT_LIST_HEAD(&ttm_dma->pages_list);
i = 0;
@@ -957,17 +964,18 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
while (num_pages >= HPAGE_PMD_NR) {
unsigned j;
- ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
- if (ret != 0)
+ d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ if (!d_page)
break;
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size);
+ pool->size, ctx);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
+ d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
ttm->pages[j] = ttm->pages[j - 1] + 1;
ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
@@ -991,19 +999,20 @@ skip_huge:
}
while (num_pages) {
- ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
- if (ret != 0) {
+ d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ if (!d_page) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size);
+ pool->size, ctx);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
+ d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
++i;
--num_pages;
}
@@ -1025,6 +1034,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
@@ -1044,8 +1054,11 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
continue;
count++;
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- d_page->p, pool->size);
+ if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+ ttm_mem_global_free_page(mem_glob, d_page->p,
+ pool->size);
+ d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+ }
ttm_dma_page_put(pool, d_page);
}
@@ -1065,9 +1078,19 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
/* make sure the pages array matches the list and count the number of pages */
count = 0;
- list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+ list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+ page_list) {
ttm->pages[count] = d_page->p;
count++;
+
+ if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+ ttm_mem_global_free_page(mem_glob, d_page->p,
+ pool->size);
+ d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+ }
+
+ if (is_cached)
+ ttm_dma_page_put(pool, d_page);
}
spin_lock_irqsave(&pool->lock, irq_flags);
@@ -1087,19 +1110,6 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
- if (is_cached) {
- list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- d_page->p, pool->size);
- ttm_dma_page_put(pool, d_page);
- }
- } else {
- for (i = 0; i < count; i++) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i], pool->size);
- }
- }
-
INIT_LIST_HEAD(&ttm_dma->pages_list);
for (i = 0; i < ttm->num_pages; i++) {
ttm->pages[i] = NULL;
@@ -1177,12 +1187,12 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
return count;
}
-static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
- register_shrinker(&manager->mm_shrink);
+ return register_shrinker(&manager->mm_shrink);
}
static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
@@ -1192,7 +1202,7 @@ static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
- int ret = -ENOMEM;
+ int ret;
WARN_ON(_manager);
@@ -1200,7 +1210,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
if (!_manager)
- goto err;
+ return -ENOMEM;
mutex_init(&_manager->lock);
INIT_LIST_HEAD(&_manager->pools);
@@ -1212,13 +1222,17 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
/* This takes care of auto-freeing the _manager */
ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
&glob->kobj, "dma_pool");
- if (unlikely(ret != 0)) {
- kobject_put(&_manager->kobj);
- goto err;
- }
- ttm_dma_pool_mm_shrink_init(_manager);
+ if (unlikely(ret != 0))
+ goto error;
+
+ ret = ttm_dma_pool_mm_shrink_init(_manager);
+ if (unlikely(ret != 0))
+ goto error;
return 0;
-err:
+
+error:
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
return ret;
}
@@ -1244,15 +1258,12 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct device_pools *p;
struct dma_pool *pool = NULL;
- char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
- "name", "virt", "busaddr"};
if (!_manager) {
seq_printf(m, "No pool allocator running.\n");
return 0;
}
- seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
- h[0], h[1], h[2], h[3], h[4], h[5]);
+ seq_printf(m, " pool refills pages freed inuse available name\n");
mutex_lock(&_manager->lock);
list_for_each_entry(p, &_manager->pools, pools) {
struct device *dev = p->dev;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 8ebc8d3..7e672be 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,38 +31,87 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/sched.h>
-#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/export.h>
#include <drm/drm_cache.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
/**
+ * Allocates a ttm structure for the given BO.
+ */
+int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ uint32_t page_flags = 0;
+
+ reservation_object_assert_held(bo->resv);
+
+ if (bdev->need_dma32)
+ page_flags |= TTM_PAGE_FLAG_DMA32;
+
+ if (bdev->no_retry)
+ page_flags |= TTM_PAGE_FLAG_NO_RETRY;
+
+ switch (bo->type) {
+ case ttm_bo_type_device:
+ if (zero_alloc)
+ page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ break;
+ case ttm_bo_type_kernel:
+ break;
+ case ttm_bo_type_sg:
+ page_flags |= TTM_PAGE_FLAG_SG;
+ break;
+ default:
+ bo->ttm = NULL;
+ pr_err("Illegal buffer object type\n");
+ return -EINVAL;
+ }
+
+ bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
+ if (unlikely(bo->ttm == NULL))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
* Allocates storage for pointers to the pages that back the ttm.
*/
-static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
+static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
GFP_KERNEL | __GFP_ZERO);
+ if (!ttm->pages)
+ return -ENOMEM;
+ return 0;
}
-static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
sizeof(*ttm->ttm.pages) +
sizeof(*ttm->dma_address),
GFP_KERNEL | __GFP_ZERO);
+ if (!ttm->ttm.pages)
+ return -ENOMEM;
ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+ return 0;
+}
+
+static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+{
+ ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+ sizeof(*ttm->dma_address),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ttm->dma_address)
+ return -ENOMEM;
+ return 0;
}
#ifdef CONFIG_X86
@@ -184,21 +233,24 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm->func->destroy(ttm);
}
-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
- ttm->bdev = bdev;
- ttm->glob = bdev->glob;
- ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->bdev = bo->bdev;
+ ttm->num_pages = bo->num_pages;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
- ttm->dummy_read_page = dummy_read_page;
ttm->state = tt_unpopulated;
ttm->swap_storage = NULL;
+ ttm->sg = bo->sg;
+}
- ttm_tt_alloc_page_directory(ttm);
- if (!ttm->pages) {
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ ttm_tt_init_fields(ttm, bo, page_flags);
+
+ if (ttm_tt_alloc_page_directory(ttm)) {
ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
@@ -214,24 +266,15 @@ void ttm_tt_fini(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
- ttm->bdev = bdev;
- ttm->glob = bdev->glob;
- ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->caching_state = tt_cached;
- ttm->page_flags = page_flags;
- ttm->dummy_read_page = dummy_read_page;
- ttm->state = tt_unpopulated;
- ttm->swap_storage = NULL;
+ ttm_tt_init_fields(ttm, bo, page_flags);
INIT_LIST_HEAD(&ttm_dma->pages_list);
- ttm_dma_tt_alloc_page_directory(ttm_dma);
- if (!ttm->pages) {
+ if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
@@ -240,11 +283,36 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_dma_tt_init);
+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ int ret;
+
+ ttm_tt_init_fields(ttm, bo, page_flags);
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ if (page_flags & TTM_PAGE_FLAG_SG)
+ ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+ else
+ ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+ if (ret) {
+ ttm_tt_destroy(ttm);
+ pr_err("Failed allocating page table\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_sg_tt_init);
+
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
- kvfree(ttm->pages);
+ if (ttm->pages)
+ kvfree(ttm->pages);
+ else
+ kvfree(ttm_dma->dma_address);
ttm->pages = NULL;
ttm_dma->dma_address = NULL;
}
@@ -261,7 +329,8 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
}
}
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
+ struct ttm_operation_ctx *ctx)
{
int ret = 0;
@@ -271,7 +340,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
if (ttm->state == tt_bound)
return 0;
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ ret = ttm_tt_populate(ttm, ctx);
if (ret)
return ret;
@@ -300,7 +369,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
swap_space = swap_storage->f_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
- from_page = shmem_read_mapping_page(swap_space, i);
+ gfp_t gfp_mask = mapping_gfp_mask(swap_space);
+
+ gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
+ from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
+
if (IS_ERR(from_page)) {
ret = PTR_ERR(from_page);
goto out_err;
@@ -343,16 +416,22 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
pr_err("Failed allocating swap storage\n");
return PTR_ERR(swap_storage);
}
- } else
+ } else {
swap_storage = persistent_swap_storage;
+ }
swap_space = swap_storage->f_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
+ gfp_t gfp_mask = mapping_gfp_mask(swap_space);
+
+ gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
+
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
- to_page = shmem_read_mapping_page(swap_space, i);
+
+ to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
if (IS_ERR(to_page)) {
ret = PTR_ERR(to_page);
goto out_err;
@@ -377,6 +456,33 @@ out_err:
return ret;
}
+static void ttm_tt_add_mapping(struct ttm_tt *ttm)
+{
+ pgoff_t i;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ return;
+
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
+}
+
+int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+{
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ if (ttm->bdev->driver->ttm_tt_populate)
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
+ else
+ ret = ttm_pool_populate(ttm, ctx);
+ if (!ret)
+ ttm_tt_add_mapping(ttm);
+ return ret;
+}
+
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
pgoff_t i;
@@ -397,5 +503,8 @@ void ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
ttm_tt_clear_mapping(ttm);
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ if (ttm->bdev->driver->ttm_tt_unpopulate)
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ else
+ ttm_pool_unpopulate(ttm);
}
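With ttm_tt_populate()/ttm_tt_unpopulate() now falling back to the page pool when a driver leaves the hooks NULL, a driver without special needs can drop its callbacks entirely; one that keeps them follows the new signature. Hedged sketch for a hypothetical driver:
#include <drm/ttm/ttm_page_alloc.h>
static int example_driver_tt_populate(struct ttm_tt *ttm,
				      struct ttm_operation_ctx *ctx)
{
	/* Delegate to the generic page pool, honouring the caller's ctx. */
	return ttm_pool_populate(ttm, ctx);
}
static void example_driver_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}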
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 2c668bd..db397fc 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -273,16 +273,20 @@ static void tve200_display_update(struct drm_simple_display_pipe *pipe,
}
}
-int tve200_enable_vblank(struct drm_device *drm, unsigned int crtc)
+static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe)
{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
return 0;
}
-void tve200_disable_vblank(struct drm_device *drm, unsigned int crtc)
+static void tve200_display_disable_vblank(struct drm_simple_display_pipe *pipe)
{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
writel(0, priv->regs + TVE200_INT_EN);
@@ -300,6 +304,8 @@ static const struct drm_simple_display_pipe_funcs tve200_display_funcs = {
.disable = tve200_display_disable,
.update = tve200_display_update,
.prepare_fb = tve200_display_prepare_fb,
+ .enable_vblank = tve200_display_enable_vblank,
+ .disable_vblank = tve200_display_disable_vblank,
};
int tve200_display_init(struct drm_device *drm)
diff --git a/drivers/gpu/drm/tve200/tve200_drm.h b/drivers/gpu/drm/tve200/tve200_drm.h
index 628b793..1ba4380 100644
--- a/drivers/gpu/drm/tve200/tve200_drm.h
+++ b/drivers/gpu/drm/tve200/tve200_drm.h
@@ -103,7 +103,6 @@ struct tve200_drm_dev_private {
struct drm_panel *panel;
struct drm_bridge *bridge;
struct drm_simple_display_pipe pipe;
- struct drm_fbdev_cma *fbdev;
void *regs;
struct clk *pclk;
@@ -114,8 +113,6 @@ struct tve200_drm_dev_private {
container_of(x, struct tve200_drm_connector, connector)
int tve200_display_init(struct drm_device *dev);
-int tve200_enable_vblank(struct drm_device *drm, unsigned int crtc);
-void tve200_disable_vblank(struct drm_device *drm, unsigned int crtc);
irqreturn_t tve200_irq(int irq, void *data);
int tve200_connector_init(struct drm_device *dev);
int tve200_encoder_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index bd6c945..ac344dd 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -46,6 +46,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
@@ -130,8 +131,7 @@ static int tve200_modeset_init(struct drm_device *dev)
* Passing in 16 here will make the RGB656 mode the default
* Passing in 32 will use XRGB8888 mode
*/
- priv->fbdev = drm_fbdev_cma_init(dev, 16,
- dev->mode_config.num_connector);
+ drm_fb_cma_fbdev_init(dev, 16, 0);
drm_kms_helper_poll_init(dev);
goto finish;
@@ -146,17 +146,10 @@ finish:
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
-static void tve200_lastclose(struct drm_device *dev)
-{
- struct tve200_drm_dev_private *priv = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(priv->fbdev);
-}
-
static struct drm_driver tve200_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
- .lastclose = tve200_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
.fops = &drm_fops,
.name = "tve200",
@@ -169,9 +162,6 @@ static struct drm_driver tve200_drm_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
- .enable_vblank = tve200_enable_vblank,
- .disable_vblank = tve200_disable_vblank,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
@@ -270,8 +260,7 @@ static int tve200_remove(struct platform_device *pdev)
struct tve200_drm_dev_private *priv = drm->dev_private;
drm_dev_unregister(drm);
- if (priv->fbdev)
- drm_fbdev_cma_fini(priv->fbdev);
+ drm_fb_cma_fbdev_fini(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index b5b335c..2ebdc6d 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long offset;
unsigned long page, pos;
- if (offset + size > info->fix.smem_len)
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
return -EINVAL;
pos = (unsigned long)info->fix.smem_start + offset;
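The rewritten bounds check above closes two overflows the old "offset + size > smem_len" test missed: vm_pgoff << PAGE_SHIFT can shift bits out of the unsigned long, and offset + size can wrap. An equivalent overflow-safe helper, for illustration only:
/* True when [offset, offset + size) lies entirely inside the framebuffer. */
static bool example_mmap_range_ok(unsigned long offset, unsigned long size,
				  unsigned long smem_len)
{
	return offset <= smem_len && size <= smem_len - offset;
}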
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index f5500df..4a3a868 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -15,6 +15,7 @@ vc4-y := \
vc4_vec.o \
vc4_hvs.o \
vc4_irq.o \
+ vc4_perfmon.o \
vc4_plane.o \
vc4_render_cl.o \
vc4_trace_points.o \
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index ce1e3b9..bf46674 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -643,9 +643,12 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
struct drm_plane *plane;
+ struct vc4_plane_state *vc4_plane_state;
bool debug_dump_regs = false;
+ bool enable_bg_fill = false;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
@@ -656,6 +659,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
/* Copy all the active planes' dlist contents to the hardware dlist. */
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ /* Is this the first active plane? */
+ if (dlist_next == dlist_start) {
+ /* We need to enable background fill when a plane
+ * could be alpha blending from the background, i.e.
+ * where no other plane is underneath. It suffices to
+ * consider the first active plane here since we set
+ * needs_bg_fill such that either the first plane
+ * already needs it or all planes on top blend from
+ * the first or a lower plane.
+ */
+ vc4_plane_state = to_vc4_plane_state(plane->state);
+ enable_bg_fill = vc4_plane_state->needs_bg_fill;
+ }
+
dlist_next += vc4_plane_write_dlist(plane, dlist_next);
}
@@ -664,6 +681,14 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
+ if (enable_bg_fill)
+ /* This sets a black background color fill, as is the case
+ * with other DRM drivers.
+ */
+ HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
+ HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)) |
+ SCALER_DISPBKGND_FILL);
+
/* Only update DISPLIST if the CRTC was already running and is not
* being disabled.
* vc4_crtc_enable() takes care of updating the dlist just after
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index e3c2972..94b99c9 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -101,6 +101,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
case DRM_VC4_PARAM_SUPPORTS_MADVISE:
+ case DRM_VC4_PARAM_SUPPORTS_PERFMON:
args->value = true;
break;
default:
@@ -111,11 +112,24 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static void vc4_lastclose(struct drm_device *dev)
+static int vc4_open(struct drm_device *dev, struct drm_file *file)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_file *vc4file;
+
+ vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
+ if (!vc4file)
+ return -ENOMEM;
+
+ vc4_perfmon_open_file(vc4file);
+ file->driver_priv = vc4file;
+ return 0;
+}
+
+static void vc4_close(struct drm_device *dev, struct drm_file *file)
+{
+ struct vc4_file *vc4file = file->driver_priv;
- drm_fbdev_cma_restore_mode(vc4->fbdev);
+ vc4_perfmon_close_file(vc4file);
}
static const struct vm_operations_struct vc4_vm_ops = {
@@ -150,6 +164,9 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GEM_MADVISE, vc4_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_PERFMON_CREATE, vc4_perfmon_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_PERFMON_DESTROY, vc4_perfmon_destroy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver vc4_drm_driver = {
@@ -159,7 +176,9 @@ static struct drm_driver vc4_drm_driver = {
DRIVER_HAVE_IRQ |
DRIVER_RENDER |
DRIVER_PRIME),
- .lastclose = vc4_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
+ .open = vc4_open,
+ .postclose = vc4_close,
.irq_handler = vc4_irq,
.irq_preinstall = vc4_irq_preinstall,
.irq_postinstall = vc4_irq_postinstall,
@@ -301,12 +320,10 @@ static void vc4_drm_unbind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = platform_get_drvdata(pdev);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
drm_dev_unregister(drm);
- if (vc4->fbdev)
- drm_fbdev_cma_fini(vc4->fbdev);
+ drm_fb_cma_fbdev_fini(drm);
drm_mode_config_cleanup(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 9c0d380..1b4cd1f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -11,6 +11,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
+#include "uapi/drm/vc4_drm.h"
+
/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
* this.
*/
@@ -29,6 +31,36 @@ enum vc4_kernel_bo_type {
VC4_BO_TYPE_COUNT
};
+/* Performance monitor object. The perfmon lifetime is controlled by userspace
+ * using perfmon related ioctls. A perfmon can be attached to a submit_cl
+ * request, and when this is the case, HW perf counters will be activated just
+ * before the submit_cl is submitted to the GPU and disabled when the job is
+ * done. This way, only events related to a specific job will be counted.
+ */
+struct vc4_perfmon {
+ /* Tracks the number of users of the perfmon; when this counter reaches
+ * zero the perfmon is destroyed.
+ */
+ refcount_t refcnt;
+
+ /* Number of counters activated in this perfmon instance
+ * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
+ */
+ u8 ncounters;
+
+ /* Events counted by the HW perf counters. */
+ u8 events[DRM_VC4_MAX_PERF_COUNTERS];
+
+ /* Storage for counter values. Counters are incremented by the HW
+ * perf counter values every time the perfmon is attached to a GPU job.
+ * This way, perfmon users don't have to retrieve the results after
+ * each job if they want to track events covering several submissions.
+ * Note that counter values can't be reset, but you can fake a reset by
+ * destroying the perfmon and creating a new one.
+ */
+ u64 counters[0];
+};
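Since counters[] is a flexible array member, a perfmon with N active counters fits in a single allocation. Hedged sketch (hypothetical helper; the real create ioctl lives in vc4_perfmon.c, which is not shown in this section):
#include <linux/slab.h>
static struct vc4_perfmon *example_alloc_perfmon(unsigned int ncounters)
{
	/* One allocation covers the header plus ncounters u64 slots. */
	return kzalloc(sizeof(struct vc4_perfmon) + ncounters * sizeof(u64),
		       GFP_KERNEL);
}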
+
struct vc4_dev {
struct drm_device *dev;
@@ -39,8 +71,6 @@ struct vc4_dev {
struct vc4_dsi *dsi1;
struct vc4_vec *vec;
- struct drm_fbdev_cma *fbdev;
-
struct vc4_hang_state *hang_state;
/* The kernel-space BO cache. Tracks buffers that have been
@@ -123,6 +153,11 @@ struct vc4_dev {
wait_queue_head_t job_wait_queue;
struct work_struct job_done_work;
+ /* Used to track the active perfmon if any. Access to this field is
+ * protected by job_lock.
+ */
+ struct vc4_perfmon *active_perfmon;
+
/* List of struct vc4_seqno_cb for callbacks to be made from a
* workqueue when the given seqno is passed.
*/
@@ -275,6 +310,66 @@ to_vc4_plane(struct drm_plane *plane)
return (struct vc4_plane *)plane;
}
+enum vc4_scaling_mode {
+ VC4_SCALING_NONE,
+ VC4_SCALING_TPZ,
+ VC4_SCALING_PPF,
+};
+
+struct vc4_plane_state {
+ struct drm_plane_state base;
+ /* System memory copy of the display list for this element, computed
+ * at atomic_check time.
+ */
+ u32 *dlist;
+ u32 dlist_size; /* Number of dwords allocated for the display list */
+ u32 dlist_count; /* Number of used dwords in the display list. */
+
+ /* Offset in the dlist to various words, for pageflip or
+ * cursor updates.
+ */
+ u32 pos0_offset;
+ u32 pos2_offset;
+ u32 ptr0_offset;
+
+ /* Offset where the plane's dlist was last stored in the
+ * hardware at vc4_crtc_atomic_flush() time.
+ */
+ u32 __iomem *hw_dlist;
+
+ /* Clipped coordinates of the plane on the display. */
+ int crtc_x, crtc_y, crtc_w, crtc_h;
+ /* Clipped area being scanned from in the FB. */
+ u32 src_x, src_y;
+
+ u32 src_w[2], src_h[2];
+
+ /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
+ enum vc4_scaling_mode x_scaling[2], y_scaling[2];
+ bool is_unity;
+ bool is_yuv;
+
+ /* Offset to start scanning out from the start of the plane's
+ * BO.
+ */
+ u32 offsets[3];
+
+ /* Our allocation in LBM for temporary storage during scaling. */
+ struct drm_mm_node lbm;
+
+ /* Set when the plane has per-pixel alpha content or does not cover
+ * the entire screen. This is a hint to the CRTC that it might need
+ * to enable background color fill.
+ */
+ bool needs_bg_fill;
+};
+
+static inline struct vc4_plane_state *
+to_vc4_plane_state(struct drm_plane_state *state)
+{
+ return (struct vc4_plane_state *)state;
+}
+
enum vc4_encoder_type {
VC4_ENCODER_TYPE_NONE,
VC4_ENCODER_TYPE_HDMI,
@@ -408,6 +503,21 @@ struct vc4_exec_info {
void *uniforms_v;
uint32_t uniforms_p;
uint32_t uniforms_size;
+
+ /* Pointer to a performance monitor object if the user requested it,
+ * NULL otherwise.
+ */
+ struct vc4_perfmon *perfmon;
+};
+
+/* Per-open file private data. Any driver-specific resource that has to be
+ * released when the DRM file is closed should be placed here.
+ */
+struct vc4_file {
+ struct {
+ struct idr idr;
+ struct mutex lock;
+ } perfmon;
};
static inline struct vc4_exec_info *
@@ -648,3 +758,19 @@ bool vc4_check_tex_size(struct vc4_exec_info *exec,
/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
+
+/* vc4_perfmon.c */
+void vc4_perfmon_get(struct vc4_perfmon *perfmon);
+void vc4_perfmon_put(struct vc4_perfmon *perfmon);
+void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
+void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
+ bool capture);
+struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
+void vc4_perfmon_open_file(struct vc4_file *vc4file);
+void vc4_perfmon_close_file(struct vc4_file *vc4file);
+int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
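For reference, a minimal userspace flow driving the new perfmon interface might look like the sketch below. This is a sketch only, assuming the uapi introduced alongside this change (struct drm_vc4_perfmon_create / drm_vc4_perfmon_get_values / drm_vc4_perfmon_destroy and the perfmonid field of struct drm_vc4_submit_cl); the event indices used are placeholders, not real V3D event IDs.

        #include <stdint.h>
        #include <string.h>
        #include <xf86drm.h>
        #include "vc4_drm.h"    /* uapi header added with this series */

        /* Create a perfmon, attach it to one CL submission, then read it back. */
        static int submit_with_perfmon(int fd, struct drm_vc4_submit_cl *submit)
        {
                struct drm_vc4_perfmon_create create = { .ncounters = 2 };
                struct drm_vc4_perfmon_get_values get = { 0 };
                struct drm_vc4_perfmon_destroy destroy = { 0 };
                uint64_t values[2];
                int ret;

                create.events[0] = 0;   /* placeholder event index */
                create.events[1] = 1;   /* placeholder event index */
                ret = drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create);
                if (ret)
                        return ret;

                /* Counters only run while jobs carrying this id execute. */
                submit->perfmonid = create.id;
                ret = drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, submit);
                if (ret)
                        goto out;

                /* After waiting for the job, fetch the accumulated values. */
                get.id = create.id;
                get.values_ptr = (uintptr_t)values;
                ret = drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);
        out:
                destroy.id = create.id;
                drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
                return ret;
        }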
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index c94cce9..2107b0d 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -467,14 +467,30 @@ again:
vc4_flush_caches(dev);
+ /* Only start the perfmon if it was not already started by a previous
+ * job.
+ */
+ if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
+ vc4_perfmon_start(vc4, exec->perfmon);
+
/* Either put the job in the binner if it uses the binner, or
* immediately move it to the to-be-rendered queue.
*/
if (exec->ct0ca != exec->ct0ea) {
submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
} else {
+ struct vc4_exec_info *next;
+
vc4_move_job_to_render(dev, exec);
- goto again;
+ next = vc4_first_bin_job(vc4);
+
+ /* We can't start the next bin job if the previous job had a
+ * different perfmon instance attached to it. The same goes
+ * if one of them had a perfmon attached to it and the other
+ * one didn't.
+ */
+ if (next && next->perfmon == exec->perfmon)
+ goto again;
}
}
@@ -642,6 +658,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
struct ww_acquire_ctx *acquire_ctx)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *renderjob;
uint64_t seqno;
unsigned long irqflags;
struct vc4_fence *fence;
@@ -667,11 +684,14 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
list_add_tail(&exec->head, &vc4->bin_job_list);
- /* If no job was executing, kick ours off. Otherwise, it'll
- * get started when the previous job's flush done interrupt
- * occurs.
+ /* If no bin job was executing and if the render job (if any) has the
+ * same perfmon as our job attached to it (or if both jobs don't have
+ * perfmon activated), then kick ours off. Otherwise, it'll get
+ * started when the previous job's flush/render done interrupt occurs.
*/
- if (vc4_first_bin_job(vc4) == exec) {
+ renderjob = vc4_first_render_job(vc4);
+ if (vc4_first_bin_job(vc4) == exec &&
+ (!renderjob || renderjob->perfmon == exec->perfmon)) {
vc4_submit_next_bin_job(dev);
vc4_queue_hangcheck(dev);
}
@@ -936,6 +956,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
vc4->bin_alloc_used &= ~exec->bin_slots;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ /* Release the reference we had on the perf monitor. */
+ vc4_perfmon_put(exec->perfmon);
+
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0) {
pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
@@ -1088,6 +1111,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_submit_cl *args = data;
struct vc4_exec_info *exec;
struct ww_acquire_ctx acquire_ctx;
@@ -1101,6 +1125,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->pad2 != 0) {
+ DRM_DEBUG("->pad2 must be set to zero\n");
+ return -EINVAL;
+ }
+
exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
if (!exec) {
DRM_ERROR("malloc failure on exec struct\n");
@@ -1126,6 +1155,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail;
+ if (args->perfmonid) {
+ exec->perfmon = vc4_perfmon_find(vc4file,
+ args->perfmonid);
+ if (!exec->perfmon) {
+ ret = -ENOENT;
+ goto fail;
+ }
+ }
+
if (exec->args->bin_cl_size != 0) {
ret = vc4_get_bcl(dev, exec);
if (ret)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 0b20882..b8d5053 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -287,7 +287,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
kfree(edid);
return ret;
@@ -682,7 +681,7 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
HDMI_WRITE(VC4_HDMI_FIFO_CTL,
drift | VC4_HDMI_FIFO_CTL_RECENTER);
- udelay(1000);
+ usleep_range(1000, 1100);
HDMI_WRITE(VC4_HDMI_FIFO_CTL,
drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
HDMI_WRITE(VC4_HDMI_FIFO_CTL,
@@ -695,7 +694,22 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
}
}
+static enum drm_mode_status
+vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc,
+ const struct drm_display_mode *mode)
+{
+ /* HSM clock must be at least 108% of the pixel clock. Additionally,
+ * the AXI clock needs to be at least 25% of pixel clock, but
+ * HSM ends up being the limiting factor.
+ */
+ if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
+ .mode_valid = vc4_hdmi_encoder_mode_valid,
.disable = vc4_hdmi_encoder_disable,
.enable = vc4_hdmi_encoder_enable,
};
@@ -981,15 +995,17 @@ static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = {
{ "TX", NULL, "Playback" },
};
-static const struct snd_soc_codec_driver vc4_hdmi_audio_codec_drv = {
- .component_driver = {
- .controls = vc4_hdmi_audio_controls,
- .num_controls = ARRAY_SIZE(vc4_hdmi_audio_controls),
- .dapm_widgets = vc4_hdmi_audio_widgets,
- .num_dapm_widgets = ARRAY_SIZE(vc4_hdmi_audio_widgets),
- .dapm_routes = vc4_hdmi_audio_routes,
- .num_dapm_routes = ARRAY_SIZE(vc4_hdmi_audio_routes),
- },
+static const struct snd_soc_component_driver vc4_hdmi_audio_component_drv = {
+ .controls = vc4_hdmi_audio_controls,
+ .num_controls = ARRAY_SIZE(vc4_hdmi_audio_controls),
+ .dapm_widgets = vc4_hdmi_audio_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(vc4_hdmi_audio_widgets),
+ .dapm_routes = vc4_hdmi_audio_routes,
+ .num_dapm_routes = ARRAY_SIZE(vc4_hdmi_audio_routes),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
};
static const struct snd_soc_dai_ops vc4_hdmi_audio_dai_ops = {
@@ -1087,11 +1103,11 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
return ret;
}
- /* register codec and codec dai */
- ret = snd_soc_register_codec(dev, &vc4_hdmi_audio_codec_drv,
+ /* register component and codec dai */
+ ret = devm_snd_soc_register_component(dev, &vc4_hdmi_audio_component_drv,
&vc4_hdmi_audio_codec_dai_drv, 1);
if (ret) {
- dev_err(dev, "Could not register codec: %d\n", ret);
+ dev_err(dev, "Could not register component: %d\n", ret);
return ret;
}
@@ -1116,29 +1132,11 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
*/
snd_soc_card_set_drvdata(card, hdmi);
ret = devm_snd_soc_register_card(dev, card);
- if (ret) {
+ if (ret)
dev_err(dev, "Could not register sound card: %d\n", ret);
- goto unregister_codec;
- }
-
- return 0;
-
-unregister_codec:
- snd_soc_unregister_codec(dev);
return ret;
-}
-static void vc4_hdmi_audio_cleanup(struct vc4_hdmi *hdmi)
-{
- struct device *dev = &hdmi->pdev->dev;
-
- /*
- * If drvdata is not set this means the audio card was not
- * registered, just skip codec unregistration in this case.
- */
- if (dev_get_drvdata(dev))
- snd_soc_unregister_codec(dev);
}
#ifdef CONFIG_DRM_VC4_HDMI_CEC
@@ -1466,7 +1464,6 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
struct vc4_dev *vc4 = drm->dev_private;
struct vc4_hdmi *hdmi = vc4->hdmi;
- vc4_hdmi_audio_cleanup(hdmi);
cec_unregister_adapter(hdmi->cec_adap);
vc4_hdmi_connector_destroy(hdmi->connector);
vc4_hdmi_encoder_destroy(hdmi->encoder);
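The new mode_valid hook above filters modes by pixel clock. A rough standalone restatement of the rule follows (a sketch, assuming HSM_CLOCK_FREQ is the fixed ~163.7 MHz HDMI state machine clock already defined in vc4_hdmi.c, and remembering that mode->clock is expressed in kHz):

        /* The HSM clock must be at least 108% of the pixel clock, so the
         * highest acceptable pixel clock is HSM_CLOCK_FREQ / 1.08, i.e.
         * roughly 151 MHz for a ~163.7 MHz HSM clock.
         */
        static bool pixel_clock_acceptable(unsigned long hsm_clock_hz,
                                           int mode_clock_khz)
        {
                unsigned long long pixel_hz =
                        (unsigned long long)mode_clock_khz * 1000;

                return pixel_hz * 108 / 100 <= hsm_clock_hz;
        }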
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 3dd62d7..4cd2ccf 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -104,13 +104,20 @@ static void
vc4_irq_finish_bin_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
+ struct vc4_exec_info *next, *exec = vc4_first_bin_job(vc4);
if (!exec)
return;
vc4_move_job_to_render(dev, exec);
- vc4_submit_next_bin_job(dev);
+ next = vc4_first_bin_job(vc4);
+
+ /* Only submit the next job in the bin list if it matches the perfmon
+ * attached to the one that just finished (or if both jobs don't have
+ * perfmon attached to them).
+ */
+ if (next && next->perfmon == exec->perfmon)
+ vc4_submit_next_bin_job(dev);
}
static void
@@ -122,6 +129,10 @@ vc4_cancel_bin_job(struct drm_device *dev)
if (!exec)
return;
+ /* Stop the perfmon so that the next bin job can be started. */
+ if (exec->perfmon)
+ vc4_perfmon_stop(vc4, exec->perfmon, false);
+
list_move_tail(&exec->head, &vc4->bin_job_list);
vc4_submit_next_bin_job(dev);
}
@@ -131,18 +142,41 @@ vc4_irq_finish_render_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec = vc4_first_render_job(vc4);
+ struct vc4_exec_info *nextbin, *nextrender;
if (!exec)
return;
vc4->finished_seqno++;
list_move_tail(&exec->head, &vc4->job_done_list);
+
+ nextbin = vc4_first_bin_job(vc4);
+ nextrender = vc4_first_render_job(vc4);
+
+ /* Only stop the perfmon if following jobs in the queue don't expect it
+ * to be enabled.
+ */
+ if (exec->perfmon && !nextrender &&
+ (!nextbin || nextbin->perfmon != exec->perfmon))
+ vc4_perfmon_stop(vc4, exec->perfmon, true);
+
+ /* If there's a render job waiting, start it. Otherwise, we may have
+ * to unblock the binner if it was stalled because of a perfmon
+ * mismatch: compare the perfmon attached to the finished render job
+ * with the one attached to the next bin job; if they differ, the
+ * binner is stalled and should be restarted.
+ */
+ if (nextrender)
+ vc4_submit_next_render_job(dev);
+ else if (nextbin && nextbin->perfmon != exec->perfmon)
+ vc4_submit_next_bin_job(dev);
+
if (exec->fence) {
dma_fence_signal_locked(exec->fence);
dma_fence_put(exec->fence);
exec->fence = NULL;
}
- vc4_submit_next_render_job(dev);
wake_up_all(&vc4->job_wait_queue);
schedule_work(&vc4->job_done_work);
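Taken together, the scheduling changes in vc4_gem.c and vc4_irq.c enforce a single rule: two jobs may only be chained back to back on the hardware if they use the same perfmon (or if neither uses one), since only one perfmon can be active at a time. A hypothetical helper expressing that rule (not part of the patch, which open-codes the comparison) would be:

        /* Two jobs can overlap on the HW only when their perfmon pointers
         * match; a NULL perfmon means "no perfmon", and two NULLs match too.
         * A missing job imposes no constraint.
         */
        static bool vc4_jobs_share_perfmon(const struct vc4_exec_info *a,
                                           const struct vc4_exec_info *b)
        {
                return !a || !b || a->perfmon == b->perfmon;
        }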
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 50c4959..ba60153 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -19,17 +19,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "vc4_drv.h"
-static void vc4_output_poll_changed(struct drm_device *dev)
-{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
-
- drm_fbdev_cma_hotplug_event(vc4->fbdev);
-}
-
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
@@ -194,7 +188,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
}
static const struct drm_mode_config_funcs vc4_mode_funcs = {
- .output_poll_changed = vc4_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = vc4_atomic_commit,
.fb_create = vc4_fb_create,
@@ -221,15 +215,12 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.funcs = &vc4_mode_funcs;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
+ dev->mode_config.allow_fb_modifiers = true;
drm_mode_config_reset(dev);
- if (dev->mode_config.num_connector) {
- vc4->fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_connector);
- if (IS_ERR(vc4->fbdev))
- vc4->fbdev = NULL;
- }
+ if (dev->mode_config.num_connector)
+ drm_fb_cma_fbdev_init(dev, 32, 0);
drm_kms_helper_poll_init(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
new file mode 100644
index 0000000..437e7a2
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Broadcom
+ */
+
+/**
+ * DOC: VC4 V3D performance monitor module
+ *
+ * The V3D block provides 16 hardware counters which can count various events.
+ */
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define VC4_PERFMONID_MIN 1
+#define VC4_PERFMONID_MAX U32_MAX
+
+void vc4_perfmon_get(struct vc4_perfmon *perfmon)
+{
+ if (perfmon)
+ refcount_inc(&perfmon->refcnt);
+}
+
+void vc4_perfmon_put(struct vc4_perfmon *perfmon)
+{
+ if (perfmon && refcount_dec_and_test(&perfmon->refcnt))
+ kfree(perfmon);
+}
+
+void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
+{
+ unsigned int i;
+ u32 mask;
+
+ if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
+ return;
+
+ for (i = 0; i < perfmon->ncounters; i++)
+ V3D_WRITE(V3D_PCTRS(i), perfmon->events[i]);
+
+ mask = GENMASK(perfmon->ncounters - 1, 0);
+ V3D_WRITE(V3D_PCTRC, mask);
+ V3D_WRITE(V3D_PCTRE, V3D_PCTRE_EN | mask);
+ vc4->active_perfmon = perfmon;
+}
+
+void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
+ bool capture)
+{
+ unsigned int i;
+
+ if (WARN_ON_ONCE(!vc4->active_perfmon ||
+ perfmon != vc4->active_perfmon))
+ return;
+
+ if (capture) {
+ for (i = 0; i < perfmon->ncounters; i++)
+ perfmon->counters[i] += V3D_READ(V3D_PCTR(i));
+ }
+
+ V3D_WRITE(V3D_PCTRE, 0);
+ vc4->active_perfmon = NULL;
+}
+
+struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
+{
+ struct vc4_perfmon *perfmon;
+
+ mutex_lock(&vc4file->perfmon.lock);
+ perfmon = idr_find(&vc4file->perfmon.idr, id);
+ vc4_perfmon_get(perfmon);
+ mutex_unlock(&vc4file->perfmon.lock);
+
+ return perfmon;
+}
+
+void vc4_perfmon_open_file(struct vc4_file *vc4file)
+{
+ mutex_init(&vc4file->perfmon.lock);
+ idr_init(&vc4file->perfmon.idr);
+}
+
+static int vc4_perfmon_idr_del(int id, void *elem, void *data)
+{
+ struct vc4_perfmon *perfmon = elem;
+
+ vc4_perfmon_put(perfmon);
+
+ return 0;
+}
+
+void vc4_perfmon_close_file(struct vc4_file *vc4file)
+{
+ mutex_lock(&vc4file->perfmon.lock);
+ idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
+ idr_destroy(&vc4file->perfmon.idr);
+ mutex_unlock(&vc4file->perfmon.lock);
+}
+
+int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vc4_file *vc4file = file_priv->driver_priv;
+ struct drm_vc4_perfmon_create *req = data;
+ struct vc4_perfmon *perfmon;
+ unsigned int i;
+ int ret;
+
+ /* Number of monitored counters cannot exceed HW limits. */
+ if (req->ncounters > DRM_VC4_MAX_PERF_COUNTERS ||
+ !req->ncounters)
+ return -EINVAL;
+
+ /* Make sure all events are valid. */
+ for (i = 0; i < req->ncounters; i++) {
+ if (req->events[i] >= VC4_PERFCNT_NUM_EVENTS)
+ return -EINVAL;
+ }
+
+ perfmon = kzalloc(sizeof(*perfmon) + (req->ncounters * sizeof(u64)),
+ GFP_KERNEL);
+ if (!perfmon)
+ return -ENOMEM;
+
+ for (i = 0; i < req->ncounters; i++)
+ perfmon->events[i] = req->events[i];
+
+ perfmon->ncounters = req->ncounters;
+
+ refcount_set(&perfmon->refcnt, 1);
+
+ mutex_lock(&vc4file->perfmon.lock);
+ ret = idr_alloc(&vc4file->perfmon.idr, perfmon, VC4_PERFMONID_MIN,
+ VC4_PERFMONID_MAX, GFP_KERNEL);
+ mutex_unlock(&vc4file->perfmon.lock);
+
+ if (ret < 0) {
+ kfree(perfmon);
+ return ret;
+ }
+
+ req->id = ret;
+ return 0;
+}
+
+int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vc4_file *vc4file = file_priv->driver_priv;
+ struct drm_vc4_perfmon_destroy *req = data;
+ struct vc4_perfmon *perfmon;
+
+ mutex_lock(&vc4file->perfmon.lock);
+ perfmon = idr_remove(&vc4file->perfmon.idr, req->id);
+ mutex_unlock(&vc4file->perfmon.lock);
+
+ if (!perfmon)
+ return -EINVAL;
+
+ vc4_perfmon_put(perfmon);
+ return 0;
+}
+
+int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vc4_file *vc4file = file_priv->driver_priv;
+ struct drm_vc4_perfmon_get_values *req = data;
+ struct vc4_perfmon *perfmon;
+ int ret;
+
+ mutex_lock(&vc4file->perfmon.lock);
+ perfmon = idr_find(&vc4file->perfmon.idr, req->id);
+ vc4_perfmon_get(perfmon);
+ mutex_unlock(&vc4file->perfmon.lock);
+
+ if (!perfmon)
+ return -EINVAL;
+
+ if (copy_to_user(u64_to_user_ptr(req->values_ptr), perfmon->counters,
+ perfmon->ncounters * sizeof(u64)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+
+ vc4_perfmon_put(perfmon);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 423a23e..ce39390 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -27,124 +27,90 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-enum vc4_scaling_mode {
- VC4_SCALING_NONE,
- VC4_SCALING_TPZ,
- VC4_SCALING_PPF,
-};
-
-struct vc4_plane_state {
- struct drm_plane_state base;
- /* System memory copy of the display list for this element, computed
- * at atomic_check time.
- */
- u32 *dlist;
- u32 dlist_size; /* Number of dwords allocated for the display list */
- u32 dlist_count; /* Number of used dwords in the display list. */
-
- /* Offset in the dlist to various words, for pageflip or
- * cursor updates.
- */
- u32 pos0_offset;
- u32 pos2_offset;
- u32 ptr0_offset;
-
- /* Offset where the plane's dlist was last stored in the
- * hardware at vc4_crtc_atomic_flush() time.
- */
- u32 __iomem *hw_dlist;
-
- /* Clipped coordinates of the plane on the display. */
- int crtc_x, crtc_y, crtc_w, crtc_h;
- /* Clipped area being scanned from in the FB. */
- u32 src_x, src_y;
-
- u32 src_w[2], src_h[2];
-
- /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
- enum vc4_scaling_mode x_scaling[2], y_scaling[2];
- bool is_unity;
- bool is_yuv;
-
- /* Offset to start scanning out from the start of the plane's
- * BO.
- */
- u32 offsets[3];
-
- /* Our allocation in LBM for temporary storage during scaling. */
- struct drm_mm_node lbm;
-};
-
-static inline struct vc4_plane_state *
-to_vc4_plane_state(struct drm_plane_state *state)
-{
- return (struct vc4_plane_state *)state;
-}
-
static const struct hvs_format {
u32 drm; /* DRM_FORMAT_* */
u32 hvs; /* HVS_FORMAT_* */
u32 pixel_order;
- bool has_alpha;
- bool flip_cbcr;
} hvs_formats[] = {
{
.drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
- .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
},
{
.drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
- .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
},
{
.drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
- .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
- .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
- .pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
+ .pixel_order = HVS_PIXEL_ORDER_XRGB,
},
{
.drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
- .pixel_order = HVS_PIXEL_ORDER_XBGR, .has_alpha = false,
+ .pixel_order = HVS_PIXEL_ORDER_XBGR,
},
{
.drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
- .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
},
{
.drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
- .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
+ },
+ {
+ .drm = DRM_FORMAT_RGB888, .hvs = HVS_PIXEL_FORMAT_RGB888,
+ .pixel_order = HVS_PIXEL_ORDER_XRGB,
+ },
+ {
+ .drm = DRM_FORMAT_BGR888, .hvs = HVS_PIXEL_FORMAT_RGB888,
+ .pixel_order = HVS_PIXEL_ORDER_XBGR,
},
{
.drm = DRM_FORMAT_YUV422,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
+ .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_YVU422,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
- .flip_cbcr = true,
+ .pixel_order = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_YUV420,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
+ .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_YVU420,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
- .flip_cbcr = true,
+ .pixel_order = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_NV12,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
+ .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ },
+ {
+ .drm = DRM_FORMAT_NV21,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
+ .pixel_order = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_NV16,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
+ .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ },
+ {
+ .drm = DRM_FORMAT_NV61,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
+ .pixel_order = HVS_PIXEL_ORDER_XYCRCB,
},
};
@@ -501,6 +467,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
int num_planes = drm_format_num_planes(format->drm);
+ bool covers_screen;
u32 scl0, scl1, pitch0;
u32 lbm_size, tiling;
unsigned long irqflags;
@@ -598,13 +565,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
SCALER_POS1_SCL_HEIGHT));
}
- /* Position Word 2: Source Image Size, Alpha Mode */
+ /* Position Word 2: Source Image Size, Alpha */
vc4_state->pos2_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(format->has_alpha ?
+ VC4_SET_FIELD(fb->format->has_alpha ?
SCALER_POS2_ALPHA_MODE_PIPELINE :
SCALER_POS2_ALPHA_MODE_FIXED,
SCALER_POS2_ALPHA_MODE) |
+ (fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) |
VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));
@@ -617,15 +585,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* The pointers may be any byte address.
*/
vc4_state->ptr0_offset = vc4_state->dlist_count;
- if (!format->flip_cbcr) {
- for (i = 0; i < num_planes; i++)
- vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
- } else {
- WARN_ON_ONCE(num_planes != 3);
- vc4_dlist_write(vc4_state, vc4_state->offsets[0]);
- vc4_dlist_write(vc4_state, vc4_state->offsets[2]);
- vc4_dlist_write(vc4_state, vc4_state->offsets[1]);
- }
+ for (i = 0; i < num_planes; i++)
+ vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
/* Pointer Context Word 0/1/2: Written by the HVS */
for (i = 0; i < num_planes; i++)
@@ -687,6 +648,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_state->dlist[ctl0_offset] |=
VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
+ /* crtc_* are already clipped coordinates. */
+ covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
+ vc4_state->crtc_w == state->crtc->mode.hdisplay &&
+ vc4_state->crtc_h == state->crtc->mode.vdisplay;
+ /* Background fill might be necessary when the plane has per-pixel
+ * alpha content and blends from the background or does not cover
+ * the entire screen.
+ */
+ vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen;
+
return 0;
}
@@ -893,6 +864,32 @@ out:
ctx);
}
+static bool vc4_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ /* Support T_TILING for RGB formats only. */
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_XRGB1555:
+ return true;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ default:
+ return (modifier == DRM_FORMAT_MOD_LINEAR);
+ }
+}
+
static const struct drm_plane_funcs vc4_plane_funcs = {
.update_plane = vc4_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -901,6 +898,7 @@ static const struct drm_plane_funcs vc4_plane_funcs = {
.reset = vc4_plane_reset,
.atomic_duplicate_state = vc4_plane_duplicate_state,
.atomic_destroy_state = vc4_plane_destroy_state,
+ .format_mod_supported = vc4_format_mod_supported,
};
struct drm_plane *vc4_plane_init(struct drm_device *dev,
@@ -912,6 +910,11 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
u32 num_formats = 0;
int ret = 0;
unsigned i;
+ static const uint64_t modifiers[] = {
+ DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+ };
vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
GFP_KERNEL);
@@ -932,7 +935,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
ret = drm_universal_plane_init(dev, plane, 0,
&vc4_plane_funcs,
formats, num_formats,
- NULL, type, NULL);
+ modifiers, type, NULL);
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 55677bd..a141496 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -122,38 +122,9 @@
#define V3D_VPMBASE 0x00504
#define V3D_PCTRC 0x00670
#define V3D_PCTRE 0x00674
-#define V3D_PCTR0 0x00680
-#define V3D_PCTRS0 0x00684
-#define V3D_PCTR1 0x00688
-#define V3D_PCTRS1 0x0068c
-#define V3D_PCTR2 0x00690
-#define V3D_PCTRS2 0x00694
-#define V3D_PCTR3 0x00698
-#define V3D_PCTRS3 0x0069c
-#define V3D_PCTR4 0x006a0
-#define V3D_PCTRS4 0x006a4
-#define V3D_PCTR5 0x006a8
-#define V3D_PCTRS5 0x006ac
-#define V3D_PCTR6 0x006b0
-#define V3D_PCTRS6 0x006b4
-#define V3D_PCTR7 0x006b8
-#define V3D_PCTRS7 0x006bc
-#define V3D_PCTR8 0x006c0
-#define V3D_PCTRS8 0x006c4
-#define V3D_PCTR9 0x006c8
-#define V3D_PCTRS9 0x006cc
-#define V3D_PCTR10 0x006d0
-#define V3D_PCTRS10 0x006d4
-#define V3D_PCTR11 0x006d8
-#define V3D_PCTRS11 0x006dc
-#define V3D_PCTR12 0x006e0
-#define V3D_PCTRS12 0x006e4
-#define V3D_PCTR13 0x006e8
-#define V3D_PCTRS13 0x006ec
-#define V3D_PCTR14 0x006f0
-#define V3D_PCTRS14 0x006f4
-#define V3D_PCTR15 0x006f8
-#define V3D_PCTRS15 0x006fc
+# define V3D_PCTRE_EN BIT(31)
+#define V3D_PCTR(x) (0x00680 + ((x) * 8))
+#define V3D_PCTRS(x) (0x00684 + ((x) * 8))
#define V3D_DBGE 0x00f00
#define V3D_FDBGO 0x00f04
#define V3D_FDBGB 0x00f08
@@ -877,6 +848,7 @@ enum hvs_pixel_format {
#define SCALER_POS2_ALPHA_MODE_FIXED 1
#define SCALER_POS2_ALPHA_MODE_FIXED_NONZERO 2
#define SCALER_POS2_ALPHA_MODE_FIXED_OVER_0x07 3
+#define SCALER_POS2_ALPHA_PREMULT BIT(29)
#define SCALER_POS2_HEIGHT_MASK VC4_MASK(27, 16)
#define SCALER_POS2_HEIGHT_SHIFT 16
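In the vc4_regs.h change above, the 32 per-counter defines collapse into two parameterized macros. A standalone compile-time check (a sketch; the macro definitions are copied from the hunk) shows the expansions reproduce the removed fixed offsets:

        #define V3D_PCTR(x)  (0x00680 + ((x) * 8))
        #define V3D_PCTRS(x) (0x00684 + ((x) * 8))

        /* The parameterized form covers the same register offsets. */
        _Static_assert(V3D_PCTR(0)   == 0x00680, "was V3D_PCTR0");
        _Static_assert(V3D_PCTRS(0)  == 0x00684, "was V3D_PCTRS0");
        _Static_assert(V3D_PCTR(15)  == 0x006f8, "was V3D_PCTR15");
        _Static_assert(V3D_PCTRS(15) == 0x006fc, "was V3D_PCTRS15");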
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 493f392b..bfc2fa7 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -68,38 +68,38 @@ static const struct {
REGDEF(V3D_VPMBASE),
REGDEF(V3D_PCTRC),
REGDEF(V3D_PCTRE),
- REGDEF(V3D_PCTR0),
- REGDEF(V3D_PCTRS0),
- REGDEF(V3D_PCTR1),
- REGDEF(V3D_PCTRS1),
- REGDEF(V3D_PCTR2),
- REGDEF(V3D_PCTRS2),
- REGDEF(V3D_PCTR3),
- REGDEF(V3D_PCTRS3),
- REGDEF(V3D_PCTR4),
- REGDEF(V3D_PCTRS4),
- REGDEF(V3D_PCTR5),
- REGDEF(V3D_PCTRS5),
- REGDEF(V3D_PCTR6),
- REGDEF(V3D_PCTRS6),
- REGDEF(V3D_PCTR7),
- REGDEF(V3D_PCTRS7),
- REGDEF(V3D_PCTR8),
- REGDEF(V3D_PCTRS8),
- REGDEF(V3D_PCTR9),
- REGDEF(V3D_PCTRS9),
- REGDEF(V3D_PCTR10),
- REGDEF(V3D_PCTRS10),
- REGDEF(V3D_PCTR11),
- REGDEF(V3D_PCTRS11),
- REGDEF(V3D_PCTR12),
- REGDEF(V3D_PCTRS12),
- REGDEF(V3D_PCTR13),
- REGDEF(V3D_PCTRS13),
- REGDEF(V3D_PCTR14),
- REGDEF(V3D_PCTRS14),
- REGDEF(V3D_PCTR15),
- REGDEF(V3D_PCTRS15),
+ REGDEF(V3D_PCTR(0)),
+ REGDEF(V3D_PCTRS(0)),
+ REGDEF(V3D_PCTR(1)),
+ REGDEF(V3D_PCTRS(1)),
+ REGDEF(V3D_PCTR(2)),
+ REGDEF(V3D_PCTRS(2)),
+ REGDEF(V3D_PCTR(3)),
+ REGDEF(V3D_PCTRS(3)),
+ REGDEF(V3D_PCTR(4)),
+ REGDEF(V3D_PCTRS(4)),
+ REGDEF(V3D_PCTR(5)),
+ REGDEF(V3D_PCTRS(5)),
+ REGDEF(V3D_PCTR(6)),
+ REGDEF(V3D_PCTRS(6)),
+ REGDEF(V3D_PCTR(7)),
+ REGDEF(V3D_PCTRS(7)),
+ REGDEF(V3D_PCTR(8)),
+ REGDEF(V3D_PCTRS(8)),
+ REGDEF(V3D_PCTR(9)),
+ REGDEF(V3D_PCTRS(9)),
+ REGDEF(V3D_PCTR(10)),
+ REGDEF(V3D_PCTRS(10)),
+ REGDEF(V3D_PCTR(11)),
+ REGDEF(V3D_PCTRS(11)),
+ REGDEF(V3D_PCTR(12)),
+ REGDEF(V3D_PCTRS(12)),
+ REGDEF(V3D_PCTR(13)),
+ REGDEF(V3D_PCTRS(13)),
+ REGDEF(V3D_PCTR(14)),
+ REGDEF(V3D_PCTRS(14)),
+ REGDEF(V3D_PCTR(15)),
+ REGDEF(V3D_PCTRS(15)),
REGDEF(V3D_DBGE),
REGDEF(V3D_FDBGO),
REGDEF(V3D_FDBGB),
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 2db485a..eec76af 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -753,7 +753,7 @@ validate_gl_shader_rec(struct drm_device *dev,
28, /* cs */
};
uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
- struct drm_gem_cma_object *bo[shader_reloc_count + 8];
+ struct drm_gem_cma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
uint32_t nr_attributes, nr_relocs, packet_size;
int i;
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 9873942..6d1ae83 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -74,9 +74,9 @@ typedef struct drm_via_private {
volatile uint32_t *last_pause_ptr;
volatile uint32_t *hw_addr_ptr;
drm_via_ring_buffer_t ring;
- struct timeval last_vblank;
+ ktime_t last_vblank;
int last_vblank_valid;
- unsigned usec_per_vblank;
+ ktime_t nsec_per_vblank;
atomic_t vbl_received;
drm_via_state_t hc_state;
char pci_buf[VIA_PCI_BUF_SIZE];
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index ea8172c..c96830c 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -88,13 +88,6 @@ static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
-static unsigned time_diff(struct timeval *now, struct timeval *then)
-{
- return (now->tv_usec >= then->tv_usec) ?
- now->tv_usec - then->tv_usec :
- 1000000 - (then->tv_usec - now->tv_usec);
-}
-
u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
drm_via_private_t *dev_priv = dev->dev_private;
@@ -111,7 +104,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
int handled = 0;
- struct timeval cur_vblank;
+ ktime_t cur_vblank;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int i;
@@ -119,18 +112,18 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
if (status & VIA_IRQ_VBLANK_PENDING) {
atomic_inc(&dev_priv->vbl_received);
if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
- do_gettimeofday(&cur_vblank);
+ cur_vblank = ktime_get();
if (dev_priv->last_vblank_valid) {
- dev_priv->usec_per_vblank =
- time_diff(&cur_vblank,
- &dev_priv->last_vblank) >> 4;
+ dev_priv->nsec_per_vblank =
+ ktime_sub(cur_vblank,
+ dev_priv->last_vblank) >> 4;
}
dev_priv->last_vblank = cur_vblank;
dev_priv->last_vblank_valid = 1;
}
if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
- DRM_DEBUG("US per vblank is: %u\n",
- dev_priv->usec_per_vblank);
+ DRM_DEBUG("nsec per vblank is: %llu\n",
+ ktime_to_ns(dev_priv->nsec_per_vblank));
}
drm_handle_vblank(dev, 0);
handled = 1;
@@ -350,7 +343,7 @@ void via_driver_irq_uninstall(struct drm_device *dev)
int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_irqwait_t *irqwait = data;
- struct timeval now;
+ struct timespec64 now;
int ret = 0;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
@@ -384,9 +377,9 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
&irqwait->request.sequence);
- do_gettimeofday(&now);
+ ktime_get_ts64(&now);
irqwait->reply.tval_sec = now.tv_sec;
- irqwait->reply.tval_usec = now.tv_usec;
+ irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 41b0930..8cc8c34 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -61,9 +61,9 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
static int
virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips)
+ unsigned int num_clips)
{
struct virtio_gpu_framebuffer *virtio_gpu_fb
= to_virtio_gpu_framebuffer(fb);
@@ -71,7 +71,19 @@ virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
}
+static int
+virtio_gpu_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct virtio_gpu_framebuffer *virtio_gpu_fb =
+ to_virtio_gpu_framebuffer(fb);
+
+ return drm_gem_handle_create(file_priv, virtio_gpu_fb->obj, handle);
+}
+
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
+ .create_handle = virtio_gpu_framebuffer_create_handle,
.destroy = virtio_gpu_user_framebuffer_destroy,
.dirty = virtio_gpu_framebuffer_surface_dirty,
};
@@ -84,6 +96,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
{
int ret;
struct virtio_gpu_object *bo;
+
vgfb->obj = obj;
bo = gem_to_virtio_gpu_obj(obj);
@@ -375,7 +388,7 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
vgdev_output_init(vgdev, i);
- drm_mode_config_reset(vgdev->ddev);
+ drm_mode_config_reset(vgdev->ddev);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 49a3d8d..d9287c1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -54,6 +54,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
static void virtio_gpu_remove(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
+
drm_put_dev(dev);
}
@@ -112,7 +113,6 @@ static const struct file_operations virtio_gpu_driver_fops = {
.llseek = noop_llseek,
};
-
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
.load = virtio_gpu_driver_load,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index da2fb58..d25c8ca 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -246,7 +246,7 @@ int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
struct drm_clip_rect *clips,
- unsigned num_clips);
+ unsigned int num_clips);
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
@@ -363,12 +363,12 @@ int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt);
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
+ struct vm_area_struct *vma);
static inline struct virtio_gpu_object*
virtio_gpu_object_ref(struct virtio_gpu_object *bo)
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 15d18fd..8af69ab 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -118,7 +118,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
struct drm_clip_rect *clips,
- unsigned num_clips)
+ unsigned int num_clips)
{
struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
@@ -127,6 +127,7 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
int left, right, top, bottom;
int i;
int inc = 1;
+
if (!num_clips) {
num_clips = 1;
clips = &norect;
@@ -172,6 +173,7 @@ static void virtio_gpu_3d_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct virtio_gpu_fbdev *vfbdev = info->par;
+
drm_fb_helper_sys_fillrect(info, rect);
virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
rect->width, rect->height);
@@ -182,6 +184,7 @@ static void virtio_gpu_3d_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct virtio_gpu_fbdev *vfbdev = info->par;
+
drm_fb_helper_sys_copyarea(info, area);
virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
area->width, area->height);
@@ -192,6 +195,7 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct virtio_gpu_fbdev *vfbdev = info->par;
+
drm_fb_helper_sys_imageblit(info, image);
virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
image->width, image->height);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 92fb277..0f2768e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -124,6 +124,7 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
{
struct drm_gem_object *gobj;
struct virtio_gpu_object *obj;
+
BUG_ON(!offset_p);
gobj = drm_gem_object_lookup(file_priv, handle);
if (gobj == NULL)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 0528edb..7bdf6f0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -56,6 +56,7 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
struct list_head *head)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct ttm_validate_buffer *buf;
struct ttm_buffer_object *bo;
struct virtio_gpu_object *qobj;
@@ -68,7 +69,7 @@ static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
list_for_each_entry(buf, head, head) {
bo = buf->bo;
qobj = container_of(bo, struct virtio_gpu_object, tbo);
- ret = ttm_bo_validate(bo, &qobj->placement, false, false);
+ ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
if (ret) {
ttm_eu_backoff_reservation(ticket, head);
return ret;
@@ -82,6 +83,7 @@ static void virtio_gpu_unref_list(struct list_head *head)
struct ttm_validate_buffer *buf;
struct ttm_buffer_object *bo;
struct virtio_gpu_object *qobj;
+
list_for_each_entry(buf, head, head) {
bo = buf->bo;
qobj = container_of(bo, struct virtio_gpu_object, tbo);
@@ -196,6 +198,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_3D_FEATURES:
value = vgdev->has_virgl_3d == true ? 1 : 0;
break;
+ case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
+ value = 1;
+ break;
default:
return -EINVAL;
}
@@ -261,7 +266,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
} else {
/* use a gem reference since unref list undoes them */
- drm_gem_object_reference(&qobj->gem_base);
+ drm_gem_object_get(&qobj->gem_base);
mainbuf.bo = &qobj->tbo;
list_add(&mainbuf.head, &validate_list);
@@ -352,6 +357,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
+ struct ttm_operation_ctx ctx = { true, false };
struct drm_gem_object *gobj = NULL;
struct virtio_gpu_object *qobj = NULL;
struct virtio_gpu_fence *fence;
@@ -372,8 +378,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (ret)
goto out;
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
- true, false);
+ ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
if (unlikely(ret))
goto out_unres;
@@ -399,6 +404,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
+ struct ttm_operation_ctx ctx = { true, false };
struct drm_gem_object *gobj = NULL;
struct virtio_gpu_object *qobj = NULL;
struct virtio_gpu_fence *fence;
@@ -416,8 +422,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out;
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
- true, false);
+ ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
if (unlikely(ret))
goto out_unres;
@@ -471,15 +476,20 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_get_caps *args = data;
- int size;
+ unsigned size, host_caps_size;
int i;
int found_valid = -1;
int ret;
struct virtio_gpu_drv_cap_cache *cache_ent;
void *ptr;
+
if (vgdev->num_capsets == 0)
return -ENOSYS;
+ /* don't allow userspace to pass 0 */
+ if (args->size == 0)
+ return -EINVAL;
+
spin_lock(&vgdev->display_info_lock);
for (i = 0; i < vgdev->num_capsets; i++) {
if (vgdev->capsets[i].id == args->cap_set_id) {
@@ -495,11 +505,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
return -EINVAL;
}
- size = vgdev->capsets[found_valid].max_size;
- if (args->size > size) {
- spin_unlock(&vgdev->display_info_lock);
- return -EINVAL;
- }
+ host_caps_size = vgdev->capsets[found_valid].max_size;
+ /* only copy to user the minimum of the host caps size or the guest caps size */
+ size = min(args->size, host_caps_size);
list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
if (cache_ent->id == args->cap_set_id &&
@@ -517,6 +525,8 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
ret = wait_event_timeout(vgdev->resp_wq,
atomic_read(&cache_ent->is_valid), 5 * HZ);
+ if (!ret)
+ return -EBUSY;
ptr = cache_ent->caps_cache;
@@ -529,33 +539,34 @@ copy_exit:
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
- DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
- DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
- DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
virtio_gpu_resource_create_ioctl,
- DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
- DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
/* make transfer async to the main ring? - no sure, can we
- thread these in the underlying GL */
+ * thread these in the underlying GL
+ */
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
virtio_gpu_transfer_from_host_ioctl,
- DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
virtio_gpu_transfer_to_host_ioctl,
- DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
- DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
- DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 6400506..65060c0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -272,20 +272,18 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv;
uint32_t id;
- char dbgname[64], tmpname[TASK_COMM_LEN];
+ char dbgname[TASK_COMM_LEN];
/* can't create contexts without 3d renderer */
if (!vgdev->has_virgl_3d)
return 0;
- get_task_comm(tmpname, current);
- snprintf(dbgname, sizeof(dbgname), "%s", tmpname);
- dbgname[63] = 0;
/* allocate a virt GPU context for this opener */
vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
if (!vfpriv)
return -ENOMEM;
+ get_task_comm(dbgname, current);
virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);
vfpriv->ctx_id = id;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 6f66b73..9f2f470 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -89,7 +89,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
virtio_gpu_init_ttm_placement(bo, pinned);
ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, !kernel, NULL, acc_size,
+ &bo->placement, 0, !kernel, acc_size,
NULL, NULL, &virtio_gpu_ttm_bo_destroy);
/* ttm_bo_init failure will call the destroy */
if (ret != 0)
@@ -124,13 +124,17 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
int ret;
struct page **pages = bo->tbo.ttm->pages;
int nr_pages = bo->tbo.num_pages;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
/* wtf swapping */
if (bo->pages)
return 0;
if (bo->tbo.ttm->state == tt_unpopulated)
- bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
+ bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!bo->pages)
goto out;
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 385e0eb..d27a168 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -25,7 +25,8 @@
#include "virtgpu_drv.h"
/* Empty Implementations as there should not be any other driver for a virtual
- * device that might share buffers with virtgpu */
+ * device that might share buffers with virtgpu
+ */
int virtgpu_gem_prime_pin(struct drm_gem_object *obj)
{
@@ -38,7 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
WARN_ONCE(1, "not implemented");
}
-
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index cd389c5..11f8ae5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -177,7 +177,6 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
mem->mm_node = (void *)NULL;
- return;
}
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
@@ -225,7 +224,7 @@ static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
return -EINVAL;
}
return 0;
@@ -244,7 +243,6 @@ static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
placement->busy_placement = &placements;
placement->num_placement = 1;
placement->num_busy_placement = 1;
- return;
}
static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
@@ -324,35 +322,19 @@ static struct ttm_backend_func virtio_gpu_backend_func = {
.destroy = &virtio_gpu_ttm_backend_destroy,
};
-static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
-{
- if (ttm->state != tt_unpopulated)
- return 0;
-
- return ttm_pool_populate(ttm);
-}
-
-static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate(ttm);
-}
-
-static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size,
- uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct virtio_gpu_device *vgdev;
struct virtio_gpu_ttm_tt *gtt;
- vgdev = virtio_gpu_get_vgdev(bdev);
+ vgdev = virtio_gpu_get_vgdev(bo->bdev);
gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL)
return NULL;
gtt->ttm.ttm.func = &virtio_gpu_backend_func;
gtt->vgdev = vgdev;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
- dummy_read_page)) {
+ if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
}
@@ -369,14 +351,13 @@ static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
int ret;
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret)
return ret;
@@ -421,8 +402,6 @@ static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
static struct ttm_bo_driver virtio_gpu_bo_driver = {
.ttm_tt_create = &virtio_gpu_ttm_tt_create,
- .ttm_tt_populate = &virtio_gpu_ttm_tt_populate,
- .ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
.invalidate_caches = &virtio_gpu_invalidate_caches,
.init_mem_type = &virtio_gpu_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
@@ -431,7 +410,6 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 9eb96fb2..48e4f1d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -62,6 +62,7 @@ void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
struct drm_device *dev = vq->vdev->priv;
struct virtio_gpu_device *vgdev = dev->dev_private;
+
schedule_work(&vgdev->ctrlq.dequeue_work);
}
@@ -69,6 +70,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
struct drm_device *dev = vq->vdev->priv;
struct virtio_gpu_device *vgdev = dev->dev_private;
+
schedule_work(&vgdev->cursorq.dequeue_work);
}
@@ -272,7 +274,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
return -ENODEV;
sg_init_one(&vcmd, vbuf->buf, vbuf->size);
- sgs[outcnt+incnt] = &vcmd;
+ sgs[outcnt + incnt] = &vcmd;
outcnt++;
if (vbuf->data_size) {
@@ -381,7 +383,8 @@ retry:
}
/* just create gem objects for userspace and long lived objects,
- just use dma_alloced pages for the queue objects? */
+ * just use dma_alloced pages for the queue objects?
+ */
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
@@ -593,7 +596,6 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
wake_up(&vgdev->resp_wq);
}
-
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_ctrl_hdr *cmd_p;
@@ -707,8 +709,8 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
- strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
- cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
+ strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
+ cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -852,6 +854,7 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
if (!obj->pages) {
int ret;
+
ret = virtio_gpu_object_get_sg_table(vgdev, obj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index ad80211..794cc9d 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
- vmwgfx_simple_resource.o vmwgfx_va.o
+ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 6e0ccb7..88e72bf 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -372,6 +372,14 @@ SVGAGuestPtr;
* PA, not biased by the offset. When the command buffer is finished
* the guest should not read the offset field as there is no guarantee
* what it will set to.
+ *
+ * When the SVGA_CAP_HP_CMD_QUEUE cap bit is set a new command queue
+ * SVGA_CB_CONTEXT_1 is available. Commands submitted to this queue
+ * will be executed as quickly as possible by the SVGA device
+ * potentially before already queued commands on SVGA_CB_CONTEXT_0.
+ * The SVGA device guarantees that any command buffers submitted to
+ * SVGA_CB_CONTEXT_0 will be executed after any _already_ submitted
+ * command buffers to SVGA_CB_CONTEXT_1.
*/
#define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */
@@ -382,7 +390,8 @@ SVGAGuestPtr;
typedef enum {
SVGA_CB_CONTEXT_DEVICE = 0x3f,
SVGA_CB_CONTEXT_0 = 0x0,
- SVGA_CB_CONTEXT_MAX = 0x1,
+ SVGA_CB_CONTEXT_1 = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_CB_CONTEXT_MAX = 0x2,
} SVGACBContext;
@@ -689,6 +698,7 @@ SVGASignedPoint;
#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
#define SVGA_CAP_GBOBJECTS 0x08000000
#define SVGA_CAP_DX 0x10000000
+#define SVGA_CAP_HP_CMD_QUEUE 0x20000000
#define SVGA_CAP_CMD_RESERVED 0x80000000
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 9c42e96..55d32ae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -1202,10 +1202,14 @@ struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
struct vmw_ctx_binding_state *cbs;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
int ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
- false, false);
+ &ctx);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
new file mode 100644
index 0000000..e8c94b1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -0,0 +1,506 @@
+/**************************************************************************
+ *
+ * Copyright © 2017 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+/*
+ * Template that implements find_first_diff() for a generic
+ * unsigned integer type. @size and return value are in bytes.
+ */
+#define VMW_FIND_FIRST_DIFF(_type) \
+static size_t vmw_find_first_diff_ ## _type \
+ (const _type * dst, const _type * src, size_t size)\
+{ \
+ size_t i; \
+ \
+ for (i = 0; i < size; i += sizeof(_type)) { \
+ if (*dst++ != *src++) \
+ break; \
+ } \
+ \
+ return i; \
+}
+
+
+/*
+ * Template that implements find_last_diff() for a generic
+ * unsigned integer type. Pointers point to the item following the
+ * *end* of the area to be examined. @size and return value are in
+ * bytes.
+ */
+#define VMW_FIND_LAST_DIFF(_type) \
+static ssize_t vmw_find_last_diff_ ## _type( \
+ const _type * dst, const _type * src, size_t size) \
+{ \
+ while (size) { \
+ if (*--dst != *--src) \
+ break; \
+ \
+ size -= sizeof(_type); \
+ } \
+ return size; \
+}
+
+
+/*
+ * Instantiate find diff functions for relevant unsigned integer sizes,
+ * assuming that wider integers are faster (even with alignment handling) up to the
+ * architecture native width, which is assumed to be 32 bit unless
+ * CONFIG_64BIT is defined.
+ */
+VMW_FIND_FIRST_DIFF(u8);
+VMW_FIND_LAST_DIFF(u8);
+
+VMW_FIND_FIRST_DIFF(u16);
+VMW_FIND_LAST_DIFF(u16);
+
+VMW_FIND_FIRST_DIFF(u32);
+VMW_FIND_LAST_DIFF(u32);
+
+#ifdef CONFIG_64BIT
+VMW_FIND_FIRST_DIFF(u64);
+VMW_FIND_LAST_DIFF(u64);
+#endif
+
+
+/* We use size aligned copies. This computes (addr - align(addr)) */
+#define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1))
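+/* For example, for a pointer at address 0x1003, SPILL(ptr, u32) == 3. */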
+
+
+/*
+ * Template to compute find_first_diff() for a certain integer type
+ * including a head find for alignment, and adjustment of parameters
+ * for a tail find or an increased-resolution find using an unsigned integer
+ * find of smaller width. If finding is complete and resolution is sufficient,
+ * the macro executes a return statement. Otherwise it falls through.
+ */
+#define VMW_TRY_FIND_FIRST_DIFF(_type) \
+do { \
+ unsigned int spill = SPILL(dst, _type); \
+ size_t diff_offs; \
+ \
+ if (spill && spill == SPILL(src, _type) && \
+ sizeof(_type) - spill <= size) { \
+ spill = sizeof(_type) - spill; \
+ diff_offs = vmw_find_first_diff_u8(dst, src, spill); \
+ if (diff_offs < spill) \
+ return round_down(offset + diff_offs, granularity); \
+ \
+ dst += spill; \
+ src += spill; \
+ size -= spill; \
+ offset += spill; \
+ spill = 0; \
+ } \
+ if (!spill && !SPILL(src, _type)) { \
+ size_t to_copy = size & ~(sizeof(_type) - 1); \
+ \
+ diff_offs = vmw_find_first_diff_ ## _type \
+ ((_type *) dst, (_type *) src, to_copy); \
+ if (diff_offs >= size || granularity == sizeof(_type)) \
+ return (offset + diff_offs); \
+ \
+ dst += diff_offs; \
+ src += diff_offs; \
+ size -= diff_offs; \
+ offset += diff_offs; \
+ } \
+} while (0) \
+
+
+/**
+ * vmw_find_first_diff - find the first difference between dst and src
+ *
+ * @dst: The destination address
+ * @src: The source address
+ * @size: Number of bytes to compare
+ * @granularity: The granularity needed for the return value in bytes.
+ * return: The offset from find start where the first difference was
+ * encountered in bytes. If no difference was found, the function returns
+ * a value >= @size.
+ */
+static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size,
+ size_t granularity)
+{
+ size_t offset = 0;
+
+ /*
+ * Try finding with large integers if alignment allows, or we can
+ * fix it. Fall through if we need better resolution or alignment
+ * was bad.
+ */
+#ifdef CONFIG_64BIT
+ VMW_TRY_FIND_FIRST_DIFF(u64);
+#endif
+ VMW_TRY_FIND_FIRST_DIFF(u32);
+ VMW_TRY_FIND_FIRST_DIFF(u16);
+
+ return round_down(offset + vmw_find_first_diff_u8(dst, src, size),
+ granularity);
+}
+
+
+/*
+ * Template to compute find_last_diff() for a certain integer type
+ * including a tail find for alignment, and adjustment of parameters
+ * for a head find or an increased-resolution find using an unsigned integer
+ * find of smaller width. If finding is complete and resolution is sufficient,
+ * the macro executes a return statement. Otherwise it falls through.
+ */
+#define VMW_TRY_FIND_LAST_DIFF(_type) \
+do { \
+ unsigned int spill = SPILL(dst, _type); \
+ ssize_t location; \
+ ssize_t diff_offs; \
+ \
+ if (spill && spill <= size && spill == SPILL(src, _type)) { \
+ diff_offs = vmw_find_last_diff_u8(dst, src, spill); \
+ if (diff_offs) { \
+ location = size - spill + diff_offs - 1; \
+ return round_down(location, granularity); \
+ } \
+ \
+ dst -= spill; \
+ src -= spill; \
+ size -= spill; \
+ spill = 0; \
+ } \
+ if (!spill && !SPILL(src, _type)) { \
+ size_t to_copy = round_down(size, sizeof(_type)); \
+ \
+ diff_offs = vmw_find_last_diff_ ## _type \
+ ((_type *) dst, (_type *) src, to_copy); \
+ location = size - to_copy + diff_offs - sizeof(_type); \
+ if (location < 0 || granularity == sizeof(_type)) \
+ return location; \
+ \
+ dst -= to_copy - diff_offs; \
+ src -= to_copy - diff_offs; \
+ size -= to_copy - diff_offs; \
+ } \
+} while (0)
+
+
+/**
+ * vmw_find_last_diff - find the last difference between dst and src
+ *
+ * @dst: The destination address
+ * @src: The source address
+ * @size: Number of bytes to compare
+ * @granularity: The granularity needed for the return value in bytes.
+ * return: The offset from find start where the last difference was
+ * encountered in bytes, or a negative value if no difference was found.
+ */
+static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size,
+ size_t granularity)
+{
+ dst += size;
+ src += size;
+
+#ifdef CONFIG_64BIT
+ VMW_TRY_FIND_LAST_DIFF(u64);
+#endif
+ VMW_TRY_FIND_LAST_DIFF(u32);
+ VMW_TRY_FIND_LAST_DIFF(u16);
+
+ return round_down(vmw_find_last_diff_u8(dst, src, size) - 1,
+ granularity);
+}
+
+
+/**
+ * vmw_memcpy - A wrapper around kernel memcpy that can be plugged into a
+ * struct vmw_diff_cpy.
+ *
+ * @diff: The struct vmw_diff_cpy closure argument (unused).
+ * @dest: The copy destination.
+ * @src: The copy source.
+ * @n: Number of bytes to copy.
+ */
+void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n)
+{
+ memcpy(dest, src, n);
+}
+
+
+/**
+ * vmw_adjust_rect - Adjust rectangle coordinates for newly found difference
+ *
+ * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
+ * @diff_offs: The offset from @diff->line_offset where the difference was
+ * found.
+ */
+static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs)
+{
+ size_t offs = (diff_offs + diff->line_offset) / diff->cpp;
+ struct drm_rect *rect = &diff->rect;
+
+ rect->x1 = min_t(int, rect->x1, offs);
+ rect->x2 = max_t(int, rect->x2, offs + 1);
+ rect->y1 = min_t(int, rect->y1, diff->line);
+ rect->y2 = max_t(int, rect->y2, diff->line + 1);
+}
+
+/**
+ * vmw_diff_memcpy - memcpy that creates a bounding box of modified content.
+ *
+ * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
+ * @dest: The copy destination.
+ * @src: The copy source.
+ * @n: Number of bytes to copy.
+ *
+ * In order to correctly track the modified content, the field @diff->line must
+ * be pre-loaded with the current line number, the field @diff->line_offset must
+ * be pre-loaded with the line offset in bytes where the copy starts, and
+ * finally the field @diff->cpp needs to be pre-loaded with the number of bytes
+ * per unit in the horizontal direction of the area we're examining,
+ * typically bytes per pixel.
+ * This determines the needed granularity of the difference computing
+ * operations. A higher cpp generally leads to faster execution at the cost of
+ * bounding box width precision.
+ */
+void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+ size_t n)
+{
+ ssize_t csize, byte_len;
+
+ if (WARN_ON_ONCE(round_down(n, diff->cpp) != n))
+ return;
+
+ /* TODO: Possibly use a single vmw_find_first_diff per line? */
+ csize = vmw_find_first_diff(dest, src, n, diff->cpp);
+ if (csize < n) {
+ vmw_adjust_rect(diff, csize);
+ byte_len = diff->cpp;
+
+ /*
+ * Starting from where first difference was found, find
+ * location of last difference, and then copy.
+ */
+ diff->line_offset += csize;
+ dest += csize;
+ src += csize;
+ n -= csize;
+ csize = vmw_find_last_diff(dest, src, n, diff->cpp);
+ if (csize >= 0) {
+ byte_len += csize;
+ vmw_adjust_rect(diff, csize);
+ }
+ memcpy(dest, src, byte_len);
+ }
+ diff->line_offset += n;
+}
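+
+/*
+ * Example (sketch only; names other than vmw_diff_memcpy() are
+ * illustrative): copy one line segment while tracking damage at
+ * 4 bytes per pixel:
+ *
+ *	diff.cpp = 4;
+ *	diff.line = line;
+ *	diff.line_offset = seg_start_bytes;
+ *	vmw_diff_memcpy(&diff, dst + seg_start_bytes,
+ *			src + seg_start_bytes, seg_bytes);
+ *
+ * seg_bytes must be a multiple of diff.cpp, and diff.rect is grown to
+ * cover any pixels found to differ.
+ */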
+
+/**
+ * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line
+ *
+ * @mapped_dst: Already mapped destination page index in @dst_pages.
+ * @dst_addr: Kernel virtual address of mapped destination page.
+ * @dst_pages: Array of destination bo pages.
+ * @dst_num_pages: Number of destination bo pages.
+ * @dst_prot: Destination bo page protection.
+ * @mapped_src: Already mapped source page index in @src_pages.
+ * @src_addr: Kernel virtual address of mapped source page.
+ * @src_pages: Array of source bo pages.
+ * @src_num_pages: Number of source bo pages.
+ * @src_prot: Source bo page protection.
+ * @diff: Struct vmw_diff_cpy, in the end forwarded to the memcpy routine.
+ */
+struct vmw_bo_blit_line_data {
+ u32 mapped_dst;
+ u8 *dst_addr;
+ struct page **dst_pages;
+ u32 dst_num_pages;
+ pgprot_t dst_prot;
+ u32 mapped_src;
+ u8 *src_addr;
+ struct page **src_pages;
+ u32 src_num_pages;
+ pgprot_t src_prot;
+ struct vmw_diff_cpy *diff;
+};
+
+/**
+ * vmw_bo_cpu_blit_line - Blit part of a line from one bo to another.
+ *
+ * @d: Blit data as described above.
+ * @dst_offset: Destination copy start offset from start of bo.
+ * @src_offset: Source copy start offset from start of bo.
+ * @bytes_to_copy: Number of bytes to copy in this line.
+ */
+static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
+ u32 dst_offset,
+ u32 src_offset,
+ u32 bytes_to_copy)
+{
+ struct vmw_diff_cpy *diff = d->diff;
+
+ while (bytes_to_copy) {
+ u32 copy_size = bytes_to_copy;
+ u32 dst_page = dst_offset >> PAGE_SHIFT;
+ u32 src_page = src_offset >> PAGE_SHIFT;
+ u32 dst_page_offset = dst_offset & ~PAGE_MASK;
+ u32 src_page_offset = src_offset & ~PAGE_MASK;
+ bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst;
+ bool unmap_src = d->src_addr && (src_page != d->mapped_src ||
+ unmap_dst);
+
+ copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
+ copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
+
+ if (unmap_src) {
+ ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
+ d->src_addr = NULL;
+ }
+
+ if (unmap_dst) {
+ ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
+ d->dst_addr = NULL;
+ }
+
+ if (!d->dst_addr) {
+ if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
+ return -EINVAL;
+
+ d->dst_addr =
+ ttm_kmap_atomic_prot(d->dst_pages[dst_page],
+ d->dst_prot);
+ if (!d->dst_addr)
+ return -ENOMEM;
+
+ d->mapped_dst = dst_page;
+ }
+
+ if (!d->src_addr) {
+ if (WARN_ON_ONCE(src_page >= d->src_num_pages))
+ return -EINVAL;
+
+ d->src_addr =
+ ttm_kmap_atomic_prot(d->src_pages[src_page],
+ d->src_prot);
+ if (!d->src_addr)
+ return -ENOMEM;
+
+ d->mapped_src = src_page;
+ }
+ diff->do_cpy(diff, d->dst_addr + dst_page_offset,
+ d->src_addr + src_page_offset, copy_size);
+
+ bytes_to_copy -= copy_size;
+ dst_offset += copy_size;
+ src_offset += copy_size;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_bo_cpu_blit - in-kernel cpu blit.
+ *
+ * @dst: Destination buffer object.
+ * @dst_offset: Destination offset of blit start in bytes.
+ * @dst_stride: Destination stride in bytes.
+ * @src: Source buffer object.
+ * @src_offset: Source offset of blit start in bytes.
+ * @src_stride: Source stride in bytes.
+ * @w: Width of blit.
+ * @h: Height of blit.
+ * return: Zero on success. Negative error value on failure. Will print out
+ * kernel warnings on caller bugs.
+ *
+ * Performs a CPU blit from one buffer object to another, avoiding a full
+ * bo vmap which may exhaust or fragment vmalloc space.
+ * On supported architectures (x86), we're using kmap_atomic, which avoids
+ * cross-processor TLB and cache flushes and may, on non-HIGHMEM systems,
+ * reference already set-up mappings.
+ *
+ * Neither of the buffer objects may be placed in PCI memory
+ * (Fixed memory in TTM terminology) when using this function.
+ */
+int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+ u32 dst_offset, u32 dst_stride,
+ struct ttm_buffer_object *src,
+ u32 src_offset, u32 src_stride,
+ u32 w, u32 h,
+ struct vmw_diff_cpy *diff)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ u32 j, initial_line = dst_offset / dst_stride;
+ struct vmw_bo_blit_line_data d;
+ int ret = 0;
+
+ /* Buffer objects need to be either pinned or reserved: */
+ if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
+ lockdep_assert_held(&dst->resv->lock.base);
+ if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
+ lockdep_assert_held(&src->resv->lock.base);
+
+ if (dst->ttm->state == tt_unpopulated) {
+ ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (src->ttm->state == tt_unpopulated) {
+ ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx);
+ if (ret)
+ return ret;
+ }
+
+ d.mapped_dst = 0;
+ d.mapped_src = 0;
+ d.dst_addr = NULL;
+ d.src_addr = NULL;
+ d.dst_pages = dst->ttm->pages;
+ d.src_pages = src->ttm->pages;
+ d.dst_num_pages = dst->num_pages;
+ d.src_num_pages = src->num_pages;
+ d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
+ d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
+ d.diff = diff;
+
+ for (j = 0; j < h; ++j) {
+ diff->line = j + initial_line;
+ diff->line_offset = dst_offset % dst_stride;
+ ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w);
+ if (ret)
+ goto out;
+
+ dst_offset += dst_stride;
+ src_offset += src_stride;
+ }
+out:
+ if (d.src_addr)
+ ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
+ if (d.dst_addr)
+ ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c705632..21111fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -185,6 +185,22 @@ static const struct ttm_place evictable_placement_flags[] = {
}
};
+static const struct ttm_place nonfixed_placement_flags[] = {
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+ }
+};
+
struct ttm_placement vmw_evictable_placement = {
.num_placement = 4,
.placement = evictable_placement_flags,
@@ -213,6 +229,13 @@ struct ttm_placement vmw_mob_ne_placement = {
.busy_placement = &mob_ne_placement_flags
};
+struct ttm_placement vmw_nonfixed_placement = {
+ .num_placement = 3,
+ .placement = nonfixed_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
struct vmw_ttm_tt {
struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
@@ -394,6 +417,10 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
struct vmw_piter iter;
dma_addr_t old;
int ret = 0;
@@ -417,8 +444,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
sgt_size = ttm_round_pot(sizeof(struct sg_table));
}
vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
- ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
- true);
+ ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -632,7 +658,7 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
}
-static int vmw_ttm_populate(struct ttm_tt *ttm)
+static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct vmw_ttm_tt *vmw_tt =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -646,15 +672,16 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
size_t size =
ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
- ret = ttm_mem_global_alloc(glob, size, false, true);
+ ret = ttm_mem_global_alloc(glob, size, ctx);
if (unlikely(ret != 0))
return ret;
- ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+ ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
+ ctx);
if (unlikely(ret != 0))
ttm_mem_global_free(glob, size);
} else
- ret = ttm_pool_populate(ttm);
+ ret = ttm_pool_populate(ttm, ctx);
return ret;
}
@@ -689,9 +716,8 @@ static struct ttm_backend_func vmw_ttm_func = {
.destroy = vmw_ttm_destroy,
};
-static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
struct vmw_ttm_tt *vmw_be;
int ret;
@@ -701,15 +727,13 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
return NULL;
vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
- vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+ vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
- dummy_read_page);
+ ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
else
- ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
- dummy_read_page);
+ ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
if (unlikely(ret != 0))
goto out_no_init;
@@ -840,6 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
+ vmw_resource_swap_notify(bo);
(void) ttm_bo_wait(bo, false, false);
}
@@ -859,5 +884,4 @@ struct ttm_bo_driver vmw_bo_driver = {
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index c706ad3..9f45d50 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -101,6 +101,7 @@ struct vmw_cmdbuf_context {
* @handle: DMA address handle for the command buffer space if @using_mob is
* false. Immutable.
* @size: The size of the command buffer space. Immutable.
+ * @num_contexts: Number of contexts actually enabled.
*/
struct vmw_cmdbuf_man {
struct mutex cur_mutex;
@@ -128,6 +129,7 @@ struct vmw_cmdbuf_man {
bool has_pool;
dma_addr_t handle;
size_t size;
+ u32 num_contexts;
};
/**
@@ -185,7 +187,7 @@ struct vmw_cmdbuf_alloc_info {
/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
- for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
+ for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
++(_i), ++(_ctx))
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
@@ -514,6 +516,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
int i;
struct vmw_cmdbuf_context *ctx;
+ bool global_block = false;
for_each_cmdbuf_ctx(man, i, ctx) {
INIT_LIST_HEAD(&restart_head[i]);
@@ -531,6 +534,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
list_del_init(&entry->list);
restart[entry->cb_context] = true;
+ global_block = true;
if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
DRM_ERROR("Unknown command causing device error.\n");
@@ -564,23 +568,21 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
cb_hdr->length -= new_start_offset;
cb_hdr->errorOffset = 0;
cb_hdr->offset = 0;
+
list_add_tail(&entry->list, &restart_head[entry->cb_context]);
- man->ctx[entry->cb_context].block_submission = true;
}
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ man->ctx[i].block_submission = true;
+
spin_unlock(&man->lock);
- /* Preempt all contexts with errors */
- for_each_cmdbuf_ctx(man, i, ctx) {
- if (ctx->block_submission && vmw_cmdbuf_preempt(man, i))
- DRM_ERROR("Failed preempting command buffer "
- "context %u.\n", i);
- }
+ /* Preempt all contexts */
+ if (global_block && vmw_cmdbuf_preempt(man, 0))
+ DRM_ERROR("Failed preempting command buffer contexts\n");
spin_lock(&man->lock);
for_each_cmdbuf_ctx(man, i, ctx) {
- if (!ctx->block_submission)
- continue;
-
/* Move preempted command buffers to the preempted queue. */
vmw_cmdbuf_ctx_process(man, ctx, &dummy);
@@ -594,19 +596,16 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
* Finally add all command buffers first in the submitted
* queue, to rerun them.
*/
- list_splice_init(&restart_head[i], &ctx->submitted);
ctx->block_submission = false;
+ list_splice_init(&restart_head[i], &ctx->submitted);
}
vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
- for_each_cmdbuf_ctx(man, i, ctx) {
- if (restart[i] && vmw_cmdbuf_startstop(man, i, true))
- DRM_ERROR("Failed restarting command buffer "
- "context %u.\n", i);
- }
+ if (global_block && vmw_cmdbuf_startstop(man, 0, true))
+ DRM_ERROR("Failed restarting command buffer contexts\n");
/* Send a new fence in case one was removed */
if (send_fence) {
@@ -1245,7 +1244,7 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
return -ENOMEM;
ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
- &vmw_mob_ne_placement, 0, false, NULL,
+ &vmw_mob_ne_placement, 0, false,
&man->cmd_space);
if (ret)
return ret;
@@ -1307,6 +1306,8 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
if (!man)
return ERR_PTR(-ENOMEM);
+ man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
+ 2 : 1;
man->headers = dma_pool_create("vmwgfx cmdbuf",
&dev_priv->dev->pdev->dev,
sizeof(SVGACBHeader),
@@ -1341,14 +1342,11 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
&dev_priv->error_waiters);
- for_each_cmdbuf_ctx(man, i, ctx) {
- ret = vmw_cmdbuf_startstop(man, i, true);
- if (ret) {
- DRM_ERROR("Failed starting command buffer "
- "context %u.\n", i);
- vmw_cmdbuf_man_destroy(man);
- return ERR_PTR(ret);
- }
+ ret = vmw_cmdbuf_startstop(man, 0, true);
+ if (ret) {
+ DRM_ERROR("Failed starting command buffer contexts\n");
+ vmw_cmdbuf_man_destroy(man);
+ return ERR_PTR(ret);
}
return man;
@@ -1398,16 +1396,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
*/
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
- struct vmw_cmdbuf_context *ctx;
- unsigned int i;
-
WARN_ON_ONCE(man->has_pool);
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
- for_each_cmdbuf_ctx(man, i, ctx)
- if (vmw_cmdbuf_startstop(man, i, false))
- DRM_ERROR("Failed stopping command buffer "
- "context %u.\n", i);
+ if (vmw_cmdbuf_startstop(man, 0, false))
+ DRM_ERROR("Failed stopping command buffer contexts.\n");
vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
&man->dev_priv->error_waiters);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 4212b3e..3767ac3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -746,6 +746,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
if (!dev_priv->has_dx && dx) {
@@ -768,7 +772,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_context_size,
- false, true);
+ &ttm_opt_ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for context"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index d87861b..cbf54ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -387,6 +387,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
*/
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_dma_buffer *buf, *old_buf = res->backup;
@@ -455,7 +456,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
}
/* Unpin new buffer, and switch backup buffers. */
- ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+ ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed validating new COTable backup buffer.\n");
goto out_wait;
@@ -572,6 +573,10 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
u32 type)
{
struct vmw_cotable *vcotbl;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
u32 num_entries;
@@ -579,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- cotable_acc_size, false, true);
+ cotable_acc_size, &ttm_opt_ctx);
if (unlikely(ret))
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 0cd8890..d59d9dd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -47,6 +47,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
struct ttm_placement *placement,
bool interruptible)
{
+ struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
uint32_t new_flags;
@@ -65,7 +66,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
ret = ttm_bo_mem_compat(placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, &ctx);
if (!ret)
vmw_bo_pin_reserved(buf, true);
@@ -95,6 +96,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible)
{
+ struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
uint32_t new_flags;
@@ -115,12 +117,11 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
goto out_unreserve;
}
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
- false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto out_unreserve;
- ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
out_unreserve:
if (!ret)
@@ -170,6 +171,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible)
{
+ struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
struct ttm_place place;
@@ -200,14 +202,16 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0 &&
- buf->pin_count == 0)
- (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ buf->pin_count == 0) {
+ ctx.interruptible = false;
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+ }
if (buf->pin_count > 0)
ret = ttm_bo_mem_compat(&placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
- ret = ttm_bo_validate(bo, &placement, interruptible, false);
+ ret = ttm_bo_validate(bo, &placement, &ctx);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
@@ -286,6 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
*/
void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
+ struct ttm_operation_ctx ctx = { false, true };
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
@@ -314,7 +319,58 @@ void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
placement.num_placement = 1;
placement.placement = &pl;
- ret = ttm_bo_validate(bo, &placement, false, true);
+ ret = ttm_bo_validate(bo, &placement, &ctx);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
+
+
+/*
+ * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_dma_buffer_map_and_cache().
+ */
+void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
+{
+ if (vbo->map.bo == NULL)
+ return;
+
+ ttm_bo_kunmap(&vbo->map);
+}
+
+
+/*
+ * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on either
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
+{
+ struct ttm_buffer_object *bo = &vbo->base;
+ bool not_used;
+ void *virtual;
+ int ret;
+
+ virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ if (virtual)
+ return virtual;
+
+ ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ if (ret)
+ DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+ return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
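+
+/*
+ * Example (sketch only; data and size are illustrative): with @vbo
+ * reserved or pinned by the caller,
+ *
+ *	void *virtual = vmw_dma_buffer_map_and_cache(vbo);
+ *
+ *	if (virtual)
+ *		memcpy(virtual, data, size);
+ *
+ * The map is reused on subsequent calls and stays valid as long as the
+ * buffer object remains pinned or reserved.
+ */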
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 184340d..70e1a88 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -301,6 +301,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" Guest Backed Resources.\n");
if (capabilities & SVGA_CAP_DX)
DRM_INFO(" DX Features.\n");
+ if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
+ DRM_INFO(" HP Command Queue.\n");
}
/**
@@ -1277,8 +1279,7 @@ static void vmw_master_drop(struct drm_device *dev,
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
- if (dev_priv->enable_fb)
- vmw_fb_on(dev_priv);
+ vmw_fb_refresh(dev_priv);
}
/**
@@ -1337,6 +1338,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
*/
void vmw_svga_disable(struct vmw_private *dev_priv)
{
+ /*
+ * Disabling SVGA will turn off device modesetting capabilities, so
+ * notify KMS about that so that it doesn't cache atomic state that is
+ * no longer valid, for example crtcs that are turned on.
+ * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
+ * but vmw_kms_lost_device() takes the reservation sem and thus we'd
+ * end up with a lock order reversal. Thus, a master may actually perform
+ * a new modeset just after we call vmw_kms_lost_device() and race with
+ * vmw_svga_disable(), but that should at worst cause atomic KMS state
+ * to be inconsistent with the device, causing modesetting problems.
+ */
+ vmw_kms_lost_device(dev_priv->dev);
ttm_write_lock(&dev_priv->reservation_sem, false);
spin_lock(&dev_priv->svga_lock);
if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
@@ -1368,28 +1382,23 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case PM_HIBERNATION_PREPARE:
- if (dev_priv->enable_fb)
- vmw_fb_off(dev_priv);
- ttm_suspend_lock(&dev_priv->reservation_sem);
-
/*
- * This empties VRAM and unbinds all GMR bindings.
- * Buffer contents is moved to swappable memory.
+ * Take the reservation sem in write mode, which will make sure
+ * there are no other processes holding a buffer object
+ * reservation, meaning we should be able to evict all buffer
+ * objects if needed.
+ * Once user-space processes have been frozen, we can release
+ * the lock again.
*/
- vmw_execbuf_release_pinned_bo(dev_priv);
- vmw_resource_evict_all(dev_priv);
- vmw_release_device_early(dev_priv);
- ttm_bo_swapout_all(&dev_priv->bdev);
- vmw_fence_fifo_down(dev_priv->fman);
+ ttm_suspend_lock(&dev_priv->reservation_sem);
+ dev_priv->suspend_locked = true;
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
- vmw_fence_fifo_up(dev_priv->fman);
- ttm_suspend_unlock(&dev_priv->reservation_sem);
- if (dev_priv->enable_fb)
- vmw_fb_on(dev_priv);
- break;
- case PM_RESTORE_PREPARE:
+ if (READ_ONCE(dev_priv->suspend_locked)) {
+ dev_priv->suspend_locked = false;
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ }
break;
default:
break;
@@ -1440,25 +1449,48 @@ static int vmw_pm_freeze(struct device *kdev)
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ int ret;
- dev_priv->suspended = true;
+ /*
+ * Unlock for vmw_kms_suspend.
+ * No user-space processes should be running now.
+ */
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ ret = vmw_kms_suspend(dev_priv->dev);
+ if (ret) {
+ ttm_suspend_lock(&dev_priv->reservation_sem);
+ DRM_ERROR("Failed to freeze modesetting.\n");
+ return ret;
+ }
if (dev_priv->enable_fb)
- vmw_fifo_resource_dec(dev_priv);
+ vmw_fb_off(dev_priv);
+ ttm_suspend_lock(&dev_priv->reservation_sem);
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_resource_evict_all(dev_priv);
+ vmw_release_device_early(dev_priv);
+ ttm_bo_swapout_all(&dev_priv->bdev);
+ if (dev_priv->enable_fb)
+ vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
DRM_ERROR("Can't hibernate while 3D resources are active.\n");
if (dev_priv->enable_fb)
vmw_fifo_resource_inc(dev_priv);
WARN_ON(vmw_request_device_late(dev_priv));
- dev_priv->suspended = false;
+ dev_priv->suspend_locked = false;
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ if (dev_priv->suspend_state)
+ vmw_kms_resume(dev);
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
+ vmw_fb_refresh(dev_priv);
return -EBUSY;
}
- if (dev_priv->enable_fb)
- __vmw_svga_disable(dev_priv);
+ vmw_fence_fifo_down(dev_priv->fman);
+ __vmw_svga_disable(dev_priv);
vmw_release_device_late(dev_priv);
-
return 0;
}
@@ -1482,7 +1514,16 @@ static int vmw_pm_restore(struct device *kdev)
if (dev_priv->enable_fb)
__vmw_svga_enable(dev_priv);
- dev_priv->suspended = false;
+ vmw_fence_fifo_up(dev_priv->fman);
+ dev_priv->suspend_locked = false;
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ if (dev_priv->suspend_state)
+ vmw_kms_resume(dev_priv->dev);
+
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
+
+ vmw_fb_refresh(dev_priv);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d08753e..f34f368 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,10 +43,10 @@
#include <linux/sync_file.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20170612"
+#define VMWGFX_DRIVER_DATE "20180322"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 14
-#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -92,6 +92,8 @@ struct vmw_dma_buffer {
s32 pin_count;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
+ /* Protected by reservation */
+ struct ttm_bo_kmap_obj map;
};
/**
@@ -423,6 +425,7 @@ struct vmw_private {
struct vmw_framebuffer *implicit_fb;
struct mutex global_kms_state_mutex;
spinlock_t cursor_lock;
+ struct drm_atomic_state *suspend_state;
/*
* Context and surface management.
@@ -494,8 +497,8 @@ struct vmw_private {
struct vmw_master *active_master;
struct vmw_master fbdev_master;
struct notifier_block pm_nb;
- bool suspended;
bool refuse_hibernation;
+ bool suspend_locked;
struct mutex release_mutex;
atomic_t num_fifo_resources;
@@ -673,11 +676,13 @@ extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
+extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
+
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
*/
@@ -700,6 +705,8 @@ extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
+extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
+extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -766,6 +773,7 @@ extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
+extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -902,6 +910,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
+void vmw_fb_refresh(struct vmw_private *vmw_priv);
/**
* Kernel modesetting - vmwgfx_kms.c
@@ -938,6 +947,9 @@ int vmw_kms_present(struct vmw_private *dev_priv,
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
+int vmw_kms_suspend(struct drm_device *dev);
+int vmw_kms_resume(struct drm_device *dev);
+void vmw_kms_lost_device(struct drm_device *dev);
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -1165,6 +1177,53 @@ extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
+/* CPU blit utilities - vmwgfx_blit.c */
+
+/**
+ * struct vmw_diff_cpy - CPU blit information structure
+ *
+ * @rect: The output bounding box rectangle.
+ * @line: The current line of the blit.
+ * @line_offset: Offset of the current line segment.
+ * @cpp: Bytes per pixel (granularity information).
+ * @memcpy: Which memcpy function to use.
+ */
+struct vmw_diff_cpy {
+ struct drm_rect rect;
+ size_t line;
+ size_t line_offset;
+ int cpp;
+ void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+ size_t n);
+};
+
+#define VMW_CPU_BLIT_INITIALIZER { \
+ .do_cpy = vmw_memcpy, \
+}
+
+#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) { \
+ .line = 0, \
+ .line_offset = 0, \
+ .rect = { .x1 = INT_MAX/2, \
+ .y1 = INT_MAX/2, \
+ .x2 = INT_MIN/2, \
+ .y2 = INT_MIN/2 \
+ }, \
+ .cpp = _cpp, \
+ .do_cpy = vmw_diff_memcpy, \
+}
+
+void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+ size_t n);
+
+void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
+
+int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+ u32 dst_offset, u32 dst_stride,
+ struct ttm_buffer_object *src,
+ u32 src_offset, u32 src_stride,
+ u32 w, u32 h,
+ struct vmw_diff_cpy *diff);
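+
+/*
+ * Example (sketch only; the bo, offset and stride values are illustrative):
+ * blit between two reserved or pinned, non-fixed-memory buffer objects while
+ * tracking the damaged region at 4 bytes per pixel. Note that the width
+ * argument is in bytes (cf. vmw_bo_cpu_blit_line()):
+ *
+ *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
+ *	int ret;
+ *
+ *	ret = vmw_bo_cpu_blit(dst_bo, dst_offset, dst_stride,
+ *			      src_bo, src_offset, src_stride,
+ *			      width_pixels * 4, height, &diff);
+ *
+ * On success, diff.rect bounds the modified pixels.
+ */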
/**
* Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 87e8af5..c9d5cc2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3703,14 +3703,14 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
{
struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
base);
+ struct ttm_operation_ctx ctx = { interruptible, true };
int ret;
if (vbo->pin_count > 0)
return 0;
if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
- false);
+ return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
@@ -3719,8 +3719,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
- false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -3729,7 +3728,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d23a18a..2582ffd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -43,8 +43,6 @@ struct vmw_fb_par {
struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo;
- struct ttm_bo_kmap_obj map;
- void *bo_ptr;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
@@ -163,10 +161,17 @@ static int vmw_fb_blank(int blank, struct fb_info *info)
return 0;
}
-/*
- * Dirty code
+/**
+ * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
+ *
+ * @work: The struct work_struct associated with this task.
+ *
+ * This function flushes the dirty regions of the vmalloc framebuffer to the
+ * kms framebuffer, and if the kms framebuffer is visible, also updates the
+ * corresponding displays. Note that this function runs even if the kms
+ * framebuffer is not bound to a crtc and thus not visible, but it's turned
+ * off during hibernation using the par->dirty.active bool.
*/
-
static void vmw_fb_dirty_flush(struct work_struct *work)
{
struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
@@ -174,13 +179,15 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
unsigned long irq_flags;
- s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
+ s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
u32 cpp, max_x, max_y;
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;
+ struct vmw_dma_buffer *vbo = par->vmw_bo;
+ void *virtual;
- if (vmw_priv->suspended)
+ if (!READ_ONCE(par->dirty.active))
return;
mutex_lock(&par->bo_mutex);
@@ -188,10 +195,16 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
if (!cur_fb)
goto out_unlock;
+ (void) ttm_read_lock(&vmw_priv->reservation_sem, false);
+ (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+ virtual = vmw_dma_buffer_map_and_cache(vbo);
+ if (!virtual)
+ goto out_unreserve;
+
spin_lock_irqsave(&par->dirty.lock, irq_flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
- goto out_unlock;
+ goto out_unreserve;
}
/*
@@ -221,7 +234,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
if (w && h) {
- dst_ptr = (u8 *)par->bo_ptr +
+ dst_ptr = (u8 *)virtual +
(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
src_ptr = (u8 *)par->vmalloc +
((dst_y1 + par->fb_y) * info->fix.line_length +
@@ -237,7 +250,12 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
clip.x2 = dst_x2;
clip.y1 = dst_y1;
clip.y2 = dst_y2;
+ }
+out_unreserve:
+ ttm_bo_unreserve(&vbo->base);
+ ttm_read_unlock(&vmw_priv->reservation_sem);
+ if (w && h) {
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
vmw_fifo_flush(vmw_priv, false);
@@ -500,22 +518,12 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
}
if (cur_fb) {
- drm_framebuffer_unreference(cur_fb);
+ drm_framebuffer_put(cur_fb);
par->set_fb = NULL;
}
- if (par->vmw_bo && detach_bo) {
- struct vmw_private *vmw_priv = par->vmw_priv;
-
- if (par->bo_ptr) {
- ttm_bo_kunmap(&par->map);
- par->bo_ptr = NULL;
- }
- if (unref_bo)
- vmw_dmabuf_unreference(&par->vmw_bo);
- else if (vmw_priv->active_display_unit != vmw_du_legacy)
- vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
- }
+ if (par->vmw_bo && detach_bo && unref_bo)
+ vmw_dmabuf_unreference(&par->vmw_bo);
return 0;
}
@@ -636,38 +644,6 @@ static int vmw_fb_set_par(struct fb_info *info)
if (ret)
goto out_unlock;
- if (!par->bo_ptr) {
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
-
- /*
- * Pin before mapping. Since we don't know in what placement
- * to pin, call into KMS to do it for us. LDU doesn't require
- * additional pinning because set_config() would've pinned
- * it already
- */
- if (vmw_priv->active_display_unit != vmw_du_legacy) {
- ret = vfb->pin(vfb);
- if (ret) {
- DRM_ERROR("Could not pin the fbdev "
- "framebuffer.\n");
- goto out_unlock;
- }
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
- par->vmw_bo->base.num_pages, &par->map);
- if (ret) {
- if (vmw_priv->active_display_unit != vmw_du_legacy)
- vfb->unpin(vfb);
-
- DRM_ERROR("Could not map the fbdev framebuffer.\n");
- goto out_unlock;
- }
-
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- }
-
-
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
@@ -883,12 +859,6 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
flush_delayed_work(&info->deferred_work);
flush_delayed_work(&par->local_work);
- mutex_lock(&par->bo_mutex);
- drm_modeset_lock_all(vmw_priv->dev);
- (void) vmw_fb_kms_detach(par, true, false);
- drm_modeset_unlock_all(vmw_priv->dev);
- mutex_unlock(&par->bo_mutex);
-
return 0;
}
@@ -904,10 +874,24 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
info = vmw_priv->fb_info;
par = info->par;
- vmw_fb_set_par(info);
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);
return 0;
}
+
+/**
+ * vmw_fb_refresh - Refresh fb display
+ *
+ * @vmw_priv: Pointer to device private
+ *
+ * Call into kms to show the fbdev display(s).
+ */
+void vmw_fb_refresh(struct vmw_private *vmw_priv)
+{
+ if (!vmw_priv->fb_info)
+ return;
+
+ vmw_fb_set_par(vmw_priv->fb_info);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index d6b1c50..9ed544f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -588,6 +588,10 @@ int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_user_fence *ufence;
struct vmw_fence_obj *tmp;
struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
int ret;
/*
@@ -596,7 +600,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
*/
ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
- false, false);
+ &ctx);
if (unlikely(ret != 0))
return ret;
@@ -897,11 +901,12 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
spin_lock_irq(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
- struct timeval tv;
+ struct timespec64 ts;
- do_gettimeofday(&tv);
- *eaction->tv_sec = tv.tv_sec;
- *eaction->tv_usec = tv.tv_usec;
+ ktime_get_ts64(&ts);
+ /* monotonic time, so no y2038 overflow */
+ *eaction->tv_sec = ts.tv_sec;
+ *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}
drm_send_event_locked(dev, eaction->event);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 67f8446..c5e8eae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -316,7 +316,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
out_no_surface:
ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
out_no_fb:
drm_modeset_unlock_all(dev);
out_no_copy:
@@ -393,7 +393,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
out_no_fb:
drm_modeset_unlock_all(dev);
out_no_copy:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index fcd5814..f11601b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,7 +31,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_rect.h>
-
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -393,13 +392,13 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
du->cursor_surface = vps->surf;
du->cursor_dmabuf = vps->dmabuf;
- /* setup new image */
if (vps->surf) {
du->cursor_age = du->cursor_surface->snooper.age;
ret = vmw_cursor_update_image(dev_priv,
vps->surf->snooper.image,
- 64, 64, hotspot_x, hotspot_y);
+ 64, 64, hotspot_x,
+ hotspot_y);
} else if (vps->dmabuf) {
ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
plane->state->crtc_w,
@@ -441,31 +440,17 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct drm_crtc_state *crtc_state = NULL;
struct drm_framebuffer *new_fb = state->fb;
- bool visible;
-
- struct drm_rect src = {
- .x1 = state->src_x,
- .y1 = state->src_y,
- .x2 = state->src_x + state->src_w,
- .y2 = state->src_y + state->src_h,
- };
- struct drm_rect dest = {
- .x1 = state->crtc_x,
- .y1 = state->crtc_y,
- .x2 = state->crtc_x + state->crtc_w,
- .y2 = state->crtc_y + state->crtc_h,
- };
- struct drm_rect clip = dest;
int ret;
- ret = drm_plane_helper_check_update(plane, state->crtc, new_fb,
- &src, &dest, &clip,
- DRM_MODE_ROTATE_0,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true, &visible);
+ if (state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
if (!ret && new_fb) {
struct drm_crtc *crtc = state->crtc;
@@ -476,12 +461,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
vcs = vmw_connector_state_to_vcs(du->connector.state);
- if ((dest.x2 > new_fb->width ||
- dest.y2 > new_fb->height)) {
- DRM_ERROR("CRTC area outside of framebuffer\n");
- return -EINVAL;
- }
-
/* Only one active implicit framebuffer at a time. */
mutex_lock(&dev_priv->global_kms_state_mutex);
if (vcs->is_implicit && dev_priv->implicit_fb &&
@@ -517,11 +496,22 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
struct vmw_surface *surface = NULL;
struct drm_framebuffer *fb = new_state->fb;
+ struct drm_rect src = drm_plane_state_src(new_state);
+ struct drm_rect dest = drm_plane_state_dest(new_state);
/* Turning off */
if (!fb)
return ret;
+ ret = drm_plane_helper_check_update(plane, new_state->crtc, fb,
+ &src, &dest,
+ DRM_MODE_ROTATE_0,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true, &new_state->visible);
+ if (!ret)
+ return ret;
+
/* A lot of the code assumes this */
if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
@@ -586,13 +576,9 @@ void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
crtc->state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
- if (drm_crtc_vblank_get(crtc) == 0)
- drm_crtc_arm_vblank_event(crtc, event);
- else
- drm_crtc_send_vblank_event(crtc, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
-
}
@@ -695,9 +681,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
return NULL;
vps->pinned = 0;
-
- /* Mapping is managed by prepare_fb/cleanup_fb */
- memset(&vps->host_map, 0, sizeof(vps->host_map));
vps->cpp = 0;
/* Each ref counted resource needs to be acquired again */
@@ -759,11 +742,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
/* Should have been freed by cleanup_fb */
- if (vps->host_map.virtual) {
- DRM_ERROR("Host mapping not freed\n");
- ttm_bo_kunmap(&vps->host_map);
- }
-
if (vps->surf)
vmw_surface_unreference(&vps->surf);
@@ -908,11 +886,11 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
if (dev_priv->active_display_unit == vmw_du_screen_object)
ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL);
+ num_clips, inc, NULL, NULL);
else
ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL);
+ num_clips, inc, NULL, NULL);
vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
@@ -948,11 +926,12 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
switch (dev_priv->active_display_unit) {
case vmw_du_screen_object:
return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
- user_fence_rep, vclips, num_clips);
+ user_fence_rep, vclips, num_clips,
+ NULL);
case vmw_du_screen_target:
return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
user_fence_rep, NULL, vclips, num_clips,
- 1, false, true);
+ 1, false, true, NULL);
default:
WARN_ONCE(true,
"Readback called with invalid display system.\n");
@@ -1110,12 +1089,12 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
case vmw_du_screen_target:
ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
clips, NULL, num_clips, increment,
- true, true);
+ true, true, NULL);
break;
case vmw_du_screen_object:
ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
clips, NULL, num_clips,
- increment, true, NULL);
+ increment, true, NULL, NULL);
break;
case vmw_du_legacy:
ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
@@ -1141,12 +1120,14 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
};
/**
- * Pin the dmabuffer to the start of vram.
+ * Pin the dmabuffer in a location suitable for access by the
+ * display system.
*/
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_dma_buffer *buf;
+ struct ttm_placement *placement;
int ret;
buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
@@ -1163,12 +1144,24 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
break;
case vmw_du_screen_object:
case vmw_du_screen_target:
- if (vfb->dmabuf)
- return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
- false);
+ if (vfb->dmabuf) {
+ if (dev_priv->capabilities & SVGA_CAP_3D) {
+ /*
+ * Use surface DMA to get content to
+ * screen target surface.
+ */
+ placement = &vmw_vram_gmr_placement;
+ } else {
+ /* Use CPU blit. */
+ placement = &vmw_sys_placement;
+ }
+ } else {
+ /* Use surface / image update */
+ placement = &vmw_mob_placement;
+ }
- return vmw_dmabuf_pin_in_placement(dev_priv, buf,
- &vmw_mob_placement, false);
+ return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
+ false);
default:
return -EINVAL;
}
@@ -1559,35 +1552,10 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
return drm_atomic_helper_check(dev, state);
}
-
-/**
- * vmw_kms_atomic_commit - Perform an atomic state commit
- *
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: Whether nonblocking behaviour is requested
- *
- * This is a simple wrapper around drm_atomic_helper_commit() for
- * us to clear the nonblocking value.
- *
- * Nonblocking commits currently cause synchronization issues
- * for vmwgfx.
- *
- * RETURNS
- * Zero for success or negative error code on failure.
- */
-int vmw_kms_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- return drm_atomic_helper_commit(dev, state, false);
-}
-
-
static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
.atomic_check = vmw_kms_atomic_check_modeset,
- .atomic_commit = vmw_kms_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
@@ -1601,7 +1569,7 @@ static int vmw_kms_generic_present(struct vmw_private *dev_priv,
{
return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
&surface->res, destX, destY,
- num_clips, 1, NULL);
+ num_clips, 1, NULL, NULL);
}
@@ -1620,7 +1588,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
case vmw_du_screen_target:
ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
&surface->res, destX, destY,
- num_clips, 1, NULL);
+ num_clips, 1, NULL, NULL);
break;
case vmw_du_screen_object:
ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
@@ -2348,10 +2316,16 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
dirty->dev_priv = dev_priv;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
+ /* If crtc is passed, no need to iterate over other display units */
+ if (dirty->crtc) {
+ units[num_units++] = vmw_crtc_to_du(dirty->crtc);
+ } else {
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+ head) {
+ if (crtc->primary->fb != &framebuffer->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
}
for (k = 0; k < num_units; k++) {
@@ -2450,14 +2424,21 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible,
- bool validate_as_mob)
+ bool validate_as_mob,
+ bool for_cpu_blit)
{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = interruptible,
+ .no_wait_gpu = false};
struct ttm_buffer_object *bo = &buf->base;
int ret;
ttm_bo_reserve(bo, false, false, NULL);
- ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
- validate_as_mob);
+ if (for_cpu_blit)
+ ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
+ else
+ ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+ validate_as_mob);
if (ret)
ttm_bo_unreserve(bo);
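The hunk above reflects the TTM API change in which wait behaviour is carried in a struct ttm_operation_ctx rather than separate bool arguments to ttm_bo_validate(). A minimal sketch of that calling convention with a hypothetical helper name (placement selection stays with the caller):

static int example_validate_buffer(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement,
				   bool interruptible)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false,
	};
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (ret)
		return ret;

	/* Only the wait semantics moved into the context structure. */
	ret = ttm_bo_validate(bo, placement, &ctx);
	ttm_bo_unreserve(bo);

	return ret;
}
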
@@ -2531,9 +2512,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
* Helper to be used if an error forces the caller to undo the actions of
* vmw_kms_helper_resource_prepare.
*/
-void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
{
- vmw_kms_helper_buffer_revert(res->backup);
+ struct vmw_resource *res = ctx->res;
+
+ vmw_kms_helper_buffer_revert(ctx->buf);
+ vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -2550,10 +2534,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
* interrupted by a signal.
*/
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible)
+ bool interruptible,
+ struct vmw_validation_ctx *ctx)
{
int ret = 0;
+ ctx->buf = NULL;
+ ctx->res = res;
+
if (interruptible)
ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
else
@@ -2569,9 +2557,12 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
if (res->backup) {
ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
interruptible,
- res->dev_priv->has_mob);
+ res->dev_priv->has_mob,
+ false);
if (ret)
goto out_unreserve;
+
+ ctx->buf = vmw_dmabuf_reference(res->backup);
}
ret = vmw_resource_validate(res);
if (ret)
@@ -2579,7 +2570,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
return 0;
out_revert:
- vmw_kms_helper_buffer_revert(res->backup);
+ vmw_kms_helper_buffer_revert(ctx->buf);
out_unreserve:
vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
@@ -2595,11 +2586,13 @@ out_unlock:
* @out_fence: Optional pointer to a fence pointer. If non-NULL, a
* ref-counted fence pointer is returned here.
*/
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
- struct vmw_fence_obj **out_fence)
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
+ struct vmw_fence_obj **out_fence)
{
- if (res->backup || out_fence)
- vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+ struct vmw_resource *res = ctx->res;
+
+ if (ctx->buf || out_fence)
+ vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
vmw_resource_unreserve(res, false, NULL, 0);
@@ -2865,3 +2858,61 @@ int vmw_kms_set_config(struct drm_mode_set *set,
return drm_atomic_helper_set_config(set, ctx);
}
+
+
+/**
+ * vmw_kms_suspend - Save modesetting state and turn modesetting off.
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ */
+int vmw_kms_suspend(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
+ if (IS_ERR(dev_priv->suspend_state)) {
+ int ret = PTR_ERR(dev_priv->suspend_state);
+
+ DRM_ERROR("Failed kms suspend: %d\n", ret);
+ dev_priv->suspend_state = NULL;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/**
+ * vmw_kms_resume - Re-enable modesetting and restore state
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ *
+ * State is resumed from a previous vmw_kms_suspend(). It's illegal
+ * to call this function without a previous vmw_kms_suspend().
+ */
+int vmw_kms_resume(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ int ret;
+
+ if (WARN_ON(!dev_priv->suspend_state))
+ return 0;
+
+ ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
+ dev_priv->suspend_state = NULL;
+
+ return ret;
+}
+
+/**
+ * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
+ *
+ * @dev: Pointer to the drm device
+ */
+void vmw_kms_lost_device(struct drm_device *dev)
+{
+ drm_atomic_helper_shutdown(dev);
+}
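The suspend/resume helpers added above are thin wrappers around the atomic suspend helpers. A sketch of how driver PM callbacks typically use them — the function names and the static state pointer are illustrative, not vmwgfx code:

static struct drm_atomic_state *example_suspend_state;

static int example_pm_freeze(struct drm_device *dev)
{
	example_suspend_state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(example_suspend_state)) {
		int ret = PTR_ERR(example_suspend_state);

		example_suspend_state = NULL;
		return ret;
	}

	return 0;
}

static int example_pm_restore(struct drm_device *dev)
{
	if (WARN_ON(!example_suspend_state))
		return 0;

	return drm_atomic_helper_resume(dev, example_suspend_state);
}
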
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index cd9da2d..6b7c012 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -50,6 +50,7 @@
* @unit: The current display unit. Set up by the helper before a call to @clip.
* @cmd: The allocated fifo space. Set up by the helper before the first @clip
* call.
+ * @crtc: The crtc for which to build dirty commands.
* @num_hits: Number of clip rect commands for this display unit.
* Cleared by the helper before the first @clip call. Updated by the @clip
* callback.
@@ -71,6 +72,7 @@ struct vmw_kms_dirty {
struct vmw_private *dev_priv;
struct vmw_display_unit *unit;
void *cmd;
+ struct drm_crtc *crtc;
u32 num_hits;
s32 fb_x;
s32 fb_y;
@@ -175,7 +177,6 @@ struct vmw_plane_state {
int pinned;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
@@ -240,6 +241,11 @@ struct vmw_display_unit {
int set_gui_y;
};
+struct vmw_validation_ctx {
+ struct vmw_resource *res;
+ struct vmw_dma_buffer *buf;
+};
+
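struct vmw_validation_ctx pairs the reserved resource with a reference to its backup buffer, so revert/finish operate on exactly what prepare validated. A simplified sketch of the intended call sequence using the reworked helpers above (error handling abbreviated; the dirty-command submission step is elided):

static int example_surface_dirty(struct vmw_resource *srf)
{
	struct vmw_validation_ctx ctx;
	struct vmw_fence_obj *fence = NULL;
	int ret;

	/* Reserves the resource and, if present, validates and references
	 * its backup buffer (stored in ctx.buf). */
	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
	if (ret)
		return ret;

	/* ... reserve FIFO space and emit dirty commands here ... */

	/* Unreserves and optionally returns a fence for the submission. */
	vmw_kms_helper_resource_finish(&ctx, &fence);
	if (fence)
		vmw_fence_obj_unreference(&fence);

	return 0;
}
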
#define vmw_crtc_to_du(x) \
container_of(x, struct vmw_display_unit, crtc)
#define vmw_connector_to_du(x) \
@@ -287,7 +293,8 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible,
- bool validate_as_mob);
+ bool validate_as_mob,
+ bool for_cpu_blit);
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
@@ -296,9 +303,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_vmw_fence_rep __user *
user_fence_rep);
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible);
-void vmw_kms_helper_resource_revert(struct vmw_resource *res);
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+ bool interruptible,
+ struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
struct vmw_fence_obj **out_fence);
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
@@ -398,20 +406,23 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence);
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc);
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
unsigned num_clips, int increment,
bool interruptible,
- struct vmw_fence_obj **out_fence);
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc);
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *vclips,
- uint32_t num_clips);
+ uint32_t num_clips,
+ struct drm_crtc *crtc);
/*
* Screen Target Display Unit functions - vmwgfx_stdu.c
@@ -425,7 +436,8 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence);
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc);
int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
@@ -435,9 +447,9 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
uint32_t num_clips,
int increment,
bool to_surface,
- bool interruptible);
+ bool interruptible,
+ struct drm_crtc *crtc);
int vmw_kms_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx);
-
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index b17f08f..d07c585 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -240,6 +240,10 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
unsigned long offset;
unsigned long bo_size;
struct vmw_otable *otables = batch->otables;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
SVGAOTableType i;
int ret;
@@ -256,15 +260,14 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
ret = ttm_bo_create(&dev_priv->bdev, bo_size,
ttm_bo_type_device,
&vmw_sys_ne_placement,
- 0, false, NULL,
- &batch->otable_bo);
+ 0, false, &batch->otable_bo);
if (unlikely(ret != 0))
goto out_no_bo;
ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
+ ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
if (unlikely(ret != 0))
goto out_unreserve;
ret = vmw_bo_map_dma(batch->otable_bo);
@@ -430,19 +433,24 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
struct vmw_mob *mob)
{
int ret;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
BUG_ON(mob->pt_bo != NULL);
ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
ttm_bo_type_device,
&vmw_sys_ne_placement,
- 0, false, NULL, &mob->pt_bo);
+ 0, false, &mob->pt_bo);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+ ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
if (unlikely(ret != 0))
goto out_unreserve;
ret = vmw_bo_map_dma(mob->pt_bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 9700099..cdff992 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -328,7 +328,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
{
struct rpc_channel channel;
char *msg, *reply = NULL;
- size_t msg_len, reply_len = 0;
+ size_t reply_len = 0;
int ret = 0;
@@ -338,15 +338,12 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
if (!guest_info_param || !length)
return -EINVAL;
- msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
- msg = kzalloc(msg_len, GFP_KERNEL);
+ msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
if (!msg) {
DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
return -ENOMEM;
}
- sprintf(msg, "info-get %s", guest_info_param);
-
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
vmw_send_msg(&channel, msg) ||
vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
@@ -388,7 +385,6 @@ int vmw_host_log(const char *log)
{
struct rpc_channel channel;
char *msg;
- int msg_len;
int ret = 0;
@@ -398,15 +394,12 @@ int vmw_host_log(const char *log)
if (!log)
return ret;
- msg_len = strlen(log) + strlen("log ") + 1;
- msg = kzalloc(msg_len, GFP_KERNEL);
+ msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) {
DRM_ERROR("Cannot allocate memory for log message\n");
return -ENOMEM;
}
- sprintf(msg, "log %s", log);
-
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
vmw_send_msg(&channel, msg) ||
vmw_close_channel(&channel)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a96f90f..6b3a942 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -354,6 +354,7 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ vmw_dma_buffer_unmap(vmw_bo);
kfree(vmw_bo);
}
@@ -361,6 +362,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+ vmw_dma_buffer_unmap(&vmw_user_bo->dma);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
@@ -384,8 +386,8 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
- 0, interruptible,
- NULL, acc_size, NULL, NULL, bo_free);
+ 0, interruptible, acc_size,
+ NULL, NULL, bo_free);
return ret;
}
@@ -968,6 +970,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct list_head val_list;
bool backup_dirty = false;
int ret;
@@ -992,7 +995,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
backup_dirty = res->backup_dirty;
ret = ttm_bo_validate(&res->backup->base,
res->func->backup_placement,
- true, false);
+ &ctx);
if (unlikely(ret != 0))
goto out_no_validate;
@@ -1238,6 +1241,12 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+ /*
+ * Kill any cached kernel maps before move. An optimization could
+ * be to do this iff source or destination memory type is VRAM.
+ */
+ vmw_dma_buffer_unmap(dma_buf);
+
if (mem->mem_type != VMW_PL_MOB) {
struct vmw_resource *res, *n;
struct ttm_validate_buffer val_buf;
@@ -1261,6 +1270,21 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
}
+/**
+ * vmw_resource_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy != vmw_dmabuf_bo_free &&
+ bo->destroy != vmw_user_dmabuf_destroy)
+ return;
+
+ /* Kill any cached kernel maps before swapout */
+ vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+}
+
/**
* vmw_query_readback_all - Read back cached query states
@@ -1446,6 +1470,7 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
*/
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
+ struct ttm_operation_ctx ctx = { interruptible, false };
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -1466,7 +1491,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
ret = ttm_bo_validate
(&vbo->base,
res->func->backup_placement,
- interruptible, false);
+ &ctx);
if (ret) {
ttm_bo_unreserve(&vbo->base);
goto out_no_validate;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 63a4cd7..648f812 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -316,69 +316,21 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
- struct vmw_fence_obj *fence = NULL;
- struct drm_vmw_rect vclips;
int ret;
if (!vmw_kms_crtc_flippable(dev_priv, crtc))
return -EINVAL;
- flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
- ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
+ ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
if (ret) {
DRM_ERROR("Page flip error %d.\n", ret);
return ret;
}
- /* do a full screen dirty update */
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
-
- if (vfb->dmabuf)
- ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
- NULL, &vclips, 1, 1,
- true, &fence);
- else
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
- NULL, &vclips, NULL,
- 0, 0, 1, 1, &fence);
-
-
- if (ret != 0)
- goto out_no_fence;
- if (!fence) {
- ret = -EINVAL;
- goto out_no_fence;
- }
-
- if (event) {
- struct drm_file *file_priv = event->base.file_priv;
-
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
- }
-
- /*
- * No need to hold on to this now. The only cleanup
- * we need to do if we fail is unref the fence.
- */
- vmw_fence_obj_unreference(&fence);
-
if (vmw_crtc_to_du(crtc)->is_implicit)
vmw_kms_update_implicit_fb(dev_priv, crtc);
return ret;
-
-out_no_fence:
- drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
- return ret;
}
static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
@@ -453,7 +405,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+ struct drm_crtc *crtc = plane->state->crtc ?
+ plane->state->crtc : old_state->crtc;
+ if (vps->dmabuf)
+ vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
vmw_dmabuf_unreference(&vps->dmabuf);
vps->dmabuf_size = 0;
@@ -491,10 +447,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
}
size = new_state->crtc_w * new_state->crtc_h * 4;
+ dev_priv = vmw_priv(crtc->dev);
if (vps->dmabuf) {
- if (vps->dmabuf_size == size)
- return 0;
+ if (vps->dmabuf_size == size) {
+ /*
+ * Note that this might temporarily up the pin-count
+ * to 2, until cleanup_fb() is called.
+ */
+ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
+ true);
+ }
vmw_dmabuf_unreference(&vps->dmabuf);
vps->dmabuf_size = 0;
@@ -504,7 +467,6 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
if (!vps->dmabuf)
return -ENOMEM;
- dev_priv = vmw_priv(crtc->dev);
vmw_svga_enable(dev_priv);
/* After we have alloced the backing store might not be able to
@@ -515,13 +477,16 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
vmw_overlay_resume_all(dev_priv);
-
- if (ret != 0)
+ if (ret) {
vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
- else
- vps->dmabuf_size = size;
+ return ret;
+ }
- return ret;
+ /*
+ * TTM already thinks the buffer is pinned, but make sure the
+ * pin_count is upped.
+ */
+ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
}
@@ -530,9 +495,71 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_crtc *crtc = plane->state->crtc;
+ struct drm_pending_vblank_event *event = NULL;
+ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (crtc && plane->state->fb) {
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct vmw_framebuffer *vfb =
+ vmw_framebuffer_to_vfb(plane->state->fb);
+ struct drm_vmw_rect vclips;
+
+ vclips.x = crtc->x;
+ vclips.y = crtc->y;
+ vclips.w = crtc->mode.hdisplay;
+ vclips.h = crtc->mode.vdisplay;
+
+ if (vfb->dmabuf)
+ ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
+ &vclips, 1, 1, true,
+ &fence, crtc);
+ else
+ ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
+ &vclips, NULL, 0, 0,
+ 1, 1, &fence, crtc);
+
+ /*
+ * We cannot really fail this function, so if we do, then output
+ * an error and maintain consistent atomic state.
+ */
+ if (ret != 0)
+ DRM_ERROR("Failed to update screen.\n");
- if (crtc)
crtc->primary->fb = plane->state->fb;
+ } else {
+ /*
+ * When disabling a plane, CRTC and FB should always be NULL
+ * together, otherwise it's an error.
+ * Here the primary plane is being disabled, so blank
+ * the screen object display unit, if not already done.
+ */
+ return;
+ }
+
+ event = crtc->state->event;
+ /*
+ * In case of failure and other cases, vblank event will be sent in
+ * vmw_du_crtc_atomic_flush.
+ */
+ if (event && fence) {
+ struct drm_file *file_priv = event->base.file_priv;
+
+ ret = vmw_event_fence_action_queue(file_priv,
+ fence,
+ &event->base,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
+ true);
+
+ if (unlikely(ret != 0))
+ DRM_ERROR("Failed to queue event on fence.\n");
+ else
+ crtc->state->event = NULL;
+ }
+
+ if (fence)
+ vmw_fence_obj_unreference(&fence);
}
@@ -892,6 +919,7 @@ static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
+ * @crtc: If crtc is passed, perform surface dirty on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
@@ -904,17 +932,19 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence)
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc)
{
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_kms_sou_surface_dirty sdirty;
+ struct vmw_validation_ctx ctx;
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true);
+ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
if (ret)
return ret;
@@ -923,6 +953,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
sdirty.base.dev_priv = dev_priv;
sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
sizeof(SVGASignedRect) * num_clips;
+ sdirty.base.crtc = crtc;
sdirty.sid = srf->id;
sdirty.left = sdirty.top = S32_MAX;
@@ -933,7 +964,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
- vmw_kms_helper_resource_finish(srf, out_fence);
+ vmw_kms_helper_resource_finish(&ctx, out_fence);
return ret;
}
@@ -994,6 +1025,7 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
+ * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
@@ -1004,7 +1036,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct drm_vmw_rect *vclips,
unsigned num_clips, int increment,
bool interruptible,
- struct vmw_fence_obj **out_fence)
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc)
{
struct vmw_dma_buffer *buf =
container_of(framebuffer, struct vmw_framebuffer_dmabuf,
@@ -1013,7 +1046,7 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
int ret;
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
- false);
+ false, false);
if (ret)
return ret;
@@ -1021,6 +1054,7 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out_revert;
+ dirty.crtc = crtc;
dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
dirty.clip = vmw_sou_dmabuf_clip;
dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
@@ -1092,6 +1126,7 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
* @num_clips: Number of clip rects in @vclips.
+ * @crtc: If crtc is passed, readback on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
@@ -1101,14 +1136,16 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *vclips,
- uint32_t num_clips)
+ uint32_t num_clips,
+ struct drm_crtc *crtc)
{
struct vmw_dma_buffer *buf =
container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
- ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+ ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
+ false);
if (ret)
return ret;
@@ -1116,6 +1153,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out_revert;
+ dirty.crtc = crtc;
dirty.fifo_commit = vmw_sou_readback_fifo_commit;
dirty.clip = vmw_sou_readback_clip;
dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 9b832f1..73b8e9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -607,6 +607,10 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_shader *shader;
struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
if (!vmw_shader_dx_size)
@@ -616,7 +620,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
return -EINVAL;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
- false, true);
+ &ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
@@ -730,6 +734,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
/*
@@ -742,7 +750,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
- false, true);
+ &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
@@ -800,6 +808,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_shader *shader;
struct vmw_resource *res;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
/*
@@ -812,7 +824,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_shader_size,
- false, true);
+ &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
@@ -970,6 +982,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
size_t size,
struct list_head *list)
{
+ struct ttm_operation_ctx ctx = { false, true };
struct vmw_dma_buffer *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
@@ -1005,7 +1018,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
WARN_ON(is_iomem);
ttm_bo_kunmap(&map);
- ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+ ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 051d3b3..a0cb310 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -149,6 +149,10 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *res;
struct vmw_resource *tmp;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
size_t alloc_size;
size_t account_size;
int ret;
@@ -162,7 +166,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
return ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
- false, true);
+ &ctx);
ttm_read_unlock(&dev_priv->reservation_sem);
if (ret) {
if (ret != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 5a73eeb..d3573c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -329,6 +329,10 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_private *dev_priv = ctx->dev_priv;
struct vmw_resource *res;
struct vmw_view *view;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
size_t size;
int ret;
@@ -345,7 +349,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
size = offsetof(struct vmw_view, cmd) + cmd_size;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for view"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b68d748..67331f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -114,7 +114,6 @@ struct vmw_screen_target_display_unit {
bool defined;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
@@ -492,71 +491,17 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
- struct drm_vmw_rect vclips;
int ret;
- dev_priv = vmw_priv(crtc->dev);
- stdu = vmw_crtc_to_stdu(crtc);
-
if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
return -EINVAL;
- /*
- * We're always async, but the helper doesn't know how to set async
- * so lie to the helper. Also, the helper expects someone
- * to pick the event up from the crtc state, and if nobody does,
- * it will free it. Since we handle the event in this function,
- * don't hand it to the helper.
- */
- flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
- ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
+ ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
if (ret) {
DRM_ERROR("Page flip error %d.\n", ret);
return ret;
}
- if (stdu->base.is_implicit)
- vmw_kms_update_implicit_fb(dev_priv, crtc);
-
- /*
- * Now that we've bound a new surface to the screen target,
- * update the contents.
- */
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
-
- if (vfb->dmabuf)
- ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL, &vclips,
- 1, 1, true, false);
- else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, &vclips,
- NULL, 0, 0, 1, 1, NULL);
- if (ret) {
- DRM_ERROR("Page flip update error %d.\n", ret);
- return ret;
- }
-
- if (event) {
- struct vmw_fence_obj *fence = NULL;
- struct drm_file *file_priv = event->base.file_priv;
-
- vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (!fence)
- return -ENOMEM;
-
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
- vmw_fence_obj_unreference(&fence);
- } else {
- (void) vmw_fifo_flush(dev_priv, false);
- }
-
return 0;
}
@@ -693,10 +638,9 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
container_of(dirty->unit, typeof(*stdu), base);
s32 width, height;
s32 src_pitch, dst_pitch;
- u8 *src, *dst;
- bool not_used;
- struct ttm_bo_kmap_obj guest_map;
- int ret;
+ struct ttm_buffer_object *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
if (!dirty->num_hits)
return;
@@ -707,57 +651,38 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
if (width == 0 || height == 0)
return;
- ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
- &guest_map);
- if (ret) {
- DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
- ret);
- goto out_cleanup;
- }
-
- /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
- src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
- src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
- src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
-
- dst_pitch = ddirty->pitch;
- dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
- dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
-
-
- /* Figure out the real direction */
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
- u8 *tmp;
- s32 tmp_pitch;
-
- tmp = src;
- tmp_pitch = src_pitch;
+ /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */
+ dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_bo = &stdu->display_srf->res.backup->base;
+ dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
- src = dst;
- src_pitch = dst_pitch;
+ src_pitch = ddirty->pitch;
+ src_bo = &ddirty->buf->base;
+ src_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp;
- dst = tmp;
- dst_pitch = tmp_pitch;
+ /* Swap src and dst if the assumption was wrong. */
+ if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM) {
+ swap(dst_pitch, src_pitch);
+ swap(dst_bo, src_bo);
+ swap(src_offset, dst_offset);
}
- /* CPU Blit */
- while (height-- > 0) {
- memcpy(dst, src, width * stdu->cpp);
- dst += dst_pitch;
- src += src_pitch;
- }
+ (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
+ src_bo, src_offset, src_pitch,
+ width * stdu->cpp, height, &diff);
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
+ if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM &&
+ drm_rect_visible(&diff.rect)) {
struct vmw_private *dev_priv;
struct vmw_stdu_update *cmd;
struct drm_clip_rect region;
int ret;
/* We are updating the actual surface, not a proxy */
- region.x1 = ddirty->left;
- region.x2 = ddirty->right;
- region.y1 = ddirty->top;
- region.y2 = ddirty->bottom;
+ region.x1 = diff.rect.x1;
+ region.x2 = diff.rect.x2;
+ region.y1 = diff.rect.y1;
+ region.y2 = diff.rect.y2;
ret = vmw_kms_update_proxy(
(struct vmw_resource *) &stdu->display_srf->res,
(const struct drm_clip_rect *) &region, 1, 1);
@@ -774,13 +699,12 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
}
vmw_stdu_populate_update(cmd, stdu->base.unit,
- ddirty->left, ddirty->right,
- ddirty->top, ddirty->bottom);
+ region.x1, region.x2,
+ region.y1, region.y2);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
- ttm_bo_kunmap(&guest_map);
out_cleanup:
ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
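The rewritten commit path replaces the kmap-and-memcpy loop with vmw_bo_cpu_blit(), which also records the bounding box of bytes that actually changed. A sketch of that call pattern, assuming an int return status as the (void) cast above suggests (helper name and parameters are illustrative):

static int example_cpu_blit(struct ttm_buffer_object *dst_bo, u32 dst_offset,
			    u32 dst_pitch,
			    struct ttm_buffer_object *src_bo, u32 src_offset,
			    u32 src_pitch,
			    u32 width_bytes, u32 height, u32 cpp)
{
	/* Tracks the bounding box of bytes that actually differed. */
	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(cpp);
	int ret;

	ret = vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
			      src_bo, src_offset, src_pitch,
			      width_bytes, height, &diff);
	if (ret)
		return ret;

	/* Skip the device update entirely if nothing changed. */
	if (!drm_rect_visible(&diff.rect))
		return 0;

	/* ... emit an update command covering diff.rect here ... */
	return 0;
}
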
@@ -802,6 +726,7 @@ out_cleanup:
* @to_surface: Whether to DMA to the screen target system as opposed to
* from the screen target system.
* @interruptible: Whether to perform waits interruptible if possible.
+ * @crtc: If crtc is passed, perform stdu dma on that crtc only.
*
* If DMA-ing till the screen target system, the function will also notify
* the screen target system that a bounding box of the cliprects has been
@@ -818,15 +743,22 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
uint32_t num_clips,
int increment,
bool to_surface,
- bool interruptible)
+ bool interruptible,
+ struct drm_crtc *crtc)
{
struct vmw_dma_buffer *buf =
container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
struct vmw_stdu_dirty ddirty;
int ret;
+ bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+ /*
+ * VMs without 3D support don't have the surface DMA command and
+ * we'll be using a CPU blit, and the framebuffer should be moved out
+ * of VRAM.
+ */
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
- false);
+ false, cpu_blit);
if (ret)
return ret;
@@ -845,13 +777,15 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
if (to_surface)
ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
- /* 2D VMs cannot use SVGA_3D_CMD_SURFACE_DMA so do CPU blit instead */
- if (!(dev_priv->capabilities & SVGA_CAP_3D)) {
+
+ if (cpu_blit) {
ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit;
ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip;
ddirty.base.fifo_reserve_size = 0;
}
+ ddirty.base.crtc = crtc;
+
ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
0, 0, num_clips, increment, &ddirty.base);
vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
@@ -963,6 +897,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
+ * @crtc: If crtc is passed, perform surface dirty on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
@@ -975,17 +910,19 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence)
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc)
{
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_stdu_dirty sdirty;
+ struct vmw_validation_ctx ctx;
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true);
+ ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
if (ret)
return ret;
@@ -1000,6 +937,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
sizeof(SVGA3dCopyBox) * num_clips +
sizeof(struct vmw_stdu_update);
+ sdirty.base.crtc = crtc;
sdirty.sid = srf->id;
sdirty.left = sdirty.top = S32_MAX;
sdirty.right = sdirty.bottom = S32_MIN;
@@ -1008,7 +946,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
out_finish:
- vmw_kms_helper_resource_finish(srf, out_fence);
+ vmw_kms_helper_resource_finish(&ctx, out_fence);
return ret;
}
@@ -1118,9 +1056,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->host_map.virtual)
- ttm_bo_kunmap(&vps->host_map);
-
if (vps->surf)
WARN_ON(!vps->pinned);
@@ -1282,24 +1217,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
* so cache these mappings
*/
if (vps->content_fb_type == SEPARATE_DMA &&
- !(dev_priv->capabilities & SVGA_CAP_3D)) {
- ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
- vps->surf->res.backup->base.num_pages,
- &vps->host_map);
- if (ret) {
- DRM_ERROR("Failed to map display buffer to CPU\n");
- goto out_srf_unpin;
- }
-
+ !(dev_priv->capabilities & SVGA_CAP_3D))
vps->cpp = new_fb->pitches[0] / new_fb->width;
- }
return 0;
-out_srf_unpin:
- vmw_resource_unpin(&vps->surf->res);
- vps->pinned--;
-
out_srf_unref:
vmw_surface_unreference(&vps->surf);
return ret;
@@ -1322,41 +1244,104 @@ static void
vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct vmw_private *dev_priv;
- struct vmw_screen_target_display_unit *stdu;
struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
- struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+ struct drm_crtc *crtc = plane->state->crtc;
+ struct vmw_screen_target_display_unit *stdu;
+ struct drm_pending_vblank_event *event;
+ struct vmw_private *dev_priv;
int ret;
- stdu = vmw_crtc_to_stdu(crtc);
- dev_priv = vmw_priv(crtc->dev);
+ /*
+ * We cannot really fail this function, so if we do, then output an
+ * error and maintain consistent atomic state.
+ */
+ if (crtc && plane->state->fb) {
+ struct vmw_framebuffer *vfb =
+ vmw_framebuffer_to_vfb(plane->state->fb);
+ struct drm_vmw_rect vclips;
+ stdu = vmw_crtc_to_stdu(crtc);
+ dev_priv = vmw_priv(crtc->dev);
+
+ stdu->display_srf = vps->surf;
+ stdu->content_fb_type = vps->content_fb_type;
+ stdu->cpp = vps->cpp;
+
+ vclips.x = crtc->x;
+ vclips.y = crtc->y;
+ vclips.w = crtc->mode.hdisplay;
+ vclips.h = crtc->mode.vdisplay;
+
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+ if (ret)
+ DRM_ERROR("Failed to bind surface to STDU.\n");
+
+ if (vfb->dmabuf)
+ ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
+ &vclips, 1, 1, true, false,
+ crtc);
+ else
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL,
+ &vclips, NULL, 0, 0,
+ 1, 1, NULL, crtc);
+ if (ret)
+ DRM_ERROR("Failed to update STDU.\n");
- stdu->display_srf = vps->surf;
- stdu->content_fb_type = vps->content_fb_type;
- stdu->cpp = vps->cpp;
- memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
+ crtc->primary->fb = plane->state->fb;
+ } else {
+ crtc = old_state->crtc;
+ stdu = vmw_crtc_to_stdu(crtc);
+ dev_priv = vmw_priv(crtc->dev);
- if (!stdu->defined)
- return;
+ /*
+ * When disabling a plane, CRTC and FB should always be NULL
+ * together, otherwise it's an error.
+ * Here the primary plane is being disabled, so blank the screen
+ * target display unit, if not already done.
+ */
+ if (!stdu->defined)
+ return;
- if (plane->state->fb)
- ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
- else
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (ret)
+ DRM_ERROR("Failed to blank STDU\n");
+
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+ if (ret)
+ DRM_ERROR("Failed to update STDU.\n");
+
+ return;
+ }
+ event = crtc->state->event;
/*
- * We cannot really fail this function, so if we do, then output an
- * error and quit
+ * In case of failure and other cases, vblank event will be sent in
+ * vmw_du_crtc_atomic_flush.
*/
- if (ret)
- DRM_ERROR("Failed to bind surface to STDU.\n");
- else
- crtc->primary->fb = plane->state->fb;
+ if (event && (ret == 0)) {
+ struct vmw_fence_obj *fence = NULL;
+ struct drm_file *file_priv = event->base.file_priv;
- ret = vmw_stdu_update_st(dev_priv, stdu);
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (ret)
- DRM_ERROR("Failed to update STDU.\n");
+ /*
+ * If fence is NULL, the device has already synchronized.
+ */
+ if (fence) {
+ ret = vmw_event_fence_action_queue(
+ file_priv, fence, &event->base,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
+ true);
+ if (ret)
+ DRM_ERROR("Failed to queue event on fence.\n");
+ else
+ crtc->state->event = NULL;
+
+ vmw_fence_obj_unreference(&fence);
+ }
+ } else {
+ (void) vmw_fifo_flush(dev_priv, false);
+ }
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 6ac094e..b236c48 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -345,7 +345,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
- vmw_fifo_resource_dec(dev_priv);
}
/**
@@ -407,6 +406,8 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
vmw_surface_define_encode(srf, cmd);
vmw_fifo_commit(dev_priv, submit_size);
+ vmw_fifo_resource_inc(dev_priv);
+
/*
* Surface memory usage accounting.
*/
@@ -558,6 +559,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
*/
vmw_resource_release_id(res);
+ vmw_fifo_resource_dec(dev_priv);
return 0;
}
@@ -579,15 +581,11 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &srf->res;
BUG_ON(!res_free);
- if (!dev_priv->has_mob)
- vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
(dev_priv->has_mob) ? &vmw_gb_surface_func :
&vmw_legacy_surface_func);
if (unlikely(ret != 0)) {
- if (!dev_priv->has_mob)
- vmw_fifo_resource_dec(dev_priv);
res_free(res);
return ret;
}
@@ -700,6 +698,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
int i, j;
uint32_t cur_bo_offset;
@@ -741,7 +743,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
+ size, &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for surface"
@@ -1479,6 +1481,10 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
struct vmw_surface *srf;
int ret;
u32 num_layers;
@@ -1525,7 +1531,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
return ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- user_accounting_size, false, true);
+ user_accounting_size, &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for surface"
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index e8b8266..6f4205e8 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -29,37 +29,19 @@
#include "zx_drm_drv.h"
#include "zx_vou.h"
-struct zx_drm_private {
- struct drm_fbdev_cma *fbdev;
-};
-
-static void zx_drm_fb_output_poll_changed(struct drm_device *drm)
-{
- struct zx_drm_private *priv = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(priv->fbdev);
-}
-
static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = zx_drm_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
-static void zx_drm_lastclose(struct drm_device *drm)
-{
- struct zx_drm_private *priv = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(priv->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
static struct drm_driver zx_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = zx_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
@@ -83,18 +65,12 @@ static struct drm_driver zx_drm_driver = {
static int zx_drm_bind(struct device *dev)
{
struct drm_device *drm;
- struct zx_drm_private *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
drm = drm_dev_alloc(&zx_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- drm->dev_private = priv;
dev_set_drvdata(dev, drm);
drm_mode_config_init(drm);
@@ -125,12 +101,9 @@ static int zx_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- priv->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
- if (IS_ERR(priv->fbdev)) {
- ret = PTR_ERR(priv->fbdev);
+ ret = drm_fb_cma_fbdev_init(drm, 32, 0);
+ if (ret) {
DRM_DEV_ERROR(dev, "failed to init cma fbdev: %d\n", ret);
- priv->fbdev = NULL;
goto out_poll_fini;
}
@@ -141,10 +114,7 @@ static int zx_drm_bind(struct device *dev)
return 0;
out_fbdev_fini:
- if (priv->fbdev) {
- drm_fbdev_cma_fini(priv->fbdev);
- priv->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
out_poll_fini:
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
@@ -152,7 +122,6 @@ out_unbind:
component_unbind_all(dev, drm);
out_unregister:
dev_set_drvdata(dev, NULL);
- drm->dev_private = NULL;
drm_dev_unref(drm);
return ret;
}
@@ -160,18 +129,13 @@ out_unregister:
static void zx_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct zx_drm_private *priv = drm->dev_private;
drm_dev_unregister(drm);
- if (priv->fbdev) {
- drm_fbdev_cma_fini(priv->fbdev);
- priv->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
component_unbind_all(dev, drm);
dev_set_drvdata(dev, NULL);
- drm->dev_private = NULL;
drm_dev_unref(drm);
}
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index b8abb1b..13ea90f 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -108,6 +108,7 @@ static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi,
int ret;
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ &hdmi->connector,
mode);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n",
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 18e7634..94545ad 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -55,7 +55,6 @@ static int zx_vl_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *fb = plane_state->fb;
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state;
- struct drm_rect clip;
int min_scale = FRAC_16_16(1, 8);
int max_scale = FRAC_16_16(8, 1);
@@ -75,14 +74,9 @@ static int zx_vl_plane_atomic_check(struct drm_plane *plane,
if (!plane_state->crtc)
return -EINVAL;
- clip.x1 = 0;
- clip.y1 = 0;
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
-
- return drm_plane_helper_check_state(plane_state, &clip,
- min_scale, max_scale,
- true, true);
+ return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ min_scale, max_scale,
+ true, true);
}
static int zx_vl_get_fmt(uint32_t format)
@@ -292,7 +286,6 @@ static int zx_gl_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *fb = plane_state->fb;
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state;
- struct drm_rect clip;
if (!crtc || !fb)
return 0;
@@ -310,15 +303,10 @@ static int zx_gl_plane_atomic_check(struct drm_plane *plane,
if (!plane_state->crtc)
return -EINVAL;
- clip.x1 = 0;
- clip.y1 = 0;
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
-
- return drm_plane_helper_check_state(plane_state, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
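drm_atomic_helper_check_plane_state() derives the clip rectangle from the CRTC state itself, which is why the hand-built drm_rect is dropped above. A minimal sketch of an atomic_check using it (function name illustrative):

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;

	if (!state->crtc || !state->fb)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
							state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	/* The helper clips against the CRTC mode; no manual drm_rect
	 * setup is needed any more. */
	return drm_atomic_helper_check_plane_state(state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   false, true);
}
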
static int zx_gl_get_fmt(uint32_t format)